
Commit 3a76a34

fix test failure
1 parent e14bd65 commit 3a76a34

File tree

2 files changed: +47 -13 lines


paimon-core/src/main/java/org/apache/paimon/table/format/FormatReadBuilder.java

Lines changed: 3 additions & 1 deletion
@@ -169,7 +169,9 @@ private FileRecordReader<InternalRow> createFileReader(
 
         // Create FormatReaderFactory directly
         FormatReaderFactory readerFactory =
-                formatDiscover.discover(formatIdentifier).createReaderFactory(readType(), filters);
+                formatDiscover
+                        .discover(formatIdentifier)
+                        .createReaderFactory(table.rowType(), readType(), filters);
         Pair<int[], RowType> partitionMapping =
                 PartitionUtils.getPartitionMapping(
                         table.partitionKeys(), table.rowType().getFields(), table.partitionType());
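
The change threads the table's full row type into the reader factory in addition to the projected read type, so a format reader can resolve the projected columns against the complete schema. A minimal call-shape sketch, using only names visible in the hunk (table, formatDiscover, formatIdentifier, readType(), and filters are members of FormatReadBuilder, inferred from context):

    // Discover the format's factory once, then build a reader factory from the
    // full schema, the projected schema, and any pushed-down filters.
    RowType fullRowType = table.rowType();   // complete table schema
    RowType projectedType = readType();      // only the columns this read needs
    FormatReaderFactory readerFactory =
            formatDiscover
                    .discover(formatIdentifier)
                    .createReaderFactory(fullRowType, projectedType, filters);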

paimon-core/src/test/java/org/apache/paimon/catalog/CatalogTestBase.java

Lines changed: 44 additions & 12 deletions
@@ -28,13 +28,15 @@
 import org.apache.paimon.format.FileFormatFactory;
 import org.apache.paimon.format.FormatWriter;
 import org.apache.paimon.format.FormatWriterFactory;
+import org.apache.paimon.format.HadoopCompressionType;
 import org.apache.paimon.format.SupportsDirectWrite;
 import org.apache.paimon.format.csv.CsvFileFormatFactory;
 import org.apache.paimon.format.parquet.ParquetFileFormatFactory;
 import org.apache.paimon.fs.FileIO;
 import org.apache.paimon.fs.Path;
 import org.apache.paimon.fs.PositionOutputStream;
 import org.apache.paimon.fs.ResolvingFileIO;
+import org.apache.paimon.io.DataFilePathFactory;
 import org.apache.paimon.options.CatalogOptions;
 import org.apache.paimon.options.ConfigOption;
 import org.apache.paimon.options.Options;
@@ -589,11 +591,13 @@ void testFormatTableRead(boolean partitioned) throws Exception {
         String dbName = "test_db";
         catalog.createDatabase(dbName, true);
         int partitionValue = 10;
+        HadoopCompressionType compressionType = HadoopCompressionType.GZIP;
         Schema.Builder schemaBuilder = Schema.newBuilder();
         schemaBuilder.column("f1", DataTypes.INT());
         schemaBuilder.column("dt", DataTypes.INT());
         schemaBuilder.option("type", "format-table");
         schemaBuilder.option("target-file-size", "1 kb");
+        schemaBuilder.option("file.compression", compressionType.value());
         String[] formats = {
             "csv", "parquet",
         };
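
The test now declares the codec once and routes it through a table option instead of hard-coding "gzip" downstream. A short sketch of that wiring, assuming HadoopCompressionType.GZIP.value() returns the codec identifier (e.g. "gzip"):

    // One codec constant feeds both the table metadata and the file writes,
    // so the write path and the read path agree on compression.
    HadoopCompressionType compressionType = HadoopCompressionType.GZIP;
    Schema.Builder schemaBuilder = Schema.newBuilder();
    schemaBuilder.option("file.compression", compressionType.value());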
@@ -620,19 +624,46 @@
         Map<String, String> partitionSpec = null;
         if (partitioned) {
             Path partitionPath =
-                    new Path(
-                            String.format(
-                                    "%s/%s/%s",
-                                    table.location(), "dt=" + partitionValue, "data"));
+                    new Path(String.format("%s/%s", table.location(), "dt=" + partitionValue));
+            DataFilePathFactory dataFilePathFactory =
+                    new DataFilePathFactory(
+                            partitionPath,
+                            format,
+                            "data",
+                            "change",
+                            true,
+                            compressionType.value(),
+                            null);
             Path diffPartitionPath =
-                    new Path(String.format("%s/%s/%s", table.location(), "dt=" + 11, "data"));
-            write(factory, partitionPath, datas);
-            write(factory, diffPartitionPath, dataWithDiffPartition);
+                    new Path(String.format("%s/%s", table.location(), "dt=" + 11));
+            DataFilePathFactory diffPartitionPathFactory =
+                    new DataFilePathFactory(
+                            diffPartitionPath,
+                            format,
+                            "data",
+                            "change",
+                            true,
+                            compressionType.value(),
+                            null);
+            write(factory, dataFilePathFactory.newPath(), compressionType.value(), datas);
+            write(
+                    factory,
+                    diffPartitionPathFactory.newPath(),
+                    compressionType.value(),
+                    dataWithDiffPartition);
             partitionSpec = new HashMap<>();
             partitionSpec.put("dt", "" + partitionValue);
         } else {
-            Path filePath = new Path(table.location(), "data");
-            write(factory, filePath, datas);
+            DataFilePathFactory dataFilePathFactory =
+                    new DataFilePathFactory(
+                            new Path(table.location()),
+                            format,
+                            "data",
+                            "change",
+                            true,
+                            compressionType.value(),
+                            null);
+            write(factory, dataFilePathFactory.newPath(), compressionType.value(), datas);
         }
         List<InternalRow> readData = read(table, null, partitionSpec);
 
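
The apparent source of the failure: the old code wrote each partition's rows to a bare file named "data", while the reader presumably resolves format and compression from the file name. DataFilePathFactory generates names carrying both. A usage sketch assembled from the constructor arguments visible above; the literal values and the exact file-name shape produced by newPath() are assumptions:

    // Arguments in order: parent directory, format identifier, data-file prefix,
    // changelog-file prefix, whether the suffix embeds the compression codec,
    // the codec itself, and an optional data-file directory (null here).
    DataFilePathFactory pathFactory =
            new DataFilePathFactory(
                    new Path("/warehouse/test_db.db/t/dt=10"), // hypothetical location
                    "csv",
                    "data",
                    "change",
                    true,
                    "gzip",
                    null);
    Path file = pathFactory.newPath(); // uniquely named, e.g. data-<uuid>-0.gz.csv (shape assumed)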

@@ -652,15 +683,16 @@ protected FileFormatFactory buildFileFormatFactory(String format) {
         }
     }
 
-    protected void write(FormatWriterFactory factory, Path file, InternalRow... rows)
+    protected void write(
+            FormatWriterFactory factory, Path file, String compression, InternalRow... rows)
             throws IOException {
         FormatWriter writer;
         PositionOutputStream out = null;
         if (factory instanceof SupportsDirectWrite) {
-            writer = ((SupportsDirectWrite) factory).create(fileIO, file, "gzip");
+            writer = ((SupportsDirectWrite) factory).create(fileIO, file, compression);
         } else {
             out = fileIO.newOutputStream(file, true);
-            writer = factory.create(out, "gzip");
+            writer = factory.create(out, compression);
         }
         for (InternalRow row : rows) {
             writer.addElement(row);
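
With the codec now a parameter, call sites pass the same value used for the file.compression table option. A call-shape sketch mirroring the test (identifiers come from the hunks above):

    // The helper takes the direct-write path when the factory supports it and
    // otherwise opens a PositionOutputStream itself; either way the codec comes
    // from the caller rather than a hard-coded "gzip".
    write(factory, dataFilePathFactory.newPath(), compressionType.value(), datas);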
