
Commit 00af9ad

Fix that field nullability affects write (#24)
1 parent 5e7d468 commit 00af9ad

File tree

3 files changed: +121 −11 lines

paimon_python_java/paimon-python-java-bridge/src/main/java/org/apache/paimon/python/BytesWriter.java

Lines changed: 37 additions & 0 deletions
@@ -18,6 +18,7 @@
 
 package org.apache.paimon.python;
 
+import org.apache.paimon.arrow.ArrowUtils;
 import org.apache.paimon.arrow.reader.ArrowBatchReader;
 import org.apache.paimon.data.InternalRow;
 import org.apache.paimon.table.sink.TableWrite;
@@ -27,26 +28,42 @@
 import org.apache.arrow.memory.RootAllocator;
 import org.apache.arrow.vector.VectorSchemaRoot;
 import org.apache.arrow.vector.ipc.ArrowStreamReader;
+import org.apache.arrow.vector.types.pojo.Field;
 
 import java.io.ByteArrayInputStream;
+import java.util.List;
+import java.util.stream.Collectors;
 
 /** Write Arrow bytes to Paimon. */
 public class BytesWriter {
 
     private final TableWrite tableWrite;
     private final ArrowBatchReader arrowBatchReader;
     private final BufferAllocator allocator;
+    private final List<Field> arrowFields;
 
     public BytesWriter(TableWrite tableWrite, RowType rowType) {
         this.tableWrite = tableWrite;
         this.arrowBatchReader = new ArrowBatchReader(rowType);
         this.allocator = new RootAllocator();
+        arrowFields =
+                rowType.getFields().stream()
+                        .map(f -> ArrowUtils.toArrowField(f.name(), f.type()))
+                        .collect(Collectors.toList());
     }
 
     public void write(byte[] bytes) throws Exception {
         ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
         ArrowStreamReader arrowStreamReader = new ArrowStreamReader(bais, allocator);
         VectorSchemaRoot vsr = arrowStreamReader.getVectorSchemaRoot();
+        if (!checkTypesIgnoreNullability(arrowFields, vsr.getSchema().getFields())) {
+            throw new RuntimeException(
+                    String.format(
+                            "Input schema isn't consistent with table schema.\n"
+                                    + "\tTable schema is: %s\n"
+                                    + "\tInput schema is: %s",
+                            arrowFields, vsr.getSchema().getFields()));
+        }
 
         while (arrowStreamReader.loadNextBatch()) {
             Iterable<InternalRow> rows = arrowBatchReader.readBatch(vsr);
@@ -60,4 +77,24 @@ public void write(byte[] bytes) throws Exception {
     public void close() {
         allocator.close();
     }
+
+    private boolean checkTypesIgnoreNullability(
+            List<Field> expectedFields, List<Field> actualFields) {
+        if (expectedFields.size() != actualFields.size()) {
+            return false;
+        }
+
+        for (int i = 0; i < expectedFields.size(); i++) {
+            Field expectedField = expectedFields.get(i);
+            Field actualField = actualFields.get(i);
+            // ArrowType doesn't have nullability (similar to DataTypeRoot)
+            if (!actualField.getType().equals(expectedField.getType())
+                    || !checkTypesIgnoreNullability(
+                            expectedField.getChildren(), actualField.getChildren())) {
+                return false;
+            }
+        }
+
+        return true;
+    }
 }
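The new check compares Arrow field types while ignoring each field's nullable flag: in Arrow, nullability lives on Field, not on ArrowType, so comparing types (and recursing into children for nested types) gives a nullability-insensitive comparison. A small pyarrow illustration of that distinction (not part of this commit; the names are ours):

import pyarrow as pa

# The same value type declared with different nullability:
non_null = pa.field('f0', pa.int32(), nullable=False)
nullable = pa.field('f0', pa.int32(), nullable=True)

assert not non_null.equals(nullable)   # full Field equality is nullability-sensitive
assert non_null.type == nullable.type  # type-only comparison accepts the pair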

paimon_python_java/pypaimon.py

Lines changed: 10 additions & 11 deletions
@@ -218,24 +218,23 @@ def __init__(self, j_batch_table_write, j_row_type, arrow_schema: pa.Schema):
 
     def write_arrow(self, table):
         for record_batch in table.to_reader():
-            # TODO: can we use a reusable stream?
-            stream = pa.BufferOutputStream()
-            with pa.RecordBatchStreamWriter(stream, self._arrow_schema) as writer:
-                writer.write(record_batch)
-            arrow_bytes = stream.getvalue().to_pybytes()
-            self._j_bytes_writer.write(arrow_bytes)
+            # TODO: can we use a reusable stream in #_write_arrow_batch ?
+            self._write_arrow_batch(record_batch)
 
     def write_arrow_batch(self, record_batch):
+        self._write_arrow_batch(record_batch)
+
+    def write_pandas(self, dataframe: pd.DataFrame):
+        record_batch = pa.RecordBatch.from_pandas(dataframe, schema=self._arrow_schema)
+        self._write_arrow_batch(record_batch)
+
+    def _write_arrow_batch(self, record_batch):
         stream = pa.BufferOutputStream()
-        with pa.RecordBatchStreamWriter(stream, self._arrow_schema) as writer:
+        with pa.RecordBatchStreamWriter(stream, record_batch.schema) as writer:
             writer.write(record_batch)
         arrow_bytes = stream.getvalue().to_pybytes()
         self._j_bytes_writer.write(arrow_bytes)
 
-    def write_pandas(self, dataframe: pd.DataFrame):
-        record_batch = pa.RecordBatch.from_pandas(dataframe, schema=self._arrow_schema)
-        self.write_arrow_batch(record_batch)
-
     def prepare_commit(self) -> List['CommitMessage']:
         j_commit_messages = self._j_batch_table_write.prepareCommit()
         return list(map(lambda cm: CommitMessage(cm), j_commit_messages))
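On the Python side, all three public write paths now funnel into _write_arrow_batch, and the IPC stream is written with record_batch.schema rather than the table's self._arrow_schema, so the serialized schema always matches the batch it carries; tolerating the nullability difference is then the Java side's job. A standalone sketch of that serialization step, using only pyarrow (the batch here is illustrative):

import pyarrow as pa

batch = pa.RecordBatch.from_pydict({'f0': [1, 2, 3]})

stream = pa.BufferOutputStream()
# Write the stream with the batch's own schema so the IPC metadata and the
# batch can never disagree, e.g. on nullability.
with pa.RecordBatchStreamWriter(stream, batch.schema) as writer:
    writer.write_batch(batch)
arrow_bytes = stream.getvalue().to_pybytes()  # these bytes go to the Java bridge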

paimon_python_java/tests/test_write_and_read.py

Lines changed: 74 additions & 0 deletions
@@ -22,6 +22,7 @@
 import unittest
 import pandas as pd
 import pyarrow as pa
+from py4j.protocol import Py4JJavaError
 
 from paimon_python_api import Schema
 from paimon_python_java import Catalog
@@ -371,3 +372,76 @@ def test_overwrite(self):
         df2['f0'] = df2['f0'].astype('int32')
         pd.testing.assert_frame_equal(
             actual_df2.reset_index(drop=True), df2.reset_index(drop=True))
+
+    def testWriteWrongSchema(self):
+        schema = Schema(self.simple_pa_schema)
+        self.catalog.create_table('default.test_wrong_schema', schema, False)
+        table = self.catalog.get_table('default.test_wrong_schema')
+
+        data = {
+            'f0': [1, 2, 3],
+            'f1': ['a', 'b', 'c'],
+        }
+        df = pd.DataFrame(data)
+        schema = pa.schema([
+            ('f0', pa.int64()),
+            ('f1', pa.string())
+        ])
+        record_batch = pa.RecordBatch.from_pandas(df, schema)
+
+        write_builder = table.new_batch_write_builder()
+        table_write = write_builder.new_write()
+
+        with self.assertRaises(Py4JJavaError) as e:
+            table_write.write_arrow_batch(record_batch)
+        self.assertEqual(
+            str(e.exception.java_exception),
+            '''java.lang.RuntimeException: Input schema isn't consistent with table schema.
+\tTable schema is: [f0: Int(32, true), f1: Utf8]
+\tInput schema is: [f0: Int(64, true), f1: Utf8]''')
+
+    def testIgnoreNullable(self):
+        pa_schema1 = pa.schema([
+            ('f0', pa.int32(), False),
+            ('f1', pa.string())
+        ])
+
+        pa_schema2 = pa.schema([
+            ('f0', pa.int32()),
+            ('f1', pa.string())
+        ])
+
+        # write nullable to non-null
+        self._testIgnoreNullableImpl('test_ignore_nullable1', pa_schema1, pa_schema2)
+
+        # write non-null to nullable
+        self._testIgnoreNullableImpl('test_ignore_nullable2', pa_schema2, pa_schema1)
+
+    def _testIgnoreNullableImpl(self, table_name, table_schema, data_schema):
+        schema = Schema(table_schema)
+        self.catalog.create_table(f'default.{table_name}', schema, False)
+        table = self.catalog.get_table(f'default.{table_name}')
+
+        data = {
+            'f0': [1, 2, 3],
+            'f1': ['a', 'b', 'c'],
+        }
+        df = pd.DataFrame(data)
+        record_batch = pa.RecordBatch.from_pandas(pd.DataFrame(data), data_schema)
+
+        write_builder = table.new_batch_write_builder()
+        table_write = write_builder.new_write()
+        table_commit = write_builder.new_commit()
+        table_write.write_arrow_batch(record_batch)
+        table_commit.commit(table_write.prepare_commit())
+
+        table_write.close()
+        table_commit.close()
+
+        read_builder = table.new_read_builder()
+        table_scan = read_builder.new_scan()
+        table_read = read_builder.new_read()
+        actual_df = table_read.to_pandas(table_scan.plan().splits())
+        df['f0'] = df['f0'].astype('int32')
+        pd.testing.assert_frame_equal(
+            actual_df.reset_index(drop=True), df.reset_index(drop=True))
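Together the new tests pin down the contract: a genuine type mismatch (int64 input vs. int32 table) is rejected with the Java RuntimeException above, while a mismatch in nullability alone round-trips. A condensed sketch of that happy path, assuming a catalog object configured as in the test fixture:

import pandas as pd
import pyarrow as pa

# 'catalog' is assumed to exist (see the test setup); the table declares f0 non-null.
table = catalog.get_table('default.test_ignore_nullable1')

df = pd.DataFrame({'f0': [1, 2, 3], 'f1': ['a', 'b', 'c']})
nullable_schema = pa.schema([('f0', pa.int32()), ('f1', pa.string())])
batch = pa.RecordBatch.from_pandas(df, nullable_schema)

write_builder = table.new_batch_write_builder()
table_write = write_builder.new_write()
table_commit = write_builder.new_commit()

# Before this fix the nullability difference failed the write; now only the
# value types must match, so this succeeds.
table_write.write_arrow_batch(batch)
table_commit.commit(table_write.prepare_commit())
table_write.close()
table_commit.close()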
