author     chrislu <chris.lu@gmail.com>    2024-04-24 23:04:47 -0700
committer  chrislu <chris.lu@gmail.com>    2024-04-24 23:04:47 -0700
commit     d88c1872ac2fed116f13dd8660de1b169ea61ca5 (patch)
tree       440f7dd0c90f0c34076bfc73145ba25529595890
parent     d7e5f6b2a55b7b52952c02ad315f117147dbb29a (diff)
download   seaweedfs-d88c1872ac2fed116f13dd8660de1b169ea61ca5.tar.xz
           seaweedfs-d88c1872ac2fed116f13dd8660de1b169ea61ca5.zip
works with a single level of list
-rw-r--r--  weed/mq/schema/to_parquet_schema.go    7
-rw-r--r--  weed/mq/schema/to_parquet_value.go    27
-rw-r--r--  weed/mq/schema/to_schema_value.go     66
-rw-r--r--  weed/mq/schema/write_parquet_test.go  31
4 files changed, 71 insertions(+), 60 deletions(-)
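
Context for the diffs below: the schema mapping now marks scalar fields as Optional parquet nodes and represents a message-queue list as a Repeated node rather than a parquet List group. A minimal sketch of the resulting node shape, written directly against the parquet-go library (field names are illustrative, and the module path github.com/parquet-go/parquet-go is assumed):

package main

import (
	"os"

	"github.com/parquet-go/parquet-go"
)

func main() {
	// An optional scalar plus a single-level repeated string, i.e. the node
	// shapes toParquetFieldType and toParquetFieldTypeList now produce.
	node := parquet.Group{
		"ID":     parquet.Optional(parquet.Int(64)),
		"emails": parquet.Repeated(parquet.String()),
	}
	parquet.PrintSchema(os.Stdout, "example", node)
}
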
diff --git a/weed/mq/schema/to_parquet_schema.go b/weed/mq/schema/to_parquet_schema.go
index 3019bc8a7..ce30e3917 100644
--- a/weed/mq/schema/to_parquet_schema.go
+++ b/weed/mq/schema/to_parquet_schema.go
@@ -21,6 +21,7 @@ func toParquetFieldType(fieldType *schema_pb.Type) (dataType parquet.Node, err e
switch fieldType.Kind.(type) {
case *schema_pb.Type_ScalarType:
dataType, err = toParquetFieldTypeScalar(fieldType.GetScalarType())
+ dataType = parquet.Optional(dataType)
case *schema_pb.Type_RecordType:
dataType, err = toParquetFieldTypeRecord(fieldType.GetRecordType())
case *schema_pb.Type_ListType:
@@ -29,6 +30,7 @@ func toParquetFieldType(fieldType *schema_pb.Type) (dataType parquet.Node, err e
return nil, fmt.Errorf("unknown field type: %T", fieldType.Kind)
}
+
return dataType, err
}
@@ -37,7 +39,7 @@ func toParquetFieldTypeList(listType *schema_pb.ListType) (parquet.Node, error)
if err != nil {
return nil, err
}
- return parquet.List(elementType), nil
+ return parquet.Repeated(elementType), nil
}
func toParquetFieldTypeScalar(scalarType schema_pb.ScalarType) (parquet.Node, error) {
@@ -67,9 +69,6 @@ func toParquetFieldTypeRecord(recordType *schema_pb.RecordType) (parquet.Node, e
if err != nil {
return nil, err
}
- if !field.IsRequired {
- parquetFieldType = parquet.Optional(parquetFieldType)
- }
recordNode[field.Name] = parquetFieldType
}
return recordNode, nil
diff --git a/weed/mq/schema/to_parquet_value.go b/weed/mq/schema/to_parquet_value.go
index a5b981f4d..f3907a657 100644
--- a/weed/mq/schema/to_parquet_value.go
+++ b/weed/mq/schema/to_parquet_value.go
@@ -6,28 +6,33 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)
-func rowBuilderVisit(rowBuilder *parquet.RowBuilder, fieldType *schema_pb.Type, fieldValue *schema_pb.Value, columnIndex int) error {
+func rowBuilderVisit(rowBuilder *parquet.RowBuilder, fieldType *schema_pb.Type, fieldValue *schema_pb.Value, columnIndex int) (endIndex int, err error) {
switch fieldType.Kind.(type) {
case *schema_pb.Type_ScalarType:
- parquetValue, err := toParquetValue(fieldValue)
+ endIndex = columnIndex+1
+ var parquetValue parquet.Value
+ parquetValue, err = toParquetValue(fieldValue)
if err != nil {
- return err
+ return
}
rowBuilder.Add(columnIndex, parquetValue)
+ // fmt.Printf("rowBuilder.Add %d %v\n", columnIndex, parquetValue)
case *schema_pb.Type_ListType:
+ rowBuilder.Next(columnIndex)
+ // fmt.Printf("rowBuilder.Next %d\n", columnIndex)
+
elementType := fieldType.GetListType().ElementType
for _, value := range fieldValue.GetListValue().Values {
- if err := rowBuilderVisit(rowBuilder, elementType, value, columnIndex); err != nil {
- return err
+ if endIndex, err = rowBuilderVisit(rowBuilder, elementType, value, columnIndex); err != nil {
+ return
}
}
- rowBuilder.Next(columnIndex)
}
- return nil
+ return
}
func AddRecordValue(rowBuilder *parquet.RowBuilder, recordType *schema_pb.RecordType, recordValue *schema_pb.RecordValue) error {
- visitor := func(fieldType *schema_pb.Type, fieldValue *schema_pb.Value, index int) error {
+ visitor := func(fieldType *schema_pb.Type, fieldValue *schema_pb.Value, index int) (endIndex int, err error) {
return rowBuilderVisit(rowBuilder, fieldType, fieldValue, index)
}
fieldType := &schema_pb.Type{Kind: &schema_pb.Type_RecordType{RecordType: recordType}}
@@ -38,7 +43,7 @@ func AddRecordValue(rowBuilder *parquet.RowBuilder, recordType *schema_pb.Record
// typeValueVisitor is a function that is called for each value in a schema_pb.Value
// Find the column index.
// intended to be used in RowBuilder.Add(columnIndex, value)
-type typeValueVisitor func(fieldType *schema_pb.Type, fieldValue *schema_pb.Value, index int) error
+type typeValueVisitor func(fieldType *schema_pb.Type, fieldValue *schema_pb.Value, index int) (endIndex int, err error)
func visitValue(fieldType *schema_pb.Type, fieldValue *schema_pb.Value, visitor typeValueVisitor) (err error) {
_, err = doVisitValue(fieldType, fieldValue, 0, visitor)
@@ -50,9 +55,9 @@ func visitValue(fieldType *schema_pb.Type, fieldValue *schema_pb.Value, visitor
func doVisitValue(fieldType *schema_pb.Type, fieldValue *schema_pb.Value, columnIndex int, visitor typeValueVisitor) (endIndex int, err error) {
switch fieldType.Kind.(type) {
case *schema_pb.Type_ScalarType:
- return columnIndex+1, visitor(fieldType, fieldValue, columnIndex)
+ return visitor(fieldType, fieldValue, columnIndex)
case *schema_pb.Type_ListType:
- return columnIndex+1, visitor(fieldType, fieldValue, columnIndex)
+ return visitor(fieldType, fieldValue, columnIndex)
case *schema_pb.Type_RecordType:
for _, field := range fieldType.GetRecordType().Fields {
fieldValue, found := fieldValue.GetRecordValue().Fields[field.Name]
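
The typeValueVisitor comment above describes the intent: walk the schema to find each leaf's column index and feed values to RowBuilder.Add, calling RowBuilder.Next on a list's column before adding its elements. A hedged, standalone sketch of that Add/Next usage (the schema and values are made up; Add, Next, Row, and Value.Column are the parquet-go calls already used in this commit, while Int64Value and ByteArrayValue are assumed helpers from the same library):

package main

import (
	"fmt"

	"github.com/parquet-go/parquet-go"
)

func main() {
	// Leaf columns are ordered by field name, so "ID" is column 0 and
	// "emails" is column 1 in this schema.
	schema := parquet.NewSchema("example", parquet.Group{
		"ID":     parquet.Optional(parquet.Int(64)),
		"emails": parquet.Repeated(parquet.String()),
	})

	rb := parquet.NewRowBuilder(schema)
	rb.Add(0, parquet.Int64Value(42))

	// Mirror rowBuilderVisit for a list field: start a new repeated value on
	// the column, then add one parquet value per list element.
	rb.Next(1)
	rb.Add(1, parquet.ByteArrayValue([]byte("a@example.com")))
	rb.Add(1, parquet.ByteArrayValue([]byte("b@example.com")))

	// Every value in the flat row reports its leaf column via Column(),
	// which is how the decoding side (toListValue) detects the end of a list.
	for _, v := range rb.Row() {
		fmt.Printf("column %d: %v\n", v.Column(), v)
	}
}
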
diff --git a/weed/mq/schema/to_schema_value.go b/weed/mq/schema/to_schema_value.go
index 9f8cd5d91..5dbab91f4 100644
--- a/weed/mq/schema/to_schema_value.go
+++ b/weed/mq/schema/to_schema_value.go
@@ -8,78 +8,76 @@ import (
func ToRecordValue(recordType *schema_pb.RecordType, row parquet.Row) (*schema_pb.RecordValue, error) {
values := []parquet.Value(row)
- recordValue, _, err := toRecordValue(recordType, values, 0)
+ recordValue, _, _, err := toRecordValue(recordType, values, 0, 0)
if err != nil {
return nil, err
}
return recordValue.GetRecordValue(), nil
}
-func ToValue(t *schema_pb.Type, values []parquet.Value, columnIndex int) (value *schema_pb.Value, endIndex int, err error) {
+func ToValue(t *schema_pb.Type, values []parquet.Value, valueIndex, columnIndex int) (value *schema_pb.Value, endValueIndex, endColumnIndex int, err error) {
switch t.Kind.(type) {
case *schema_pb.Type_ScalarType:
- return toScalarValue(t.GetScalarType(), values, columnIndex)
+ value, err = toScalarValue(t.GetScalarType(), values, valueIndex, columnIndex)
+ return value, valueIndex + 1, columnIndex + 1, err
case *schema_pb.Type_ListType:
- return toListValue(t.GetListType(), values, columnIndex)
+ return toListValue(t.GetListType(), values, valueIndex, columnIndex)
case *schema_pb.Type_RecordType:
- return toRecordValue(t.GetRecordType(), values, columnIndex)
+ return toRecordValue(t.GetRecordType(), values, valueIndex, columnIndex)
}
- return nil, 0, fmt.Errorf("unsupported type: %v", t)
+ return nil, 0, 0, fmt.Errorf("unsupported type: %v", t)
}
-func toRecordValue(recordType *schema_pb.RecordType, values []parquet.Value, columnIndex int) (*schema_pb.Value, int, error) {
+func toRecordValue(recordType *schema_pb.RecordType, values []parquet.Value, valueIndex, columnIndex int) (*schema_pb.Value, int, int, error) {
recordValue := schema_pb.RecordValue{Fields: make(map[string]*schema_pb.Value)}
for _, field := range recordType.Fields {
- fieldValue, endIndex, err := ToValue(field.Type, values, columnIndex)
+ fieldValue, endValueIndex, endColumnIndex, err := ToValue(field.Type, values, valueIndex, columnIndex)
if err != nil {
- return nil, 0, err
+ return nil, 0, 0, err
}
- if endIndex == columnIndex {
- continue
- }
- columnIndex = endIndex
+ columnIndex = endColumnIndex
+ valueIndex = endValueIndex
recordValue.Fields[field.Name] = fieldValue
}
- return &schema_pb.Value{Kind: &schema_pb.Value_RecordValue{RecordValue: &recordValue}}, columnIndex, nil
+ return &schema_pb.Value{Kind: &schema_pb.Value_RecordValue{RecordValue: &recordValue}}, valueIndex, columnIndex, nil
}
-func toListValue(listType *schema_pb.ListType, values []parquet.Value, index int) (listValue *schema_pb.Value, endIndex int, err error) {
+func toListValue(listType *schema_pb.ListType, values []parquet.Value, valueIndex, columnIndex int) (listValue *schema_pb.Value, endValueIndex, endColumnIndex int, err error) {
listValues := make([]*schema_pb.Value, 0)
var value *schema_pb.Value
- for i := index; i < len(values); {
- value, endIndex, err = ToValue(listType.ElementType, values, i)
- if err != nil {
- return nil, 0, err
- }
- if endIndex == i {
+ for valueIndex < len(values) {
+ if values[valueIndex].Column() != columnIndex {
break
}
+ value, valueIndex, endColumnIndex, err = ToValue(listType.ElementType, values, valueIndex, columnIndex)
+ if err != nil {
+ return nil, 0, 0, err
+ }
listValues = append(listValues, value)
- i = endIndex
}
- return &schema_pb.Value{Kind: &schema_pb.Value_ListValue{ListValue: &schema_pb.ListValue{Values: listValues}}}, endIndex, nil
+ return &schema_pb.Value{Kind: &schema_pb.Value_ListValue{ListValue: &schema_pb.ListValue{Values: listValues}}}, valueIndex, endColumnIndex, nil
}
-func toScalarValue(scalarType schema_pb.ScalarType, values []parquet.Value, columnIndex int) (*schema_pb.Value, int, error) {
- value := values[columnIndex]
+func toScalarValue(scalarType schema_pb.ScalarType, values []parquet.Value, valueIndex, columnIndex int) (*schema_pb.Value, error) {
+ value := values[valueIndex]
if value.Column() != columnIndex {
- return nil, columnIndex, nil
+ return nil, nil
}
switch scalarType {
case schema_pb.ScalarType_BOOLEAN:
- return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value.Boolean()}}, columnIndex + 1, nil
+ return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value.Boolean()}}, nil
case schema_pb.ScalarType_INTEGER:
- return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value.Int32()}}, columnIndex + 1, nil
+ return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value.Int32()}}, nil
case schema_pb.ScalarType_LONG:
- return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value.Int64()}}, columnIndex + 1, nil
+ return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value.Int64()}}, nil
case schema_pb.ScalarType_FLOAT:
- return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value.Float()}}, columnIndex + 1, nil
+ return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value.Float()}}, nil
case schema_pb.ScalarType_DOUBLE:
- return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value.Double()}}, columnIndex + 1, nil
+ return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value.Double()}}, nil
case schema_pb.ScalarType_BYTES:
- return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: value.ByteArray()}}, columnIndex + 1, nil
+ return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: value.ByteArray()}}, nil
case schema_pb.ScalarType_STRING:
- return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(value.ByteArray())}}, columnIndex + 1, nil
+ return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(value.ByteArray())}}, nil
}
- return nil, columnIndex, fmt.Errorf("unsupported scalar type: %v", scalarType)
+ return nil, fmt.Errorf("unsupported scalar type: %v", scalarType)
}
diff --git a/weed/mq/schema/write_parquet_test.go b/weed/mq/schema/write_parquet_test.go
index 1b4ecdf59..02b0a09d4 100644
--- a/weed/mq/schema/write_parquet_test.go
+++ b/weed/mq/schema/write_parquet_test.go
@@ -27,7 +27,8 @@ func TestWriteParquet(t *testing.T) {
t.Fatalf("ToParquetSchema failed: %v", err)
}
fmt.Printf("ParquetSchema: %v\n", parquetSchema)
- parquet.PrintSchema(os.Stdout, "example", parquetSchema)
+
+ fmt.Printf("Go Type: %+v\n", parquetSchema.GoType())
filename := "example.parquet"
@@ -50,21 +51,29 @@ func testWritingParquetFile(t *testing.T, filename string, parquetSchema *parque
defer file.Close()
writer := parquet.NewWriter(file, parquetSchema, parquet.Compression(&zstd.Codec{Level: zstd.SpeedDefault}))
rowBuilder := parquet.NewRowBuilder(parquetSchema)
- for i := 0; i < 128; i++ {
+ for i := 0; i < 128*1024; i++ {
rowBuilder.Reset()
// generate random data
- AddRecordValue(rowBuilder, recordType, NewRecordValueBuilder().
- AddLongValue("ID", int64(1+i)).
- AddLongValue("CreatedAt", 2*int64(i)).
+ recordValue := NewRecordValueBuilder().
+ AddLongValue("ID", 1+int64(i)).
+ AddLongValue("CreatedAt", 2+2*int64(i)).
AddRecordValue("Person", NewRecordValueBuilder().
AddStringValue("zName", fmt.Sprintf("john_%d", i)).
AddStringListValue("emails",
- fmt.Sprintf("john_%d@y.com", i),
- fmt.Sprintf("john_%d@g.com", i),
- fmt.Sprintf("john_%d@t.com", i))).
- AddStringValue("Company", fmt.Sprintf("company_%d", i)).Build())
+ fmt.Sprintf("john_%d@a.com", i),
+ fmt.Sprintf("john_%d@b.com", i),
+ fmt.Sprintf("john_%d@c.com", i),
+ fmt.Sprintf("john_%d@d.com", i),
+ fmt.Sprintf("john_%d@e.com", i))).
+ AddStringValue("Company", fmt.Sprintf("company_%d", i)).Build()
+ AddRecordValue(rowBuilder, recordType, recordValue)
+
+ // fmt.Printf("RecordValue: %v\n", recordValue)
row := rowBuilder.Row()
+
+ // fmt.Printf("Row: %+v\n", row)
+
if err != nil {
t.Fatalf("rowBuilder.Build failed: %v", err)
}
@@ -98,11 +107,11 @@ func testReadingParquetFile(t *testing.T, filename string, parquetSchema *parque
for i := 0; i < rowCount; i++ {
row := rows[i]
// convert parquet row to schema_pb.RecordValue
- recordValue, err := ToRecordValue(recordType, row)
+ _, err := ToRecordValue(recordType, row)
if err != nil {
t.Fatalf("ToRecordValue failed: %v", err)
}
- fmt.Printf("RecordValue: %v\n", recordValue)
+ // fmt.Printf("RecordValue: %v\n", recordValue)
}
total += rowCount
}