Coverage Report

Created: 2026-04-07 18:20

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/format/parquet/schema_desc.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/parquet/schema_desc.h"
19
20
#include <ctype.h>
21
22
#include <algorithm>
23
#include <ostream>
24
#include <utility>
25
26
#include "common/cast_set.h"
27
#include "common/logging.h"
28
#include "core/data_type/data_type_array.h"
29
#include "core/data_type/data_type_factory.hpp"
30
#include "core/data_type/data_type_map.h"
31
#include "core/data_type/data_type_struct.h"
32
#include "core/data_type/define_primitive_type.h"
33
#include "format/generic_reader.h"
34
#include "format/table/table_schema_change_helper.h"
35
#include "util/slice.h"
36
#include "util/string_util.h"
37
38
namespace doris {
39
#include "common/compile_check_begin.h"
40
41
2.40k
static bool is_group_node(const tparquet::SchemaElement& schema) {
42
2.40k
    return schema.num_children > 0;
43
2.40k
}
44
45
442
static bool is_list_node(const tparquet::SchemaElement& schema) {
46
442
    return schema.__isset.converted_type && schema.converted_type == tparquet::ConvertedType::LIST;
47
442
}
48
49
518
static bool is_map_node(const tparquet::SchemaElement& schema) {
50
518
    return schema.__isset.converted_type &&
51
518
           (schema.converted_type == tparquet::ConvertedType::MAP ||
52
228
            schema.converted_type == tparquet::ConvertedType::MAP_KEY_VALUE);
53
518
}
54
55
2.14k
static bool is_repeated_node(const tparquet::SchemaElement& schema) {
56
2.14k
    return schema.__isset.repetition_type &&
57
2.14k
           schema.repetition_type == tparquet::FieldRepetitionType::REPEATED;
58
2.14k
}
59
60
76
static bool is_required_node(const tparquet::SchemaElement& schema) {
61
76
    return schema.__isset.repetition_type &&
62
76
           schema.repetition_type == tparquet::FieldRepetitionType::REQUIRED;
63
76
}
64
65
2.22k
static bool is_optional_node(const tparquet::SchemaElement& schema) {
66
2.22k
    return schema.__isset.repetition_type &&
67
2.22k
           schema.repetition_type == tparquet::FieldRepetitionType::OPTIONAL;
68
2.22k
}
69
70
152
static int num_children_node(const tparquet::SchemaElement& schema) {
71
152
    return schema.__isset.num_children ? schema.num_children : 0;
72
152
}
73
74
/**
75
 * `repeated_parent_def_level` is the definition level of the first ancestor node whose repetition_type equals REPEATED.
76
 * Empty array/map values are not stored in doris columns, so have to use `repeated_parent_def_level` to skip the
77
 * empty or null values in ancestor node.
78
 *
79
 * For instance, considering an array of strings with 3 rows like the following:
80
 * null, [], [a, b, c]
81
 * We can store four elements in data column: null, a, b, c
82
 * and the offsets column is: 1, 1, 4
83
 * and the null map is: 1, 0, 0
84
 * For the i-th row in array column: range from `offsets[i - 1]` until `offsets[i]` represents the elements in this row,
85
 * so we can't store empty array/map values in doris data column.
86
 * As a comparison, spark does not require `repeated_parent_def_level`,
87
 * because the spark column stores empty array/map values , and use anther length column to indicate empty values.
88
 * Please reference: https://github.com/apache/spark/blob/master/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnVector.java
89
 *
90
 * Furthermore, we can also avoid store null array/map values in doris data column.
91
 * The same three rows as above, We can only store three elements in data column: a, b, c
92
 * and the offsets column is: 0, 0, 3
93
 * and the null map is: 1, 0, 0
94
 *
95
 * Inherit the repetition and definition level from parent node, if the parent node is repeated,
96
 * we should set repeated_parent_def_level = definition_level, otherwise as repeated_parent_def_level.
97
 * @param parent parent node
98
 * @param repeated_parent_def_level the first ancestor node whose repetition_type equals REPEATED
99
 */
100
518
static void set_child_node_level(FieldSchema* parent, int16_t repeated_parent_def_level) {
101
1.08k
    for (auto& child : parent->children) {
102
1.08k
        child.repetition_level = parent->repetition_level;
103
1.08k
        child.definition_level = parent->definition_level;
104
1.08k
        child.repeated_parent_def_level = repeated_parent_def_level;
105
1.08k
    }
106
518
}
107
108
152
static bool is_struct_list_node(const tparquet::SchemaElement& schema) {
109
152
    const std::string& name = schema.name;
110
152
    static const Slice array_slice("array", 5);
111
152
    static const Slice tuple_slice("_tuple", 6);
112
152
    Slice slice(name);
113
152
    return slice == array_slice || slice.ends_with(tuple_slice);
114
152
}
115
116
0
std::string FieldSchema::debug_string() const {
117
0
    std::stringstream ss;
118
0
    ss << "FieldSchema(name=" << name << ", R=" << repetition_level << ", D=" << definition_level;
119
0
    if (children.size() > 0) {
120
0
        ss << ", type=" << data_type->get_name() << ", children=[";
121
0
        for (int i = 0; i < children.size(); ++i) {
122
0
            if (i != 0) {
123
0
                ss << ", ";
124
0
            }
125
0
            ss << children[i].debug_string();
126
0
        }
127
0
        ss << "]";
128
0
    } else {
129
0
        ss << ", physical_type=" << physical_type;
130
0
        ss << " , doris_type=" << data_type->get_name();
131
0
    }
132
0
    ss << ")";
133
0
    return ss.str();
134
0
}
135
136
107
Status FieldDescriptor::parse_from_thrift(const std::vector<tparquet::SchemaElement>& t_schemas) {
137
107
    if (t_schemas.size() == 0 || !is_group_node(t_schemas[0])) {
138
0
        return Status::InvalidArgument("Wrong parquet root schema element");
139
0
    }
140
107
    const auto& root_schema = t_schemas[0];
141
107
    _fields.resize(root_schema.num_children);
142
107
    _next_schema_pos = 1;
143
144
1.24k
    for (int i = 0; i < root_schema.num_children; ++i) {
145
1.14k
        RETURN_IF_ERROR(parse_node_field(t_schemas, _next_schema_pos, &_fields[i]));
146
1.14k
        if (_name_to_field.find(_fields[i].name) != _name_to_field.end()) {
147
0
            return Status::InvalidArgument("Duplicated field name: {}", _fields[i].name);
148
0
        }
149
1.14k
        _name_to_field.emplace(_fields[i].name, &_fields[i]);
150
1.14k
    }
151
152
107
    if (_next_schema_pos != t_schemas.size()) {
153
0
        return Status::InvalidArgument("Remaining {} unparsed schema elements",
154
0
                                       t_schemas.size() - _next_schema_pos);
155
0
    }
156
157
107
    return Status::OK();
158
107
}
159
160
Status FieldDescriptor::parse_node_field(const std::vector<tparquet::SchemaElement>& t_schemas,
161
2.22k
                                         size_t curr_pos, FieldSchema* node_field) {
162
2.22k
    if (curr_pos >= t_schemas.size()) {
163
0
        return Status::InvalidArgument("Out-of-bounds index of schema elements");
164
0
    }
165
2.22k
    auto& t_schema = t_schemas[curr_pos];
166
2.22k
    if (is_group_node(t_schema)) {
167
        // nested structure or nullable list
168
518
        return parse_group_field(t_schemas, curr_pos, node_field);
169
518
    }
170
1.70k
    if (is_repeated_node(t_schema)) {
171
        // repeated <primitive-type> <name> (LIST)
172
        // produce required list<element>
173
0
        node_field->repetition_level++;
174
0
        node_field->definition_level++;
175
0
        node_field->children.resize(1);
176
0
        set_child_node_level(node_field, node_field->definition_level);
177
0
        auto child = &node_field->children[0];
178
0
        parse_physical_field(t_schema, false, child);
179
180
0
        node_field->name = t_schema.name;
181
0
        node_field->lower_case_name = to_lower(t_schema.name);
182
0
        node_field->data_type = std::make_shared<DataTypeArray>(make_nullable(child->data_type));
183
0
        _next_schema_pos = curr_pos + 1;
184
0
        node_field->field_id = t_schema.__isset.field_id ? t_schema.field_id : -1;
185
1.70k
    } else {
186
1.70k
        bool is_optional = is_optional_node(t_schema);
187
1.70k
        if (is_optional) {
188
1.59k
            node_field->definition_level++;
189
1.59k
        }
190
1.70k
        parse_physical_field(t_schema, is_optional, node_field);
191
1.70k
        _next_schema_pos = curr_pos + 1;
192
1.70k
    }
193
1.70k
    return Status::OK();
194
2.22k
}
195
196
void FieldDescriptor::parse_physical_field(const tparquet::SchemaElement& physical_schema,
197
1.70k
                                           bool is_nullable, FieldSchema* physical_field) {
198
1.70k
    physical_field->name = physical_schema.name;
199
1.70k
    physical_field->lower_case_name = to_lower(physical_field->name);
200
1.70k
    physical_field->parquet_schema = physical_schema;
201
1.70k
    physical_field->physical_type = physical_schema.type;
202
1.70k
    physical_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id
203
1.70k
    _physical_fields.push_back(physical_field);
204
1.70k
    physical_field->physical_column_index = cast_set<int>(_physical_fields.size() - 1);
205
1.70k
    auto type = get_doris_type(physical_schema, is_nullable);
206
1.70k
    physical_field->data_type = type.first;
207
1.70k
    physical_field->is_type_compatibility = type.second;
208
1.70k
    physical_field->field_id = physical_schema.__isset.field_id ? physical_schema.field_id : -1;
209
1.70k
}
210
211
std::pair<DataTypePtr, bool> FieldDescriptor::get_doris_type(
212
1.70k
        const tparquet::SchemaElement& physical_schema, bool nullable) {
213
1.70k
    std::pair<DataTypePtr, bool> ans = {std::make_shared<DataTypeNothing>(), false};
214
1.70k
    try {
215
1.70k
        if (physical_schema.__isset.logicalType) {
216
716
            ans = convert_to_doris_type(physical_schema.logicalType, nullable);
217
986
        } else if (physical_schema.__isset.converted_type) {
218
226
            ans = convert_to_doris_type(physical_schema, nullable);
219
226
        }
220
1.70k
    } catch (...) {
221
        // now the Not supported exception are ignored
222
        // so those byte_array maybe be treated as varbinary(now) : string(before)
223
0
    }
224
1.70k
    if (ans.first->get_primitive_type() == PrimitiveType::INVALID_TYPE) {
225
760
        switch (physical_schema.type) {
226
93
        case tparquet::Type::BOOLEAN:
227
93
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_BOOLEAN, nullable);
228
93
            break;
229
189
        case tparquet::Type::INT32:
230
189
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_INT, nullable);
231
189
            break;
232
123
        case tparquet::Type::INT64:
233
123
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_BIGINT, nullable);
234
123
            break;
235
12
        case tparquet::Type::INT96:
236
12
            if (_enable_mapping_timestamp_tz) {
237
                // treat INT96 as TIMESTAMPTZ
238
0
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_TIMESTAMPTZ, nullable,
239
0
                                                                         0, 6);
240
12
            } else {
241
                // in most cases, it's a nano timestamp
242
12
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATETIMEV2, nullable,
243
12
                                                                         0, 6);
244
12
            }
245
12
            break;
246
93
        case tparquet::Type::FLOAT:
247
93
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_FLOAT, nullable);
248
93
            break;
249
205
        case tparquet::Type::DOUBLE:
250
205
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_DOUBLE, nullable);
251
205
            break;
252
45
        case tparquet::Type::BYTE_ARRAY:
253
45
            if (_enable_mapping_varbinary) {
254
                // if physical_schema not set logicalType and converted_type,
255
                // we treat BYTE_ARRAY as VARBINARY by default, so that we can read all data directly.
256
22
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_VARBINARY, nullable);
257
23
            } else {
258
23
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
259
23
            }
260
45
            break;
261
0
        case tparquet::Type::FIXED_LEN_BYTE_ARRAY:
262
0
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
263
0
            break;
264
0
        default:
265
0
            throw Exception(Status::InternalError("Not supported parquet logicalType{}",
266
0
                                                  physical_schema.type));
267
0
            break;
268
760
        }
269
760
    }
270
1.70k
    return ans;
271
1.70k
}
272
273
std::pair<DataTypePtr, bool> FieldDescriptor::convert_to_doris_type(
274
716
        tparquet::LogicalType logicalType, bool nullable) {
275
716
    std::pair<DataTypePtr, bool> ans = {std::make_shared<DataTypeNothing>(), false};
276
716
    bool& is_type_compatibility = ans.second;
277
716
    if (logicalType.__isset.STRING) {
278
472
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
279
472
    } else if (logicalType.__isset.DECIMAL) {
280
114
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DECIMAL128I, nullable,
281
114
                                                                 logicalType.DECIMAL.precision,
282
114
                                                                 logicalType.DECIMAL.scale);
283
130
    } else if (logicalType.__isset.DATE) {
284
57
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATEV2, nullable);
285
73
    } else if (logicalType.__isset.INTEGER) {
286
0
        if (logicalType.INTEGER.isSigned) {
287
0
            if (logicalType.INTEGER.bitWidth <= 8) {
288
0
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_TINYINT, nullable);
289
0
            } else if (logicalType.INTEGER.bitWidth <= 16) {
290
0
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_SMALLINT, nullable);
291
0
            } else if (logicalType.INTEGER.bitWidth <= 32) {
292
0
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_INT, nullable);
293
0
            } else {
294
0
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_BIGINT, nullable);
295
0
            }
296
0
        } else {
297
0
            is_type_compatibility = true;
298
0
            if (logicalType.INTEGER.bitWidth <= 8) {
299
0
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_SMALLINT, nullable);
300
0
            } else if (logicalType.INTEGER.bitWidth <= 16) {
301
0
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_INT, nullable);
302
0
            } else if (logicalType.INTEGER.bitWidth <= 32) {
303
0
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_BIGINT, nullable);
304
0
            } else {
305
0
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_LARGEINT, nullable);
306
0
            }
307
0
        }
308
73
    } else if (logicalType.__isset.TIME) {
309
0
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_TIMEV2, nullable);
310
73
    } else if (logicalType.__isset.TIMESTAMP) {
311
69
        if (_enable_mapping_timestamp_tz) {
312
0
            if (logicalType.TIMESTAMP.isAdjustedToUTC) {
313
                // treat TIMESTAMP with isAdjustedToUTC as TIMESTAMPTZ
314
0
                ans.first = DataTypeFactory::instance().create_data_type(
315
0
                        TYPE_TIMESTAMPTZ, nullable, 0,
316
0
                        logicalType.TIMESTAMP.unit.__isset.MILLIS ? 3 : 6);
317
0
                return ans;
318
0
            }
319
0
        }
320
69
        ans.first = DataTypeFactory::instance().create_data_type(
321
69
                TYPE_DATETIMEV2, nullable, 0, logicalType.TIMESTAMP.unit.__isset.MILLIS ? 3 : 6);
322
69
    } else if (logicalType.__isset.JSON) {
323
0
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
324
4
    } else if (logicalType.__isset.UUID) {
325
4
        if (_enable_mapping_varbinary) {
326
3
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_VARBINARY, nullable, -1,
327
3
                                                                     -1, 16);
328
3
        } else {
329
1
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
330
1
        }
331
4
    } else if (logicalType.__isset.FLOAT16) {
332
0
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_FLOAT, nullable);
333
0
    } else {
334
0
        throw Exception(Status::InternalError("Not supported parquet logicalType"));
335
0
    }
336
716
    return ans;
337
716
}
338
339
std::pair<DataTypePtr, bool> FieldDescriptor::convert_to_doris_type(
340
226
        const tparquet::SchemaElement& physical_schema, bool nullable) {
341
226
    std::pair<DataTypePtr, bool> ans = {std::make_shared<DataTypeNothing>(), false};
342
226
    bool& is_type_compatibility = ans.second;
343
226
    switch (physical_schema.converted_type) {
344
106
    case tparquet::ConvertedType::type::UTF8:
345
106
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
346
106
        break;
347
24
    case tparquet::ConvertedType::type::DECIMAL:
348
24
        ans.first = DataTypeFactory::instance().create_data_type(
349
24
                TYPE_DECIMAL128I, nullable, physical_schema.precision, physical_schema.scale);
350
24
        break;
351
24
    case tparquet::ConvertedType::type::DATE:
352
24
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATEV2, nullable);
353
24
        break;
354
0
    case tparquet::ConvertedType::type::TIME_MILLIS:
355
0
        [[fallthrough]];
356
0
    case tparquet::ConvertedType::type::TIME_MICROS:
357
0
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_TIMEV2, nullable);
358
0
        break;
359
0
    case tparquet::ConvertedType::type::TIMESTAMP_MILLIS:
360
0
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATETIMEV2, nullable, 0, 3);
361
0
        break;
362
24
    case tparquet::ConvertedType::type::TIMESTAMP_MICROS:
363
24
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATETIMEV2, nullable, 0, 6);
364
24
        break;
365
24
    case tparquet::ConvertedType::type::INT_8:
366
24
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_TINYINT, nullable);
367
24
        break;
368
0
    case tparquet::ConvertedType::type::UINT_8:
369
0
        is_type_compatibility = true;
370
0
        [[fallthrough]];
371
24
    case tparquet::ConvertedType::type::INT_16:
372
24
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_SMALLINT, nullable);
373
24
        break;
374
0
    case tparquet::ConvertedType::type::UINT_16:
375
0
        is_type_compatibility = true;
376
0
        [[fallthrough]];
377
0
    case tparquet::ConvertedType::type::INT_32:
378
0
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_INT, nullable);
379
0
        break;
380
0
    case tparquet::ConvertedType::type::UINT_32:
381
0
        is_type_compatibility = true;
382
0
        [[fallthrough]];
383
0
    case tparquet::ConvertedType::type::INT_64:
384
0
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_BIGINT, nullable);
385
0
        break;
386
0
    case tparquet::ConvertedType::type::UINT_64:
387
0
        is_type_compatibility = true;
388
0
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_LARGEINT, nullable);
389
0
        break;
390
0
    case tparquet::ConvertedType::type::JSON:
391
0
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
392
0
        break;
393
0
    default:
394
0
        throw Exception(Status::InternalError("Not supported parquet ConvertedType: {}",
395
0
                                              physical_schema.converted_type));
396
226
    }
397
226
    return ans;
398
226
}
399
400
Status FieldDescriptor::parse_group_field(const std::vector<tparquet::SchemaElement>& t_schemas,
401
518
                                          size_t curr_pos, FieldSchema* group_field) {
402
518
    auto& group_schema = t_schemas[curr_pos];
403
518
    if (is_map_node(group_schema)) {
404
        // the map definition:
405
        // optional group <name> (MAP) {
406
        //   repeated group map (MAP_KEY_VALUE) {
407
        //     required <type> key;
408
        //     optional <type> value;
409
        //   }
410
        // }
411
76
        return parse_map_field(t_schemas, curr_pos, group_field);
412
76
    }
413
442
    if (is_list_node(group_schema)) {
414
        // the list definition:
415
        // optional group <name> (LIST) {
416
        //   repeated group [bag | list] { // hive or spark
417
        //     optional <type> [array_element | element]; // hive or spark
418
        //   }
419
        // }
420
152
        return parse_list_field(t_schemas, curr_pos, group_field);
421
152
    }
422
423
290
    if (is_repeated_node(group_schema)) {
424
0
        group_field->repetition_level++;
425
0
        group_field->definition_level++;
426
0
        group_field->children.resize(1);
427
0
        set_child_node_level(group_field, group_field->definition_level);
428
0
        auto struct_field = &group_field->children[0];
429
        // the list of struct:
430
        // repeated group <name> (LIST) {
431
        //   optional/required <type> <name>;
432
        //   ...
433
        // }
434
        // produce a non-null list<struct>
435
0
        RETURN_IF_ERROR(parse_struct_field(t_schemas, curr_pos, struct_field));
436
437
0
        group_field->name = group_schema.name;
438
0
        group_field->lower_case_name = to_lower(group_field->name);
439
0
        group_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id
440
0
        group_field->data_type =
441
0
                std::make_shared<DataTypeArray>(make_nullable(struct_field->data_type));
442
0
        group_field->field_id = group_schema.__isset.field_id ? group_schema.field_id : -1;
443
290
    } else {
444
290
        RETURN_IF_ERROR(parse_struct_field(t_schemas, curr_pos, group_field));
445
290
    }
446
447
290
    return Status::OK();
448
290
}
449
450
Status FieldDescriptor::parse_list_field(const std::vector<tparquet::SchemaElement>& t_schemas,
451
152
                                         size_t curr_pos, FieldSchema* list_field) {
452
    // the list definition:
453
    // spark and hive have three level schemas but with different schema name
454
    // spark: <column-name> - "list" - "element"
455
    // hive: <column-name> - "bag" - "array_element"
456
    // parse three level schemas to two level primitive like: LIST<INT>,
457
    // or nested structure like: LIST<MAP<INT, INT>>
458
152
    auto& first_level = t_schemas[curr_pos];
459
152
    if (first_level.num_children != 1) {
460
0
        return Status::InvalidArgument("List element should have only one child");
461
0
    }
462
463
152
    if (curr_pos + 1 >= t_schemas.size()) {
464
0
        return Status::InvalidArgument("List element should have the second level schema");
465
0
    }
466
467
152
    if (first_level.repetition_type == tparquet::FieldRepetitionType::REPEATED) {
468
0
        return Status::InvalidArgument("List element can't be a repeated schema");
469
0
    }
470
471
    // the repeated schema element
472
152
    auto& second_level = t_schemas[curr_pos + 1];
473
152
    if (second_level.repetition_type != tparquet::FieldRepetitionType::REPEATED) {
474
0
        return Status::InvalidArgument("The second level of list element should be repeated");
475
0
    }
476
477
    // This indicates if this list is nullable.
478
152
    bool is_optional = is_optional_node(first_level);
479
152
    if (is_optional) {
480
116
        list_field->definition_level++;
481
116
    }
482
152
    list_field->repetition_level++;
483
152
    list_field->definition_level++;
484
152
    list_field->children.resize(1);
485
152
    FieldSchema* list_child = &list_field->children[0];
486
487
152
    size_t num_children = num_children_node(second_level);
488
152
    if (num_children > 0) {
489
152
        if (num_children == 1 && !is_struct_list_node(second_level)) {
490
            // optional field, and the third level element is the nested structure in list
491
            // produce nested structure like: LIST<INT>, LIST<MAP>, LIST<LIST<...>>
492
            // skip bag/list, it's a repeated element.
493
152
            set_child_node_level(list_field, list_field->definition_level);
494
152
            RETURN_IF_ERROR(parse_node_field(t_schemas, curr_pos + 2, list_child));
495
152
        } else {
496
            // required field, produce the list of struct
497
0
            set_child_node_level(list_field, list_field->definition_level);
498
0
            RETURN_IF_ERROR(parse_struct_field(t_schemas, curr_pos + 1, list_child));
499
0
        }
500
152
    } else if (num_children == 0) {
501
        // required two level list, for compatibility reason.
502
0
        set_child_node_level(list_field, list_field->definition_level);
503
0
        parse_physical_field(second_level, false, list_child);
504
0
        _next_schema_pos = curr_pos + 2;
505
0
    }
506
507
152
    list_field->name = first_level.name;
508
152
    list_field->lower_case_name = to_lower(first_level.name);
509
152
    list_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id
510
152
    list_field->data_type =
511
152
            std::make_shared<DataTypeArray>(make_nullable(list_field->children[0].data_type));
512
152
    if (is_optional) {
513
116
        list_field->data_type = make_nullable(list_field->data_type);
514
116
    }
515
152
    list_field->field_id = first_level.__isset.field_id ? first_level.field_id : -1;
516
517
152
    return Status::OK();
518
152
}
519
520
Status FieldDescriptor::parse_map_field(const std::vector<tparquet::SchemaElement>& t_schemas,
521
76
                                        size_t curr_pos, FieldSchema* map_field) {
522
    // the map definition in parquet:
523
    // optional group <name> (MAP) {
524
    //   repeated group map (MAP_KEY_VALUE) {
525
    //     required <type> key;
526
    //     optional <type> value;
527
    //   }
528
    // }
529
    // Map value can be optional, the map without values is a SET
530
76
    if (curr_pos + 2 >= t_schemas.size()) {
531
0
        return Status::InvalidArgument("Map element should have at least three levels");
532
0
    }
533
76
    auto& map_schema = t_schemas[curr_pos];
534
76
    if (map_schema.num_children != 1) {
535
0
        return Status::InvalidArgument(
536
0
                "Map element should have only one child(name='map', type='MAP_KEY_VALUE')");
537
0
    }
538
76
    if (is_repeated_node(map_schema)) {
539
0
        return Status::InvalidArgument("Map element can't be a repeated schema");
540
0
    }
541
76
    auto& map_key_value = t_schemas[curr_pos + 1];
542
76
    if (!is_group_node(map_key_value) || !is_repeated_node(map_key_value)) {
543
0
        return Status::InvalidArgument(
544
0
                "the second level in map must be a repeated group(key and value)");
545
0
    }
546
76
    auto& map_key = t_schemas[curr_pos + 2];
547
76
    if (!is_required_node(map_key)) {
548
0
        LOG(WARNING) << "Filed " << map_schema.name << " is map type, but with nullable key column";
549
0
    }
550
551
76
    if (map_key_value.num_children == 1) {
552
        // The map with three levels is a SET
553
0
        return parse_list_field(t_schemas, curr_pos, map_field);
554
0
    }
555
76
    if (map_key_value.num_children != 2) {
556
        // A standard map should have four levels
557
0
        return Status::InvalidArgument(
558
0
                "the second level in map(MAP_KEY_VALUE) should have two children");
559
0
    }
560
    // standard map
561
76
    bool is_optional = is_optional_node(map_schema);
562
76
    if (is_optional) {
563
52
        map_field->definition_level++;
564
52
    }
565
76
    map_field->repetition_level++;
566
76
    map_field->definition_level++;
567
568
    // Directly create key and value children instead of intermediate key_value node
569
76
    map_field->children.resize(2);
570
    // map is a repeated node, we should set the `repeated_parent_def_level` of its children as `definition_level`
571
76
    set_child_node_level(map_field, map_field->definition_level);
572
573
76
    auto key_field = &map_field->children[0];
574
76
    auto value_field = &map_field->children[1];
575
576
    // Parse key and value fields directly from the key_value group's children
577
76
    _next_schema_pos = curr_pos + 2; // Skip key_value group, go directly to key
578
76
    RETURN_IF_ERROR(parse_node_field(t_schemas, _next_schema_pos, key_field));
579
76
    RETURN_IF_ERROR(parse_node_field(t_schemas, _next_schema_pos, value_field));
580
581
76
    map_field->name = map_schema.name;
582
76
    map_field->lower_case_name = to_lower(map_field->name);
583
76
    map_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id
584
76
    map_field->data_type = std::make_shared<DataTypeMap>(make_nullable(key_field->data_type),
585
76
                                                         make_nullable(value_field->data_type));
586
76
    if (is_optional) {
587
52
        map_field->data_type = make_nullable(map_field->data_type);
588
52
    }
589
76
    map_field->field_id = map_schema.__isset.field_id ? map_schema.field_id : -1;
590
591
76
    return Status::OK();
592
76
}
593
594
Status FieldDescriptor::parse_struct_field(const std::vector<tparquet::SchemaElement>& t_schemas,
595
290
                                           size_t curr_pos, FieldSchema* struct_field) {
596
    // the nested column in parquet, parse group to struct.
597
290
    auto& struct_schema = t_schemas[curr_pos];
598
290
    bool is_optional = is_optional_node(struct_schema);
599
290
    if (is_optional) {
600
276
        struct_field->definition_level++;
601
276
    }
602
290
    auto num_children = struct_schema.num_children;
603
290
    struct_field->children.resize(num_children);
604
290
    set_child_node_level(struct_field, struct_field->repeated_parent_def_level);
605
290
    _next_schema_pos = curr_pos + 1;
606
1.06k
    for (int i = 0; i < num_children; ++i) {
607
776
        RETURN_IF_ERROR(parse_node_field(t_schemas, _next_schema_pos, &struct_field->children[i]));
608
776
    }
609
290
    struct_field->name = struct_schema.name;
610
290
    struct_field->lower_case_name = to_lower(struct_field->name);
611
290
    struct_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id
612
613
290
    struct_field->field_id = struct_schema.__isset.field_id ? struct_schema.field_id : -1;
614
290
    DataTypes res_data_types;
615
290
    std::vector<String> names;
616
1.06k
    for (int i = 0; i < num_children; ++i) {
617
776
        res_data_types.push_back(make_nullable(struct_field->children[i].data_type));
618
776
        names.push_back(struct_field->children[i].name);
619
776
    }
620
290
    struct_field->data_type = std::make_shared<DataTypeStruct>(res_data_types, names);
621
290
    if (is_optional) {
622
276
        struct_field->data_type = make_nullable(struct_field->data_type);
623
276
    }
624
290
    return Status::OK();
625
290
}
626
627
5
int FieldDescriptor::get_column_index(const std::string& column) const {
628
15
    for (int32_t i = 0; i < _fields.size(); i++) {
629
15
        if (_fields[i].name == column) {
630
5
            return i;
631
5
        }
632
15
    }
633
0
    return -1;
634
5
}
635
636
966
FieldSchema* FieldDescriptor::get_column(const std::string& name) const {
637
966
    auto it = _name_to_field.find(name);
638
966
    if (it != _name_to_field.end()) {
639
966
        return it->second;
640
966
    }
641
0
    throw Exception(Status::InternalError("Name {} not found in FieldDescriptor!", name));
642
0
    return nullptr;
643
966
}
644
645
14
void FieldDescriptor::get_column_names(std::unordered_set<std::string>* names) const {
646
14
    names->clear();
647
210
    for (const FieldSchema& f : _fields) {
648
210
        names->emplace(f.name);
649
210
    }
650
14
}
651
652
0
std::string FieldDescriptor::debug_string() const {
653
0
    std::stringstream ss;
654
0
    ss << "fields=[";
655
0
    for (int i = 0; i < _fields.size(); ++i) {
656
0
        if (i != 0) {
657
0
            ss << ", ";
658
0
        }
659
0
        ss << _fields[i].debug_string();
660
0
    }
661
0
    ss << "]";
662
0
    return ss.str();
663
0
}
664
665
33
void FieldDescriptor::assign_ids() {
666
33
    uint64_t next_id = 1;
667
357
    for (auto& field : _fields) {
668
357
        field.assign_ids(next_id);
669
357
    }
670
33
}
671
672
0
const FieldSchema* FieldDescriptor::find_column_by_id(uint64_t column_id) const {
673
0
    for (const auto& field : _fields) {
674
0
        if (auto result = field.find_column_by_id(column_id)) {
675
0
            return result;
676
0
        }
677
0
    }
678
0
    return nullptr;
679
0
}
680
681
1.89k
void FieldSchema::assign_ids(uint64_t& next_id) {
682
1.89k
    column_id = next_id++;
683
684
1.89k
    for (auto& child : children) {
685
1.54k
        child.assign_ids(next_id);
686
1.54k
    }
687
688
1.89k
    max_column_id = next_id - 1;
689
1.89k
}
690
691
0
const FieldSchema* FieldSchema::find_column_by_id(uint64_t target_id) const {
692
0
    if (column_id == target_id) {
693
0
        return this;
694
0
    }
695
696
0
    for (const auto& child : children) {
697
0
        if (auto result = child.find_column_by_id(target_id)) {
698
0
            return result;
699
0
        }
700
0
    }
701
702
0
    return nullptr;
703
0
}
704
705
335
uint64_t FieldSchema::get_column_id() const {
706
335
    return column_id;
707
335
}
708
709
0
void FieldSchema::set_column_id(uint64_t id) {
710
0
    column_id = id;
711
0
}
712
713
88
uint64_t FieldSchema::get_max_column_id() const {
714
88
    return max_column_id;
715
88
}
716
717
#include "common/compile_check_end.h"
718
719
} // namespace doris