be/src/format/parquet/schema_desc.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "format/parquet/schema_desc.h" |
19 | | |
20 | | #include <ctype.h> |
21 | | |
22 | | #include <algorithm> |
23 | | #include <ostream> |
24 | | #include <utility> |
25 | | |
26 | | #include "common/cast_set.h" |
27 | | #include "common/logging.h" |
28 | | #include "core/data_type/data_type_array.h" |
29 | | #include "core/data_type/data_type_factory.hpp" |
30 | | #include "core/data_type/data_type_map.h" |
31 | | #include "core/data_type/data_type_struct.h" |
32 | | #include "core/data_type/define_primitive_type.h" |
33 | | #include "format/table/table_format_reader.h" |
34 | | #include "format/table/table_schema_change_helper.h" |
35 | | #include "util/slice.h" |
36 | | #include "util/string_util.h" |
37 | | |
38 | | namespace doris { |
39 | | #include "common/compile_check_begin.h" |
40 | | |
41 | 141k | static bool is_group_node(const tparquet::SchemaElement& schema) { |
42 | 141k | return schema.num_children > 0; |
43 | 141k | } |
44 | | |
45 | 16.3k | static bool is_list_node(const tparquet::SchemaElement& schema) { |
46 | 16.3k | return schema.__isset.converted_type && schema.converted_type == tparquet::ConvertedType::LIST; |
47 | 16.3k | } |
48 | | |
49 | 24.0k | static bool is_map_node(const tparquet::SchemaElement& schema) { |
50 | 24.0k | return schema.__isset.converted_type && |
51 | 24.0k | (schema.converted_type == tparquet::ConvertedType::MAP || |
52 | 18.0k | schema.converted_type == tparquet::ConvertedType::MAP_KEY_VALUE); |
53 | 24.0k | } |
54 | | |
55 | 120k | static bool is_repeated_node(const tparquet::SchemaElement& schema) { |
56 | 120k | return schema.__isset.repetition_type && |
57 | 120k | schema.repetition_type == tparquet::FieldRepetitionType::REPEATED; |
58 | 120k | } |
59 | | |
60 | 7.73k | static bool is_required_node(const tparquet::SchemaElement& schema) { |
61 | 7.73k | return schema.__isset.repetition_type && |
62 | 7.73k | schema.repetition_type == tparquet::FieldRepetitionType::REQUIRED; |
63 | 7.73k | } |
64 | | |
65 | 122k | static bool is_optional_node(const tparquet::SchemaElement& schema) { |
66 | 122k | return schema.__isset.repetition_type && |
67 | 122k | schema.repetition_type == tparquet::FieldRepetitionType::OPTIONAL; |
68 | 122k | } |
69 | | |
70 | 10.3k | static int num_children_node(const tparquet::SchemaElement& schema) { |
71 | 10.3k | return schema.__isset.num_children ? schema.num_children : 0; |
72 | 10.3k | } |
73 | | |
74 | | /** |
75 | | * `repeated_parent_def_level` is the definition level of the first ancestor node whose repetition_type equals REPEATED. |
76 | | * Empty array/map values are not stored in doris columns, so have to use `repeated_parent_def_level` to skip the |
77 | | * empty or null values in ancestor node. |
78 | | * |
79 | | * For instance, considering an array of strings with 3 rows like the following: |
80 | | * null, [], [a, b, c] |
81 | | * We can store four elements in data column: null, a, b, c |
82 | | * and the offsets column is: 1, 1, 4 |
83 | | * and the null map is: 1, 0, 0 |
84 | | * For the i-th row in array column: range from `offsets[i - 1]` until `offsets[i]` represents the elements in this row, |
85 | | * so we can't store empty array/map values in doris data column. |
86 | | * As a comparison, spark does not require `repeated_parent_def_level`, |
 * As a comparison, spark does not require `repeated_parent_def_level`,
 * because the spark column stores empty array/map values, and uses another length column to indicate empty values.
88 | | * Please reference: https://github.com/apache/spark/blob/master/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnVector.java |
89 | | * |
90 | | * Furthermore, we can also avoid store null array/map values in doris data column. |
91 | | * The same three rows as above, We can only store three elements in data column: a, b, c |
92 | | * and the offsets column is: 0, 0, 3 |
93 | | * and the null map is: 1, 0, 0 |
94 | | * |
95 | | * Inherit the repetition and definition level from parent node, if the parent node is repeated, |
96 | | * we should set repeated_parent_def_level = definition_level, otherwise as repeated_parent_def_level. |
97 | | * @param parent parent node |
98 | | * @param repeated_parent_def_level the first ancestor node whose repetition_type equals REPEATED |
99 | | */ |
100 | 24.1k | static void set_child_node_level(FieldSchema* parent, int16_t repeated_parent_def_level) { |
101 | 40.4k | for (auto& child : parent->children) { |
102 | 40.4k | child.repetition_level = parent->repetition_level; |
103 | 40.4k | child.definition_level = parent->definition_level; |
104 | 40.4k | child.repeated_parent_def_level = repeated_parent_def_level; |
105 | 40.4k | } |
106 | 24.1k | } |
107 | | |
108 | 10.2k | static bool is_struct_list_node(const tparquet::SchemaElement& schema) { |
109 | 10.2k | const std::string& name = schema.name; |
110 | 10.2k | static const Slice array_slice("array", 5); |
111 | 10.2k | static const Slice tuple_slice("_tuple", 6); |
112 | 10.2k | Slice slice(name); |
113 | 10.2k | return slice == array_slice || slice.ends_with(tuple_slice); |
114 | 10.2k | } |
115 | | |
116 | 0 | std::string FieldSchema::debug_string() const { |
117 | 0 | std::stringstream ss; |
118 | 0 | ss << "FieldSchema(name=" << name << ", R=" << repetition_level << ", D=" << definition_level; |
119 | 0 | if (children.size() > 0) { |
120 | 0 | ss << ", type=" << data_type->get_name() << ", children=["; |
121 | 0 | for (int i = 0; i < children.size(); ++i) { |
122 | 0 | if (i != 0) { |
123 | 0 | ss << ", "; |
124 | 0 | } |
125 | 0 | ss << children[i].debug_string(); |
126 | 0 | } |
127 | 0 | ss << "]"; |
128 | 0 | } else { |
129 | 0 | ss << ", physical_type=" << physical_type; |
130 | 0 | ss << " , doris_type=" << data_type->get_name(); |
131 | 0 | } |
132 | 0 | ss << ")"; |
133 | 0 | return ss.str(); |
134 | 0 | } |
135 | | |
136 | 11.3k | Status FieldDescriptor::parse_from_thrift(const std::vector<tparquet::SchemaElement>& t_schemas) { |
137 | 11.3k | if (t_schemas.size() == 0 || !is_group_node(t_schemas[0])) { |
138 | 0 | return Status::InvalidArgument("Wrong parquet root schema element"); |
139 | 0 | } |
140 | 11.3k | const auto& root_schema = t_schemas[0]; |
141 | 11.3k | _fields.resize(root_schema.num_children); |
142 | 11.3k | _next_schema_pos = 1; |
143 | | |
144 | 94.0k | for (int i = 0; i < root_schema.num_children; ++i) { |
145 | 82.6k | RETURN_IF_ERROR(parse_node_field(t_schemas, _next_schema_pos, &_fields[i])); |
146 | 82.6k | if (_name_to_field.find(_fields[i].name) != _name_to_field.end()) { |
147 | 0 | return Status::InvalidArgument("Duplicated field name: {}", _fields[i].name); |
148 | 0 | } |
149 | 82.6k | _name_to_field.emplace(_fields[i].name, &_fields[i]); |
150 | 82.6k | } |
151 | | |
152 | 11.3k | if (_next_schema_pos != t_schemas.size()) { |
153 | 0 | return Status::InvalidArgument("Remaining {} unparsed schema elements", |
154 | 0 | t_schemas.size() - _next_schema_pos); |
155 | 0 | } |
156 | | |
157 | 11.3k | return Status::OK(); |
158 | 11.3k | } |
159 | | |
// Parses the schema element at `curr_pos` into `node_field` and advances
// `_next_schema_pos` past everything it consumed. Three shapes are handled:
//   1. group node         -> delegated to parse_group_field
//   2. repeated primitive -> legacy single-level list, wrapped as an array
//   3. plain primitive    -> leaf column
Status FieldDescriptor::parse_node_field(const std::vector<tparquet::SchemaElement>& t_schemas,
                                         size_t curr_pos, FieldSchema* node_field) {
    if (curr_pos >= t_schemas.size()) {
        return Status::InvalidArgument("Out-of-bounds index of schema elements");
    }
    auto& t_schema = t_schemas[curr_pos];
    if (is_group_node(t_schema)) {
        // nested structure or nullable list
        return parse_group_field(t_schemas, curr_pos, node_field);
    }
    if (is_repeated_node(t_schema)) {
        // repeated <primitive-type> <name> (LIST)
        // produce required list<element>
        node_field->repetition_level++;
        node_field->definition_level++;
        node_field->children.resize(1);
        // This node is itself the repeated ancestor, so its (incremented)
        // definition level becomes the child's repeated_parent_def_level.
        set_child_node_level(node_field, node_field->definition_level);
        auto child = &node_field->children[0];
        // The element is non-nullable (is_nullable = false).
        parse_physical_field(t_schema, false, child);

        node_field->name = t_schema.name;
        node_field->lower_case_name = to_lower(t_schema.name);
        node_field->data_type = std::make_shared<DataTypeArray>(make_nullable(child->data_type));
        _next_schema_pos = curr_pos + 1;
        node_field->field_id = t_schema.__isset.field_id ? t_schema.field_id : -1;
    } else {
        bool is_optional = is_optional_node(t_schema);
        if (is_optional) {
            // An optional leaf contributes one definition level for null-ness.
            node_field->definition_level++;
        }
        parse_physical_field(t_schema, is_optional, node_field);
        _next_schema_pos = curr_pos + 1;
    }
    return Status::OK();
}
195 | | |
196 | | void FieldDescriptor::parse_physical_field(const tparquet::SchemaElement& physical_schema, |
197 | 98.8k | bool is_nullable, FieldSchema* physical_field) { |
198 | 98.8k | physical_field->name = physical_schema.name; |
199 | 98.8k | physical_field->lower_case_name = to_lower(physical_field->name); |
200 | 98.8k | physical_field->parquet_schema = physical_schema; |
201 | 98.8k | physical_field->physical_type = physical_schema.type; |
202 | 98.8k | physical_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id |
203 | 98.8k | _physical_fields.push_back(physical_field); |
204 | 98.8k | physical_field->physical_column_index = cast_set<int>(_physical_fields.size() - 1); |
205 | 98.8k | auto type = get_doris_type(physical_schema, is_nullable); |
206 | 98.8k | physical_field->data_type = type.first; |
207 | 98.8k | physical_field->is_type_compatibility = type.second; |
208 | 98.8k | physical_field->field_id = physical_schema.__isset.field_id ? physical_schema.field_id : -1; |
209 | 98.8k | } |
210 | | |
// Maps a parquet leaf schema element to a pair of (doris data type,
// is_type_compatibility). The logical type annotation is preferred, then the
// older converted type; if neither produces a concrete type (or the
// conversion threw "not supported"), fall back to the raw physical type.
// `is_type_compatibility` is true when the value is widened into a larger
// doris type (e.g. unsigned parquet ints), so readers must convert.
std::pair<DataTypePtr, bool> FieldDescriptor::get_doris_type(
        const tparquet::SchemaElement& physical_schema, bool nullable) {
    std::pair<DataTypePtr, bool> ans = {std::make_shared<DataTypeNothing>(), false};
    try {
        if (physical_schema.__isset.logicalType) {
            ans = convert_to_doris_type(physical_schema.logicalType, nullable);
        } else if (physical_schema.__isset.converted_type) {
            ans = convert_to_doris_type(physical_schema, nullable);
        }
    } catch (...) {
        // "Not supported" exceptions are deliberately swallowed here; such
        // columns fall through to the physical-type mapping below (so an
        // unannotated/unsupported BYTE_ARRAY becomes varbinary now, where it
        // used to become string).
    }
    // DataTypeNothing reports INVALID_TYPE: no annotation matched above, so
    // derive the doris type from the parquet physical type instead.
    if (ans.first->get_primitive_type() == PrimitiveType::INVALID_TYPE) {
        switch (physical_schema.type) {
        case tparquet::Type::BOOLEAN:
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_BOOLEAN, nullable);
            break;
        case tparquet::Type::INT32:
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_INT, nullable);
            break;
        case tparquet::Type::INT64:
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_BIGINT, nullable);
            break;
        case tparquet::Type::INT96:
            if (_enable_mapping_timestamp_tz) {
                // treat INT96 as TIMESTAMPTZ
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_TIMESTAMPTZ, nullable,
                                                                         0, 6);
            } else {
                // in most cases, it's a nano timestamp
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATETIMEV2, nullable,
                                                                         0, 6);
            }
            break;
        case tparquet::Type::FLOAT:
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_FLOAT, nullable);
            break;
        case tparquet::Type::DOUBLE:
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_DOUBLE, nullable);
            break;
        case tparquet::Type::BYTE_ARRAY:
            if (_enable_mapping_varbinary) {
                // if physical_schema not set logicalType and converted_type,
                // we treat BYTE_ARRAY as VARBINARY by default, so that we can read all data directly.
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_VARBINARY, nullable);
            } else {
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
            }
            break;
        case tparquet::Type::FIXED_LEN_BYTE_ARRAY:
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
            break;
        default:
            throw Exception(Status::InternalError("Not supported parquet logicalType{}",
                                                  physical_schema.type));
            break;
        }
    }
    return ans;
}
272 | | |
// Converts a parquet LogicalType annotation to a doris type. Returns the pair
// (data type, is_type_compatibility); the latter is true for unsigned integer
// widths, which are widened to the next larger signed doris type.
// Throws Exception(InternalError) for annotations with no doris mapping —
// the caller (get_doris_type) swallows that and falls back to physical type.
// NOTE(review): `logicalType` is taken by value to match the declaration.
std::pair<DataTypePtr, bool> FieldDescriptor::convert_to_doris_type(
        tparquet::LogicalType logicalType, bool nullable) {
    std::pair<DataTypePtr, bool> ans = {std::make_shared<DataTypeNothing>(), false};
    bool& is_type_compatibility = ans.second;
    if (logicalType.__isset.STRING) {
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
    } else if (logicalType.__isset.DECIMAL) {
        // Decimal always lands in DECIMAL128I with the annotated precision/scale.
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DECIMAL128I, nullable,
                                                                 logicalType.DECIMAL.precision,
                                                                 logicalType.DECIMAL.scale);
    } else if (logicalType.__isset.DATE) {
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATEV2, nullable);
    } else if (logicalType.__isset.INTEGER) {
        if (logicalType.INTEGER.isSigned) {
            // Signed: pick the smallest doris integer that fits the bit width.
            if (logicalType.INTEGER.bitWidth <= 8) {
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_TINYINT, nullable);
            } else if (logicalType.INTEGER.bitWidth <= 16) {
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_SMALLINT, nullable);
            } else if (logicalType.INTEGER.bitWidth <= 32) {
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_INT, nullable);
            } else {
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_BIGINT, nullable);
            }
        } else {
            // Unsigned: widen one size up so the full value range fits, and
            // flag the widening via is_type_compatibility.
            is_type_compatibility = true;
            if (logicalType.INTEGER.bitWidth <= 8) {
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_SMALLINT, nullable);
            } else if (logicalType.INTEGER.bitWidth <= 16) {
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_INT, nullable);
            } else if (logicalType.INTEGER.bitWidth <= 32) {
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_BIGINT, nullable);
            } else {
                ans.first = DataTypeFactory::instance().create_data_type(TYPE_LARGEINT, nullable);
            }
        }
    } else if (logicalType.__isset.TIME) {
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_TIMEV2, nullable);
    } else if (logicalType.__isset.TIMESTAMP) {
        if (_enable_mapping_timestamp_tz) {
            if (logicalType.TIMESTAMP.isAdjustedToUTC) {
                // treat TIMESTAMP with isAdjustedToUTC as TIMESTAMPTZ
                // (scale 3 for millis, 6 otherwise)
                ans.first = DataTypeFactory::instance().create_data_type(
                        TYPE_TIMESTAMPTZ, nullable, 0,
                        logicalType.TIMESTAMP.unit.__isset.MILLIS ? 3 : 6);
                return ans;
            }
        }
        ans.first = DataTypeFactory::instance().create_data_type(
                TYPE_DATETIMEV2, nullable, 0, logicalType.TIMESTAMP.unit.__isset.MILLIS ? 3 : 6);
    } else if (logicalType.__isset.JSON) {
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
    } else if (logicalType.__isset.UUID) {
        if (_enable_mapping_varbinary) {
            // UUID is a 16-byte fixed-length binary value.
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_VARBINARY, nullable, -1,
                                                                     -1, 16);
        } else {
            ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
        }
    } else if (logicalType.__isset.FLOAT16) {
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_FLOAT, nullable);
    } else {
        throw Exception(Status::InternalError("Not supported parquet logicalType"));
    }
    return ans;
}
338 | | |
// Converts the legacy ConvertedType annotation to a doris type (used only
// when no LogicalType is present). Returns (data type, is_type_compatibility);
// the latter is true for unsigned integers, which are widened one size up.
// Throws Exception(InternalError) for unmapped converted types — the caller
// (get_doris_type) swallows that and falls back to the physical type.
std::pair<DataTypePtr, bool> FieldDescriptor::convert_to_doris_type(
        const tparquet::SchemaElement& physical_schema, bool nullable) {
    std::pair<DataTypePtr, bool> ans = {std::make_shared<DataTypeNothing>(), false};
    bool& is_type_compatibility = ans.second;
    switch (physical_schema.converted_type) {
    case tparquet::ConvertedType::type::UTF8:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
        break;
    case tparquet::ConvertedType::type::DECIMAL:
        // Precision/scale come from the schema element itself in the legacy form.
        ans.first = DataTypeFactory::instance().create_data_type(
                TYPE_DECIMAL128I, nullable, physical_schema.precision, physical_schema.scale);
        break;
    case tparquet::ConvertedType::type::DATE:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATEV2, nullable);
        break;
    case tparquet::ConvertedType::type::TIME_MILLIS:
        [[fallthrough]];
    case tparquet::ConvertedType::type::TIME_MICROS:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_TIMEV2, nullable);
        break;
    case tparquet::ConvertedType::type::TIMESTAMP_MILLIS:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATETIMEV2, nullable, 0, 3);
        break;
    case tparquet::ConvertedType::type::TIMESTAMP_MICROS:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_DATETIMEV2, nullable, 0, 6);
        break;
    case tparquet::ConvertedType::type::INT_8:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_TINYINT, nullable);
        break;
    // Each UINT_N widens to the signed type one size up (see is_type_compatibility).
    case tparquet::ConvertedType::type::UINT_8:
        is_type_compatibility = true;
        [[fallthrough]];
    case tparquet::ConvertedType::type::INT_16:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_SMALLINT, nullable);
        break;
    case tparquet::ConvertedType::type::UINT_16:
        is_type_compatibility = true;
        [[fallthrough]];
    case tparquet::ConvertedType::type::INT_32:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_INT, nullable);
        break;
    case tparquet::ConvertedType::type::UINT_32:
        is_type_compatibility = true;
        [[fallthrough]];
    case tparquet::ConvertedType::type::INT_64:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_BIGINT, nullable);
        break;
    case tparquet::ConvertedType::type::UINT_64:
        is_type_compatibility = true;
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_LARGEINT, nullable);
        break;
    case tparquet::ConvertedType::type::JSON:
        ans.first = DataTypeFactory::instance().create_data_type(TYPE_STRING, nullable);
        break;
    default:
        throw Exception(Status::InternalError("Not supported parquet ConvertedType: {}",
                                              physical_schema.converted_type));
    }
    return ans;
}
399 | | |
// Parses a group (non-leaf) schema element at `curr_pos` into `group_field`,
// dispatching on the group's annotation:
//   MAP / MAP_KEY_VALUE -> parse_map_field
//   LIST                -> parse_list_field
//   repeated group      -> legacy list-of-struct, wrapped as a non-null array
//   otherwise           -> plain struct via parse_struct_field
Status FieldDescriptor::parse_group_field(const std::vector<tparquet::SchemaElement>& t_schemas,
                                          size_t curr_pos, FieldSchema* group_field) {
    auto& group_schema = t_schemas[curr_pos];
    if (is_map_node(group_schema)) {
        // the map definition:
        // optional group <name> (MAP) {
        //   repeated group map (MAP_KEY_VALUE) {
        //     required <type> key;
        //     optional <type> value;
        //   }
        // }
        return parse_map_field(t_schemas, curr_pos, group_field);
    }
    if (is_list_node(group_schema)) {
        // the list definition:
        // optional group <name> (LIST) {
        //   repeated group [bag | list] { // hive or spark
        //     optional <type> [array_element | element]; // hive or spark
        //   }
        // }
        return parse_list_field(t_schemas, curr_pos, group_field);
    }

    if (is_repeated_node(group_schema)) {
        group_field->repetition_level++;
        group_field->definition_level++;
        group_field->children.resize(1);
        // This group is itself the repeated ancestor, so its (incremented)
        // definition level is passed down as repeated_parent_def_level.
        set_child_node_level(group_field, group_field->definition_level);
        auto struct_field = &group_field->children[0];
        // the list of struct:
        // repeated group <name> (LIST) {
        //   optional/required <type> <name>;
        //   ...
        // }
        // produce a non-null list<struct>
        RETURN_IF_ERROR(parse_struct_field(t_schemas, curr_pos, struct_field));

        group_field->name = group_schema.name;
        group_field->lower_case_name = to_lower(group_field->name);
        group_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id
        group_field->data_type =
                std::make_shared<DataTypeArray>(make_nullable(struct_field->data_type));
        group_field->field_id = group_schema.__isset.field_id ? group_schema.field_id : -1;
    } else {
        RETURN_IF_ERROR(parse_struct_field(t_schemas, curr_pos, group_field));
    }

    return Status::OK();
}
449 | | |
// Parses a LIST-annotated group at `curr_pos` into an array field, collapsing
// the three-level parquet list layout down to doris's two-level array type.
Status FieldDescriptor::parse_list_field(const std::vector<tparquet::SchemaElement>& t_schemas,
                                         size_t curr_pos, FieldSchema* list_field) {
    // the list definition:
    // spark and hive have three level schemas but with different schema name
    // spark: <column-name> - "list" - "element"
    // hive: <column-name> - "bag" - "array_element"
    // parse three level schemas to two level primitive like: LIST<INT>,
    // or nested structure like: LIST<MAP<INT, INT>>
    auto& first_level = t_schemas[curr_pos];
    if (first_level.num_children != 1) {
        return Status::InvalidArgument("List element should have only one child");
    }

    if (curr_pos + 1 >= t_schemas.size()) {
        return Status::InvalidArgument("List element should have the second level schema");
    }

    if (first_level.repetition_type == tparquet::FieldRepetitionType::REPEATED) {
        return Status::InvalidArgument("List element can't be a repeated schema");
    }

    // the repeated schema element
    auto& second_level = t_schemas[curr_pos + 1];
    if (second_level.repetition_type != tparquet::FieldRepetitionType::REPEATED) {
        return Status::InvalidArgument("The second level of list element should be repeated");
    }

    // This indicates if this list is nullable.
    bool is_optional = is_optional_node(first_level);
    if (is_optional) {
        list_field->definition_level++;
    }
    // The repeated middle level contributes one repetition and one definition level.
    list_field->repetition_level++;
    list_field->definition_level++;
    list_field->children.resize(1);
    FieldSchema* list_child = &list_field->children[0];

    size_t num_children = num_children_node(second_level);
    if (num_children > 0) {
        if (num_children == 1 && !is_struct_list_node(second_level)) {
            // optional field, and the third level element is the nested structure in list
            // produce nested structure like: LIST<INT>, LIST<MAP>, LIST<LIST<...>>
            // skip bag/list, it's a repeated element.
            set_child_node_level(list_field, list_field->definition_level);
            RETURN_IF_ERROR(parse_node_field(t_schemas, curr_pos + 2, list_child));
        } else {
            // required field, produce the list of struct
            set_child_node_level(list_field, list_field->definition_level);
            RETURN_IF_ERROR(parse_struct_field(t_schemas, curr_pos + 1, list_child));
        }
    } else if (num_children == 0) {
        // required two level list, for compatibility reason.
        set_child_node_level(list_field, list_field->definition_level);
        parse_physical_field(second_level, false, list_child);
        _next_schema_pos = curr_pos + 2;
    }

    list_field->name = first_level.name;
    list_field->lower_case_name = to_lower(first_level.name);
    list_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id
    list_field->data_type =
            std::make_shared<DataTypeArray>(make_nullable(list_field->children[0].data_type));
    if (is_optional) {
        list_field->data_type = make_nullable(list_field->data_type);
    }
    list_field->field_id = first_level.__isset.field_id ? first_level.field_id : -1;

    return Status::OK();
}
519 | | |
520 | | Status FieldDescriptor::parse_map_field(const std::vector<tparquet::SchemaElement>& t_schemas, |
521 | 7.73k | size_t curr_pos, FieldSchema* map_field) { |
522 | | // the map definition in parquet: |
523 | | // optional group <name> (MAP) { |
524 | | // repeated group map (MAP_KEY_VALUE) { |
525 | | // required <type> key; |
526 | | // optional <type> value; |
527 | | // } |
528 | | // } |
529 | | // Map value can be optional, the map without values is a SET |
530 | 7.73k | if (curr_pos + 2 >= t_schemas.size()) { |
531 | 0 | return Status::InvalidArgument("Map element should have at least three levels"); |
532 | 0 | } |
533 | 7.73k | auto& map_schema = t_schemas[curr_pos]; |
534 | 7.73k | if (map_schema.num_children != 1) { |
535 | 2 | return Status::InvalidArgument( |
536 | 2 | "Map element should have only one child(name='map', type='MAP_KEY_VALUE')"); |
537 | 2 | } |
538 | 7.73k | if (is_repeated_node(map_schema)) { |
539 | 0 | return Status::InvalidArgument("Map element can't be a repeated schema"); |
540 | 0 | } |
541 | 7.73k | auto& map_key_value = t_schemas[curr_pos + 1]; |
542 | 7.73k | if (!is_group_node(map_key_value) || !is_repeated_node(map_key_value)) { |
543 | 0 | return Status::InvalidArgument( |
544 | 0 | "the second level in map must be a repeated group(key and value)"); |
545 | 0 | } |
546 | 7.73k | auto& map_key = t_schemas[curr_pos + 2]; |
547 | 7.73k | if (!is_required_node(map_key)) { |
548 | 452 | LOG(WARNING) << "Filed " << map_schema.name << " is map type, but with nullable key column"; |
549 | 452 | } |
550 | | |
551 | 7.73k | if (map_key_value.num_children == 1) { |
552 | | // The map with three levels is a SET |
553 | 0 | return parse_list_field(t_schemas, curr_pos, map_field); |
554 | 0 | } |
555 | 7.73k | if (map_key_value.num_children != 2) { |
556 | | // A standard map should have four levels |
557 | 0 | return Status::InvalidArgument( |
558 | 0 | "the second level in map(MAP_KEY_VALUE) should have two children"); |
559 | 0 | } |
560 | | // standard map |
561 | 7.73k | bool is_optional = is_optional_node(map_schema); |
562 | 7.73k | if (is_optional) { |
563 | 7.10k | map_field->definition_level++; |
564 | 7.10k | } |
565 | 7.73k | map_field->repetition_level++; |
566 | 7.73k | map_field->definition_level++; |
567 | | |
568 | | // Directly create key and value children instead of intermediate key_value node |
569 | 7.73k | map_field->children.resize(2); |
570 | | // map is a repeated node, we should set the `repeated_parent_def_level` of its children as `definition_level` |
571 | 7.73k | set_child_node_level(map_field, map_field->definition_level); |
572 | | |
573 | 7.73k | auto key_field = &map_field->children[0]; |
574 | 7.73k | auto value_field = &map_field->children[1]; |
575 | | |
576 | | // Parse key and value fields directly from the key_value group's children |
577 | 7.73k | _next_schema_pos = curr_pos + 2; // Skip key_value group, go directly to key |
578 | 7.73k | RETURN_IF_ERROR(parse_node_field(t_schemas, _next_schema_pos, key_field)); |
579 | 7.73k | RETURN_IF_ERROR(parse_node_field(t_schemas, _next_schema_pos, value_field)); |
580 | | |
581 | 7.73k | map_field->name = map_schema.name; |
582 | 7.73k | map_field->lower_case_name = to_lower(map_field->name); |
583 | 7.73k | map_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id |
584 | 7.73k | map_field->data_type = std::make_shared<DataTypeMap>(make_nullable(key_field->data_type), |
585 | 7.73k | make_nullable(value_field->data_type)); |
586 | 7.73k | if (is_optional) { |
587 | 7.10k | map_field->data_type = make_nullable(map_field->data_type); |
588 | 7.10k | } |
589 | 7.73k | map_field->field_id = map_schema.__isset.field_id ? map_schema.field_id : -1; |
590 | | |
591 | 7.73k | return Status::OK(); |
592 | 7.73k | } |
593 | | |
// Parses a plain group element at `curr_pos` into a struct field: every child
// schema element becomes one member of the resulting DataTypeStruct.
Status FieldDescriptor::parse_struct_field(const std::vector<tparquet::SchemaElement>& t_schemas,
                                           size_t curr_pos, FieldSchema* struct_field) {
    // the nested column in parquet, parse group to struct.
    auto& struct_schema = t_schemas[curr_pos];
    bool is_optional = is_optional_node(struct_schema);
    if (is_optional) {
        struct_field->definition_level++;
    }
    auto num_children = struct_schema.num_children;
    struct_field->children.resize(num_children);
    // A struct is not itself repeated, so children inherit the existing
    // repeated ancestor level unchanged (unlike map/list parsing).
    set_child_node_level(struct_field, struct_field->repeated_parent_def_level);
    _next_schema_pos = curr_pos + 1;
    for (int i = 0; i < num_children; ++i) {
        RETURN_IF_ERROR(parse_node_field(t_schemas, _next_schema_pos, &struct_field->children[i]));
    }
    struct_field->name = struct_schema.name;
    struct_field->lower_case_name = to_lower(struct_field->name);
    struct_field->column_id = UNASSIGNED_COLUMN_ID; // Initialize column_id

    struct_field->field_id = struct_schema.__isset.field_id ? struct_schema.field_id : -1;
    DataTypes res_data_types;
    std::vector<String> names;
    for (int i = 0; i < num_children; ++i) {
        // Struct members are always wrapped nullable in the doris struct type.
        res_data_types.push_back(make_nullable(struct_field->children[i].data_type));
        names.push_back(struct_field->children[i].name);
    }
    struct_field->data_type = std::make_shared<DataTypeStruct>(res_data_types, names);
    if (is_optional) {
        struct_field->data_type = make_nullable(struct_field->data_type);
    }
    return Status::OK();
}
626 | | |
627 | 5 | int FieldDescriptor::get_column_index(const std::string& column) const { |
628 | 15 | for (int32_t i = 0; i < _fields.size(); i++) { |
629 | 15 | if (_fields[i].name == column) { |
630 | 5 | return i; |
631 | 5 | } |
632 | 15 | } |
633 | 0 | return -1; |
634 | 5 | } |
635 | | |
636 | 973k | FieldSchema* FieldDescriptor::get_column(const std::string& name) const { |
637 | 973k | auto it = _name_to_field.find(name); |
638 | 974k | if (it != _name_to_field.end()) { |
639 | 974k | return it->second; |
640 | 974k | } |
641 | 18.4E | throw Exception(Status::InternalError("Name {} not found in FieldDescriptor!", name)); |
642 | 0 | return nullptr; |
643 | 973k | } |
644 | | |
645 | 35.1k | void FieldDescriptor::get_column_names(std::unordered_set<std::string>* names) const { |
646 | 35.1k | names->clear(); |
647 | 315k | for (const FieldSchema& f : _fields) { |
648 | 315k | names->emplace(f.name); |
649 | 315k | } |
650 | 35.1k | } |
651 | | |
652 | 0 | std::string FieldDescriptor::debug_string() const { |
653 | 0 | std::stringstream ss; |
654 | 0 | ss << "fields=["; |
655 | 0 | for (int i = 0; i < _fields.size(); ++i) { |
656 | 0 | if (i != 0) { |
657 | 0 | ss << ", "; |
658 | 0 | } |
659 | 0 | ss << _fields[i].debug_string(); |
660 | 0 | } |
661 | 0 | ss << "]"; |
662 | 0 | return ss.str(); |
663 | 0 | } |
664 | | |
665 | 29.6k | void FieldDescriptor::assign_ids() { |
666 | 29.6k | uint64_t next_id = 1; |
667 | 217k | for (auto& field : _fields) { |
668 | 217k | field.assign_ids(next_id); |
669 | 217k | } |
670 | 29.6k | } |
671 | | |
672 | 0 | const FieldSchema* FieldDescriptor::find_column_by_id(uint64_t column_id) const { |
673 | 0 | for (const auto& field : _fields) { |
674 | 0 | if (auto result = field.find_column_by_id(column_id)) { |
675 | 0 | return result; |
676 | 0 | } |
677 | 0 | } |
678 | 0 | return nullptr; |
679 | 0 | } |
680 | | |
681 | 308k | void FieldSchema::assign_ids(uint64_t& next_id) { |
682 | 308k | column_id = next_id++; |
683 | | |
684 | 308k | for (auto& child : children) { |
685 | 91.1k | child.assign_ids(next_id); |
686 | 91.1k | } |
687 | | |
688 | 308k | max_column_id = next_id - 1; |
689 | 308k | } |
690 | | |
691 | 0 | const FieldSchema* FieldSchema::find_column_by_id(uint64_t target_id) const { |
692 | 0 | if (column_id == target_id) { |
693 | 0 | return this; |
694 | 0 | } |
695 | | |
696 | 0 | for (const auto& child : children) { |
697 | 0 | if (auto result = child.find_column_by_id(target_id)) { |
698 | 0 | return result; |
699 | 0 | } |
700 | 0 | } |
701 | | |
702 | 0 | return nullptr; |
703 | 0 | } |
704 | | |
// Returns the column id assigned by FieldSchema::assign_ids().
uint64_t FieldSchema::get_column_id() const {
    return column_id;
}
708 | | |
// Overrides the column id (normally set via FieldSchema::assign_ids()).
void FieldSchema::set_column_id(uint64_t id) {
    column_id = id;
}
712 | | |
// Returns the largest column id in this field's subtree, as recorded by
// FieldSchema::assign_ids().
uint64_t FieldSchema::get_max_column_id() const {
    return max_column_id;
}
716 | | |
717 | | #include "common/compile_check_end.h" |
718 | | |
719 | | } // namespace doris |