be/src/util/json/json_parser.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | // This file is copied from |
18 | | // https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/JSONParsers/SimdJSONParser.cpp |
19 | | // and modified by Doris |
20 | | |
21 | | #include "util/json/json_parser.h" |
22 | | |
23 | | #include <fmt/format.h> |
24 | | #include <glog/logging.h> |
25 | | |
26 | | #include <algorithm> |
27 | | #include <cassert> |
28 | | #include <string_view> |
29 | | #include <vector> |
30 | | |
31 | | #include "common/cast_set.h" |
32 | | // IWYU pragma: keep |
33 | | #include "common/status.h" |
34 | | #include "util/json/path_in_data.h" |
35 | | #include "util/json/simd_json_parser.h" |
36 | | |
37 | | namespace doris { |
38 | | |
39 | | template <typename ParserImpl> |
40 | | std::optional<ParseResult> JSONDataParser<ParserImpl>::parse(const char* begin, size_t length, |
41 | 80.6k | const ParseConfig& config) { |
42 | 80.6k | Element document; |
43 | 80.6k | if (!parser.parse(begin, length, document)) { |
44 | 13 | return {}; |
45 | 13 | } |
46 | 80.6k | ParseContext context; |
47 | | // deprecated_enable_flatten_nested controls nested path traversal |
48 | | // NestedGroup expansion is now handled at storage layer |
49 | 80.6k | context.deprecated_enable_flatten_nested = config.deprecated_enable_flatten_nested; |
50 | 80.6k | context.check_duplicate_json_path = config.check_duplicate_json_path; |
51 | 80.6k | context.is_top_array = document.isArray(); |
52 | 80.6k | traverse(document, context); |
53 | 80.6k | ParseResult result; |
54 | 80.6k | result.values = std::move(context.values); |
55 | 80.6k | result.paths.reserve(context.paths.size()); |
56 | 1.35M | for (auto&& path : context.paths) { |
57 | 1.35M | result.paths.emplace_back(std::move(path)); |
58 | 1.35M | } |
59 | 80.6k | return result; |
60 | 80.6k | } |
61 | | |
template <typename ParserImpl>
void JSONDataParser<ParserImpl>::traverse(const Element& element, ParseContext& ctx) {
    // Recursive dispatch over the JSON tree: objects recurse per member, arrays
    // are either serialized wholesale to JSONB (when they contain objects and
    // flattening is disabled) or traversed element-wise, and scalars become a
    // leaf value at the current builder path.
    // checkStackSize();
    if (element.isObject()) {
        traverseObject(element.getObject(), ctx);
    } else if (element.isArray()) {
        // allow nested arrays (multi-level) for NestedGroup; deeper levels are
        // handled by VariantNestedBuilder with a max-depth guard.
        // `has_nested` is a member flag used as the result slot of
        // check_has_nested_object; it must be reset before each array scan.
        has_nested = false;
        check_has_nested_object(element);
        ctx.has_nested_in_flatten = has_nested && ctx.deprecated_enable_flatten_nested;
        if (has_nested && !ctx.deprecated_enable_flatten_nested) {
            // Parse nested arrays to JsonbField
            JsonbWriter writer;
            traverseArrayAsJsonb(element.getArray(), writer);
            appendValueIfNotDuplicate(
                    ctx, ctx.builder.get_parts(),
                    Field::create_field<TYPE_JSONB>(JsonbField(writer.getOutput()->getBuffer(),
                                                               writer.getOutput()->getSize())));
        } else {
            traverseArray(element.getArray(), ctx);
        }
        // we should set has_nested_in_flatten to false when traverse array finished for next array otherwise it will be true for next array
        ctx.has_nested_in_flatten = false;
    } else {
        // Scalar leaf: record the converted value at the current path.
        appendValueIfNotDuplicate(ctx, ctx.builder.get_parts(), getValueAsField(element));
    }
}
90 | | |
91 | | template <typename ParserImpl> |
92 | | void JSONDataParser<ParserImpl>::appendValueIfNotDuplicate(ParseContext& ctx, |
93 | | const PathInData::Parts& path, |
94 | 1.94M | Field&& value) { |
95 | 1.94M | if (ctx.check_duplicate_json_path) { |
96 | 32 | PathInData path_in_data(path); |
97 | 32 | if (!ctx.visited_path_names.emplace(path_in_data.get_path()).second) { |
98 | 10 | return; |
99 | 10 | } |
100 | 32 | } |
101 | 1.94M | ctx.paths.push_back(path); |
102 | 1.94M | ctx.values.push_back(std::move(value)); |
103 | 1.94M | } |
104 | | |
105 | | template <typename ParserImpl> |
106 | 144k | void JSONDataParser<ParserImpl>::traverseObject(const JSONObject& object, ParseContext& ctx) { |
107 | 144k | ctx.paths.reserve(ctx.paths.size() + object.size()); |
108 | 144k | ctx.values.reserve(ctx.values.size() + object.size()); |
109 | 1.42M | auto check_key_length = [](const auto& key) { |
110 | 1.42M | const size_t max_key_length = cast_set<size_t>(config::variant_max_json_key_length); |
111 | 1.42M | if (key.size() > max_key_length) { |
112 | 336 | throw doris::Exception( |
113 | 336 | doris::ErrorCode::INVALID_ARGUMENT, |
114 | 336 | fmt::format("Key length exceeds maximum allowed size of {} bytes.", |
115 | 336 | max_key_length)); |
116 | 336 | } |
117 | 1.42M | }; |
118 | 1.42M | auto traverse_object_member = [&](const auto& key, const auto& value) { |
119 | 1.42M | check_key_length(key); |
120 | 1.42M | ctx.builder.append(key, false); |
121 | 1.42M | traverse(value, ctx); |
122 | 1.42M | ctx.builder.pop_back(); |
123 | 1.42M | }; |
124 | | |
125 | 1.56M | for (auto it = object.begin(); it != object.end(); ++it) { |
126 | 1.42M | const auto& [key, value] = *it; |
127 | 1.42M | traverse_object_member(key, value); |
128 | 1.42M | } |
129 | 144k | } |
130 | | |
131 | | template <typename ParserImpl> |
132 | 1.12M | void JSONDataParser<ParserImpl>::check_has_nested_object(const Element& element) { |
133 | 1.12M | if (element.isArray()) { |
134 | 105k | const JSONArray& array = element.getArray(); |
135 | 1.17M | for (auto it = array.begin(); it != array.end(); ++it) { |
136 | 1.06M | check_has_nested_object(*it); |
137 | 1.06M | } |
138 | 105k | } |
139 | 1.12M | if (element.isObject()) { |
140 | 23.0k | has_nested = true; |
141 | 23.0k | } |
142 | 1.12M | } |
143 | | |
144 | | template <typename ParserImpl> |
145 | 4.31k | void JSONDataParser<ParserImpl>::traverseAsJsonb(const Element& element, JsonbWriter& writer) { |
146 | 4.31k | if (element.isObject()) { |
147 | 1.49k | traverseObjectAsJsonb(element.getObject(), writer); |
148 | 2.82k | } else if (element.isArray()) { |
149 | 7 | traverseArrayAsJsonb(element.getArray(), writer); |
150 | 2.81k | } else { |
151 | 2.81k | writeValueAsJsonb(element, writer); |
152 | 2.81k | } |
153 | 4.31k | } |
154 | | |
155 | | template <typename ParserImpl> |
156 | | void JSONDataParser<ParserImpl>::traverseObjectAsJsonb(const JSONObject& object, |
157 | 1.49k | JsonbWriter& writer) { |
158 | 1.49k | writer.writeStartObject(); |
159 | 4.93k | for (auto it = object.begin(); it != object.end(); ++it) { |
160 | 3.64k | const auto& [key, value] = *it; |
161 | 3.64k | const size_t max_key_length = cast_set<size_t>(config::variant_max_json_key_length); |
162 | 3.64k | if (key.size() > max_key_length) { |
163 | 201 | throw doris::Exception( |
164 | 201 | doris::ErrorCode::INVALID_ARGUMENT, |
165 | 201 | fmt::format("Key length exceeds maximum allowed size of {} bytes.", |
166 | 201 | max_key_length)); |
167 | 201 | } |
168 | 3.44k | writer.writeKey(key.data(), cast_set<uint8_t>(key.size())); |
169 | 3.44k | traverseAsJsonb(value, writer); |
170 | 3.44k | } |
171 | 1.28k | writer.writeEndObject(); |
172 | 1.28k | } |
173 | | |
174 | | template <typename ParserImpl> |
175 | 216 | void JSONDataParser<ParserImpl>::traverseArrayAsJsonb(const JSONArray& array, JsonbWriter& writer) { |
176 | 216 | writer.writeStartArray(); |
177 | 1.08k | for (auto it = array.begin(); it != array.end(); ++it) { |
178 | 873 | traverseAsJsonb(*it, writer); |
179 | 873 | } |
180 | 216 | writer.writeEndArray(); |
181 | 216 | } |
182 | | |
183 | | // check isPrefix in PathInData::Parts. like : [{"a": {"c": {"b": 1}}}, {"a": {"c": 2.2}}], "a.c" is prefix of "a.c.b" |
184 | | // return true if prefix is a prefix of parts |
185 | 6 | static bool is_prefix(const PathInData::Parts& prefix, const PathInData::Parts& parts) { |
186 | 6 | if (prefix.size() >= parts.size()) { |
187 | 5 | return false; |
188 | 5 | } |
189 | 2 | for (size_t i = 0; i < prefix.size(); ++i) { |
190 | 1 | if (prefix[i].key != parts[i].key) { |
191 | 0 | return false; |
192 | 0 | } |
193 | 1 | } |
194 | 1 | return true; |
195 | 1 | } |
196 | | |
197 | | template <typename ParserImpl> |
198 | 55.1k | void JSONDataParser<ParserImpl>::traverseArray(const JSONArray& array, ParseContext& ctx) { |
199 | | /// Traverse elements of array and collect an array of fields by each path. |
200 | 55.1k | ParseArrayContext array_ctx; |
201 | 55.1k | array_ctx.has_nested_in_flatten = ctx.has_nested_in_flatten; |
202 | 55.1k | array_ctx.is_top_array = ctx.is_top_array; |
203 | 55.1k | array_ctx.check_duplicate_json_path = ctx.check_duplicate_json_path; |
204 | 55.1k | array_ctx.total_size = array.size(); |
205 | 633k | for (auto it = array.begin(); it != array.end(); ++it) { |
206 | 577k | traverseArrayElement(*it, array_ctx); |
207 | 577k | ++array_ctx.current_size; |
208 | 577k | } |
209 | 55.1k | auto&& arrays_by_path = array_ctx.arrays_by_path; |
210 | 55.1k | if (arrays_by_path.empty()) { |
211 | 4 | appendValueIfNotDuplicate(ctx, ctx.builder.get_parts(), |
212 | 4 | Field::create_field<TYPE_ARRAY>(Array())); |
213 | 55.1k | } else { |
214 | 55.1k | ctx.paths.reserve(ctx.paths.size() + arrays_by_path.size()); |
215 | 55.1k | ctx.values.reserve(ctx.values.size() + arrays_by_path.size()); |
216 | 113k | for (auto it = arrays_by_path.begin(); it != arrays_by_path.end(); ++it) { |
217 | 58.1k | auto&& [path, path_array] = it->second; |
218 | | /// Merge prefix path and path of array element. |
219 | 58.1k | ctx.builder.append(path, true); |
220 | 58.1k | appendValueIfNotDuplicate(ctx, ctx.builder.get_parts(), |
221 | 58.1k | Field::create_field<TYPE_ARRAY>(std::move(path_array))); |
222 | 58.1k | ctx.builder.pop_back(path.size()); |
223 | 58.1k | } |
224 | 55.1k | } |
225 | 55.1k | } |
226 | | |
template <typename ParserImpl>
void JSONDataParser<ParserImpl>::traverseArrayElement(const Element& element,
                                                      ParseArrayContext& ctx) {
    // Traverse one element of an array in its own ParseContext, then merge the
    // per-element (path, value) pairs into the per-path arrays accumulated in
    // ctx.arrays_by_path, keeping every per-path array aligned to one entry
    // per processed element.
    ParseContext element_ctx;
    element_ctx.has_nested_in_flatten = ctx.has_nested_in_flatten;
    element_ctx.is_top_array = ctx.is_top_array;
    element_ctx.check_duplicate_json_path = ctx.check_duplicate_json_path;
    traverse(element, element_ctx);
    auto& paths = element_ctx.paths;
    auto& values = element_ctx.values;

    if (element_ctx.has_nested_in_flatten && element_ctx.is_top_array) {
        // Reject path sets where one path is a strict prefix of another
        // (e.g. "a.b" vs "a.b.c") — they cannot share a subcolumn.
        checkAmbiguousStructure(ctx, paths);
    }

    size_t size = paths.size();
    // Number of already-known paths this element has not yet supplied a value
    // for; decremented by handleExistingPath.
    size_t keys_to_update = ctx.arrays_by_path.size();

    for (size_t i = 0; i < size; ++i) {
        if (values[i].is_null()) {
            // Nulls are filled by the missed-values pass below instead.
            continue;
        }

        UInt128 hash = PathInData::get_parts_hash(paths[i]);
        auto found = ctx.arrays_by_path.find(hash);

        if (found != ctx.arrays_by_path.end()) {
            handleExistingPath(found->second, paths[i], values[i], ctx, keys_to_update);
        } else {
            handleNewPath(hash, paths[i], values[i], ctx);
        }
    }

    // always fill missed values to keep element-level association between keys.
    if (keys_to_update) {
        fillMissedValuesInArrays(ctx);
    }
}
265 | | |
266 | | // check if the structure of top_array is ambiguous like: |
267 | | // [{"a": {"b": {"c": 1}}}, {"a": {"b": 1}}] a.b is ambiguous |
268 | | template <typename ParserImpl> |
269 | | void JSONDataParser<ParserImpl>::checkAmbiguousStructure( |
270 | 6 | const ParseArrayContext& ctx, const std::vector<PathInData::Parts>& paths) { |
271 | 6 | for (auto&& current_path : paths) { |
272 | 8 | for (auto it = ctx.arrays_by_path.begin(); it != ctx.arrays_by_path.end(); ++it) { |
273 | 3 | auto&& [p, _] = it->second; |
274 | 3 | if (is_prefix(p, current_path) || is_prefix(current_path, p)) { |
275 | 1 | throw doris::Exception(doris::ErrorCode::INVALID_ARGUMENT, |
276 | 1 | "Ambiguous structure of top_array nested subcolumns: {}, {}", |
277 | 1 | PathInData(p).to_jsonpath(), |
278 | 1 | PathInData(current_path).to_jsonpath()); |
279 | 1 | } |
280 | 3 | } |
281 | 6 | } |
282 | 6 | } |
283 | | |
template <typename ParserImpl>
void JSONDataParser<ParserImpl>::handleExistingPath(std::pair<PathInData::Parts, Array>& path_data,
                                                    const PathInData::Parts& path, Field& value,
                                                    ParseArrayContext& ctx,
                                                    size_t& keys_to_update) {
    // Append `value` to the array already collected for `path`, and maintain
    // the per-Nested size bookkeeping used to validate sibling array lengths.
    auto& path_array = path_data.second;
    // keep arrays aligned for all keys (including top-level arrays).
    assert(path_array.size() == ctx.current_size);
    // If current element of array is part of Nested,
    // collect its size or check it if the size of
    // the Nested has been already collected.
    auto nested_key = getNameOfNested(path, value);
    if (!nested_key.empty()) {
        size_t array_size = value.get<TYPE_ARRAY>().size();
        auto& current_nested_sizes = ctx.nested_sizes_by_key[nested_key];
        if (current_nested_sizes.size() == ctx.current_size) {
            // First sibling of this Nested seen for the current element:
            // record its size.
            current_nested_sizes.push_back(array_size);
        } else if (array_size != current_nested_sizes.back()) {
            // All arrays belonging to one Nested must agree on per-row size.
            throw doris::Exception(doris::ErrorCode::INTERNAL_ERROR,
                                   "Array sizes mismatched ({} and {})", array_size,
                                   current_nested_sizes.back());
        }
    }

    path_array.push_back(std::move(value));
    // One fewer known path left unfilled for this element.
    --keys_to_update;
}
311 | | |
312 | | template <typename ParserImpl> |
313 | | void JSONDataParser<ParserImpl>::handleNewPath(UInt128 hash, const PathInData::Parts& path, |
314 | 58.1k | Field& value, ParseArrayContext& ctx) { |
315 | 58.1k | Array path_array; |
316 | 58.1k | path_array.reserve(ctx.total_size); |
317 | | |
318 | | // always resize to keep alignment. |
319 | 58.1k | path_array.resize(ctx.current_size); |
320 | | |
321 | 58.1k | auto nested_key = getNameOfNested(path, value); |
322 | 58.1k | if (!nested_key.empty()) { |
323 | 3 | size_t array_size = value.get<TYPE_ARRAY>().size(); |
324 | 3 | auto& current_nested_sizes = ctx.nested_sizes_by_key[nested_key]; |
325 | 3 | if (current_nested_sizes.empty()) { |
326 | 2 | current_nested_sizes.resize(ctx.current_size); |
327 | 2 | } else { |
328 | | // If newly added element is part of the Nested then |
329 | | // resize its elements to keep correct sizes of Nested arrays. |
330 | 3 | for (size_t j = 0; j < ctx.current_size; ++j) { |
331 | 2 | path_array[j] = Field::create_field<TYPE_ARRAY>(Array(current_nested_sizes[j])); |
332 | 2 | } |
333 | 1 | } |
334 | 3 | if (current_nested_sizes.size() == ctx.current_size) { |
335 | 2 | current_nested_sizes.push_back(array_size); |
336 | 2 | } else if (array_size != current_nested_sizes.back()) { |
337 | 0 | throw doris::Exception(doris::ErrorCode::INTERNAL_ERROR, |
338 | 0 | "Array sizes mismatched ({} and {})", array_size, |
339 | 0 | current_nested_sizes.back()); |
340 | 0 | } |
341 | 3 | } |
342 | | |
343 | 58.1k | path_array.push_back(std::move(value)); |
344 | 58.1k | auto& elem = ctx.arrays_by_path[hash]; |
345 | 58.1k | elem.first = std::move(path); |
346 | 58.1k | elem.second = std::move(path_array); |
347 | 58.1k | } |
348 | | |
349 | | template <typename ParserImpl> |
350 | 10.2k | void JSONDataParser<ParserImpl>::fillMissedValuesInArrays(ParseArrayContext& ctx) { |
351 | 20.5k | for (auto it = ctx.arrays_by_path.begin(); it != ctx.arrays_by_path.end(); ++it) { |
352 | 10.2k | auto& [path, path_array] = it->second; |
353 | 10.2k | assert(path_array.size() == ctx.current_size || path_array.size() == ctx.current_size + 1); |
354 | 10.2k | if (path_array.size() == ctx.current_size) { |
355 | 10.2k | bool inserted = tryInsertDefaultFromNested(ctx, path, path_array); |
356 | 10.2k | if (!inserted) { |
357 | 10.2k | path_array.emplace_back(); |
358 | 10.2k | } |
359 | 10.2k | } |
360 | 10.2k | } |
361 | 10.2k | } |
362 | | |
template <typename ParserImpl>
bool JSONDataParser<ParserImpl>::tryInsertDefaultFromNested(ParseArrayContext& ctx,
                                                            const PathInData::Parts& path,
                                                            Array& array) {
    // Attempt to pad a missed Nested subcolumn with an empty array of the size
    // recorded for its Nested group, instead of a plain null. Returns true if
    // a default was inserted, false if the caller should insert a null.
    /// If there is a collected size of current Nested
    /// then insert array of this size as a default value.
    if (path.empty() || array.empty()) {
        return false;
    }
    /// Last element is not Null, because otherwise this path wouldn't exist.
    auto nested_key = getNameOfNested(path, array.back());
    if (nested_key.empty()) {
        return false;
    }
    auto mapped = ctx.nested_sizes_by_key.find(nested_key);
    if (mapped == ctx.nested_sizes_by_key.end()) {
        return false;
    }
    auto& current_nested_sizes = mapped->second;
    // Size bookkeeping may lag by exactly one element (the one being filled).
    assert(current_nested_sizes.size() == ctx.current_size ||
           current_nested_sizes.size() == ctx.current_size + 1);
    /// If all keys of Nested were missed then add a zero length.
    if (current_nested_sizes.size() == ctx.current_size) {
        current_nested_sizes.push_back(0);
    }
    size_t array_size = current_nested_sizes.back();
    array.push_back(Field::create_field<TYPE_ARRAY>(Array(array_size)));
    return true;
}
392 | | |
393 | | template <typename ParserImpl> |
394 | | StringRef JSONDataParser<ParserImpl>::getNameOfNested(const PathInData::Parts& path, |
395 | 569k | const Field& value) { |
396 | 569k | if (value.get_type() != PrimitiveType::TYPE_ARRAY || path.empty()) { |
397 | 569k | return {}; |
398 | 569k | } |
399 | | /// Find first key that is marked as nested, |
400 | | /// because we may have struct of Nested and there could be |
401 | | /// several arrays with the same prefix, but with independent sizes. |
402 | | /// Consider we have array element with type `k2 Struct(k3 Nested(...), k5 Nested(...))` |
403 | | /// Then subcolumns `k2.k3` and `k2.k5` may have indepented sizes and we should extract |
404 | | /// `k3` and `k5` keys instead of `k2`. |
405 | 3 | for (const auto& part : path) { |
406 | 3 | if (part.is_nested) { |
407 | 3 | return {part.key.data(), part.key.size()}; |
408 | 3 | } |
409 | 3 | } |
410 | 0 | return {}; |
411 | 3 | } |
412 | | |
413 | | template class JSONDataParser<SimdJSONParser>; |
414 | | } // namespace doris |