Coverage Report

Created: 2026-05-09 18:55

be/src/format/parquet/vparquet_column_reader.cpp
Line| Count|Source
   1|      |// Licensed to the Apache Software Foundation (ASF) under one
   2|      |// or more contributor license agreements.  See the NOTICE file
   3|      |// distributed with this work for additional information
   4|      |// regarding copyright ownership.  The ASF licenses this file
   5|      |// to you under the Apache License, Version 2.0 (the
   6|      |// "License"); you may not use this file except in compliance
   7|      |// with the License.  You may obtain a copy of the License at
   8|      |//
   9|      |//   http://www.apache.org/licenses/LICENSE-2.0
  10|      |//
  11|      |// Unless required by applicable law or agreed to in writing,
  12|      |// software distributed under the License is distributed on an
  13|      |// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  14|      |// KIND, either express or implied.  See the License for the
  15|      |// specific language governing permissions and limitations
  16|      |// under the License.
  17|      |
  18|      |#include "format/parquet/vparquet_column_reader.h"
  19|      |
  20|      |#include <gen_cpp/parquet_types.h>
  21|      |#include <limits.h>
  22|      |#include <sys/types.h>
  23|      |
  24|      |#include <algorithm>
  25|      |#include <utility>
  26|      |
  27|      |#include "common/status.h"
  28|      |#include "core/column/column.h"
  29|      |#include "core/column/column_array.h"
  30|      |#include "core/column/column_map.h"
  31|      |#include "core/column/column_nullable.h"
  32|      |#include "core/column/column_struct.h"
  33|      |#include "core/data_type/data_type_array.h"
  34|      |#include "core/data_type/data_type_map.h"
  35|      |#include "core/data_type/data_type_nullable.h"
  36|      |#include "core/data_type/data_type_struct.h"
  37|      |#include "core/data_type/define_primitive_type.h"
  38|      |#include "format/parquet/level_decoder.h"
  39|      |#include "format/parquet/schema_desc.h"
  40|      |#include "format/parquet/vparquet_column_chunk_reader.h"
  41|      |#include "io/fs/tracing_file_reader.h"
  42|      |#include "runtime/runtime_profile.h"
  43|      |
  44|      |namespace doris {
  45|      |static void fill_struct_null_map(FieldSchema* field, NullMap& null_map,
  46|      |                                 const std::vector<level_t>& rep_levels,
  47|    11|                                 const std::vector<level_t>& def_levels) {
  48|    11|    size_t num_levels = def_levels.size();
  49|    11|    DCHECK_EQ(num_levels, rep_levels.size());
  50|    11|    size_t origin_size = null_map.size();
  51|    11|    null_map.resize(origin_size + num_levels);
  52|    11|    size_t pos = origin_size;
  53|    26|    for (size_t i = 0; i < num_levels; ++i) {
  54|      |        // skip the levels affect its ancestor or its descendants
  55|    15|        if (def_levels[i] < field->repeated_parent_def_level ||
  56|    15|            rep_levels[i] > field->repetition_level) {
  57|     0|            continue;
  58|     0|        }
  59|    15|        if (def_levels[i] >= field->definition_level) {
  60|    15|            null_map[pos++] = 0;
  61|    15|        } else {
  62|     0|            null_map[pos++] = 1;
  63|     0|        }
  64|    15|    }
  65|    11|    null_map.resize(pos);
  66|    11|}
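
Note on fill_struct_null_map above: a slot of a struct field is non-null exactly when its definition level reaches the field's own definition_level; levels belonging to an ancestor (def_level < repeated_parent_def_level) or to a deeper repeated descendant (rep_level > repetition_level) are filtered out first. A minimal standalone sketch of the def-level rule, using hypothetical example values instead of the Doris FieldSchema/NullMap types:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
        // Hypothetical struct field with definition_level = 2; a def level of 2
        // means "value present", anything smaller means null at this depth.
        const int definition_level = 2;
        std::vector<int> def_levels = {2, 1, 2};
        std::vector<uint8_t> null_map; // 0 = non-null, 1 = null, as in NullMap
        for (int def : def_levels) {
            null_map.push_back(def >= definition_level ? 0 : 1);
        }
        for (uint8_t v : null_map) std::cout << int(v) << ' '; // prints: 0 1 0
        std::cout << '\n';
    }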
  67|      |
  68|      |static void fill_array_offset(FieldSchema* field, ColumnArray::Offsets64& offsets_data,
  69|      |                              NullMap* null_map_ptr, const std::vector<level_t>& rep_levels,
  70|     2|                              const std::vector<level_t>& def_levels) {
  71|     2|    size_t num_levels = rep_levels.size();
  72|     2|    DCHECK_EQ(num_levels, def_levels.size());
  73|     2|    size_t origin_size = offsets_data.size();
  74|     2|    offsets_data.resize(origin_size + num_levels);
  75|     2|    if (null_map_ptr != nullptr) {
  76|     2|        null_map_ptr->resize(origin_size + num_levels);
  77|     2|    }
  78|     2|    size_t offset_pos = origin_size - 1;
  79|     8|    for (size_t i = 0; i < num_levels; ++i) {
  80|      |        // skip the levels affect its ancestor or its descendants
  81|     6|        if (def_levels[i] < field->repeated_parent_def_level ||
  82|     6|            rep_levels[i] > field->repetition_level) {
  83|     0|            continue;
  84|     0|        }
  85|     6|        if (rep_levels[i] == field->repetition_level) {
  86|     4|            offsets_data[offset_pos]++;
  87|     4|            continue;
  88|     4|        }
  89|     2|        offset_pos++;
  90|     2|        offsets_data[offset_pos] = offsets_data[offset_pos - 1];
  91|     2|        if (def_levels[i] >= field->definition_level) {
  92|     2|            offsets_data[offset_pos]++;
  93|     2|        }
  94|     2|        if (def_levels[i] >= field->definition_level - 1) {
  95|     2|            (*null_map_ptr)[offset_pos] = 0;
  96|     2|        } else {
  97|     0|            (*null_map_ptr)[offset_pos] = 1;
  98|     0|        }
  99|     2|    }
 100|     2|    offsets_data.resize(offset_pos + 1);
 101|     2|    if (null_map_ptr != nullptr) {
 102|     2|        null_map_ptr->resize(offset_pos + 1);
 103|     2|    }
 104|     2|}
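
Note on fill_array_offset: a level whose rep_level equals the field's repetition_level appends one element to the current array, while a smaller rep_level opens a new array whose offset starts as a copy of the previous running total; def_levels then decide whether the new array gets a first element and whether it is null. A simplified standalone sketch with hypothetical levels (it assumes the batch starts at a row boundary, as the real reader guarantees):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
        // Hypothetical array field: repetition_level = 1, definition_level = 2.
        const int repetition_level = 1, definition_level = 2;
        std::vector<int> rep = {0, 1, 1, 0, 0, 1};
        std::vector<int> def = {2, 2, 2, 1, 2, 2}; // def < 2 => empty or null array
        std::vector<uint64_t> offsets; // cumulative element count, one per array
        for (std::size_t i = 0; i < rep.size(); ++i) {
            if (rep[i] == repetition_level) {
                offsets.back()++; // another element of the current array
                continue;
            }
            // rep < repetition_level: a new array starts; copy the running total.
            offsets.push_back(offsets.empty() ? 0 : offsets.back());
            if (def[i] >= definition_level) {
                offsets.back()++; // the new array has a first element
            }
        }
        // Three arrays: [a,b,c], [] (null or empty), [d,e] -> offsets: 3 3 5
        for (uint64_t o : offsets) std::cout << o << ' ';
        std::cout << '\n';
    }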
 105|      |
 106|      |Status ParquetColumnReader::create(io::FileReaderSPtr file, FieldSchema* field,
 107|      |                                   const tparquet::RowGroup& row_group, const RowRanges& row_ranges,
 108|      |                                   const cctz::time_zone* ctz, io::IOContext* io_ctx,
 109|      |                                   std::unique_ptr<ParquetColumnReader>& reader,
 110|      |                                   size_t max_buf_size,
 111|      |                                   std::unordered_map<int, tparquet::OffsetIndex>& col_offsets,
 112|      |                                   RuntimeState* state, bool in_collection,
 113|      |                                   const std::set<uint64_t>& column_ids,
 114|   132|                                   const std::set<uint64_t>& filter_column_ids) {
 115|   132|    size_t total_rows = row_group.num_rows;
 116|   132|    if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
 117|     2|        std::unique_ptr<ParquetColumnReader> element_reader;
 118|     2|        RETURN_IF_ERROR(create(file, &field->children[0], row_group, row_ranges, ctz, io_ctx,
 119|     2|                               element_reader, max_buf_size, col_offsets, state, true, column_ids,
 120|     2|                               filter_column_ids));
 121|     2|        auto array_reader = ArrayColumnReader::create_unique(row_ranges, total_rows, ctz, io_ctx);
 122|     2|        element_reader->set_column_in_nested();
 123|     2|        RETURN_IF_ERROR(array_reader->init(std::move(element_reader), field));
 124|     2|        array_reader->_filter_column_ids = filter_column_ids;
 125|     2|        reader.reset(array_reader.release());
 126|   130|    } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
 127|     0|        std::unique_ptr<ParquetColumnReader> key_reader;
 128|     0|        std::unique_ptr<ParquetColumnReader> value_reader;
 129|      |
 130|     0|        if (column_ids.empty() ||
 131|     0|            column_ids.find(field->children[0].get_column_id()) != column_ids.end()) {
 132|      |            // Create key reader
 133|     0|            RETURN_IF_ERROR(create(file, &field->children[0], row_group, row_ranges, ctz, io_ctx,
 134|     0|                                   key_reader, max_buf_size, col_offsets, state, true, column_ids,
 135|     0|                                   filter_column_ids));
 136|     0|        } else {
 137|     0|            auto skip_reader = std::make_unique<SkipReadingReader>(row_ranges, total_rows, ctz,
 138|     0|                                                                   io_ctx, &field->children[0]);
 139|     0|            key_reader = std::move(skip_reader);
 140|     0|        }
 141|      |
 142|     0|        if (column_ids.empty() ||
 143|     0|            column_ids.find(field->children[1].get_column_id()) != column_ids.end()) {
 144|      |            // Create value reader
 145|     0|            RETURN_IF_ERROR(create(file, &field->children[1], row_group, row_ranges, ctz, io_ctx,
 146|     0|                                   value_reader, max_buf_size, col_offsets, state, true, column_ids,
 147|     0|                                   filter_column_ids));
 148|     0|        } else {
 149|     0|            auto skip_reader = std::make_unique<SkipReadingReader>(row_ranges, total_rows, ctz,
 150|     0|                                                                   io_ctx, &field->children[0]);
 151|     0|            value_reader = std::move(skip_reader);
 152|     0|        }
 153|      |
 154|     0|        auto map_reader = MapColumnReader::create_unique(row_ranges, total_rows, ctz, io_ctx);
 155|     0|        key_reader->set_column_in_nested();
 156|     0|        value_reader->set_column_in_nested();
 157|     0|        RETURN_IF_ERROR(map_reader->init(std::move(key_reader), std::move(value_reader), field));
 158|     0|        map_reader->_filter_column_ids = filter_column_ids;
 159|     0|        reader.reset(map_reader.release());
 160|   130|    } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
 161|    11|        std::unordered_map<std::string, std::unique_ptr<ParquetColumnReader>> child_readers;
 162|    11|        child_readers.reserve(field->children.size());
 163|    11|        int non_skip_reader_idx = -1;
 164|    37|        for (int i = 0; i < field->children.size(); ++i) {
 165|    26|            auto& child = field->children[i];
 166|    26|            std::unique_ptr<ParquetColumnReader> child_reader;
 167|    26|            if (column_ids.empty() || column_ids.find(child.get_column_id()) != column_ids.end()) {
 168|    22|                RETURN_IF_ERROR(create(file, &child, row_group, row_ranges, ctz, io_ctx,
 169|    22|                                       child_reader, max_buf_size, col_offsets, state,
 170|    22|                                       in_collection, column_ids, filter_column_ids));
 171|    22|                child_readers[child.name] = std::move(child_reader);
 172|      |                // Record the first non-SkippingReader
 173|    22|                if (non_skip_reader_idx == -1) {
 174|    11|                    non_skip_reader_idx = i;
 175|    11|                }
 176|    22|            } else {
 177|     4|                auto skip_reader = std::make_unique<SkipReadingReader>(row_ranges, total_rows, ctz,
 178|     4|                                                                       io_ctx, &child);
 179|     4|                skip_reader->_filter_column_ids = filter_column_ids;
 180|     4|                child_readers[child.name] = std::move(skip_reader);
 181|     4|            }
 182|    26|            child_readers[child.name]->set_column_in_nested();
 183|    26|        }
 184|      |        // If all children are SkipReadingReader, force the first child to call create
 185|    11|        if (non_skip_reader_idx == -1) {
 186|     0|            std::unique_ptr<ParquetColumnReader> child_reader;
 187|     0|            RETURN_IF_ERROR(create(file, &field->children[0], row_group, row_ranges, ctz, io_ctx,
 188|     0|                                   child_reader, max_buf_size, col_offsets, state, in_collection,
 189|     0|                                   column_ids, filter_column_ids));
 190|     0|            child_reader->set_column_in_nested();
 191|     0|            child_readers[field->children[0].name] = std::move(child_reader);
 192|     0|        }
 193|    11|        auto struct_reader = StructColumnReader::create_unique(row_ranges, total_rows, ctz, io_ctx);
 194|    11|        RETURN_IF_ERROR(struct_reader->init(std::move(child_readers), field));
 195|    11|        struct_reader->_filter_column_ids = filter_column_ids;
 196|    11|        reader.reset(struct_reader.release());
 197|   119|    } else {
 198|   119|        auto physical_index = field->physical_column_index;
 199|   119|        const tparquet::OffsetIndex* offset_index =
 200|   119|                col_offsets.find(physical_index) != col_offsets.end() ? &col_offsets[physical_index]
 201|   119|                                                                      : nullptr;
 202|      |
 203|   119|        const tparquet::ColumnChunk& chunk = row_group.columns[physical_index];
 204|   119|        if (in_collection) {
 205|     3|            if (offset_index == nullptr) {
 206|     3|                auto scalar_reader = ScalarColumnReader<true, false>::create_unique(
 207|     3|                        row_ranges, total_rows, chunk, offset_index, ctz, io_ctx);
 208|      |
 209|     3|                RETURN_IF_ERROR(scalar_reader->init(file, field, max_buf_size, state));
 210|     3|                scalar_reader->_filter_column_ids = filter_column_ids;
 211|     3|                reader.reset(scalar_reader.release());
 212|     3|            } else {
 213|     0|                auto scalar_reader = ScalarColumnReader<true, true>::create_unique(
 214|     0|                        row_ranges, total_rows, chunk, offset_index, ctz, io_ctx);
 215|      |
 216|     0|                RETURN_IF_ERROR(scalar_reader->init(file, field, max_buf_size, state));
 217|     0|                scalar_reader->_filter_column_ids = filter_column_ids;
 218|     0|                reader.reset(scalar_reader.release());
 219|     0|            }
 220|   116|        } else {
 221|   116|            if (offset_index == nullptr) {
 222|   116|                auto scalar_reader = ScalarColumnReader<false, false>::create_unique(
 223|   116|                        row_ranges, total_rows, chunk, offset_index, ctz, io_ctx);
 224|      |
 225|   116|                RETURN_IF_ERROR(scalar_reader->init(file, field, max_buf_size, state));
 226|   116|                scalar_reader->_filter_column_ids = filter_column_ids;
 227|   116|                reader.reset(scalar_reader.release());
 228|   116|            } else {
 229|     0|                auto scalar_reader = ScalarColumnReader<false, true>::create_unique(
 230|     0|                        row_ranges, total_rows, chunk, offset_index, ctz, io_ctx);
 231|      |
 232|     0|                RETURN_IF_ERROR(scalar_reader->init(file, field, max_buf_size, state));
 233|     0|                scalar_reader->_filter_column_ids = filter_column_ids;
 234|     0|                reader.reset(scalar_reader.release());
 235|     0|            }
 236|   116|        }
 237|   119|    }
 238|   132|    return Status::OK();
 239|   132|}
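
The scalar branch of create() above selects among four ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX> instantiations, and the per-instantiation listings that follow report each one separately. In the Itanium-mangled names, Lb1 encodes a bool template argument of true and Lb0 of false, for example:

    _ZN5doris18ScalarColumnReaderILb1ELb0EE...   // doris::ScalarColumnReader<true, false>
    _ZN5doris18ScalarColumnReaderILb0ELb0EE...   // doris::ScalarColumnReader<false, false>

The instantiation counts match the call sites above: only <true, false> (3 calls) and <false, false> (116 calls) were constructed in this run, so every <..., true> (offset-index) variant is reported as unexecuted.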
 240|      |
 241|      |void ParquetColumnReader::_generate_read_ranges(RowRange page_row_range,
 242|   274|                                                RowRanges* result_ranges) const {
 243|   274|    result_ranges->add(page_row_range);
 244|   274|    RowRanges::ranges_intersection(*result_ranges, _row_ranges, result_ranges);
 245|   274|}
 246|      |
 247|      |template <bool IN_COLLECTION, bool OFFSET_INDEX>
 248|      |Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::init(io::FileReaderSPtr file,
 249|      |                                                             FieldSchema* field,
 250|      |                                                             size_t max_buf_size,
 251|   119|                                                             RuntimeState* state) {
 252|   119|    _field_schema = field;
 253|   119|    auto& chunk_meta = _chunk_meta.meta_data;
 254|   119|    int64_t chunk_start = has_dict_page(chunk_meta) ? chunk_meta.dictionary_page_offset
 255|   119|                                                    : chunk_meta.data_page_offset;
 256|   119|    size_t chunk_len = chunk_meta.total_compressed_size;
 257|   119|    size_t prefetch_buffer_size = std::min(chunk_len, max_buf_size);
 258|   119|    if ((typeid_cast<doris::io::TracingFileReader*>(file.get()) &&
 259|   119|         typeid_cast<io::MergeRangeFileReader*>(
 260|    53|                 ((doris::io::TracingFileReader*)(file.get()))->inner_reader().get())) ||
 261|   119|        typeid_cast<io::MergeRangeFileReader*>(file.get())) {
 262|      |        // turn off prefetch data when using MergeRangeFileReader
 263|   119|        prefetch_buffer_size = 0;
 264|   119|    }
 265|   119|    _stream_reader = std::make_unique<io::BufferedFileStreamReader>(file, chunk_start, chunk_len,
 266|   119|                                                                    prefetch_buffer_size);
 267|   119|    ParquetPageReadContext ctx(
 268|   119|            (state == nullptr) ? true : state->query_options().enable_parquet_file_page_cache);
 269|      |
 270|   119|    _chunk_reader = std::make_unique<ColumnChunkReader<IN_COLLECTION, OFFSET_INDEX>>(
 271|   119|            _stream_reader.get(), &_chunk_meta, field, _offset_index, _total_rows, _io_ctx, ctx);
 272|   119|    RETURN_IF_ERROR(_chunk_reader->init());
 273|   119|    return Status::OK();
 274|   119|}

Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE4initESt10shared_ptrINS_2io10FileReaderEEPNS_11FieldSchemaEmPNS_12RuntimeStateE

_ZN5doris18ScalarColumnReaderILb1ELb0EE4initESt10shared_ptrINS_2io10FileReaderEEPNS_11FieldSchemaEmPNS_12RuntimeStateE
Line| Count|Source
 251|     3|                                                             RuntimeState* state) {
 252|     3|    _field_schema = field;
 253|     3|    auto& chunk_meta = _chunk_meta.meta_data;
 254|     3|    int64_t chunk_start = has_dict_page(chunk_meta) ? chunk_meta.dictionary_page_offset
 255|     3|                                                    : chunk_meta.data_page_offset;
 256|     3|    size_t chunk_len = chunk_meta.total_compressed_size;
 257|     3|    size_t prefetch_buffer_size = std::min(chunk_len, max_buf_size);
 258|     3|    if ((typeid_cast<doris::io::TracingFileReader*>(file.get()) &&
 259|     3|         typeid_cast<io::MergeRangeFileReader*>(
 260|     0|                 ((doris::io::TracingFileReader*)(file.get()))->inner_reader().get())) ||
 261|     3|        typeid_cast<io::MergeRangeFileReader*>(file.get())) {
 262|      |        // turn off prefetch data when using MergeRangeFileReader
 263|     3|        prefetch_buffer_size = 0;
 264|     3|    }
 265|     3|    _stream_reader = std::make_unique<io::BufferedFileStreamReader>(file, chunk_start, chunk_len,
 266|     3|                                                                    prefetch_buffer_size);
 267|     3|    ParquetPageReadContext ctx(
 268|     3|            (state == nullptr) ? true : state->query_options().enable_parquet_file_page_cache);
 269|      |
 270|     3|    _chunk_reader = std::make_unique<ColumnChunkReader<IN_COLLECTION, OFFSET_INDEX>>(
 271|     3|            _stream_reader.get(), &_chunk_meta, field, _offset_index, _total_rows, _io_ctx, ctx);
 272|     3|    RETURN_IF_ERROR(_chunk_reader->init());
 273|     3|    return Status::OK();
 274|     3|}

Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE4initESt10shared_ptrINS_2io10FileReaderEEPNS_11FieldSchemaEmPNS_12RuntimeStateE

_ZN5doris18ScalarColumnReaderILb0ELb0EE4initESt10shared_ptrINS_2io10FileReaderEEPNS_11FieldSchemaEmPNS_12RuntimeStateE
Line| Count|Source
 251|   116|                                                             RuntimeState* state) {
 252|   116|    _field_schema = field;
 253|   116|    auto& chunk_meta = _chunk_meta.meta_data;
 254|   116|    int64_t chunk_start = has_dict_page(chunk_meta) ? chunk_meta.dictionary_page_offset
 255|   116|                                                    : chunk_meta.data_page_offset;
 256|   116|    size_t chunk_len = chunk_meta.total_compressed_size;
 257|   116|    size_t prefetch_buffer_size = std::min(chunk_len, max_buf_size);
 258|   116|    if ((typeid_cast<doris::io::TracingFileReader*>(file.get()) &&
 259|   116|         typeid_cast<io::MergeRangeFileReader*>(
 260|    53|                 ((doris::io::TracingFileReader*)(file.get()))->inner_reader().get())) ||
 261|   116|        typeid_cast<io::MergeRangeFileReader*>(file.get())) {
 262|      |        // turn off prefetch data when using MergeRangeFileReader
 263|   116|        prefetch_buffer_size = 0;
 264|   116|    }
 265|   116|    _stream_reader = std::make_unique<io::BufferedFileStreamReader>(file, chunk_start, chunk_len,
 266|   116|                                                                    prefetch_buffer_size);
 267|   116|    ParquetPageReadContext ctx(
 268|   116|            (state == nullptr) ? true : state->query_options().enable_parquet_file_page_cache);
 269|      |
 270|   116|    _chunk_reader = std::make_unique<ColumnChunkReader<IN_COLLECTION, OFFSET_INDEX>>(
 271|   116|            _stream_reader.get(), &_chunk_meta, field, _offset_index, _total_rows, _io_ctx, ctx);
 272|   116|    RETURN_IF_ERROR(_chunk_reader->init());
 273|   116|    return Status::OK();
 274|   116|}
 275|      |
 276|      |template <bool IN_COLLECTION, bool OFFSET_INDEX>
 277|   244|Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::_skip_values(size_t num_values) {
 278|   244|    if (num_values == 0) {
 279|   142|        return Status::OK();
 280|   142|    }
 281|   102|    if (_chunk_reader->max_def_level() > 0) {
 282|   102|        LevelDecoder& def_decoder = _chunk_reader->def_level_decoder();
 283|   102|        size_t skipped = 0;
 284|   102|        size_t null_size = 0;
 285|   102|        size_t nonnull_size = 0;
 286|   217|        while (skipped < num_values) {
 287|   115|            level_t def_level = -1;
 288|   115|            size_t loop_skip = def_decoder.get_next_run(&def_level, num_values - skipped);
 289|   115|            if (loop_skip == 0) {
 290|     0|                std::stringstream ss;
 291|     0|                auto& bit_reader = def_decoder.rle_decoder().bit_reader();
 292|     0|                ss << "def_decoder buffer (hex): ";
 293|     0|                for (size_t i = 0; i < bit_reader.max_bytes(); ++i) {
 294|     0|                    ss << std::hex << std::setw(2) << std::setfill('0')
 295|     0|                       << static_cast<int>(bit_reader.buffer()[i]) << " ";
 296|     0|                }
 297|     0|                LOG(WARNING) << ss.str();
 298|     0|                return Status::InternalError("Failed to decode definition level.");
 299|     0|            }
 300|   115|            if (def_level < _field_schema->definition_level) {
 301|     8|                null_size += loop_skip;
 302|   107|            } else {
 303|   107|                nonnull_size += loop_skip;
 304|   107|            }
 305|   115|            skipped += loop_skip;
 306|   115|        }
 307|   102|        if (null_size > 0) {
 308|     5|            RETURN_IF_ERROR(_chunk_reader->skip_values(null_size, false));
 309|     5|        }
 310|   102|        if (nonnull_size > 0) {
 311|   101|            RETURN_IF_ERROR(_chunk_reader->skip_values(nonnull_size, true));
 312|   101|        }
 313|   102|    } else {
 314|     0|        RETURN_IF_ERROR(_chunk_reader->skip_values(num_values));
 315|     0|    }
 316|   102|    return Status::OK();
 317|   102|}

Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE12_skip_valuesEm
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb0EE12_skip_valuesEm
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE12_skip_valuesEm

_ZN5doris18ScalarColumnReaderILb0ELb0EE12_skip_valuesEm
Line| Count|Source
 277|   244|Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::_skip_values(size_t num_values) {
 278|   244|    if (num_values == 0) {
 279|   142|        return Status::OK();
 280|   142|    }
 281|   102|    if (_chunk_reader->max_def_level() > 0) {
 282|   102|        LevelDecoder& def_decoder = _chunk_reader->def_level_decoder();
 283|   102|        size_t skipped = 0;
 284|   102|        size_t null_size = 0;
 285|   102|        size_t nonnull_size = 0;
 286|   217|        while (skipped < num_values) {
 287|   115|            level_t def_level = -1;
 288|   115|            size_t loop_skip = def_decoder.get_next_run(&def_level, num_values - skipped);
 289|   115|            if (loop_skip == 0) {
 290|     0|                std::stringstream ss;
 291|     0|                auto& bit_reader = def_decoder.rle_decoder().bit_reader();
 292|     0|                ss << "def_decoder buffer (hex): ";
 293|     0|                for (size_t i = 0; i < bit_reader.max_bytes(); ++i) {
 294|     0|                    ss << std::hex << std::setw(2) << std::setfill('0')
 295|     0|                       << static_cast<int>(bit_reader.buffer()[i]) << " ";
 296|     0|                }
 297|     0|                LOG(WARNING) << ss.str();
 298|     0|                return Status::InternalError("Failed to decode definition level.");
 299|     0|            }
 300|   115|            if (def_level < _field_schema->definition_level) {
 301|     8|                null_size += loop_skip;
 302|   107|            } else {
 303|   107|                nonnull_size += loop_skip;
 304|   107|            }
 305|   115|            skipped += loop_skip;
 306|   115|        }
 307|   102|        if (null_size > 0) {
 308|     5|            RETURN_IF_ERROR(_chunk_reader->skip_values(null_size, false));
 309|     5|        }
 310|   102|        if (nonnull_size > 0) {
 311|   101|            RETURN_IF_ERROR(_chunk_reader->skip_values(nonnull_size, true));
 312|   101|        }
 313|   102|    } else {
 314|     0|        RETURN_IF_ERROR(_chunk_reader->skip_values(num_values));
 315|     0|    }
 316|   102|    return Status::OK();
 317|   102|}
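
Note on _skip_values: because null slots store no value in the data page, the skip is split by definition level. Levels are consumed in RLE runs, summed into null and non-null totals, and only the non-null total has to be decoded and discarded by the page decoder. A standalone sketch of the split, with a hypothetical run list standing in for the level decoder's get_next_run:

    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
        // Hypothetical field with definition_level = 1 (a plain nullable column).
        const int definition_level = 1;
        // (def_level, run_length) pairs, as an RLE level decoder would hand back.
        std::vector<std::pair<int, std::size_t>> runs = {{1, 90}, {0, 8}, {1, 2}};
        std::size_t null_size = 0, nonnull_size = 0;
        for (const auto& [def, len] : runs) {
            (def < definition_level ? null_size : nonnull_size) += len;
        }
        // Only the non-null total corresponds to stored values in the page.
        std::cout << null_size << " nulls, " << nonnull_size << " stored values\n";
    }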
 318|      |
 319|      |template <bool IN_COLLECTION, bool OFFSET_INDEX>
 320|      |Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::_read_values(size_t num_values,
 321|      |                                                                     ColumnPtr& doris_column,
 322|      |                                                                     DataTypePtr& type,
 323|      |                                                                     FilterMap& filter_map,
 324|   244|                                                                     bool is_dict_filter) {
 325|   244|    if (num_values == 0) {
 326|     0|        return Status::OK();
 327|     0|    }
 328|   244|    MutableColumnPtr data_column;
 329|   244|    std::vector<uint16_t> null_map;
 330|   244|    NullMap* map_data_column = nullptr;
 331|   244|    if (doris_column->is_nullable()) {
 332|   242|        SCOPED_RAW_TIMER(&_decode_null_map_time);
 333|      |        // doris_column either originates from a mutable block in vparquet_group_reader
 334|      |        // or is a newly created ColumnPtr, and therefore can be modified.
 335|   242|        auto* nullable_column =
 336|   242|                assert_cast<ColumnNullable*>(const_cast<IColumn*>(doris_column.get()));
 337|      |
 338|   242|        data_column = nullable_column->get_nested_column_ptr();
 339|   242|        map_data_column = &(nullable_column->get_null_map_data());
 340|   242|        if (_chunk_reader->max_def_level() > 0) {
 341|   174|            LevelDecoder& def_decoder = _chunk_reader->def_level_decoder();
 342|   174|            size_t has_read = 0;
 343|   174|            bool prev_is_null = true;
 344|   348|            while (has_read < num_values) {
 345|   174|                level_t def_level;
 346|   174|                size_t loop_read = def_decoder.get_next_run(&def_level, num_values - has_read);
 347|   174|                if (loop_read == 0) {
 348|     0|                    std::stringstream ss;
 349|     0|                    auto& bit_reader = def_decoder.rle_decoder().bit_reader();
 350|     0|                    ss << "def_decoder buffer (hex): ";
 351|     0|                    for (size_t i = 0; i < bit_reader.max_bytes(); ++i) {
 352|     0|                        ss << std::hex << std::setw(2) << std::setfill('0')
 353|     0|                           << static_cast<int>(bit_reader.buffer()[i]) << " ";
 354|     0|                    }
 355|     0|                    LOG(WARNING) << ss.str();
 356|     0|                    return Status::InternalError("Failed to decode definition level.");
 357|     0|                }
 358|      |
 359|   174|                bool is_null = def_level < _field_schema->definition_level;
 360|   174|                if (!(prev_is_null ^ is_null)) {
 361|    57|                    null_map.emplace_back(0);
 362|    57|                }
 363|   174|                size_t remaining = loop_read;
 364|   174|                while (remaining > USHRT_MAX) {
 365|     0|                    null_map.emplace_back(USHRT_MAX);
 366|     0|                    null_map.emplace_back(0);
 367|     0|                    remaining -= USHRT_MAX;
 368|     0|                }
 369|   174|                null_map.emplace_back((u_short)remaining);
 370|   174|                prev_is_null = is_null;
 371|   174|                has_read += loop_read;
 372|   174|            }
 373|   174|        }
 374|   242|    } else {
 375|     2|        if (_chunk_reader->max_def_level() > 0) {
 376|     0|            return Status::Corruption("Not nullable column has null values in parquet file");
 377|     0|        }
 378|     2|        data_column = doris_column->assume_mutable();
 379|     2|    }
 380|   244|    if (null_map.size() == 0) {
 381|    70|        size_t remaining = num_values;
 382|    70|        while (remaining > USHRT_MAX) {
 383|     0|            null_map.emplace_back(USHRT_MAX);
 384|     0|            null_map.emplace_back(0);
 385|     0|            remaining -= USHRT_MAX;
 386|     0|        }
 387|    70|        null_map.emplace_back((u_short)remaining);
 388|    70|    }
 389|   244|    ColumnSelectVector select_vector;
 390|   244|    {
 391|   244|        SCOPED_RAW_TIMER(&_decode_null_map_time);
 392|   244|        RETURN_IF_ERROR(select_vector.init(null_map, num_values, map_data_column, &filter_map,
 393|   244|                                           _filter_map_index));
 394|   244|        _filter_map_index += num_values;
 395|   244|    }
 396|     0|    return _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter);
 397|   244|}

Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE12_read_valuesEmRNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEb
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb0EE12_read_valuesEmRNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEb
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE12_read_valuesEmRNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEb

_ZN5doris18ScalarColumnReaderILb0ELb0EE12_read_valuesEmRNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEb
Line| Count|Source
 324|   244|                                                                     bool is_dict_filter) {
 325|   244|    if (num_values == 0) {
 326|     0|        return Status::OK();
 327|     0|    }
 328|   244|    MutableColumnPtr data_column;
 329|   244|    std::vector<uint16_t> null_map;
 330|   244|    NullMap* map_data_column = nullptr;
 331|   244|    if (doris_column->is_nullable()) {
 332|   242|        SCOPED_RAW_TIMER(&_decode_null_map_time);
 333|      |        // doris_column either originates from a mutable block in vparquet_group_reader
 334|      |        // or is a newly created ColumnPtr, and therefore can be modified.
 335|   242|        auto* nullable_column =
 336|   242|                assert_cast<ColumnNullable*>(const_cast<IColumn*>(doris_column.get()));
 337|      |
 338|   242|        data_column = nullable_column->get_nested_column_ptr();
 339|   242|        map_data_column = &(nullable_column->get_null_map_data());
 340|   242|        if (_chunk_reader->max_def_level() > 0) {
 341|   174|            LevelDecoder& def_decoder = _chunk_reader->def_level_decoder();
 342|   174|            size_t has_read = 0;
 343|   174|            bool prev_is_null = true;
 344|   348|            while (has_read < num_values) {
 345|   174|                level_t def_level;
 346|   174|                size_t loop_read = def_decoder.get_next_run(&def_level, num_values - has_read);
 347|   174|                if (loop_read == 0) {
 348|     0|                    std::stringstream ss;
 349|     0|                    auto& bit_reader = def_decoder.rle_decoder().bit_reader();
 350|     0|                    ss << "def_decoder buffer (hex): ";
 351|     0|                    for (size_t i = 0; i < bit_reader.max_bytes(); ++i) {
 352|     0|                        ss << std::hex << std::setw(2) << std::setfill('0')
 353|     0|                           << static_cast<int>(bit_reader.buffer()[i]) << " ";
 354|     0|                    }
 355|     0|                    LOG(WARNING) << ss.str();
 356|     0|                    return Status::InternalError("Failed to decode definition level.");
 357|     0|                }
 358|      |
 359|   174|                bool is_null = def_level < _field_schema->definition_level;
 360|   174|                if (!(prev_is_null ^ is_null)) {
 361|    57|                    null_map.emplace_back(0);
 362|    57|                }
 363|   174|                size_t remaining = loop_read;
 364|   174|                while (remaining > USHRT_MAX) {
 365|     0|                    null_map.emplace_back(USHRT_MAX);
 366|     0|                    null_map.emplace_back(0);
 367|     0|                    remaining -= USHRT_MAX;
 368|     0|                }
 369|   174|                null_map.emplace_back((u_short)remaining);
 370|   174|                prev_is_null = is_null;
 371|   174|                has_read += loop_read;
 372|   174|            }
 373|   174|        }
 374|   242|    } else {
 375|     2|        if (_chunk_reader->max_def_level() > 0) {
 376|     0|            return Status::Corruption("Not nullable column has null values in parquet file");
 377|     0|        }
 378|     2|        data_column = doris_column->assume_mutable();
 379|     2|    }
 380|   244|    if (null_map.size() == 0) {
 381|    70|        size_t remaining = num_values;
 382|    70|        while (remaining > USHRT_MAX) {
 383|     0|            null_map.emplace_back(USHRT_MAX);
 384|     0|            null_map.emplace_back(0);
 385|     0|            remaining -= USHRT_MAX;
 386|     0|        }
 387|    70|        null_map.emplace_back((u_short)remaining);
 388|    70|    }
 389|   244|    ColumnSelectVector select_vector;
 390|   244|    {
 391|   244|        SCOPED_RAW_TIMER(&_decode_null_map_time);
 392|   244|        RETURN_IF_ERROR(select_vector.init(null_map, num_values, map_data_column, &filter_map,
 393|   244|                                           _filter_map_index));
 394|   244|        _filter_map_index += num_values;
 395|   244|    }
 396|     0|    return _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter);
 397|   244|}
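
Note on the null_map built in _read_values: it is not a per-value bitmap but a list of alternating run lengths (uint16_t) that always starts with a non-null run; a 0-length run is inserted when two consecutive runs have the same nullness, and runs longer than USHRT_MAX are split with 0-length separators. A standalone sketch of the encoding with hypothetical input:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
        // Per-value nullness to encode: value, value, null, value.
        std::vector<bool> is_null = {false, false, true, false};
        std::vector<uint16_t> null_map; // alternating run lengths
        bool prev_is_null = true;       // matches the reader's initial state
        std::size_t i = 0;
        while (i < is_null.size()) {
            std::size_t j = i;
            while (j < is_null.size() && is_null[j] == is_null[i]) ++j;
            // Two consecutive runs of the same kind need a 0-length separator
            // so that decoding can keep strictly alternating.
            if (!(prev_is_null ^ is_null[i])) null_map.push_back(0);
            null_map.push_back(static_cast<uint16_t>(j - i));
            prev_is_null = is_null[i];
            i = j;
        }
        // Decodes as: 2 non-null, 1 null, 1 non-null.
        for (uint16_t v : null_map) std::cout << v << ' '; // prints: 2 1 1
        std::cout << '\n';
    }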
 398|      |
 399|      |/**
 400|      | * Load the nested column data of complex type.
 401|      | * A row of complex type may be stored across two(or more) pages, and the parameter `align_rows` indicates that
 402|      | * whether the reader should read the remaining value of the last row in previous page.
 403|      | */
 404|      |template <bool IN_COLLECTION, bool OFFSET_INDEX>
 405|      |Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::_read_nested_column(
 406|      |        ColumnPtr& doris_column, DataTypePtr& type, FilterMap& filter_map, size_t batch_size,
 407|    13|        size_t* read_rows, bool* eof, bool is_dict_filter) {
 408|    13|    _rep_levels.clear();
 409|    13|    _def_levels.clear();
 410|      |
 411|      |    // Handle nullable columns
 412|    13|    MutableColumnPtr data_column;
 413|    13|    NullMap* map_data_column = nullptr;
 414|    13|    if (doris_column->is_nullable()) {
 415|    13|        SCOPED_RAW_TIMER(&_decode_null_map_time);
 416|      |        // doris_column either originates from a mutable block in vparquet_group_reader
 417|      |        // or is a newly created ColumnPtr, and therefore can be modified.
 418|    13|        auto* nullable_column =
 419|    13|                const_cast<ColumnNullable*>(assert_cast<const ColumnNullable*>(doris_column.get()));
 420|    13|        data_column = nullable_column->get_nested_column_ptr();
 421|    13|        map_data_column = &(nullable_column->get_null_map_data());
 422|    13|    } else {
 423|     0|        if (_field_schema->data_type->is_nullable()) {
 424|     0|            return Status::Corruption("Not nullable column has null values in parquet file");
 425|     0|        }
 426|     0|        data_column = doris_column->assume_mutable();
 427|     0|    }
 428|      |
 429|    13|    std::vector<uint16_t> null_map;
 430|    13|    std::unordered_set<size_t> ancestor_null_indices;
 431|    13|    std::vector<uint8_t> nested_filter_map_data;
 432|      |
 433|    13|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
 434|    13|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
 435|    13|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
 436|    13|        if (filter_map.has_filter()) {
 437|     0|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
 438|     0|                                           _rep_levels.size(), nested_filter_map_data,
 439|     0|                                           &nested_filter_map));
 440|     0|        }
 441|      |
 442|    13|        null_map.clear();
 443|    13|        ancestor_null_indices.clear();
 444|    13|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
 445|    13|                                            ancestor_null_indices));
 446|      |
 447|    13|        ColumnSelectVector select_vector;
 448|    13|        {
 449|    13|            SCOPED_RAW_TIMER(&_decode_null_map_time);
 450|    13|            RETURN_IF_ERROR(select_vector.init(
 451|    13|                    null_map,
 452|    13|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
 453|    13|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
 454|    13|        }
 455|      |
 456|    13|        RETURN_IF_ERROR(
 457|    13|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
 458|    13|        if (ancestor_null_indices.size() != 0) {
 459|     0|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
 460|     0|        }
 461|    13|        if (filter_map.has_filter()) {
 462|     0|            auto new_rep_sz = before_rep_level_sz;
 463|     0|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
 464|     0|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
 465|     0|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
 466|     0|                    _def_levels[new_rep_sz] = _def_levels[idx];
 467|     0|                    new_rep_sz++;
 468|     0|                }
 469|     0|            }
 470|     0|            _rep_levels.resize(new_rep_sz);
 471|     0|            _def_levels.resize(new_rep_sz);
 472|     0|        }
 473|    13|        return Status::OK();
 474|    13|    };
Unexecuted instantiation: _ZZN5doris18ScalarColumnReaderILb1ELb1EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbbENKUlmmE_clEmm

_ZZN5doris18ScalarColumnReaderILb1ELb0EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbbENKUlmmE_clEmm
Line| Count|Source
 433|     3|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
 434|     3|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
 435|     3|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
 436|     3|        if (filter_map.has_filter()) {
 437|     0|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
 438|     0|                                           _rep_levels.size(), nested_filter_map_data,
 439|     0|                                           &nested_filter_map));
 440|     0|        }
 441|      |
 442|     3|        null_map.clear();
 443|     3|        ancestor_null_indices.clear();
 444|     3|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
 445|     3|                                            ancestor_null_indices));
 446|      |
 447|     3|        ColumnSelectVector select_vector;
 448|     3|        {
 449|     3|            SCOPED_RAW_TIMER(&_decode_null_map_time);
 450|     3|            RETURN_IF_ERROR(select_vector.init(
 451|     3|                    null_map,
 452|     3|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
 453|     3|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
 454|     3|        }
 455|      |
 456|     3|        RETURN_IF_ERROR(
 457|     3|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
 458|     3|        if (ancestor_null_indices.size() != 0) {
 459|     0|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
 460|     0|        }
 461|     3|        if (filter_map.has_filter()) {
 462|     0|            auto new_rep_sz = before_rep_level_sz;
 463|     0|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
 464|     0|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
 465|     0|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
 466|     0|                    _def_levels[new_rep_sz] = _def_levels[idx];
 467|     0|                    new_rep_sz++;
 468|     0|                }
 469|     0|            }
 470|     0|            _rep_levels.resize(new_rep_sz);
 471|     0|            _def_levels.resize(new_rep_sz);
 472|     0|        }
 473|     3|        return Status::OK();
 474|     3|    };

Unexecuted instantiation: _ZZN5doris18ScalarColumnReaderILb0ELb1EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbbENKUlmmE_clEmm

_ZZN5doris18ScalarColumnReaderILb0ELb0EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbbENKUlmmE_clEmm
Line| Count|Source
 433|    10|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
 434|    10|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
 435|    10|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
 436|    10|        if (filter_map.has_filter()) {
 437|     0|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
 438|     0|                                           _rep_levels.size(), nested_filter_map_data,
 439|     0|                                           &nested_filter_map));
 440|     0|        }
 441|      |
 442|    10|        null_map.clear();
 443|    10|        ancestor_null_indices.clear();
 444|    10|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
 445|    10|                                            ancestor_null_indices));
 446|      |
 447|    10|        ColumnSelectVector select_vector;
 448|    10|        {
 449|    10|            SCOPED_RAW_TIMER(&_decode_null_map_time);
 450|    10|            RETURN_IF_ERROR(select_vector.init(
 451|    10|                    null_map,
 452|    10|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
 453|    10|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
 454|    10|        }
 455|      |
 456|    10|        RETURN_IF_ERROR(
 457|    10|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
 458|    10|        if (ancestor_null_indices.size() != 0) {
 459|     0|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
 460|     0|        }
 461|    10|        if (filter_map.has_filter()) {
 462|     0|            auto new_rep_sz = before_rep_level_sz;
 463|     0|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
 464|     0|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
 465|     0|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
 466|     0|                    _def_levels[new_rep_sz] = _def_levels[idx];
 467|     0|                    new_rep_sz++;
 468|     0|                }
 469|     0|            }
 470|     0|            _rep_levels.resize(new_rep_sz);
 471|     0|            _def_levels.resize(new_rep_sz);
 472|     0|        }
 473|    10|        return Status::OK();
 474|    10|    };
 475|      |
 476|    15|    while (_current_range_idx < _row_ranges.range_size()) {
 477|    13|        size_t left_row =
 478|    13|                std::max(_current_row_index, _row_ranges.get_range_from(_current_range_idx));
 479|    13|        size_t right_row = std::min(left_row + batch_size - *read_rows,
 480|    13|                                    (size_t)_row_ranges.get_range_to(_current_range_idx));
 481|    13|        _current_row_index = left_row;
 482|    13|        RETURN_IF_ERROR(_chunk_reader->seek_to_nested_row(left_row));
 483|    13|        size_t load_rows = 0;
 484|    13|        bool cross_page = false;
 485|    13|        size_t before_rep_level_sz = _rep_levels.size();
 486|    13|        RETURN_IF_ERROR(_chunk_reader->load_page_nested_rows(_rep_levels, right_row - left_row,
 487|    13|                                                             &load_rows, &cross_page));
 488|    13|        RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index));
 489|    13|        _filter_map_index += load_rows;
 490|    13|        while (cross_page) {
 491|     0|            before_rep_level_sz = _rep_levels.size();
 492|     0|            RETURN_IF_ERROR(_chunk_reader->load_cross_page_nested_row(_rep_levels, &cross_page));
 493|     0|            RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index - 1));
 494|     0|        }
 495|    13|        *read_rows += load_rows;
 496|    13|        _current_row_index += load_rows;
 497|    13|        _current_range_idx += (_current_row_index == _row_ranges.get_range_to(_current_range_idx));
 498|    13|        if (*read_rows == batch_size) {
 499|    11|            break;
 500|    11|        }
 501|    13|    }
 502|    13|    *eof = _current_range_idx == _row_ranges.range_size();
 503|    13|    return Status::OK();
 504|    13|}
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbb

_ZN5doris18ScalarColumnReaderILb1ELb0EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbb
Line| Count|Source
 407|     3|        size_t* read_rows, bool* eof, bool is_dict_filter) {
 408|     3|    _rep_levels.clear();
 409|     3|    _def_levels.clear();
 410|      |
 411|      |    // Handle nullable columns
 412|     3|    MutableColumnPtr data_column;
 413|     3|    NullMap* map_data_column = nullptr;
 414|     3|    if (doris_column->is_nullable()) {
 415|     3|        SCOPED_RAW_TIMER(&_decode_null_map_time);
 416|      |        // doris_column either originates from a mutable block in vparquet_group_reader
 417|      |        // or is a newly created ColumnPtr, and therefore can be modified.
 418|     3|        auto* nullable_column =
 419|     3|                const_cast<ColumnNullable*>(assert_cast<const ColumnNullable*>(doris_column.get()));
 420|     3|        data_column = nullable_column->get_nested_column_ptr();
 421|     3|        map_data_column = &(nullable_column->get_null_map_data());
 422|     3|    } else {
 423|     0|        if (_field_schema->data_type->is_nullable()) {
 424|     0|            return Status::Corruption("Not nullable column has null values in parquet file");
 425|     0|        }
 426|     0|        data_column = doris_column->assume_mutable();
 427|     0|    }
 428|      |
 429|     3|    std::vector<uint16_t> null_map;
 430|     3|    std::unordered_set<size_t> ancestor_null_indices;
 431|     3|    std::vector<uint8_t> nested_filter_map_data;
 432|      |
 433|     3|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
 434|     3|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
 435|     3|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
 436|     3|        if (filter_map.has_filter()) {
 437|     3|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
 438|     3|                                           _rep_levels.size(), nested_filter_map_data,
 439|     3|                                           &nested_filter_map));
 440|     3|        }
 441|      |
 442|     3|        null_map.clear();
 443|     3|        ancestor_null_indices.clear();
 444|     3|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
 445|     3|                                            ancestor_null_indices));
 446|      |
 447|     3|        ColumnSelectVector select_vector;
 448|     3|        {
 449|     3|            SCOPED_RAW_TIMER(&_decode_null_map_time);
 450|     3|            RETURN_IF_ERROR(select_vector.init(
 451|     3|                    null_map,
 452|     3|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
 453|     3|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
 454|     3|        }
 455|      |
 456|     3|        RETURN_IF_ERROR(
 457|     3|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
 458|     3|        if (ancestor_null_indices.size() != 0) {
 459|     3|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
 460|     3|        }
 461|     3|        if (filter_map.has_filter()) {
 462|     3|            auto new_rep_sz = before_rep_level_sz;
 463|     3|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
 464|     3|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
 465|     3|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
 466|     3|                    _def_levels[new_rep_sz] = _def_levels[idx];
 467|     3|                    new_rep_sz++;
 468|     3|                }
 469|     3|            }
 470|     3|            _rep_levels.resize(new_rep_sz);
 471|     3|            _def_levels.resize(new_rep_sz);
 472|     3|        }
 473|     3|        return Status::OK();
 474|     3|    };
 475|      |
 476|     3|    while (_current_range_idx < _row_ranges.range_size()) {
 477|     3|        size_t left_row =
 478|     3|                std::max(_current_row_index, _row_ranges.get_range_from(_current_range_idx));
 479|     3|        size_t right_row = std::min(left_row + batch_size - *read_rows,
 480|     3|                                    (size_t)_row_ranges.get_range_to(_current_range_idx));
 481|     3|        _current_row_index = left_row;
 482|     3|        RETURN_IF_ERROR(_chunk_reader->seek_to_nested_row(left_row));
 483|     3|        size_t load_rows = 0;
 484|     3|        bool cross_page = false;
 485|     3|        size_t before_rep_level_sz = _rep_levels.size();
 486|     3|        RETURN_IF_ERROR(_chunk_reader->load_page_nested_rows(_rep_levels, right_row - left_row,
 487|     3|                                                             &load_rows, &cross_page));
 488|     3|        RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index));
 489|     3|        _filter_map_index += load_rows;
 490|     3|        while (cross_page) {
 491|     0|            before_rep_level_sz = _rep_levels.size();
 492|     0|            RETURN_IF_ERROR(_chunk_reader->load_cross_page_nested_row(_rep_levels, &cross_page));
 493|     0|            RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index - 1));
 494|     0|        }
 495|     3|        *read_rows += load_rows;
 496|     3|        _current_row_index += load_rows;
 497|     3|        _current_range_idx += (_current_row_index == _row_ranges.get_range_to(_current_range_idx));
 498|     3|        if (*read_rows == batch_size) {
 499|     3|            break;
 500|     3|        }
 501|     3|    }
 502|     3|    *eof = _current_range_idx == _row_ranges.range_size();
 503|     3|    return Status::OK();
 504|     3|}
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbb

_ZN5doris18ScalarColumnReaderILb0ELb0EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbb
Line| Count|Source
 407|    10|        size_t* read_rows, bool* eof, bool is_dict_filter) {
 408|    10|    _rep_levels.clear();
 409|    10|    _def_levels.clear();
 410|      |
 411|      |    // Handle nullable columns
 412|    10|    MutableColumnPtr data_column;
 413|    10|    NullMap* map_data_column = nullptr;
 414|    10|    if (doris_column->is_nullable()) {
 415|    10|        SCOPED_RAW_TIMER(&_decode_null_map_time);
 416|      |        // doris_column either originates from a mutable block in vparquet_group_reader
 417|      |        // or is a newly created ColumnPtr, and therefore can be modified.
 418|    10|        auto* nullable_column =
 419|    10|                const_cast<ColumnNullable*>(assert_cast<const ColumnNullable*>(doris_column.get()));
 420|    10|        data_column = nullable_column->get_nested_column_ptr();
 421|    10|        map_data_column = &(nullable_column->get_null_map_data());
 422|    10|    } else {
 423|     0|        if (_field_schema->data_type->is_nullable()) {
 424|     0|            return Status::Corruption("Not nullable column has null values in parquet file");
 425|     0|        }
 426|     0|        data_column = doris_column->assume_mutable();
 427|     0|    }
 428|      |
 429|    10|    std::vector<uint16_t> null_map;
 430|    10|    std::unordered_set<size_t> ancestor_null_indices;
 431|    10|    std::vector<uint8_t> nested_filter_map_data;
 432|      |
 433|    10|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
 434|    10|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
 435|    10|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
 436|    10|        if (filter_map.has_filter()) {
 437|    10|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
 438|    10|                                           _rep_levels.size(), nested_filter_map_data,
 439|    10|                                           &nested_filter_map));
 440|    10|        }
 441|      |
 442|    10|        null_map.clear();
 443|    10|        ancestor_null_indices.clear();
 444|    10|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
 445|    10|                                            ancestor_null_indices));
 446|      |
 447|    10|        ColumnSelectVector select_vector;
 448|    10|        {
 449|    10|            SCOPED_RAW_TIMER(&_decode_null_map_time);
 450|    10|            RETURN_IF_ERROR(select_vector.init(
 451|    10|                    null_map,
 452|    10|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
 453|    10|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
 454|    10|        }
 455|      |
 456|    10|        RETURN_IF_ERROR(
 457|    10|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
 458|    10|        if (ancestor_null_indices.size() != 0) {
 459|    10|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
 460|    10|        }
 461|    10|        if (filter_map.has_filter()) {
 462|    10|            auto new_rep_sz = before_rep_level_sz;
 463|    10|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
 464|    10|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
 465|    10|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
 466|    10|                    _def_levels[new_rep_sz] = _def_levels[idx];
 467|    10|                    new_rep_sz++;
 468|    10|                }
 469|    10|            }
 470|    10|            _rep_levels.resize(new_rep_sz);
 471|    10|            _def_levels.resize(new_rep_sz);
 472|    10|        }
 473|    10|        return Status::OK();
 474|    10|    };
 475|      |
 476|    12|    while (_current_range_idx < _row_ranges.range_size()) {
 477|    10|        size_t left_row =
 478|    10|                std::max(_current_row_index, _row_ranges.get_range_from(_current_range_idx));
 479|    10|        size_t right_row = std::min(left_row + batch_size - *read_rows,
 480|    10|                                    (size_t)_row_ranges.get_range_to(_current_range_idx));
 481|    10|        _current_row_index = left_row;
 482|    10|        RETURN_IF_ERROR(_chunk_reader->seek_to_nested_row(left_row));
 483|    10|        size_t load_rows = 0;
 484|    10|        bool cross_page = false;
 485|    10|        size_t before_rep_level_sz = _rep_levels.size();
 486|    10|        RETURN_IF_ERROR(_chunk_reader->load_page_nested_rows(_rep_levels, right_row - left_row,
 487|    10|                                                             &load_rows, &cross_page));
 488|    10|        RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index));
 489|    10|        _filter_map_index += load_rows;
 490|    10|        while (cross_page) {
 491|     0|            before_rep_level_sz = _rep_levels.size();
 492|     0|            RETURN_IF_ERROR(_chunk_reader->load_cross_page_nested_row(_rep_levels, &cross_page));
 493|     0|            RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index - 1));
 494|     0|        }
 495|    10|        *read_rows += load_rows;
 496|    10|        _current_row_index += load_rows;
 497|    10|        _current_range_idx += (_current_row_index == _row_ranges.get_range_to(_current_range_idx));
 498|    10|        if (*read_rows == batch_size) {
 499|     8|            break;
 500|     8|        }
 501|    10|    }
 502|    10|    *eof = _current_range_idx == _row_ranges.range_size();
 503|    10|    return Status::OK();
 504|    10|}
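
Note on the read loop of _read_nested_column: load_page_nested_rows collects repetition levels until the requested number of top-level rows is reached, and a value with rep_level 0 starts a new top-level row. Whether the last row of a page also ends there cannot be known until the next page's first rep level is seen, which is what sets cross_page and triggers load_cross_page_nested_row. A minimal standalone sketch of the row-boundary rule with hypothetical page data:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        // Repetition levels of one page: rep 0 opens a new top-level row,
        // rep > 0 continues the current one.
        std::vector<int> rep_levels = {0, 1, 1, 0, 1};
        std::size_t rows_started = 0;
        for (int r : rep_levels) {
            if (r == 0) ++rows_started;
        }
        // Whether the last row also *ends* in this page is unknown until the
        // next page's first rep level is seen; that is the cross_page case.
        std::cout << "rows started in this page: " << rows_started << '\n'; // 2
    }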
 505|      |
 506|      |template <bool IN_COLLECTION, bool OFFSET_INDEX>
 507|      |Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::read_dict_values_to_column(
 508|     2|        MutableColumnPtr& doris_column, bool* has_dict) {
 509|     2|    bool loaded;
 510|     2|    RETURN_IF_ERROR(_try_load_dict_page(&loaded, has_dict));
 511|     2|    if (loaded && *has_dict) {
 512|     2|        return _chunk_reader->read_dict_values_to_column(doris_column);
 513|     2|    }
 514|     0|    return Status::OK();
 515|     2|}

Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE26read_dict_values_to_columnERNS_3COWINS_7IColumnEE11mutable_ptrIS3_EEPb
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb0EE26read_dict_values_to_columnERNS_3COWINS_7IColumnEE11mutable_ptrIS3_EEPb
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE26read_dict_values_to_columnERNS_3COWINS_7IColumnEE11mutable_ptrIS3_EEPb

_ZN5doris18ScalarColumnReaderILb0ELb0EE26read_dict_values_to_columnERNS_3COWINS_7IColumnEE11mutable_ptrIS3_EEPb
Line| Count|Source
 508|     2|        MutableColumnPtr& doris_column, bool* has_dict) {
 509|     2|    bool loaded;
 510|     2|    RETURN_IF_ERROR(_try_load_dict_page(&loaded, has_dict));
 511|     2|    if (loaded && *has_dict) {
 512|     2|        return _chunk_reader->read_dict_values_to_column(doris_column);
 513|     2|    }
 514|     0|    return Status::OK();
 515|     2|}

 516|      |template <bool IN_COLLECTION, bool OFFSET_INDEX>
 517|      |Result<MutableColumnPtr>
 518|      |ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::convert_dict_column_to_string_column(
 519|     0|        const ColumnInt32* dict_column) {
 520|     0|    return _chunk_reader->convert_dict_column_to_string_column(dict_column);
 521|     0|}

Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE36convert_dict_column_to_string_columnEPKNS_12ColumnVectorILNS_13PrimitiveTypeE5EEE
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb0EE36convert_dict_column_to_string_columnEPKNS_12ColumnVectorILNS_13PrimitiveTypeE5EEE
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE36convert_dict_column_to_string_columnEPKNS_12ColumnVectorILNS_13PrimitiveTypeE5EEE
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb0EE36convert_dict_column_to_string_columnEPKNS_12ColumnVectorILNS_13PrimitiveTypeE5EEE
522
523
template <bool IN_COLLECTION, bool OFFSET_INDEX>
524
Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::_try_load_dict_page(bool* loaded,
525
2
                                                                            bool* has_dict) {
526
    // _chunk_reader's init already parses the first page header, which tells us whether a dict page exists
527
2
    *loaded = true;
528
2
    *has_dict = _chunk_reader->has_dict();
529
2
    return Status::OK();
530
2
}
531
532
template <bool IN_COLLECTION, bool OFFSET_INDEX>
533
Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::read_column_data(
534
        ColumnPtr& doris_column, const DataTypePtr& type,
535
        const std::shared_ptr<TableSchemaChangeHelper::Node>& root_node, FilterMap& filter_map,
536
        size_t batch_size, size_t* read_rows, bool* eof, bool is_dict_filter,
537
287
        int64_t real_column_size) {
538
287
    if (_converter == nullptr) {
539
114
        _converter = parquet::PhysicalToLogicalConverter::get_converter(
540
114
                _field_schema, _field_schema->data_type, type, _ctz, is_dict_filter);
541
114
        if (!_converter->support()) {
542
0
            return Status::InternalError(
543
0
                    "The column type of '{}' is not supported: {}, is_dict_filter: {}, "
544
0
                    "src_logical_type: {}, dst_logical_type: {}",
545
0
                    _field_schema->name, _converter->get_error_msg(), is_dict_filter,
546
0
                    _field_schema->data_type->get_name(), type->get_name());
547
0
        }
548
114
    }
549
    // !FIXME: verify that the get_physical_column logic is correct; why does it return a doris_column?
550
287
    ColumnPtr resolved_column =
551
287
            _converter->get_physical_column(_field_schema->physical_type, _field_schema->data_type,
552
287
                                            doris_column, type, is_dict_filter);
553
287
    DataTypePtr& resolved_type = _converter->get_physical_type();
554
555
287
    _def_levels.clear();
556
287
    _rep_levels.clear();
557
287
    *read_rows = 0;
558
559
287
    if (_in_nested) {
560
13
        RETURN_IF_ERROR(_read_nested_column(resolved_column, resolved_type, filter_map, batch_size,
561
13
                                            read_rows, eof, is_dict_filter));
562
13
        return _converter->convert(resolved_column, _field_schema->data_type, type, doris_column,
563
13
                                   is_dict_filter);
564
13
    }
565
566
274
    int64_t right_row = 0;
567
274
    if constexpr (OFFSET_INDEX == false) {
568
274
        RETURN_IF_ERROR(_chunk_reader->parse_page_header());
569
274
        right_row = _chunk_reader->page_end_row();
570
274
    } else {
571
0
        right_row = _chunk_reader->page_end_row();
572
0
    }
573
574
274
    do {
575
        // generate the row ranges that should be read
576
274
        RowRanges read_ranges;
577
274
        _generate_read_ranges(RowRange {_current_row_index, right_row}, &read_ranges);
578
274
        if (read_ranges.count() == 0) {
579
            // skip the whole page
580
63
            _current_row_index = right_row;
581
211
        } else {
582
211
            bool skip_whole_batch = false;
583
            // Deciding whether to skip a page or batch adds computation time,
584
            // so we only attempt it when the filter ratio exceeds 60%, where skipping is likely to pay off.
585
211
            if (filter_map.has_filter() && filter_map.filter_ratio() > 0.6) {
586
                // lazy read
587
0
                size_t remaining_num_values = read_ranges.count();
588
0
                if (batch_size >= remaining_num_values &&
589
0
                    filter_map.can_filter_all(remaining_num_values, _filter_map_index)) {
590
                    // We can skip the whole page if the remaining values are filtered by predicate columns
591
0
                    _filter_map_index += remaining_num_values;
592
0
                    _current_row_index = right_row;
593
0
                    *read_rows = remaining_num_values;
594
0
                    break;
595
0
                }
596
0
                skip_whole_batch = batch_size <= remaining_num_values &&
597
0
                                   filter_map.can_filter_all(batch_size, _filter_map_index);
598
0
                if (skip_whole_batch) {
599
0
                    _filter_map_index += batch_size;
600
0
                }
601
0
            }
602
            // load page data to decode or skip values
603
211
            RETURN_IF_ERROR(_chunk_reader->parse_page_header());
604
211
            RETURN_IF_ERROR(_chunk_reader->load_page_data_idempotent());
605
211
            size_t has_read = 0;
606
344
            for (size_t idx = 0; idx < read_ranges.range_size(); idx++) {
607
244
                auto range = read_ranges.get_range(idx);
608
                // skip the values that precede this range
609
244
                size_t skip_values = range.from() - _current_row_index;
610
244
                RETURN_IF_ERROR(_skip_values(skip_values));
611
244
                _current_row_index += skip_values;
612
                // compute how many values to read from this range
613
244
                size_t read_values =
614
244
                        std::min((size_t)(range.to() - range.from()), batch_size - has_read);
615
244
                if (skip_whole_batch) {
616
0
                    RETURN_IF_ERROR(_skip_values(read_values));
617
244
                } else {
618
244
                    RETURN_IF_ERROR(_read_values(read_values, resolved_column, resolved_type,
619
244
                                                 filter_map, is_dict_filter));
620
244
                }
621
244
                has_read += read_values;
622
244
                *read_rows += read_values;
623
244
                _current_row_index += read_values;
624
244
                if (has_read == batch_size) {
625
111
                    break;
626
111
                }
627
244
            }
628
211
        }
629
274
    } while (false);
630
631
274
    if (right_row == _current_row_index) {
632
101
        if (!_chunk_reader->has_next_page()) {
633
101
            *eof = true;
634
101
        } else {
635
0
            RETURN_IF_ERROR(_chunk_reader->next_page());
636
0
        }
637
101
    }
638
639
274
    {
640
274
        SCOPED_RAW_TIMER(&_convert_time);
641
274
        RETURN_IF_ERROR(_converter->convert(resolved_column, _field_schema->data_type, type,
642
274
                                            doris_column, is_dict_filter));
643
274
    }
644
274
    return Status::OK();
645
274
}
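
The skip logic above makes a three-way decision: skip the whole page when everything left in it is filtered, skip one batch when that batch is fully filtered, otherwise decode; and it only pays the cost of checking when the filter ratio exceeds 60%. A standalone sketch of that decision, assuming a plain byte vector as the filter map (not the Doris FilterMap API):

#include <cstddef>
#include <cstdint>
#include <vector>

enum class SkipAction { kSkipPage, kSkipBatch, kDecode };

// True when none of the next n filter-map entries keeps its row.
static bool all_filtered(const std::vector<uint8_t>& filter, size_t pos, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        if (filter[pos + i]) return false;  // at least one row survives
    }
    return true;
}

static SkipAction choose_skip(const std::vector<uint8_t>& filter, size_t pos,
                              size_t remaining_in_page, size_t batch_size,
                              double filter_ratio) {
    if (filter_ratio <= 0.6) return SkipAction::kDecode;  // checking costs more than it saves
    if (batch_size >= remaining_in_page && all_filtered(filter, pos, remaining_in_page)) {
        return SkipAction::kSkipPage;   // everything left in the page is filtered
    }
    if (batch_size <= remaining_in_page && all_filtered(filter, pos, batch_size)) {
        return SkipAction::kSkipBatch;  // this batch is fully filtered
    }
    return SkipAction::kDecode;
}
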
646
647
Status ArrayColumnReader::init(std::unique_ptr<ParquetColumnReader> element_reader,
648
2
                               FieldSchema* field) {
649
2
    _field_schema = field;
650
2
    _element_reader = std::move(element_reader);
651
2
    return Status::OK();
652
2
}
653
654
Status ArrayColumnReader::read_column_data(
655
        ColumnPtr& doris_column, const DataTypePtr& type,
656
        const std::shared_ptr<TableSchemaChangeHelper::Node>& root_node, FilterMap& filter_map,
657
        size_t batch_size, size_t* read_rows, bool* eof, bool is_dict_filter,
658
2
        int64_t real_column_size) {
659
2
    MutableColumnPtr data_column;
660
2
    NullMap* null_map_ptr = nullptr;
661
2
    if (doris_column->is_nullable()) {
662
2
        auto mutable_column = doris_column->assume_mutable();
663
2
        auto* nullable_column = assert_cast<ColumnNullable*>(mutable_column.get());
664
2
        null_map_ptr = &nullable_column->get_null_map_data();
665
2
        data_column = nullable_column->get_nested_column_ptr();
666
2
    } else {
667
0
        if (_field_schema->data_type->is_nullable()) {
668
0
            return Status::Corruption("Not nullable column has null values in parquet file");
669
0
        }
670
0
        data_column = doris_column->assume_mutable();
671
0
    }
672
2
    if (type->get_primitive_type() != PrimitiveType::TYPE_ARRAY) {
673
0
        return Status::Corruption(
674
0
                "Wrong data type for column '{}', expected Array type, actual type: {}.",
675
0
                _field_schema->name, type->get_name());
676
0
    }
677
678
2
    ColumnPtr& element_column = assert_cast<ColumnArray&>(*data_column).get_data_ptr();
679
2
    const DataTypePtr& element_type =
680
2
            (assert_cast<const DataTypeArray*>(remove_nullable(type).get()))->get_nested_type();
681
    // read nested column
682
2
    RETURN_IF_ERROR(_element_reader->read_column_data(element_column, element_type,
683
2
                                                      root_node->get_element_node(), filter_map,
684
2
                                                      batch_size, read_rows, eof, is_dict_filter));
685
2
    if (*read_rows == 0) {
686
0
        return Status::OK();
687
0
    }
688
689
2
    ColumnArray::Offsets64& offsets_data = assert_cast<ColumnArray&>(*data_column).get_offsets();
690
    // fill offset and null map
691
2
    fill_array_offset(_field_schema, offsets_data, null_map_ptr, _element_reader->get_rep_level(),
692
2
                      _element_reader->get_def_level());
693
2
    DCHECK_EQ(element_column->size(), offsets_data.back());
694
2
#ifndef NDEBUG
695
2
    doris_column->sanity_check();
696
2
#endif
697
2
    return Status::OK();
698
2
}
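
fill_array_offset derives the ColumnArray offsets from the element reader's repetition levels: rep 0 opens a new top-level array and every level appends one element to the open array. A simplified sketch of that derivation, ignoring the null handling and ancestor levels the real function must also deal with; it assumes the levels start with a rep 0, as they do at the start of a column chunk:

#include <cstdint>
#include <vector>

using level_t = int16_t;

// offsets[i] = index one past the last element of array i (cumulative sizes).
static std::vector<uint64_t> offsets_from_rep_levels(const std::vector<level_t>& reps) {
    std::vector<uint64_t> offsets;
    for (level_t r : reps) {
        if (r == 0) {
            offsets.push_back(offsets.empty() ? 0 : offsets.back());  // open a new array
        }
        offsets.back() += 1;  // this element belongs to the currently open array
    }
    return offsets;
}
// e.g. reps {0,1,1, 0, 0,1} -> offsets {3, 4, 6}: arrays of 3, 1 and 2 elements.
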
699
700
Status MapColumnReader::init(std::unique_ptr<ParquetColumnReader> key_reader,
701
                             std::unique_ptr<ParquetColumnReader> value_reader,
702
0
                             FieldSchema* field) {
703
0
    _field_schema = field;
704
0
    _key_reader = std::move(key_reader);
705
0
    _value_reader = std::move(value_reader);
706
0
    return Status::OK();
707
0
}
708
709
Status MapColumnReader::read_column_data(
710
        ColumnPtr& doris_column, const DataTypePtr& type,
711
        const std::shared_ptr<TableSchemaChangeHelper::Node>& root_node, FilterMap& filter_map,
712
        size_t batch_size, size_t* read_rows, bool* eof, bool is_dict_filter,
713
0
        int64_t real_column_size) {
714
0
    MutableColumnPtr data_column;
715
0
    NullMap* null_map_ptr = nullptr;
716
0
    if (doris_column->is_nullable()) {
717
0
        auto mutable_column = doris_column->assume_mutable();
718
0
        auto* nullable_column = assert_cast<ColumnNullable*>(mutable_column.get());
719
0
        null_map_ptr = &nullable_column->get_null_map_data();
720
0
        data_column = nullable_column->get_nested_column_ptr();
721
0
    } else {
722
0
        if (_field_schema->data_type->is_nullable()) {
723
0
            return Status::Corruption("Not nullable column has null values in parquet file");
724
0
        }
725
0
        data_column = doris_column->assume_mutable();
726
0
    }
727
0
    if (remove_nullable(type)->get_primitive_type() != PrimitiveType::TYPE_MAP) {
728
0
        return Status::Corruption(
729
0
                "Wrong data type for column '{}', expected Map type, actual type id {}.",
730
0
                _field_schema->name, type->get_name());
731
0
    }
732
733
0
    auto& map = assert_cast<ColumnMap&>(*data_column);
734
0
    const DataTypePtr& key_type =
735
0
            assert_cast<const DataTypeMap*>(remove_nullable(type).get())->get_key_type();
736
0
    const DataTypePtr& value_type =
737
0
            assert_cast<const DataTypeMap*>(remove_nullable(type).get())->get_value_type();
738
0
    ColumnPtr& key_column = map.get_keys_ptr();
739
0
    ColumnPtr& value_column = map.get_values_ptr();
740
741
0
    size_t key_rows = 0;
742
0
    size_t value_rows = 0;
743
0
    bool key_eof = false;
744
0
    bool value_eof = false;
745
0
    int64_t orig_col_column_size = key_column->size();
746
747
0
    RETURN_IF_ERROR(_key_reader->read_column_data(key_column, key_type, root_node->get_key_node(),
748
0
                                                  filter_map, batch_size, &key_rows, &key_eof,
749
0
                                                  is_dict_filter));
750
751
0
    while (value_rows < key_rows && !value_eof) {
752
0
        size_t loop_rows = 0;
753
0
        RETURN_IF_ERROR(_value_reader->read_column_data(
754
0
                value_column, value_type, root_node->get_value_node(), filter_map,
755
0
                key_rows - value_rows, &loop_rows, &value_eof, is_dict_filter,
756
0
                key_column->size() - orig_col_column_size));
757
0
        value_rows += loop_rows;
758
0
    }
759
0
    DCHECK_EQ(key_rows, value_rows);
760
0
    *read_rows = key_rows;
761
0
    *eof = key_eof;
762
763
0
    if (*read_rows == 0) {
764
0
        return Status::OK();
765
0
    }
766
767
0
    DCHECK_EQ(key_column->size(), value_column->size());
768
    // fill offset and null map
769
0
    fill_array_offset(_field_schema, map.get_offsets(), null_map_ptr, _key_reader->get_rep_level(),
770
0
                      _key_reader->get_def_level());
771
0
    DCHECK_EQ(key_column->size(), map.get_offsets().back());
772
0
#ifndef NDEBUG
773
0
    doris_column->sanity_check();
774
0
#endif
775
0
    return Status::OK();
776
0
}
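
The map reader drives on the key column and then loops the value reader until it has produced the same number of rows, because a single call may return fewer rows when a page boundary intervenes. A standalone sketch of that synchronization, with a hypothetical callable standing in for a column reader:

#include <cstddef>
#include <functional>

// One call reads up to `want` rows, reports rows actually read, and sets *eof
// when the column is exhausted; returns false on error. (Sketch, not the
// Doris reader interface.)
using ReadFn = std::function<bool(size_t want, size_t* rows, bool* eof)>;

static bool read_map_pair(const ReadFn& read_key, const ReadFn& read_value,
                          size_t batch, size_t* out_rows) {
    size_t key_rows = 0, value_rows = 0;
    bool key_eof = false, value_eof = false;
    if (!read_key(batch, &key_rows, &key_eof)) return false;
    while (value_rows < key_rows && !value_eof) {  // catch the value column up
        size_t got = 0;
        if (!read_value(key_rows - value_rows, &got, &value_eof)) return false;
        value_rows += got;
    }
    *out_rows = key_rows;
    return key_rows == value_rows;  // keys and values must stay in lockstep
}
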
777
778
Status StructColumnReader::init(
779
        std::unordered_map<std::string, std::unique_ptr<ParquetColumnReader>>&& child_readers,
780
11
        FieldSchema* field) {
781
11
    _field_schema = field;
782
11
    _child_readers = std::move(child_readers);
783
11
    return Status::OK();
784
11
}
785
Status StructColumnReader::read_column_data(
786
        ColumnPtr& doris_column, const DataTypePtr& type,
787
        const std::shared_ptr<TableSchemaChangeHelper::Node>& root_node, FilterMap& filter_map,
788
        size_t batch_size, size_t* read_rows, bool* eof, bool is_dict_filter,
789
11
        int64_t real_column_size) {
790
11
    MutableColumnPtr data_column;
791
11
    NullMap* null_map_ptr = nullptr;
792
11
    if (doris_column->is_nullable()) {
793
11
        auto mutable_column = doris_column->assume_mutable();
794
11
        auto* nullable_column = assert_cast<ColumnNullable*>(mutable_column.get());
795
11
        null_map_ptr = &nullable_column->get_null_map_data();
796
11
        data_column = nullable_column->get_nested_column_ptr();
797
11
    } else {
798
0
        if (_field_schema->data_type->is_nullable()) {
799
0
            return Status::Corruption("Not nullable column has null values in parquet file");
800
0
        }
801
0
        data_column = doris_column->assume_mutable();
802
0
    }
803
11
    if (type->get_primitive_type() != PrimitiveType::TYPE_STRUCT) {
804
0
        return Status::Corruption(
805
0
                "Wrong data type for column '{}', expected Struct type, actual type id {}.",
806
0
                _field_schema->name, type->get_name());
807
0
    }
808
809
11
    auto& doris_struct = assert_cast<ColumnStruct&>(*data_column);
810
11
    const auto* doris_struct_type = assert_cast<const DataTypeStruct*>(remove_nullable(type).get());
811
812
11
    int64_t not_missing_column_id = -1;
813
11
    size_t not_missing_orig_column_size = 0;
814
11
    std::vector<size_t> missing_column_idxs {};
815
11
    std::vector<size_t> skip_reading_column_idxs {};
816
817
11
    _read_column_names.clear();
818
819
37
    for (size_t i = 0; i < doris_struct.tuple_size(); ++i) {
820
26
        ColumnPtr& doris_field = doris_struct.get_column_ptr(i);
821
26
        auto& doris_type = doris_struct_type->get_element(i);
822
26
        auto& doris_name = doris_struct_type->get_element_name(i);
823
26
        if (!root_node->children_column_exists(doris_name)) {
824
0
            missing_column_idxs.push_back(i);
825
0
            VLOG_DEBUG << "[ParquetReader] Missing column in schema: column_idx[" << i
826
0
                       << "], doris_name: " << doris_name << " (column not exists in root node)";
827
0
            continue;
828
0
        }
829
26
        auto file_name = root_node->children_file_column_name(doris_name);
830
831
        // Check if this is a SkipReadingReader: skip it when choosing the reference column,
832
        // because a SkipReadingReader doesn't know the actual data size in a nested context.
833
26
        bool is_skip_reader =
834
26
                dynamic_cast<SkipReadingReader*>(_child_readers[file_name].get()) != nullptr;
835
836
26
        if (is_skip_reader) {
837
            // Store SkipReadingReader columns to fill them later based on reference column size
838
4
            skip_reading_column_idxs.push_back(i);
839
4
            continue;
840
4
        }
841
842
        // Only add non-SkipReadingReader columns to _read_column_names
843
        // This ensures get_rep_level() and get_def_level() return valid levels
844
22
        _read_column_names.emplace_back(file_name);
845
846
22
        size_t field_rows = 0;
847
22
        bool field_eof = false;
848
22
        if (not_missing_column_id == -1) {
849
11
            not_missing_column_id = i;
850
11
            not_missing_orig_column_size = doris_field->size();
851
11
            RETURN_IF_ERROR(_child_readers[file_name]->read_column_data(
852
11
                    doris_field, doris_type, root_node->get_children_node(doris_name), filter_map,
853
11
                    batch_size, &field_rows, &field_eof, is_dict_filter));
854
11
            *read_rows = field_rows;
855
11
            *eof = field_eof;
856
            /*
857
             * Because data may span pages inside `_read_nested_column`, definition and repetition
858
             * levels can be incomplete. When filling the struct's null_map later, it is therefore
859
             * crucial to use the definition and repetition levels of the first column read
860
             * (since `_read_nested_column` is not called again for it).
861
             *
862
             * In theory any sub-column could be chosen to fill the null_map; selecting
863
             * the shortest one offers the best performance.
864
             */
865
11
        } else {
866
22
            while (field_rows < *read_rows && !field_eof) {
867
11
                size_t loop_rows = 0;
868
11
                RETURN_IF_ERROR(_child_readers[file_name]->read_column_data(
869
11
                        doris_field, doris_type, root_node->get_children_node(doris_name),
870
11
                        filter_map, *read_rows - field_rows, &loop_rows, &field_eof,
871
11
                        is_dict_filter));
872
11
                field_rows += loop_rows;
873
11
            }
874
11
            DCHECK_EQ(*read_rows, field_rows);
875
            //            DCHECK_EQ(*eof, field_eof);
876
11
        }
877
22
    }
878
879
11
    int64_t missing_column_sz = -1;
880
881
11
    if (not_missing_column_id == -1) {
882
        // All queried columns are missing in the file (e.g., all added after schema change)
883
        // We need to pick a column from _field_schema children that exists in the file for RL/DL reference
884
0
        std::string reference_file_column_name;
885
0
        std::unique_ptr<ParquetColumnReader>* reference_reader = nullptr;
886
887
0
        for (const auto& child : _field_schema->children) {
888
0
            auto it = _child_readers.find(child.name);
889
0
            if (it != _child_readers.end()) {
890
                // Skip SkipReadingReader as they don't have valid RL/DL
891
0
                bool is_skip_reader = dynamic_cast<SkipReadingReader*>(it->second.get()) != nullptr;
892
0
                if (!is_skip_reader) {
893
0
                    reference_file_column_name = child.name;
894
0
                    reference_reader = &(it->second);
895
0
                    break;
896
0
                }
897
0
            }
898
0
        }
899
900
0
        if (reference_reader != nullptr) {
901
            // Read the reference column to get correct RL/DL information
902
            // TODO: Optimize by only reading RL/DL without actual data decoding
903
904
            // We need to find the FieldSchema for the reference column from _field_schema children
905
0
            FieldSchema* ref_field_schema = nullptr;
906
0
            for (auto& child : _field_schema->children) {
907
0
                if (child.name == reference_file_column_name) {
908
0
                    ref_field_schema = &child;
909
0
                    break;
910
0
                }
911
0
            }
912
913
0
            if (ref_field_schema == nullptr) {
914
0
                return Status::InternalError(
915
0
                        "Cannot find field schema for reference column '{}' in struct '{}'",
916
0
                        reference_file_column_name, _field_schema->name);
917
0
            }
918
919
            // Create a temporary column to hold the data (we'll use its size for missing_column_sz)
920
0
            ColumnPtr temp_column = ref_field_schema->data_type->create_column();
921
0
            auto temp_type = ref_field_schema->data_type;
922
923
0
            size_t field_rows = 0;
924
0
            bool field_eof = false;
925
926
            // Use ConstNode for the reference column instead of looking up from root_node.
927
            // The reference column is only used to get RL/DL information for determining the number
928
            // of elements in the struct. It may be a column that has been dropped from the table
929
            // schema (e.g., 'removed' field), but still exists in older parquet files.
930
            // Since we don't need schema mapping for this column (we just need its RL/DL levels),
931
            // using ConstNode is safe and avoids the issue where the reference column doesn't exist
932
            // in root_node (because it was dropped from table schema).
933
0
            auto ref_child_node = TableSchemaChangeHelper::ConstNode::get_instance();
934
0
            not_missing_orig_column_size = temp_column->size();
935
936
0
            RETURN_IF_ERROR((*reference_reader)
937
0
                                    ->read_column_data(temp_column, temp_type, ref_child_node,
938
0
                                                       filter_map, batch_size, &field_rows,
939
0
                                                       &field_eof, is_dict_filter));
940
941
0
            *read_rows = field_rows;
942
0
            *eof = field_eof;
943
944
            // Store this reference column name for get_rep_level/get_def_level to use
945
0
            _read_column_names.emplace_back(reference_file_column_name);
946
947
0
            missing_column_sz = temp_column->size() - not_missing_orig_column_size;
948
0
        } else {
949
0
            return Status::Corruption(
950
0
                    "Cannot read struct '{}': all queried columns are missing and no reference "
951
0
                    "column found in file",
952
0
                    _field_schema->name);
953
0
        }
954
0
    }
955
956
    //  Note that missing_column_sz is not the same as *read_rows, because *read_rows counts rows, not nested elements.
957
    //  For example: suppose we have a column array<struct<a:int,b:string>>,
958
    //  where b is a newly added column, that is, a missing column.
959
    //  There are two rows of data in this column,
960
    //      [{1,null},{2,null},{3,null}]
961
    //      [{4,null},{5,null}]
962
    //  When you first read subcolumn a, you read 5 data items and the value of *read_rows is 2.
963
    //  You should insert 5 records into subcolumn b instead of 2.
964
11
    if (missing_column_sz == -1) {
965
11
        missing_column_sz = doris_struct.get_column(not_missing_column_id).size() -
966
11
                            not_missing_orig_column_size;
967
11
    }
968
969
    // Fill SkipReadingReader columns with the correct amount of data based on reference column
970
    // Let SkipReadingReader handle the data filling through its read_column_data method
971
11
    for (auto idx : skip_reading_column_idxs) {
972
4
        auto& doris_field = doris_struct.get_column_ptr(idx);
973
4
        auto& doris_type = const_cast<DataTypePtr&>(doris_struct_type->get_element(idx));
974
4
        auto& doris_name = const_cast<String&>(doris_struct_type->get_element_name(idx));
975
4
        auto file_name = root_node->children_file_column_name(doris_name);
976
977
4
        size_t field_rows = 0;
978
4
        bool field_eof = false;
979
4
        RETURN_IF_ERROR(_child_readers[file_name]->read_column_data(
980
4
                doris_field, doris_type, root_node->get_children_node(doris_name), filter_map,
981
4
                missing_column_sz, &field_rows, &field_eof, is_dict_filter, missing_column_sz));
982
4
    }
983
984
    // Fill truly missing columns (not in root_node) with null or default value
985
11
    for (auto idx : missing_column_idxs) {
986
0
        auto& doris_field = doris_struct.get_column_ptr(idx);
987
0
        auto& doris_type = doris_struct_type->get_element(idx);
988
0
        DCHECK(doris_type->is_nullable());
989
0
        auto mutable_column = doris_field->assume_mutable();
990
0
        auto* nullable_column = static_cast<ColumnNullable*>(mutable_column.get());
991
0
        nullable_column->insert_many_defaults(missing_column_sz);
992
0
    }
993
994
11
    if (null_map_ptr != nullptr) {
995
11
        fill_struct_null_map(_field_schema, *null_map_ptr, this->get_rep_level(),
996
11
                             this->get_def_level());
997
11
    }
998
11
#ifndef NDEBUG
999
11
    doris_column->sanity_check();
1000
11
#endif
1001
11
    return Status::OK();
1002
11
}
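
Two details above are worth isolating: missing sub-columns are padded by element count rather than row count (one row of array<struct<...>> can contribute several struct elements), and that count comes from how much a reference sub-column grew. A standalone sketch with vectors standing in for nullable columns:

#include <cstddef>
#include <optional>
#include <vector>

// Pad every missing field with nulls, sized by the growth of a reference
// sub-column that was actually read: its element count, not *read_rows.
static void pad_missing_fields(std::vector<std::vector<std::optional<int>>>& fields,
                               const std::vector<size_t>& missing_idxs,
                               size_t reference_size_before,
                               size_t reference_size_after) {
    const size_t missing_sz = reference_size_after - reference_size_before;
    for (size_t idx : missing_idxs) {
        fields[idx].insert(fields[idx].end(), missing_sz, std::nullopt);  // null padding
    }
}
// For the array<struct<a:int,b:string>> example in the comments above, reading
// [{1,..},{2,..},{3,..}] and [{4,..},{5,..}] grows column `a` by 5 elements,
// so column `b` receives 5 nulls even though *read_rows is 2.
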
1003
1004
template class ScalarColumnReader<true, true>;
1005
template class ScalarColumnReader<true, false>;
1006
template class ScalarColumnReader<false, true>;
1007
template class ScalarColumnReader<false, false>;
1008
1009
}; // namespace doris