Coverage Report

Created: 2026-04-16 17:24

be/src/format/parquet/vparquet_column_reader.cpp
 Line|  Count|Source
    1|       |// Licensed to the Apache Software Foundation (ASF) under one
    2|       |// or more contributor license agreements.  See the NOTICE file
    3|       |// distributed with this work for additional information
    4|       |// regarding copyright ownership.  The ASF licenses this file
    5|       |// to you under the Apache License, Version 2.0 (the
    6|       |// "License"); you may not use this file except in compliance
    7|       |// with the License.  You may obtain a copy of the License at
    8|       |//
    9|       |//   http://www.apache.org/licenses/LICENSE-2.0
   10|       |//
   11|       |// Unless required by applicable law or agreed to in writing,
   12|       |// software distributed under the License is distributed on an
   13|       |// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
   14|       |// KIND, either express or implied.  See the License for the
   15|       |// specific language governing permissions and limitations
   16|       |// under the License.
   17|       |
   18|       |#include "format/parquet/vparquet_column_reader.h"
   19|       |
   20|       |#include <gen_cpp/parquet_types.h>
   21|       |#include <limits.h>
   22|       |#include <sys/types.h>
   23|       |
   24|       |#include <algorithm>
   25|       |#include <utility>
   26|       |
   27|       |#include "common/status.h"
   28|       |#include "core/column/column.h"
   29|       |#include "core/column/column_array.h"
   30|       |#include "core/column/column_map.h"
   31|       |#include "core/column/column_nullable.h"
   32|       |#include "core/column/column_struct.h"
   33|       |#include "core/data_type/data_type_array.h"
   34|       |#include "core/data_type/data_type_map.h"
   35|       |#include "core/data_type/data_type_nullable.h"
   36|       |#include "core/data_type/data_type_struct.h"
   37|       |#include "core/data_type/define_primitive_type.h"
   38|       |#include "format/parquet/level_decoder.h"
   39|       |#include "format/parquet/schema_desc.h"
   40|       |#include "format/parquet/vparquet_column_chunk_reader.h"
   41|       |#include "io/fs/tracing_file_reader.h"
   42|       |#include "runtime/runtime_profile.h"
   43|       |
   44|       |namespace doris {
   45|       |static void fill_struct_null_map(FieldSchema* field, NullMap& null_map,
   46|       |                                 const std::vector<level_t>& rep_levels,
   47|     10|                                 const std::vector<level_t>& def_levels) {
   48|     10|    size_t num_levels = def_levels.size();
   49|     10|    DCHECK_EQ(num_levels, rep_levels.size());
   50|     10|    size_t origin_size = null_map.size();
   51|     10|    null_map.resize(origin_size + num_levels);
   52|     10|    size_t pos = origin_size;
   53|     24|    for (size_t i = 0; i < num_levels; ++i) {
   54|       |        // skip the levels affect its ancestor or its descendants
   55|     14|        if (def_levels[i] < field->repeated_parent_def_level ||
   56|     14|            rep_levels[i] > field->repetition_level) {
   57|      0|            continue;
   58|      0|        }
   59|     14|        if (def_levels[i] >= field->definition_level) {
   60|     14|            null_map[pos++] = 0;
   61|     14|        } else {
   62|      0|            null_map[pos++] = 1;
   63|      0|        }
   64|     14|    }
   65|     10|    null_map.resize(pos);
   66|     10|}
   67|       |
   68|       |static void fill_array_offset(FieldSchema* field, ColumnArray::Offsets64& offsets_data,
   69|       |                              NullMap* null_map_ptr, const std::vector<level_t>& rep_levels,
   70|      2|                              const std::vector<level_t>& def_levels) {
   71|      2|    size_t num_levels = rep_levels.size();
   72|      2|    DCHECK_EQ(num_levels, def_levels.size());
   73|      2|    size_t origin_size = offsets_data.size();
   74|      2|    offsets_data.resize(origin_size + num_levels);
   75|      2|    if (null_map_ptr != nullptr) {
   76|      2|        null_map_ptr->resize(origin_size + num_levels);
   77|      2|    }
   78|      2|    size_t offset_pos = origin_size - 1;
   79|      8|    for (size_t i = 0; i < num_levels; ++i) {
   80|       |        // skip the levels affect its ancestor or its descendants
   81|      6|        if (def_levels[i] < field->repeated_parent_def_level ||
   82|      6|            rep_levels[i] > field->repetition_level) {
   83|      0|            continue;
   84|      0|        }
   85|      6|        if (rep_levels[i] == field->repetition_level) {
   86|      4|            offsets_data[offset_pos]++;
   87|      4|            continue;
   88|      4|        }
   89|      2|        offset_pos++;
   90|      2|        offsets_data[offset_pos] = offsets_data[offset_pos - 1];
   91|      2|        if (def_levels[i] >= field->definition_level) {
   92|      2|            offsets_data[offset_pos]++;
   93|      2|        }
   94|      2|        if (def_levels[i] >= field->definition_level - 1) {
   95|      2|            (*null_map_ptr)[offset_pos] = 0;
   96|      2|        } else {
   97|      0|            (*null_map_ptr)[offset_pos] = 1;
   98|      0|        }
   99|      2|    }
  100|      2|    offsets_data.resize(offset_pos + 1);
  101|      2|    if (null_map_ptr != nullptr) {
  102|      2|        null_map_ptr->resize(offset_pos + 1);
  103|      2|    }
  104|      2|}
  105|       |
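Editorial illustration (not part of the instrumented file): a minimal, self-contained C++ sketch of the level arithmetic fill_array_offset performs, assuming a nullable array<int> column with repetition_level = 1, definition_level = 2, and repeated_parent_def_level = 0. FieldLevels and the hard-coded levels are hypothetical stand-ins for FieldSchema and real Parquet pages.

#include <cstdint>
#include <iostream>
#include <vector>

struct FieldLevels {
    int16_t repetition_level;          // rep level owned by this array field (assumed 1)
    int16_t definition_level;          // def level meaning "array present, non-empty" (assumed 2)
    int16_t repeated_parent_def_level; // below this, an ancestor is already null (assumed 0)
};

int main() {
    // Levels for four rows of a nullable array<int> column: [a, b], [], NULL, [c]
    std::vector<int16_t> rep = {0, 1, 0, 0, 0};
    std::vector<int16_t> def = {3, 3, 1, 0, 3};
    FieldLevels f {1, 2, 0};

    std::vector<uint64_t> offsets = {0}; // one sentinel entry, mirroring offset_pos = origin_size - 1
    std::vector<uint8_t> null_map;
    size_t pos = 0;
    for (size_t i = 0; i < rep.size(); ++i) {
        if (def[i] < f.repeated_parent_def_level || rep[i] > f.repetition_level) {
            continue; // level belongs to an ancestor or a descendant
        }
        if (rep[i] == f.repetition_level) { // another element of the current row
            offsets[pos]++;
            continue;
        }
        pos++; // rep < repetition_level: a new top-level row begins
        offsets.push_back(offsets[pos - 1]);
        if (def[i] >= f.definition_level) offsets[pos]++;    // non-empty array
        null_map.push_back(def[i] < f.definition_level - 1); // null only below definition_level - 1
    }
    for (size_t i = 1; i < offsets.size(); ++i) std::cout << offsets[i] << ' '; // 2 2 2 3
    std::cout << '\n';
    for (auto n : null_map) std::cout << int(n) << ' ';                         // 0 0 1 0
    std::cout << '\n';
}

Walking the levels for [a, b], [], NULL, [c] yields cumulative offsets 2 2 2 3 and null map 0 0 1 0: an empty array advances the offset cursor without incrementing the count, and only a definition level below definition_level - 1 marks the array itself null.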
  106|       |Status ParquetColumnReader::create(io::FileReaderSPtr file, FieldSchema* field,
  107|       |                                   const tparquet::RowGroup& row_group, const RowRanges& row_ranges,
  108|       |                                   const cctz::time_zone* ctz, io::IOContext* io_ctx,
  109|       |                                   std::unique_ptr<ParquetColumnReader>& reader,
  110|       |                                   size_t max_buf_size,
  111|       |                                   std::unordered_map<int, tparquet::OffsetIndex>& col_offsets,
  112|       |                                   RuntimeState* state, bool in_collection,
  113|       |                                   const std::set<uint64_t>& column_ids,
  114|    126|                                   const std::set<uint64_t>& filter_column_ids) {
  115|    126|    size_t total_rows = row_group.num_rows;
  116|    126|    if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
  117|      2|        std::unique_ptr<ParquetColumnReader> element_reader;
  118|      2|        RETURN_IF_ERROR(create(file, &field->children[0], row_group, row_ranges, ctz, io_ctx,
  119|      2|                               element_reader, max_buf_size, col_offsets, state, true, column_ids,
  120|      2|                               filter_column_ids));
  121|      2|        auto array_reader = ArrayColumnReader::create_unique(row_ranges, total_rows, ctz, io_ctx);
  122|      2|        element_reader->set_column_in_nested();
  123|      2|        RETURN_IF_ERROR(array_reader->init(std::move(element_reader), field));
  124|      2|        array_reader->_filter_column_ids = filter_column_ids;
  125|      2|        reader.reset(array_reader.release());
  126|    124|    } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
  127|      0|        std::unique_ptr<ParquetColumnReader> key_reader;
  128|      0|        std::unique_ptr<ParquetColumnReader> value_reader;
  129|       |
  130|      0|        if (column_ids.empty() ||
  131|      0|            column_ids.find(field->children[0].get_column_id()) != column_ids.end()) {
  132|       |            // Create key reader
  133|      0|            RETURN_IF_ERROR(create(file, &field->children[0], row_group, row_ranges, ctz, io_ctx,
  134|      0|                                   key_reader, max_buf_size, col_offsets, state, true, column_ids,
  135|      0|                                   filter_column_ids));
  136|      0|        } else {
  137|      0|            auto skip_reader = std::make_unique<SkipReadingReader>(row_ranges, total_rows, ctz,
  138|      0|                                                                   io_ctx, &field->children[0]);
  139|      0|            key_reader = std::move(skip_reader);
  140|      0|        }
  141|       |
  142|      0|        if (column_ids.empty() ||
  143|      0|            column_ids.find(field->children[1].get_column_id()) != column_ids.end()) {
  144|       |            // Create value reader
  145|      0|            RETURN_IF_ERROR(create(file, &field->children[1], row_group, row_ranges, ctz, io_ctx,
  146|      0|                                   value_reader, max_buf_size, col_offsets, state, true, column_ids,
  147|      0|                                   filter_column_ids));
  148|      0|        } else {
  149|      0|            auto skip_reader = std::make_unique<SkipReadingReader>(row_ranges, total_rows, ctz,
  150|      0|                                                                   io_ctx, &field->children[0]);
  151|      0|            value_reader = std::move(skip_reader);
  152|      0|        }
  153|       |
  154|      0|        auto map_reader = MapColumnReader::create_unique(row_ranges, total_rows, ctz, io_ctx);
  155|      0|        key_reader->set_column_in_nested();
  156|      0|        value_reader->set_column_in_nested();
  157|      0|        RETURN_IF_ERROR(map_reader->init(std::move(key_reader), std::move(value_reader), field));
  158|      0|        map_reader->_filter_column_ids = filter_column_ids;
  159|      0|        reader.reset(map_reader.release());
  160|    124|    } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
  161|     10|        std::unordered_map<std::string, std::unique_ptr<ParquetColumnReader>> child_readers;
  162|     10|        child_readers.reserve(field->children.size());
  163|     10|        int non_skip_reader_idx = -1;
  164|     34|        for (int i = 0; i < field->children.size(); ++i) {
  165|     24|            auto& child = field->children[i];
  166|     24|            std::unique_ptr<ParquetColumnReader> child_reader;
  167|     24|            if (column_ids.empty() || column_ids.find(child.get_column_id()) != column_ids.end()) {
  168|     16|                RETURN_IF_ERROR(create(file, &child, row_group, row_ranges, ctz, io_ctx,
  169|     16|                                       child_reader, max_buf_size, col_offsets, state,
  170|     16|                                       in_collection, column_ids, filter_column_ids));
  171|     16|                child_readers[child.name] = std::move(child_reader);
  172|       |                // Record the first non-SkippingReader
  173|     16|                if (non_skip_reader_idx == -1) {
  174|     10|                    non_skip_reader_idx = i;
  175|     10|                }
  176|     16|            } else {
  177|      8|                auto skip_reader = std::make_unique<SkipReadingReader>(row_ranges, total_rows, ctz,
  178|      8|                                                                       io_ctx, &child);
  179|      8|                skip_reader->_filter_column_ids = filter_column_ids;
  180|      8|                child_readers[child.name] = std::move(skip_reader);
  181|      8|            }
  182|     24|            child_readers[child.name]->set_column_in_nested();
  183|     24|        }
  184|       |        // If all children are SkipReadingReader, force the first child to call create
  185|     10|        if (non_skip_reader_idx == -1) {
  186|      0|            std::unique_ptr<ParquetColumnReader> child_reader;
  187|      0|            RETURN_IF_ERROR(create(file, &field->children[0], row_group, row_ranges, ctz, io_ctx,
  188|      0|                                   child_reader, max_buf_size, col_offsets, state, in_collection,
  189|      0|                                   column_ids, filter_column_ids));
  190|      0|            child_reader->set_column_in_nested();
  191|      0|            child_readers[field->children[0].name] = std::move(child_reader);
  192|      0|        }
  193|     10|        auto struct_reader = StructColumnReader::create_unique(row_ranges, total_rows, ctz, io_ctx);
  194|     10|        RETURN_IF_ERROR(struct_reader->init(std::move(child_readers), field));
  195|     10|        struct_reader->_filter_column_ids = filter_column_ids;
  196|     10|        reader.reset(struct_reader.release());
  197|    114|    } else {
  198|    114|        auto physical_index = field->physical_column_index;
  199|    114|        const tparquet::OffsetIndex* offset_index =
  200|    114|                col_offsets.find(physical_index) != col_offsets.end() ? &col_offsets[physical_index]
  201|    114|                                                                      : nullptr;
  202|       |
  203|    114|        const tparquet::ColumnChunk& chunk = row_group.columns[physical_index];
  204|    114|        if (in_collection) {
  205|      2|            if (offset_index == nullptr) {
  206|      2|                auto scalar_reader = ScalarColumnReader<true, false>::create_unique(
  207|      2|                        row_ranges, total_rows, chunk, offset_index, ctz, io_ctx);
  208|       |
  209|      2|                RETURN_IF_ERROR(scalar_reader->init(file, field, max_buf_size, state));
  210|      2|                scalar_reader->_filter_column_ids = filter_column_ids;
  211|      2|                reader.reset(scalar_reader.release());
  212|      2|            } else {
  213|      0|                auto scalar_reader = ScalarColumnReader<true, true>::create_unique(
  214|      0|                        row_ranges, total_rows, chunk, offset_index, ctz, io_ctx);
  215|       |
  216|      0|                RETURN_IF_ERROR(scalar_reader->init(file, field, max_buf_size, state));
  217|      0|                scalar_reader->_filter_column_ids = filter_column_ids;
  218|      0|                reader.reset(scalar_reader.release());
  219|      0|            }
  220|    112|        } else {
  221|    112|            if (offset_index == nullptr) {
  222|    112|                auto scalar_reader = ScalarColumnReader<false, false>::create_unique(
  223|    112|                        row_ranges, total_rows, chunk, offset_index, ctz, io_ctx);
  224|       |
  225|    112|                RETURN_IF_ERROR(scalar_reader->init(file, field, max_buf_size, state));
  226|    112|                scalar_reader->_filter_column_ids = filter_column_ids;
  227|    112|                reader.reset(scalar_reader.release());
  228|    112|            } else {
  229|      0|                auto scalar_reader = ScalarColumnReader<false, true>::create_unique(
  230|      0|                        row_ranges, total_rows, chunk, offset_index, ctz, io_ctx);
  231|       |
  232|      0|                RETURN_IF_ERROR(scalar_reader->init(file, field, max_buf_size, state));
  233|      0|                scalar_reader->_filter_column_ids = filter_column_ids;
  234|      0|                reader.reset(scalar_reader.release());
  235|      0|            }
  236|    112|        }
  237|    114|    }
  238|    126|    return Status::OK();
  239|    126|}
  240|       |
  241|       |void ParquetColumnReader::_generate_read_ranges(RowRange page_row_range,
  242|    186|                                                RowRanges* result_ranges) const {
  243|    186|    result_ranges->add(page_row_range);
  244|    186|    RowRanges::ranges_intersection(*result_ranges, _row_ranges, result_ranges);
  245|    186|}
  246|       |
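A hedged sketch of what _generate_read_ranges computes (RowRange and the intersect helper below are simplified stand-ins, not the report's real classes): the row span of the current page is clipped against the row ranges selected for this column, so only the surviving sub-ranges are decoded.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

using RowRange = std::pair<int64_t, int64_t>; // [from, to)

// Clip the page's row span against the ranges selected for this column.
std::vector<RowRange> intersect(RowRange page, const std::vector<RowRange>& selected) {
    std::vector<RowRange> out;
    for (const auto& r : selected) {
        int64_t from = std::max(page.first, r.first);
        int64_t to = std::min(page.second, r.second);
        if (from < to) out.push_back({from, to});
    }
    return out;
}

int main() {
    // The current page holds rows [100, 200); filtering kept [0, 150) and [180, 300).
    for (auto [from, to] : intersect({100, 200}, {{0, 150}, {180, 300}})) {
        std::cout << '[' << from << ", " << to << ") "; // prints [100, 150) [180, 200)
    }
    std::cout << '\n';
}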
  247|       |template <bool IN_COLLECTION, bool OFFSET_INDEX>
  248|       |Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::init(io::FileReaderSPtr file,
  249|       |                                                             FieldSchema* field,
  250|       |                                                             size_t max_buf_size,
  251|    114|                                                             RuntimeState* state) {
  252|    114|    _field_schema = field;
  253|    114|    auto& chunk_meta = _chunk_meta.meta_data;
  254|    114|    int64_t chunk_start = has_dict_page(chunk_meta) ? chunk_meta.dictionary_page_offset
  255|    114|                                                    : chunk_meta.data_page_offset;
  256|    114|    size_t chunk_len = chunk_meta.total_compressed_size;
  257|    114|    size_t prefetch_buffer_size = std::min(chunk_len, max_buf_size);
  258|    114|    if ((typeid_cast<doris::io::TracingFileReader*>(file.get()) &&
  259|    114|         typeid_cast<io::MergeRangeFileReader*>(
  260|     53|                 ((doris::io::TracingFileReader*)(file.get()))->inner_reader().get())) ||
  261|    114|        typeid_cast<io::MergeRangeFileReader*>(file.get())) {
  262|       |        // turn off prefetch data when using MergeRangeFileReader
  263|    114|        prefetch_buffer_size = 0;
  264|    114|    }
  265|    114|    _stream_reader = std::make_unique<io::BufferedFileStreamReader>(file, chunk_start, chunk_len,
  266|    114|                                                                    prefetch_buffer_size);
  267|    114|    ParquetPageReadContext ctx(
  268|    114|            (state == nullptr) ? true : state->query_options().enable_parquet_file_page_cache);
  269|       |
  270|    114|    _chunk_reader = std::make_unique<ColumnChunkReader<IN_COLLECTION, OFFSET_INDEX>>(
  271|    114|            _stream_reader.get(), &_chunk_meta, field, _offset_index, _total_rows, _io_ctx, ctx);
  272|    114|    RETURN_IF_ERROR(_chunk_reader->init());
  273|    114|    return Status::OK();
  274|    114|}
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE4initESt10shared_ptrINS_2io10FileReaderEEPNS_11FieldSchemaEmPNS_12RuntimeStateE
_ZN5doris18ScalarColumnReaderILb1ELb0EE4initESt10shared_ptrINS_2io10FileReaderEEPNS_11FieldSchemaEmPNS_12RuntimeStateE
 Line|  Count|Source
  251|      2|                                                             RuntimeState* state) {
  252|      2|    _field_schema = field;
  253|      2|    auto& chunk_meta = _chunk_meta.meta_data;
  254|      2|    int64_t chunk_start = has_dict_page(chunk_meta) ? chunk_meta.dictionary_page_offset
  255|      2|                                                    : chunk_meta.data_page_offset;
  256|      2|    size_t chunk_len = chunk_meta.total_compressed_size;
  257|      2|    size_t prefetch_buffer_size = std::min(chunk_len, max_buf_size);
  258|      2|    if ((typeid_cast<doris::io::TracingFileReader*>(file.get()) &&
  259|      2|         typeid_cast<io::MergeRangeFileReader*>(
  260|      0|                 ((doris::io::TracingFileReader*)(file.get()))->inner_reader().get())) ||
  261|      2|        typeid_cast<io::MergeRangeFileReader*>(file.get())) {
  262|       |        // turn off prefetch data when using MergeRangeFileReader
  263|      2|        prefetch_buffer_size = 0;
  264|      2|    }
  265|      2|    _stream_reader = std::make_unique<io::BufferedFileStreamReader>(file, chunk_start, chunk_len,
  266|      2|                                                                    prefetch_buffer_size);
  267|      2|    ParquetPageReadContext ctx(
  268|      2|            (state == nullptr) ? true : state->query_options().enable_parquet_file_page_cache);
  269|       |
  270|      2|    _chunk_reader = std::make_unique<ColumnChunkReader<IN_COLLECTION, OFFSET_INDEX>>(
  271|      2|            _stream_reader.get(), &_chunk_meta, field, _offset_index, _total_rows, _io_ctx, ctx);
  272|      2|    RETURN_IF_ERROR(_chunk_reader->init());
  273|      2|    return Status::OK();
  274|      2|}
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE4initESt10shared_ptrINS_2io10FileReaderEEPNS_11FieldSchemaEmPNS_12RuntimeStateE
_ZN5doris18ScalarColumnReaderILb0ELb0EE4initESt10shared_ptrINS_2io10FileReaderEEPNS_11FieldSchemaEmPNS_12RuntimeStateE
 Line|  Count|Source
  251|    112|                                                             RuntimeState* state) {
  252|    112|    _field_schema = field;
  253|    112|    auto& chunk_meta = _chunk_meta.meta_data;
  254|    112|    int64_t chunk_start = has_dict_page(chunk_meta) ? chunk_meta.dictionary_page_offset
  255|    112|                                                    : chunk_meta.data_page_offset;
  256|    112|    size_t chunk_len = chunk_meta.total_compressed_size;
  257|    112|    size_t prefetch_buffer_size = std::min(chunk_len, max_buf_size);
  258|    112|    if ((typeid_cast<doris::io::TracingFileReader*>(file.get()) &&
  259|    112|         typeid_cast<io::MergeRangeFileReader*>(
  260|     53|                 ((doris::io::TracingFileReader*)(file.get()))->inner_reader().get())) ||
  261|    112|        typeid_cast<io::MergeRangeFileReader*>(file.get())) {
  262|       |        // turn off prefetch data when using MergeRangeFileReader
  263|    112|        prefetch_buffer_size = 0;
  264|    112|    }
  265|    112|    _stream_reader = std::make_unique<io::BufferedFileStreamReader>(file, chunk_start, chunk_len,
  266|    112|                                                                    prefetch_buffer_size);
  267|    112|    ParquetPageReadContext ctx(
  268|    112|            (state == nullptr) ? true : state->query_options().enable_parquet_file_page_cache);
  269|       |
  270|    112|    _chunk_reader = std::make_unique<ColumnChunkReader<IN_COLLECTION, OFFSET_INDEX>>(
  271|    112|            _stream_reader.get(), &_chunk_meta, field, _offset_index, _total_rows, _io_ctx, ctx);
  272|    112|    RETURN_IF_ERROR(_chunk_reader->init());
  273|    112|    return Status::OK();
  274|    112|}
  275|       |
  276|       |template <bool IN_COLLECTION, bool OFFSET_INDEX>
  277|    156|Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::_skip_values(size_t num_values) {
  278|    156|    if (num_values == 0) {
  279|     54|        return Status::OK();
  280|     54|    }
  281|    102|    if (_chunk_reader->max_def_level() > 0) {
  282|    102|        LevelDecoder& def_decoder = _chunk_reader->def_level_decoder();
  283|    102|        size_t skipped = 0;
  284|    102|        size_t null_size = 0;
  285|    102|        size_t nonnull_size = 0;
  286|    217|        while (skipped < num_values) {
  287|    115|            level_t def_level = -1;
  288|    115|            size_t loop_skip = def_decoder.get_next_run(&def_level, num_values - skipped);
  289|    115|            if (loop_skip == 0) {
  290|      0|                std::stringstream ss;
  291|      0|                auto& bit_reader = def_decoder.rle_decoder().bit_reader();
  292|      0|                ss << "def_decoder buffer (hex): ";
  293|      0|                for (size_t i = 0; i < bit_reader.max_bytes(); ++i) {
  294|      0|                    ss << std::hex << std::setw(2) << std::setfill('0')
  295|      0|                       << static_cast<int>(bit_reader.buffer()[i]) << " ";
  296|      0|                }
  297|      0|                LOG(WARNING) << ss.str();
  298|      0|                return Status::InternalError("Failed to decode definition level.");
  299|      0|            }
  300|    115|            if (def_level < _field_schema->definition_level) {
  301|      8|                null_size += loop_skip;
  302|    107|            } else {
  303|    107|                nonnull_size += loop_skip;
  304|    107|            }
  305|    115|            skipped += loop_skip;
  306|    115|        }
  307|    102|        if (null_size > 0) {
  308|      5|            RETURN_IF_ERROR(_chunk_reader->skip_values(null_size, false));
  309|      5|        }
  310|    102|        if (nonnull_size > 0) {
  311|    101|            RETURN_IF_ERROR(_chunk_reader->skip_values(nonnull_size, true));
  312|    101|        }
  313|    102|    } else {
  314|      0|        RETURN_IF_ERROR(_chunk_reader->skip_values(num_values));
  315|      0|    }
  316|    102|    return Status::OK();
  317|    102|}
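Illustration of the run-based split in _skip_values (editorial sketch; get_next_run below is a toy stand-in for the report's LevelDecoder): values to be skipped are partitioned into null and non-null counts, because only non-null values occupy space in the data page and must actually be skipped there.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Toy RLE-style reader: returns the length of the next run of equal levels.
size_t get_next_run(const std::vector<int16_t>& levels, size_t pos, size_t max,
                    int16_t* level) {
    size_t n = 0;
    *level = levels[pos];
    while (n < max && pos + n < levels.size() && levels[pos + n] == *level) n++;
    return n;
}

int main() {
    const int16_t max_def = 1;                     // def < 1 means the value is null
    std::vector<int16_t> def = {1, 1, 1, 0, 0, 1}; // 4 stored values, 2 nulls
    size_t skipped = 0, null_size = 0, nonnull_size = 0, num_values = def.size();
    while (skipped < num_values) {
        int16_t level;
        size_t run = get_next_run(def, skipped, num_values - skipped, &level);
        (level < max_def ? null_size : nonnull_size) += run;
        skipped += run;
    }
    // Only the non-null count consumes bytes from the data page.
    std::cout << "null=" << null_size << " nonnull=" << nonnull_size << '\n'; // null=2 nonnull=4
}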
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE12_skip_valuesEm
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb0EE12_skip_valuesEm
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE12_skip_valuesEm
_ZN5doris18ScalarColumnReaderILb0ELb0EE12_skip_valuesEm
  318|       |
  319|       |template <bool IN_COLLECTION, bool OFFSET_INDEX>
  320|       |Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::_read_values(size_t num_values,
  321|       |                                                                     ColumnPtr& doris_column,
  322|       |                                                                     DataTypePtr& type,
  323|       |                                                                     FilterMap& filter_map,
  324|    156|                                                                     bool is_dict_filter) {
  325|    156|    if (num_values == 0) {
  326|      0|        return Status::OK();
  327|      0|    }
  328|    156|    MutableColumnPtr data_column;
  329|    156|    std::vector<uint16_t> null_map;
  330|    156|    NullMap* map_data_column = nullptr;
  331|    156|    if (doris_column->is_nullable()) {
  332|    154|        SCOPED_RAW_TIMER(&_decode_null_map_time);
  333|       |        // doris_column either originates from a mutable block in vparquet_group_reader
  334|       |        // or is a newly created ColumnPtr, and therefore can be modified.
  335|    154|        auto* nullable_column =
  336|    154|                assert_cast<ColumnNullable*>(const_cast<IColumn*>(doris_column.get()));
  337|       |
  338|    154|        data_column = nullable_column->get_nested_column_ptr();
  339|    154|        map_data_column = &(nullable_column->get_null_map_data());
  340|    154|        if (_chunk_reader->max_def_level() > 0) {
  341|    134|            LevelDecoder& def_decoder = _chunk_reader->def_level_decoder();
  342|    134|            size_t has_read = 0;
  343|    134|            bool prev_is_null = true;
  344|    268|            while (has_read < num_values) {
  345|    134|                level_t def_level;
  346|    134|                size_t loop_read = def_decoder.get_next_run(&def_level, num_values - has_read);
  347|    134|                if (loop_read == 0) {
  348|      0|                    std::stringstream ss;
  349|      0|                    auto& bit_reader = def_decoder.rle_decoder().bit_reader();
  350|      0|                    ss << "def_decoder buffer (hex): ";
  351|      0|                    for (size_t i = 0; i < bit_reader.max_bytes(); ++i) {
  352|      0|                        ss << std::hex << std::setw(2) << std::setfill('0')
  353|      0|                           << static_cast<int>(bit_reader.buffer()[i]) << " ";
  354|      0|                    }
  355|      0|                    LOG(WARNING) << ss.str();
  356|      0|                    return Status::InternalError("Failed to decode definition level.");
  357|      0|                }
  358|       |
  359|    134|                bool is_null = def_level < _field_schema->definition_level;
  360|    134|                if (!(prev_is_null ^ is_null)) {
  361|     17|                    null_map.emplace_back(0);
  362|     17|                }
  363|    134|                size_t remaining = loop_read;
  364|    134|                while (remaining > USHRT_MAX) {
  365|      0|                    null_map.emplace_back(USHRT_MAX);
  366|      0|                    null_map.emplace_back(0);
  367|      0|                    remaining -= USHRT_MAX;
  368|      0|                }
  369|    134|                null_map.emplace_back((u_short)remaining);
  370|    134|                prev_is_null = is_null;
  371|    134|                has_read += loop_read;
  372|    134|            }
  373|    134|        }
  374|    154|    } else {
  375|      2|        if (_chunk_reader->max_def_level() > 0) {
  376|      0|            return Status::Corruption("Not nullable column has null values in parquet file");
  377|      0|        }
  378|      2|        data_column = doris_column->assume_mutable();
  379|      2|    }
  380|    156|    if (null_map.size() == 0) {
  381|     22|        size_t remaining = num_values;
  382|     22|        while (remaining > USHRT_MAX) {
  383|      0|            null_map.emplace_back(USHRT_MAX);
  384|      0|            null_map.emplace_back(0);
  385|      0|            remaining -= USHRT_MAX;
  386|      0|        }
  387|     22|        null_map.emplace_back((u_short)remaining);
  388|     22|    }
  389|    156|    ColumnSelectVector select_vector;
  390|    156|    {
  391|    156|        SCOPED_RAW_TIMER(&_decode_null_map_time);
  392|    156|        RETURN_IF_ERROR(select_vector.init(null_map, num_values, map_data_column, &filter_map,
  393|    156|                                           _filter_map_index));
  394|    156|        _filter_map_index += num_values;
  395|    156|    }
  396|      0|    return _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter);
  397|    156|}
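Editorial sketch of the run-length null map that _read_values builds for ColumnSelectVector::init (append_run below is a hypothetical helper; the chunking rule follows the report): runs alternate non-null/null starting with non-null, a 0-length run is inserted to flip parity when the next run has the same nullness as the previous one, and runs longer than USHRT_MAX are split with a 0 separator.

#include <climits>
#include <cstdint>
#include <iostream>
#include <vector>

void append_run(std::vector<uint16_t>& null_map, bool& prev_is_null, bool is_null,
                size_t run) {
    if (!(prev_is_null ^ is_null)) null_map.push_back(0); // same parity: insert an empty run
    while (run > USHRT_MAX) {                             // chunk very long runs
        null_map.push_back(USHRT_MAX);
        null_map.push_back(0);
        run -= USHRT_MAX;
    }
    null_map.push_back(static_cast<uint16_t>(run));
    prev_is_null = is_null;
}

int main() {
    std::vector<uint16_t> null_map;
    bool prev_is_null = true; // as in _read_values: the first encoded run is non-null
    append_run(null_map, prev_is_null, /*is_null=*/false, 3); // 3 values
    append_run(null_map, prev_is_null, /*is_null=*/true, 2);  // 2 nulls
    append_run(null_map, prev_is_null, /*is_null=*/false, 1); // 1 value
    for (auto v : null_map) std::cout << v << ' ';            // prints: 3 2 1
    std::cout << '\n';
}

The decoder can then walk "3 non-null, 2 null, 1 non-null" without re-reading definition levels.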
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE12_read_valuesEmRNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEb
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb0EE12_read_valuesEmRNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEb
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE12_read_valuesEmRNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEb
_ZN5doris18ScalarColumnReaderILb0ELb0EE12_read_valuesEmRNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEb
  398|       |
  399|       |/**
  400|       | * Load the nested column data of complex type.
  401|       | * A row of complex type may be stored across two(or more) pages, and the parameter `align_rows` indicates that
  402|       | * whether the reader should read the remaining value of the last row in previous page.
  403|       | */
  404|       |template <bool IN_COLLECTION, bool OFFSET_INDEX>
  405|       |Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::_read_nested_column(
  406|       |        ColumnPtr& doris_column, DataTypePtr& type, FilterMap& filter_map, size_t batch_size,
  407|      8|        size_t* read_rows, bool* eof, bool is_dict_filter) {
  408|      8|    _rep_levels.clear();
  409|      8|    _def_levels.clear();
  410|       |
  411|       |    // Handle nullable columns
  412|      8|    MutableColumnPtr data_column;
  413|      8|    NullMap* map_data_column = nullptr;
  414|      8|    if (doris_column->is_nullable()) {
  415|      8|        SCOPED_RAW_TIMER(&_decode_null_map_time);
  416|       |        // doris_column either originates from a mutable block in vparquet_group_reader
  417|       |        // or is a newly created ColumnPtr, and therefore can be modified.
  418|      8|        auto* nullable_column =
  419|      8|                const_cast<ColumnNullable*>(assert_cast<const ColumnNullable*>(doris_column.get()));
  420|      8|        data_column = nullable_column->get_nested_column_ptr();
  421|      8|        map_data_column = &(nullable_column->get_null_map_data());
  422|      8|    } else {
  423|      0|        if (_field_schema->data_type->is_nullable()) {
  424|      0|            return Status::Corruption("Not nullable column has null values in parquet file");
  425|      0|        }
  426|      0|        data_column = doris_column->assume_mutable();
  427|      0|    }
  428|       |
  429|      8|    std::vector<uint16_t> null_map;
  430|      8|    std::unordered_set<size_t> ancestor_null_indices;
  431|      8|    std::vector<uint8_t> nested_filter_map_data;
  432|       |
  433|      8|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
  434|      8|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
  435|      8|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
  436|      8|        if (filter_map.has_filter()) {
  437|      0|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
  438|      0|                                           _rep_levels.size(), nested_filter_map_data,
  439|      0|                                           &nested_filter_map));
  440|      0|        }
  441|       |
  442|      8|        null_map.clear();
  443|      8|        ancestor_null_indices.clear();
  444|      8|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
  445|      8|                                            ancestor_null_indices));
  446|       |
  447|      8|        ColumnSelectVector select_vector;
  448|      8|        {
  449|      8|            SCOPED_RAW_TIMER(&_decode_null_map_time);
  450|      8|            RETURN_IF_ERROR(select_vector.init(
  451|      8|                    null_map,
  452|      8|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
  453|      8|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
  454|      8|        }
  455|       |
  456|      8|        RETURN_IF_ERROR(
  457|      8|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
  458|      8|        if (ancestor_null_indices.size() != 0) {
  459|      0|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
  460|      0|        }
  461|      8|        if (filter_map.has_filter()) {
  462|      0|            auto new_rep_sz = before_rep_level_sz;
  463|      0|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
  464|      0|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
  465|      0|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
  466|      0|                    _def_levels[new_rep_sz] = _def_levels[idx];
  467|      0|                    new_rep_sz++;
  468|      0|                }
  469|      0|            }
  470|      0|            _rep_levels.resize(new_rep_sz);
  471|      0|            _def_levels.resize(new_rep_sz);
  472|      0|        }
  473|      8|        return Status::OK();
  474|      8|    };
Unexecuted instantiation: _ZZN5doris18ScalarColumnReaderILb1ELb1EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbbENKUlmmE_clEmm
_ZZN5doris18ScalarColumnReaderILb1ELb0EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbbENKUlmmE_clEmm
 Line|  Count|Source
  433|      2|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
  434|      2|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
  435|      2|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
  436|      2|        if (filter_map.has_filter()) {
  437|      0|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
  438|      0|                                           _rep_levels.size(), nested_filter_map_data,
  439|      0|                                           &nested_filter_map));
  440|      0|        }
  441|       |
  442|      2|        null_map.clear();
  443|      2|        ancestor_null_indices.clear();
  444|      2|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
  445|      2|                                            ancestor_null_indices));
  446|       |
  447|      2|        ColumnSelectVector select_vector;
  448|      2|        {
  449|      2|            SCOPED_RAW_TIMER(&_decode_null_map_time);
  450|      2|            RETURN_IF_ERROR(select_vector.init(
  451|      2|                    null_map,
  452|      2|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
  453|      2|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
  454|      2|        }
  455|       |
  456|      2|        RETURN_IF_ERROR(
  457|      2|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
  458|      2|        if (ancestor_null_indices.size() != 0) {
  459|      0|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
  460|      0|        }
  461|      2|        if (filter_map.has_filter()) {
  462|      0|            auto new_rep_sz = before_rep_level_sz;
  463|      0|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
  464|      0|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
  465|      0|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
  466|      0|                    _def_levels[new_rep_sz] = _def_levels[idx];
  467|      0|                    new_rep_sz++;
  468|      0|                }
  469|      0|            }
  470|      0|            _rep_levels.resize(new_rep_sz);
  471|      0|            _def_levels.resize(new_rep_sz);
  472|      0|        }
  473|      2|        return Status::OK();
  474|      2|    };
Unexecuted instantiation: _ZZN5doris18ScalarColumnReaderILb0ELb1EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbbENKUlmmE_clEmm
_ZZN5doris18ScalarColumnReaderILb0ELb0EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbbENKUlmmE_clEmm
 Line|  Count|Source
  433|      6|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
  434|      6|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
  435|      6|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
  436|      6|        if (filter_map.has_filter()) {
  437|      0|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
  438|      0|                                           _rep_levels.size(), nested_filter_map_data,
  439|      0|                                           &nested_filter_map));
  440|      0|        }
  441|       |
  442|      6|        null_map.clear();
  443|      6|        ancestor_null_indices.clear();
  444|      6|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
  445|      6|                                            ancestor_null_indices));
  446|       |
  447|      6|        ColumnSelectVector select_vector;
  448|      6|        {
  449|      6|            SCOPED_RAW_TIMER(&_decode_null_map_time);
  450|      6|            RETURN_IF_ERROR(select_vector.init(
  451|      6|                    null_map,
  452|      6|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
  453|      6|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
  454|      6|        }
  455|       |
  456|      6|        RETURN_IF_ERROR(
  457|      6|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
  458|      6|        if (ancestor_null_indices.size() != 0) {
  459|      0|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
  460|      0|        }
  461|      6|        if (filter_map.has_filter()) {
  462|      0|            auto new_rep_sz = before_rep_level_sz;
  463|      0|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
  464|      0|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
  465|      0|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
  466|      0|                    _def_levels[new_rep_sz] = _def_levels[idx];
  467|      0|                    new_rep_sz++;
  468|      0|                }
  469|      0|            }
  470|      0|            _rep_levels.resize(new_rep_sz);
  471|      0|            _def_levels.resize(new_rep_sz);
  472|      0|        }
  473|      6|        return Status::OK();
  474|      6|    };
  475|       |
  476|     10|    while (_current_range_idx < _row_ranges.range_size()) {
  477|      8|        size_t left_row =
  478|      8|                std::max(_current_row_index, _row_ranges.get_range_from(_current_range_idx));
  479|      8|        size_t right_row = std::min(left_row + batch_size - *read_rows,
  480|      8|                                    (size_t)_row_ranges.get_range_to(_current_range_idx));
  481|      8|        _current_row_index = left_row;
  482|      8|        RETURN_IF_ERROR(_chunk_reader->seek_to_nested_row(left_row));
  483|      8|        size_t load_rows = 0;
  484|      8|        bool cross_page = false;
  485|      8|        size_t before_rep_level_sz = _rep_levels.size();
  486|      8|        RETURN_IF_ERROR(_chunk_reader->load_page_nested_rows(_rep_levels, right_row - left_row,
  487|      8|                                                             &load_rows, &cross_page));
  488|      8|        RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index));
  489|      8|        _filter_map_index += load_rows;
  490|      8|        while (cross_page) {
  491|      0|            before_rep_level_sz = _rep_levels.size();
  492|      0|            RETURN_IF_ERROR(_chunk_reader->load_cross_page_nested_row(_rep_levels, &cross_page));
  493|      0|            RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index - 1));
  494|      0|        }
  495|      8|        *read_rows += load_rows;
  496|      8|        _current_row_index += load_rows;
  497|      8|        _current_range_idx += (_current_row_index == _row_ranges.get_range_to(_current_range_idx));
  498|      8|        if (*read_rows == batch_size) {
  499|      6|            break;
  500|      6|        }
  501|      8|    }
  502|      8|    *eof = _current_range_idx == _row_ranges.range_size();
  503|      8|    return Status::OK();
  504|      8|}
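Editorial sketch of the row accounting behind the cross_page loop above (count_row_starts is a hypothetical helper, not an API from the report): in Parquet, a value with rep_level == 0 opens a new top-level row, so a page whose successor begins with rep_level > 0 holds only part of its last row, and the reader must keep loading until the row closes.

#include <cstdint>
#include <iostream>
#include <vector>

// A value with rep_level == 0 opens a new top-level row.
size_t count_row_starts(const std::vector<int16_t>& rep_levels) {
    size_t rows = 0;
    for (int16_t r : rep_levels) rows += (r == 0);
    return rows;
}

int main() {
    // Rows [a, b, c] and [d, e, f], with [d, e, f] split across two pages.
    std::vector<int16_t> page1 = {0, 1, 1, 0};    // a b c | d
    std::vector<int16_t> page2 = {1, 1};          // e f: no leading 0, pure continuation
    std::cout << count_row_starts(page1) << '\n'; // 2 rows start in page1
    std::cout << count_row_starts(page2) << '\n'; // 0 rows start in page2
}

Because page2 opens with rep_level 1, the row begun at the end of page1 is not finished there; this is the situation the cross_page flag and load_cross_page_nested_row handle.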
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbb
_ZN5doris18ScalarColumnReaderILb1ELb0EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbb
 Line|  Count|Source
  407|      2|        size_t* read_rows, bool* eof, bool is_dict_filter) {
  408|      2|    _rep_levels.clear();
  409|      2|    _def_levels.clear();
  410|       |
  411|       |    // Handle nullable columns
  412|      2|    MutableColumnPtr data_column;
  413|      2|    NullMap* map_data_column = nullptr;
  414|      2|    if (doris_column->is_nullable()) {
  415|      2|        SCOPED_RAW_TIMER(&_decode_null_map_time);
  416|       |        // doris_column either originates from a mutable block in vparquet_group_reader
  417|       |        // or is a newly created ColumnPtr, and therefore can be modified.
  418|      2|        auto* nullable_column =
  419|      2|                const_cast<ColumnNullable*>(assert_cast<const ColumnNullable*>(doris_column.get()));
  420|      2|        data_column = nullable_column->get_nested_column_ptr();
  421|      2|        map_data_column = &(nullable_column->get_null_map_data());
  422|      2|    } else {
  423|      0|        if (_field_schema->data_type->is_nullable()) {
  424|      0|            return Status::Corruption("Not nullable column has null values in parquet file");
  425|      0|        }
  426|      0|        data_column = doris_column->assume_mutable();
  427|      0|    }
  428|       |
  429|      2|    std::vector<uint16_t> null_map;
  430|      2|    std::unordered_set<size_t> ancestor_null_indices;
  431|      2|    std::vector<uint8_t> nested_filter_map_data;
  432|       |
  433|      2|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
  434|      2|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
  435|      2|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
  436|      2|        if (filter_map.has_filter()) {
  437|      2|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
  438|      2|                                           _rep_levels.size(), nested_filter_map_data,
  439|      2|                                           &nested_filter_map));
  440|      2|        }
  441|       |
  442|      2|        null_map.clear();
  443|      2|        ancestor_null_indices.clear();
  444|      2|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
  445|      2|                                            ancestor_null_indices));
  446|       |
  447|      2|        ColumnSelectVector select_vector;
  448|      2|        {
  449|      2|            SCOPED_RAW_TIMER(&_decode_null_map_time);
  450|      2|            RETURN_IF_ERROR(select_vector.init(
  451|      2|                    null_map,
  452|      2|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
  453|      2|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
  454|      2|        }
  455|       |
  456|      2|        RETURN_IF_ERROR(
  457|      2|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
  458|      2|        if (ancestor_null_indices.size() != 0) {
  459|      2|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
  460|      2|        }
  461|      2|        if (filter_map.has_filter()) {
  462|      2|            auto new_rep_sz = before_rep_level_sz;
  463|      2|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
  464|      2|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
  465|      2|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
  466|      2|                    _def_levels[new_rep_sz] = _def_levels[idx];
  467|      2|                    new_rep_sz++;
  468|      2|                }
  469|      2|            }
  470|      2|            _rep_levels.resize(new_rep_sz);
  471|      2|            _def_levels.resize(new_rep_sz);
  472|      2|        }
  473|      2|        return Status::OK();
  474|      2|    };
  475|       |
  476|      2|    while (_current_range_idx < _row_ranges.range_size()) {
  477|      2|        size_t left_row =
  478|      2|                std::max(_current_row_index, _row_ranges.get_range_from(_current_range_idx));
  479|      2|        size_t right_row = std::min(left_row + batch_size - *read_rows,
  480|      2|                                    (size_t)_row_ranges.get_range_to(_current_range_idx));
  481|      2|        _current_row_index = left_row;
  482|      2|        RETURN_IF_ERROR(_chunk_reader->seek_to_nested_row(left_row));
  483|      2|        size_t load_rows = 0;
  484|      2|        bool cross_page = false;
  485|      2|        size_t before_rep_level_sz = _rep_levels.size();
  486|      2|        RETURN_IF_ERROR(_chunk_reader->load_page_nested_rows(_rep_levels, right_row - left_row,
  487|      2|                                                             &load_rows, &cross_page));
  488|      2|        RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index));
  489|      2|        _filter_map_index += load_rows;
  490|      2|        while (cross_page) {
  491|      0|            before_rep_level_sz = _rep_levels.size();
  492|      0|            RETURN_IF_ERROR(_chunk_reader->load_cross_page_nested_row(_rep_levels, &cross_page));
  493|      0|            RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index - 1));
  494|      0|        }
  495|      2|        *read_rows += load_rows;
  496|      2|        _current_row_index += load_rows;
  497|      2|        _current_range_idx += (_current_row_index == _row_ranges.get_range_to(_current_range_idx));
  498|      2|        if (*read_rows == batch_size) {
  499|      2|            break;
  500|      2|        }
  501|      2|    }
  502|      2|    *eof = _current_range_idx == _row_ranges.range_size();
  503|      2|    return Status::OK();
  504|      2|}
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbb
_ZN5doris18ScalarColumnReaderILb0ELb0EE19_read_nested_columnERNS_3COWINS_7IColumnEE13immutable_ptrIS3_EERSt10shared_ptrIKNS_9IDataTypeEERNS_9FilterMapEmPmPbb
 Line|  Count|Source
  407|      6|        size_t* read_rows, bool* eof, bool is_dict_filter) {
  408|      6|    _rep_levels.clear();
  409|      6|    _def_levels.clear();
  410|       |
  411|       |    // Handle nullable columns
  412|      6|    MutableColumnPtr data_column;
  413|      6|    NullMap* map_data_column = nullptr;
  414|      6|    if (doris_column->is_nullable()) {
  415|      6|        SCOPED_RAW_TIMER(&_decode_null_map_time);
  416|       |        // doris_column either originates from a mutable block in vparquet_group_reader
  417|       |        // or is a newly created ColumnPtr, and therefore can be modified.
  418|      6|        auto* nullable_column =
  419|      6|                const_cast<ColumnNullable*>(assert_cast<const ColumnNullable*>(doris_column.get()));
  420|      6|        data_column = nullable_column->get_nested_column_ptr();
  421|      6|        map_data_column = &(nullable_column->get_null_map_data());
  422|      6|    } else {
  423|      0|        if (_field_schema->data_type->is_nullable()) {
  424|      0|            return Status::Corruption("Not nullable column has null values in parquet file");
  425|      0|        }
  426|      0|        data_column = doris_column->assume_mutable();
  427|      0|    }
  428|       |
  429|      6|    std::vector<uint16_t> null_map;
  430|      6|    std::unordered_set<size_t> ancestor_null_indices;
  431|      6|    std::vector<uint8_t> nested_filter_map_data;
  432|       |
  433|      6|    auto read_and_fill_data = [&](size_t before_rep_level_sz, size_t filter_map_index) {
  434|      6|        RETURN_IF_ERROR(_chunk_reader->fill_def(_def_levels));
  435|      6|        std::unique_ptr<FilterMap> nested_filter_map = std::make_unique<FilterMap>();
  436|      6|        if (filter_map.has_filter()) {
  437|      6|            RETURN_IF_ERROR(gen_filter_map(filter_map, filter_map_index, before_rep_level_sz,
  438|      6|                                           _rep_levels.size(), nested_filter_map_data,
  439|      6|                                           &nested_filter_map));
  440|      6|        }
  441|       |
  442|      6|        null_map.clear();
  443|      6|        ancestor_null_indices.clear();
  444|      6|        RETURN_IF_ERROR(gen_nested_null_map(before_rep_level_sz, _rep_levels.size(), null_map,
  445|      6|                                            ancestor_null_indices));
  446|       |
  447|      6|        ColumnSelectVector select_vector;
  448|      6|        {
  449|      6|            SCOPED_RAW_TIMER(&_decode_null_map_time);
  450|      6|            RETURN_IF_ERROR(select_vector.init(
  451|      6|                    null_map,
  452|      6|                    _rep_levels.size() - before_rep_level_sz - ancestor_null_indices.size(),
  453|      6|                    map_data_column, nested_filter_map.get(), 0, &ancestor_null_indices));
  454|      6|        }
  455|       |
  456|      6|        RETURN_IF_ERROR(
  457|      6|                _chunk_reader->decode_values(data_column, type, select_vector, is_dict_filter));
  458|      6|        if (ancestor_null_indices.size() != 0) {
  459|      6|            RETURN_IF_ERROR(_chunk_reader->skip_values(ancestor_null_indices.size(), false));
  460|      6|        }
  461|      6|        if (filter_map.has_filter()) {
  462|      6|            auto new_rep_sz = before_rep_level_sz;
  463|      6|            for (size_t idx = before_rep_level_sz; idx < _rep_levels.size(); idx++) {
  464|      6|                if (nested_filter_map_data[idx - before_rep_level_sz]) {
  465|      6|                    _rep_levels[new_rep_sz] = _rep_levels[idx];
  466|      6|                    _def_levels[new_rep_sz] = _def_levels[idx];
  467|      6|                    new_rep_sz++;
  468|      6|                }
  469|      6|            }
  470|      6|            _rep_levels.resize(new_rep_sz);
  471|      6|            _def_levels.resize(new_rep_sz);
  472|      6|        }
  473|      6|        return Status::OK();
  474|      6|    };
  475|       |
  476|      8|    while (_current_range_idx < _row_ranges.range_size()) {
  477|      6|        size_t left_row =
  478|      6|                std::max(_current_row_index, _row_ranges.get_range_from(_current_range_idx));
  479|      6|        size_t right_row = std::min(left_row + batch_size - *read_rows,
  480|      6|                                    (size_t)_row_ranges.get_range_to(_current_range_idx));
  481|      6|        _current_row_index = left_row;
  482|      6|        RETURN_IF_ERROR(_chunk_reader->seek_to_nested_row(left_row));
  483|      6|        size_t load_rows = 0;
  484|      6|        bool cross_page = false;
  485|      6|        size_t before_rep_level_sz = _rep_levels.size();
  486|      6|        RETURN_IF_ERROR(_chunk_reader->load_page_nested_rows(_rep_levels, right_row - left_row,
  487|      6|                                                             &load_rows, &cross_page));
  488|      6|        RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index));
  489|      6|        _filter_map_index += load_rows;
  490|      6|        while (cross_page) {
  491|      0|            before_rep_level_sz = _rep_levels.size();
  492|      0|            RETURN_IF_ERROR(_chunk_reader->load_cross_page_nested_row(_rep_levels, &cross_page));
  493|      0|            RETURN_IF_ERROR(read_and_fill_data(before_rep_level_sz, _filter_map_index - 1));
  494|      0|        }
  495|      6|        *read_rows += load_rows;
  496|      6|        _current_row_index += load_rows;
  497|      6|        _current_range_idx += (_current_row_index == _row_ranges.get_range_to(_current_range_idx));
  498|      6|        if (*read_rows == batch_size) {
  499|      4|            break;
  500|      4|        }
  501|      6|    }
  502|      6|    *eof = _current_range_idx == _row_ranges.range_size();
  503|      6|    return Status::OK();
  504|      6|}
  505|       |
  506|       |template <bool IN_COLLECTION, bool OFFSET_INDEX>
  507|       |Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::read_dict_values_to_column(
  508|      2|        MutableColumnPtr& doris_column, bool* has_dict) {
  509|      2|    bool loaded;
  510|      2|    RETURN_IF_ERROR(_try_load_dict_page(&loaded, has_dict));
  511|      2|    if (loaded && *has_dict) {
  512|      2|        return _chunk_reader->read_dict_values_to_column(doris_column);
  513|      2|    }
  514|      0|    return Status::OK();
  515|      2|}
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE26read_dict_values_to_columnERNS_3COWINS_7IColumnEE11mutable_ptrIS3_EEPb
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb0EE26read_dict_values_to_columnERNS_3COWINS_7IColumnEE11mutable_ptrIS3_EEPb
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE26read_dict_values_to_columnERNS_3COWINS_7IColumnEE11mutable_ptrIS3_EEPb
_ZN5doris18ScalarColumnReaderILb0ELb0EE26read_dict_values_to_columnERNS_3COWINS_7IColumnEE11mutable_ptrIS3_EEPb
  516|       |template <bool IN_COLLECTION, bool OFFSET_INDEX>
  517|       |Result<MutableColumnPtr>
  518|       |ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::convert_dict_column_to_string_column(
  519|      0|        const ColumnInt32* dict_column) {
  520|      0|    return _chunk_reader->convert_dict_column_to_string_column(dict_column);
  521|      0|}
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb1EE36convert_dict_column_to_string_columnEPKNS_12ColumnVectorILNS_13PrimitiveTypeE5EEE
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb1ELb0EE36convert_dict_column_to_string_columnEPKNS_12ColumnVectorILNS_13PrimitiveTypeE5EEE
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb1EE36convert_dict_column_to_string_columnEPKNS_12ColumnVectorILNS_13PrimitiveTypeE5EEE
Unexecuted instantiation: _ZN5doris18ScalarColumnReaderILb0ELb0EE36convert_dict_column_to_string_columnEPKNS_12ColumnVectorILNS_13PrimitiveTypeE5EEE
  522|       |
  523|       |template <bool IN_COLLECTION, bool OFFSET_INDEX>
  524|       |Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::_try_load_dict_page(bool* loaded,
  525|      2|                                                                            bool* has_dict) {
  526|       |    // _chunk_reader init will load first page header to check whether has dict page
  527|      2|    *loaded = true;
  528|      2|    *has_dict = _chunk_reader->has_dict();
  529|      2|    return Status::OK();
  530|      2|}
Instantiations: ScalarColumnReader<false, false> executed (counts identical to the aggregate above); the other three instantiations were unexecuted.
531
532
template <bool IN_COLLECTION, bool OFFSET_INDEX>
533
Status ScalarColumnReader<IN_COLLECTION, OFFSET_INDEX>::read_column_data(
534
        ColumnPtr& doris_column, const DataTypePtr& type,
535
        const std::shared_ptr<TableSchemaChangeHelper::Node>& root_node, FilterMap& filter_map,
536
        size_t batch_size, size_t* read_rows, bool* eof, bool is_dict_filter,
537
194
        int64_t real_column_size) {
538
194
    if (_converter == nullptr) {
539
109
        _converter = parquet::PhysicalToLogicalConverter::get_converter(
540
109
                _field_schema, _field_schema->data_type, type, _ctz, is_dict_filter);
541
109
        if (!_converter->support()) {
542
0
            return Status::InternalError(
543
0
                    "The column type of '{}' is not supported: {}, is_dict_filter: {}, "
544
0
                    "src_logical_type: {}, dst_logical_type: {}",
545
0
                    _field_schema->name, _converter->get_error_msg(), is_dict_filter,
546
0
                    _field_schema->data_type->get_name(), type->get_name());
547
0
        }
548
109
    }
549
    // !FIXME: Verify that the get_physical_column logic is correct; why does it return a doris_column?
550
194
    ColumnPtr resolved_column =
551
194
            _converter->get_physical_column(_field_schema->physical_type, _field_schema->data_type,
552
194
                                            doris_column, type, is_dict_filter);
553
194
    DataTypePtr& resolved_type = _converter->get_physical_type();
554
555
194
    _def_levels.clear();
556
194
    _rep_levels.clear();
557
194
    *read_rows = 0;
558
559
194
    if (_in_nested) {
560
8
        RETURN_IF_ERROR(_read_nested_column(resolved_column, resolved_type, filter_map, batch_size,
561
8
                                            read_rows, eof, is_dict_filter));
562
8
        return _converter->convert(resolved_column, _field_schema->data_type, type, doris_column,
563
8
                                   is_dict_filter);
564
8
    }
565
566
186
    int64_t right_row = 0;
567
186
    if constexpr (OFFSET_INDEX == false) {
568
186
        RETURN_IF_ERROR(_chunk_reader->parse_page_header());
569
186
        right_row = _chunk_reader->page_end_row();
570
186
    } else {
571
0
        right_row = _chunk_reader->page_end_row();
572
0
    }
573
574
186
    do {
575
        // generate the row ranges that should be read
576
186
        RowRanges read_ranges;
577
186
        _generate_read_ranges(RowRange {_current_row_index, right_row}, &read_ranges);
578
186
        if (read_ranges.count() == 0) {
579
            // skip the whole page
580
63
            _current_row_index = right_row;
581
123
        } else {
582
123
            bool skip_whole_batch = false;
583
            // Deciding whether to skip a page or batch adds computation time.
584
            // Skipping only pays off when the filter removes more than 60% of the rows.
585
123
            if (filter_map.has_filter() && filter_map.filter_ratio() > 0.6) {
586
                // lazy read
587
0
                size_t remaining_num_values = read_ranges.count();
588
0
                if (batch_size >= remaining_num_values &&
589
0
                    filter_map.can_filter_all(remaining_num_values, _filter_map_index)) {
590
                    // We can skip the whole page if the remaining values are filtered by predicate columns
591
0
                    _filter_map_index += remaining_num_values;
592
0
                    _current_row_index = right_row;
593
0
                    *read_rows = remaining_num_values;
594
0
                    break;
595
0
                }
596
0
                skip_whole_batch = batch_size <= remaining_num_values &&
597
0
                                   filter_map.can_filter_all(batch_size, _filter_map_index);
598
0
                if (skip_whole_batch) {
599
0
                    _filter_map_index += batch_size;
600
0
                }
601
0
            }
602
            // load page data to decode or skip values
603
123
            RETURN_IF_ERROR(_chunk_reader->parse_page_header());
604
123
            RETURN_IF_ERROR(_chunk_reader->load_page_data_idempotent());
605
123
            size_t has_read = 0;
606
256
            for (size_t idx = 0; idx < read_ranges.range_size(); idx++) {
607
156
                auto range = read_ranges.get_range(idx);
608
                // compute the number of values to skip
609
156
                size_t skip_values = range.from() - _current_row_index;
610
156
                RETURN_IF_ERROR(_skip_values(skip_values));
611
156
                _current_row_index += skip_values;
612
                // compute the number of values to read
613
156
                size_t read_values =
614
156
                        std::min((size_t)(range.to() - range.from()), batch_size - has_read);
615
156
                if (skip_whole_batch) {
616
0
                    RETURN_IF_ERROR(_skip_values(read_values));
617
156
                } else {
618
156
                    RETURN_IF_ERROR(_read_values(read_values, resolved_column, resolved_type,
619
156
                                                 filter_map, is_dict_filter));
620
156
                }
621
156
                has_read += read_values;
622
156
                *read_rows += read_values;
623
156
                _current_row_index += read_values;
624
156
                if (has_read == batch_size) {
625
23
                    break;
626
23
                }
627
156
            }
628
123
        }
629
186
    } while (false);
630
631
186
    if (right_row == _current_row_index) {
632
101
        if (!_chunk_reader->has_next_page()) {
633
101
            *eof = true;
634
101
        } else {
635
0
            RETURN_IF_ERROR(_chunk_reader->next_page());
636
0
        }
637
101
    }
638
639
186
    return _converter->convert(resolved_column, _field_schema->data_type, type, doris_column,
640
186
                               is_dict_filter);
641
186
}
Instantiations: ScalarColumnReader<true, false> ran 2 calls (both through the nested path) and ScalarColumnReader<false, false> ran 192 calls (6 nested, 186 scalar); <true, true> and <false, true> were unexecuted. The per-instantiation listings repeat the source above, and their counts sum to the aggregate.
642
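The lazy-read branch in read_column_data above only evaluates skipping when filter_ratio() > 0.6, because checking the filter map itself costs time. A standalone sketch of that decision, with a simplified FilterMap standing in for the real class (the 0.6 threshold mirrors the reader; everything else is illustrative):

// Standalone sketch of the lazy-read skip decision: if every row left in the
// page is already filtered out by predicate columns we can skip the whole
// page; otherwise we may still be able to skip one whole batch.
#include <cstddef>
#include <iostream>
#include <vector>

struct FilterMap {
    std::vector<bool> filtered; // true == row removed by a predicate column
    double filter_ratio() const {
        size_t n = 0;
        for (bool f : filtered) n += f;
        return filtered.empty() ? 0.0 : double(n) / filtered.size();
    }
    bool can_filter_all(size_t count, size_t start) const {
        for (size_t i = start; i < start + count && i < filtered.size(); ++i) {
            if (!filtered[i]) return false;
        }
        return true;
    }
};

int main() {
    FilterMap fm {{true, true, true, true, false, true, true, true, true, true}};
    size_t batch_size = 4, remaining = 10, index = 0;

    if (fm.filter_ratio() > 0.6) { // only worth checking when filtering is effective
        if (batch_size >= remaining && fm.can_filter_all(remaining, index)) {
            std::cout << "skip whole page\n";
        } else if (batch_size <= remaining && fm.can_filter_all(batch_size, index)) {
            std::cout << "skip whole batch\n"; // rows 0..3 are all filtered
        } else {
            std::cout << "decode normally\n";
        }
    }
}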
643
Status ArrayColumnReader::init(std::unique_ptr<ParquetColumnReader> element_reader,
644
2
                               FieldSchema* field) {
645
2
    _field_schema = field;
646
2
    _element_reader = std::move(element_reader);
647
2
    return Status::OK();
648
2
}
649
650
Status ArrayColumnReader::read_column_data(
651
        ColumnPtr& doris_column, const DataTypePtr& type,
652
        const std::shared_ptr<TableSchemaChangeHelper::Node>& root_node, FilterMap& filter_map,
653
        size_t batch_size, size_t* read_rows, bool* eof, bool is_dict_filter,
654
2
        int64_t real_column_size) {
655
2
    MutableColumnPtr data_column;
656
2
    NullMap* null_map_ptr = nullptr;
657
2
    if (doris_column->is_nullable()) {
658
2
        auto mutable_column = doris_column->assume_mutable();
659
2
        auto* nullable_column = assert_cast<ColumnNullable*>(mutable_column.get());
660
2
        null_map_ptr = &nullable_column->get_null_map_data();
661
2
        data_column = nullable_column->get_nested_column_ptr();
662
2
    } else {
663
0
        if (_field_schema->data_type->is_nullable()) {
664
0
            return Status::Corruption("Not nullable column has null values in parquet file");
665
0
        }
666
0
        data_column = doris_column->assume_mutable();
667
0
    }
668
2
    if (type->get_primitive_type() != PrimitiveType::TYPE_ARRAY) {
669
0
        return Status::Corruption(
670
0
                "Wrong data type for column '{}', expected Array type, actual type: {}.",
671
0
                _field_schema->name, type->get_name());
672
0
    }
673
674
2
    ColumnPtr& element_column = assert_cast<ColumnArray&>(*data_column).get_data_ptr();
675
2
    const DataTypePtr& element_type =
676
2
            (assert_cast<const DataTypeArray*>(remove_nullable(type).get()))->get_nested_type();
677
    // read nested column
678
2
    RETURN_IF_ERROR(_element_reader->read_column_data(element_column, element_type,
679
2
                                                      root_node->get_element_node(), filter_map,
680
2
                                                      batch_size, read_rows, eof, is_dict_filter));
681
2
    if (*read_rows == 0) {
682
0
        return Status::OK();
683
0
    }
684
685
2
    ColumnArray::Offsets64& offsets_data = assert_cast<ColumnArray&>(*data_column).get_offsets();
686
    // fill offset and null map
687
2
    fill_array_offset(_field_schema, offsets_data, null_map_ptr, _element_reader->get_rep_level(),
688
2
                      _element_reader->get_def_level());
689
2
    DCHECK_EQ(element_column->size(), offsets_data.back());
690
2
#ifndef NDEBUG
691
2
    doris_column->sanity_check();
692
2
#endif
693
2
    return Status::OK();
694
2
}
695
696
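For context on fill_array_offset: a Doris array column stores a flat element column plus cumulative end offsets, and the offsets are derived from the repetition/definition levels the element reader collected above. A standalone sketch of the representation (illustrative, not ColumnArray itself):

// Standalone sketch: an array column is a flat element column plus cumulative
// end offsets; each row's elements live between the previous row's end offset
// and its own.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // Logical rows: [1, 2], [3], [] (an empty array)
    std::vector<int32_t> elements = {1, 2, 3}; // flat element storage
    std::vector<uint64_t> offsets = {2, 3, 3}; // end offset of each row

    for (size_t row = 0; row < offsets.size(); ++row) {
        uint64_t begin = row == 0 ? 0 : offsets[row - 1];
        std::cout << "row " << row << ": [";
        for (uint64_t i = begin; i < offsets[row]; ++i) {
            std::cout << elements[i] << (i + 1 < offsets[row] ? ", " : "");
        }
        std::cout << "]\n";
    }
}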
Status MapColumnReader::init(std::unique_ptr<ParquetColumnReader> key_reader,
697
                             std::unique_ptr<ParquetColumnReader> value_reader,
698
0
                             FieldSchema* field) {
699
0
    _field_schema = field;
700
0
    _key_reader = std::move(key_reader);
701
0
    _value_reader = std::move(value_reader);
702
0
    return Status::OK();
703
0
}
704
705
Status MapColumnReader::read_column_data(
706
        ColumnPtr& doris_column, const DataTypePtr& type,
707
        const std::shared_ptr<TableSchemaChangeHelper::Node>& root_node, FilterMap& filter_map,
708
        size_t batch_size, size_t* read_rows, bool* eof, bool is_dict_filter,
709
0
        int64_t real_column_size) {
710
0
    MutableColumnPtr data_column;
711
0
    NullMap* null_map_ptr = nullptr;
712
0
    if (doris_column->is_nullable()) {
713
0
        auto mutable_column = doris_column->assume_mutable();
714
0
        auto* nullable_column = assert_cast<ColumnNullable*>(mutable_column.get());
715
0
        null_map_ptr = &nullable_column->get_null_map_data();
716
0
        data_column = nullable_column->get_nested_column_ptr();
717
0
    } else {
718
0
        if (_field_schema->data_type->is_nullable()) {
719
0
            return Status::Corruption("Not nullable column has null values in parquet file");
720
0
        }
721
0
        data_column = doris_column->assume_mutable();
722
0
    }
723
0
    if (remove_nullable(type)->get_primitive_type() != PrimitiveType::TYPE_MAP) {
724
0
        return Status::Corruption(
725
0
                "Wrong data type for column '{}', expected Map type, actual type id {}.",
726
0
                _field_schema->name, type->get_name());
727
0
    }
728
729
0
    auto& map = assert_cast<ColumnMap&>(*data_column);
730
0
    const DataTypePtr& key_type =
731
0
            assert_cast<const DataTypeMap*>(remove_nullable(type).get())->get_key_type();
732
0
    const DataTypePtr& value_type =
733
0
            assert_cast<const DataTypeMap*>(remove_nullable(type).get())->get_value_type();
734
0
    ColumnPtr& key_column = map.get_keys_ptr();
735
0
    ColumnPtr& value_column = map.get_values_ptr();
736
737
0
    size_t key_rows = 0;
738
0
    size_t value_rows = 0;
739
0
    bool key_eof = false;
740
0
    bool value_eof = false;
741
0
    int64_t orig_col_column_size = key_column->size();
742
743
0
    RETURN_IF_ERROR(_key_reader->read_column_data(key_column, key_type, root_node->get_key_node(),
744
0
                                                  filter_map, batch_size, &key_rows, &key_eof,
745
0
                                                  is_dict_filter));
746
747
0
    while (value_rows < key_rows && !value_eof) {
748
0
        size_t loop_rows = 0;
749
0
        RETURN_IF_ERROR(_value_reader->read_column_data(
750
0
                value_column, value_type, root_node->get_value_node(), filter_map,
751
0
                key_rows - value_rows, &loop_rows, &value_eof, is_dict_filter,
752
0
                key_column->size() - orig_col_column_size));
753
0
        value_rows += loop_rows;
754
0
    }
755
0
    DCHECK_EQ(key_rows, value_rows);
756
0
    *read_rows = key_rows;
757
0
    *eof = key_eof;
758
759
0
    if (*read_rows == 0) {
760
0
        return Status::OK();
761
0
    }
762
763
0
    DCHECK_EQ(key_column->size(), value_column->size());
764
    // fill offset and null map
765
0
    fill_array_offset(_field_schema, map.get_offsets(), null_map_ptr, _key_reader->get_rep_level(),
766
0
                      _key_reader->get_def_level());
767
0
    DCHECK_EQ(key_column->size(), map.get_offsets().back());
768
0
#ifndef NDEBUG
769
0
    doris_column->sanity_check();
770
0
#endif
771
0
    return Status::OK();
772
0
}
773
774
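The key/value catch-up loop in MapColumnReader::read_column_data exists because the value column's pages may end at different row boundaries than the key column's, so a single value read can come up short. A standalone sketch of the loop with a hypothetical reader type (not the Doris API):

// Standalone sketch: keys are read first, then values are read in a loop
// until the same number of rows has been produced.
#include <algorithm>
#include <cstddef>
#include <iostream>

// Hypothetical reader: returns up to `max_rows` rows per call, but never more
// than `per_call_cap`, simulating a page boundary that ends a read early.
struct FakeReader {
    size_t rows_left;
    size_t per_call_cap;
    size_t read(size_t max_rows, bool* eof) {
        size_t n = std::min({max_rows, rows_left, per_call_cap});
        rows_left -= n;
        *eof = (rows_left == 0);
        return n;
    }
};

int main() {
    FakeReader key_reader {5, 5}, value_reader {5, 2};
    bool key_eof = false, value_eof = false;

    size_t key_rows = key_reader.read(5, &key_eof);
    size_t value_rows = 0;
    while (value_rows < key_rows && !value_eof) {
        // Ask only for the shortfall, exactly like the loop in the reader above.
        value_rows += value_reader.read(key_rows - value_rows, &value_eof);
    }
    std::cout << key_rows << " key rows, " << value_rows << " value rows\n"; // 5 and 5
}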
Status StructColumnReader::init(
775
        std::unordered_map<std::string, std::unique_ptr<ParquetColumnReader>>&& child_readers,
776
10
        FieldSchema* field) {
777
10
    _field_schema = field;
778
10
    _child_readers = std::move(child_readers);
779
10
    return Status::OK();
780
10
}
781
Status StructColumnReader::read_column_data(
782
        ColumnPtr& doris_column, const DataTypePtr& type,
783
        const std::shared_ptr<TableSchemaChangeHelper::Node>& root_node, FilterMap& filter_map,
784
        size_t batch_size, size_t* read_rows, bool* eof, bool is_dict_filter,
785
10
        int64_t real_column_size) {
786
10
    MutableColumnPtr data_column;
787
10
    NullMap* null_map_ptr = nullptr;
788
10
    if (doris_column->is_nullable()) {
789
10
        auto mutable_column = doris_column->assume_mutable();
790
10
        auto* nullable_column = assert_cast<ColumnNullable*>(mutable_column.get());
791
10
        null_map_ptr = &nullable_column->get_null_map_data();
792
10
        data_column = nullable_column->get_nested_column_ptr();
793
10
    } else {
794
0
        if (_field_schema->data_type->is_nullable()) {
795
0
            return Status::Corruption("Not nullable column has null values in parquet file");
796
0
        }
797
0
        data_column = doris_column->assume_mutable();
798
0
    }
799
10
    if (type->get_primitive_type() != PrimitiveType::TYPE_STRUCT) {
800
0
        return Status::Corruption(
801
0
                "Wrong data type for column '{}', expected Struct type, actual type id {}.",
802
0
                _field_schema->name, type->get_name());
803
0
    }
804
805
10
    auto& doris_struct = assert_cast<ColumnStruct&>(*data_column);
806
10
    const auto* doris_struct_type = assert_cast<const DataTypeStruct*>(remove_nullable(type).get());
807
808
10
    int64_t not_missing_column_id = -1;
809
10
    size_t not_missing_orig_column_size = 0;
810
10
    std::vector<size_t> missing_column_idxs {};
811
10
    std::vector<size_t> skip_reading_column_idxs {};
812
813
10
    _read_column_names.clear();
814
815
34
    for (size_t i = 0; i < doris_struct.tuple_size(); ++i) {
816
24
        ColumnPtr& doris_field = doris_struct.get_column_ptr(i);
817
24
        auto& doris_type = doris_struct_type->get_element(i);
818
24
        auto& doris_name = doris_struct_type->get_element_name(i);
819
24
        if (!root_node->children_column_exists(doris_name)) {
820
0
            missing_column_idxs.push_back(i);
821
0
            VLOG_DEBUG << "[ParquetReader] Missing column in schema: column_idx[" << i
822
0
                       << "], doris_name: " << doris_name << " (column not exists in root node)";
823
0
            continue;
824
0
        }
825
24
        auto file_name = root_node->children_file_column_name(doris_name);
826
827
        // Check if this is a SkipReadingReader - skip it when choosing the reference column,
828
        // because a SkipReadingReader doesn't know the actual data size in a nested context
829
24
        bool is_skip_reader =
830
24
                dynamic_cast<SkipReadingReader*>(_child_readers[file_name].get()) != nullptr;
831
832
24
        if (is_skip_reader) {
833
            // Store SkipReadingReader columns to fill them later based on reference column size
834
8
            skip_reading_column_idxs.push_back(i);
835
8
            continue;
836
8
        }
837
838
        // Only add non-SkipReadingReader columns to _read_column_names
839
        // This ensures get_rep_level() and get_def_level() return valid levels
840
16
        _read_column_names.emplace_back(file_name);
841
842
16
        size_t field_rows = 0;
843
16
        bool field_eof = false;
844
16
        if (not_missing_column_id == -1) {
845
10
            not_missing_column_id = i;
846
10
            not_missing_orig_column_size = doris_field->size();
847
10
            RETURN_IF_ERROR(_child_readers[file_name]->read_column_data(
848
10
                    doris_field, doris_type, root_node->get_children_node(doris_name), filter_map,
849
10
                    batch_size, &field_rows, &field_eof, is_dict_filter));
850
10
            *read_rows = field_rows;
851
10
            *eof = field_eof;
852
            /*
853
             * Because `_read_nested_column` may read rows that span pages (and it is not
854
             * called repeatedly), the definition and repetition levels of later-read columns
855
             * can be missing. When filling the struct's null_map later, it is therefore
856
             * crucial to use the definition and repetition levels from the first read column.
857
             *
858
             *  It is worth mentioning that, theoretically, any sub-column can be chosen to
859
             *  fill the null_map, and selecting the shortest one offers better performance.
860
             */
861
10
        } else {
862
12
            while (field_rows < *read_rows && !field_eof) {
863
6
                size_t loop_rows = 0;
864
6
                RETURN_IF_ERROR(_child_readers[file_name]->read_column_data(
865
6
                        doris_field, doris_type, root_node->get_children_node(doris_name),
866
6
                        filter_map, *read_rows - field_rows, &loop_rows, &field_eof,
867
6
                        is_dict_filter));
868
6
                field_rows += loop_rows;
869
6
            }
870
6
            DCHECK_EQ(*read_rows, field_rows);
871
            //            DCHECK_EQ(*eof, field_eof);
872
6
        }
873
16
    }
874
875
10
    int64_t missing_column_sz = -1;
876
877
10
    if (not_missing_column_id == -1) {
878
        // All queried columns are missing in the file (e.g., all added after schema change)
879
        // We need to pick a column from _field_schema children that exists in the file for RL/DL reference
880
0
        std::string reference_file_column_name;
881
0
        std::unique_ptr<ParquetColumnReader>* reference_reader = nullptr;
882
883
0
        for (const auto& child : _field_schema->children) {
884
0
            auto it = _child_readers.find(child.name);
885
0
            if (it != _child_readers.end()) {
886
                // Skip SkipReadingReader as they don't have valid RL/DL
887
0
                bool is_skip_reader = dynamic_cast<SkipReadingReader*>(it->second.get()) != nullptr;
888
0
                if (!is_skip_reader) {
889
0
                    reference_file_column_name = child.name;
890
0
                    reference_reader = &(it->second);
891
0
                    break;
892
0
                }
893
0
            }
894
0
        }
895
896
0
        if (reference_reader != nullptr) {
897
            // Read the reference column to get correct RL/DL information
898
            // TODO: Optimize by only reading RL/DL without actual data decoding
899
900
            // We need to find the FieldSchema for the reference column from _field_schema children
901
0
            FieldSchema* ref_field_schema = nullptr;
902
0
            for (auto& child : _field_schema->children) {
903
0
                if (child.name == reference_file_column_name) {
904
0
                    ref_field_schema = &child;
905
0
                    break;
906
0
                }
907
0
            }
908
909
0
            if (ref_field_schema == nullptr) {
910
0
                return Status::InternalError(
911
0
                        "Cannot find field schema for reference column '{}' in struct '{}'",
912
0
                        reference_file_column_name, _field_schema->name);
913
0
            }
914
915
            // Create a temporary column to hold the data (we'll use its size for missing_column_sz)
916
0
            ColumnPtr temp_column = ref_field_schema->data_type->create_column();
917
0
            auto temp_type = ref_field_schema->data_type;
918
919
0
            size_t field_rows = 0;
920
0
            bool field_eof = false;
921
922
            // Use ConstNode for the reference column instead of looking up from root_node.
923
            // The reference column is only used to get RL/DL information for determining the number
924
            // of elements in the struct. It may be a column that has been dropped from the table
925
            // schema (e.g., 'removed' field), but still exists in older parquet files.
926
            // Since we don't need schema mapping for this column (we just need its RL/DL levels),
927
            // using ConstNode is safe and avoids the issue where the reference column doesn't exist
928
            // in root_node (because it was dropped from table schema).
929
0
            auto ref_child_node = TableSchemaChangeHelper::ConstNode::get_instance();
930
0
            not_missing_orig_column_size = temp_column->size();
931
932
0
            RETURN_IF_ERROR((*reference_reader)
933
0
                                    ->read_column_data(temp_column, temp_type, ref_child_node,
934
0
                                                       filter_map, batch_size, &field_rows,
935
0
                                                       &field_eof, is_dict_filter));
936
937
0
            *read_rows = field_rows;
938
0
            *eof = field_eof;
939
940
            // Store this reference column name for get_rep_level/get_def_level to use
941
0
            _read_column_names.emplace_back(reference_file_column_name);
942
943
0
            missing_column_sz = temp_column->size() - not_missing_orig_column_size;
944
0
        } else {
945
0
            return Status::Corruption(
946
0
                    "Cannot read struct '{}': all queried columns are missing and no reference "
947
0
                    "column found in file",
948
0
                    _field_schema->name);
949
0
        }
950
0
    }
951
952
    //  Note that missing_column_sz is not *read_rows, because *read_rows counts top-level rows.
953
    //  For example: suppose we have a column array<struct<a:int,b:string>>,
954
    //  where b is a newly added column, that is, a missing column.
955
    //  There are two rows of data in this column:
956
    //      [{1,null},{2,null},{3,null}]
957
    //      [{4,null},{5,null}]
958
    //  When subcolumn a is read first, 5 values are read but *read_rows is 2.
959
    //  Subcolumn b must therefore be filled with 5 values, not 2.
960
10
    if (missing_column_sz == -1) {
961
10
        missing_column_sz = doris_struct.get_column(not_missing_column_id).size() -
962
10
                            not_missing_orig_column_size;
963
10
    }
964
965
    // Fill SkipReadingReader columns with the correct amount of data based on the reference column
966
    // Let SkipReadingReader handle the data filling through its read_column_data method
967
10
    for (auto idx : skip_reading_column_idxs) {
968
8
        auto& doris_field = doris_struct.get_column_ptr(idx);
969
8
        auto& doris_type = const_cast<DataTypePtr&>(doris_struct_type->get_element(idx));
970
8
        auto& doris_name = const_cast<String&>(doris_struct_type->get_element_name(idx));
971
8
        auto file_name = root_node->children_file_column_name(doris_name);
972
973
8
        size_t field_rows = 0;
974
8
        bool field_eof = false;
975
8
        RETURN_IF_ERROR(_child_readers[file_name]->read_column_data(
976
8
                doris_field, doris_type, root_node->get_children_node(doris_name), filter_map,
977
8
                missing_column_sz, &field_rows, &field_eof, is_dict_filter, missing_column_sz));
978
8
    }
979
980
    // Fill truly missing columns (not in root_node) with null or default value
981
10
    for (auto idx : missing_column_idxs) {
982
0
        auto& doris_field = doris_struct.get_column_ptr(idx);
983
0
        auto& doris_type = doris_struct_type->get_element(idx);
984
0
        DCHECK(doris_type->is_nullable());
985
0
        auto mutable_column = doris_field->assume_mutable();
986
0
        auto* nullable_column = static_cast<ColumnNullable*>(mutable_column.get());
987
0
        nullable_column->insert_many_defaults(missing_column_sz);
988
0
    }
989
990
10
    if (null_map_ptr != nullptr) {
991
10
        fill_struct_null_map(_field_schema, *null_map_ptr, this->get_rep_level(),
992
10
                             this->get_def_level());
993
10
    }
994
10
#ifndef NDEBUG
995
10
    doris_column->sanity_check();
996
10
#endif
997
10
    return Status::OK();
998
10
}
999
1000
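To make the struct reader's missing-column handling concrete: a subcolumn added by schema change is absent from older files, so it is padded to the element count of a reference subcolumn that was actually read, which for nested data can exceed the row count. A standalone sketch with plain standard-library types (illustrative only, not Doris columns):

// Standalone sketch of the missing-column fill: pad the missing subcolumn to
// the element count of a reference subcolumn, not to the row count.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

int main() {
    // Reference subcolumn `a` read from the file: 5 elements across 2 rows,
    // e.g. [{a:1},{a:2},{a:3}] and [{a:4},{a:5}].
    std::vector<int32_t> a = {1, 2, 3, 4, 5};
    size_t read_rows = 2;

    // Newly added subcolumn `b` is missing from the file: pad with the
    // element count (5), not the row count (2).
    std::vector<std::optional<std::string>> b;
    size_t missing_column_sz = a.size();
    b.insert(b.end(), missing_column_sz, std::nullopt);

    std::cout << "rows=" << read_rows << " a.size=" << a.size()
              << " b.size=" << b.size() << "\n"; // b.size == 5
}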
template class ScalarColumnReader<true, true>;
1001
template class ScalarColumnReader<true, false>;
1002
template class ScalarColumnReader<false, true>;
1003
template class ScalarColumnReader<false, false>;
1004
1005
}; // namespace doris