Coverage Report

Created: 2026-05-17 01:25

Legend: next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/format/parquet/vparquet_reader.cpp
Line | Count | Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/parquet/vparquet_reader.h"
19
20
#include <gen_cpp/Metrics_types.h>
21
#include <gen_cpp/PlanNodes_types.h>
22
#include <gen_cpp/parquet_types.h>
23
#include <glog/logging.h>
24
25
#include <algorithm>
26
#include <functional>
27
#include <sstream>
28
#include <utility>
29
30
#include "common/config.h"
31
#include "common/status.h"
32
#include "core/block/block.h"
33
#include "core/block/column_with_type_and_name.h"
34
#include "core/column/column.h"
35
#include "core/data_type/define_primitive_type.h"
36
#include "core/typeid_cast.h"
37
#include "core/types.h"
38
#include "exec/scan/file_scanner.h"
39
#include "exprs/vbloom_predicate.h"
40
#include "exprs/vdirect_in_predicate.h"
41
#include "exprs/vexpr.h"
42
#include "exprs/vexpr_context.h"
43
#include "exprs/vin_predicate.h"
44
#include "exprs/vruntimefilter_wrapper.h"
45
#include "exprs/vslot_ref.h"
46
#include "exprs/vtopn_pred.h"
47
#include "format/column_type_convert.h"
48
#include "format/parquet/parquet_block_split_bloom_filter.h"
49
#include "format/parquet/parquet_common.h"
50
#include "format/parquet/parquet_nested_column_utils.h"
51
#include "format/parquet/parquet_predicate.h"
52
#include "format/parquet/parquet_thrift_util.h"
53
#include "format/parquet/schema_desc.h"
54
#include "format/parquet/vparquet_file_metadata.h"
55
#include "format/parquet/vparquet_group_reader.h"
56
#include "format/parquet/vparquet_page_index.h"
57
#include "format/table/nested_column_access_helper.h"
58
#include "information_schema/schema_scanner.h"
59
#include "io/file_factory.h"
60
#include "io/fs/buffered_reader.h"
61
#include "io/fs/file_reader.h"
62
#include "io/fs/file_reader_writer_fwd.h"
63
#include "io/fs/tracing_file_reader.h"
64
#include "runtime/descriptors.h"
65
#include "util/slice.h"
66
#include "util/string_util.h"
67
#include "util/timezone_utils.h"
68
69
namespace cctz {
70
class time_zone;
71
} // namespace cctz
72
namespace doris {
73
class RowDescriptor;
74
class RuntimeState;
75
class SlotDescriptor;
76
class TupleDescriptor;
77
namespace io {
78
struct IOContext;
79
enum class FileCachePolicy : uint8_t;
80
} // namespace io
81
class Block;
82
} // namespace doris
83
84
namespace doris {
85
86
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
87
                             const TFileRangeDesc& range, size_t batch_size,
88
                             const cctz::time_zone* ctz, io::IOContext* io_ctx, RuntimeState* state,
89
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
90
98
        : _profile(profile),
91
98
          _scan_params(params),
92
98
          _scan_range(range),
93
98
          _batch_size(std::max(batch_size, 1UL)),
94
98
          _range_start_offset(range.start_offset),
95
98
          _range_size(range.size),
96
98
          _ctz(ctz),
97
98
          _io_ctx(io_ctx),
98
98
          _state(state),
99
98
          _enable_lazy_mat(enable_lazy_mat),
100
          _enable_filter_by_min_max(
101
98
                  state == nullptr ? true
102
98
                                   : state->query_options().enable_parquet_filter_by_min_max),
103
          _enable_filter_by_bloom_filter(
104
98
                  state == nullptr ? true
105
98
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
106
98
    _meta_cache = meta_cache;
107
98
    _init_profile();
108
98
    _init_system_properties();
109
98
    _init_file_description();
110
98
}
111
112
0
void ParquetReader::set_batch_size(size_t batch_size) {
113
0
    if (_batch_size == batch_size) {
114
0
        return;
115
0
    }
116
0
    _batch_size = batch_size;
117
0
}
118
119
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
120
                             const TFileRangeDesc& range, size_t batch_size,
121
                             const cctz::time_zone* ctz,
122
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
123
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
124
0
        : _profile(profile),
125
0
          _scan_params(params),
126
0
          _scan_range(range),
127
0
          _batch_size(std::max(batch_size, 1UL)),
128
0
          _range_start_offset(range.start_offset),
129
0
          _range_size(range.size),
130
0
          _ctz(ctz),
131
0
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
132
0
          _io_ctx_holder(std::move(io_ctx_holder)),
133
0
          _state(state),
134
0
          _enable_lazy_mat(enable_lazy_mat),
135
          _enable_filter_by_min_max(
136
0
                  state == nullptr ? true
137
0
                                   : state->query_options().enable_parquet_filter_by_min_max),
138
          _enable_filter_by_bloom_filter(
139
0
                  state == nullptr ? true
140
0
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
141
0
    _meta_cache = meta_cache;
142
0
    _init_profile();
143
0
    _init_system_properties();
144
0
    _init_file_description();
145
0
}
146
147
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
148
                             io::IOContext* io_ctx, RuntimeState* state, FileMetaCache* meta_cache,
149
                             bool enable_lazy_mat)
150
5
        : _profile(nullptr),
151
5
          _scan_params(params),
152
5
          _scan_range(range),
153
5
          _io_ctx(io_ctx),
154
5
          _state(state),
155
5
          _enable_lazy_mat(enable_lazy_mat),
156
          _enable_filter_by_min_max(
157
5
                  state == nullptr ? true
158
5
                                   : state->query_options().enable_parquet_filter_by_min_max),
159
          _enable_filter_by_bloom_filter(
160
5
                  state == nullptr ? true
161
5
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
162
5
    _meta_cache = meta_cache;
163
5
    _init_system_properties();
164
5
    _init_file_description();
165
5
}
166
167
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
168
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
169
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
170
0
        : _profile(nullptr),
171
0
          _scan_params(params),
172
0
          _scan_range(range),
173
0
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
174
0
          _io_ctx_holder(std::move(io_ctx_holder)),
175
0
          _state(state),
176
0
          _enable_lazy_mat(enable_lazy_mat),
177
          _enable_filter_by_min_max(
178
0
                  state == nullptr ? true
179
0
                                   : state->query_options().enable_parquet_filter_by_min_max),
180
          _enable_filter_by_bloom_filter(
181
0
                  state == nullptr ? true
182
0
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
183
0
    _meta_cache = meta_cache;
184
0
    _init_system_properties();
185
0
    _init_file_description();
186
0
}
187
188
103
ParquetReader::~ParquetReader() {
189
103
    _close_internal();
190
103
}
191
192
#ifdef BE_TEST
193
// for unit test
194
69
void ParquetReader::set_file_reader(io::FileReaderSPtr file_reader) {
195
69
    _file_reader = file_reader;
196
69
    _tracing_file_reader = file_reader;
197
69
}
198
#endif
199
200
// NOLINTNEXTLINE(readability-function-size): existing Parquet counter initialization stays grouped.
201
98
void ParquetReader::_init_profile() {
202
98
    if (_profile != nullptr) {
203
50
        static const char* parquet_profile = "ParquetReader";
204
50
        ADD_TIMER_WITH_LEVEL(_profile, parquet_profile, 1);
205
206
50
        _parquet_profile.filtered_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
207
50
                _profile, "RowGroupsFiltered", TUnit::UNIT, parquet_profile, 1);
208
50
        _parquet_profile.filtered_row_groups_by_min_max = ADD_CHILD_COUNTER_WITH_LEVEL(
209
50
                _profile, "RowGroupsFilteredByMinMax", TUnit::UNIT, parquet_profile, 1);
210
50
        _parquet_profile.filtered_row_groups_by_bloom_filter = ADD_CHILD_COUNTER_WITH_LEVEL(
211
50
                _profile, "RowGroupsFilteredByBloomFilter", TUnit::UNIT, parquet_profile, 1);
212
50
        _parquet_profile.to_read_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
213
50
                _profile, "RowGroupsReadNum", TUnit::UNIT, parquet_profile, 1);
214
50
        _parquet_profile.total_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
215
50
                _profile, "RowGroupsTotalNum", TUnit::UNIT, parquet_profile, 1);
216
50
        _parquet_profile.filtered_group_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
217
50
                _profile, "FilteredRowsByGroup", TUnit::UNIT, parquet_profile, 1);
218
50
        _parquet_profile.filtered_page_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
219
50
                _profile, "FilteredRowsByPage", TUnit::UNIT, parquet_profile, 1);
220
50
        _parquet_profile.lazy_read_filtered_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
221
50
                _profile, "FilteredRowsByLazyRead", TUnit::UNIT, parquet_profile, 1);
222
50
        _parquet_profile.filtered_bytes = ADD_CHILD_COUNTER_WITH_LEVEL(
223
50
                _profile, "FilteredBytes", TUnit::BYTES, parquet_profile, 1);
224
50
        _parquet_profile.raw_rows_read = ADD_CHILD_COUNTER_WITH_LEVEL(
225
50
                _profile, "RawRowsRead", TUnit::UNIT, parquet_profile, 1);
226
50
        _parquet_profile.column_read_time =
227
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ColumnReadTime", parquet_profile, 1);
228
50
        _parquet_profile.parse_meta_time =
229
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseMetaTime", parquet_profile, 1);
230
50
        _parquet_profile.parse_footer_time =
231
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseFooterTime", parquet_profile, 1);
232
50
        _parquet_profile.file_reader_create_time =
233
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "FileReaderCreateTime", parquet_profile, 1);
234
50
        _parquet_profile.open_file_num =
235
50
                ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "FileNum", TUnit::UNIT, parquet_profile, 1);
236
50
        _parquet_profile.page_index_read_calls =
237
50
                ADD_COUNTER_WITH_LEVEL(_profile, "PageIndexReadCalls", TUnit::UNIT, 1);
238
50
        _parquet_profile.page_index_filter_time =
239
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexFilterTime", parquet_profile, 1);
240
50
        _parquet_profile.read_page_index_time =
241
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexReadTime", parquet_profile, 1);
242
50
        _parquet_profile.parse_page_index_time =
243
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexParseTime", parquet_profile, 1);
244
50
        _parquet_profile.row_group_filter_time =
245
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "RowGroupFilterTime", parquet_profile, 1);
246
50
        _parquet_profile.file_footer_read_calls =
247
50
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterReadCalls", TUnit::UNIT, 1);
248
50
        _parquet_profile.file_footer_hit_cache =
249
50
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterHitCache", TUnit::UNIT, 1);
250
50
        _parquet_profile.decompress_time =
251
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecompressTime", parquet_profile, 1);
252
50
        _parquet_profile.decompress_cnt = ADD_CHILD_COUNTER_WITH_LEVEL(
253
50
                _profile, "DecompressCount", TUnit::UNIT, parquet_profile, 1);
254
50
        _parquet_profile.page_read_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
255
50
                _profile, "PageReadCount", TUnit::UNIT, parquet_profile, 1);
256
50
        _parquet_profile.page_cache_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
257
50
                _profile, "PageCacheWriteCount", TUnit::UNIT, parquet_profile, 1);
258
50
        _parquet_profile.page_cache_compressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
259
50
                _profile, "PageCacheCompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
260
50
        _parquet_profile.page_cache_decompressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
261
50
                _profile, "PageCacheDecompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
262
50
        _parquet_profile.page_cache_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
263
50
                _profile, "PageCacheHitCount", TUnit::UNIT, parquet_profile, 1);
264
50
        _parquet_profile.page_cache_missing_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
265
50
                _profile, "PageCacheMissingCount", TUnit::UNIT, parquet_profile, 1);
266
50
        _parquet_profile.page_cache_compressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
267
50
                _profile, "PageCacheCompressedHitCount", TUnit::UNIT, parquet_profile, 1);
268
50
        _parquet_profile.page_cache_decompressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
269
50
                _profile, "PageCacheDecompressedHitCount", TUnit::UNIT, parquet_profile, 1);
270
50
        _parquet_profile.decode_header_time =
271
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderDecodeTime", parquet_profile, 1);
272
50
        _parquet_profile.read_page_header_time =
273
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderReadTime", parquet_profile, 1);
274
50
        _parquet_profile.decode_value_time =
275
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeValueTime", parquet_profile, 1);
276
50
        _parquet_profile.decode_dict_time =
277
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeDictTime", parquet_profile, 1);
278
50
        _parquet_profile.decode_level_time =
279
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeLevelTime", parquet_profile, 1);
280
50
        _parquet_profile.decode_null_map_time =
281
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeNullMapTime", parquet_profile, 1);
282
50
        _parquet_profile.skip_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
283
50
                _profile, "SkipPageHeaderNum", TUnit::UNIT, parquet_profile, 1);
284
50
        _parquet_profile.parse_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
285
50
                _profile, "ParsePageHeaderNum", TUnit::UNIT, parquet_profile, 1);
286
50
        _parquet_profile.predicate_filter_time =
287
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PredicateFilterTime", parquet_profile, 1);
288
50
        _parquet_profile.dict_filter_rewrite_time =
289
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DictFilterRewriteTime", parquet_profile, 1);
290
50
        _parquet_profile.convert_time =
291
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ConvertTime", parquet_profile, 1);
292
50
        _parquet_profile.bloom_filter_read_time =
293
50
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "BloomFilterReadTime", parquet_profile, 1);
294
50
        _parquet_profile.variant_direct_typed_value_read_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
295
50
                _profile, "VariantDirectTypedValueReadRows", TUnit::UNIT, parquet_profile, 1);
296
50
        _parquet_profile.variant_rowwise_read_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
297
50
                _profile, "VariantRowWiseReadRows", TUnit::UNIT, parquet_profile, 1);
298
50
    }
299
98
}
300
301
14
Status ParquetReader::close() {
302
14
    _close_internal();
303
14
    return Status::OK();
304
14
}
305
306
117
void ParquetReader::_close_internal() {
307
117
    if (!_closed) {
308
103
        _closed = true;
309
103
    }
310
117
}
311
312
106
Status ParquetReader::_open_file() {
313
106
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
314
0
        return Status::EndOfFile("stop");
315
0
    }
316
106
    if (_file_reader == nullptr) {
317
15
        SCOPED_RAW_TIMER(&_reader_statistics.file_reader_create_time);
318
15
        ++_reader_statistics.open_file_num;
319
15
        _file_description.mtime =
320
15
                _scan_range.__isset.modification_time ? _scan_range.modification_time : 0;
321
15
        io::FileReaderOptions reader_options =
322
15
                FileFactory::get_reader_options(_state, _file_description);
323
15
        _file_reader = DORIS_TRY(io::DelegateReader::create_file_reader(
324
15
                _profile, _system_properties, _file_description, reader_options,
325
15
                io::DelegateReader::AccessMode::RANDOM, _io_ctx));
326
15
        _tracing_file_reader = _io_ctx ? std::make_shared<io::TracingFileReader>(
327
15
                                                 _file_reader, _io_ctx->file_reader_stats)
328
15
                                       : _file_reader;
329
15
    }
330
331
106
    if (_file_metadata == nullptr) {
332
84
        SCOPED_RAW_TIMER(&_reader_statistics.parse_footer_time);
333
84
        if (_tracing_file_reader->size() <= sizeof(PARQUET_VERSION_NUMBER)) {
334
            // Some systems may generate a parquet file with only the 4 magic bytes: PAR1.
335
            // Treat such a file as empty.
336
0
            return Status::EndOfFile("open file failed, empty parquet file {} with size: {}",
337
0
                                     _scan_range.path, _tracing_file_reader->size());
338
0
        }
339
84
        size_t meta_size = 0;
340
84
        bool enable_mapping_varbinary = _scan_params.__isset.enable_mapping_varbinary
341
84
                                                ? _scan_params.enable_mapping_varbinary
342
84
                                                : false;
343
84
        bool enable_mapping_timestamp_tz = _scan_params.__isset.enable_mapping_timestamp_tz
344
84
                                                   ? _scan_params.enable_mapping_timestamp_tz
345
84
                                                   : false;
346
84
        if (_meta_cache == nullptr) {
347
            // Wrap _file_metadata in a unique_ptr so that it is eventually released.
348
48
            RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
349
48
                                                &meta_size, _io_ctx, enable_mapping_varbinary,
350
48
                                                enable_mapping_timestamp_tz));
351
48
            _file_metadata = _file_metadata_ptr.get();
352
            // parse magic number & parse meta data
353
48
            _reader_statistics.file_footer_read_calls += 1;
354
48
        } else {
355
36
            const auto& file_meta_cache_key =
356
36
                    FileMetaCache::get_key(_tracing_file_reader, _file_description);
357
36
            if (!_meta_cache->lookup(file_meta_cache_key, &_meta_cache_handle)) {
358
24
                RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
359
24
                                                    &meta_size, _io_ctx, enable_mapping_varbinary,
360
24
                                                    enable_mapping_timestamp_tz));
361
                // _file_metadata_ptr.release(): transfer ownership of _file_metadata to _meta_cache_handle
362
24
                _meta_cache->insert(file_meta_cache_key, _file_metadata_ptr.release(),
363
24
                                    &_meta_cache_handle);
364
24
                _file_metadata = _meta_cache_handle.data<FileMetaData>();
365
24
                _reader_statistics.file_footer_read_calls += 1;
366
24
            } else {
367
12
                _reader_statistics.file_footer_hit_cache++;
368
12
            }
369
36
            _file_metadata = _meta_cache_handle.data<FileMetaData>();
370
36
        }
371
372
84
        if (_file_metadata == nullptr) {
373
0
            return Status::InternalError("failed to get file meta data: {}",
374
0
                                         _file_description.path);
375
0
        }
376
84
    }
377
106
    return Status::OK();
378
106
}
379
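The footer-cache branch above follows a lookup-or-parse-and-insert pattern: on a cache miss the freshly parsed FileMetaData is handed to the cache via release(), and the reader then always accesses it through the cache handle. Below is a minimal, self-contained sketch of that ownership hand-off; MetaCache, Footer, and get_footer are hypothetical stand-ins for illustration, not the real FileMetaCache API.

#include <map>
#include <memory>
#include <string>

// Hypothetical stand-ins for FileMetaCache / its cache handle, for illustration only.
struct Footer {
    long num_rows = 0;
};

class MetaCache {
public:
    // Returns the cached footer, or nullptr on a miss.
    Footer* lookup(const std::string& key) {
        auto it = _entries.find(key);
        return it == _entries.end() ? nullptr : it->second.get();
    }
    // Takes ownership of a freshly parsed footer (mirrors insert(..., ptr.release(), ...)).
    Footer* insert(const std::string& key, Footer* parsed) {
        _entries[key] = std::unique_ptr<Footer>(parsed);
        return parsed;
    }

private:
    std::map<std::string, std::unique_ptr<Footer>> _entries;
};

// Lookup-or-parse: parse the footer only on a cache miss, then always use the cached copy.
Footer* get_footer(MetaCache& cache, const std::string& key) {
    if (Footer* hit = cache.lookup(key)) {
        return hit;                                // cache hit: no footer IO at all
    }
    auto parsed = std::make_unique<Footer>();      // stands in for parse_thrift_footer()
    parsed->num_rows = 42;
    return cache.insert(key, parsed.release());    // ownership moves into the cache
}

int main() {
    MetaCache cache;
    Footer* a = get_footer(cache, "s3://bucket/file.parquet");  // miss: parse + insert
    Footer* b = get_footer(cache, "s3://bucket/file.parquet");  // hit: same cached object
    return a == b ? 0 : 1;
}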
380
40
Status ParquetReader::get_file_metadata_schema(const FieldDescriptor** ptr) {
381
40
    RETURN_IF_ERROR(_open_file());
382
40
    DCHECK(_file_metadata != nullptr);
383
40
    *ptr = &_file_metadata->schema();
384
40
    return Status::OK();
385
40
}
386
387
103
void ParquetReader::_init_system_properties() {
388
103
    if (_scan_range.__isset.file_type) {
389
        // for compatibility
390
0
        _system_properties.system_type = _scan_range.file_type;
391
103
    } else {
392
103
        _system_properties.system_type = _scan_params.file_type;
393
103
    }
394
103
    _system_properties.properties = _scan_params.properties;
395
103
    _system_properties.hdfs_params = _scan_params.hdfs_params;
396
103
    if (_scan_params.__isset.broker_addresses) {
397
0
        _system_properties.broker_addresses.assign(_scan_params.broker_addresses.begin(),
398
0
                                                   _scan_params.broker_addresses.end());
399
0
    }
400
103
}
401
402
103
void ParquetReader::_init_file_description() {
403
103
    _file_description.path = _scan_range.path;
404
103
    _file_description.file_size = _scan_range.__isset.file_size ? _scan_range.file_size : -1;
405
103
    if (_scan_range.__isset.fs_name) {
406
0
        _file_description.fs_name = _scan_range.fs_name;
407
0
    }
408
103
    if (_scan_range.__isset.file_cache_admission) {
409
0
        _file_description.file_cache_admission = _scan_range.file_cache_admission;
410
0
    }
411
103
}
412
413
9
Status ParquetReader::on_before_init_reader(ReaderInitContext* ctx) {
414
9
    _column_descs = ctx->column_descs;
415
9
    _fill_col_name_to_block_idx = ctx->col_name_to_block_idx;
416
9
    RETURN_IF_ERROR(
417
9
            _extract_partition_values(*ctx->range, ctx->tuple_descriptor, _fill_partition_values));
418
21
    for (auto& desc : *ctx->column_descs) {
419
21
        if (desc.category == ColumnCategory::REGULAR ||
420
21
            desc.category == ColumnCategory::GENERATED) {
421
21
            ctx->column_names.push_back(desc.name);
422
21
        } else if (desc.category == ColumnCategory::SYNTHESIZED &&
423
0
                   desc.name.starts_with(BeConsts::GLOBAL_ROWID_COL)) {
424
0
            auto topn_row_id_column_iter = _create_topn_row_id_column_iterator();
425
0
            this->register_synthesized_column_handler(
426
0
                    desc.name,
427
0
                    [iter = std::move(topn_row_id_column_iter), this, &desc](
428
0
                            Block* block, size_t rows) -> Status {
429
0
                        return fill_topn_row_id(iter, desc.name, block, rows);
430
0
                    });
431
0
            continue;
432
0
        }
433
21
    }
434
435
    // Build table_info_node from Parquet file metadata with case-insensitive recursive matching.
436
    // File is already opened by init_reader before this hook, so metadata is available.
437
    // tuple_descriptor may be null in unit tests that only set column_descs.
438
9
    if (ctx->tuple_descriptor != nullptr) {
439
7
        const FieldDescriptor* field_desc = nullptr;
440
7
        RETURN_IF_ERROR(get_file_metadata_schema(&field_desc));
441
7
        RETURN_IF_ERROR(TableSchemaChangeHelper::BuildTableInfoUtil::by_parquet_name(
442
7
                ctx->tuple_descriptor, *field_desc, ctx->table_info_node));
443
7
        auto column_id_result = _create_column_ids_by_name(field_desc, ctx->tuple_descriptor);
444
7
        ctx->column_ids = std::move(column_id_result.column_ids);
445
7
        ctx->filter_column_ids = std::move(column_id_result.filter_column_ids);
446
7
    }
447
448
9
    return Status::OK();
449
9
}
450
451
ColumnIdResult ParquetReader::_create_column_ids_by_name(const FieldDescriptor* field_desc,
452
7
                                                         const TupleDescriptor* tuple_descriptor) {
453
7
    auto* mutable_field_desc = const_cast<FieldDescriptor*>(field_desc);
454
7
    mutable_field_desc->assign_ids();
455
456
7
    std::unordered_map<std::string, const FieldSchema*> table_col_name_to_field_schema_map;
457
21
    for (int i = 0; i < field_desc->size(); ++i) {
458
14
        const auto* field_schema = field_desc->get_column(i);
459
14
        if (!field_schema) {
460
0
            continue;
461
0
        }
462
14
        table_col_name_to_field_schema_map[field_schema->lower_case_name] = field_schema;
463
14
    }
464
465
7
    std::set<uint64_t> column_ids;
466
7
    std::set<uint64_t> filter_column_ids;
467
468
7
    auto process_access_paths = [](const FieldSchema* parquet_field,
469
7
                                   const std::vector<TColumnAccessPath>& access_paths,
470
7
                                   std::set<uint64_t>& out_ids) {
471
0
        process_nested_access_paths(
472
0
                parquet_field, access_paths, out_ids,
473
0
                [](const FieldSchema* field) { return field->get_column_id(); },
474
0
                [](const FieldSchema* field) { return field->get_max_column_id(); },
475
0
                ParquetNestedColumnUtils::extract_nested_column_ids_by_name);
476
0
    };
477
478
17
    for (const auto* slot : tuple_descriptor->slots()) {
479
17
        auto it = table_col_name_to_field_schema_map.find(slot->col_name_lower_case());
480
17
        if (it == table_col_name_to_field_schema_map.end()) {
481
5
            continue;
482
5
        }
483
12
        const auto* field_schema = it->second;
484
485
12
        if ((slot->col_type() != TYPE_STRUCT && slot->col_type() != TYPE_ARRAY &&
486
12
             slot->col_type() != TYPE_MAP && slot->col_type() != TYPE_VARIANT)) {
487
12
            column_ids.insert(field_schema->column_id);
488
12
            if (slot->is_predicate()) {
489
0
                filter_column_ids.insert(field_schema->column_id);
490
0
            }
491
12
            continue;
492
12
        }
493
494
0
        process_access_paths(field_schema, slot->all_access_paths(), column_ids);
495
0
        if (!slot->predicate_access_paths().empty()) {
496
0
            process_access_paths(field_schema, slot->predicate_access_paths(), filter_column_ids);
497
0
        }
498
0
    }
499
500
7
    return {std::move(column_ids), std::move(filter_column_ids)};
501
7
}
502
503
18
std::string ParquetReader::_selected_leaf_column_paths() const {
504
18
    if (_file_metadata == nullptr) {
505
0
        return "";
506
0
    }
507
508
18
    std::vector<std::string> leaf_paths;
509
18
    auto schema_desc = _file_metadata->schema();
510
18
    std::function<void(const FieldSchema*, const std::string&)> collect =
511
87
            [&](const FieldSchema* field, const std::string& path) {
512
87
                if (!_column_ids.empty() && !_column_ids.contains(field->get_column_id())) {
513
4
                    return;
514
4
                }
515
516
83
                if (field->children.empty()) {
517
70
                    if (field->physical_column_index >= 0) {
518
70
                        leaf_paths.push_back(path);
519
70
                    }
520
70
                    return;
521
70
                }
522
523
28
                for (const auto& child : field->children) {
524
28
                    collect(&child, path + "." + child.name);
525
28
                }
526
13
            };
527
528
59
    for (const auto& read_col : _read_file_columns) {
529
59
        const FieldSchema* field = schema_desc.get_column(read_col);
530
59
        if (field != nullptr) {
531
59
            collect(field, field->name);
532
59
        }
533
59
    }
534
535
18
    std::sort(leaf_paths.begin(), leaf_paths.end());
536
18
    leaf_paths.erase(std::unique(leaf_paths.begin(), leaf_paths.end()), leaf_paths.end());
537
538
18
    std::stringstream result;
539
88
    for (size_t i = 0; i < leaf_paths.size(); ++i) {
540
70
        if (i != 0) {
541
52
            result << ", ";
542
52
        }
543
70
        result << leaf_paths[i];
544
70
    }
545
18
    return result.str();
546
18
}
547
548
66
Status ParquetReader::_open_file_reader(ReaderInitContext* /*ctx*/) {
549
66
    return _open_file();
550
66
}
551
552
66
Status ParquetReader::_do_init_reader(ReaderInitContext* base_ctx) {
553
66
    auto* ctx = checked_context_cast<ParquetInitContext>(base_ctx);
554
66
    _col_name_to_block_idx = base_ctx->col_name_to_block_idx;
555
66
    _tuple_descriptor = ctx->tuple_descriptor;
556
66
    _row_descriptor = ctx->row_descriptor;
557
66
    _colname_to_slot_id = ctx->colname_to_slot_id;
558
66
    _not_single_slot_filter_conjuncts = ctx->not_single_slot_filter_conjuncts;
559
66
    _slot_id_to_filter_conjuncts = ctx->slot_id_to_filter_conjuncts;
560
66
    _filter_groups = ctx->filter_groups;
561
66
    _table_info_node_ptr = base_ctx->table_info_node;
562
66
    _column_ids = base_ctx->column_ids;
563
66
    _filter_column_ids = base_ctx->filter_column_ids;
564
565
    // _open_file_reader (called by init_reader NVI before hooks) must have opened the file.
566
66
    DCHECK(_file_metadata != nullptr)
567
0
            << "ParquetReader::_do_init_reader called without _open_file_reader";
568
66
    _t_metadata = &(_file_metadata->to_thrift());
569
570
66
    SCOPED_RAW_TIMER(&_reader_statistics.parse_meta_time);
571
66
    _total_groups = _t_metadata->row_groups.size();
572
66
    if (_total_groups == 0) {
573
0
        return Status::EndOfFile("init reader failed, empty parquet file: " + _scan_range.path);
574
0
    }
575
66
    _current_row_group_index = RowGroupReader::RowGroupIndex {-1, 0, 0};
576
577
    // Compute missing columns and file↔table column mapping.
578
    // This runs in _do_init_reader (not on_before_init_reader) because table-format readers
579
    // (Iceberg, Paimon, Hive, Hudi) override on_before_init_reader completely.
580
66
    if (has_column_descs()) {
581
24
        _fill_missing_cols.clear();
582
24
        _fill_missing_defaults.clear();
583
74
        for (const auto& col_name : base_ctx->column_names) {
584
74
            if (!_table_info_node_ptr->children_column_exists(col_name)) {
585
5
                _fill_missing_cols.insert(col_name);
586
5
            }
587
74
        }
588
24
        if (_column_descs && !_fill_missing_cols.empty()) {
589
13
            for (const auto& desc : *_column_descs) {
590
13
                if (_fill_missing_cols.contains(desc.name) &&
591
13
                    !_fill_partition_values.contains(desc.name)) {
592
0
                    _fill_missing_defaults[desc.name] = desc.default_expr;
593
0
                }
594
13
            }
595
5
        }
596
24
    }
597
    // Resolve file-column ↔ table-column mapping in file-schema order.
598
    // _init_read_columns handles both normal path (missing cols populated above)
599
    // and standalone path (_fill_missing_cols empty, _table_info_node_ptr may be null).
600
66
    _init_read_columns(base_ctx->column_names);
601
66
    if (_profile != nullptr) {
602
18
        _profile->add_info_string("ParquetReadColumnPaths", _selected_leaf_column_paths());
603
18
    }
604
605
    // build column predicates for column lazy read
606
66
    if (ctx->conjuncts != nullptr) {
607
66
        _lazy_read_ctx.conjuncts = *ctx->conjuncts;
608
66
    }
609
66
    if (ctx->slot_id_to_predicates != nullptr) {
610
66
        _lazy_read_ctx.slot_id_to_predicates = *ctx->slot_id_to_predicates;
611
66
    }
612
613
    // ---- Inlined set_fill_columns logic (partition/missing/synthesized classification) ----
614
615
    // 1. Collect predicate columns from conjuncts for lazy materialization
616
66
    std::unordered_map<std::string, std::pair<uint32_t, int>> predicate_columns;
617
66
    _collect_predicate_columns_from_conjuncts(predicate_columns);
618
619
    // 2. Classify read/partition/missing/synthesized columns into lazy vs predicate groups
620
66
    _classify_columns_for_lazy_read(predicate_columns, _fill_partition_values,
621
66
                                    _fill_missing_defaults);
622
623
    // 3. Populate col_names vectors for ColumnProcessor path
624
66
    for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
625
5
        _lazy_read_ctx.predicate_partition_col_names.emplace_back(kv.first);
626
5
    }
627
66
    for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
628
0
        _lazy_read_ctx.predicate_missing_col_names.emplace_back(kv.first);
629
0
    }
630
66
    for (auto& kv : _lazy_read_ctx.partition_columns) {
631
3
        _lazy_read_ctx.partition_col_names.emplace_back(kv.first);
632
3
    }
633
66
    for (auto& kv : _lazy_read_ctx.missing_columns) {
634
0
        _lazy_read_ctx.missing_col_names.emplace_back(kv.first);
635
0
    }
636
637
66
    if (_filter_groups && (_total_groups == 0 || _t_metadata->num_rows == 0 || _range_size < 0)) {
638
0
        return Status::EndOfFile("No row group to read");
639
0
    }
640
641
66
    return Status::OK();
642
66
}
643
644
66
void ParquetReader::_init_read_columns(const std::vector<std::string>& column_names) {
645
    // Build file_col_name → table_col_name map, skipping missing columns.
646
    // Must iterate file schema in physical order so that _generate_random_access_ranges
647
    // sees monotonically increasing chunk offsets.
648
66
    auto schema_desc = _file_metadata->schema();
649
66
    std::map<std::string, std::string> required_file_columns;
650
422
    for (const auto& col_name : column_names) {
651
422
        if (_fill_missing_cols.contains(col_name)) {
652
5
            continue;
653
5
        }
654
417
        std::string file_col = col_name;
655
417
        if (_table_info_node_ptr && _table_info_node_ptr->children_column_exists(col_name)) {
656
417
            file_col = _table_info_node_ptr->children_file_column_name(col_name);
657
417
        }
658
417
        required_file_columns[file_col] = col_name;
659
417
    }
660
725
    for (int i = 0; i < schema_desc.size(); ++i) {
661
659
        const auto& name = schema_desc.get_column(i)->name;
662
659
        if (required_file_columns.contains(name)) {
663
417
            _read_file_columns.emplace_back(name);
664
417
            _read_table_columns.emplace_back(required_file_columns[name]);
665
417
            _read_table_columns_set.insert(required_file_columns[name]);
666
417
        }
667
659
    }
668
66
}
669
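Because _init_read_columns first builds a file-name to table-name map and then walks the file schema, _read_file_columns always ends up in file-schema (physical) order no matter how the requested table columns were ordered, which is what _generate_random_access_ranges relies on. A small standalone sketch of the two-pass idea, with made-up column names and an identity name mapping in place of the table_info_node lookup:

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

int main() {
    // Requested table columns (arbitrary order); "c3" is missing from the file.
    std::vector<std::string> table_cols = {"c2", "c3", "c1"};
    std::set<std::string> missing = {"c3"};
    // File schema in physical (on-disk) order.
    std::vector<std::string> file_schema = {"c1", "c2", "c4"};

    // Pass 1: map file column name -> table column name, skipping missing columns.
    std::map<std::string, std::string> required;
    for (const auto& col : table_cols) {
        if (missing.count(col) == 0) {
            required[col] = col;  // identity here; the real code may rename via table_info_node
        }
    }
    // Pass 2: walk the file schema so the output follows physical column order.
    std::vector<std::string> read_file_columns;
    for (const auto& name : file_schema) {
        if (required.count(name) != 0) {
            read_file_columns.push_back(name);
        }
    }
    for (const auto& name : read_file_columns) {
        std::cout << name << '\n';  // prints c1 then c2, regardless of the requested order
    }
    return 0;
}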
670
0
bool ParquetReader::_exists_in_file(const std::string& expr_name) const {
671
    // `_read_table_columns_set` is used to ensure that only columns actually read are subject to min-max filtering.
672
    // This primarily handles cases where partition columns also exist in a file. The reason it's not modified
673
    // in `_table_info_node_ptr` is that Iceberg and Hudi have inconsistent requirements for this node:
674
    // Iceberg partition evolution needs to read partition columns from the file, while Hudi with
675
    // `hoodie.datasource.write.drop.partition.columns=false` does not need to read them from the file.
676
0
    return _table_info_node_ptr->children_column_exists(expr_name) &&
677
0
           _read_table_columns_set.contains(expr_name);
678
0
}
679
680
0
bool ParquetReader::_type_matches(const int cid) const {
681
0
    auto* slot = _tuple_descriptor->slots()[cid];
682
0
    auto table_col_type = remove_nullable(slot->type());
683
684
0
    const auto& file_col_name = _table_info_node_ptr->children_file_column_name(slot->col_name());
685
0
    const auto& file_col_type =
686
0
            remove_nullable(_file_metadata->schema().get_column(file_col_name)->data_type);
687
688
0
    return (table_col_type->get_primitive_type() == file_col_type->get_primitive_type()) &&
689
0
           !is_complex_type(table_col_type->get_primitive_type());
690
0
}
691
692
void ParquetReader::_collect_predicate_columns_from_conjuncts(
693
66
        std::unordered_map<std::string, std::pair<uint32_t, int>>& predicate_columns) {
694
66
    std::function<void(VExpr * expr)> visit_slot = [&](VExpr* expr) {
695
39
        if (expr->is_slot_ref()) {
696
13
            VSlotRef* slot_ref = static_cast<VSlotRef*>(expr);
697
13
            auto expr_name = slot_ref->expr_name();
698
13
            predicate_columns.emplace(expr_name,
699
13
                                      std::make_pair(slot_ref->column_id(), slot_ref->slot_id()));
700
13
            if (slot_ref->column_id() == 0) {
701
3
                _lazy_read_ctx.resize_first_column = false;
702
3
            }
703
13
            return;
704
13
        }
705
26
        for (auto& child : expr->children()) {
706
26
            visit_slot(child.get());
707
26
        }
708
26
    };
709
710
66
    for (const auto& conjunct : _lazy_read_ctx.conjuncts) {
711
13
        auto expr = conjunct->root();
712
13
        if (expr->is_rf_wrapper()) {
713
0
            VRuntimeFilterWrapper* runtime_filter = assert_cast<VRuntimeFilterWrapper*>(expr.get());
714
0
            auto filter_impl = runtime_filter->get_impl();
715
0
            visit_slot(filter_impl.get());
716
13
        } else {
717
13
            visit_slot(expr.get());
718
13
        }
719
13
    }
720
721
66
    if (!_lazy_read_ctx.slot_id_to_predicates.empty()) {
722
0
        auto and_pred = AndBlockColumnPredicate::create_unique();
723
0
        for (const auto& entry : _lazy_read_ctx.slot_id_to_predicates) {
724
0
            for (const auto& pred : entry.second) {
725
                // Parquet shares _push_down_predicates for row-group/page min-max pruning and
726
                // bloom-filter evaluation, so this flag currently gates both predicate paths.
727
0
                if (!has_column_optimization(pred->col_name(), ColumnOptimizationTypes::MIN_MAX)) {
728
0
                    continue;
729
0
                }
730
0
                if (!_exists_in_file(pred->col_name()) || !_type_matches(pred->column_id())) {
731
0
                    continue;
732
0
                }
733
0
                and_pred->add_column_predicate(
734
0
                        SingleColumnBlockPredicate::create_unique(pred->clone(pred->column_id())));
735
0
            }
736
0
        }
737
0
        if (and_pred->num_of_column_predicate() > 0) {
738
0
            _push_down_predicates.push_back(std::move(and_pred));
739
0
        }
740
0
    }
741
66
}
742
743
void ParquetReader::_classify_columns_for_lazy_read(
744
        const std::unordered_map<std::string, std::pair<uint32_t, int>>&
745
                predicate_conjuncts_columns,
746
        const std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>&
747
                partition_columns,
748
66
        const std::unordered_map<std::string, VExprContextSPtr>& missing_columns) {
749
66
    const FieldDescriptor& schema = _file_metadata->schema();
750
66
    auto predicate_columns = predicate_conjuncts_columns;
751
#ifndef BE_TEST
752
    for (const auto& [col_name, _] : _generated_col_handlers) {
753
        int slot_id = -1;
754
        for (auto slot : _tuple_descriptor->slots()) {
755
            if (slot->col_name() == col_name) {
756
                slot_id = slot->id();
757
                break;
758
            }
759
        }
760
        DCHECK(slot_id != -1) << "slot id should not be -1 for generated column: " << col_name;
761
        auto column_index = _row_descriptor->get_column_id(slot_id);
762
        if (column_index == 0) {
763
            _lazy_read_ctx.resize_first_column = false;
764
        }
765
        // assume generated columns are only used for predicate push down.
766
        predicate_columns.emplace(col_name, std::make_pair(column_index, slot_id));
767
    }
768
769
    for (const auto& [col_name, _] : _synthesized_col_handlers) {
770
        int slot_id = -1;
771
        for (auto slot : _tuple_descriptor->slots()) {
772
            if (slot->col_name() == col_name) {
773
                slot_id = slot->id();
774
                break;
775
            }
776
        }
777
        DCHECK(slot_id != -1) << "slot id should not be -1 for synthesized column: " << col_name;
778
        auto column_index = _row_descriptor->get_column_id(slot_id);
779
        if (column_index == 0) {
780
            _lazy_read_ctx.resize_first_column = false;
781
        }
782
        // Synthesized columns are always materialized in the first phase.
783
        _lazy_read_ctx.all_predicate_col_ids.emplace_back(column_index);
784
    }
785
#endif
786
417
    for (auto& read_table_col : _read_table_columns) {
787
417
        _lazy_read_ctx.all_read_columns.emplace_back(read_table_col);
788
789
417
        auto file_column_name = _table_info_node_ptr->children_file_column_name(read_table_col);
790
417
        PrimitiveType column_type =
791
417
                schema.get_column(file_column_name)->data_type->get_primitive_type();
792
417
        if (is_complex_type(column_type)) {
793
2
            _lazy_read_ctx.has_complex_type = true;
794
2
        }
795
417
        if (predicate_columns.size() > 0) {
796
12
            auto iter = predicate_columns.find(read_table_col);
797
12
            if (iter == predicate_columns.end()) {
798
4
                _lazy_read_ctx.lazy_read_columns.emplace_back(read_table_col);
799
8
            } else {
800
8
                _lazy_read_ctx.predicate_columns.first.emplace_back(iter->first);
801
8
                _lazy_read_ctx.predicate_columns.second.emplace_back(iter->second.second);
802
8
                _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
803
8
            }
804
12
        }
805
417
    }
806
807
66
    for (auto& kv : partition_columns) {
808
5
        auto iter = predicate_columns.find(kv.first);
809
5
        if (iter == predicate_columns.end()) {
810
0
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
811
5
        } else {
812
5
            _lazy_read_ctx.predicate_partition_columns.emplace(kv.first, kv.second);
813
5
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
814
5
        }
815
5
    }
816
817
66
    for (auto& kv : missing_columns) {
818
0
        auto iter = predicate_columns.find(kv.first);
819
0
        if (iter != predicate_columns.end()) {
820
            // Handle predicates that reference a missing column: `missing_col = xx`, `missing_col IS NULL`, `missing_col IS NOT NULL`.
821
0
            if (_slot_id_to_filter_conjuncts->find(iter->second.second) !=
822
0
                _slot_id_to_filter_conjuncts->end()) {
823
0
                for (auto& ctx : _slot_id_to_filter_conjuncts->find(iter->second.second)->second) {
824
0
                    _lazy_read_ctx.missing_columns_conjuncts.emplace_back(ctx);
825
0
                }
826
0
            }
827
0
            _lazy_read_ctx.predicate_missing_columns.emplace(kv.first, kv.second);
828
0
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
829
0
        } else {
830
0
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
831
0
        }
832
0
    }
833
834
66
    if (_enable_lazy_mat && _lazy_read_ctx.predicate_columns.first.size() > 0 &&
835
66
        _lazy_read_ctx.lazy_read_columns.size() > 0) {
836
2
        _lazy_read_ctx.can_lazy_read = true;
837
2
    }
838
839
66
    if (!_lazy_read_ctx.can_lazy_read) {
840
64
        for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
841
3
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
842
3
        }
843
64
        for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
844
0
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
845
0
        }
846
64
    }
847
66
}
848
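Lazy materialization only pays off when the classification above produces both non-empty groups: predicate columns are decoded first to evaluate the conjuncts, and lazy columns are decoded only for the rows that survive. A toy partition of read columns into the two groups, with made-up column names; it mirrors the can_lazy_read condition but ignores the _enable_lazy_mat switch:

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
    // Columns to read from the file, and the subset referenced by pushed-down conjuncts.
    std::vector<std::string> read_columns = {"id", "name", "price", "payload"};
    std::set<std::string> predicate_columns = {"id", "price"};

    std::vector<std::string> first_phase;  // decoded before filtering
    std::vector<std::string> lazy_phase;   // decoded only for surviving rows
    for (const auto& col : read_columns) {
        (predicate_columns.count(col) != 0 ? first_phase : lazy_phase).push_back(col);
    }

    // Both groups must be non-empty, otherwise there is nothing to defer (or nothing to filter on).
    const bool can_lazy_read = !first_phase.empty() && !lazy_phase.empty();
    std::cout << "can_lazy_read=" << can_lazy_read << '\n';  // prints can_lazy_read=1
    return 0;
}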
849
// init file reader and file metadata for parsing schema
850
0
Status ParquetReader::init_schema_reader() {
851
0
    RETURN_IF_ERROR(_open_file());
852
0
    _t_metadata = &(_file_metadata->to_thrift());
853
0
    return Status::OK();
854
0
}
855
856
Status ParquetReader::get_parsed_schema(std::vector<std::string>* col_names,
857
0
                                        std::vector<DataTypePtr>* col_types) {
858
0
    _total_groups = _t_metadata->row_groups.size();
859
0
    auto schema_desc = _file_metadata->schema();
860
0
    for (int i = 0; i < schema_desc.size(); ++i) {
861
        // Collect each top-level column's name and its nullable data type.
862
0
        col_names->emplace_back(schema_desc.get_column(i)->name);
863
0
        col_types->emplace_back(make_nullable(schema_desc.get_column(i)->data_type));
864
0
    }
865
0
    return Status::OK();
866
0
}
867
868
Status ParquetReader::_get_columns_impl(
869
14
        std::unordered_map<std::string, DataTypePtr>* name_to_type) {
870
14
    const auto& schema_desc = _file_metadata->schema();
871
14
    std::unordered_set<std::string> column_names;
872
14
    schema_desc.get_column_names(&column_names);
873
210
    for (auto& name : column_names) {
874
210
        auto field = schema_desc.get_column(name);
875
210
        name_to_type->emplace(name, field->data_type);
876
210
    }
877
14
    return Status::OK();
878
14
}
879
880
98
Status ParquetReader::_do_get_next_block(Block* block, size_t* read_rows, bool* eof) {
881
98
    if (_current_group_reader == nullptr || _row_group_eof) {
882
38
        Status st = _next_row_group_reader();
883
38
        if (!st.ok() && !st.is<ErrorCode::END_OF_FILE>()) {
884
0
            return st;
885
0
        }
886
38
        if (_current_group_reader == nullptr || _row_group_eof || st.is<ErrorCode::END_OF_FILE>()) {
887
0
            _current_group_reader.reset(nullptr);
888
0
            _row_group_eof = true;
889
0
            *read_rows = 0;
890
0
            *eof = true;
891
0
            return Status::OK();
892
0
        }
893
38
    }
894
895
    // Limit memory per batch for load paths.
896
    // _load_bytes_per_row is updated after each batch so the *next* call pre-shrinks _batch_size
897
    // before reading, ensuring the current batch is already within the limit (from call 2 onward).
898
98
    const int64_t max_block_bytes =
899
98
            (_state != nullptr && _state->query_type() == TQueryType::LOAD &&
900
98
             config::load_reader_max_block_bytes > 0)
901
98
                    ? config::load_reader_max_block_bytes
902
98
                    : 0;
903
98
    if (max_block_bytes > 0 && _load_bytes_per_row > 0) {
904
0
        _batch_size = std::max((size_t)1,
905
0
                               (size_t)((int64_t)max_block_bytes / (int64_t)_load_bytes_per_row));
906
0
    }
907
908
98
    SCOPED_RAW_TIMER(&_reader_statistics.column_read_time);
909
98
    Status batch_st =
910
98
            _current_group_reader->next_batch(block, _batch_size, read_rows, &_row_group_eof);
911
98
    if (batch_st.is<ErrorCode::END_OF_FILE>()) {
912
0
        block->clear_column_data();
913
0
        _current_group_reader.reset(nullptr);
914
0
        *read_rows = 0;
915
0
        *eof = true;
916
0
        return Status::OK();
917
0
    }
918
919
98
    if (!batch_st.ok()) {
920
0
        return Status::InternalError("Read parquet file {} failed, reason = {}", _scan_range.path,
921
0
                                     batch_st.to_string());
922
0
    }
923
924
98
    if (max_block_bytes > 0 && *read_rows > 0) {
925
0
        _load_bytes_per_row = block->bytes() / *read_rows;
926
0
    }
927
928
98
    if (_row_group_eof) {
929
38
        auto column_st = _current_group_reader->merged_column_statistics();
930
38
        _column_statistics.merge(column_st);
931
38
        _reader_statistics.lazy_read_filtered_rows +=
932
38
                _current_group_reader->lazy_read_filtered_rows();
933
38
        _reader_statistics.predicate_filter_time += _current_group_reader->predicate_filter_time();
934
38
        _reader_statistics.dict_filter_rewrite_time +=
935
38
                _current_group_reader->dict_filter_rewrite_time();
936
38
        if (_io_ctx) {
937
15
            _io_ctx->condition_cache_filtered_rows +=
938
15
                    _current_group_reader->condition_cache_filtered_rows();
939
15
        }
940
941
38
        if (_current_row_group_index.row_group_id + 1 == _total_groups) {
942
37
            *eof = true;
943
37
        } else {
944
1
            *eof = false;
945
1
        }
946
38
    }
947
98
    return Status::OK();
948
98
}
949
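The load-path batch sizing in _do_get_next_block is adaptive: after a batch is read, the observed bytes-per-row is recorded, and the next call pre-shrinks _batch_size to roughly max_block_bytes / bytes_per_row. A hedged arithmetic sketch of that feedback loop; the 100 MB limit and the block sizes below are invented numbers, not actual config defaults:

#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
    // Assumed limit for illustration; load_reader_max_block_bytes is a config knob.
    const int64_t max_block_bytes = 100LL * 1024 * 1024;   // pretend limit: 100 MB
    size_t batch_size = 4096;                               // initial batch size in rows
    int64_t load_bytes_per_row = 0;                         // unknown before the first batch

    // First batch: read with the default batch size, then measure bytes per row.
    const int64_t first_batch_bytes = 512LL * 1024 * 1024;  // pretend the block came out at 512 MB
    load_bytes_per_row = first_batch_bytes / static_cast<int64_t>(batch_size);  // 131072 B/row

    // Second batch: pre-shrink batch_size so the next block stays within the limit.
    if (max_block_bytes > 0 && load_bytes_per_row > 0) {
        batch_size = std::max<size_t>(1, static_cast<size_t>(max_block_bytes / load_bytes_per_row));
    }
    std::cout << "next batch_size = " << batch_size << '\n';  // 800 rows ~ 100 MB / 128 KB per row
    return 0;
}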
950
RowGroupReader::PositionDeleteContext ParquetReader::_get_position_delete_ctx(
951
38
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index) {
952
38
    if (_delete_rows == nullptr) {
953
38
        return {row_group.num_rows, row_group_index.first_row};
954
38
    }
955
0
    const int64_t* delete_rows = &(*_delete_rows)[0];
956
0
    const int64_t* delete_rows_end = delete_rows + _delete_rows->size();
957
0
    const int64_t* start_pos = std::lower_bound(delete_rows + _delete_rows_index, delete_rows_end,
958
0
                                                row_group_index.first_row);
959
0
    int64_t start_index = start_pos - delete_rows;
960
0
    const int64_t* end_pos = std::lower_bound(start_pos, delete_rows_end, row_group_index.last_row);
961
0
    int64_t end_index = end_pos - delete_rows;
962
0
    _delete_rows_index = end_index;
963
0
    return RowGroupReader::PositionDeleteContext(*_delete_rows, row_group.num_rows,
964
0
                                                 row_group_index.first_row, start_index, end_index);
965
38
}
966
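The position-delete context above slices a globally sorted delete-row list down to the rows owned by one row group with two std::lower_bound calls, and advances _delete_rows_index so later row groups never rescan earlier entries. A self-contained sketch of that slicing, with invented row boundaries:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // Globally sorted delete positions across the whole file.
    std::vector<int64_t> delete_rows = {3, 10, 57, 99, 140, 301};

    // Pretend the current row group covers file rows [50, 150).
    const int64_t first_row = 50;
    const int64_t last_row = 150;

    // lower_bound finds the first delete position >= first_row, then the first one >= last_row.
    auto start = std::lower_bound(delete_rows.begin(), delete_rows.end(), first_row);
    auto end = std::lower_bound(start, delete_rows.end(), last_row);

    for (auto it = start; it != end; ++it) {
        std::cout << *it << '\n';  // prints 57, 99, 140: the deletes inside this row group
    }
    return 0;
}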
967
38
Status ParquetReader::_next_row_group_reader() {
968
38
    if (_current_group_reader != nullptr) {
969
1
        _current_group_reader->collect_profile_before_close();
970
1
    }
971
972
38
    RowRanges candidate_row_ranges;
973
38
    while (++_current_row_group_index.row_group_id < _total_groups) {
974
38
        const auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
975
38
        _current_row_group_index.first_row = _current_row_group_index.last_row;
976
38
        _current_row_group_index.last_row = _current_row_group_index.last_row + row_group.num_rows;
977
978
38
        if (_filter_groups && _is_misaligned_range_group(row_group)) {
979
0
            continue;
980
0
        }
981
982
38
        candidate_row_ranges.clear();
983
        // The range of rows to read is determined by the pushed-down predicates.
984
38
        RETURN_IF_ERROR(_process_min_max_bloom_filter(
985
38
                _current_row_group_index, row_group, _push_down_predicates, &candidate_row_ranges));
986
987
38
        std::function<int64_t(const FieldSchema*)> column_compressed_size =
988
138
                [&row_group, &column_compressed_size](const FieldSchema* field) -> int64_t {
989
138
            if (field->physical_column_index >= 0) {
990
124
                int parquet_col_id = field->physical_column_index;
991
124
                if (row_group.columns[parquet_col_id].__isset.meta_data) {
992
124
                    return row_group.columns[parquet_col_id].meta_data.total_compressed_size;
993
124
                }
994
0
                return 0;
995
124
            }
996
14
            int64_t size = 0;
997
30
            for (const FieldSchema& child : field->children) {
998
30
                size += column_compressed_size(&child);
999
30
            }
1000
14
            return size;
1001
138
        };
1002
38
        int64_t group_size = 0; // only calculate the needed columns
1003
108
        for (auto& read_col : _read_file_columns) {
1004
108
            const FieldSchema* field = _file_metadata->schema().get_column(read_col);
1005
108
            group_size += column_compressed_size(field);
1006
108
        }
1007
1008
38
        _reader_statistics.read_rows += candidate_row_ranges.count();
1009
38
        if (_io_ctx) {
1010
15
            _io_ctx->file_reader_stats->read_rows += candidate_row_ranges.count();
1011
15
        }
1012
1013
38
        if (candidate_row_ranges.count() != 0) {
1014
            // need read this row group.
1015
38
            _reader_statistics.read_row_groups++;
1016
38
            _reader_statistics.filtered_page_rows +=
1017
38
                    row_group.num_rows - candidate_row_ranges.count();
1018
38
            break;
1019
38
        } else {
1020
            // this row group be filtered.
1021
0
            _reader_statistics.filtered_row_groups++;
1022
0
            _reader_statistics.filtered_bytes += group_size;
1023
0
            _reader_statistics.filtered_group_rows += row_group.num_rows;
1024
0
        }
1025
38
    }
1026
1027
38
    if (_current_row_group_index.row_group_id == _total_groups) {
1028
0
        _row_group_eof = true;
1029
0
        _current_group_reader.reset(nullptr);
1030
0
        return Status::EndOfFile("No next RowGroupReader");
1031
0
    }
1032
1033
    // process page index and generate the ranges to read
1034
38
    auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
1035
1036
38
    RowGroupReader::PositionDeleteContext position_delete_ctx =
1037
38
            _get_position_delete_ctx(row_group, _current_row_group_index);
1038
38
    io::FileReaderSPtr group_file_reader;
1039
38
    if (typeid_cast<io::InMemoryFileReader*>(_file_reader.get())) {
1040
        // InMemoryFileReader can merge small IOs on its own.
1041
0
        group_file_reader = _file_reader;
1042
38
    } else {
1043
38
        size_t avg_io_size = 0;
1044
38
        const std::vector<io::PrefetchRange> io_ranges =
1045
38
                _generate_random_access_ranges(_current_row_group_index, &avg_io_size);
1046
38
        int64_t merged_read_slice_size = -1;
1047
38
        if (_state != nullptr && _state->query_options().__isset.merge_read_slice_size) {
1048
26
            merged_read_slice_size = _state->query_options().merge_read_slice_size;
1049
26
        }
1050
        // The underlying page reader will prefetch data per column.
1051
        // Using both MergeRangeFileReader and BufferedStreamReader simultaneously would waste a lot of memory.
1052
38
        group_file_reader =
1053
38
                avg_io_size < io::MergeRangeFileReader::SMALL_IO
1054
38
                        ? std::make_shared<io::MergeRangeFileReader>(
1055
38
                                  _profile, _file_reader, io_ranges, merged_read_slice_size)
1056
38
                        : _file_reader;
1057
38
    }
1058
38
    _current_group_reader.reset(new RowGroupReader(
1059
38
            _io_ctx ? std::make_shared<io::TracingFileReader>(group_file_reader,
1060
15
                                                              _io_ctx->file_reader_stats)
1061
38
                    : group_file_reader,
1062
38
            _read_table_columns, _current_row_group_index.row_group_id, row_group, _ctz, _io_ctx,
1063
38
            position_delete_ctx, _lazy_read_ctx, _state, _column_ids, _filter_column_ids));
1064
38
    _row_group_eof = false;
1065
1066
38
    _current_group_reader->set_current_row_group_idx(_current_row_group_index);
1067
38
    _current_group_reader->set_col_name_to_block_idx(_col_name_to_block_idx);
1068
38
    if (_condition_cache_ctx) {
1069
0
        _current_group_reader->set_condition_cache_context(_condition_cache_ctx);
1070
0
    }
1071
38
    _current_group_reader->set_table_format_reader(this);
1072
1073
38
    _current_group_reader->_table_info_node_ptr = _table_info_node_ptr;
1074
38
    return _current_group_reader->init(_file_metadata->schema(), candidate_row_ranges, _col_offsets,
1075
38
                                       _tuple_descriptor, _row_descriptor, _colname_to_slot_id,
1076
38
                                       _not_single_slot_filter_conjuncts,
1077
38
                                       _slot_id_to_filter_conjuncts);
1078
38
}
1079
1080
std::vector<io::PrefetchRange> ParquetReader::_generate_random_access_ranges(
1081
38
        const RowGroupReader::RowGroupIndex& group, size_t* avg_io_size) {
1082
38
    std::vector<io::PrefetchRange> result;
1083
38
    int64_t last_chunk_end = -1;
1084
38
    size_t total_io_size = 0;
1085
38
    std::function<void(const FieldSchema*, const tparquet::RowGroup&)> scalar_range =
1086
136
            [&](const FieldSchema* field, const tparquet::RowGroup& row_group) {
1087
136
                if (_column_ids.empty() ||
1088
136
                    _column_ids.find(field->get_column_id()) != _column_ids.end()) {
1089
132
                    const auto field_type = remove_nullable(field->data_type)->get_primitive_type();
1090
132
                    if (field_type == TYPE_ARRAY) {
1091
2
                        scalar_range(field->children.data(), row_group);
1092
130
                    } else if (field_type == TYPE_MAP) {
1093
0
                        scalar_range(field->children.data(), row_group);
1094
0
                        scalar_range(field->children.data() + 1, row_group);
1095
130
                    } else if (field_type == TYPE_STRUCT || field_type == TYPE_VARIANT) {
1096
26
                        for (const auto& child : field->children) {
1097
26
                            scalar_range(&child, row_group);
1098
26
                        }
1099
119
                    } else {
1100
119
                        const tparquet::ColumnChunk& chunk =
1101
119
                                row_group.columns[field->physical_column_index];
1102
119
                        auto& chunk_meta = chunk.meta_data;
1103
119
                        int64_t chunk_start = has_dict_page(chunk_meta)
1104
119
                                                      ? chunk_meta.dictionary_page_offset
1105
119
                                                      : chunk_meta.data_page_offset;
1106
119
                        int64_t chunk_end = chunk_start + chunk_meta.total_compressed_size;
1107
119
                        DCHECK_GE(chunk_start, last_chunk_end);
1108
119
                        result.emplace_back(chunk_start, chunk_end);
1109
119
                        total_io_size += chunk_meta.total_compressed_size;
1110
119
                        last_chunk_end = chunk_end;
1111
119
                    }
1112
132
                }
1113
136
            };
1114
38
    const tparquet::RowGroup& row_group = _t_metadata->row_groups[group.row_group_id];
1115
108
    for (const auto& read_col : _read_file_columns) {
1116
108
        const FieldSchema* field = _file_metadata->schema().get_column(read_col);
1117
108
        scalar_range(field, row_group);
1118
108
    }
1119
38
    if (!result.empty()) {
1120
37
        *avg_io_size = total_io_size / result.size();
1121
37
    }
1122
38
    return result;
1123
38
}
1124
1125
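The range generation above flattens nested fields down to leaf column chunks, records one [start, end) byte range per chunk, and reports the average compressed chunk size. A simplified sketch with chunks given directly as (offset, compressed_size) pairs; ByteRange and make_ranges are illustrative names, not the actual io::PrefetchRange API.

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct ByteRange {
    int64_t start;
    int64_t end; // exclusive
};

// chunks: (start_offset, total_compressed_size) per leaf column chunk, in file order.
std::vector<ByteRange> make_ranges(const std::vector<std::pair<int64_t, int64_t>>& chunks,
                                   size_t* avg_io_size) {
    std::vector<ByteRange> result;
    int64_t total = 0;
    for (const auto& [start, size] : chunks) {
        result.push_back(ByteRange {start, start + size});
        total += size;
    }
    if (!result.empty()) {
        *avg_io_size = static_cast<size_t>(total / static_cast<int64_t>(result.size()));
    }
    return result;
}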
37
bool ParquetReader::_is_misaligned_range_group(const tparquet::RowGroup& row_group) const {
1126
37
    int64_t start_offset = _get_column_start_offset(row_group.columns[0].meta_data);
1127
1128
37
    auto& last_column = row_group.columns[row_group.columns.size() - 1].meta_data;
1129
37
    int64_t end_offset = _get_column_start_offset(last_column) + last_column.total_compressed_size;
1130
1131
37
    int64_t row_group_mid = start_offset + (end_offset - start_offset) / 2;
1132
37
    if (!(row_group_mid >= _range_start_offset &&
1133
37
          row_group_mid < _range_start_offset + _range_size)) {
1134
0
        return true;
1135
0
    }
1136
37
    return false;
1137
37
}
1138
1139
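A row group is assigned to exactly one scan range by the midpoint rule used above: this split reads the group only if the group's byte midpoint falls inside [_range_start_offset, _range_start_offset + _range_size). A worked sketch of that rule with plain integer offsets:

#include <cstdint>

// Returns true when the row group should be skipped by this split (misaligned),
// i.e. its byte midpoint lies outside the split's range.
bool is_misaligned(int64_t group_start, int64_t group_end,
                   int64_t range_start, int64_t range_size) {
    const int64_t mid = group_start + (group_end - group_start) / 2;
    return !(mid >= range_start && mid < range_start + range_size);
}

// Example: a group spanning bytes [0, 100) has midpoint 50; a split covering [0, 64)
// owns it, while the next split covering [64, 128) does not.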
0
int64_t ParquetReader::get_total_rows() const {
1140
0
    if (!_t_metadata) {
1141
0
        return 0;
1142
0
    }
1143
0
    if (!_filter_groups) {
1144
0
        return _t_metadata->num_rows;
1145
0
    }
1146
0
    int64_t total = 0;
1147
0
    for (const auto& rg : _t_metadata->row_groups) {
1148
0
        if (!_is_misaligned_range_group(rg)) {
1149
0
            total += rg.num_rows;
1150
0
        }
1151
0
    }
1152
0
    return total;
1153
0
}
1154
1155
0
void ParquetReader::set_condition_cache_context(std::shared_ptr<ConditionCacheContext> ctx) {
1156
0
    _condition_cache_ctx = std::move(ctx);
1157
0
    if (!_condition_cache_ctx || !_t_metadata || !_filter_groups) {
1158
0
        return;
1159
0
    }
1160
    // Find the first assigned row group to compute base_granule.
1161
0
    int64_t first_row = 0;
1162
0
    for (const auto& rg : _t_metadata->row_groups) {
1163
0
        if (!_is_misaligned_range_group(rg)) {
1164
0
            _condition_cache_ctx->base_granule = first_row / ConditionCacheContext::GRANULE_SIZE;
1165
0
            return;
1166
0
        }
1167
0
        first_row += rg.num_rows;
1168
0
    }
1169
0
}
1170
1171
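The cache context above maps the first assigned row to a granule index by integer division with GRANULE_SIZE. A tiny example with an assumed granule size:

#include <cstdint>

constexpr int64_t kGranuleSize = 4096; // assumed value, stands in for ConditionCacheContext::GRANULE_SIZE

// Rows [0, kGranuleSize) map to granule 0, [kGranuleSize, 2 * kGranuleSize) to granule 1, and so on.
int64_t base_granule_for(int64_t first_row) {
    return first_row / kGranuleSize;
}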
Status ParquetReader::_process_page_index_filter(
1172
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index,
1173
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1174
18
        RowRanges* candidate_row_ranges) {
1175
18
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
1176
0
        return Status::EndOfFile("stop");
1177
0
    }
1178
1179
18
    std::function<void()> read_whole_row_group = [&]() {
1180
18
        candidate_row_ranges->add(RowRange {0, row_group.num_rows});
1181
18
    };
1182
1183
    // Check whether the page index is enabled and actually present in the file.
1184
18
    PageIndex page_index;
1185
18
    if (!config::enable_parquet_page_index || _colname_to_slot_id == nullptr ||
1186
18
        !page_index.check_and_get_page_index_ranges(row_group.columns)) {
1187
18
        read_whole_row_group();
1188
18
        return Status::OK();
1189
18
    }
1190
1191
0
    std::vector<int> parquet_col_ids;
1192
0
    for (size_t idx = 0; idx < _read_table_columns.size(); idx++) {
1193
0
        const auto& read_table_col = _read_table_columns[idx];
1194
0
        const auto& read_file_col = _read_file_columns[idx];
1195
0
        if (!_colname_to_slot_id->contains(read_table_col)) {
1196
0
            continue;
1197
0
        }
1198
0
        auto* field = _file_metadata->schema().get_column(read_file_col);
1199
1200
0
        std::function<void(const FieldSchema* field)> f = [&](const FieldSchema* field) {
1201
0
            if (!_column_ids.empty() &&
1202
0
                _column_ids.find(field->get_column_id()) == _column_ids.end()) {
1203
0
                return;
1204
0
            }
1205
1206
0
            const auto field_type = remove_nullable(field->data_type)->get_primitive_type();
1207
0
            if (field_type == TYPE_ARRAY) {
1208
0
                f(field->children.data());
1209
0
            } else if (field_type == TYPE_MAP) {
1210
0
                f(field->children.data());
1211
0
                f(field->children.data() + 1);
1212
0
            } else if (field_type == TYPE_STRUCT || field_type == TYPE_VARIANT) {
1213
0
                for (const auto& child : field->children) {
1214
0
                    f(&child);
1215
0
                }
1216
0
            } else {
1217
0
                int parquet_col_id = field->physical_column_index;
1218
0
                if (parquet_col_id >= 0) {
1219
0
                    parquet_col_ids.push_back(parquet_col_id);
1220
0
                }
1221
0
            }
1222
0
        };
1223
1224
0
        f(field);
1225
0
    }
1226
1227
0
    auto parse_offset_index = [&]() -> Status {
1228
0
        std::vector<uint8_t> off_index_buff(page_index._offset_index_size);
1229
0
        Slice res(off_index_buff.data(), page_index._offset_index_size);
1230
0
        size_t bytes_read = 0;
1231
0
        {
1232
0
            SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1233
0
            RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._offset_index_start, res,
1234
0
                                                          &bytes_read, _io_ctx));
1235
0
        }
1236
0
        _column_statistics.page_index_read_calls++;
1237
0
        _col_offsets.clear();
1238
1239
0
        for (auto parquet_col_id : parquet_col_ids) {
1240
0
            auto& chunk = row_group.columns[parquet_col_id];
1241
0
            if (chunk.offset_index_length == 0) [[unlikely]] {
1242
0
                continue;
1243
0
            }
1244
0
            tparquet::OffsetIndex offset_index;
1245
0
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1246
0
            RETURN_IF_ERROR(
1247
0
                    page_index.parse_offset_index(chunk, off_index_buff.data(), &offset_index));
1248
0
            _col_offsets[parquet_col_id] = offset_index;
1249
0
        }
1250
0
        return Status::OK();
1251
0
    };
1252
1253
    // from https://github.com/apache/doris/pull/55795
1254
0
    RETURN_IF_ERROR(parse_offset_index());
1255
1256
    // Check if page index is needed for min-max filter.
1257
0
    if (!_enable_filter_by_min_max || push_down_pred.empty()) {
1258
0
        read_whole_row_group();
1259
0
        return Status::OK();
1260
0
    }
1261
1262
    // read column index.
1263
0
    std::vector<uint8_t> col_index_buff(page_index._column_index_size);
1264
0
    size_t bytes_read = 0;
1265
0
    Slice result(col_index_buff.data(), page_index._column_index_size);
1266
0
    {
1267
0
        SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1268
0
        RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._column_index_start, result,
1269
0
                                                      &bytes_read, _io_ctx));
1270
0
    }
1271
0
    _column_statistics.page_index_read_calls++;
1272
1273
0
    SCOPED_RAW_TIMER(&_reader_statistics.page_index_filter_time);
1274
1275
    // Construct a cacheable page index structure to avoid repeatedly reading the page index of the same column.
1276
0
    ParquetPredicate::CachedPageIndexStat cached_page_index;
1277
0
    cached_page_index.ctz = _ctz;
1278
0
    std::function<bool(ParquetPredicate::PageIndexStat**, int)> get_stat_func =
1279
0
            [&](ParquetPredicate::PageIndexStat** ans, const int cid) -> bool {
1280
0
        if (cached_page_index.stats.contains(cid)) {
1281
0
            *ans = &cached_page_index.stats[cid];
1282
0
            return (*ans)->available;
1283
0
        }
1284
0
        cached_page_index.stats.emplace(cid, ParquetPredicate::PageIndexStat {});
1285
0
        auto& sig_stat = cached_page_index.stats[cid];
1286
1287
0
        auto* slot = _tuple_descriptor->slots()[cid];
1288
0
        if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1289
            // The table column does not exist in the file, possibly due to schema change.
1290
0
            return false;
1291
0
        }
1292
1293
0
        const auto& file_col_name =
1294
0
                _table_info_node_ptr->children_file_column_name(slot->col_name());
1295
0
        const FieldSchema* col_schema = _file_metadata->schema().get_column(file_col_name);
1296
0
        int parquet_col_id = col_schema->physical_column_index;
1297
1298
0
        if (parquet_col_id < 0) {
1299
            // Complex type; page index is not supported for it yet.
1300
0
            return false;
1301
0
        }
1302
0
        if (!_col_offsets.contains(parquet_col_id)) {
1303
            // If the file contains partition columns and the query applies filters on those
1304
            // partition columns, then reading the page index is unnecessary.
1305
0
            return false;
1306
0
        }
1307
1308
0
        auto& column_chunk = row_group.columns[parquet_col_id];
1309
0
        if (column_chunk.column_index_length == 0 || column_chunk.offset_index_length == 0) {
1310
            // This column has no page index.
1311
0
            return false;
1312
0
        }
1313
1314
0
        tparquet::ColumnIndex column_index;
1315
0
        {
1316
0
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1317
0
            RETURN_IF_ERROR(page_index.parse_column_index(column_chunk, col_index_buff.data(),
1318
0
                                                          &column_index));
1319
0
        }
1320
0
        const int64_t num_of_pages = column_index.null_pages.size();
1321
0
        if (num_of_pages <= 0) [[unlikely]] {
1322
            // No pages (maybe this row group has no data).
1323
0
            return false;
1324
0
        }
1325
0
        DCHECK_EQ(column_index.min_values.size(), column_index.max_values.size());
1326
0
        if (!column_index.__isset.null_counts) {
1327
            // null_counts is not set.
1328
0
            return false;
1329
0
        }
1330
1331
0
        auto& offset_index = _col_offsets[parquet_col_id];
1332
0
        const auto& page_locations = offset_index.page_locations;
1333
1334
0
        sig_stat.col_schema = col_schema;
1335
0
        sig_stat.num_of_pages = num_of_pages;
1336
0
        sig_stat.encoded_min_value = column_index.min_values;
1337
0
        sig_stat.encoded_max_value = column_index.max_values;
1338
0
        sig_stat.is_all_null.resize(num_of_pages);
1339
0
        sig_stat.has_null.resize(num_of_pages);
1340
0
        sig_stat.ranges.resize(num_of_pages);
1341
1342
0
        for (int page_id = 0; page_id < num_of_pages; page_id++) {
1343
0
            sig_stat.is_all_null[page_id] = column_index.null_pages[page_id];
1344
0
            sig_stat.has_null[page_id] = column_index.null_counts[page_id] > 0;
1345
1346
0
            int64_t from = page_locations[page_id].first_row_index;
1347
0
            int64_t to = 0;
1348
0
            if (page_id == page_locations.size() - 1) {
1349
0
                to = row_group_index.last_row;
1350
0
            } else {
1351
0
                to = page_locations[page_id + 1].first_row_index;
1352
0
            }
1353
0
            sig_stat.ranges[page_id] = RowRange {from, to};
1354
0
        }
1355
1356
0
        sig_stat.available = true;
1357
0
        *ans = &sig_stat;
1358
0
        return true;
1359
0
    };
1360
0
    cached_page_index.row_group_range = {0, row_group.num_rows};
1361
0
    cached_page_index.get_stat_func = get_stat_func;
1362
1363
0
    candidate_row_ranges->add({0, row_group.num_rows});
1364
0
    for (const auto& predicate : push_down_pred) {
1365
0
        RowRanges tmp_row_range;
1366
0
        if (!predicate->evaluate_and(&cached_page_index, &tmp_row_range)) {
1367
            // No need to read this row group.
1368
0
            candidate_row_ranges->clear();
1369
0
            return Status::OK();
1370
0
        }
1371
0
        RowRanges::ranges_intersection(*candidate_row_ranges, tmp_row_range, candidate_row_ranges);
1372
0
    }
1373
0
    return Status::OK();
1374
0
}
1375
1376
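Page-index filtering above starts from the whole row group and intersects it with the row ranges each predicate allows; an empty intersection means the group can be skipped entirely. A simplified sketch of that intersection over sorted, non-overlapping ranges; Range and intersect are local stand-ins, not the Doris RowRanges API.

#include <algorithm>
#include <cstdint>
#include <vector>

struct Range {
    int64_t from; // inclusive
    int64_t to;   // exclusive
};

// Intersect two sorted, non-overlapping range lists.
std::vector<Range> intersect(const std::vector<Range>& a, const std::vector<Range>& b) {
    std::vector<Range> out;
    size_t i = 0;
    size_t j = 0;
    while (i < a.size() && j < b.size()) {
        const int64_t from = std::max(a[i].from, b[j].from);
        const int64_t to = std::min(a[i].to, b[j].to);
        if (from < to) {
            out.push_back(Range {from, to});
        }
        // Advance whichever range ends first.
        if (a[i].to < b[j].to) {
            ++i;
        } else {
            ++j;
        }
    }
    return out;
}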
Status ParquetReader::_process_min_max_bloom_filter(
1377
        const RowGroupReader::RowGroupIndex& row_group_index, const tparquet::RowGroup& row_group,
1378
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1379
38
        RowRanges* row_ranges) {
1380
38
    SCOPED_RAW_TIMER(&_reader_statistics.row_group_filter_time);
1381
38
    if (!_filter_groups) {
1382
        // No row group filtering is needed;
1383
        // for example, Iceberg reads position delete files.
1384
1
        row_ranges->add({0, row_group.num_rows});
1385
1
        return Status::OK();
1386
1
    }
1387
1388
37
    if (_read_by_rows) {
1389
19
        auto group_start = row_group_index.first_row;
1390
19
        auto group_end = row_group_index.last_row;
1391
1392
47
        while (!_row_ids.empty()) {
1393
28
            auto v = _row_ids.front();
1394
28
            if (v < group_start) {
1395
0
                _row_ids.pop_front(); // belongs to an earlier group; drop it and keep scanning
1396
28
            } else if (v < group_end) {
1397
28
                row_ranges->add(RowRange {v - group_start, v - group_start + 1});
1398
28
                _row_ids.pop_front();
1399
28
            } else {
1400
0
                break;
1401
0
            }
1402
28
        }
1403
19
    } else {
1404
18
        bool filter_this_row_group = false;
1405
18
        bool filtered_by_min_max = false;
1406
18
        bool filtered_by_bloom_filter = false;
1407
18
        RETURN_IF_ERROR(_process_column_stat_filter(row_group, push_down_pred,
1408
18
                                                    &filter_this_row_group, &filtered_by_min_max,
1409
18
                                                    &filtered_by_bloom_filter));
1410
        // Update statistics based on filter type
1411
18
        if (filter_this_row_group) {
1412
0
            if (filtered_by_min_max) {
1413
0
                _reader_statistics.filtered_row_groups_by_min_max++;
1414
0
            }
1415
0
            if (filtered_by_bloom_filter) {
1416
0
                _reader_statistics.filtered_row_groups_by_bloom_filter++;
1417
0
            }
1418
0
        }
1419
1420
18
        if (!filter_this_row_group) {
1421
18
            RETURN_IF_ERROR(_process_page_index_filter(row_group, row_group_index, push_down_pred,
1422
18
                                                       row_ranges));
1423
18
        }
1424
18
    }
1425
1426
37
    return Status::OK();
1427
37
}
1428
1429
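In the read-by-rows path above, globally numbered row ids are consumed from the front of the queue and translated into single-row ranges relative to the current row group. A standalone sketch of that translation, under the assumption that the row ids are sorted; Range and take_rows_for_group are illustrative names only.

#include <cstdint>
#include <deque>
#include <vector>

struct Range {
    int64_t from; // inclusive, relative to the row group
    int64_t to;   // exclusive
};

// Consume the row ids that fall into [group_start, group_end) and emit group-relative ranges.
std::vector<Range> take_rows_for_group(std::deque<int64_t>& row_ids,
                                       int64_t group_start, int64_t group_end) {
    std::vector<Range> ranges;
    while (!row_ids.empty()) {
        const int64_t v = row_ids.front();
        if (v < group_start) {
            row_ids.pop_front(); // belongs to an earlier group, drop it
        } else if (v < group_end) {
            ranges.push_back(Range {v - group_start, v - group_start + 1});
            row_ids.pop_front();
        } else {
            break; // belongs to a later group
        }
    }
    return ranges;
}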
Status ParquetReader::_process_column_stat_filter(
1430
        const tparquet::RowGroup& row_group,
1431
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1432
20
        bool* filter_group, bool* filtered_by_min_max, bool* filtered_by_bloom_filter) {
1433
    // If both filters are disabled, skip filtering
1434
20
    if (!_enable_filter_by_min_max && !_enable_filter_by_bloom_filter) {
1435
0
        return Status::OK();
1436
0
    }
1437
1438
    // Cache bloom filters for each column to avoid reading the same bloom filter multiple times
1439
    // when there are multiple predicates on the same column
1440
20
    std::unordered_map<int, std::unique_ptr<ParquetBlockSplitBloomFilter>> bloom_filter_cache;
1441
1442
    // Initialize output parameters
1443
20
    *filtered_by_min_max = false;
1444
20
    *filtered_by_bloom_filter = false;
1445
1446
20
    for (const auto& predicate : _push_down_predicates) {
1447
2
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_stat_func =
1448
4
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
1449
                    // Check if min-max filter is enabled
1450
4
                    if (!_enable_filter_by_min_max) {
1451
0
                        return false;
1452
0
                    }
1453
4
                    auto* slot = _tuple_descriptor->slots()[cid];
1454
4
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1455
0
                        return false;
1456
0
                    }
1457
4
                    const auto& file_col_name =
1458
4
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1459
4
                    const FieldSchema* col_schema =
1460
4
                            _file_metadata->schema().get_column(file_col_name);
1461
4
                    int parquet_col_id = col_schema->physical_column_index;
1462
4
                    auto meta_data = row_group.columns[parquet_col_id].meta_data;
1463
4
                    stat->col_schema = col_schema;
1464
4
                    return ParquetPredicate::read_column_stats(col_schema, meta_data,
1465
4
                                                               &_ignored_stats,
1466
4
                                                               _t_metadata->created_by, stat)
1467
4
                            .ok();
1468
4
                };
1469
2
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_bloom_filter_func =
1470
2
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
1471
0
                    auto* slot = _tuple_descriptor->slots()[cid];
1472
0
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1473
0
                        return false;
1474
0
                    }
1475
0
                    const auto& file_col_name =
1476
0
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1477
0
                    const FieldSchema* col_schema =
1478
0
                            _file_metadata->schema().get_column(file_col_name);
1479
0
                    int parquet_col_id = col_schema->physical_column_index;
1480
0
                    auto meta_data = row_group.columns[parquet_col_id].meta_data;
1481
0
                    if (!meta_data.__isset.bloom_filter_offset) {
1482
0
                        return false;
1483
0
                    }
1484
0
                    auto primitive_type =
1485
0
                            remove_nullable(col_schema->data_type)->get_primitive_type();
1486
0
                    if (!ParquetPredicate::bloom_filter_supported(primitive_type)) {
1487
0
                        return false;
1488
0
                    }
1489
1490
                    // Check if bloom filter is enabled
1491
0
                    if (!_enable_filter_by_bloom_filter) {
1492
0
                        return false;
1493
0
                    }
1494
1495
                    // Check cache first
1496
0
                    auto cache_iter = bloom_filter_cache.find(parquet_col_id);
1497
0
                    if (cache_iter != bloom_filter_cache.end()) {
1498
                        // Bloom filter already loaded for this column, reuse it
1499
0
                        stat->bloom_filter = std::move(cache_iter->second);
1500
0
                        bloom_filter_cache.erase(cache_iter);
1501
0
                        return stat->bloom_filter != nullptr;
1502
0
                    }
1503
1504
0
                    if (!stat->bloom_filter) {
1505
0
                        SCOPED_RAW_TIMER(&_reader_statistics.bloom_filter_read_time);
1506
0
                        auto st = ParquetPredicate::read_bloom_filter(
1507
0
                                meta_data, _tracing_file_reader, _io_ctx, stat);
1508
0
                        if (!st.ok()) {
1509
0
                            LOG(WARNING) << "Failed to read bloom filter for column "
1510
0
                                         << col_schema->name << " in file " << _scan_range.path
1511
0
                                         << ", status: " << st.to_string();
1512
0
                            stat->bloom_filter.reset();
1513
0
                            return false;
1514
0
                        }
1515
0
                    }
1516
0
                    return stat->bloom_filter != nullptr;
1517
0
                };
1518
2
        ParquetPredicate::ColumnStat stat;
1519
2
        stat.ctz = _ctz;
1520
2
        stat.get_stat_func = &get_stat_func;
1521
2
        stat.get_bloom_filter_func = &get_bloom_filter_func;
1522
1523
2
        if (!predicate->evaluate_and(&stat)) {
1524
1
            *filter_group = true;
1525
1526
            // Track which filter was used for filtering
1527
            // If bloom filter was loaded, it means bloom filter was used
1528
1
            if (stat.bloom_filter) {
1529
0
                *filtered_by_bloom_filter = true;
1530
0
            }
1531
            // If col_schema was set but no bloom filter, it means min-max stats were used
1532
1
            if (stat.col_schema && !stat.bloom_filter) {
1533
1
                *filtered_by_min_max = true;
1534
1
            }
1535
1536
1
            return Status::OK();
1537
1
        }
1538
1539
        // After evaluating, if the bloom filter was used, cache it for subsequent predicates
1540
1
        if (stat.bloom_filter) {
1541
            // Find the column id for caching
1542
0
            for (auto* slot : _tuple_descriptor->slots()) {
1543
0
                if (_table_info_node_ptr->children_column_exists(slot->col_name())) {
1544
0
                    const auto& file_col_name =
1545
0
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1546
0
                    const FieldSchema* col_schema =
1547
0
                            _file_metadata->schema().get_column(file_col_name);
1548
0
                    int parquet_col_id = col_schema->physical_column_index;
1549
0
                    if (stat.col_schema == col_schema) {
1550
0
                        bloom_filter_cache[parquet_col_id] = std::move(stat.bloom_filter);
1551
0
                        break;
1552
0
                    }
1553
0
                }
1554
0
            }
1555
0
        }
1556
1
    }
1557
1558
    // Update filter statistics if this row group was not filtered
1559
    // The statistics will be updated in _init_row_groups when filter_group is true
1560
19
    return Status::OK();
1561
20
}
1562
1563
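Row-group pruning above rejects a group when a predicate can be refuted from the column-chunk statistics alone. A minimal example of the min/max check for a single equality predicate, with the types simplified to int64; this is a sketch of the idea, not the ParquetPredicate interface.

#include <cstdint>

// Returns true when the row group can be skipped: the predicate `col == value`
// cannot match any row given the chunk's min/max statistics.
bool can_skip_by_min_max(int64_t min_value, int64_t max_value, int64_t value) {
    return value < min_value || value > max_value;
}

// Example: a chunk with min=10 and max=20 can be skipped for `col == 42`,
// but not for `col == 15`.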
74
int64_t ParquetReader::_get_column_start_offset(const tparquet::ColumnMetaData& column) const {
1564
74
    return has_dict_page(column) ? column.dictionary_page_offset : column.data_page_offset;
1565
74
}
1566
1567
14
void ParquetReader::_collect_profile() {
1568
14
    if (_profile == nullptr) {
1569
0
        return;
1570
0
    }
1571
1572
14
    if (_current_group_reader != nullptr) {
1573
14
        _current_group_reader->collect_profile_before_close();
1574
14
    }
1575
14
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups, _reader_statistics.filtered_row_groups);
1576
14
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_min_max,
1577
14
                   _reader_statistics.filtered_row_groups_by_min_max);
1578
14
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_bloom_filter,
1579
14
                   _reader_statistics.filtered_row_groups_by_bloom_filter);
1580
14
    COUNTER_UPDATE(_parquet_profile.to_read_row_groups, _reader_statistics.read_row_groups);
1581
14
    COUNTER_UPDATE(_parquet_profile.total_row_groups, _total_groups);
1582
14
    COUNTER_UPDATE(_parquet_profile.filtered_group_rows, _reader_statistics.filtered_group_rows);
1583
14
    COUNTER_UPDATE(_parquet_profile.filtered_page_rows, _reader_statistics.filtered_page_rows);
1584
14
    COUNTER_UPDATE(_parquet_profile.lazy_read_filtered_rows,
1585
14
                   _reader_statistics.lazy_read_filtered_rows);
1586
14
    COUNTER_UPDATE(_parquet_profile.filtered_bytes, _reader_statistics.filtered_bytes);
1587
14
    COUNTER_UPDATE(_parquet_profile.raw_rows_read, _reader_statistics.read_rows);
1588
14
    COUNTER_UPDATE(_parquet_profile.column_read_time, _reader_statistics.column_read_time);
1589
14
    COUNTER_UPDATE(_parquet_profile.parse_meta_time, _reader_statistics.parse_meta_time);
1590
14
    COUNTER_UPDATE(_parquet_profile.parse_footer_time, _reader_statistics.parse_footer_time);
1591
14
    COUNTER_UPDATE(_parquet_profile.file_reader_create_time,
1592
14
                   _reader_statistics.file_reader_create_time);
1593
14
    COUNTER_UPDATE(_parquet_profile.open_file_num, _reader_statistics.open_file_num);
1594
14
    COUNTER_UPDATE(_parquet_profile.page_index_filter_time,
1595
14
                   _reader_statistics.page_index_filter_time);
1596
14
    COUNTER_UPDATE(_parquet_profile.read_page_index_time, _reader_statistics.read_page_index_time);
1597
14
    COUNTER_UPDATE(_parquet_profile.parse_page_index_time,
1598
14
                   _reader_statistics.parse_page_index_time);
1599
14
    COUNTER_UPDATE(_parquet_profile.row_group_filter_time,
1600
14
                   _reader_statistics.row_group_filter_time);
1601
14
    COUNTER_UPDATE(_parquet_profile.file_footer_read_calls,
1602
14
                   _reader_statistics.file_footer_read_calls);
1603
14
    COUNTER_UPDATE(_parquet_profile.file_footer_hit_cache,
1604
14
                   _reader_statistics.file_footer_hit_cache);
1605
1606
14
    COUNTER_UPDATE(_parquet_profile.skip_page_header_num, _column_statistics.skip_page_header_num);
1607
14
    COUNTER_UPDATE(_parquet_profile.parse_page_header_num,
1608
14
                   _column_statistics.parse_page_header_num);
1609
14
    COUNTER_UPDATE(_parquet_profile.predicate_filter_time,
1610
14
                   _reader_statistics.predicate_filter_time);
1611
14
    COUNTER_UPDATE(_parquet_profile.dict_filter_rewrite_time,
1612
14
                   _reader_statistics.dict_filter_rewrite_time);
1613
14
    COUNTER_UPDATE(_parquet_profile.convert_time, _column_statistics.convert_time);
1614
14
    COUNTER_UPDATE(_parquet_profile.bloom_filter_read_time,
1615
14
                   _reader_statistics.bloom_filter_read_time);
1616
14
    COUNTER_UPDATE(_parquet_profile.page_index_read_calls,
1617
14
                   _column_statistics.page_index_read_calls);
1618
14
    COUNTER_UPDATE(_parquet_profile.decompress_time, _column_statistics.decompress_time);
1619
14
    COUNTER_UPDATE(_parquet_profile.decompress_cnt, _column_statistics.decompress_cnt);
1620
14
    COUNTER_UPDATE(_parquet_profile.page_read_counter, _column_statistics.page_read_counter);
1621
14
    COUNTER_UPDATE(_parquet_profile.page_cache_write_counter,
1622
14
                   _column_statistics.page_cache_write_counter);
1623
14
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_write_counter,
1624
14
                   _column_statistics.page_cache_compressed_write_counter);
1625
14
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_write_counter,
1626
14
                   _column_statistics.page_cache_decompressed_write_counter);
1627
14
    COUNTER_UPDATE(_parquet_profile.page_cache_hit_counter,
1628
14
                   _column_statistics.page_cache_hit_counter);
1629
14
    COUNTER_UPDATE(_parquet_profile.page_cache_missing_counter,
1630
14
                   _column_statistics.page_cache_missing_counter);
1631
14
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_hit_counter,
1632
14
                   _column_statistics.page_cache_compressed_hit_counter);
1633
14
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_hit_counter,
1634
14
                   _column_statistics.page_cache_decompressed_hit_counter);
1635
14
    COUNTER_UPDATE(_parquet_profile.decode_header_time, _column_statistics.decode_header_time);
1636
14
    COUNTER_UPDATE(_parquet_profile.read_page_header_time,
1637
14
                   _column_statistics.read_page_header_time);
1638
14
    COUNTER_UPDATE(_parquet_profile.decode_value_time, _column_statistics.decode_value_time);
1639
14
    COUNTER_UPDATE(_parquet_profile.decode_dict_time, _column_statistics.decode_dict_time);
1640
14
    COUNTER_UPDATE(_parquet_profile.decode_level_time, _column_statistics.decode_level_time);
1641
14
    COUNTER_UPDATE(_parquet_profile.decode_null_map_time, _column_statistics.decode_null_map_time);
1642
14
    COUNTER_UPDATE(_parquet_profile.variant_direct_typed_value_read_rows,
1643
14
                   _column_statistics.variant_direct_typed_value_read_rows);
1644
14
    COUNTER_UPDATE(_parquet_profile.variant_rowwise_read_rows,
1645
14
                   _column_statistics.variant_rowwise_read_rows);
1646
14
}
1647
1648
14
void ParquetReader::_collect_profile_before_close() {
1649
14
    _collect_profile();
1650
14
}
1651
1652
} // namespace doris