Coverage Report

Created: 2026-04-20 11:15

be/src/format/parquet/vparquet_reader.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/parquet/vparquet_reader.h"
19
20
#include <gen_cpp/Metrics_types.h>
21
#include <gen_cpp/PlanNodes_types.h>
22
#include <gen_cpp/parquet_types.h>
23
#include <glog/logging.h>
24
25
#include <algorithm>
26
#include <functional>
27
#include <utility>
28
29
#include "common/config.h"
30
#include "common/status.h"
31
#include "core/block/block.h"
32
#include "core/block/column_with_type_and_name.h"
33
#include "core/column/column.h"
34
#include "core/data_type/define_primitive_type.h"
35
#include "core/typeid_cast.h"
36
#include "core/types.h"
37
#include "exec/scan/file_scanner.h"
38
#include "exprs/vbloom_predicate.h"
39
#include "exprs/vdirect_in_predicate.h"
40
#include "exprs/vexpr.h"
41
#include "exprs/vexpr_context.h"
42
#include "exprs/vin_predicate.h"
43
#include "exprs/vruntimefilter_wrapper.h"
44
#include "exprs/vslot_ref.h"
45
#include "exprs/vtopn_pred.h"
46
#include "format/column_type_convert.h"
47
#include "format/parquet/parquet_block_split_bloom_filter.h"
48
#include "format/parquet/parquet_common.h"
49
#include "format/parquet/parquet_predicate.h"
50
#include "format/parquet/parquet_thrift_util.h"
51
#include "format/parquet/schema_desc.h"
52
#include "format/parquet/vparquet_file_metadata.h"
53
#include "format/parquet/vparquet_group_reader.h"
54
#include "format/parquet/vparquet_page_index.h"
55
#include "information_schema/schema_scanner.h"
56
#include "io/file_factory.h"
57
#include "io/fs/buffered_reader.h"
58
#include "io/fs/file_reader.h"
59
#include "io/fs/file_reader_writer_fwd.h"
60
#include "io/fs/tracing_file_reader.h"
61
#include "runtime/descriptors.h"
62
#include "util/slice.h"
63
#include "util/string_util.h"
64
#include "util/timezone_utils.h"
65
66
namespace cctz {
67
class time_zone;
68
} // namespace cctz
69
namespace doris {
70
class RowDescriptor;
71
class RuntimeState;
72
class SlotDescriptor;
73
class TupleDescriptor;
74
namespace io {
75
struct IOContext;
76
enum class FileCachePolicy : uint8_t;
77
} // namespace io
78
class Block;
79
} // namespace doris
80
81
namespace doris {
82
83
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
84
                             const TFileRangeDesc& range, size_t batch_size,
85
                             const cctz::time_zone* ctz, io::IOContext* io_ctx, RuntimeState* state,
86
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
87
234
        : _profile(profile),
88
234
          _scan_params(params),
89
234
          _scan_range(range),
90
234
          _batch_size(std::max(batch_size, _MIN_BATCH_SIZE)),
91
234
          _range_start_offset(range.start_offset),
92
234
          _range_size(range.size),
93
234
          _ctz(ctz),
94
234
          _io_ctx(io_ctx),
95
234
          _state(state),
96
234
          _enable_lazy_mat(enable_lazy_mat),
97
          _enable_filter_by_min_max(
98
234
                  state == nullptr ? true
99
234
                                   : state->query_options().enable_parquet_filter_by_min_max),
100
          _enable_filter_by_bloom_filter(
101
234
                  state == nullptr ? true
102
234
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
103
234
    _meta_cache = meta_cache;
104
234
    _init_profile();
105
234
    _init_system_properties();
106
234
    _init_file_description();
107
234
}
108
109
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
110
                             const TFileRangeDesc& range, size_t batch_size,
111
                             const cctz::time_zone* ctz,
112
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
113
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
114
0
        : _profile(profile),
115
0
          _scan_params(params),
116
0
          _scan_range(range),
117
0
          _batch_size(std::max(batch_size, _MIN_BATCH_SIZE)),
118
0
          _range_start_offset(range.start_offset),
119
0
          _range_size(range.size),
120
0
          _ctz(ctz),
121
0
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
122
0
          _io_ctx_holder(std::move(io_ctx_holder)),
123
0
          _state(state),
124
0
          _enable_lazy_mat(enable_lazy_mat),
125
          _enable_filter_by_min_max(
126
0
                  state == nullptr ? true
127
0
                                   : state->query_options().enable_parquet_filter_by_min_max),
128
          _enable_filter_by_bloom_filter(
129
0
                  state == nullptr ? true
130
0
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
131
0
    _meta_cache = meta_cache;
132
0
    _init_profile();
133
0
    _init_system_properties();
134
0
    _init_file_description();
135
0
}
136
137
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
138
                             io::IOContext* io_ctx, RuntimeState* state, FileMetaCache* meta_cache,
139
                             bool enable_lazy_mat)
140
5
        : _profile(nullptr),
141
5
          _scan_params(params),
142
5
          _scan_range(range),
143
5
          _io_ctx(io_ctx),
144
5
          _state(state),
145
5
          _enable_lazy_mat(enable_lazy_mat),
146
          _enable_filter_by_min_max(
147
5
                  state == nullptr ? true
148
5
                                   : state->query_options().enable_parquet_filter_by_min_max),
149
          _enable_filter_by_bloom_filter(
150
5
                  state == nullptr ? true
151
5
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
152
5
    _meta_cache = meta_cache;
153
5
    _init_system_properties();
154
5
    _init_file_description();
155
5
}
156
157
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
158
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
159
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
160
116
        : _profile(nullptr),
161
116
          _scan_params(params),
162
116
          _scan_range(range),
163
116
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
164
116
          _io_ctx_holder(std::move(io_ctx_holder)),
165
116
          _state(state),
166
116
          _enable_lazy_mat(enable_lazy_mat),
167
          _enable_filter_by_min_max(
168
116
                  state == nullptr ? true
169
116
                                   : state->query_options().enable_parquet_filter_by_min_max),
170
          _enable_filter_by_bloom_filter(
171
116
                  state == nullptr ? true
172
116
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
173
116
    _meta_cache = meta_cache;
174
116
    _init_system_properties();
175
116
    _init_file_description();
176
116
}
177
178
355
ParquetReader::~ParquetReader() {
179
355
    _close_internal();
180
355
}
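
For orientation, a minimal, hypothetical caller-side sketch using the first constructor signature listed above; the scan-parameter objects and the follow-up init/read calls are assumed to be provided elsewhere and are not part of this file.

#include "format/parquet/vparquet_reader.h"

// Hypothetical usage only: batch_size is clamped to _MIN_BATCH_SIZE inside the
// constructor, and both ~ParquetReader() and close() route to _close_internal().
doris::Status scan_one_range(doris::RuntimeProfile* profile,
                             const doris::TFileScanRangeParams& params,
                             const doris::TFileRangeDesc& range, const cctz::time_zone* ctz,
                             doris::io::IOContext* io_ctx, doris::RuntimeState* state) {
    doris::ParquetReader reader(profile, params, range, /*batch_size=*/4096, ctz, io_ctx,
                                state, /*meta_cache=*/nullptr, /*enable_lazy_mat=*/true);
    // ... init_reader() and the get-next-block loop would go here ...
    return reader.close();
}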
181
182
#ifdef BE_TEST
183
// for unit test
184
void ParquetReader::set_file_reader(io::FileReaderSPtr file_reader) {
185
    _file_reader = file_reader;
186
    _tracing_file_reader = file_reader;
187
}
188
#endif
189
190
234
void ParquetReader::_init_profile() {
191
234
    if (_profile != nullptr) {
192
186
        static const char* parquet_profile = "ParquetReader";
193
186
        ADD_TIMER_WITH_LEVEL(_profile, parquet_profile, 1);
194
195
186
        _parquet_profile.filtered_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
196
186
                _profile, "RowGroupsFiltered", TUnit::UNIT, parquet_profile, 1);
197
186
        _parquet_profile.filtered_row_groups_by_min_max = ADD_CHILD_COUNTER_WITH_LEVEL(
198
186
                _profile, "RowGroupsFilteredByMinMax", TUnit::UNIT, parquet_profile, 1);
199
186
        _parquet_profile.filtered_row_groups_by_bloom_filter = ADD_CHILD_COUNTER_WITH_LEVEL(
200
186
                _profile, "RowGroupsFilteredByBloomFilter", TUnit::UNIT, parquet_profile, 1);
201
186
        _parquet_profile.to_read_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
202
186
                _profile, "RowGroupsReadNum", TUnit::UNIT, parquet_profile, 1);
203
186
        _parquet_profile.total_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
204
186
                _profile, "RowGroupsTotalNum", TUnit::UNIT, parquet_profile, 1);
205
186
        _parquet_profile.filtered_group_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
206
186
                _profile, "FilteredRowsByGroup", TUnit::UNIT, parquet_profile, 1);
207
186
        _parquet_profile.filtered_page_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
208
186
                _profile, "FilteredRowsByPage", TUnit::UNIT, parquet_profile, 1);
209
186
        _parquet_profile.lazy_read_filtered_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
210
186
                _profile, "FilteredRowsByLazyRead", TUnit::UNIT, parquet_profile, 1);
211
186
        _parquet_profile.filtered_bytes = ADD_CHILD_COUNTER_WITH_LEVEL(
212
186
                _profile, "FilteredBytes", TUnit::BYTES, parquet_profile, 1);
213
186
        _parquet_profile.raw_rows_read = ADD_CHILD_COUNTER_WITH_LEVEL(
214
186
                _profile, "RawRowsRead", TUnit::UNIT, parquet_profile, 1);
215
186
        _parquet_profile.column_read_time =
216
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ColumnReadTime", parquet_profile, 1);
217
186
        _parquet_profile.parse_meta_time =
218
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseMetaTime", parquet_profile, 1);
219
186
        _parquet_profile.parse_footer_time =
220
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseFooterTime", parquet_profile, 1);
221
186
        _parquet_profile.file_reader_create_time =
222
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "FileReaderCreateTime", parquet_profile, 1);
223
186
        _parquet_profile.open_file_num =
224
186
                ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "FileNum", TUnit::UNIT, parquet_profile, 1);
225
186
        _parquet_profile.page_index_read_calls =
226
186
                ADD_COUNTER_WITH_LEVEL(_profile, "PageIndexReadCalls", TUnit::UNIT, 1);
227
186
        _parquet_profile.page_index_filter_time =
228
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexFilterTime", parquet_profile, 1);
229
186
        _parquet_profile.read_page_index_time =
230
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexReadTime", parquet_profile, 1);
231
186
        _parquet_profile.parse_page_index_time =
232
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexParseTime", parquet_profile, 1);
233
186
        _parquet_profile.row_group_filter_time =
234
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "RowGroupFilterTime", parquet_profile, 1);
235
186
        _parquet_profile.file_footer_read_calls =
236
186
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterReadCalls", TUnit::UNIT, 1);
237
186
        _parquet_profile.file_footer_hit_cache =
238
186
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterHitCache", TUnit::UNIT, 1);
239
186
        _parquet_profile.decompress_time =
240
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecompressTime", parquet_profile, 1);
241
186
        _parquet_profile.decompress_cnt = ADD_CHILD_COUNTER_WITH_LEVEL(
242
186
                _profile, "DecompressCount", TUnit::UNIT, parquet_profile, 1);
243
186
        _parquet_profile.page_read_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
244
186
                _profile, "PageReadCount", TUnit::UNIT, parquet_profile, 1);
245
186
        _parquet_profile.page_cache_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
246
186
                _profile, "PageCacheWriteCount", TUnit::UNIT, parquet_profile, 1);
247
186
        _parquet_profile.page_cache_compressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
248
186
                _profile, "PageCacheCompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
249
186
        _parquet_profile.page_cache_decompressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
250
186
                _profile, "PageCacheDecompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
251
186
        _parquet_profile.page_cache_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
252
186
                _profile, "PageCacheHitCount", TUnit::UNIT, parquet_profile, 1);
253
186
        _parquet_profile.page_cache_missing_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
254
186
                _profile, "PageCacheMissingCount", TUnit::UNIT, parquet_profile, 1);
255
186
        _parquet_profile.page_cache_compressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
256
186
                _profile, "PageCacheCompressedHitCount", TUnit::UNIT, parquet_profile, 1);
257
186
        _parquet_profile.page_cache_decompressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
258
186
                _profile, "PageCacheDecompressedHitCount", TUnit::UNIT, parquet_profile, 1);
259
186
        _parquet_profile.decode_header_time =
260
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderDecodeTime", parquet_profile, 1);
261
186
        _parquet_profile.read_page_header_time =
262
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderReadTime", parquet_profile, 1);
263
186
        _parquet_profile.decode_value_time =
264
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeValueTime", parquet_profile, 1);
265
186
        _parquet_profile.decode_dict_time =
266
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeDictTime", parquet_profile, 1);
267
186
        _parquet_profile.decode_level_time =
268
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeLevelTime", parquet_profile, 1);
269
186
        _parquet_profile.decode_null_map_time =
270
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeNullMapTime", parquet_profile, 1);
271
186
        _parquet_profile.skip_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
272
186
                _profile, "SkipPageHeaderNum", TUnit::UNIT, parquet_profile, 1);
273
186
        _parquet_profile.parse_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
274
186
                _profile, "ParsePageHeaderNum", TUnit::UNIT, parquet_profile, 1);
275
186
        _parquet_profile.predicate_filter_time =
276
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PredicateFilterTime", parquet_profile, 1);
277
186
        _parquet_profile.dict_filter_rewrite_time =
278
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DictFilterRewriteTime", parquet_profile, 1);
279
186
        _parquet_profile.bloom_filter_read_time =
280
186
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "BloomFilterReadTime", parquet_profile, 1);
281
186
    }
282
234
}
283
284
150
Status ParquetReader::close() {
285
150
    _close_internal();
286
150
    return Status::OK();
287
150
}
288
289
505
void ParquetReader::_close_internal() {
290
505
    if (!_closed) {
291
355
        _closed = true;
292
355
    }
293
505
}
294
295
494
Status ParquetReader::_open_file() {
296
494
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
297
0
        return Status::EndOfFile("stop");
298
0
    }
299
494
    if (_file_reader == nullptr) {
300
267
        SCOPED_RAW_TIMER(&_reader_statistics.file_reader_create_time);
301
267
        ++_reader_statistics.open_file_num;
302
267
        _file_description.mtime =
303
267
                _scan_range.__isset.modification_time ? _scan_range.modification_time : 0;
304
267
        io::FileReaderOptions reader_options =
305
267
                FileFactory::get_reader_options(_state, _file_description);
306
267
        _file_reader = DORIS_TRY(io::DelegateReader::create_file_reader(
307
267
                _profile, _system_properties, _file_description, reader_options,
308
267
                io::DelegateReader::AccessMode::RANDOM, _io_ctx));
309
267
        _tracing_file_reader = _io_ctx ? std::make_shared<io::TracingFileReader>(
310
267
                                                 _file_reader, _io_ctx->file_reader_stats)
311
267
                                       : _file_reader;
312
267
    }
313
314
494
    if (_file_metadata == nullptr) {
315
336
        SCOPED_RAW_TIMER(&_reader_statistics.parse_footer_time);
316
336
        if (_tracing_file_reader->size() <= sizeof(PARQUET_VERSION_NUMBER)) {
317
            // Some systems may generate a parquet file containing only the 4-byte magic: PAR1.
318
            // Treat such a file as empty.
319
0
            return Status::EndOfFile("open file failed, empty parquet file {} with size: {}",
320
0
                                     _scan_range.path, _tracing_file_reader->size());
321
0
        }
322
336
        size_t meta_size = 0;
323
336
        bool enable_mapping_varbinary = _scan_params.__isset.enable_mapping_varbinary
324
336
                                                ? _scan_params.enable_mapping_varbinary
325
336
                                                : false;
326
336
        bool enable_mapping_timestamp_tz = _scan_params.__isset.enable_mapping_timestamp_tz
327
336
                                                   ? _scan_params.enable_mapping_timestamp_tz
328
336
                                                   : false;
329
336
        if (_meta_cache == nullptr) {
330
            // Wrap _file_metadata in a unique_ptr so that it is released automatically.
331
164
            RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
332
164
                                                &meta_size, _io_ctx, enable_mapping_varbinary,
333
164
                                                enable_mapping_timestamp_tz));
334
163
            _file_metadata = _file_metadata_ptr.get();
335
            // Parse the magic number and the footer metadata.
336
163
            _reader_statistics.file_footer_read_calls += 1;
337
172
        } else {
338
172
            const auto& file_meta_cache_key =
339
172
                    FileMetaCache::get_key(_tracing_file_reader, _file_description);
340
172
            if (!_meta_cache->lookup(file_meta_cache_key, &_meta_cache_handle)) {
341
135
                RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
342
135
                                                    &meta_size, _io_ctx, enable_mapping_varbinary,
343
135
                                                    enable_mapping_timestamp_tz));
344
                // _file_metadata_ptr.release(): ownership of _file_metadata is transferred to _meta_cache_handle
345
135
                _meta_cache->insert(file_meta_cache_key, _file_metadata_ptr.release(),
346
135
                                    &_meta_cache_handle);
347
135
                _file_metadata = _meta_cache_handle.data<FileMetaData>();
348
135
                _reader_statistics.file_footer_read_calls += 1;
349
135
            } else {
350
37
                _reader_statistics.file_footer_hit_cache++;
351
37
            }
352
172
            _file_metadata = _meta_cache_handle.data<FileMetaData>();
353
172
        }
354
355
335
        if (_file_metadata == nullptr) {
356
0
            return Status::InternalError("failed to get file meta data: {}",
357
0
                                         _file_description.path);
358
0
        }
359
335
    }
360
493
    return Status::OK();
361
494
}
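
As an aside on the size check above: a well-formed parquet file carries the 4-byte magic PAR1 at both ends, with the thrift footer and its 4-byte length in between, so a file no larger than the magic itself cannot contain any metadata. A tiny sketch, with the constant restated purely for illustration:

// Illustrative stand-in for the PARQUET_VERSION_NUMBER constant used above.
constexpr char kParquetMagic[4] = {'P', 'A', 'R', '1'};
// Minimal layout: magic + footer + 4-byte footer length + magic, so any file whose
// size is <= sizeof(kParquetMagic) is reported as EndOfFile by _open_file().
static_assert(sizeof(kParquetMagic) == 4, "parquet magic is 4 bytes");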
362
363
176
Status ParquetReader::get_file_metadata_schema(const FieldDescriptor** ptr) {
364
176
    RETURN_IF_ERROR(_open_file());
365
176
    DCHECK(_file_metadata != nullptr);
366
176
    *ptr = &_file_metadata->schema();
367
176
    return Status::OK();
368
176
}
369
370
355
void ParquetReader::_init_system_properties() {
371
355
    if (_scan_range.__isset.file_type) {
372
        // for compatibility
373
233
        _system_properties.system_type = _scan_range.file_type;
374
233
    } else {
375
122
        _system_properties.system_type = _scan_params.file_type;
376
122
    }
377
355
    _system_properties.properties = _scan_params.properties;
378
355
    _system_properties.hdfs_params = _scan_params.hdfs_params;
379
355
    if (_scan_params.__isset.broker_addresses) {
380
18
        _system_properties.broker_addresses.assign(_scan_params.broker_addresses.begin(),
381
18
                                                   _scan_params.broker_addresses.end());
382
18
    }
383
355
}
384
385
355
void ParquetReader::_init_file_description() {
386
355
    _file_description.path = _scan_range.path;
387
355
    _file_description.file_size = _scan_range.__isset.file_size ? _scan_range.file_size : -1;
388
355
    if (_scan_range.__isset.fs_name) {
389
0
        _file_description.fs_name = _scan_range.fs_name;
390
0
    }
391
355
    if (_scan_range.__isset.file_cache_admission) {
392
118
        _file_description.file_cache_admission = _scan_range.file_cache_admission;
393
118
    }
394
355
}
395
396
145
Status ParquetReader::on_before_init_reader(ReaderInitContext* ctx) {
397
145
    _column_descs = ctx->column_descs;
398
145
    _fill_col_name_to_block_idx = ctx->col_name_to_block_idx;
399
145
    RETURN_IF_ERROR(
400
145
            _extract_partition_values(*ctx->range, ctx->tuple_descriptor, _fill_partition_values));
401
1.00k
    for (auto& desc : *ctx->column_descs) {
402
1.00k
        if (desc.category == ColumnCategory::REGULAR ||
403
1.00k
            desc.category == ColumnCategory::GENERATED) {
404
995
            ctx->column_names.push_back(desc.name);
405
995
        } else if (desc.category == ColumnCategory::SYNTHESIZED &&
406
11
                   desc.name.starts_with(BeConsts::GLOBAL_ROWID_COL)) {
407
9
            auto topn_row_id_column_iter = _create_topn_row_id_column_iterator();
408
9
            this->register_synthesized_column_handler(
409
9
                    desc.name,
410
9
                    [iter = std::move(topn_row_id_column_iter), this, &desc](
411
19
                            Block* block, size_t rows) -> Status {
412
19
                        return fill_topn_row_id(iter, desc.name, block, rows);
413
19
                    });
414
9
            continue;
415
9
        }
416
1.00k
    }
417
418
    // Build table_info_node from Parquet file metadata with case-insensitive recursive matching.
419
    // File is already opened by init_reader before this hook, so metadata is available.
420
    // tuple_descriptor may be null in unit tests that only set column_descs.
421
145
    if (ctx->tuple_descriptor != nullptr) {
422
143
        const FieldDescriptor* field_desc = nullptr;
423
143
        RETURN_IF_ERROR(get_file_metadata_schema(&field_desc));
424
143
        RETURN_IF_ERROR(TableSchemaChangeHelper::BuildTableInfoUtil::by_parquet_name(
425
143
                ctx->tuple_descriptor, *field_desc, ctx->table_info_node));
426
143
    }
427
428
145
    return Status::OK();
429
145
}
430
431
202
Status ParquetReader::_open_file_reader(ReaderInitContext* /*ctx*/) {
432
202
    return _open_file();
433
202
}
434
435
202
Status ParquetReader::_do_init_reader(ReaderInitContext* base_ctx) {
436
202
    auto* ctx = checked_context_cast<ParquetInitContext>(base_ctx);
437
202
    _col_name_to_block_idx = base_ctx->col_name_to_block_idx;
438
202
    _tuple_descriptor = ctx->tuple_descriptor;
439
202
    _row_descriptor = ctx->row_descriptor;
440
202
    _colname_to_slot_id = ctx->colname_to_slot_id;
441
202
    _not_single_slot_filter_conjuncts = ctx->not_single_slot_filter_conjuncts;
442
202
    _slot_id_to_filter_conjuncts = ctx->slot_id_to_filter_conjuncts;
443
202
    _filter_groups = ctx->filter_groups;
444
202
    _table_info_node_ptr = base_ctx->table_info_node;
445
202
    _column_ids = base_ctx->column_ids;
446
202
    _filter_column_ids = base_ctx->filter_column_ids;
447
448
    // _open_file_reader (called by init_reader NVI before hooks) must have opened the file.
449
202
    DCHECK(_file_metadata != nullptr)
450
0
            << "ParquetReader::_do_init_reader called without _open_file_reader";
451
202
    _t_metadata = &(_file_metadata->to_thrift());
452
453
202
    SCOPED_RAW_TIMER(&_reader_statistics.parse_meta_time);
454
202
    _total_groups = _t_metadata->row_groups.size();
455
202
    if (_total_groups == 0) {
456
0
        return Status::EndOfFile("init reader failed, empty parquet file: " + _scan_range.path);
457
0
    }
458
202
    _current_row_group_index = RowGroupReader::RowGroupIndex {-1, 0, 0};
459
460
    // Compute missing columns and file↔table column mapping.
461
    // This runs in _do_init_reader (not on_before_init_reader) because table-format readers
462
    // (Iceberg, Paimon, Hive, Hudi) override on_before_init_reader completely.
463
202
    if (has_column_descs()) {
464
160
        _fill_missing_cols.clear();
465
160
        _fill_missing_defaults.clear();
466
1.04k
        for (const auto& col_name : base_ctx->column_names) {
467
1.04k
            if (!_table_info_node_ptr->children_column_exists(col_name)) {
468
6
                _fill_missing_cols.insert(col_name);
469
6
            }
470
1.04k
        }
471
160
        if (_column_descs && !_fill_missing_cols.empty()) {
472
16
            for (const auto& desc : *_column_descs) {
473
16
                if (_fill_missing_cols.contains(desc.name) &&
474
16
                    !_fill_partition_values.contains(desc.name)) {
475
1
                    _fill_missing_defaults[desc.name] = desc.default_expr;
476
1
                }
477
16
            }
478
6
        }
479
160
    }
480
    // Resolve file-column ↔ table-column mapping in file-schema order.
481
    // _init_read_columns handles both normal path (missing cols populated above)
482
    // and standalone path (_fill_missing_cols empty, _table_info_node_ptr may be null).
483
202
    _init_read_columns(base_ctx->column_names);
484
485
    // build column predicates for column lazy read
486
202
    if (ctx->conjuncts != nullptr) {
487
202
        _lazy_read_ctx.conjuncts = *ctx->conjuncts;
488
202
    }
489
202
    if (ctx->slot_id_to_predicates != nullptr) {
490
202
        _lazy_read_ctx.slot_id_to_predicates = *ctx->slot_id_to_predicates;
491
202
    }
492
493
    // ---- Inlined set_fill_columns logic (partition/missing/synthesized classification) ----
494
495
    // 1. Collect predicate columns from conjuncts for lazy materialization
496
202
    std::unordered_map<std::string, std::pair<uint32_t, int>> predicate_columns;
497
202
    _collect_predicate_columns_from_conjuncts(predicate_columns);
498
499
    // 2. Classify read/partition/missing/synthesized columns into lazy vs predicate groups
500
202
    _classify_columns_for_lazy_read(predicate_columns, _fill_partition_values,
501
202
                                    _fill_missing_defaults);
502
503
    // 3. Populate col_names vectors for ColumnProcessor path
504
202
    for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
505
5
        _lazy_read_ctx.predicate_partition_col_names.emplace_back(kv.first);
506
5
    }
507
202
    for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
508
0
        _lazy_read_ctx.predicate_missing_col_names.emplace_back(kv.first);
509
0
    }
510
202
    for (auto& kv : _lazy_read_ctx.partition_columns) {
511
5
        _lazy_read_ctx.partition_col_names.emplace_back(kv.first);
512
5
    }
513
202
    for (auto& kv : _lazy_read_ctx.missing_columns) {
514
1
        _lazy_read_ctx.missing_col_names.emplace_back(kv.first);
515
1
    }
516
517
202
    if (_filter_groups && (_total_groups == 0 || _t_metadata->num_rows == 0 || _range_size < 0)) {
518
0
        return Status::EndOfFile("No row group to read");
519
0
    }
520
521
202
    return Status::OK();
522
202
}
523
524
202
void ParquetReader::_init_read_columns(const std::vector<std::string>& column_names) {
525
    // Build file_col_name → table_col_name map, skipping missing columns.
526
    // Must iterate file schema in physical order so that _generate_random_access_ranges
527
    // sees monotonically increasing chunk offsets.
528
202
    auto schema_desc = _file_metadata->schema();
529
202
    std::map<std::string, std::string> required_file_columns;
530
1.39k
    for (const auto& col_name : column_names) {
531
1.39k
        if (_fill_missing_cols.contains(col_name)) {
532
6
            continue;
533
6
        }
534
1.39k
        std::string file_col = col_name;
535
1.39k
        if (_table_info_node_ptr && _table_info_node_ptr->children_column_exists(col_name)) {
536
1.39k
            file_col = _table_info_node_ptr->children_file_column_name(col_name);
537
1.39k
        }
538
1.39k
        required_file_columns[file_col] = col_name;
539
1.39k
    }
540
2.05k
    for (int i = 0; i < schema_desc.size(); ++i) {
541
1.85k
        const auto& name = schema_desc.get_column(i)->name;
542
1.85k
        if (required_file_columns.contains(name)) {
543
1.39k
            _read_file_columns.emplace_back(name);
544
1.39k
            _read_table_columns.emplace_back(required_file_columns[name]);
545
1.39k
            _read_table_columns_set.insert(required_file_columns[name]);
546
1.39k
        }
547
1.85k
    }
548
202
}
549
550
3
bool ParquetReader::_exists_in_file(const std::string& expr_name) const {
551
    // `_read_table_columns_set` ensures that only columns actually read are subject to min-max filtering.
552
    // This mainly handles the case where partition columns also exist in the file. The information is not
553
    // encoded in `_table_info_node_ptr` because Iceberg and Hudi have inconsistent requirements for that node:
554
    // Iceberg partition evolution needs to read partition columns from the file, while Hudi with
555
    // `hoodie.datasource.write.drop.partition.columns=false` does not need to read them from the file.
556
3
    return _table_info_node_ptr->children_column_exists(expr_name) &&
557
3
           _read_table_columns_set.contains(expr_name);
558
3
}
559
560
3
bool ParquetReader::_type_matches(const int cid) const {
561
3
    auto* slot = _tuple_descriptor->slots()[cid];
562
3
    auto table_col_type = remove_nullable(slot->type());
563
564
3
    const auto& file_col_name = _table_info_node_ptr->children_file_column_name(slot->col_name());
565
3
    const auto& file_col_type =
566
3
            remove_nullable(_file_metadata->schema().get_column(file_col_name)->data_type);
567
568
3
    return (table_col_type->get_primitive_type() == file_col_type->get_primitive_type()) &&
569
3
           !is_complex_type(table_col_type->get_primitive_type());
570
3
}
571
572
void ParquetReader::_collect_predicate_columns_from_conjuncts(
573
202
        std::unordered_map<std::string, std::pair<uint32_t, int>>& predicate_columns) {
574
202
    std::function<void(VExpr * expr)> visit_slot = [&](VExpr* expr) {
575
45
        if (expr->is_slot_ref()) {
576
16
            VSlotRef* slot_ref = static_cast<VSlotRef*>(expr);
577
16
            auto expr_name = slot_ref->expr_name();
578
16
            predicate_columns.emplace(expr_name,
579
16
                                      std::make_pair(slot_ref->column_id(), slot_ref->slot_id()));
580
16
            if (slot_ref->column_id() == 0) {
581
6
                _lazy_read_ctx.resize_first_column = false;
582
6
            }
583
16
            return;
584
16
        }
585
29
        for (auto& child : expr->children()) {
586
29
            visit_slot(child.get());
587
29
        }
588
29
    };
589
590
202
    for (const auto& conjunct : _lazy_read_ctx.conjuncts) {
591
16
        auto expr = conjunct->root();
592
16
        if (expr->is_rf_wrapper()) {
593
0
            VRuntimeFilterWrapper* runtime_filter = assert_cast<VRuntimeFilterWrapper*>(expr.get());
594
0
            auto filter_impl = runtime_filter->get_impl();
595
0
            visit_slot(filter_impl.get());
596
16
        } else {
597
16
            visit_slot(expr.get());
598
16
        }
599
16
    }
600
601
202
    if (!_lazy_read_ctx.slot_id_to_predicates.empty()) {
602
127
        auto and_pred = AndBlockColumnPredicate::create_unique();
603
922
        for (const auto& entry : _lazy_read_ctx.slot_id_to_predicates) {
604
922
            for (const auto& pred : entry.second) {
605
                // Parquet shares _push_down_predicates for row-group/page min-max pruning and
606
                // bloom-filter evaluation, so this flag currently gates both predicate paths.
607
3
                if (!has_column_optimization(pred->col_name(), ColumnOptimizationTypes::MIN_MAX)) {
608
0
                    continue;
609
0
                }
610
3
                if (!_exists_in_file(pred->col_name()) || !_type_matches(pred->column_id())) {
611
0
                    continue;
612
0
                }
613
3
                and_pred->add_column_predicate(
614
3
                        SingleColumnBlockPredicate::create_unique(pred->clone(pred->column_id())));
615
3
            }
616
922
        }
617
127
        if (and_pred->num_of_column_predicate() > 0) {
618
3
            _push_down_predicates.push_back(std::move(and_pred));
619
3
        }
620
127
    }
621
202
}
622
623
void ParquetReader::_classify_columns_for_lazy_read(
624
        const std::unordered_map<std::string, std::pair<uint32_t, int>>&
625
                predicate_conjuncts_columns,
626
        const std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>&
627
                partition_columns,
628
136
        const std::unordered_map<std::string, VExprContextSPtr>& missing_columns) {
629
136
    const FieldDescriptor& schema = _file_metadata->schema();
630
136
    auto predicate_columns = predicate_conjuncts_columns;
631
136
#ifndef BE_TEST
632
136
    for (const auto& [col_name, _] : _generated_col_handlers) {
633
0
        int slot_id = -1;
634
0
        for (auto slot : _tuple_descriptor->slots()) {
635
0
            if (slot->col_name() == col_name) {
636
0
                slot_id = slot->id();
637
0
                break;
638
0
            }
639
0
        }
640
0
        DCHECK(slot_id != -1) << "slot id should not be -1 for generated column: " << col_name;
641
0
        auto column_index = _row_descriptor->get_column_id(slot_id);
642
0
        if (column_index == 0) {
643
0
            _lazy_read_ctx.resize_first_column = false;
644
0
        }
645
        // Assume generated columns are only used for predicate push-down.
646
0
        predicate_columns.emplace(col_name, std::make_pair(column_index, slot_id));
647
0
    }
648
649
136
    for (const auto& [col_name, _] : _synthesized_col_handlers) {
650
9
        int slot_id = -1;
651
31
        for (auto slot : _tuple_descriptor->slots()) {
652
31
            if (slot->col_name() == col_name) {
653
9
                slot_id = slot->id();
654
9
                break;
655
9
            }
656
31
        }
657
9
        DCHECK(slot_id != -1) << "slot id should not be -1 for synthesized column: " << col_name;
658
9
        auto column_index = _row_descriptor->get_column_id(slot_id);
659
9
        if (column_index == 0) {
660
0
            _lazy_read_ctx.resize_first_column = false;
661
0
        }
662
        // Synthesized columns always fill their data in the first phase.
663
9
        _lazy_read_ctx.all_predicate_col_ids.emplace_back(column_index);
664
9
    }
665
136
#endif
666
973
    for (auto& read_table_col : _read_table_columns) {
667
973
        _lazy_read_ctx.all_read_columns.emplace_back(read_table_col);
668
669
973
        auto file_column_name = _table_info_node_ptr->children_file_column_name(read_table_col);
670
973
        PrimitiveType column_type =
671
973
                schema.get_column(file_column_name)->data_type->get_primitive_type();
672
973
        if (is_complex_type(column_type)) {
673
278
            _lazy_read_ctx.has_complex_type = true;
674
278
        }
675
973
        if (predicate_columns.size() > 0) {
676
20
            auto iter = predicate_columns.find(read_table_col);
677
20
            if (iter == predicate_columns.end()) {
678
17
                _lazy_read_ctx.lazy_read_columns.emplace_back(read_table_col);
679
17
            } else {
680
3
                _lazy_read_ctx.predicate_columns.first.emplace_back(iter->first);
681
3
                _lazy_read_ctx.predicate_columns.second.emplace_back(iter->second.second);
682
3
                _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
683
3
            }
684
20
        }
685
973
    }
686
687
136
    for (auto& kv : partition_columns) {
688
2
        auto iter = predicate_columns.find(kv.first);
689
2
        if (iter == predicate_columns.end()) {
690
2
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
691
2
        } else {
692
0
            _lazy_read_ctx.predicate_partition_columns.emplace(kv.first, kv.second);
693
0
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
694
0
        }
695
2
    }
696
697
136
    for (auto& kv : missing_columns) {
698
1
        auto iter = predicate_columns.find(kv.first);
699
1
        if (iter != predicate_columns.end()) {
700
            // Handle predicates that reference a missing column, e.g. `missing_col == xx`, `missing_col IS NULL`, `missing_col IS NOT NULL`.
701
0
            if (_slot_id_to_filter_conjuncts->find(iter->second.second) !=
702
0
                _slot_id_to_filter_conjuncts->end()) {
703
0
                for (auto& ctx : _slot_id_to_filter_conjuncts->find(iter->second.second)->second) {
704
0
                    _lazy_read_ctx.missing_columns_conjuncts.emplace_back(ctx);
705
0
                }
706
0
            }
707
0
            _lazy_read_ctx.predicate_missing_columns.emplace(kv.first, kv.second);
708
0
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
709
1
        } else {
710
1
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
711
1
        }
712
1
    }
713
714
136
    if (_enable_lazy_mat && _lazy_read_ctx.predicate_columns.first.size() > 0 &&
715
136
        _lazy_read_ctx.lazy_read_columns.size() > 0) {
716
1
        _lazy_read_ctx.can_lazy_read = true;
717
1
    }
718
719
136
    if (!_lazy_read_ctx.can_lazy_read) {
720
135
        for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
721
0
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
722
0
        }
723
135
        for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
724
0
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
725
0
        }
726
135
    }
727
136
}
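
A condensed restatement (not the real API) of the lazy-read decision near the end of the function above: lazy materialization is turned on only when there is at least one predicate column to filter on and at least one remaining column whose materialization can be deferred.

// Hypothetical helper mirroring the condition that sets _lazy_read_ctx.can_lazy_read.
static bool can_lazy_read(bool enable_lazy_mat, size_t predicate_col_count, size_t lazy_col_count) {
    return enable_lazy_mat && predicate_col_count > 0 && lazy_col_count > 0;
}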
728
729
// init file reader and file metadata for parsing schema
730
116
Status ParquetReader::init_schema_reader() {
731
116
    RETURN_IF_ERROR(_open_file());
732
115
    _t_metadata = &(_file_metadata->to_thrift());
733
115
    return Status::OK();
734
116
}
735
736
Status ParquetReader::get_parsed_schema(std::vector<std::string>* col_names,
737
115
                                        std::vector<DataTypePtr>* col_types) {
738
115
    _total_groups = _t_metadata->row_groups.size();
739
115
    auto schema_desc = _file_metadata->schema();
740
749
    for (int i = 0; i < schema_desc.size(); ++i) {
741
        // Collect each column's name and its nullable data type from the file schema.
742
634
        col_names->emplace_back(schema_desc.get_column(i)->name);
743
634
        col_types->emplace_back(make_nullable(schema_desc.get_column(i)->data_type));
744
634
    }
745
115
    return Status::OK();
746
115
}
747
748
Status ParquetReader::_get_columns_impl(
749
150
        std::unordered_map<std::string, DataTypePtr>* name_to_type) {
750
150
    const auto& schema_desc = _file_metadata->schema();
751
150
    std::unordered_set<std::string> column_names;
752
150
    schema_desc.get_column_names(&column_names);
753
1.40k
    for (auto& name : column_names) {
754
1.40k
        auto field = schema_desc.get_column(name);
755
1.40k
        name_to_type->emplace(name, field->data_type);
756
1.40k
    }
757
150
    return Status::OK();
758
150
}
759
760
226
Status ParquetReader::_do_get_next_block(Block* block, size_t* read_rows, bool* eof) {
761
226
    if (_current_group_reader == nullptr || _row_group_eof) {
762
212
        Status st = _next_row_group_reader();
763
212
        if (!st.ok() && !st.is<ErrorCode::END_OF_FILE>()) {
764
0
            return st;
765
0
        }
766
212
        if (_current_group_reader == nullptr || _row_group_eof || st.is<ErrorCode::END_OF_FILE>()) {
767
0
            _current_group_reader.reset(nullptr);
768
0
            _row_group_eof = true;
769
0
            *read_rows = 0;
770
0
            *eof = true;
771
0
            return Status::OK();
772
0
        }
773
212
    }
774
775
    // Limit memory per batch for load paths.
776
    // _load_bytes_per_row is updated after each batch so the *next* call pre-shrinks _batch_size
777
    // before reading, ensuring the current batch is already within the limit (from call 2 onward).
778
226
    const int64_t max_block_bytes =
779
226
            (_state != nullptr && _state->query_type() == TQueryType::LOAD &&
780
226
             config::load_reader_max_block_bytes > 0)
781
226
                    ? config::load_reader_max_block_bytes
782
226
                    : 0;
783
226
    if (max_block_bytes > 0 && _load_bytes_per_row > 0) {
784
17
        _batch_size = std::max((size_t)1,
785
17
                               (size_t)((int64_t)max_block_bytes / (int64_t)_load_bytes_per_row));
786
17
    }
787
788
226
    SCOPED_RAW_TIMER(&_reader_statistics.column_read_time);
789
226
    Status batch_st =
790
226
            _current_group_reader->next_batch(block, _batch_size, read_rows, &_row_group_eof);
791
226
    if (batch_st.is<ErrorCode::END_OF_FILE>()) {
792
0
        block->clear_column_data();
793
0
        _current_group_reader.reset(nullptr);
794
0
        *read_rows = 0;
795
0
        *eof = true;
796
0
        return Status::OK();
797
0
    }
798
799
226
    if (!batch_st.ok()) {
800
0
        return Status::InternalError("Read parquet file {} failed, reason = {}", _scan_range.path,
801
0
                                     batch_st.to_string());
802
0
    }
803
804
226
    if (max_block_bytes > 0 && *read_rows > 0) {
805
45
        _load_bytes_per_row = block->bytes() / *read_rows;
806
45
    }
807
808
226
    if (_row_group_eof) {
809
212
        auto column_st = _current_group_reader->merged_column_statistics();
810
212
        _column_statistics.merge(column_st);
811
212
        _reader_statistics.lazy_read_filtered_rows +=
812
212
                _current_group_reader->lazy_read_filtered_rows();
813
212
        _reader_statistics.predicate_filter_time += _current_group_reader->predicate_filter_time();
814
212
        _reader_statistics.dict_filter_rewrite_time +=
815
212
                _current_group_reader->dict_filter_rewrite_time();
816
212
        if (_io_ctx) {
817
189
            _io_ctx->condition_cache_filtered_rows +=
818
189
                    _current_group_reader->condition_cache_filtered_rows();
819
189
        }
820
821
212
        if (_current_row_group_index.row_group_id + 1 == _total_groups) {
822
173
            *eof = true;
823
173
        } else {
824
39
            *eof = false;
825
39
        }
826
212
    }
827
226
    return Status::OK();
828
226
}
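
A worked example of the batch-size clamp above, using hypothetical numbers: if load_reader_max_block_bytes were 64 MB and the previous batch averaged 1 KiB per row, the next batch would be capped at 65536 rows.

#include <algorithm>
#include <cstdint>

// Hypothetical values; mirrors _batch_size = max(1, max_block_bytes / _load_bytes_per_row).
const int64_t max_block_bytes = 64LL * 1024 * 1024; // assumed config::load_reader_max_block_bytes
const int64_t load_bytes_per_row = 1024;            // measured from the previous batch
const size_t next_batch_size =
        std::max<size_t>(1, static_cast<size_t>(max_block_bytes / load_bytes_per_row)); // 65536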
829
830
RowGroupReader::PositionDeleteContext ParquetReader::_get_position_delete_ctx(
831
212
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index) {
832
212
    if (_delete_rows == nullptr) {
833
212
        return RowGroupReader::PositionDeleteContext(row_group.num_rows, row_group_index.first_row);
834
212
    }
835
0
    const int64_t* delete_rows = &(*_delete_rows)[0];
836
0
    const int64_t* delete_rows_end = delete_rows + _delete_rows->size();
837
0
    const int64_t* start_pos = std::lower_bound(delete_rows + _delete_rows_index, delete_rows_end,
838
0
                                                row_group_index.first_row);
839
0
    int64_t start_index = start_pos - delete_rows;
840
0
    const int64_t* end_pos = std::lower_bound(start_pos, delete_rows_end, row_group_index.last_row);
841
0
    int64_t end_index = end_pos - delete_rows;
842
0
    _delete_rows_index = end_index;
843
0
    return RowGroupReader::PositionDeleteContext(*_delete_rows, row_group.num_rows,
844
0
                                                 row_group_index.first_row, start_index, end_index);
845
212
}
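
To illustrate the lower_bound slicing above with hypothetical data: _delete_rows is sorted by global row id, and each row group claims the sub-range of delete positions that falls inside [first_row, last_row).

#include <algorithm>
#include <cstdint>
#include <vector>

int main() {
    std::vector<int64_t> delete_rows = {3, 10, 25, 40, 90}; // globally sorted delete positions
    int64_t first_row = 20, last_row = 80;                  // this row group's global row span
    auto start = std::lower_bound(delete_rows.begin(), delete_rows.end(), first_row);
    auto end = std::lower_bound(start, delete_rows.end(), last_row);
    // [start, end) now covers {25, 40}: the deletes that fall inside this row group.
    return static_cast<int>(end - start); // 2
}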
846
847
212
Status ParquetReader::_next_row_group_reader() {
848
212
    if (_current_group_reader != nullptr) {
849
39
        _current_group_reader->collect_profile_before_close();
850
39
    }
851
852
212
    RowRanges candidate_row_ranges;
853
214
    while (++_current_row_group_index.row_group_id < _total_groups) {
854
214
        const auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
855
214
        _current_row_group_index.first_row = _current_row_group_index.last_row;
856
214
        _current_row_group_index.last_row = _current_row_group_index.last_row + row_group.num_rows;
857
858
214
        if (_filter_groups && _is_misaligned_range_group(row_group)) {
859
0
            continue;
860
0
        }
861
862
214
        candidate_row_ranges.clear();
863
        // The range of rows to be read is determined by the push-down predicates.
864
214
        RETURN_IF_ERROR(_process_min_max_bloom_filter(
865
214
                _current_row_group_index, row_group, _push_down_predicates, &candidate_row_ranges));
866
867
214
        std::function<int64_t(const FieldSchema*)> column_compressed_size =
868
6.09k
                [&row_group, &column_compressed_size](const FieldSchema* field) -> int64_t {
869
6.09k
            if (field->physical_column_index >= 0) {
870
5.21k
                int parquet_col_id = field->physical_column_index;
871
5.21k
                if (row_group.columns[parquet_col_id].__isset.meta_data) {
872
5.21k
                    return row_group.columns[parquet_col_id].meta_data.total_compressed_size;
873
5.21k
                }
874
0
                return 0;
875
5.21k
            }
876
886
            int64_t size = 0;
877
4.47k
            for (const FieldSchema& child : field->children) {
878
4.47k
                size += column_compressed_size(&child);
879
4.47k
            }
880
886
            return size;
881
6.09k
        };
882
214
        int64_t group_size = 0; // only calculate the needed columns
883
1.62k
        for (auto& read_col : _read_file_columns) {
884
1.62k
            const FieldSchema* field = _file_metadata->schema().get_column(read_col);
885
1.62k
            group_size += column_compressed_size(field);
886
1.62k
        }
887
888
214
        _reader_statistics.read_rows += candidate_row_ranges.count();
889
214
        if (_io_ctx) {
890
191
            _io_ctx->file_reader_stats->read_rows += candidate_row_ranges.count();
891
191
        }
892
893
214
        if (candidate_row_ranges.count() != 0) {
894
            // need read this row group.
895
212
            _reader_statistics.read_row_groups++;
896
212
            _reader_statistics.filtered_page_rows +=
897
212
                    row_group.num_rows - candidate_row_ranges.count();
898
212
            break;
899
212
        } else {
900
            // this row group be filtered.
901
2
            _reader_statistics.filtered_row_groups++;
902
2
            _reader_statistics.filtered_bytes += group_size;
903
2
            _reader_statistics.filtered_group_rows += row_group.num_rows;
904
2
        }
905
214
    }
906
907
212
    if (_current_row_group_index.row_group_id == _total_groups) {
908
0
        _row_group_eof = true;
909
0
        _current_group_reader.reset(nullptr);
910
0
        return Status::EndOfFile("No next RowGroupReader");
911
0
    }
912
913
    // process page index and generate the ranges to read
914
212
    auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
915
916
212
    RowGroupReader::PositionDeleteContext position_delete_ctx =
917
212
            _get_position_delete_ctx(row_group, _current_row_group_index);
918
212
    io::FileReaderSPtr group_file_reader;
919
212
    if (typeid_cast<io::InMemoryFileReader*>(_file_reader.get())) {
920
        // InMemoryFileReader has the ability to merge small IO
921
122
        group_file_reader = _file_reader;
922
122
    } else {
923
90
        size_t avg_io_size = 0;
924
90
        const std::vector<io::PrefetchRange> io_ranges =
925
90
                _generate_random_access_ranges(_current_row_group_index, &avg_io_size);
926
90
        int64_t merged_read_slice_size = -1;
927
90
        if (_state != nullptr && _state->query_options().__isset.merge_read_slice_size) {
928
78
            merged_read_slice_size = _state->query_options().merge_read_slice_size;
929
78
        }
930
        // The underlying page reader will prefetch data column by column.
931
        // Using both MergeRangeFileReader and BufferedStreamReader simultaneously would waste a lot of memory.
932
90
        group_file_reader =
933
90
                avg_io_size < io::MergeRangeFileReader::SMALL_IO
934
90
                        ? std::make_shared<io::MergeRangeFileReader>(
935
90
                                  _profile, _file_reader, io_ranges, merged_read_slice_size)
936
90
                        : _file_reader;
937
90
    }
938
212
    _current_group_reader.reset(new RowGroupReader(
939
212
            _io_ctx ? std::make_shared<io::TracingFileReader>(group_file_reader,
940
189
                                                              _io_ctx->file_reader_stats)
941
212
                    : group_file_reader,
942
212
            _read_table_columns, _current_row_group_index.row_group_id, row_group, _ctz, _io_ctx,
943
212
            position_delete_ctx, _lazy_read_ctx, _state, _column_ids, _filter_column_ids));
944
212
    _row_group_eof = false;
945
946
212
    _current_group_reader->set_current_row_group_idx(_current_row_group_index);
947
212
    _current_group_reader->set_col_name_to_block_idx(_col_name_to_block_idx);
948
212
    if (_condition_cache_ctx) {
949
18
        _current_group_reader->set_condition_cache_context(_condition_cache_ctx);
950
18
    }
951
212
    _current_group_reader->set_table_format_reader(this);
952
953
212
    _current_group_reader->_table_info_node_ptr = _table_info_node_ptr;
954
212
    return _current_group_reader->init(_file_metadata->schema(), candidate_row_ranges, _col_offsets,
955
212
                                       _tuple_descriptor, _row_descriptor, _colname_to_slot_id,
956
212
                                       _not_single_slot_filter_conjuncts,
957
212
                                       _slot_id_to_filter_conjuncts);
958
212
}
959
960
std::vector<io::PrefetchRange> ParquetReader::_generate_random_access_ranges(
961
90
        const RowGroupReader::RowGroupIndex& group, size_t* avg_io_size) {
962
90
    std::vector<io::PrefetchRange> result;
963
90
    int64_t last_chunk_end = -1;
964
90
    size_t total_io_size = 0;
965
90
    std::function<void(const FieldSchema*, const tparquet::RowGroup&)> scalar_range =
966
1.74k
            [&](const FieldSchema* field, const tparquet::RowGroup& row_group) {
967
1.74k
                if (_column_ids.empty() ||
968
1.74k
                    _column_ids.find(field->get_column_id()) != _column_ids.end()) {
969
1.74k
                    if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
970
296
                        scalar_range(&field->children[0], row_group);
971
1.44k
                    } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
972
242
                        scalar_range(&field->children[0], row_group);
973
242
                        scalar_range(&field->children[1], row_group);
974
1.20k
                    } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
975
37
                        for (int i = 0; i < field->children.size(); ++i) {
976
26
                            scalar_range(&field->children[i], row_group);
977
26
                        }
978
1.19k
                    } else {
979
1.19k
                        const tparquet::ColumnChunk& chunk =
980
1.19k
                                row_group.columns[field->physical_column_index];
981
1.19k
                        auto& chunk_meta = chunk.meta_data;
982
1.19k
                        int64_t chunk_start = has_dict_page(chunk_meta)
983
1.19k
                                                      ? chunk_meta.dictionary_page_offset
984
1.19k
                                                      : chunk_meta.data_page_offset;
985
1.19k
                        int64_t chunk_end = chunk_start + chunk_meta.total_compressed_size;
986
1.19k
                        DCHECK_GE(chunk_start, last_chunk_end);
987
1.19k
                        result.emplace_back(chunk_start, chunk_end);
988
1.19k
                        total_io_size += chunk_meta.total_compressed_size;
989
1.19k
                        last_chunk_end = chunk_end;
990
1.19k
                    }
991
1.74k
                }
992
1.74k
            };
993
90
    const tparquet::RowGroup& row_group = _t_metadata->row_groups[group.row_group_id];
994
942
    for (const auto& read_col : _read_file_columns) {
995
942
        const FieldSchema* field = _file_metadata->schema().get_column(read_col);
996
942
        scalar_range(field, row_group);
997
942
    }
998
90
    if (!result.empty()) {
999
89
        *avg_io_size = total_io_size / result.size();
1000
89
    }
1001
90
    return result;
1002
90
}
1003
1004
234
bool ParquetReader::_is_misaligned_range_group(const tparquet::RowGroup& row_group) const {
1005
234
    int64_t start_offset = _get_column_start_offset(row_group.columns[0].meta_data);
1006
1007
234
    auto& last_column = row_group.columns[row_group.columns.size() - 1].meta_data;
1008
234
    int64_t end_offset = _get_column_start_offset(last_column) + last_column.total_compressed_size;
1009
1010
234
    int64_t row_group_mid = start_offset + (end_offset - start_offset) / 2;
1011
234
    if (!(row_group_mid >= _range_start_offset &&
1012
234
          row_group_mid < _range_start_offset + _range_size)) {
1013
0
        return true;
1014
0
    }
1015
234
    return false;
1016
234
}
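
A small numeric example of the midpoint rule above (all offsets hypothetical): a row group is read by this scanner only when the byte midpoint of its column chunks falls inside [_range_start_offset, _range_start_offset + _range_size), so each row group is claimed by exactly one scan range even if ranges cut through it.

#include <cassert>
#include <cstdint>

int main() {
    int64_t group_start = 1000, group_end = 5000;               // row group byte span
    int64_t mid = group_start + (group_end - group_start) / 2;  // 3000
    int64_t range_start = 0, range_size = 4096;                 // this reader's assigned range
    bool aligned = (mid >= range_start) && (mid < range_start + range_size); // true
    assert(aligned); // not misaligned: this reader owns the row group
    return 0;
}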
1017
1018
3
int64_t ParquetReader::get_total_rows() const {
1019
3
    if (!_t_metadata) return 0;
1020
3
    if (!_filter_groups) return _t_metadata->num_rows;
1021
3
    int64_t total = 0;
1022
18
    for (const auto& rg : _t_metadata->row_groups) {
1023
18
        if (!_is_misaligned_range_group(rg)) {
1024
18
            total += rg.num_rows;
1025
18
        }
1026
18
    }
1027
3
    return total;
1028
3
}
1029
1030
3
void ParquetReader::set_condition_cache_context(std::shared_ptr<ConditionCacheContext> ctx) {
1031
3
    _condition_cache_ctx = std::move(ctx);
1032
3
    if (!_condition_cache_ctx || !_t_metadata || !_filter_groups) {
1033
0
        return;
1034
0
    }
1035
    // Find the first assigned row group to compute base_granule.
1036
3
    int64_t first_row = 0;
1037
3
    for (const auto& rg : _t_metadata->row_groups) {
1038
3
        if (!_is_misaligned_range_group(rg)) {
1039
3
            _condition_cache_ctx->base_granule = first_row / ConditionCacheContext::GRANULE_SIZE;
1040
3
            return;
1041
3
        }
1042
0
        first_row += rg.num_rows;
1043
0
    }
1044
3
}
1045
1046
Status ParquetReader::_process_page_index_filter(
1047
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index,
1048
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1049
175
        RowRanges* candidate_row_ranges) {
1050
175
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
1051
0
        return Status::EndOfFile("stop");
1052
0
    }
1053
1054
175
    std::function<void()> read_whole_row_group = [&]() {
1055
175
        candidate_row_ranges->add(RowRange {0, row_group.num_rows});
1056
175
    };
1057
1058
    // Check whether the page index is enabled and whether the file actually contains one.
1059
175
    PageIndex page_index;
1060
175
    if (!config::enable_parquet_page_index || _colname_to_slot_id == nullptr ||
1061
175
        !page_index.check_and_get_page_index_ranges(row_group.columns)) {
1062
171
        read_whole_row_group();
1063
171
        return Status::OK();
1064
171
    }
1065
1066
4
    std::vector<int> parquet_col_ids;
1067
276
    for (size_t idx = 0; idx < _read_table_columns.size(); idx++) {
1068
272
        const auto& read_table_col = _read_table_columns[idx];
1069
272
        const auto& read_file_col = _read_file_columns[idx];
1070
272
        if (!_colname_to_slot_id->contains(read_table_col)) {
1071
264
            continue;
1072
264
        }
1073
8
        auto* field = _file_metadata->schema().get_column(read_file_col);
1074
1075
8
        std::function<void(FieldSchema * field)> f = [&](FieldSchema* field) {
1076
8
            if (!_column_ids.empty() &&
1077
8
                _column_ids.find(field->get_column_id()) == _column_ids.end()) {
1078
0
                return;
1079
0
            }
1080
1081
8
            if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
1082
0
                f(&field->children[0]);
1083
8
            } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
1084
0
                f(&field->children[0]);
1085
0
                f(&field->children[1]);
1086
8
            } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
1087
0
                for (int i = 0; i < field->children.size(); ++i) {
1088
0
                    f(&field->children[i]);
1089
0
                }
1090
8
            } else {
1091
8
                int parquet_col_id = field->physical_column_index;
1092
8
                if (parquet_col_id >= 0) {
1093
8
                    parquet_col_ids.push_back(parquet_col_id);
1094
8
                }
1095
8
            }
1096
8
        };
1097
1098
8
        f(field);
1099
8
    }
1100
1101
4
    auto parse_offset_index = [&]() -> Status {
1102
4
        std::vector<uint8_t> off_index_buff(page_index._offset_index_size);
1103
4
        Slice res(off_index_buff.data(), page_index._offset_index_size);
1104
4
        size_t bytes_read = 0;
1105
4
        {
1106
4
            SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1107
4
            RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._offset_index_start, res,
1108
4
                                                          &bytes_read, _io_ctx));
1109
4
        }
1110
4
        _column_statistics.page_index_read_calls++;
1111
4
        _col_offsets.clear();
1112
1113
8
        for (auto parquet_col_id : parquet_col_ids) {
1114
8
            auto& chunk = row_group.columns[parquet_col_id];
1115
8
            if (chunk.offset_index_length == 0) [[unlikely]] {
1116
0
                continue;
1117
0
            }
1118
8
            tparquet::OffsetIndex offset_index;
1119
8
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1120
8
            RETURN_IF_ERROR(
1121
8
                    page_index.parse_offset_index(chunk, off_index_buff.data(), &offset_index));
1122
8
            _col_offsets[parquet_col_id] = offset_index;
1123
8
        }
1124
4
        return Status::OK();
1125
4
    };
1126
1127
    // from https://github.com/apache/doris/pull/55795
1128
4
    RETURN_IF_ERROR(parse_offset_index());
1129
1130
    // Check whether the page index is needed for min-max filtering.
1131
4
    if (!_enable_filter_by_min_max || push_down_pred.empty()) {
1132
4
        read_whole_row_group();
1133
4
        return Status::OK();
1134
4
    }
1135
1136
    // Read the column index.
1137
0
    std::vector<uint8_t> col_index_buff(page_index._column_index_size);
1138
0
    size_t bytes_read = 0;
1139
0
    Slice result(col_index_buff.data(), page_index._column_index_size);
1140
0
    {
1141
0
        SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1142
0
        RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._column_index_start, result,
1143
0
                                                      &bytes_read, _io_ctx));
1144
0
    }
1145
0
    _column_statistics.page_index_read_calls++;
1146
1147
0
    SCOPED_RAW_TIMER(&_reader_statistics.page_index_filter_time);
1148
1149
    // Construct a cacheable page index structure to avoid repeatedly reading the page index of the same column.
1150
0
    ParquetPredicate::CachedPageIndexStat cached_page_index;
1151
0
    cached_page_index.ctz = _ctz;
1152
0
    std::function<bool(ParquetPredicate::PageIndexStat**, int)> get_stat_func =
1153
0
            [&](ParquetPredicate::PageIndexStat** ans, const int cid) -> bool {
1154
0
        if (cached_page_index.stats.contains(cid)) {
1155
0
            *ans = &cached_page_index.stats[cid];
1156
0
            return (*ans)->available;
1157
0
        }
1158
0
        cached_page_index.stats.emplace(cid, ParquetPredicate::PageIndexStat {});
1159
0
        auto& sig_stat = cached_page_index.stats[cid];
1160
1161
0
        auto* slot = _tuple_descriptor->slots()[cid];
1162
0
        if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1163
            // The table column does not exist in the file, possibly due to a schema change.
1164
0
            return false;
1165
0
        }
1166
1167
0
        const auto& file_col_name =
1168
0
                _table_info_node_ptr->children_file_column_name(slot->col_name());
1169
0
        const FieldSchema* col_schema = _file_metadata->schema().get_column(file_col_name);
1170
0
        int parquet_col_id = col_schema->physical_column_index;
1171
1172
0
        if (parquet_col_id < 0) {
1173
            // Complex types do not support the page index yet.
1174
0
            return false;
1175
0
        }
1176
0
        if (!_col_offsets.contains(parquet_col_id)) {
1177
            // If the file contains partition columns and the query applies filters on those
1178
            // partition columns, then reading the page index is unnecessary.
1179
0
            return false;
1180
0
        }
1181
1182
0
        auto& column_chunk = row_group.columns[parquet_col_id];
1183
0
        if (column_chunk.column_index_length == 0 || column_chunk.offset_index_length == 0) {
1184
            // This column has no page index.
1185
0
            return false;
1186
0
        }
1187
1188
0
        tparquet::ColumnIndex column_index;
1189
0
        {
1190
0
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1191
0
            RETURN_IF_ERROR(page_index.parse_column_index(column_chunk, col_index_buff.data(),
1192
0
                                                          &column_index));
1193
0
        }
1194
0
        const int64_t num_of_pages = column_index.null_pages.size();
1195
0
        if (num_of_pages <= 0) [[unlikely]] {
1196
            // No pages (this row group may contain no data).
1197
0
            return false;
1198
0
        }
1199
0
        DCHECK_EQ(column_index.min_values.size(), column_index.max_values.size());
1200
0
        if (!column_index.__isset.null_counts) {
1201
            // The null_counts field is not set, so null statistics are unavailable.
1202
0
            return false;
1203
0
        }
1204
1205
0
        auto& offset_index = _col_offsets[parquet_col_id];
1206
0
        const auto& page_locations = offset_index.page_locations;
1207
1208
0
        sig_stat.col_schema = col_schema;
1209
0
        sig_stat.num_of_pages = num_of_pages;
1210
0
        sig_stat.encoded_min_value = column_index.min_values;
1211
0
        sig_stat.encoded_max_value = column_index.max_values;
1212
0
        sig_stat.is_all_null.resize(num_of_pages);
1213
0
        sig_stat.has_null.resize(num_of_pages);
1214
0
        sig_stat.ranges.resize(num_of_pages);
1215
1216
0
        for (int page_id = 0; page_id < num_of_pages; page_id++) {
1217
0
            sig_stat.is_all_null[page_id] = column_index.null_pages[page_id];
1218
0
            sig_stat.has_null[page_id] = column_index.null_counts[page_id] > 0;
1219
1220
0
            int64_t from = page_locations[page_id].first_row_index;
1221
0
            int64_t to = 0;
1222
0
            if (page_id == page_locations.size() - 1) {
1223
0
                to = row_group_index.last_row;
1224
0
            } else {
1225
0
                to = page_locations[page_id + 1].first_row_index;
1226
0
            }
1227
0
            sig_stat.ranges[page_id] = RowRange {from, to};
1228
0
        }
1229
1230
0
        sig_stat.available = true;
1231
0
        *ans = &sig_stat;
1232
0
        return true;
1233
0
    };
1234
0
    cached_page_index.row_group_range = {0, row_group.num_rows};
1235
0
    cached_page_index.get_stat_func = get_stat_func;
1236
1237
0
    candidate_row_ranges->add({0, row_group.num_rows});
1238
0
    for (const auto& predicate : push_down_pred) {
1239
0
        RowRanges tmp_row_range;
1240
0
        if (!predicate->evaluate_and(&cached_page_index, &tmp_row_range)) {
1241
            // No rows can match; there is no need to read this row group.
1242
0
            candidate_row_ranges->clear();
1243
0
            return Status::OK();
1244
0
        }
1245
0
        RowRanges::ranges_intersection(*candidate_row_ranges, tmp_row_range, candidate_row_ranges);
1246
0
    }
1247
0
    return Status::OK();
1248
0
}
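// Illustrative example of the intersection loop above (page layouts are assumed): starting
// from the full candidate range [0, num_rows), a predicate that keeps rows [0, 1000) followed
// by one that keeps rows [500, 2000) leaves [500, 1000) as the candidate row ranges; if any
// predicate keeps no rows at all, the candidate ranges are cleared and the row group is skipped.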
1249
1250
Status ParquetReader::_process_min_max_bloom_filter(
1251
        const RowGroupReader::RowGroupIndex& row_group_index, const tparquet::RowGroup& row_group,
1252
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1253
214
        RowRanges* row_ranges) {
1254
214
    SCOPED_RAW_TIMER(&_reader_statistics.row_group_filter_time);
1255
214
    if (!_filter_groups) {
1256
        // No row group filtering is needed;
1257
        // for example, Iceberg reads position delete files.
1258
1
        row_ranges->add({0, row_group.num_rows});
1259
1
        return Status::OK();
1260
1
    }
1261
1262
213
    if (_read_by_rows) {
1263
38
        auto group_start = row_group_index.first_row;
1264
38
        auto group_end = row_group_index.last_row;
1265
1266
93
        while (!_row_ids.empty()) {
1267
65
            auto v = _row_ids.front();
1268
65
            if (v < group_start) {
1269
0
                continue;
1270
65
            } else if (v < group_end) {
1271
55
                row_ranges->add(RowRange {v - group_start, v - group_start + 1});
1272
55
                _row_ids.pop_front();
1273
55
            } else {
1274
10
                break;
1275
10
            }
1276
65
        }
1277
175
    } else {
1278
175
        bool filter_this_row_group = false;
1279
175
        bool filtered_by_min_max = false;
1280
175
        bool filtered_by_bloom_filter = false;
1281
175
        RETURN_IF_ERROR(_process_column_stat_filter(row_group, push_down_pred,
1282
175
                                                    &filter_this_row_group, &filtered_by_min_max,
1283
175
                                                    &filtered_by_bloom_filter));
1284
        // Update statistics based on filter type
1285
175
        if (filter_this_row_group) {
1286
0
            if (filtered_by_min_max) {
1287
0
                _reader_statistics.filtered_row_groups_by_min_max++;
1288
0
            }
1289
0
            if (filtered_by_bloom_filter) {
1290
0
                _reader_statistics.filtered_row_groups_by_bloom_filter++;
1291
0
            }
1292
0
        }
1293
1294
175
        if (!filter_this_row_group) {
1295
175
            RETURN_IF_ERROR(_process_page_index_filter(row_group, row_group_index, push_down_pred,
1296
175
                                                       row_ranges));
1297
175
        }
1298
175
    }
1299
1300
213
    return Status::OK();
1301
213
}
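// Illustrative example for the _read_by_rows branch above (row ids are assumed): if this row
// group covers global rows [1000, 2000) and _row_ids holds {1050, 1051, 2300}, rows 1050 and
// 1051 become the group-relative ranges [50, 51) and [51, 52) and are popped, while 2300 stays
// queued for a later row group.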
1302
1303
Status ParquetReader::_process_column_stat_filter(
1304
        const tparquet::RowGroup& row_group,
1305
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1306
177
        bool* filter_group, bool* filtered_by_min_max, bool* filtered_by_bloom_filter) {
1307
    // If both filters are disabled, skip filtering
1308
177
    if (!_enable_filter_by_min_max && !_enable_filter_by_bloom_filter) {
1309
0
        return Status::OK();
1310
0
    }
1311
1312
    // Cache bloom filters for each column to avoid reading the same bloom filter multiple times
1313
    // when there are multiple predicates on the same column
1314
177
    std::unordered_map<int, std::unique_ptr<ParquetBlockSplitBloomFilter>> bloom_filter_cache;
1315
1316
    // Initialize output parameters
1317
177
    *filtered_by_min_max = false;
1318
177
    *filtered_by_bloom_filter = false;
1319
1320
177
    for (const auto& predicate : _push_down_predicates) {
1321
20
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_stat_func =
1322
22
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
1323
                    // Check if min-max filter is enabled
1324
22
                    if (!_enable_filter_by_min_max) {
1325
0
                        return false;
1326
0
                    }
1327
22
                    auto* slot = _tuple_descriptor->slots()[cid];
1328
22
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1329
0
                        return false;
1330
0
                    }
1331
22
                    const auto& file_col_name =
1332
22
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1333
22
                    const FieldSchema* col_schema =
1334
22
                            _file_metadata->schema().get_column(file_col_name);
1335
22
                    int parquet_col_id = col_schema->physical_column_index;
1336
22
                    auto meta_data = row_group.columns[parquet_col_id].meta_data;
1337
22
                    stat->col_schema = col_schema;
1338
22
                    return ParquetPredicate::read_column_stats(col_schema, meta_data,
1339
22
                                                               &_ignored_stats,
1340
22
                                                               _t_metadata->created_by, stat)
1341
22
                            .ok();
1342
22
                };
1343
20
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_bloom_filter_func =
1344
20
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
1345
0
                    auto* slot = _tuple_descriptor->slots()[cid];
1346
0
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1347
0
                        return false;
1348
0
                    }
1349
0
                    const auto& file_col_name =
1350
0
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1351
0
                    const FieldSchema* col_schema =
1352
0
                            _file_metadata->schema().get_column(file_col_name);
1353
0
                    int parquet_col_id = col_schema->physical_column_index;
1354
0
                    auto meta_data = row_group.columns[parquet_col_id].meta_data;
1355
0
                    if (!meta_data.__isset.bloom_filter_offset) {
1356
0
                        return false;
1357
0
                    }
1358
0
                    auto primitive_type =
1359
0
                            remove_nullable(col_schema->data_type)->get_primitive_type();
1360
0
                    if (!ParquetPredicate::bloom_filter_supported(primitive_type)) {
1361
0
                        return false;
1362
0
                    }
1363
1364
                    // Check if bloom filter is enabled
1365
0
                    if (!_enable_filter_by_bloom_filter) {
1366
0
                        return false;
1367
0
                    }
1368
1369
                    // Check cache first
1370
0
                    auto cache_iter = bloom_filter_cache.find(parquet_col_id);
1371
0
                    if (cache_iter != bloom_filter_cache.end()) {
1372
                        // Bloom filter already loaded for this column, reuse it
1373
0
                        stat->bloom_filter = std::move(cache_iter->second);
1374
0
                        bloom_filter_cache.erase(cache_iter);
1375
0
                        return stat->bloom_filter != nullptr;
1376
0
                    }
1377
1378
0
                    if (!stat->bloom_filter) {
1379
0
                        SCOPED_RAW_TIMER(&_reader_statistics.bloom_filter_read_time);
1380
0
                        auto st = ParquetPredicate::read_bloom_filter(
1381
0
                                meta_data, _tracing_file_reader, _io_ctx, stat);
1382
0
                        if (!st.ok()) {
1383
0
                            LOG(WARNING) << "Failed to read bloom filter for column "
1384
0
                                         << col_schema->name << " in file " << _scan_range.path
1385
0
                                         << ", status: " << st.to_string();
1386
0
                            stat->bloom_filter.reset();
1387
0
                            return false;
1388
0
                        }
1389
0
                    }
1390
0
                    return stat->bloom_filter != nullptr;
1391
0
                };
1392
20
        ParquetPredicate::ColumnStat stat;
1393
20
        stat.ctz = _ctz;
1394
20
        stat.get_stat_func = &get_stat_func;
1395
20
        stat.get_bloom_filter_func = &get_bloom_filter_func;
1396
1397
20
        if (!predicate->evaluate_and(&stat)) {
1398
1
            *filter_group = true;
1399
1400
            // Record which kind of filter eliminated this row group.
1401
            // If a bloom filter was loaded, it is what filtered the group.
1402
1
            if (stat.bloom_filter) {
1403
0
                *filtered_by_bloom_filter = true;
1404
0
            }
1405
            // If col_schema was set but no bloom filter was loaded, min-max statistics did the filtering.
1406
1
            if (stat.col_schema && !stat.bloom_filter) {
1407
1
                *filtered_by_min_max = true;
1408
1
            }
1409
1410
1
            return Status::OK();
1411
1
        }
1412
1413
        // After evaluating, if the bloom filter was used, cache it for subsequent predicates
1414
19
        if (stat.bloom_filter) {
1415
            // Find the column id for caching
1416
0
            for (auto* slot : _tuple_descriptor->slots()) {
1417
0
                if (_table_info_node_ptr->children_column_exists(slot->col_name())) {
1418
0
                    const auto& file_col_name =
1419
0
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1420
0
                    const FieldSchema* col_schema =
1421
0
                            _file_metadata->schema().get_column(file_col_name);
1422
0
                    int parquet_col_id = col_schema->physical_column_index;
1423
0
                    if (stat.col_schema == col_schema) {
1424
0
                        bloom_filter_cache[parquet_col_id] = std::move(stat.bloom_filter);
1425
0
                        break;
1426
0
                    }
1427
0
                }
1428
0
            }
1429
0
        }
1430
19
    }
1431
1432
    // Update filter statistics if this row group was not filtered
1433
    // The statistics will be updated in _init_row_groups when filter_group is true
1434
176
    return Status::OK();
1435
177
}
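// Note on the bloom-filter cache above: a previously loaded filter is moved out of
// bloom_filter_cache into stat.bloom_filter for evaluation and, when the predicate does not
// filter the group, moved back under its parquet_col_id, so each column's bloom filter is read
// from the file at most once even when several predicates touch the same column.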
1436
1437
468
int64_t ParquetReader::_get_column_start_offset(const tparquet::ColumnMetaData& column) const {
1438
468
    return has_dict_page(column) ? column.dictionary_page_offset : column.data_page_offset;
1439
468
}
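// Illustrative note: a Parquet column chunk stores its optional dictionary page before the
// data pages, so when has_dict_page() reports one the chunk's bytes begin at
// dictionary_page_offset (e.g. an assumed 4096) rather than at data_page_offset (e.g. 5120);
// otherwise the first data page marks the start of the chunk.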
1440
1441
150
void ParquetReader::_collect_profile() {
1442
150
    if (_profile == nullptr) {
1443
0
        return;
1444
0
    }
1445
1446
150
    if (_current_group_reader != nullptr) {
1447
150
        _current_group_reader->collect_profile_before_close();
1448
150
    }
1449
150
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups, _reader_statistics.filtered_row_groups);
1450
150
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_min_max,
1451
150
                   _reader_statistics.filtered_row_groups_by_min_max);
1452
150
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_bloom_filter,
1453
150
                   _reader_statistics.filtered_row_groups_by_bloom_filter);
1454
150
    COUNTER_UPDATE(_parquet_profile.to_read_row_groups, _reader_statistics.read_row_groups);
1455
150
    COUNTER_UPDATE(_parquet_profile.total_row_groups, _total_groups);
1456
150
    COUNTER_UPDATE(_parquet_profile.filtered_group_rows, _reader_statistics.filtered_group_rows);
1457
150
    COUNTER_UPDATE(_parquet_profile.filtered_page_rows, _reader_statistics.filtered_page_rows);
1458
150
    COUNTER_UPDATE(_parquet_profile.lazy_read_filtered_rows,
1459
150
                   _reader_statistics.lazy_read_filtered_rows);
1460
150
    COUNTER_UPDATE(_parquet_profile.filtered_bytes, _reader_statistics.filtered_bytes);
1461
150
    COUNTER_UPDATE(_parquet_profile.raw_rows_read, _reader_statistics.read_rows);
1462
150
    COUNTER_UPDATE(_parquet_profile.column_read_time, _reader_statistics.column_read_time);
1463
150
    COUNTER_UPDATE(_parquet_profile.parse_meta_time, _reader_statistics.parse_meta_time);
1464
150
    COUNTER_UPDATE(_parquet_profile.parse_footer_time, _reader_statistics.parse_footer_time);
1465
150
    COUNTER_UPDATE(_parquet_profile.file_reader_create_time,
1466
150
                   _reader_statistics.file_reader_create_time);
1467
150
    COUNTER_UPDATE(_parquet_profile.open_file_num, _reader_statistics.open_file_num);
1468
150
    COUNTER_UPDATE(_parquet_profile.page_index_filter_time,
1469
150
                   _reader_statistics.page_index_filter_time);
1470
150
    COUNTER_UPDATE(_parquet_profile.read_page_index_time, _reader_statistics.read_page_index_time);
1471
150
    COUNTER_UPDATE(_parquet_profile.parse_page_index_time,
1472
150
                   _reader_statistics.parse_page_index_time);
1473
150
    COUNTER_UPDATE(_parquet_profile.row_group_filter_time,
1474
150
                   _reader_statistics.row_group_filter_time);
1475
150
    COUNTER_UPDATE(_parquet_profile.file_footer_read_calls,
1476
150
                   _reader_statistics.file_footer_read_calls);
1477
150
    COUNTER_UPDATE(_parquet_profile.file_footer_hit_cache,
1478
150
                   _reader_statistics.file_footer_hit_cache);
1479
1480
150
    COUNTER_UPDATE(_parquet_profile.skip_page_header_num, _column_statistics.skip_page_header_num);
1481
150
    COUNTER_UPDATE(_parquet_profile.parse_page_header_num,
1482
150
                   _column_statistics.parse_page_header_num);
1483
150
    COUNTER_UPDATE(_parquet_profile.predicate_filter_time,
1484
150
                   _reader_statistics.predicate_filter_time);
1485
150
    COUNTER_UPDATE(_parquet_profile.dict_filter_rewrite_time,
1486
150
                   _reader_statistics.dict_filter_rewrite_time);
1487
150
    COUNTER_UPDATE(_parquet_profile.bloom_filter_read_time,
1488
150
                   _reader_statistics.bloom_filter_read_time);
1489
150
    COUNTER_UPDATE(_parquet_profile.page_index_read_calls,
1490
150
                   _column_statistics.page_index_read_calls);
1491
150
    COUNTER_UPDATE(_parquet_profile.decompress_time, _column_statistics.decompress_time);
1492
150
    COUNTER_UPDATE(_parquet_profile.decompress_cnt, _column_statistics.decompress_cnt);
1493
150
    COUNTER_UPDATE(_parquet_profile.page_read_counter, _column_statistics.page_read_counter);
1494
150
    COUNTER_UPDATE(_parquet_profile.page_cache_write_counter,
1495
150
                   _column_statistics.page_cache_write_counter);
1496
150
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_write_counter,
1497
150
                   _column_statistics.page_cache_compressed_write_counter);
1498
150
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_write_counter,
1499
150
                   _column_statistics.page_cache_decompressed_write_counter);
1500
150
    COUNTER_UPDATE(_parquet_profile.page_cache_hit_counter,
1501
150
                   _column_statistics.page_cache_hit_counter);
1502
150
    COUNTER_UPDATE(_parquet_profile.page_cache_missing_counter,
1503
150
                   _column_statistics.page_cache_missing_counter);
1504
150
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_hit_counter,
1505
150
                   _column_statistics.page_cache_compressed_hit_counter);
1506
150
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_hit_counter,
1507
150
                   _column_statistics.page_cache_decompressed_hit_counter);
1508
150
    COUNTER_UPDATE(_parquet_profile.decode_header_time, _column_statistics.decode_header_time);
1509
150
    COUNTER_UPDATE(_parquet_profile.read_page_header_time,
1510
150
                   _column_statistics.read_page_header_time);
1511
150
    COUNTER_UPDATE(_parquet_profile.decode_value_time, _column_statistics.decode_value_time);
1512
150
    COUNTER_UPDATE(_parquet_profile.decode_dict_time, _column_statistics.decode_dict_time);
1513
150
    COUNTER_UPDATE(_parquet_profile.decode_level_time, _column_statistics.decode_level_time);
1514
150
    COUNTER_UPDATE(_parquet_profile.decode_null_map_time, _column_statistics.decode_null_map_time);
1515
150
}
1516
1517
150
void ParquetReader::_collect_profile_before_close() {
1518
150
    _collect_profile();
1519
150
}
1520
1521
} // namespace doris