Coverage Report

Created: 2026-03-24 14:21

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/format/parquet/vparquet_reader.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/parquet/vparquet_reader.h"
19
20
#include <gen_cpp/Metrics_types.h>
21
#include <gen_cpp/PlanNodes_types.h>
22
#include <gen_cpp/parquet_types.h>
23
#include <glog/logging.h>
24
25
#include <algorithm>
26
#include <functional>
27
#include <utility>
28
29
#include "common/status.h"
30
#include "core/block/block.h"
31
#include "core/block/column_with_type_and_name.h"
32
#include "core/column/column.h"
33
#include "core/data_type/define_primitive_type.h"
34
#include "core/typeid_cast.h"
35
#include "core/types.h"
36
#include "exec/scan/file_scanner.h"
37
#include "exprs/vbloom_predicate.h"
38
#include "exprs/vdirect_in_predicate.h"
39
#include "exprs/vexpr.h"
40
#include "exprs/vexpr_context.h"
41
#include "exprs/vin_predicate.h"
42
#include "exprs/vruntimefilter_wrapper.h"
43
#include "exprs/vslot_ref.h"
44
#include "exprs/vtopn_pred.h"
45
#include "format/column_type_convert.h"
46
#include "format/parquet/parquet_block_split_bloom_filter.h"
47
#include "format/parquet/parquet_common.h"
48
#include "format/parquet/parquet_predicate.h"
49
#include "format/parquet/parquet_thrift_util.h"
50
#include "format/parquet/schema_desc.h"
51
#include "format/parquet/vparquet_file_metadata.h"
52
#include "format/parquet/vparquet_group_reader.h"
53
#include "format/parquet/vparquet_page_index.h"
54
#include "information_schema/schema_scanner.h"
55
#include "io/file_factory.h"
56
#include "io/fs/buffered_reader.h"
57
#include "io/fs/file_reader.h"
58
#include "io/fs/file_reader_writer_fwd.h"
59
#include "io/fs/tracing_file_reader.h"
60
#include "runtime/descriptors.h"
61
#include "util/slice.h"
62
#include "util/string_util.h"
63
#include "util/timezone_utils.h"
64
65
namespace cctz {
66
class time_zone;
67
} // namespace cctz
68
namespace doris {
69
class RowDescriptor;
70
class RuntimeState;
71
class SlotDescriptor;
72
class TupleDescriptor;
73
namespace io {
74
struct IOContext;
75
enum class FileCachePolicy : uint8_t;
76
} // namespace io
77
class Block;
78
} // namespace doris
79
80
namespace doris {
81
82
#include "common/compile_check_begin.h"
83
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
84
                             const TFileRangeDesc& range, size_t batch_size,
85
                             const cctz::time_zone* ctz, io::IOContext* io_ctx, RuntimeState* state,
86
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
87
19.0k
        : _profile(profile),
88
19.0k
          _scan_params(params),
89
19.0k
          _scan_range(range),
90
19.0k
          _batch_size(std::max(batch_size, _MIN_BATCH_SIZE)),
91
19.0k
          _range_start_offset(range.start_offset),
92
19.0k
          _range_size(range.size),
93
19.0k
          _ctz(ctz),
94
19.0k
          _io_ctx(io_ctx),
95
19.0k
          _state(state),
96
19.0k
          _enable_lazy_mat(enable_lazy_mat),
97
          _enable_filter_by_min_max(
98
19.0k
                  state == nullptr ? true
99
19.0k
                                   : state->query_options().enable_parquet_filter_by_min_max),
100
          _enable_filter_by_bloom_filter(
101
19.0k
                  state == nullptr ? true
102
19.0k
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
103
19.0k
    _meta_cache = meta_cache;
104
19.0k
    _init_profile();
105
19.0k
    _init_system_properties();
106
19.0k
    _init_file_description();
107
19.0k
}
108
109
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
110
                             const TFileRangeDesc& range, size_t batch_size,
111
                             const cctz::time_zone* ctz,
112
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
113
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
114
0
        : _profile(profile),
115
0
          _scan_params(params),
116
0
          _scan_range(range),
117
0
          _batch_size(std::max(batch_size, _MIN_BATCH_SIZE)),
118
0
          _range_start_offset(range.start_offset),
119
0
          _range_size(range.size),
120
0
          _ctz(ctz),
121
0
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
122
0
          _io_ctx_holder(std::move(io_ctx_holder)),
123
0
          _state(state),
124
0
          _enable_lazy_mat(enable_lazy_mat),
125
          _enable_filter_by_min_max(
126
0
                  state == nullptr ? true
127
0
                                   : state->query_options().enable_parquet_filter_by_min_max),
128
          _enable_filter_by_bloom_filter(
129
0
                  state == nullptr ? true
130
0
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
131
0
    _meta_cache = meta_cache;
132
0
    _init_profile();
133
0
    _init_system_properties();
134
0
    _init_file_description();
135
0
}
136
137
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
138
                             io::IOContext* io_ctx, RuntimeState* state, FileMetaCache* meta_cache,
139
                             bool enable_lazy_mat)
140
5
        : _profile(nullptr),
141
5
          _scan_params(params),
142
5
          _scan_range(range),
143
5
          _io_ctx(io_ctx),
144
5
          _state(state),
145
5
          _enable_lazy_mat(enable_lazy_mat),
146
          _enable_filter_by_min_max(
147
5
                  state == nullptr ? true
148
5
                                   : state->query_options().enable_parquet_filter_by_min_max),
149
          _enable_filter_by_bloom_filter(
150
5
                  state == nullptr ? true
151
5
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
152
5
    _meta_cache = meta_cache;
153
5
    _init_system_properties();
154
5
    _init_file_description();
155
5
}
156
157
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
158
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
159
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
160
1.23k
        : _profile(nullptr),
161
1.23k
          _scan_params(params),
162
1.23k
          _scan_range(range),
163
1.23k
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
164
1.23k
          _io_ctx_holder(std::move(io_ctx_holder)),
165
1.23k
          _state(state),
166
1.23k
          _enable_lazy_mat(enable_lazy_mat),
167
          _enable_filter_by_min_max(
168
1.23k
                  state == nullptr ? true
169
1.23k
                                   : state->query_options().enable_parquet_filter_by_min_max),
170
          _enable_filter_by_bloom_filter(
171
1.23k
                  state == nullptr ? true
172
1.23k
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
173
1.23k
    _meta_cache = meta_cache;
174
1.23k
    _init_system_properties();
175
1.23k
    _init_file_description();
176
1.23k
}
177
178
20.2k
ParquetReader::~ParquetReader() {
179
20.2k
    _close_internal();
180
20.2k
}
181
182
#ifdef BE_TEST
183
// for unit test
184
void ParquetReader::set_file_reader(io::FileReaderSPtr file_reader) {
185
    _file_reader = file_reader;
186
    _tracing_file_reader = file_reader;
187
}
188
#endif
189
190
19.0k
void ParquetReader::_init_profile() {
191
19.0k
    if (_profile != nullptr) {
192
19.0k
        static const char* parquet_profile = "ParquetReader";
193
19.0k
        ADD_TIMER_WITH_LEVEL(_profile, parquet_profile, 1);
194
195
19.0k
        _parquet_profile.filtered_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
196
19.0k
                _profile, "RowGroupsFiltered", TUnit::UNIT, parquet_profile, 1);
197
19.0k
        _parquet_profile.filtered_row_groups_by_min_max = ADD_CHILD_COUNTER_WITH_LEVEL(
198
19.0k
                _profile, "RowGroupsFilteredByMinMax", TUnit::UNIT, parquet_profile, 1);
199
19.0k
        _parquet_profile.filtered_row_groups_by_bloom_filter = ADD_CHILD_COUNTER_WITH_LEVEL(
200
19.0k
                _profile, "RowGroupsFilteredByBloomFilter", TUnit::UNIT, parquet_profile, 1);
201
19.0k
        _parquet_profile.to_read_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
202
19.0k
                _profile, "RowGroupsReadNum", TUnit::UNIT, parquet_profile, 1);
203
19.0k
        _parquet_profile.total_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
204
19.0k
                _profile, "RowGroupsTotalNum", TUnit::UNIT, parquet_profile, 1);
205
19.0k
        _parquet_profile.filtered_group_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
206
19.0k
                _profile, "FilteredRowsByGroup", TUnit::UNIT, parquet_profile, 1);
207
19.0k
        _parquet_profile.filtered_page_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
208
19.0k
                _profile, "FilteredRowsByPage", TUnit::UNIT, parquet_profile, 1);
209
19.0k
        _parquet_profile.lazy_read_filtered_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
210
19.0k
                _profile, "FilteredRowsByLazyRead", TUnit::UNIT, parquet_profile, 1);
211
19.0k
        _parquet_profile.filtered_bytes = ADD_CHILD_COUNTER_WITH_LEVEL(
212
19.0k
                _profile, "FilteredBytes", TUnit::BYTES, parquet_profile, 1);
213
19.0k
        _parquet_profile.raw_rows_read = ADD_CHILD_COUNTER_WITH_LEVEL(
214
19.0k
                _profile, "RawRowsRead", TUnit::UNIT, parquet_profile, 1);
215
19.0k
        _parquet_profile.column_read_time =
216
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ColumnReadTime", parquet_profile, 1);
217
19.0k
        _parquet_profile.parse_meta_time =
218
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseMetaTime", parquet_profile, 1);
219
19.0k
        _parquet_profile.parse_footer_time =
220
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseFooterTime", parquet_profile, 1);
221
19.0k
        _parquet_profile.file_reader_create_time =
222
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "FileReaderCreateTime", parquet_profile, 1);
223
19.0k
        _parquet_profile.open_file_num =
224
19.0k
                ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "FileNum", TUnit::UNIT, parquet_profile, 1);
225
19.0k
        _parquet_profile.page_index_read_calls =
226
19.0k
                ADD_COUNTER_WITH_LEVEL(_profile, "PageIndexReadCalls", TUnit::UNIT, 1);
227
19.0k
        _parquet_profile.page_index_filter_time =
228
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexFilterTime", parquet_profile, 1);
229
19.0k
        _parquet_profile.read_page_index_time =
230
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexReadTime", parquet_profile, 1);
231
19.0k
        _parquet_profile.parse_page_index_time =
232
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexParseTime", parquet_profile, 1);
233
19.0k
        _parquet_profile.row_group_filter_time =
234
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "RowGroupFilterTime", parquet_profile, 1);
235
19.0k
        _parquet_profile.file_footer_read_calls =
236
19.0k
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterReadCalls", TUnit::UNIT, 1);
237
19.0k
        _parquet_profile.file_footer_hit_cache =
238
19.0k
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterHitCache", TUnit::UNIT, 1);
239
19.0k
        _parquet_profile.decompress_time =
240
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecompressTime", parquet_profile, 1);
241
19.0k
        _parquet_profile.decompress_cnt = ADD_CHILD_COUNTER_WITH_LEVEL(
242
19.0k
                _profile, "DecompressCount", TUnit::UNIT, parquet_profile, 1);
243
19.0k
        _parquet_profile.page_read_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
244
19.0k
                _profile, "PageReadCount", TUnit::UNIT, parquet_profile, 1);
245
19.0k
        _parquet_profile.page_cache_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
246
19.0k
                _profile, "PageCacheWriteCount", TUnit::UNIT, parquet_profile, 1);
247
19.0k
        _parquet_profile.page_cache_compressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
248
19.0k
                _profile, "PageCacheCompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
249
19.0k
        _parquet_profile.page_cache_decompressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
250
19.0k
                _profile, "PageCacheDecompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
251
19.0k
        _parquet_profile.page_cache_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
252
19.0k
                _profile, "PageCacheHitCount", TUnit::UNIT, parquet_profile, 1);
253
19.0k
        _parquet_profile.page_cache_missing_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
254
19.0k
                _profile, "PageCacheMissingCount", TUnit::UNIT, parquet_profile, 1);
255
19.0k
        _parquet_profile.page_cache_compressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
256
19.0k
                _profile, "PageCacheCompressedHitCount", TUnit::UNIT, parquet_profile, 1);
257
19.0k
        _parquet_profile.page_cache_decompressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
258
19.0k
                _profile, "PageCacheDecompressedHitCount", TUnit::UNIT, parquet_profile, 1);
259
19.0k
        _parquet_profile.decode_header_time =
260
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderDecodeTime", parquet_profile, 1);
261
19.0k
        _parquet_profile.read_page_header_time =
262
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderReadTime", parquet_profile, 1);
263
19.0k
        _parquet_profile.decode_value_time =
264
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeValueTime", parquet_profile, 1);
265
19.0k
        _parquet_profile.decode_dict_time =
266
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeDictTime", parquet_profile, 1);
267
19.0k
        _parquet_profile.decode_level_time =
268
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeLevelTime", parquet_profile, 1);
269
19.0k
        _parquet_profile.decode_null_map_time =
270
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeNullMapTime", parquet_profile, 1);
271
19.0k
        _parquet_profile.skip_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
272
19.0k
                _profile, "SkipPageHeaderNum", TUnit::UNIT, parquet_profile, 1);
273
19.0k
        _parquet_profile.parse_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
274
19.0k
                _profile, "ParsePageHeaderNum", TUnit::UNIT, parquet_profile, 1);
275
19.0k
        _parquet_profile.predicate_filter_time =
276
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PredicateFilterTime", parquet_profile, 1);
277
19.0k
        _parquet_profile.dict_filter_rewrite_time =
278
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DictFilterRewriteTime", parquet_profile, 1);
279
19.0k
        _parquet_profile.bloom_filter_read_time =
280
19.0k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "BloomFilterReadTime", parquet_profile, 1);
281
19.0k
    }
282
19.0k
}
283
284
1.62k
Status ParquetReader::close() {
285
1.62k
    _close_internal();
286
1.62k
    return Status::OK();
287
1.62k
}
288
289
21.9k
void ParquetReader::_close_internal() {
290
21.9k
    if (!_closed) {
291
20.2k
        _closed = true;
292
20.2k
    }
293
21.9k
}
294
295
39.6k
Status ParquetReader::_open_file() {
296
39.6k
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
297
0
        return Status::EndOfFile("stop");
298
0
    }
299
39.6k
    if (_file_reader == nullptr) {
300
20.2k
        SCOPED_RAW_TIMER(&_reader_statistics.file_reader_create_time);
301
20.2k
        ++_reader_statistics.open_file_num;
302
20.2k
        _file_description.mtime =
303
20.2k
                _scan_range.__isset.modification_time ? _scan_range.modification_time : 0;
304
20.2k
        io::FileReaderOptions reader_options =
305
20.2k
                FileFactory::get_reader_options(_state, _file_description);
306
20.2k
        _file_reader = DORIS_TRY(io::DelegateReader::create_file_reader(
307
20.2k
                _profile, _system_properties, _file_description, reader_options,
308
20.2k
                io::DelegateReader::AccessMode::RANDOM, _io_ctx));
309
20.2k
        _tracing_file_reader = _io_ctx ? std::make_shared<io::TracingFileReader>(
310
20.2k
                                                 _file_reader, _io_ctx->file_reader_stats)
311
20.2k
                                       : _file_reader;
312
20.2k
    }
313
314
39.6k
    if (_file_metadata == nullptr) {
315
20.2k
        SCOPED_RAW_TIMER(&_reader_statistics.parse_footer_time);
316
20.2k
        if (_tracing_file_reader->size() <= sizeof(PARQUET_VERSION_NUMBER)) {
317
            // Some systems may generate a parquet file with only 4 bytes: PAR1
318
            // Should consider it as empty file.
319
0
            return Status::EndOfFile("open file failed, empty parquet file {} with size: {}",
320
0
                                     _scan_range.path, _tracing_file_reader->size());
321
0
        }
322
20.2k
        size_t meta_size = 0;
323
20.2k
        bool enable_mapping_varbinary = _scan_params.__isset.enable_mapping_varbinary
324
20.2k
                                                ? _scan_params.enable_mapping_varbinary
325
20.2k
                                                : false;
326
20.2k
        bool enable_mapping_timestamp_tz = _scan_params.__isset.enable_mapping_timestamp_tz
327
20.2k
                                                   ? _scan_params.enable_mapping_timestamp_tz
328
20.2k
                                                   : false;
329
20.2k
        if (_meta_cache == nullptr) {
330
            // wrap _file_metadata with unique ptr, so that it can be released finally.
331
1.28k
            RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
332
1.28k
                                                &meta_size, _io_ctx, enable_mapping_varbinary,
333
1.28k
                                                enable_mapping_timestamp_tz));
334
1.27k
            _file_metadata = _file_metadata_ptr.get();
335
            // parse magic number & parse meta data
336
1.27k
            _reader_statistics.file_footer_read_calls += 1;
337
19.0k
        } else {
338
19.0k
            const auto& file_meta_cache_key =
339
19.0k
                    FileMetaCache::get_key(_tracing_file_reader, _file_description);
340
19.0k
            if (!_meta_cache->lookup(file_meta_cache_key, &_meta_cache_handle)) {
341
4.60k
                RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
342
4.60k
                                                    &meta_size, _io_ctx, enable_mapping_varbinary,
343
4.60k
                                                    enable_mapping_timestamp_tz));
344
                // _file_metadata_ptr.release() : move control of _file_metadata to _meta_cache_handle
345
4.60k
                _meta_cache->insert(file_meta_cache_key, _file_metadata_ptr.release(),
346
4.60k
                                    &_meta_cache_handle);
347
4.60k
                _file_metadata = _meta_cache_handle.data<FileMetaData>();
348
4.60k
                _reader_statistics.file_footer_read_calls += 1;
349
14.3k
            } else {
350
14.3k
                _reader_statistics.file_footer_hit_cache++;
351
14.3k
            }
352
19.0k
            _file_metadata = _meta_cache_handle.data<FileMetaData>();
353
19.0k
        }
354
355
20.2k
        if (_file_metadata == nullptr) {
356
0
            return Status::InternalError("failed to get file meta data: {}",
357
0
                                         _file_description.path);
358
0
        }
359
20.2k
    }
360
39.6k
    return Status::OK();
361
39.6k
}
362
363
18.6k
Status ParquetReader::get_file_metadata_schema(const FieldDescriptor** ptr) {
364
18.6k
    RETURN_IF_ERROR(_open_file());
365
18.6k
    DCHECK(_file_metadata != nullptr);
366
18.6k
    *ptr = &_file_metadata->schema();
367
18.6k
    return Status::OK();
368
18.6k
}
369
370
20.2k
void ParquetReader::_init_system_properties() {
371
20.2k
    if (_scan_range.__isset.file_type) {
372
        // for compatibility
373
19.0k
        _system_properties.system_type = _scan_range.file_type;
374
19.0k
    } else {
375
1.25k
        _system_properties.system_type = _scan_params.file_type;
376
1.25k
    }
377
20.2k
    _system_properties.properties = _scan_params.properties;
378
20.2k
    _system_properties.hdfs_params = _scan_params.hdfs_params;
379
20.2k
    if (_scan_params.__isset.broker_addresses) {
380
20
        _system_properties.broker_addresses.assign(_scan_params.broker_addresses.begin(),
381
20
                                                   _scan_params.broker_addresses.end());
382
20
    }
383
20.2k
}
384
385
20.2k
void ParquetReader::_init_file_description() {
386
20.2k
    _file_description.path = _scan_range.path;
387
18.4E
    _file_description.file_size = _scan_range.__isset.file_size ? _scan_range.file_size : -1;
388
20.2k
    if (_scan_range.__isset.fs_name) {
389
8.84k
        _file_description.fs_name = _scan_range.fs_name;
390
8.84k
    }
391
20.2k
    if (_scan_range.__isset.file_cache_admission) {
392
17.8k
        _file_description.file_cache_admission = _scan_range.file_cache_admission;
393
17.8k
    }
394
20.2k
}
395
396
Status ParquetReader::init_reader(
397
        const std::vector<std::string>& all_column_names,
398
        std::unordered_map<std::string, uint32_t>* col_name_to_block_idx,
399
        const VExprContextSPtrs& conjuncts,
400
        phmap::flat_hash_map<int, std::vector<std::shared_ptr<ColumnPredicate>>>&
401
                slot_id_to_predicates,
402
        const TupleDescriptor* tuple_descriptor, const RowDescriptor* row_descriptor,
403
        const std::unordered_map<std::string, int>* colname_to_slot_id,
404
        const VExprContextSPtrs* not_single_slot_filter_conjuncts,
405
        const std::unordered_map<int, VExprContextSPtrs>* slot_id_to_filter_conjuncts,
406
        std::shared_ptr<TableSchemaChangeHelper::Node> table_info_node_ptr, bool filter_groups,
407
18.9k
        const std::set<uint64_t>& column_ids, const std::set<uint64_t>& filter_column_ids) {
408
18.9k
    _col_name_to_block_idx = col_name_to_block_idx;
409
18.9k
    _tuple_descriptor = tuple_descriptor;
410
18.9k
    _row_descriptor = row_descriptor;
411
18.9k
    _colname_to_slot_id = colname_to_slot_id;
412
18.9k
    _not_single_slot_filter_conjuncts = not_single_slot_filter_conjuncts;
413
18.9k
    _slot_id_to_filter_conjuncts = slot_id_to_filter_conjuncts;
414
18.9k
    _table_info_node_ptr = table_info_node_ptr;
415
18.9k
    _filter_groups = filter_groups;
416
18.9k
    _column_ids = column_ids;
417
18.9k
    _filter_column_ids = filter_column_ids;
418
419
18.9k
    RETURN_IF_ERROR(_open_file());
420
18.9k
    _t_metadata = &(_file_metadata->to_thrift());
421
18.9k
    if (_file_metadata == nullptr) {
422
0
        return Status::InternalError("failed to init parquet reader, please open reader first");
423
0
    }
424
425
18.9k
    SCOPED_RAW_TIMER(&_reader_statistics.parse_meta_time);
426
18.9k
    _total_groups = _t_metadata->row_groups.size();
427
18.9k
    if (_total_groups == 0) {
428
6
        return Status::EndOfFile("init reader failed, empty parquet file: " + _scan_range.path);
429
6
    }
430
18.9k
    _current_row_group_index = RowGroupReader::RowGroupIndex {-1, 0, 0};
431
432
18.9k
    _table_column_names = &all_column_names;
433
18.9k
    auto schema_desc = _file_metadata->schema();
434
435
18.9k
    std::map<std::string, std::string> required_file_columns; //file column -> table column
436
68.1k
    for (auto table_column_name : all_column_names) {
437
68.1k
        if (_table_info_node_ptr->children_column_exists(table_column_name)) {
438
66.2k
            required_file_columns.emplace(
439
66.2k
                    _table_info_node_ptr->children_file_column_name(table_column_name),
440
66.2k
                    table_column_name);
441
66.2k
        } else {
442
1.90k
            _missing_cols.emplace_back(table_column_name);
443
1.90k
        }
444
68.1k
    }
445
181k
    for (int i = 0; i < schema_desc.size(); ++i) {
446
162k
        const auto& name = schema_desc.get_column(i)->name;
447
162k
        if (required_file_columns.contains(name)) {
448
66.2k
            _read_file_columns.emplace_back(name);
449
66.2k
            _read_table_columns.emplace_back(required_file_columns[name]);
450
66.2k
            _read_table_columns_set.insert(required_file_columns[name]);
451
66.2k
        }
452
162k
    }
453
    // build column predicates for column lazy read
454
18.9k
    _lazy_read_ctx.conjuncts = conjuncts;
455
18.9k
    _lazy_read_ctx.slot_id_to_predicates = slot_id_to_predicates;
456
18.9k
    return Status::OK();
457
18.9k
}
458
459
8.09k
bool ParquetReader::_exists_in_file(const std::string& expr_name) const {
460
    // `_read_table_columns_set` is used to ensure that only columns actually read are subject to min-max filtering.
461
    // This primarily handles cases where partition columns also exist in a file. The reason it's not modified
462
    // in `_table_info_node_ptr` is that Iceberg and Hudi have inconsistent requirements for this node;
463
    // Iceberg partition evolution needs to read partition columns from a file.
464
    // Hudi with `hoodie.datasource.write.drop.partition.columns=false` set does not need to read partition columns from a file.
465
8.09k
    return _table_info_node_ptr->children_column_exists(expr_name) &&
466
8.09k
           _read_table_columns_set.contains(expr_name);
467
8.09k
}
468
469
7.64k
bool ParquetReader::_type_matches(const int cid) const {
470
7.64k
    auto* slot = _tuple_descriptor->slots()[cid];
471
7.64k
    auto table_col_type = remove_nullable(slot->type());
472
473
7.64k
    const auto& file_col_name = _table_info_node_ptr->children_file_column_name(slot->col_name());
474
7.64k
    const auto& file_col_type =
475
7.64k
            remove_nullable(_file_metadata->schema().get_column(file_col_name)->data_type);
476
477
7.64k
    return (table_col_type->get_primitive_type() == file_col_type->get_primitive_type()) &&
478
7.64k
           !is_complex_type(table_col_type->get_primitive_type());
479
7.64k
}
480
481
Status ParquetReader::set_fill_columns(
482
        const std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>&
483
                partition_columns,
484
18.8k
        const std::unordered_map<std::string, VExprContextSPtr>& missing_columns) {
485
18.8k
    _lazy_read_ctx.fill_partition_columns = partition_columns;
486
18.8k
    _lazy_read_ctx.fill_missing_columns = missing_columns;
487
488
    // std::unordered_map<column_name, std::pair<col_id, slot_id>>
489
18.8k
    std::unordered_map<std::string, std::pair<uint32_t, int>> predicate_columns;
490
491
    // TODO(gabriel): we should try to clean up the many structs that are used to represent conjuncts and predicates.
492
    // visit_slot for lazy mat.
493
37.4k
    std::function<void(VExpr * expr)> visit_slot = [&](VExpr* expr) {
494
37.4k
        if (expr->is_slot_ref()) {
495
11.7k
            VSlotRef* slot_ref = static_cast<VSlotRef*>(expr);
496
11.7k
            auto expr_name = slot_ref->expr_name();
497
11.7k
            predicate_columns.emplace(expr_name,
498
11.7k
                                      std::make_pair(slot_ref->column_id(), slot_ref->slot_id()));
499
11.7k
            if (slot_ref->column_id() == 0) {
500
8.22k
                _lazy_read_ctx.resize_first_column = false;
501
8.22k
            }
502
11.7k
            return;
503
11.7k
        }
504
26.1k
        for (auto& child : expr->children()) {
505
26.1k
            visit_slot(child.get());
506
26.1k
        }
507
25.7k
    };
508
18.8k
    for (const auto& conjunct : _lazy_read_ctx.conjuncts) {
509
11.4k
        auto expr = conjunct->root();
510
511
11.4k
        if (expr->is_rf_wrapper()) {
512
            // REF: src/runtime_filter/runtime_filter_consumer.cpp
513
3.18k
            VRuntimeFilterWrapper* runtime_filter = assert_cast<VRuntimeFilterWrapper*>(expr.get());
514
515
3.18k
            auto filter_impl = runtime_filter->get_impl();
516
3.18k
            visit_slot(filter_impl.get());
517
8.22k
        } else {
518
8.22k
            visit_slot(expr.get());
519
8.22k
        }
520
11.4k
    }
521
18.8k
    if (!_lazy_read_ctx.slot_id_to_predicates.empty()) {
522
17.1k
        auto and_pred = AndBlockColumnPredicate::create_unique();
523
65.8k
        for (const auto& entry : _lazy_read_ctx.slot_id_to_predicates) {
524
65.8k
            for (const auto& pred : entry.second) {
525
8.18k
                if (!_exists_in_file(pred->col_name()) || !_type_matches(pred->column_id())) {
526
708
                    continue;
527
708
                }
528
7.47k
                and_pred->add_column_predicate(
529
7.47k
                        SingleColumnBlockPredicate::create_unique(pred->clone(pred->column_id())));
530
7.47k
            }
531
65.8k
        }
532
17.1k
        if (and_pred->num_of_column_predicate() > 0) {
533
4.86k
            _push_down_predicates.push_back(std::move(and_pred));
534
4.86k
        }
535
17.1k
    }
536
537
18.8k
    const FieldDescriptor& schema = _file_metadata->schema();
538
539
65.7k
    for (auto& read_table_col : _read_table_columns) {
540
65.7k
        _lazy_read_ctx.all_read_columns.emplace_back(read_table_col);
541
542
65.7k
        auto file_column_name = _table_info_node_ptr->children_file_column_name(read_table_col);
543
65.7k
        PrimitiveType column_type =
544
65.7k
                schema.get_column(file_column_name)->data_type->get_primitive_type();
545
65.7k
        if (is_complex_type(column_type)) {
546
16.9k
            _lazy_read_ctx.has_complex_type = true;
547
16.9k
        }
548
65.7k
        if (predicate_columns.size() > 0) {
549
20.6k
            auto iter = predicate_columns.find(read_table_col);
550
20.6k
            if (iter == predicate_columns.end()) {
551
12.7k
                _lazy_read_ctx.lazy_read_columns.emplace_back(read_table_col);
552
12.7k
            } else {
553
7.92k
                _lazy_read_ctx.predicate_columns.first.emplace_back(iter->first);
554
7.92k
                _lazy_read_ctx.predicate_columns.second.emplace_back(iter->second.second);
555
7.92k
                _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
556
7.92k
            }
557
20.6k
        }
558
65.7k
    }
559
18.8k
    if (_row_id_column_iterator_pair.first != nullptr) {
560
1.23k
        _lazy_read_ctx.all_predicate_col_ids.emplace_back(_row_id_column_iterator_pair.second);
561
1.23k
    }
562
563
18.8k
    for (auto& kv : _lazy_read_ctx.fill_partition_columns) {
564
2.39k
        auto iter = predicate_columns.find(kv.first);
565
2.39k
        if (iter == predicate_columns.end()) {
566
2.09k
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
567
2.09k
        } else {
568
305
            _lazy_read_ctx.predicate_partition_columns.emplace(kv.first, kv.second);
569
305
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
570
305
        }
571
2.39k
    }
572
573
18.8k
    for (auto& kv : _lazy_read_ctx.fill_missing_columns) {
574
1.91k
        auto iter = predicate_columns.find(kv.first);
575
1.91k
        if (iter == predicate_columns.end()) {
576
1.37k
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
577
1.37k
        } else {
578
            //For check missing column :   missing column == xx, missing column is null,missing column is not null.
579
547
            if (_slot_id_to_filter_conjuncts->find(iter->second.second) !=
580
547
                _slot_id_to_filter_conjuncts->end()) {
581
416
                for (auto& ctx : _slot_id_to_filter_conjuncts->find(iter->second.second)->second) {
582
416
                    _lazy_read_ctx.missing_columns_conjuncts.emplace_back(ctx);
583
416
                }
584
412
            }
585
586
547
            _lazy_read_ctx.predicate_missing_columns.emplace(kv.first, kv.second);
587
547
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
588
547
        }
589
1.91k
    }
590
591
18.8k
    if (_enable_lazy_mat && _lazy_read_ctx.predicate_columns.first.size() > 0 &&
592
18.8k
        _lazy_read_ctx.lazy_read_columns.size() > 0) {
593
3.98k
        _lazy_read_ctx.can_lazy_read = true;
594
3.98k
    }
595
596
18.8k
    if (!_lazy_read_ctx.can_lazy_read) {
597
14.9k
        for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
598
257
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
599
257
        }
600
14.9k
        for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
601
513
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
602
513
        }
603
14.9k
    }
604
605
18.8k
    if (_filter_groups && (_total_groups == 0 || _t_metadata->num_rows == 0 || _range_size < 0)) {
606
2
        return Status::EndOfFile("No row group to read");
607
2
    }
608
18.8k
    _fill_all_columns = true;
609
18.8k
    return Status::OK();
610
18.8k
}
611
612
// init file reader and file metadata for parsing schema
613
1.99k
Status ParquetReader::init_schema_reader() {
614
1.99k
    RETURN_IF_ERROR(_open_file());
615
1.99k
    _t_metadata = &(_file_metadata->to_thrift());
616
1.99k
    return Status::OK();
617
1.99k
}
618
619
Status ParquetReader::get_parsed_schema(std::vector<std::string>* col_names,
620
1.23k
                                        std::vector<DataTypePtr>* col_types) {
621
1.23k
    _total_groups = _t_metadata->row_groups.size();
622
1.23k
    auto schema_desc = _file_metadata->schema();
623
8.17k
    for (int i = 0; i < schema_desc.size(); ++i) {
624
        // Get the Column Reader for the boolean column
625
6.93k
        col_names->emplace_back(schema_desc.get_column(i)->name);
626
6.93k
        col_types->emplace_back(make_nullable(schema_desc.get_column(i)->data_type));
627
6.93k
    }
628
1.23k
    return Status::OK();
629
1.23k
}
630
631
Status ParquetReader::get_columns(std::unordered_map<std::string, DataTypePtr>* name_to_type,
632
17.8k
                                  std::unordered_set<std::string>* missing_cols) {
633
17.8k
    const auto& schema_desc = _file_metadata->schema();
634
17.8k
    std::unordered_set<std::string> column_names;
635
17.8k
    schema_desc.get_column_names(&column_names);
636
159k
    for (auto& name : column_names) {
637
159k
        auto field = schema_desc.get_column(name);
638
159k
        name_to_type->emplace(name, field->data_type);
639
159k
    }
640
17.8k
    for (auto& col : _missing_cols) {
641
1.91k
        missing_cols->insert(col);
642
1.91k
    }
643
17.8k
    return Status::OK();
644
17.8k
}
645
646
44.1k
Status ParquetReader::get_next_block(Block* block, size_t* read_rows, bool* eof) {
647
44.1k
    if (_current_group_reader == nullptr || _row_group_eof) {
648
23.8k
        Status st = _next_row_group_reader();
649
23.8k
        if (!st.ok() && !st.is<ErrorCode::END_OF_FILE>()) {
650
0
            return st;
651
0
        }
652
23.8k
        if (_current_group_reader == nullptr || _row_group_eof || st.is<ErrorCode::END_OF_FILE>()) {
653
3.03k
            _current_group_reader.reset(nullptr);
654
3.03k
            _row_group_eof = true;
655
3.03k
            *read_rows = 0;
656
3.03k
            *eof = true;
657
3.03k
            return Status::OK();
658
3.03k
        }
659
23.8k
    }
660
41.1k
    if (_push_down_agg_type == TPushAggOp::type::COUNT) {
661
1.05k
        auto rows = std::min(_current_group_reader->get_remaining_rows(), (int64_t)_batch_size);
662
663
1.05k
        _current_group_reader->set_remaining_rows(_current_group_reader->get_remaining_rows() -
664
1.05k
                                                  rows);
665
1.05k
        auto mutate_columns = block->mutate_columns();
666
1.05k
        for (auto& col : mutate_columns) {
667
1.04k
            col->resize(rows);
668
1.04k
        }
669
1.05k
        block->set_columns(std::move(mutate_columns));
670
671
1.05k
        *read_rows = rows;
672
1.05k
        if (_current_group_reader->get_remaining_rows() == 0) {
673
312
            _current_group_reader.reset(nullptr);
674
312
        }
675
676
1.05k
        return Status::OK();
677
1.05k
    }
678
679
40.0k
    SCOPED_RAW_TIMER(&_reader_statistics.column_read_time);
680
40.0k
    Status batch_st =
681
40.0k
            _current_group_reader->next_batch(block, _batch_size, read_rows, &_row_group_eof);
682
40.0k
    if (batch_st.is<ErrorCode::END_OF_FILE>()) {
683
4
        block->clear_column_data();
684
4
        _current_group_reader.reset(nullptr);
685
4
        *read_rows = 0;
686
4
        *eof = true;
687
4
        return Status::OK();
688
4
    }
689
690
40.0k
    if (!batch_st.ok()) {
691
10
        return Status::InternalError("Read parquet file {} failed, reason = {}", _scan_range.path,
692
10
                                     batch_st.to_string());
693
10
    }
694
695
40.0k
    if (_row_group_eof) {
696
20.5k
        auto column_st = _current_group_reader->merged_column_statistics();
697
20.5k
        _column_statistics.merge(column_st);
698
20.5k
        _reader_statistics.lazy_read_filtered_rows +=
699
20.5k
                _current_group_reader->lazy_read_filtered_rows();
700
20.5k
        _reader_statistics.predicate_filter_time += _current_group_reader->predicate_filter_time();
701
20.5k
        _reader_statistics.dict_filter_rewrite_time +=
702
20.5k
                _current_group_reader->dict_filter_rewrite_time();
703
20.5k
        if (_io_ctx) {
704
20.5k
            _io_ctx->condition_cache_filtered_rows +=
705
20.5k
                    _current_group_reader->condition_cache_filtered_rows();
706
20.5k
        }
707
708
20.5k
        if (_current_row_group_index.row_group_id + 1 == _total_groups) {
709
15.7k
            *eof = true;
710
15.7k
        } else {
711
4.75k
            *eof = false;
712
4.75k
        }
713
20.5k
    }
714
40.0k
    return Status::OK();
715
40.0k
}
716
717
RowGroupReader::PositionDeleteContext ParquetReader::_get_position_delete_ctx(
718
20.9k
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index) {
719
20.9k
    if (_delete_rows == nullptr) {
720
19.4k
        return RowGroupReader::PositionDeleteContext(row_group.num_rows, row_group_index.first_row);
721
19.4k
    }
722
1.40k
    const int64_t* delete_rows = &(*_delete_rows)[0];
723
1.40k
    const int64_t* delete_rows_end = delete_rows + _delete_rows->size();
724
1.40k
    const int64_t* start_pos = std::lower_bound(delete_rows + _delete_rows_index, delete_rows_end,
725
1.40k
                                                row_group_index.first_row);
726
1.40k
    int64_t start_index = start_pos - delete_rows;
727
1.40k
    const int64_t* end_pos = std::lower_bound(start_pos, delete_rows_end, row_group_index.last_row);
728
1.40k
    int64_t end_index = end_pos - delete_rows;
729
1.40k
    _delete_rows_index = end_index;
730
1.40k
    return RowGroupReader::PositionDeleteContext(*_delete_rows, row_group.num_rows,
731
1.40k
                                                 row_group_index.first_row, start_index, end_index);
732
20.9k
}
733
734
23.8k
Status ParquetReader::_next_row_group_reader() {
735
23.8k
    if (_current_group_reader != nullptr) {
736
4.71k
        _current_group_reader->collect_profile_before_close();
737
4.71k
    }
738
739
23.8k
    RowRanges candidate_row_ranges;
740
53.2k
    while (++_current_row_group_index.row_group_id < _total_groups) {
741
50.2k
        const auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
742
50.2k
        _current_row_group_index.first_row = _current_row_group_index.last_row;
743
50.2k
        _current_row_group_index.last_row = _current_row_group_index.last_row + row_group.num_rows;
744
745
50.2k
        if (_filter_groups && _is_misaligned_range_group(row_group)) {
746
25.8k
            continue;
747
25.8k
        }
748
749
24.3k
        candidate_row_ranges.clear();
750
        // The range of lines to be read is determined by the push down predicate.
751
24.3k
        RETURN_IF_ERROR(_process_min_max_bloom_filter(
752
24.3k
                _current_row_group_index, row_group, _push_down_predicates, &candidate_row_ranges));
753
754
24.3k
        std::function<int64_t(const FieldSchema*)> column_compressed_size =
755
131k
                [&row_group, &column_compressed_size](const FieldSchema* field) -> int64_t {
756
131k
            if (field->physical_column_index >= 0) {
757
108k
                int parquet_col_id = field->physical_column_index;
758
108k
                if (row_group.columns[parquet_col_id].__isset.meta_data) {
759
108k
                    return row_group.columns[parquet_col_id].meta_data.total_compressed_size;
760
108k
                }
761
18.4E
                return 0;
762
108k
            }
763
23.3k
            int64_t size = 0;
764
39.4k
            for (const FieldSchema& child : field->children) {
765
39.4k
                size += column_compressed_size(&child);
766
39.4k
            }
767
23.3k
            return size;
768
131k
        };
769
24.3k
        int64_t group_size = 0; // only calculate the needed columns
770
92.0k
        for (auto& read_col : _read_file_columns) {
771
92.0k
            const FieldSchema* field = _file_metadata->schema().get_column(read_col);
772
92.0k
            group_size += column_compressed_size(field);
773
92.0k
        }
774
775
24.3k
        _reader_statistics.read_rows += candidate_row_ranges.count();
776
24.3k
        if (_io_ctx) {
777
24.3k
            _io_ctx->file_reader_stats->read_rows += candidate_row_ranges.count();
778
24.3k
        }
779
780
24.3k
        if (candidate_row_ranges.count() != 0) {
781
            // need read this row group.
782
20.8k
            _reader_statistics.read_row_groups++;
783
20.8k
            _reader_statistics.filtered_page_rows +=
784
20.8k
                    row_group.num_rows - candidate_row_ranges.count();
785
20.8k
            break;
786
20.8k
        } else {
787
            // this row group be filtered.
788
3.47k
            _reader_statistics.filtered_row_groups++;
789
3.47k
            _reader_statistics.filtered_bytes += group_size;
790
3.47k
            _reader_statistics.filtered_group_rows += row_group.num_rows;
791
3.47k
        }
792
24.3k
    }
793
794
23.8k
    if (_current_row_group_index.row_group_id == _total_groups) {
795
3.03k
        _row_group_eof = true;
796
3.03k
        _current_group_reader.reset(nullptr);
797
3.03k
        return Status::EndOfFile("No next RowGroupReader");
798
3.03k
    }
799
800
    // process page index and generate the ranges to read
801
20.8k
    auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
802
803
20.8k
    RowGroupReader::PositionDeleteContext position_delete_ctx =
804
20.8k
            _get_position_delete_ctx(row_group, _current_row_group_index);
805
20.8k
    io::FileReaderSPtr group_file_reader;
806
20.8k
    if (typeid_cast<io::InMemoryFileReader*>(_file_reader.get())) {
807
        // InMemoryFileReader has the ability to merge small IO
808
10.8k
        group_file_reader = _file_reader;
809
10.8k
    } else {
810
9.96k
        size_t avg_io_size = 0;
811
9.96k
        const std::vector<io::PrefetchRange> io_ranges =
812
9.96k
                _generate_random_access_ranges(_current_row_group_index, &avg_io_size);
813
9.96k
        int64_t merged_read_slice_size = -1;
814
9.97k
        if (_state != nullptr && _state->query_options().__isset.merge_read_slice_size) {
815
9.96k
            merged_read_slice_size = _state->query_options().merge_read_slice_size;
816
9.96k
        }
817
        // The underlying page reader will prefetch data in column.
818
        // Using both MergeRangeFileReader and BufferedStreamReader simultaneously would waste a lot of memory.
819
9.96k
        group_file_reader =
820
9.96k
                avg_io_size < io::MergeRangeFileReader::SMALL_IO
821
9.96k
                        ? std::make_shared<io::MergeRangeFileReader>(
822
9.93k
                                  _profile, _file_reader, io_ranges, merged_read_slice_size)
823
9.96k
                        : _file_reader;
824
9.96k
    }
825
20.8k
    _current_group_reader.reset(new RowGroupReader(
826
20.8k
            _io_ctx ? std::make_shared<io::TracingFileReader>(group_file_reader,
827
20.7k
                                                              _io_ctx->file_reader_stats)
828
20.8k
                    : group_file_reader,
829
20.8k
            _read_table_columns, _current_row_group_index.row_group_id, row_group, _ctz, _io_ctx,
830
20.8k
            position_delete_ctx, _lazy_read_ctx, _state, _column_ids, _filter_column_ids));
831
20.8k
    _row_group_eof = false;
832
833
20.8k
    _current_group_reader->set_current_row_group_idx(_current_row_group_index);
834
20.8k
    _current_group_reader->set_row_id_column_iterator(_row_id_column_iterator_pair);
835
20.8k
    _current_group_reader->set_col_name_to_block_idx(_col_name_to_block_idx);
836
20.8k
    if (_condition_cache_ctx) {
837
4.35k
        _current_group_reader->set_condition_cache_context(_condition_cache_ctx);
838
4.35k
    }
839
840
20.8k
    _current_group_reader->_table_info_node_ptr = _table_info_node_ptr;
841
20.8k
    return _current_group_reader->init(_file_metadata->schema(), candidate_row_ranges, _col_offsets,
842
20.8k
                                       _tuple_descriptor, _row_descriptor, _colname_to_slot_id,
843
20.8k
                                       _not_single_slot_filter_conjuncts,
844
20.8k
                                       _slot_id_to_filter_conjuncts);
845
23.8k
}
846
847
std::vector<io::PrefetchRange> ParquetReader::_generate_random_access_ranges(
848
9.95k
        const RowGroupReader::RowGroupIndex& group, size_t* avg_io_size) {
849
9.95k
    std::vector<io::PrefetchRange> result;
850
9.95k
    int64_t last_chunk_end = -1;
851
9.95k
    size_t total_io_size = 0;
852
9.95k
    std::function<void(const FieldSchema*, const tparquet::RowGroup&)> scalar_range =
853
70.7k
            [&](const FieldSchema* field, const tparquet::RowGroup& row_group) {
854
70.7k
                if (_column_ids.empty() ||
855
70.8k
                    _column_ids.find(field->get_column_id()) != _column_ids.end()) {
856
70.8k
                    if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
857
7.43k
                        scalar_range(&field->children[0], row_group);
858
63.4k
                    } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
859
5.11k
                        scalar_range(&field->children[0], row_group);
860
5.11k
                        scalar_range(&field->children[1], row_group);
861
58.3k
                    } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
862
10.4k
                        for (int i = 0; i < field->children.size(); ++i) {
863
7.07k
                            scalar_range(&field->children[i], row_group);
864
7.07k
                        }
865
54.9k
                    } else {
866
54.9k
                        const tparquet::ColumnChunk& chunk =
867
54.9k
                                row_group.columns[field->physical_column_index];
868
54.9k
                        auto& chunk_meta = chunk.meta_data;
869
54.9k
                        int64_t chunk_start = has_dict_page(chunk_meta)
870
54.9k
                                                      ? chunk_meta.dictionary_page_offset
871
54.9k
                                                      : chunk_meta.data_page_offset;
872
54.9k
                        int64_t chunk_end = chunk_start + chunk_meta.total_compressed_size;
873
54.9k
                        DCHECK_GE(chunk_start, last_chunk_end);
874
54.9k
                        result.emplace_back(chunk_start, chunk_end);
875
54.9k
                        total_io_size += chunk_meta.total_compressed_size;
876
54.9k
                        last_chunk_end = chunk_end;
877
54.9k
                    }
878
70.8k
                }
879
70.7k
            };
880
9.95k
    const tparquet::RowGroup& row_group = _t_metadata->row_groups[group.row_group_id];
881
46.2k
    for (const auto& read_col : _read_file_columns) {
882
46.2k
        const FieldSchema* field = _file_metadata->schema().get_column(read_col);
883
46.2k
        scalar_range(field, row_group);
884
46.2k
    }
885
9.95k
    if (!result.empty()) {
886
9.60k
        *avg_io_size = total_io_size / result.size();
887
9.60k
    }
888
9.95k
    return result;
889
9.95k
}
890
891
58.7k
bool ParquetReader::_is_misaligned_range_group(const tparquet::RowGroup& row_group) const {
892
58.7k
    int64_t start_offset = _get_column_start_offset(row_group.columns[0].meta_data);
893
894
58.7k
    auto& last_column = row_group.columns[row_group.columns.size() - 1].meta_data;
895
58.7k
    int64_t end_offset = _get_column_start_offset(last_column) + last_column.total_compressed_size;
896
897
58.7k
    int64_t row_group_mid = start_offset + (end_offset - start_offset) / 2;
898
58.7k
    if (!(row_group_mid >= _range_start_offset &&
899
58.7k
          row_group_mid < _range_start_offset + _range_size)) {
900
27.3k
        return true;
901
27.3k
    }
902
31.3k
    return false;
903
58.7k
}
904
905
2.75k
int64_t ParquetReader::get_total_rows() const {
906
2.75k
    if (!_t_metadata) return 0;
907
2.75k
    if (!_filter_groups) return _t_metadata->num_rows;
908
2.75k
    int64_t total = 0;
909
5.30k
    for (const auto& rg : _t_metadata->row_groups) {
910
5.30k
        if (!_is_misaligned_range_group(rg)) {
911
3.83k
            total += rg.num_rows;
912
3.83k
        }
913
5.30k
    }
914
2.75k
    return total;
915
2.75k
}
916
917
4.24k
void ParquetReader::set_condition_cache_context(std::shared_ptr<ConditionCacheContext> ctx) {
918
4.24k
    _condition_cache_ctx = std::move(ctx);
919
4.24k
    if (!_condition_cache_ctx || !_t_metadata || !_filter_groups) {
920
0
        return;
921
0
    }
922
    // Find the first assigned row group to compute base_granule.
923
4.24k
    int64_t first_row = 0;
924
4.26k
    for (const auto& rg : _t_metadata->row_groups) {
925
4.26k
        if (!_is_misaligned_range_group(rg)) {
926
4.23k
            _condition_cache_ctx->base_granule = first_row / ConditionCacheContext::GRANULE_SIZE;
927
4.23k
            return;
928
4.23k
        }
929
29
        first_row += rg.num_rows;
930
29
    }
931
4.24k
}
932
933
Status ParquetReader::_process_page_index_filter(
934
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index,
935
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
936
18.9k
        RowRanges* candidate_row_ranges) {
937
18.9k
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
938
2
        return Status::EndOfFile("stop");
939
2
    }
940
941
18.9k
    std::function<void()> read_whole_row_group = [&]() {
942
16.3k
        candidate_row_ranges->add(RowRange {0, row_group.num_rows});
943
16.3k
    };
944
945
    // Check if the page index is available and if it exists.
946
18.9k
    PageIndex page_index;
947
18.9k
    if (!config::enable_parquet_page_index || _colname_to_slot_id == nullptr ||
948
18.9k
        !page_index.check_and_get_page_index_ranges(row_group.columns)) {
949
9.83k
        read_whole_row_group();
950
9.83k
        return Status::OK();
951
9.83k
    }
952
953
9.07k
    std::vector<int> parquet_col_ids;
954
33.5k
    for (size_t idx = 0; idx < _read_table_columns.size(); idx++) {
955
24.4k
        const auto& read_table_col = _read_table_columns[idx];
956
24.4k
        const auto& read_file_col = _read_file_columns[idx];
957
24.4k
        if (!_colname_to_slot_id->contains(read_table_col)) {
958
423
            continue;
959
423
        }
960
24.0k
        auto* field = _file_metadata->schema().get_column(read_file_col);
961
962
40.1k
        std::function<void(FieldSchema * field)> f = [&](FieldSchema* field) {
963
40.1k
            if (!_column_ids.empty() &&
964
40.1k
                _column_ids.find(field->get_column_id()) == _column_ids.end()) {
965
1.29k
                return;
966
1.29k
            }
967
968
38.8k
            if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
969
2.33k
                f(&field->children[0]);
970
36.4k
            } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
971
1.85k
                f(&field->children[0]);
972
1.85k
                f(&field->children[1]);
973
34.6k
            } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
974
14.2k
                for (int i = 0; i < field->children.size(); ++i) {
975
10.1k
                    f(&field->children[i]);
976
10.1k
                }
977
30.4k
            } else {
978
30.4k
                int parquet_col_id = field->physical_column_index;
979
30.5k
                if (parquet_col_id >= 0) {
980
30.5k
                    parquet_col_ids.push_back(parquet_col_id);
981
30.5k
                }
982
30.4k
            }
983
38.8k
        };
984
985
24.0k
        f(field);
986
24.0k
    }
987
988
9.14k
    auto parse_offset_index = [&]() -> Status {
989
9.14k
        std::vector<uint8_t> off_index_buff(page_index._offset_index_size);
990
9.14k
        Slice res(off_index_buff.data(), page_index._offset_index_size);
991
9.14k
        size_t bytes_read = 0;
992
9.14k
        {
993
9.14k
            SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
994
9.14k
            RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._offset_index_start, res,
995
9.14k
                                                          &bytes_read, _io_ctx));
996
9.14k
        }
997
9.14k
        _column_statistics.page_index_read_calls++;
998
9.14k
        _col_offsets.clear();
999
1000
30.5k
        for (auto parquet_col_id : parquet_col_ids) {
1001
30.5k
            auto& chunk = row_group.columns[parquet_col_id];
1002
30.5k
            if (chunk.offset_index_length == 0) [[unlikely]] {
1003
0
                continue;
1004
0
            }
1005
30.5k
            tparquet::OffsetIndex offset_index;
1006
30.5k
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1007
30.5k
            RETURN_IF_ERROR(
1008
30.5k
                    page_index.parse_offset_index(chunk, off_index_buff.data(), &offset_index));
1009
30.5k
            _col_offsets[parquet_col_id] = offset_index;
1010
30.5k
        }
1011
9.14k
        return Status::OK();
1012
9.14k
    };
1013
1014
    // from https://github.com/apache/doris/pull/55795
1015
9.07k
    RETURN_IF_ERROR(parse_offset_index());
1016
1017
    // Check if page index is needed for min-max filter.
1018
9.10k
    if (!_enable_filter_by_min_max || push_down_pred.empty()) {
1019
6.52k
        read_whole_row_group();
1020
6.52k
        return Status::OK();
1021
6.52k
    }
1022
1023
    // read column index.
1024
2.55k
    std::vector<uint8_t> col_index_buff(page_index._column_index_size);
1025
2.55k
    size_t bytes_read = 0;
1026
2.55k
    Slice result(col_index_buff.data(), page_index._column_index_size);
1027
2.55k
    {
1028
2.55k
        SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1029
2.55k
        RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._column_index_start, result,
1030
2.55k
                                                      &bytes_read, _io_ctx));
1031
2.55k
    }
1032
2.55k
    _column_statistics.page_index_read_calls++;
1033
1034
2.55k
    SCOPED_RAW_TIMER(&_reader_statistics.page_index_filter_time);
1035
1036
    // Construct a cacheable page index structure to avoid repeatedly reading the page index of the same column.
1037
2.55k
    ParquetPredicate::CachedPageIndexStat cached_page_index;
1038
2.55k
    cached_page_index.ctz = _ctz;
1039
2.55k
    std::function<bool(ParquetPredicate::PageIndexStat**, int)> get_stat_func =
1040
2.55k
            [&](ParquetPredicate::PageIndexStat** ans, const int cid) -> bool {
1041
2.24k
        if (cached_page_index.stats.contains(cid)) {
1042
893
            *ans = &cached_page_index.stats[cid];
1043
893
            return (*ans)->available;
1044
893
        }
1045
1.35k
        cached_page_index.stats.emplace(cid, ParquetPredicate::PageIndexStat {});
1046
1.35k
        auto& sig_stat = cached_page_index.stats[cid];
1047
1048
1.35k
        auto* slot = _tuple_descriptor->slots()[cid];
1049
1.35k
        if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1050
            // table column not exist in file, may be schema change.
1051
0
            return false;
1052
0
        }
1053
1054
1.35k
        const auto& file_col_name =
1055
1.35k
                _table_info_node_ptr->children_file_column_name(slot->col_name());
1056
1.35k
        const FieldSchema* col_schema = _file_metadata->schema().get_column(file_col_name);
1057
1.35k
        int parquet_col_id = col_schema->physical_column_index;
1058
1059
1.35k
        if (parquet_col_id < 0) {
1060
            // complex type, not support page index yet.
1061
0
            return false;
1062
0
        }
1063
1.35k
        if (!_col_offsets.contains(parquet_col_id)) {
1064
            // If the file contains partition columns and the query applies filters on those
1065
            // partition columns, then reading the page index is unnecessary.
1066
0
            return false;
1067
0
        }
1068
1069
1.35k
        auto& column_chunk = row_group.columns[parquet_col_id];
1070
1.35k
        if (column_chunk.column_index_length == 0 || column_chunk.offset_index_length == 0) {
1071
            // column no page index.
1072
0
            return false;
1073
0
        }
1074
1075
1.35k
        tparquet::ColumnIndex column_index;
1076
1.35k
        {
1077
1.35k
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1078
1.35k
            RETURN_IF_ERROR(page_index.parse_column_index(column_chunk, col_index_buff.data(),
1079
1.35k
                                                          &column_index));
1080
1.35k
        }
1081
1.35k
        const int64_t num_of_pages = column_index.null_pages.size();
1082
1.35k
        if (num_of_pages <= 0) [[unlikely]] {
1083
            // no page. (maybe this row group no data.)
1084
0
            return false;
1085
0
        }
1086
1.35k
        DCHECK_EQ(column_index.min_values.size(), column_index.max_values.size());
1087
1.35k
        if (!column_index.__isset.null_counts) {
1088
            // not set null or null counts;
1089
0
            return false;
1090
0
        }
1091
1092
1.35k
        auto& offset_index = _col_offsets[parquet_col_id];
1093
1.35k
        const auto& page_locations = offset_index.page_locations;
1094
1095
1.35k
        sig_stat.col_schema = col_schema;
1096
1.35k
        sig_stat.num_of_pages = num_of_pages;
1097
1.35k
        sig_stat.encoded_min_value = column_index.min_values;
1098
1.35k
        sig_stat.encoded_max_value = column_index.max_values;
1099
1.35k
        sig_stat.is_all_null.resize(num_of_pages);
1100
1.35k
        sig_stat.has_null.resize(num_of_pages);
1101
1.35k
        sig_stat.ranges.resize(num_of_pages);
1102
1103
13.6k
        for (int page_id = 0; page_id < num_of_pages; page_id++) {
1104
12.2k
            sig_stat.is_all_null[page_id] = column_index.null_pages[page_id];
1105
12.2k
            sig_stat.has_null[page_id] = column_index.null_counts[page_id] > 0;
1106
1107
12.2k
            int64_t from = page_locations[page_id].first_row_index;
1108
12.2k
            int64_t to = 0;
1109
12.2k
            if (page_id == page_locations.size() - 1) {
1110
1.34k
                to = row_group_index.last_row;
1111
10.9k
            } else {
1112
10.9k
                to = page_locations[page_id + 1].first_row_index;
1113
10.9k
            }
1114
12.2k
            sig_stat.ranges[page_id] = RowRange {from, to};
1115
12.2k
        }
1116
1117
1.35k
        sig_stat.available = true;
1118
1.35k
        *ans = &sig_stat;
1119
1.35k
        return true;
1120
1.35k
    };
1121
2.55k
    cached_page_index.row_group_range = {0, row_group.num_rows};
1122
2.55k
    cached_page_index.get_stat_func = get_stat_func;
1123
1124
2.55k
    candidate_row_ranges->add({0, row_group.num_rows});
1125
2.62k
    for (const auto& predicate : push_down_pred) {
1126
2.62k
        RowRanges tmp_row_range;
1127
2.62k
        if (!predicate->evaluate_and(&cached_page_index, &tmp_row_range)) {
1128
            // no need read this row group.
1129
9
            candidate_row_ranges->clear();
1130
9
            return Status::OK();
1131
9
        }
1132
2.61k
        RowRanges::ranges_intersection(*candidate_row_ranges, tmp_row_range, candidate_row_ranges);
1133
2.61k
    }
1134
2.54k
    return Status::OK();
1135
2.55k
}
1136
1137
Status ParquetReader::_process_min_max_bloom_filter(
1138
        const RowGroupReader::RowGroupIndex& row_group_index, const tparquet::RowGroup& row_group,
1139
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1140
24.3k
        RowRanges* row_ranges) {
1141
24.3k
    SCOPED_RAW_TIMER(&_reader_statistics.row_group_filter_time);
1142
24.3k
    if (!_filter_groups) {
1143
        // No row group filtering is needed;
1144
        // for example, Iceberg reads position delete files.
1145
1.06k
        row_ranges->add({0, row_group.num_rows});
1146
1.06k
        return Status::OK();
1147
1.06k
    }
1148
1149
23.2k
    if (_read_by_rows) {
1150
994
        auto group_start = row_group_index.first_row;
1151
994
        auto group_end = row_group_index.last_row;
1152
1153
3.58k
        while (!_row_ids.empty()) {
1154
2.79k
            auto v = _row_ids.front();
1155
2.79k
            if (v < group_start) {
1156
0
                continue;
1157
2.79k
            } else if (v < group_end) {
1158
2.59k
                row_ranges->add(RowRange {v - group_start, v - group_start + 1});
1159
2.59k
                _row_ids.pop_front();
1160
2.59k
            } else {
1161
206
                break;
1162
206
            }
1163
2.79k
        }
1164
22.2k
    } else {
1165
22.2k
        bool filter_this_row_group = false;
1166
22.2k
        bool filtered_by_min_max = false;
1167
22.2k
        bool filtered_by_bloom_filter = false;
1168
22.2k
        RETURN_IF_ERROR(_process_column_stat_filter(row_group, push_down_pred,
1169
22.2k
                                                    &filter_this_row_group, &filtered_by_min_max,
1170
22.2k
                                                    &filtered_by_bloom_filter));
1171
        // Update statistics based on filter type
1172
22.2k
        if (filter_this_row_group) {
1173
3.35k
            if (filtered_by_min_max) {
1174
3.35k
                _reader_statistics.filtered_row_groups_by_min_max++;
1175
3.35k
            }
1176
3.35k
            if (filtered_by_bloom_filter) {
1177
0
                _reader_statistics.filtered_row_groups_by_bloom_filter++;
1178
0
            }
1179
3.35k
        }
1180
1181
22.2k
        if (!filter_this_row_group) {
1182
18.9k
            RETURN_IF_ERROR(_process_page_index_filter(row_group, row_group_index, push_down_pred,
1183
18.9k
                                                       row_ranges));
1184
18.9k
        }
1185
22.2k
    }
1186
1187
23.2k
    return Status::OK();
1188
23.2k
}
1189
1190
// Evaluate the reader's push-down predicates against one row group using the
// column statistics (min/max) stored in the footer and, when present, the
// column bloom filters stored in the file.
//
// Outputs:
//   *filter_group             - set to true if the whole row group can be skipped
//                               (left untouched otherwise; caller initializes it).
//   *filtered_by_min_max      - true if min/max statistics caused the skip.
//   *filtered_by_bloom_filter - true if a loaded bloom filter caused the skip.
//
// NOTE(review): the `push_down_pred` parameter is never read in this body; the
// loop iterates the member `_push_down_predicates` instead. Confirm whether the
// parameter is intentionally unused or should replace the member access.
Status ParquetReader::_process_column_stat_filter(
        const tparquet::RowGroup& row_group,
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
        bool* filter_group, bool* filtered_by_min_max, bool* filtered_by_bloom_filter) {
    // If both filters are disabled, skip filtering
    if (!_enable_filter_by_min_max && !_enable_filter_by_bloom_filter) {
        return Status::OK();
    }

    // Cache bloom filters per parquet physical column id so that multiple
    // predicates on the same column do not read the same filter twice.
    std::unordered_map<int, std::unique_ptr<ParquetBlockSplitBloomFilter>> bloom_filter_cache;

    // Initialize output parameters
    *filtered_by_min_max = false;
    *filtered_by_bloom_filter = false;

    for (const auto& predicate : _push_down_predicates) {
        // Lazily reads min/max statistics for slot `cid`. Returns false when
        // min/max filtering is disabled, the column is absent from the file,
        // or the stats cannot be decoded.
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_stat_func =
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
                    // Check if min-max filter is enabled
                    if (!_enable_filter_by_min_max) {
                        return false;
                    }
                    auto* slot = _tuple_descriptor->slots()[cid];
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
                        return false;
                    }
                    const auto& file_col_name =
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
                    const FieldSchema* col_schema =
                            _file_metadata->schema().get_column(file_col_name);
                    int parquet_col_id = col_schema->physical_column_index;
                    // Bind by reference: ColumnMetaData may carry large min/max
                    // byte strings, so avoid copying it on every invocation.
                    const auto& meta_data = row_group.columns[parquet_col_id].meta_data;
                    stat->col_schema = col_schema;
                    return ParquetPredicate::read_column_stats(col_schema, meta_data,
                                                               &_ignored_stats,
                                                               _t_metadata->created_by, stat)
                            .ok();
                };
        // Lazily loads (or reuses from the cache) the bloom filter for slot
        // `cid`. Returns false when no usable bloom filter exists: column
        // absent, no bloom_filter_offset in the footer, unsupported type,
        // filtering disabled, or the read failed.
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_bloom_filter_func =
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
                    auto* slot = _tuple_descriptor->slots()[cid];
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
                        return false;
                    }
                    const auto& file_col_name =
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
                    const FieldSchema* col_schema =
                            _file_metadata->schema().get_column(file_col_name);
                    int parquet_col_id = col_schema->physical_column_index;
                    // Bind by reference to avoid a per-call metadata copy.
                    const auto& meta_data = row_group.columns[parquet_col_id].meta_data;
                    if (!meta_data.__isset.bloom_filter_offset) {
                        return false;
                    }
                    auto primitive_type =
                            remove_nullable(col_schema->data_type)->get_primitive_type();
                    if (!ParquetPredicate::bloom_filter_supported(primitive_type)) {
                        return false;
                    }

                    // Check if bloom filter is enabled
                    if (!_enable_filter_by_bloom_filter) {
                        return false;
                    }

                    // Check cache first
                    auto cache_iter = bloom_filter_cache.find(parquet_col_id);
                    if (cache_iter != bloom_filter_cache.end()) {
                        // Bloom filter already loaded for this column, reuse it
                        stat->bloom_filter = std::move(cache_iter->second);
                        bloom_filter_cache.erase(cache_iter);
                        return stat->bloom_filter != nullptr;
                    }

                    if (!stat->bloom_filter) {
                        SCOPED_RAW_TIMER(&_reader_statistics.bloom_filter_read_time);
                        auto st = ParquetPredicate::read_bloom_filter(
                                meta_data, _tracing_file_reader, _io_ctx, stat);
                        if (!st.ok()) {
                            // Treat a bloom filter read failure as "no filter":
                            // log and fall back rather than failing the scan.
                            LOG(WARNING) << "Failed to read bloom filter for column "
                                         << col_schema->name << " in file " << _scan_range.path
                                         << ", status: " << st.to_string();
                            stat->bloom_filter.reset();
                            return false;
                        }
                    }
                    return stat->bloom_filter != nullptr;
                };
        ParquetPredicate::ColumnStat stat;
        stat.ctz = _ctz;
        stat.get_stat_func = &get_stat_func;
        stat.get_bloom_filter_func = &get_bloom_filter_func;

        if (!predicate->evaluate_and(&stat)) {
            *filter_group = true;

            // Track which filter was used for filtering
            // If bloom filter was loaded, it means bloom filter was used
            if (stat.bloom_filter) {
                *filtered_by_bloom_filter = true;
            }
            // If col_schema was set but no bloom filter, it means min-max stats were used
            if (stat.col_schema && !stat.bloom_filter) {
                *filtered_by_min_max = true;
            }

            return Status::OK();
        }

        // After evaluating, if a bloom filter was loaded and the group
        // survived, cache the filter for subsequent predicates on the same
        // column. The physical column id comes straight from the schema the
        // filter was read for, so no slot scan is needed (assumes
        // FieldSchema::physical_column_index identifies the parquet column —
        // same key the cache lookup above uses).
        if (stat.bloom_filter && stat.col_schema) {
            bloom_filter_cache[stat.col_schema->physical_column_index] =
                    std::move(stat.bloom_filter);
        }
    }

    // Update filter statistics if this row group was not filtered
    // The statistics will be updated in _init_row_groups when filter_group is true
    return Status::OK();
}
1323
1324
117k
int64_t ParquetReader::_get_column_start_offset(const tparquet::ColumnMetaData& column) const {
1325
117k
    return has_dict_page(column) ? column.dictionary_page_offset : column.data_page_offset;
1326
117k
}
1327
1328
17.9k
// Flush the accumulated reader-level and column-level statistics into the
// runtime profile counters. No-op when no profile is attached. COUNTER_UPDATE
// adds deltas into the profile counters.
void ParquetReader::_collect_profile() {
    if (_profile == nullptr) {
        return;
    }

    // Let the active row-group reader publish its own counters first, so the
    // totals below include the group currently being read.
    if (_current_group_reader != nullptr) {
        _current_group_reader->collect_profile_before_close();
    }
    // Row-group and row filtering statistics.
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups, _reader_statistics.filtered_row_groups);
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_min_max,
                   _reader_statistics.filtered_row_groups_by_min_max);
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_bloom_filter,
                   _reader_statistics.filtered_row_groups_by_bloom_filter);
    COUNTER_UPDATE(_parquet_profile.to_read_row_groups, _reader_statistics.read_row_groups);
    COUNTER_UPDATE(_parquet_profile.total_row_groups, _total_groups);
    COUNTER_UPDATE(_parquet_profile.filtered_group_rows, _reader_statistics.filtered_group_rows);
    COUNTER_UPDATE(_parquet_profile.filtered_page_rows, _reader_statistics.filtered_page_rows);
    COUNTER_UPDATE(_parquet_profile.lazy_read_filtered_rows,
                   _reader_statistics.lazy_read_filtered_rows);
    COUNTER_UPDATE(_parquet_profile.filtered_bytes, _reader_statistics.filtered_bytes);
    COUNTER_UPDATE(_parquet_profile.raw_rows_read, _reader_statistics.read_rows);
    // Metadata / footer / index timing and call counts.
    COUNTER_UPDATE(_parquet_profile.column_read_time, _reader_statistics.column_read_time);
    COUNTER_UPDATE(_parquet_profile.parse_meta_time, _reader_statistics.parse_meta_time);
    COUNTER_UPDATE(_parquet_profile.parse_footer_time, _reader_statistics.parse_footer_time);
    COUNTER_UPDATE(_parquet_profile.file_reader_create_time,
                   _reader_statistics.file_reader_create_time);
    COUNTER_UPDATE(_parquet_profile.open_file_num, _reader_statistics.open_file_num);
    COUNTER_UPDATE(_parquet_profile.page_index_filter_time,
                   _reader_statistics.page_index_filter_time);
    COUNTER_UPDATE(_parquet_profile.read_page_index_time, _reader_statistics.read_page_index_time);
    COUNTER_UPDATE(_parquet_profile.parse_page_index_time,
                   _reader_statistics.parse_page_index_time);
    COUNTER_UPDATE(_parquet_profile.row_group_filter_time,
                   _reader_statistics.row_group_filter_time);
    COUNTER_UPDATE(_parquet_profile.file_footer_read_calls,
                   _reader_statistics.file_footer_read_calls);
    COUNTER_UPDATE(_parquet_profile.file_footer_hit_cache,
                   _reader_statistics.file_footer_hit_cache);

    // Column-reader page-level statistics.
    COUNTER_UPDATE(_parquet_profile.skip_page_header_num, _column_statistics.skip_page_header_num);
    COUNTER_UPDATE(_parquet_profile.parse_page_header_num,
                   _column_statistics.parse_page_header_num);
    COUNTER_UPDATE(_parquet_profile.predicate_filter_time,
                   _reader_statistics.predicate_filter_time);
    COUNTER_UPDATE(_parquet_profile.dict_filter_rewrite_time,
                   _reader_statistics.dict_filter_rewrite_time);
    COUNTER_UPDATE(_parquet_profile.bloom_filter_read_time,
                   _reader_statistics.bloom_filter_read_time);
    COUNTER_UPDATE(_parquet_profile.page_index_read_calls,
                   _column_statistics.page_index_read_calls);
    COUNTER_UPDATE(_parquet_profile.decompress_time, _column_statistics.decompress_time);
    COUNTER_UPDATE(_parquet_profile.decompress_cnt, _column_statistics.decompress_cnt);
    COUNTER_UPDATE(_parquet_profile.page_read_counter, _column_statistics.page_read_counter);
    // Page cache read/write hit-and-miss accounting.
    COUNTER_UPDATE(_parquet_profile.page_cache_write_counter,
                   _column_statistics.page_cache_write_counter);
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_write_counter,
                   _column_statistics.page_cache_compressed_write_counter);
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_write_counter,
                   _column_statistics.page_cache_decompressed_write_counter);
    COUNTER_UPDATE(_parquet_profile.page_cache_hit_counter,
                   _column_statistics.page_cache_hit_counter);
    COUNTER_UPDATE(_parquet_profile.page_cache_missing_counter,
                   _column_statistics.page_cache_missing_counter);
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_hit_counter,
                   _column_statistics.page_cache_compressed_hit_counter);
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_hit_counter,
                   _column_statistics.page_cache_decompressed_hit_counter);
    // Decode-stage timing breakdown.
    COUNTER_UPDATE(_parquet_profile.decode_header_time, _column_statistics.decode_header_time);
    COUNTER_UPDATE(_parquet_profile.read_page_header_time,
                   _column_statistics.read_page_header_time);
    COUNTER_UPDATE(_parquet_profile.decode_value_time, _column_statistics.decode_value_time);
    COUNTER_UPDATE(_parquet_profile.decode_dict_time, _column_statistics.decode_dict_time);
    COUNTER_UPDATE(_parquet_profile.decode_level_time, _column_statistics.decode_level_time);
    COUNTER_UPDATE(_parquet_profile.decode_null_map_time, _column_statistics.decode_null_map_time);
}
1403
1404
17.9k
// Hook invoked before the reader is closed; delegates to _collect_profile()
// so every accumulated statistic is flushed into the runtime profile.
void ParquetReader::_collect_profile_before_close() {
    _collect_profile();
}
1407
1408
#include "common/compile_check_end.h"
1409
} // namespace doris