Coverage Report

Created: 2026-04-10 05:01

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/format/parquet/vparquet_reader.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/parquet/vparquet_reader.h"
19
20
#include <gen_cpp/Metrics_types.h>
21
#include <gen_cpp/PlanNodes_types.h>
22
#include <gen_cpp/parquet_types.h>
23
#include <glog/logging.h>
24
25
#include <algorithm>
26
#include <functional>
27
#include <utility>
28
29
#include "common/config.h"
30
#include "common/status.h"
31
#include "core/block/block.h"
32
#include "core/block/column_with_type_and_name.h"
33
#include "core/column/column.h"
34
#include "core/data_type/define_primitive_type.h"
35
#include "core/typeid_cast.h"
36
#include "core/types.h"
37
#include "exec/scan/file_scanner.h"
38
#include "exprs/vbloom_predicate.h"
39
#include "exprs/vdirect_in_predicate.h"
40
#include "exprs/vexpr.h"
41
#include "exprs/vexpr_context.h"
42
#include "exprs/vin_predicate.h"
43
#include "exprs/vruntimefilter_wrapper.h"
44
#include "exprs/vslot_ref.h"
45
#include "exprs/vtopn_pred.h"
46
#include "format/column_type_convert.h"
47
#include "format/parquet/parquet_block_split_bloom_filter.h"
48
#include "format/parquet/parquet_common.h"
49
#include "format/parquet/parquet_predicate.h"
50
#include "format/parquet/parquet_thrift_util.h"
51
#include "format/parquet/schema_desc.h"
52
#include "format/parquet/vparquet_file_metadata.h"
53
#include "format/parquet/vparquet_group_reader.h"
54
#include "format/parquet/vparquet_page_index.h"
55
#include "information_schema/schema_scanner.h"
56
#include "io/file_factory.h"
57
#include "io/fs/buffered_reader.h"
58
#include "io/fs/file_reader.h"
59
#include "io/fs/file_reader_writer_fwd.h"
60
#include "io/fs/tracing_file_reader.h"
61
#include "runtime/descriptors.h"
62
#include "util/slice.h"
63
#include "util/string_util.h"
64
#include "util/timezone_utils.h"
65
66
namespace cctz {
67
class time_zone;
68
} // namespace cctz
69
namespace doris {
70
class RowDescriptor;
71
class RuntimeState;
72
class SlotDescriptor;
73
class TupleDescriptor;
74
namespace io {
75
struct IOContext;
76
enum class FileCachePolicy : uint8_t;
77
} // namespace io
78
class Block;
79
} // namespace doris
80
81
namespace doris {
82
83
#include "common/compile_check_begin.h"
84
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
85
                             const TFileRangeDesc& range, size_t batch_size,
86
                             const cctz::time_zone* ctz, io::IOContext* io_ctx, RuntimeState* state,
87
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
88
36.2k
        : _profile(profile),
89
36.2k
          _scan_params(params),
90
36.2k
          _scan_range(range),
91
36.2k
          _batch_size(std::max(batch_size, _MIN_BATCH_SIZE)),
92
36.2k
          _range_start_offset(range.start_offset),
93
36.2k
          _range_size(range.size),
94
36.2k
          _ctz(ctz),
95
36.2k
          _io_ctx(io_ctx),
96
36.2k
          _state(state),
97
36.2k
          _enable_lazy_mat(enable_lazy_mat),
98
          _enable_filter_by_min_max(
99
36.2k
                  state == nullptr ? true
100
36.2k
                                   : state->query_options().enable_parquet_filter_by_min_max),
101
          _enable_filter_by_bloom_filter(
102
36.2k
                  state == nullptr ? true
103
36.2k
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
104
36.2k
    _meta_cache = meta_cache;
105
36.2k
    _init_profile();
106
36.2k
    _init_system_properties();
107
36.2k
    _init_file_description();
108
36.2k
}
109
110
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
111
                             const TFileRangeDesc& range, size_t batch_size,
112
                             const cctz::time_zone* ctz,
113
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
114
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
115
0
        : _profile(profile),
116
0
          _scan_params(params),
117
0
          _scan_range(range),
118
0
          _batch_size(std::max(batch_size, _MIN_BATCH_SIZE)),
119
0
          _range_start_offset(range.start_offset),
120
0
          _range_size(range.size),
121
0
          _ctz(ctz),
122
0
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
123
0
          _io_ctx_holder(std::move(io_ctx_holder)),
124
0
          _state(state),
125
0
          _enable_lazy_mat(enable_lazy_mat),
126
          _enable_filter_by_min_max(
127
0
                  state == nullptr ? true
128
0
                                   : state->query_options().enable_parquet_filter_by_min_max),
129
          _enable_filter_by_bloom_filter(
130
0
                  state == nullptr ? true
131
0
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
132
0
    _meta_cache = meta_cache;
133
0
    _init_profile();
134
0
    _init_system_properties();
135
0
    _init_file_description();
136
0
}
137
138
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
139
                             io::IOContext* io_ctx, RuntimeState* state, FileMetaCache* meta_cache,
140
                             bool enable_lazy_mat)
141
5
        : _profile(nullptr),
142
5
          _scan_params(params),
143
5
          _scan_range(range),
144
5
          _io_ctx(io_ctx),
145
5
          _state(state),
146
5
          _enable_lazy_mat(enable_lazy_mat),
147
          _enable_filter_by_min_max(
148
5
                  state == nullptr ? true
149
5
                                   : state->query_options().enable_parquet_filter_by_min_max),
150
          _enable_filter_by_bloom_filter(
151
5
                  state == nullptr ? true
152
5
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
153
5
    _meta_cache = meta_cache;
154
5
    _init_system_properties();
155
5
    _init_file_description();
156
5
}
157
158
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
159
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
160
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
161
2.24k
        : _profile(nullptr),
162
2.24k
          _scan_params(params),
163
2.24k
          _scan_range(range),
164
2.24k
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
165
2.24k
          _io_ctx_holder(std::move(io_ctx_holder)),
166
2.24k
          _state(state),
167
2.24k
          _enable_lazy_mat(enable_lazy_mat),
168
          _enable_filter_by_min_max(
169
2.24k
                  state == nullptr ? true
170
2.24k
                                   : state->query_options().enable_parquet_filter_by_min_max),
171
          _enable_filter_by_bloom_filter(
172
2.24k
                  state == nullptr ? true
173
2.24k
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
174
2.24k
    _meta_cache = meta_cache;
175
2.24k
    _init_system_properties();
176
2.24k
    _init_file_description();
177
2.24k
}
178
179
38.5k
ParquetReader::~ParquetReader() {
180
38.5k
    _close_internal();
181
38.5k
}
182
183
#ifdef BE_TEST
184
// for unit test
185
void ParquetReader::set_file_reader(io::FileReaderSPtr file_reader) {
186
    _file_reader = file_reader;
187
    _tracing_file_reader = file_reader;
188
}
189
#endif
190
191
36.2k
void ParquetReader::_init_profile() {
192
36.2k
    if (_profile != nullptr) {
193
36.2k
        static const char* parquet_profile = "ParquetReader";
194
36.2k
        ADD_TIMER_WITH_LEVEL(_profile, parquet_profile, 1);
195
196
36.2k
        _parquet_profile.filtered_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
197
36.2k
                _profile, "RowGroupsFiltered", TUnit::UNIT, parquet_profile, 1);
198
36.2k
        _parquet_profile.filtered_row_groups_by_min_max = ADD_CHILD_COUNTER_WITH_LEVEL(
199
36.2k
                _profile, "RowGroupsFilteredByMinMax", TUnit::UNIT, parquet_profile, 1);
200
36.2k
        _parquet_profile.filtered_row_groups_by_bloom_filter = ADD_CHILD_COUNTER_WITH_LEVEL(
201
36.2k
                _profile, "RowGroupsFilteredByBloomFilter", TUnit::UNIT, parquet_profile, 1);
202
36.2k
        _parquet_profile.to_read_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
203
36.2k
                _profile, "RowGroupsReadNum", TUnit::UNIT, parquet_profile, 1);
204
36.2k
        _parquet_profile.total_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
205
36.2k
                _profile, "RowGroupsTotalNum", TUnit::UNIT, parquet_profile, 1);
206
36.2k
        _parquet_profile.filtered_group_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
207
36.2k
                _profile, "FilteredRowsByGroup", TUnit::UNIT, parquet_profile, 1);
208
36.2k
        _parquet_profile.filtered_page_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
209
36.2k
                _profile, "FilteredRowsByPage", TUnit::UNIT, parquet_profile, 1);
210
36.2k
        _parquet_profile.lazy_read_filtered_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
211
36.2k
                _profile, "FilteredRowsByLazyRead", TUnit::UNIT, parquet_profile, 1);
212
36.2k
        _parquet_profile.filtered_bytes = ADD_CHILD_COUNTER_WITH_LEVEL(
213
36.2k
                _profile, "FilteredBytes", TUnit::BYTES, parquet_profile, 1);
214
36.2k
        _parquet_profile.raw_rows_read = ADD_CHILD_COUNTER_WITH_LEVEL(
215
36.2k
                _profile, "RawRowsRead", TUnit::UNIT, parquet_profile, 1);
216
36.2k
        _parquet_profile.column_read_time =
217
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ColumnReadTime", parquet_profile, 1);
218
36.2k
        _parquet_profile.parse_meta_time =
219
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseMetaTime", parquet_profile, 1);
220
36.2k
        _parquet_profile.parse_footer_time =
221
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseFooterTime", parquet_profile, 1);
222
36.2k
        _parquet_profile.file_reader_create_time =
223
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "FileReaderCreateTime", parquet_profile, 1);
224
36.2k
        _parquet_profile.open_file_num =
225
36.2k
                ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "FileNum", TUnit::UNIT, parquet_profile, 1);
226
36.2k
        _parquet_profile.page_index_read_calls =
227
36.2k
                ADD_COUNTER_WITH_LEVEL(_profile, "PageIndexReadCalls", TUnit::UNIT, 1);
228
36.2k
        _parquet_profile.page_index_filter_time =
229
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexFilterTime", parquet_profile, 1);
230
36.2k
        _parquet_profile.read_page_index_time =
231
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexReadTime", parquet_profile, 1);
232
36.2k
        _parquet_profile.parse_page_index_time =
233
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexParseTime", parquet_profile, 1);
234
36.2k
        _parquet_profile.row_group_filter_time =
235
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "RowGroupFilterTime", parquet_profile, 1);
236
36.2k
        _parquet_profile.file_footer_read_calls =
237
36.2k
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterReadCalls", TUnit::UNIT, 1);
238
36.2k
        _parquet_profile.file_footer_hit_cache =
239
36.2k
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterHitCache", TUnit::UNIT, 1);
240
36.2k
        _parquet_profile.decompress_time =
241
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecompressTime", parquet_profile, 1);
242
36.2k
        _parquet_profile.decompress_cnt = ADD_CHILD_COUNTER_WITH_LEVEL(
243
36.2k
                _profile, "DecompressCount", TUnit::UNIT, parquet_profile, 1);
244
36.2k
        _parquet_profile.page_read_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
245
36.2k
                _profile, "PageReadCount", TUnit::UNIT, parquet_profile, 1);
246
36.2k
        _parquet_profile.page_cache_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
247
36.2k
                _profile, "PageCacheWriteCount", TUnit::UNIT, parquet_profile, 1);
248
36.2k
        _parquet_profile.page_cache_compressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
249
36.2k
                _profile, "PageCacheCompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
250
36.2k
        _parquet_profile.page_cache_decompressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
251
36.2k
                _profile, "PageCacheDecompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
252
36.2k
        _parquet_profile.page_cache_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
253
36.2k
                _profile, "PageCacheHitCount", TUnit::UNIT, parquet_profile, 1);
254
36.2k
        _parquet_profile.page_cache_missing_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
255
36.2k
                _profile, "PageCacheMissingCount", TUnit::UNIT, parquet_profile, 1);
256
36.2k
        _parquet_profile.page_cache_compressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
257
36.2k
                _profile, "PageCacheCompressedHitCount", TUnit::UNIT, parquet_profile, 1);
258
36.2k
        _parquet_profile.page_cache_decompressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
259
36.2k
                _profile, "PageCacheDecompressedHitCount", TUnit::UNIT, parquet_profile, 1);
260
36.2k
        _parquet_profile.decode_header_time =
261
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderDecodeTime", parquet_profile, 1);
262
36.2k
        _parquet_profile.read_page_header_time =
263
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderReadTime", parquet_profile, 1);
264
36.2k
        _parquet_profile.decode_value_time =
265
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeValueTime", parquet_profile, 1);
266
36.2k
        _parquet_profile.decode_dict_time =
267
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeDictTime", parquet_profile, 1);
268
36.2k
        _parquet_profile.decode_level_time =
269
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeLevelTime", parquet_profile, 1);
270
36.2k
        _parquet_profile.decode_null_map_time =
271
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeNullMapTime", parquet_profile, 1);
272
36.2k
        _parquet_profile.skip_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
273
36.2k
                _profile, "SkipPageHeaderNum", TUnit::UNIT, parquet_profile, 1);
274
36.2k
        _parquet_profile.parse_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
275
36.2k
                _profile, "ParsePageHeaderNum", TUnit::UNIT, parquet_profile, 1);
276
36.2k
        _parquet_profile.predicate_filter_time =
277
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PredicateFilterTime", parquet_profile, 1);
278
36.2k
        _parquet_profile.dict_filter_rewrite_time =
279
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DictFilterRewriteTime", parquet_profile, 1);
280
36.2k
        _parquet_profile.bloom_filter_read_time =
281
36.2k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "BloomFilterReadTime", parquet_profile, 1);
282
36.2k
    }
283
36.2k
}
284
285
2.94k
Status ParquetReader::close() {
286
2.94k
    _close_internal();
287
2.94k
    return Status::OK();
288
2.94k
}
289
290
41.4k
void ParquetReader::_close_internal() {
291
41.4k
    if (!_closed) {
292
38.5k
        _closed = true;
293
38.5k
    }
294
41.4k
}
295
296
75.2k
Status ParquetReader::_open_file() {
297
75.2k
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
298
0
        return Status::EndOfFile("stop");
299
0
    }
300
75.2k
    if (_file_reader == nullptr) {
301
38.4k
        SCOPED_RAW_TIMER(&_reader_statistics.file_reader_create_time);
302
38.4k
        ++_reader_statistics.open_file_num;
303
38.4k
        _file_description.mtime =
304
38.4k
                _scan_range.__isset.modification_time ? _scan_range.modification_time : 0;
305
38.4k
        io::FileReaderOptions reader_options =
306
38.4k
                FileFactory::get_reader_options(_state, _file_description);
307
38.4k
        _file_reader = DORIS_TRY(io::DelegateReader::create_file_reader(
308
38.4k
                _profile, _system_properties, _file_description, reader_options,
309
38.4k
                io::DelegateReader::AccessMode::RANDOM, _io_ctx));
310
38.4k
        _tracing_file_reader = _io_ctx ? std::make_shared<io::TracingFileReader>(
311
38.4k
                                                 _file_reader, _io_ctx->file_reader_stats)
312
38.4k
                                       : _file_reader;
313
38.4k
    }
314
315
75.2k
    if (_file_metadata == nullptr) {
316
38.5k
        SCOPED_RAW_TIMER(&_reader_statistics.parse_footer_time);
317
38.5k
        if (_tracing_file_reader->size() <= sizeof(PARQUET_VERSION_NUMBER)) {
318
            // Some system may generate parquet file with only 4 bytes: PAR1
319
            // Should consider it as empty file.
320
0
            return Status::EndOfFile("open file failed, empty parquet file {} with size: {}",
321
0
                                     _scan_range.path, _tracing_file_reader->size());
322
0
        }
323
38.5k
        size_t meta_size = 0;
324
38.5k
        bool enable_mapping_varbinary = _scan_params.__isset.enable_mapping_varbinary
325
38.5k
                                                ? _scan_params.enable_mapping_varbinary
326
38.5k
                                                : false;
327
38.5k
        bool enable_mapping_timestamp_tz = _scan_params.__isset.enable_mapping_timestamp_tz
328
38.5k
                                                   ? _scan_params.enable_mapping_timestamp_tz
329
38.5k
                                                   : false;
330
38.5k
        if (_meta_cache == nullptr) {
331
            // wrap _file_metadata with unique ptr, so that it can be released finally.
332
2.29k
            RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
333
2.29k
                                                &meta_size, _io_ctx, enable_mapping_varbinary,
334
2.29k
                                                enable_mapping_timestamp_tz));
335
2.29k
            _file_metadata = _file_metadata_ptr.get();
336
            // parse magic number & parse meta data
337
2.29k
            _reader_statistics.file_footer_read_calls += 1;
338
36.2k
        } else {
339
36.2k
            const auto& file_meta_cache_key =
340
36.2k
                    FileMetaCache::get_key(_tracing_file_reader, _file_description);
341
36.2k
            if (!_meta_cache->lookup(file_meta_cache_key, &_meta_cache_handle)) {
342
8.66k
                RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
343
8.66k
                                                    &meta_size, _io_ctx, enable_mapping_varbinary,
344
8.66k
                                                    enable_mapping_timestamp_tz));
345
                // _file_metadata_ptr.release() : move control of _file_metadata to _meta_cache_handle
346
8.66k
                _meta_cache->insert(file_meta_cache_key, _file_metadata_ptr.release(),
347
8.66k
                                    &_meta_cache_handle);
348
8.66k
                _file_metadata = _meta_cache_handle.data<FileMetaData>();
349
8.66k
                _reader_statistics.file_footer_read_calls += 1;
350
27.5k
            } else {
351
27.5k
                _reader_statistics.file_footer_hit_cache++;
352
27.5k
            }
353
36.2k
            _file_metadata = _meta_cache_handle.data<FileMetaData>();
354
36.2k
        }
355
356
38.5k
        if (_file_metadata == nullptr) {
357
0
            return Status::InternalError("failed to get file meta data: {}",
358
0
                                         _file_description.path);
359
0
        }
360
38.5k
    }
361
75.2k
    return Status::OK();
362
75.2k
}
363
364
35.4k
Status ParquetReader::get_file_metadata_schema(const FieldDescriptor** ptr) {
365
35.4k
    RETURN_IF_ERROR(_open_file());
366
35.4k
    DCHECK(_file_metadata != nullptr);
367
35.4k
    *ptr = &_file_metadata->schema();
368
35.4k
    return Status::OK();
369
35.4k
}
370
371
38.5k
void ParquetReader::_init_system_properties() {
372
38.5k
    if (_scan_range.__isset.file_type) {
373
        // for compatibility
374
35.9k
        _system_properties.system_type = _scan_range.file_type;
375
35.9k
    } else {
376
2.60k
        _system_properties.system_type = _scan_params.file_type;
377
2.60k
    }
378
38.5k
    _system_properties.properties = _scan_params.properties;
379
38.5k
    _system_properties.hdfs_params = _scan_params.hdfs_params;
380
38.5k
    if (_scan_params.__isset.broker_addresses) {
381
4
        _system_properties.broker_addresses.assign(_scan_params.broker_addresses.begin(),
382
4
                                                   _scan_params.broker_addresses.end());
383
4
    }
384
38.5k
}
385
386
38.5k
void ParquetReader::_init_file_description() {
387
38.5k
    _file_description.path = _scan_range.path;
388
18.4E
    _file_description.file_size = _scan_range.__isset.file_size ? _scan_range.file_size : -1;
389
38.5k
    if (_scan_range.__isset.fs_name) {
390
15.0k
        _file_description.fs_name = _scan_range.fs_name;
391
15.0k
    }
392
38.5k
    if (_scan_range.__isset.file_cache_admission) {
393
33.8k
        _file_description.file_cache_admission = _scan_range.file_cache_admission;
394
33.8k
    }
395
38.5k
}
396
397
Status ParquetReader::init_reader(
398
        const std::vector<std::string>& all_column_names,
399
        std::unordered_map<std::string, uint32_t>* col_name_to_block_idx,
400
        const VExprContextSPtrs& conjuncts,
401
        phmap::flat_hash_map<int, std::vector<std::shared_ptr<ColumnPredicate>>>&
402
                slot_id_to_predicates,
403
        const TupleDescriptor* tuple_descriptor, const RowDescriptor* row_descriptor,
404
        const std::unordered_map<std::string, int>* colname_to_slot_id,
405
        const VExprContextSPtrs* not_single_slot_filter_conjuncts,
406
        const std::unordered_map<int, VExprContextSPtrs>* slot_id_to_filter_conjuncts,
407
        std::shared_ptr<TableSchemaChangeHelper::Node> table_info_node_ptr, bool filter_groups,
408
36.0k
        const std::set<uint64_t>& column_ids, const std::set<uint64_t>& filter_column_ids) {
409
36.0k
    _col_name_to_block_idx = col_name_to_block_idx;
410
36.0k
    _tuple_descriptor = tuple_descriptor;
411
36.0k
    _row_descriptor = row_descriptor;
412
36.0k
    _colname_to_slot_id = colname_to_slot_id;
413
36.0k
    _not_single_slot_filter_conjuncts = not_single_slot_filter_conjuncts;
414
36.0k
    _slot_id_to_filter_conjuncts = slot_id_to_filter_conjuncts;
415
36.0k
    _table_info_node_ptr = table_info_node_ptr;
416
36.0k
    _filter_groups = filter_groups;
417
36.0k
    _column_ids = column_ids;
418
36.0k
    _filter_column_ids = filter_column_ids;
419
420
36.0k
    RETURN_IF_ERROR(_open_file());
421
36.0k
    _t_metadata = &(_file_metadata->to_thrift());
422
36.0k
    if (_file_metadata == nullptr) {
423
0
        return Status::InternalError("failed to init parquet reader, please open reader first");
424
0
    }
425
426
36.0k
    SCOPED_RAW_TIMER(&_reader_statistics.parse_meta_time);
427
36.0k
    _total_groups = _t_metadata->row_groups.size();
428
36.0k
    if (_total_groups == 0) {
429
12
        return Status::EndOfFile("init reader failed, empty parquet file: " + _scan_range.path);
430
12
    }
431
36.0k
    _current_row_group_index = RowGroupReader::RowGroupIndex {-1, 0, 0};
432
433
36.0k
    _table_column_names = &all_column_names;
434
36.0k
    auto schema_desc = _file_metadata->schema();
435
436
36.0k
    std::map<std::string, std::string> required_file_columns; //file column -> table column
437
126k
    for (auto table_column_name : all_column_names) {
438
126k
        if (_table_info_node_ptr->children_column_exists(table_column_name)) {
439
122k
            required_file_columns.emplace(
440
122k
                    _table_info_node_ptr->children_file_column_name(table_column_name),
441
122k
                    table_column_name);
442
122k
        } else {
443
4.15k
            _missing_cols.emplace_back(table_column_name);
444
4.15k
        }
445
126k
    }
446
336k
    for (int i = 0; i < schema_desc.size(); ++i) {
447
300k
        const auto& name = schema_desc.get_column(i)->name;
448
300k
        if (required_file_columns.contains(name)) {
449
122k
            _read_file_columns.emplace_back(name);
450
122k
            _read_table_columns.emplace_back(required_file_columns[name]);
451
122k
            _read_table_columns_set.insert(required_file_columns[name]);
452
122k
        }
453
300k
    }
454
    // build column predicates for column lazy read
455
36.0k
    _lazy_read_ctx.conjuncts = conjuncts;
456
36.0k
    _lazy_read_ctx.slot_id_to_predicates = slot_id_to_predicates;
457
36.0k
    return Status::OK();
458
36.0k
}
459
460
14.5k
bool ParquetReader::_exists_in_file(const std::string& expr_name) const {
461
    // `_read_table_columns_set` is used to ensure that only columns actually read are subject to min-max filtering.
462
    // This primarily handles cases where partition columns also exist in a file. The reason it's not modified
463
    // in `_table_info_node_ptr` is that Iceberg、Hudi has inconsistent requirements for this node;
464
    // Iceberg partition evolution need read partition columns from a file.
465
    // hudi set `hoodie.datasource.write.drop.partition.columns=false` not need read partition columns from a file.
466
14.5k
    return _table_info_node_ptr->children_column_exists(expr_name) &&
467
14.5k
           _read_table_columns_set.contains(expr_name);
468
14.5k
}
469
470
13.9k
bool ParquetReader::_type_matches(const int cid) const {
471
13.9k
    auto* slot = _tuple_descriptor->slots()[cid];
472
13.9k
    auto table_col_type = remove_nullable(slot->type());
473
474
13.9k
    const auto& file_col_name = _table_info_node_ptr->children_file_column_name(slot->col_name());
475
13.9k
    const auto& file_col_type =
476
13.9k
            remove_nullable(_file_metadata->schema().get_column(file_col_name)->data_type);
477
478
13.9k
    return (table_col_type->get_primitive_type() == file_col_type->get_primitive_type()) &&
479
13.9k
           !is_complex_type(table_col_type->get_primitive_type());
480
13.9k
}
481
482
Status ParquetReader::set_fill_columns(
483
        const std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>&
484
                partition_columns,
485
35.9k
        const std::unordered_map<std::string, VExprContextSPtr>& missing_columns) {
486
35.9k
    _lazy_read_ctx.fill_partition_columns = partition_columns;
487
35.9k
    _lazy_read_ctx.fill_missing_columns = missing_columns;
488
489
    // std::unordered_map<column_name, std::pair<col_id, slot_id>>
490
35.9k
    std::unordered_map<std::string, std::pair<uint32_t, int>> predicate_columns;
491
492
    // TODO(gabriel): we should try to clear too much structs which are used to represent conjuncts and predicates.
493
    // visit_slot for lazy mat.
494
69.3k
    std::function<void(VExpr * expr)> visit_slot = [&](VExpr* expr) {
495
69.3k
        if (expr->is_slot_ref()) {
496
21.4k
            VSlotRef* slot_ref = static_cast<VSlotRef*>(expr);
497
21.4k
            auto expr_name = slot_ref->expr_name();
498
21.4k
            predicate_columns.emplace(expr_name,
499
21.4k
                                      std::make_pair(slot_ref->column_id(), slot_ref->slot_id()));
500
21.4k
            if (slot_ref->column_id() == 0) {
501
15.1k
                _lazy_read_ctx.resize_first_column = false;
502
15.1k
            }
503
21.4k
            return;
504
21.4k
        }
505
48.7k
        for (auto& child : expr->children()) {
506
48.7k
            visit_slot(child.get());
507
48.7k
        }
508
47.9k
    };
509
35.9k
    for (const auto& conjunct : _lazy_read_ctx.conjuncts) {
510
20.8k
        auto expr = conjunct->root();
511
512
20.8k
        if (expr->is_rf_wrapper()) {
513
            // REF: src/runtime_filter/runtime_filter_consumer.cpp
514
5.81k
            VRuntimeFilterWrapper* runtime_filter = assert_cast<VRuntimeFilterWrapper*>(expr.get());
515
516
5.81k
            auto filter_impl = runtime_filter->get_impl();
517
5.81k
            visit_slot(filter_impl.get());
518
15.0k
        } else {
519
15.0k
            visit_slot(expr.get());
520
15.0k
        }
521
20.8k
    }
522
35.9k
    if (!_lazy_read_ctx.slot_id_to_predicates.empty()) {
523
32.3k
        auto and_pred = AndBlockColumnPredicate::create_unique();
524
122k
        for (const auto& entry : _lazy_read_ctx.slot_id_to_predicates) {
525
122k
            for (const auto& pred : entry.second) {
526
14.7k
                if (!_exists_in_file(pred->col_name()) || !_type_matches(pred->column_id())) {
527
1.15k
                    continue;
528
1.15k
                }
529
13.5k
                and_pred->add_column_predicate(
530
13.5k
                        SingleColumnBlockPredicate::create_unique(pred->clone(pred->column_id())));
531
13.5k
            }
532
122k
        }
533
32.3k
        if (and_pred->num_of_column_predicate() > 0) {
534
8.95k
            _push_down_predicates.push_back(std::move(and_pred));
535
8.95k
        }
536
32.3k
    }
537
538
35.9k
    const FieldDescriptor& schema = _file_metadata->schema();
539
540
35.9k
    auto check_iceberg_row_lineage_column_idx = [&](const auto& col_name) -> int {
541
26.4k
        if (_row_lineage_columns != nullptr) {
542
400
            if (col_name == IcebergTableReader::ROW_LINEAGE_ROW_ID) {
543
184
                return _row_lineage_columns->row_id_column_idx;
544
216
            } else if (col_name == IcebergTableReader::ROW_LINEAGE_LAST_UPDATED_SEQ_NUMBER) {
545
178
                return _row_lineage_columns->last_updated_sequence_number_column_idx;
546
178
            }
547
400
        }
548
26.0k
        return -1;
549
26.4k
    };
550
551
121k
    for (auto& read_table_col : _read_table_columns) {
552
121k
        _lazy_read_ctx.all_read_columns.emplace_back(read_table_col);
553
554
121k
        auto file_column_name = _table_info_node_ptr->children_file_column_name(read_table_col);
555
121k
        PrimitiveType column_type =
556
121k
                schema.get_column(file_column_name)->data_type->get_primitive_type();
557
121k
        if (is_complex_type(column_type)) {
558
32.0k
            _lazy_read_ctx.has_complex_type = true;
559
32.0k
        }
560
121k
        if (predicate_columns.size() > 0) {
561
38.2k
            auto iter = predicate_columns.find(read_table_col);
562
38.2k
            if (iter == predicate_columns.end()) {
563
23.3k
                if (auto row_lineage_idx = check_iceberg_row_lineage_column_idx(read_table_col);
564
23.3k
                    row_lineage_idx != -1) {
565
4
                    _lazy_read_ctx.predicate_columns.first.emplace_back(read_table_col);
566
                    // row lineage column can not dict filter.
567
4
                    int slot_id = 0;
568
12
                    for (auto slot : _tuple_descriptor->slots()) {
569
12
                        if (slot->col_name_lower_case() == read_table_col) {
570
4
                            slot_id = slot->id();
571
4
                        }
572
12
                    }
573
4
                    _lazy_read_ctx.predicate_columns.second.emplace_back(slot_id);
574
4
                    _lazy_read_ctx.all_predicate_col_ids.emplace_back(row_lineage_idx);
575
23.3k
                } else {
576
23.3k
                    _lazy_read_ctx.lazy_read_columns.emplace_back(read_table_col);
577
23.3k
                }
578
23.3k
            } else {
579
14.8k
                _lazy_read_ctx.predicate_columns.first.emplace_back(iter->first);
580
14.8k
                _lazy_read_ctx.predicate_columns.second.emplace_back(iter->second.second);
581
14.8k
                _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
582
14.8k
            }
583
38.2k
        }
584
121k
    }
585
35.9k
    if (_row_id_column_iterator_pair.first != nullptr) {
586
2.13k
        _lazy_read_ctx.all_predicate_col_ids.emplace_back(_row_id_column_iterator_pair.second);
587
2.13k
    }
588
589
35.9k
    for (auto& kv : _lazy_read_ctx.fill_partition_columns) {
590
4.31k
        auto iter = predicate_columns.find(kv.first);
591
4.31k
        if (iter == predicate_columns.end()) {
592
3.95k
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
593
3.95k
        } else {
594
361
            _lazy_read_ctx.predicate_partition_columns.emplace(kv.first, kv.second);
595
361
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
596
361
        }
597
4.31k
    }
598
599
35.9k
    for (auto& kv : _lazy_read_ctx.fill_missing_columns) {
600
4.18k
        auto iter = predicate_columns.find(kv.first);
601
4.18k
        if (iter != predicate_columns.end()) {
602
            //For check missing column :   missing column == xx, missing column is null,missing column is not null.
603
1.08k
            if (_slot_id_to_filter_conjuncts->find(iter->second.second) !=
604
1.08k
                _slot_id_to_filter_conjuncts->end()) {
605
826
                for (auto& ctx : _slot_id_to_filter_conjuncts->find(iter->second.second)->second) {
606
826
                    _lazy_read_ctx.missing_columns_conjuncts.emplace_back(ctx);
607
826
                }
608
818
            }
609
610
1.08k
            _lazy_read_ctx.predicate_missing_columns.emplace(kv.first, kv.second);
611
1.08k
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
612
3.10k
        } else if (auto row_lineage_idx = check_iceberg_row_lineage_column_idx(kv.first);
613
3.10k
                   row_lineage_idx != -1) {
614
356
            _lazy_read_ctx.predicate_missing_columns.emplace(kv.first, kv.second);
615
356
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(row_lineage_idx);
616
2.74k
        } else {
617
2.74k
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
618
2.74k
        }
619
4.18k
    }
620
621
35.9k
    if (_enable_lazy_mat && _lazy_read_ctx.predicate_columns.first.size() > 0 &&
622
35.9k
        _lazy_read_ctx.lazy_read_columns.size() > 0) {
623
7.33k
        _lazy_read_ctx.can_lazy_read = true;
624
7.33k
    }
625
626
35.9k
    if (!_lazy_read_ctx.can_lazy_read) {
627
28.6k
        for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
628
293
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
629
293
        }
630
28.6k
        for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
631
1.34k
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
632
1.34k
        }
633
28.6k
    }
634
635
35.9k
    if (_filter_groups && (_total_groups == 0 || _t_metadata->num_rows == 0 || _range_size < 0)) {
636
0
        return Status::EndOfFile("No row group to read");
637
0
    }
638
35.9k
    _fill_all_columns = true;
639
35.9k
    return Status::OK();
640
35.9k
}
641
642
// init file reader and file metadata for parsing schema
643
3.77k
Status ParquetReader::init_schema_reader() {
644
3.77k
    RETURN_IF_ERROR(_open_file());
645
3.77k
    _t_metadata = &(_file_metadata->to_thrift());
646
3.77k
    return Status::OK();
647
3.77k
}
648
649
Status ParquetReader::get_parsed_schema(std::vector<std::string>* col_names,
650
2.25k
                                        std::vector<DataTypePtr>* col_types) {
651
2.25k
    _total_groups = _t_metadata->row_groups.size();
652
2.25k
    auto schema_desc = _file_metadata->schema();
653
14.6k
    for (int i = 0; i < schema_desc.size(); ++i) {
654
        // Get the Column Reader for the boolean column
655
12.4k
        col_names->emplace_back(schema_desc.get_column(i)->name);
656
12.4k
        col_types->emplace_back(make_nullable(schema_desc.get_column(i)->data_type));
657
12.4k
    }
658
2.25k
    return Status::OK();
659
2.25k
}
660
661
void ParquetReader::set_iceberg_rowid_params(const std::string& file_path,
662
                                             int32_t partition_spec_id,
663
                                             const std::string& partition_data_json,
664
78
                                             int row_id_column_pos) {
665
78
    _iceberg_rowid_params.enabled = true;
666
78
    _iceberg_rowid_params.file_path = file_path;
667
78
    _iceberg_rowid_params.partition_spec_id = partition_spec_id;
668
78
    _iceberg_rowid_params.partition_data_json = partition_data_json;
669
78
    _iceberg_rowid_params.row_id_column_pos = row_id_column_pos;
670
78
    if (_current_group_reader != nullptr) {
671
0
        _current_group_reader->set_iceberg_rowid_params(_iceberg_rowid_params);
672
0
    }
673
78
}
674
675
Status ParquetReader::get_columns(std::unordered_map<std::string, DataTypePtr>* name_to_type,
676
33.5k
                                  std::unordered_set<std::string>* missing_cols) {
677
33.5k
    const auto& schema_desc = _file_metadata->schema();
678
33.5k
    std::unordered_set<std::string> column_names;
679
33.5k
    schema_desc.get_column_names(&column_names);
680
295k
    for (auto& name : column_names) {
681
295k
        auto field = schema_desc.get_column(name);
682
295k
        name_to_type->emplace(name, field->data_type);
683
295k
    }
684
33.5k
    for (auto& col : _missing_cols) {
685
4.18k
        missing_cols->insert(col);
686
4.18k
    }
687
33.5k
    return Status::OK();
688
33.5k
}
689
690
62.0k
Status ParquetReader::get_next_block(Block* block, size_t* read_rows, bool* eof) {
691
62.0k
    if (_current_group_reader == nullptr || _row_group_eof) {
692
44.7k
        Status st = _next_row_group_reader();
693
44.7k
        if (!st.ok() && !st.is<ErrorCode::END_OF_FILE>()) {
694
0
            return st;
695
0
        }
696
44.7k
        if (_current_group_reader == nullptr || _row_group_eof || st.is<ErrorCode::END_OF_FILE>()) {
697
5.88k
            _current_group_reader.reset(nullptr);
698
5.88k
            _row_group_eof = true;
699
5.88k
            *read_rows = 0;
700
5.88k
            *eof = true;
701
5.88k
            return Status::OK();
702
5.88k
        }
703
44.7k
    }
704
56.1k
    if (_push_down_agg_type == TPushAggOp::type::COUNT) {
705
1.92k
        auto rows = std::min(_current_group_reader->get_remaining_rows(), (int64_t)_batch_size);
706
707
1.92k
        _current_group_reader->set_remaining_rows(_current_group_reader->get_remaining_rows() -
708
1.92k
                                                  rows);
709
1.92k
        auto mutate_columns = block->mutate_columns();
710
1.92k
        for (auto& col : mutate_columns) {
711
1.92k
            col->resize(rows);
712
1.92k
        }
713
1.92k
        block->set_columns(std::move(mutate_columns));
714
715
1.92k
        *read_rows = rows;
716
1.92k
        if (_current_group_reader->get_remaining_rows() == 0) {
717
452
            _current_group_reader.reset(nullptr);
718
452
        }
719
720
1.92k
        return Status::OK();
721
1.92k
    }
722
723
    // Limit memory per batch for load paths.
724
    // _load_bytes_per_row is updated after each batch so the *next* call pre-shrinks _batch_size
725
    // before reading, ensuring the current batch is already within the limit (from call 2 onward).
726
54.2k
    const int64_t max_block_bytes =
727
54.4k
            (_state != nullptr && _state->query_type() == TQueryType::LOAD &&
728
54.2k
             config::load_reader_max_block_bytes > 0)
729
54.2k
                    ? config::load_reader_max_block_bytes
730
54.2k
                    : 0;
731
54.2k
    if (max_block_bytes > 0 && _load_bytes_per_row > 0) {
732
140
        _batch_size = std::max((size_t)1,
733
140
                               (size_t)((int64_t)max_block_bytes / (int64_t)_load_bytes_per_row));
734
140
    }
735
736
54.2k
    SCOPED_RAW_TIMER(&_reader_statistics.column_read_time);
737
54.2k
    Status batch_st =
738
54.2k
            _current_group_reader->next_batch(block, _batch_size, read_rows, &_row_group_eof);
739
54.2k
    if (batch_st.is<ErrorCode::END_OF_FILE>()) {
740
6
        block->clear_column_data();
741
6
        _current_group_reader.reset(nullptr);
742
6
        *read_rows = 0;
743
6
        *eof = true;
744
6
        return Status::OK();
745
6
    }
746
747
54.2k
    if (!batch_st.ok()) {
748
16
        return Status::InternalError("Read parquet file {} failed, reason = {}", _scan_range.path,
749
16
                                     batch_st.to_string());
750
16
    }
751
752
54.2k
    if (max_block_bytes > 0 && *read_rows > 0) {
753
1.97k
        _load_bytes_per_row = block->bytes() / *read_rows;
754
1.97k
    }
755
756
54.2k
    if (_row_group_eof) {
757
38.5k
        auto column_st = _current_group_reader->merged_column_statistics();
758
38.5k
        _column_statistics.merge(column_st);
759
38.5k
        _reader_statistics.lazy_read_filtered_rows +=
760
38.5k
                _current_group_reader->lazy_read_filtered_rows();
761
38.5k
        _reader_statistics.predicate_filter_time += _current_group_reader->predicate_filter_time();
762
38.5k
        _reader_statistics.dict_filter_rewrite_time +=
763
38.5k
                _current_group_reader->dict_filter_rewrite_time();
764
38.5k
        if (_io_ctx) {
765
38.4k
            _io_ctx->condition_cache_filtered_rows +=
766
38.4k
                    _current_group_reader->condition_cache_filtered_rows();
767
38.4k
        }
768
769
38.5k
        if (_current_row_group_index.row_group_id + 1 == _total_groups) {
770
30.0k
            *eof = true;
771
30.0k
        } else {
772
8.45k
            *eof = false;
773
8.45k
        }
774
38.5k
    }
775
54.2k
    return Status::OK();
776
54.2k
}
777
778
RowGroupReader::PositionDeleteContext ParquetReader::_get_position_delete_ctx(
779
38.9k
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index) {
780
38.9k
    if (_delete_rows == nullptr) {
781
36.0k
        return RowGroupReader::PositionDeleteContext(row_group.num_rows, row_group_index.first_row);
782
36.0k
    }
783
2.93k
    const int64_t* delete_rows = &(*_delete_rows)[0];
784
2.93k
    const int64_t* delete_rows_end = delete_rows + _delete_rows->size();
785
2.93k
    const int64_t* start_pos = std::lower_bound(delete_rows + _delete_rows_index, delete_rows_end,
786
2.93k
                                                row_group_index.first_row);
787
2.93k
    int64_t start_index = start_pos - delete_rows;
788
2.93k
    const int64_t* end_pos = std::lower_bound(start_pos, delete_rows_end, row_group_index.last_row);
789
2.93k
    int64_t end_index = end_pos - delete_rows;
790
2.93k
    _delete_rows_index = end_index;
791
2.93k
    return RowGroupReader::PositionDeleteContext(*_delete_rows, row_group.num_rows,
792
2.93k
                                                 row_group_index.first_row, start_index, end_index);
793
38.9k
}
794
795
44.6k
Status ParquetReader::_next_row_group_reader() {
796
44.6k
    if (_current_group_reader != nullptr) {
797
8.40k
        _current_group_reader->collect_profile_before_close();
798
8.40k
    }
799
800
44.6k
    RowRanges candidate_row_ranges;
801
103k
    while (++_current_row_group_index.row_group_id < _total_groups) {
802
97.4k
        const auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
803
97.4k
        _current_row_group_index.first_row = _current_row_group_index.last_row;
804
97.4k
        _current_row_group_index.last_row = _current_row_group_index.last_row + row_group.num_rows;
805
806
97.4k
        if (_filter_groups && _is_misaligned_range_group(row_group)) {
807
51.3k
            continue;
808
51.3k
        }
809
810
46.0k
        candidate_row_ranges.clear();
811
        // The range of lines to be read is determined by the push down predicate.
812
46.0k
        RETURN_IF_ERROR(_process_min_max_bloom_filter(
813
46.0k
                _current_row_group_index, row_group, _push_down_predicates, &candidate_row_ranges));
814
815
46.0k
        std::function<int64_t(const FieldSchema*)> column_compressed_size =
816
250k
                [&row_group, &column_compressed_size](const FieldSchema* field) -> int64_t {
817
250k
            if (field->physical_column_index >= 0) {
818
205k
                int parquet_col_id = field->physical_column_index;
819
205k
                if (row_group.columns[parquet_col_id].__isset.meta_data) {
820
205k
                    return row_group.columns[parquet_col_id].meta_data.total_compressed_size;
821
205k
                }
822
18.4E
                return 0;
823
205k
            }
824
44.9k
            int64_t size = 0;
825
76.3k
            for (const FieldSchema& child : field->children) {
826
76.3k
                size += column_compressed_size(&child);
827
76.3k
            }
828
44.9k
            return size;
829
250k
        };
830
46.0k
        int64_t group_size = 0; // only calculate the needed columns
831
174k
        for (auto& read_col : _read_file_columns) {
832
174k
            const FieldSchema* field = _file_metadata->schema().get_column(read_col);
833
174k
            group_size += column_compressed_size(field);
834
174k
        }
835
836
46.0k
        _reader_statistics.read_rows += candidate_row_ranges.count();
837
46.1k
        if (_io_ctx) {
838
46.1k
            _io_ctx->file_reader_stats->read_rows += candidate_row_ranges.count();
839
46.1k
        }
840
841
46.0k
        if (candidate_row_ranges.count() != 0) {
842
            // need read this row group.
843
39.0k
            _reader_statistics.read_row_groups++;
844
39.0k
            _reader_statistics.filtered_page_rows +=
845
39.0k
                    row_group.num_rows - candidate_row_ranges.count();
846
39.0k
            break;
847
39.0k
        } else {
848
            // this row group be filtered.
849
7.05k
            _reader_statistics.filtered_row_groups++;
850
7.05k
            _reader_statistics.filtered_bytes += group_size;
851
7.05k
            _reader_statistics.filtered_group_rows += row_group.num_rows;
852
7.05k
        }
853
46.0k
    }
854
855
44.6k
    if (_current_row_group_index.row_group_id == _total_groups) {
856
5.88k
        _row_group_eof = true;
857
5.88k
        _current_group_reader.reset(nullptr);
858
5.88k
        return Status::EndOfFile("No next RowGroupReader");
859
5.88k
    }
860
861
    // process page index and generate the ranges to read
862
38.8k
    auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
863
864
38.8k
    RowGroupReader::PositionDeleteContext position_delete_ctx =
865
38.8k
            _get_position_delete_ctx(row_group, _current_row_group_index);
866
38.8k
    io::FileReaderSPtr group_file_reader;
867
38.8k
    if (typeid_cast<io::InMemoryFileReader*>(_file_reader.get())) {
868
        // InMemoryFileReader has the ability to merge small IO
869
22.2k
        group_file_reader = _file_reader;
870
22.2k
    } else {
871
16.5k
        size_t avg_io_size = 0;
872
16.5k
        const std::vector<io::PrefetchRange> io_ranges =
873
16.5k
                _generate_random_access_ranges(_current_row_group_index, &avg_io_size);
874
16.5k
        int64_t merged_read_slice_size = -1;
875
16.7k
        if (_state != nullptr && _state->query_options().__isset.merge_read_slice_size) {
876
16.6k
            merged_read_slice_size = _state->query_options().merge_read_slice_size;
877
16.6k
        }
878
        // The underlying page reader will prefetch data in column.
879
        // Using both MergeRangeFileReader and BufferedStreamReader simultaneously would waste a lot of memory.
880
16.5k
        group_file_reader =
881
16.5k
                avg_io_size < io::MergeRangeFileReader::SMALL_IO
882
16.6k
                        ? std::make_shared<io::MergeRangeFileReader>(
883
16.6k
                                  _profile, _file_reader, io_ranges, merged_read_slice_size)
884
18.4E
                        : _file_reader;
885
16.5k
    }
886
38.8k
    _current_group_reader.reset(new RowGroupReader(
887
38.8k
            _io_ctx ? std::make_shared<io::TracingFileReader>(group_file_reader,
888
38.8k
                                                              _io_ctx->file_reader_stats)
889
18.4E
                    : group_file_reader,
890
38.8k
            _read_table_columns, _current_row_group_index.row_group_id, row_group, _ctz, _io_ctx,
891
38.8k
            position_delete_ctx, _lazy_read_ctx, _state, _column_ids, _filter_column_ids));
892
38.8k
    if (_iceberg_rowid_params.enabled) {
893
78
        _current_group_reader->set_iceberg_rowid_params(_iceberg_rowid_params);
894
78
    }
895
38.8k
    _row_group_eof = false;
896
897
38.8k
    _current_group_reader->set_current_row_group_idx(_current_row_group_index);
898
38.8k
    _current_group_reader->set_row_id_column_iterator(_row_id_column_iterator_pair);
899
38.8k
    _current_group_reader->set_row_lineage_columns(_row_lineage_columns);
900
38.8k
    _current_group_reader->set_col_name_to_block_idx(_col_name_to_block_idx);
901
38.8k
    if (_condition_cache_ctx) {
902
8.00k
        _current_group_reader->set_condition_cache_context(_condition_cache_ctx);
903
8.00k
    }
904
905
38.8k
    _current_group_reader->_table_info_node_ptr = _table_info_node_ptr;
906
38.8k
    return _current_group_reader->init(_file_metadata->schema(), candidate_row_ranges, _col_offsets,
907
38.8k
                                       _tuple_descriptor, _row_descriptor, _colname_to_slot_id,
908
38.8k
                                       _not_single_slot_filter_conjuncts,
909
38.8k
                                       _slot_id_to_filter_conjuncts);
910
44.6k
}
911
912
std::vector<io::PrefetchRange> ParquetReader::_generate_random_access_ranges(
913
16.6k
        const RowGroupReader::RowGroupIndex& group, size_t* avg_io_size) {
914
16.6k
    std::vector<io::PrefetchRange> result;
915
16.6k
    int64_t last_chunk_end = -1;
916
16.6k
    size_t total_io_size = 0;
917
16.6k
    std::function<void(const FieldSchema*, const tparquet::RowGroup&)> scalar_range =
918
126k
            [&](const FieldSchema* field, const tparquet::RowGroup& row_group) {
919
126k
                if (_column_ids.empty() ||
920
126k
                    _column_ids.find(field->get_column_id()) != _column_ids.end()) {
921
126k
                    if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
922
13.5k
                        scalar_range(&field->children[0], row_group);
923
113k
                    } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
924
9.56k
                        scalar_range(&field->children[0], row_group);
925
9.56k
                        scalar_range(&field->children[1], row_group);
926
103k
                    } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
927
19.2k
                        for (int i = 0; i < field->children.size(); ++i) {
928
13.0k
                            scalar_range(&field->children[i], row_group);
929
13.0k
                        }
930
97.2k
                    } else {
931
97.2k
                        const tparquet::ColumnChunk& chunk =
932
97.2k
                                row_group.columns[field->physical_column_index];
933
97.2k
                        auto& chunk_meta = chunk.meta_data;
934
97.2k
                        int64_t chunk_start = has_dict_page(chunk_meta)
935
97.2k
                                                      ? chunk_meta.dictionary_page_offset
936
97.2k
                                                      : chunk_meta.data_page_offset;
937
97.2k
                        int64_t chunk_end = chunk_start + chunk_meta.total_compressed_size;
938
97.2k
                        DCHECK_GE(chunk_start, last_chunk_end);
939
97.2k
                        result.emplace_back(chunk_start, chunk_end);
940
97.2k
                        total_io_size += chunk_meta.total_compressed_size;
941
97.2k
                        last_chunk_end = chunk_end;
942
97.2k
                    }
943
126k
                }
944
126k
            };
945
16.6k
    const tparquet::RowGroup& row_group = _t_metadata->row_groups[group.row_group_id];
946
81.1k
    for (const auto& read_col : _read_file_columns) {
947
81.1k
        const FieldSchema* field = _file_metadata->schema().get_column(read_col);
948
81.1k
        scalar_range(field, row_group);
949
81.1k
    }
950
16.6k
    if (!result.empty()) {
951
16.2k
        *avg_io_size = total_io_size / result.size();
952
16.2k
    }
953
16.6k
    return result;
954
16.6k
}
955
956
112k
bool ParquetReader::_is_misaligned_range_group(const tparquet::RowGroup& row_group) const {
957
112k
    int64_t start_offset = _get_column_start_offset(row_group.columns[0].meta_data);
958
959
112k
    auto& last_column = row_group.columns[row_group.columns.size() - 1].meta_data;
960
112k
    int64_t end_offset = _get_column_start_offset(last_column) + last_column.total_compressed_size;
961
962
112k
    int64_t row_group_mid = start_offset + (end_offset - start_offset) / 2;
963
112k
    if (!(row_group_mid >= _range_start_offset &&
964
112k
          row_group_mid < _range_start_offset + _range_size)) {
965
54.1k
        return true;
966
54.1k
    }
967
58.6k
    return false;
968
112k
}
969
970
5.01k
int64_t ParquetReader::get_total_rows() const {
971
5.01k
    if (!_t_metadata) return 0;
972
5.01k
    if (!_filter_groups) return _t_metadata->num_rows;
973
5.01k
    int64_t total = 0;
974
9.77k
    for (const auto& rg : _t_metadata->row_groups) {
975
9.77k
        if (!_is_misaligned_range_group(rg)) {
976
7.07k
            total += rg.num_rows;
977
7.07k
        }
978
9.77k
    }
979
5.01k
    return total;
980
5.01k
}
981
982
7.83k
void ParquetReader::set_condition_cache_context(std::shared_ptr<ConditionCacheContext> ctx) {
983
7.83k
    _condition_cache_ctx = std::move(ctx);
984
7.87k
    if (!_condition_cache_ctx || !_t_metadata || !_filter_groups) {
985
0
        return;
986
0
    }
987
    // Find the first assigned row group to compute base_granule.
988
7.83k
    int64_t first_row = 0;
989
7.87k
    for (const auto& rg : _t_metadata->row_groups) {
990
7.87k
        if (!_is_misaligned_range_group(rg)) {
991
7.82k
            _condition_cache_ctx->base_granule = first_row / ConditionCacheContext::GRANULE_SIZE;
992
7.82k
            return;
993
7.82k
        }
994
44
        first_row += rg.num_rows;
995
44
    }
996
7.83k
}
997
998
Status ParquetReader::_process_page_index_filter(
999
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index,
1000
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1001
34.9k
        RowRanges* candidate_row_ranges) {
1002
34.9k
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
1003
0
        return Status::EndOfFile("stop");
1004
0
    }
1005
1006
34.9k
    std::function<void()> read_whole_row_group = [&]() {
1007
30.2k
        candidate_row_ranges->add(RowRange {0, row_group.num_rows});
1008
30.2k
    };
1009
1010
    // Check if the page index is available and if it exists.
1011
34.9k
    PageIndex page_index;
1012
35.1k
    if (!config::enable_parquet_page_index || _colname_to_slot_id == nullptr ||
1013
35.1k
        !page_index.check_and_get_page_index_ranges(row_group.columns)) {
1014
17.5k
        read_whole_row_group();
1015
17.5k
        return Status::OK();
1016
17.5k
    }
1017
1018
17.4k
    std::vector<int> parquet_col_ids;
1019
62.5k
    for (size_t idx = 0; idx < _read_table_columns.size(); idx++) {
1020
45.1k
        const auto& read_table_col = _read_table_columns[idx];
1021
45.1k
        const auto& read_file_col = _read_file_columns[idx];
1022
45.1k
        if (!_colname_to_slot_id->contains(read_table_col)) {
1023
318
            continue;
1024
318
        }
1025
44.8k
        auto* field = _file_metadata->schema().get_column(read_file_col);
1026
1027
75.3k
        std::function<void(FieldSchema * field)> f = [&](FieldSchema* field) {
1028
75.3k
            if (!_column_ids.empty() &&
1029
75.3k
                _column_ids.find(field->get_column_id()) == _column_ids.end()) {
1030
2.79k
                return;
1031
2.79k
            }
1032
1033
72.5k
            if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
1034
4.29k
                f(&field->children[0]);
1035
68.2k
            } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
1036
3.45k
                f(&field->children[0]);
1037
3.45k
                f(&field->children[1]);
1038
64.8k
            } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
1039
27.3k
                for (int i = 0; i < field->children.size(); ++i) {
1040
19.4k
                    f(&field->children[i]);
1041
19.4k
                }
1042
56.9k
            } else {
1043
56.9k
                int parquet_col_id = field->physical_column_index;
1044
56.9k
                if (parquet_col_id >= 0) {
1045
56.9k
                    parquet_col_ids.push_back(parquet_col_id);
1046
56.9k
                }
1047
56.9k
            }
1048
72.5k
        };
1049
1050
44.8k
        f(field);
1051
44.8k
    }
1052
1053
17.6k
    auto parse_offset_index = [&]() -> Status {
1054
17.6k
        std::vector<uint8_t> off_index_buff(page_index._offset_index_size);
1055
17.6k
        Slice res(off_index_buff.data(), page_index._offset_index_size);
1056
17.6k
        size_t bytes_read = 0;
1057
17.6k
        {
1058
17.6k
            SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1059
17.6k
            RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._offset_index_start, res,
1060
17.6k
                                                          &bytes_read, _io_ctx));
1061
17.6k
        }
1062
17.6k
        _column_statistics.page_index_read_calls++;
1063
17.6k
        _col_offsets.clear();
1064
1065
57.0k
        for (auto parquet_col_id : parquet_col_ids) {
1066
57.0k
            auto& chunk = row_group.columns[parquet_col_id];
1067
57.0k
            if (chunk.offset_index_length == 0) [[unlikely]] {
1068
0
                continue;
1069
0
            }
1070
57.0k
            tparquet::OffsetIndex offset_index;
1071
57.0k
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1072
57.0k
            RETURN_IF_ERROR(
1073
57.0k
                    page_index.parse_offset_index(chunk, off_index_buff.data(), &offset_index));
1074
57.0k
            _col_offsets[parquet_col_id] = offset_index;
1075
57.0k
        }
1076
17.6k
        return Status::OK();
1077
17.6k
    };
1078
1079
    // from https://github.com/apache/doris/pull/55795
1080
17.4k
    RETURN_IF_ERROR(parse_offset_index());
1081
1082
    // Check if page index is needed for min-max filter.
1083
17.6k
    if (!_enable_filter_by_min_max || push_down_pred.empty()) {
1084
12.7k
        read_whole_row_group();
1085
12.7k
        return Status::OK();
1086
12.7k
    }
1087
1088
    // read column index.
1089
4.70k
    std::vector<uint8_t> col_index_buff(page_index._column_index_size);
1090
4.70k
    size_t bytes_read = 0;
1091
4.70k
    Slice result(col_index_buff.data(), page_index._column_index_size);
1092
4.70k
    {
1093
4.70k
        SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1094
4.70k
        RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._column_index_start, result,
1095
4.70k
                                                      &bytes_read, _io_ctx));
1096
4.70k
    }
1097
4.70k
    _column_statistics.page_index_read_calls++;
1098
1099
4.70k
    SCOPED_RAW_TIMER(&_reader_statistics.page_index_filter_time);
1100
1101
    // Construct a cacheable page index structure to avoid repeatedly reading the page index of the same column.
1102
4.70k
    ParquetPredicate::CachedPageIndexStat cached_page_index;
1103
4.70k
    cached_page_index.ctz = _ctz;
1104
4.70k
    std::function<bool(ParquetPredicate::PageIndexStat**, int)> get_stat_func =
1105
4.70k
            [&](ParquetPredicate::PageIndexStat** ans, const int cid) -> bool {
1106
4.15k
        if (cached_page_index.stats.contains(cid)) {
1107
1.64k
            *ans = &cached_page_index.stats[cid];
1108
1.64k
            return (*ans)->available;
1109
1.64k
        }
1110
2.50k
        cached_page_index.stats.emplace(cid, ParquetPredicate::PageIndexStat {});
1111
2.50k
        auto& sig_stat = cached_page_index.stats[cid];
1112
1113
2.50k
        auto* slot = _tuple_descriptor->slots()[cid];
1114
2.50k
        if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1115
            // table column not exist in file, may be schema change.
1116
0
            return false;
1117
0
        }
1118
1119
2.50k
        const auto& file_col_name =
1120
2.50k
                _table_info_node_ptr->children_file_column_name(slot->col_name());
1121
2.50k
        const FieldSchema* col_schema = _file_metadata->schema().get_column(file_col_name);
1122
2.50k
        int parquet_col_id = col_schema->physical_column_index;
1123
1124
2.50k
        if (parquet_col_id < 0) {
1125
            // complex type, not support page index yet.
1126
0
            return false;
1127
0
        }
1128
2.50k
        if (!_col_offsets.contains(parquet_col_id)) {
1129
            // If the file contains partition columns and the query applies filters on those
1130
            // partition columns, then reading the page index is unnecessary.
1131
0
            return false;
1132
0
        }
1133
1134
2.50k
        auto& column_chunk = row_group.columns[parquet_col_id];
1135
2.51k
        if (column_chunk.column_index_length == 0 || column_chunk.offset_index_length == 0) {
1136
            // column no page index.
1137
0
            return false;
1138
0
        }
1139
1140
2.50k
        tparquet::ColumnIndex column_index;
1141
2.50k
        {
1142
2.50k
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1143
2.50k
            RETURN_IF_ERROR(page_index.parse_column_index(column_chunk, col_index_buff.data(),
1144
2.50k
                                                          &column_index));
1145
2.50k
        }
1146
2.50k
        const int64_t num_of_pages = column_index.null_pages.size();
1147
2.50k
        if (num_of_pages <= 0) [[unlikely]] {
1148
            // no page. (maybe this row group no data.)
1149
0
            return false;
1150
0
        }
1151
2.50k
        DCHECK_EQ(column_index.min_values.size(), column_index.max_values.size());
1152
2.50k
        if (!column_index.__isset.null_counts) {
1153
            // not set null or null counts;
1154
0
            return false;
1155
0
        }
1156
1157
2.50k
        auto& offset_index = _col_offsets[parquet_col_id];
1158
2.50k
        const auto& page_locations = offset_index.page_locations;
1159
1160
2.50k
        sig_stat.col_schema = col_schema;
1161
2.50k
        sig_stat.num_of_pages = num_of_pages;
1162
2.50k
        sig_stat.encoded_min_value = column_index.min_values;
1163
2.50k
        sig_stat.encoded_max_value = column_index.max_values;
1164
2.50k
        sig_stat.is_all_null.resize(num_of_pages);
1165
2.50k
        sig_stat.has_null.resize(num_of_pages);
1166
2.50k
        sig_stat.ranges.resize(num_of_pages);
1167
1168
19.3k
        for (int page_id = 0; page_id < num_of_pages; page_id++) {
1169
16.8k
            sig_stat.is_all_null[page_id] = column_index.null_pages[page_id];
1170
16.8k
            sig_stat.has_null[page_id] = column_index.null_counts[page_id] > 0;
1171
1172
16.8k
            int64_t from = page_locations[page_id].first_row_index;
1173
16.8k
            int64_t to = 0;
1174
16.8k
            if (page_id == page_locations.size() - 1) {
1175
2.51k
                to = row_group_index.last_row;
1176
14.3k
            } else {
1177
14.3k
                to = page_locations[page_id + 1].first_row_index;
1178
14.3k
            }
1179
16.8k
            sig_stat.ranges[page_id] = RowRange {from, to};
1180
16.8k
        }
1181
1182
2.50k
        sig_stat.available = true;
1183
2.50k
        *ans = &sig_stat;
1184
2.50k
        return true;
1185
2.50k
    };
1186
4.70k
    cached_page_index.row_group_range = {0, row_group.num_rows};
1187
4.70k
    cached_page_index.get_stat_func = get_stat_func;
1188
1189
4.70k
    candidate_row_ranges->add({0, row_group.num_rows});
1190
4.93k
    for (const auto& predicate : push_down_pred) {
1191
4.93k
        RowRanges tmp_row_range;
1192
4.93k
        if (!predicate->evaluate_and(&cached_page_index, &tmp_row_range)) {
1193
            // no need read this row group.
1194
22
            candidate_row_ranges->clear();
1195
22
            return Status::OK();
1196
22
        }
1197
4.91k
        RowRanges::ranges_intersection(*candidate_row_ranges, tmp_row_range, candidate_row_ranges);
1198
4.91k
    }
1199
4.68k
    return Status::OK();
1200
4.70k
}
1201
1202
Status ParquetReader::_process_min_max_bloom_filter(
1203
        const RowGroupReader::RowGroupIndex& row_group_index, const tparquet::RowGroup& row_group,
1204
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1205
45.9k
        RowRanges* row_ranges) {
1206
45.9k
    SCOPED_RAW_TIMER(&_reader_statistics.row_group_filter_time);
1207
45.9k
    if (!_filter_groups) {
1208
        // No row group filtering is needed;
1209
        // for example, Iceberg reads position delete files.
1210
2.31k
        row_ranges->add({0, row_group.num_rows});
1211
2.31k
        return Status::OK();
1212
2.31k
    }
1213
1214
43.6k
    if (_read_by_rows) {
1215
2.18k
        auto group_start = row_group_index.first_row;
1216
2.18k
        auto group_end = row_group_index.last_row;
1217
1218
5.96k
        while (!_row_ids.empty()) {
1219
4.19k
            auto v = _row_ids.front();
1220
4.19k
            if (v < group_start) {
1221
0
                continue;
1222
4.19k
            } else if (v < group_end) {
1223
3.78k
                row_ranges->add(RowRange {v - group_start, v - group_start + 1});
1224
3.78k
                _row_ids.pop_front();
1225
3.78k
            } else {
1226
416
                break;
1227
416
            }
1228
4.19k
        }
1229
41.4k
    } else {
1230
41.4k
        bool filter_this_row_group = false;
1231
41.4k
        bool filtered_by_min_max = false;
1232
41.4k
        bool filtered_by_bloom_filter = false;
1233
41.4k
        RETURN_IF_ERROR(_process_column_stat_filter(row_group, push_down_pred,
1234
41.4k
                                                    &filter_this_row_group, &filtered_by_min_max,
1235
41.4k
                                                    &filtered_by_bloom_filter));
1236
        // Update statistics based on filter type
1237
41.4k
        if (filter_this_row_group) {
1238
6.49k
            if (filtered_by_min_max) {
1239
6.49k
                _reader_statistics.filtered_row_groups_by_min_max++;
1240
6.49k
            }
1241
6.49k
            if (filtered_by_bloom_filter) {
1242
0
                _reader_statistics.filtered_row_groups_by_bloom_filter++;
1243
0
            }
1244
6.49k
        }
1245
1246
41.4k
        if (!filter_this_row_group) {
1247
35.1k
            RETURN_IF_ERROR(_process_page_index_filter(row_group, row_group_index, push_down_pred,
1248
35.1k
                                                       row_ranges));
1249
35.1k
        }
1250
41.4k
    }
1251
1252
43.6k
    return Status::OK();
1253
43.6k
}
1254
1255
Status ParquetReader::_process_column_stat_filter(
1256
        const tparquet::RowGroup& row_group,
1257
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1258
41.3k
        bool* filter_group, bool* filtered_by_min_max, bool* filtered_by_bloom_filter) {
1259
    // If both filters are disabled, skip filtering
1260
41.3k
    if (!_enable_filter_by_min_max && !_enable_filter_by_bloom_filter) {
1261
0
        return Status::OK();
1262
0
    }
1263
1264
    // Cache bloom filters for each column to avoid reading the same bloom filter multiple times
1265
    // when there are multiple predicates on the same column
1266
41.3k
    std::unordered_map<int, std::unique_ptr<ParquetBlockSplitBloomFilter>> bloom_filter_cache;
1267
1268
    // Initialize output parameters
1269
41.3k
    *filtered_by_min_max = false;
1270
41.3k
    *filtered_by_bloom_filter = false;
1271
1272
41.3k
    for (const auto& predicate : _push_down_predicates) {
1273
17.4k
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_stat_func =
1274
18.5k
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
1275
                    // Check if min-max filter is enabled
1276
18.5k
                    if (!_enable_filter_by_min_max) {
1277
1.91k
                        return false;
1278
1.91k
                    }
1279
16.6k
                    auto* slot = _tuple_descriptor->slots()[cid];
1280
16.6k
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1281
0
                        return false;
1282
0
                    }
1283
16.6k
                    const auto& file_col_name =
1284
16.6k
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1285
16.6k
                    const FieldSchema* col_schema =
1286
16.6k
                            _file_metadata->schema().get_column(file_col_name);
1287
16.6k
                    int parquet_col_id = col_schema->physical_column_index;
1288
16.6k
                    auto meta_data = row_group.columns[parquet_col_id].meta_data;
1289
16.6k
                    stat->col_schema = col_schema;
1290
16.6k
                    return ParquetPredicate::read_column_stats(col_schema, meta_data,
1291
16.6k
                                                               &_ignored_stats,
1292
16.6k
                                                               _t_metadata->created_by, stat)
1293
16.6k
                            .ok();
1294
16.6k
                };
1295
17.4k
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_bloom_filter_func =
1296
17.4k
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
1297
4.94k
                    auto* slot = _tuple_descriptor->slots()[cid];
1298
4.94k
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1299
0
                        return false;
1300
0
                    }
1301
4.94k
                    const auto& file_col_name =
1302
4.94k
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1303
4.94k
                    const FieldSchema* col_schema =
1304
4.94k
                            _file_metadata->schema().get_column(file_col_name);
1305
4.94k
                    int parquet_col_id = col_schema->physical_column_index;
1306
4.94k
                    auto meta_data = row_group.columns[parquet_col_id].meta_data;
1307
4.94k
                    if (!meta_data.__isset.bloom_filter_offset) {
1308
4.91k
                        return false;
1309
4.91k
                    }
1310
22
                    auto primitive_type =
1311
22
                            remove_nullable(col_schema->data_type)->get_primitive_type();
1312
22
                    if (!ParquetPredicate::bloom_filter_supported(primitive_type)) {
1313
10
                        return false;
1314
10
                    }
1315
1316
                    // Check if bloom filter is enabled
1317
12
                    if (!_enable_filter_by_bloom_filter) {
1318
0
                        return false;
1319
0
                    }
1320
1321
                    // Check cache first
1322
12
                    auto cache_iter = bloom_filter_cache.find(parquet_col_id);
1323
12
                    if (cache_iter != bloom_filter_cache.end()) {
1324
                        // Bloom filter already loaded for this column, reuse it
1325
0
                        stat->bloom_filter = std::move(cache_iter->second);
1326
0
                        bloom_filter_cache.erase(cache_iter);
1327
0
                        return stat->bloom_filter != nullptr;
1328
0
                    }
1329
1330
12
                    if (!stat->bloom_filter) {
1331
10
                        SCOPED_RAW_TIMER(&_reader_statistics.bloom_filter_read_time);
1332
10
                        auto st = ParquetPredicate::read_bloom_filter(
1333
10
                                meta_data, _tracing_file_reader, _io_ctx, stat);
1334
10
                        if (!st.ok()) {
1335
0
                            LOG(WARNING) << "Failed to read bloom filter for column "
1336
0
                                         << col_schema->name << " in file " << _scan_range.path
1337
0
                                         << ", status: " << st.to_string();
1338
0
                            stat->bloom_filter.reset();
1339
0
                            return false;
1340
0
                        }
1341
10
                    }
1342
12
                    return stat->bloom_filter != nullptr;
1343
12
                };
1344
17.4k
        ParquetPredicate::ColumnStat stat;
1345
17.4k
        stat.ctz = _ctz;
1346
17.4k
        stat.get_stat_func = &get_stat_func;
1347
17.4k
        stat.get_bloom_filter_func = &get_bloom_filter_func;
1348
1349
17.4k
        if (!predicate->evaluate_and(&stat)) {
1350
6.49k
            *filter_group = true;
1351
1352
            // Track which filter was used for filtering
1353
            // If bloom filter was loaded, it means bloom filter was used
1354
6.49k
            if (stat.bloom_filter) {
1355
0
                *filtered_by_bloom_filter = true;
1356
0
            }
1357
            // If col_schema was set but no bloom filter, it means min-max stats were used
1358
6.49k
            if (stat.col_schema && !stat.bloom_filter) {
1359
6.49k
                *filtered_by_min_max = true;
1360
6.49k
            }
1361
1362
6.49k
            return Status::OK();
1363
6.49k
        }
1364
1365
        // After evaluating, if the bloom filter was used, cache it for subsequent predicates
1366
10.9k
        if (stat.bloom_filter) {
1367
            // Find the column id for caching
1368
52
            for (auto* slot : _tuple_descriptor->slots()) {
1369
52
                if (_table_info_node_ptr->children_column_exists(slot->col_name())) {
1370
52
                    const auto& file_col_name =
1371
52
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1372
52
                    const FieldSchema* col_schema =
1373
52
                            _file_metadata->schema().get_column(file_col_name);
1374
52
                    int parquet_col_id = col_schema->physical_column_index;
1375
52
                    if (stat.col_schema == col_schema) {
1376
10
                        bloom_filter_cache[parquet_col_id] = std::move(stat.bloom_filter);
1377
10
                        break;
1378
10
                    }
1379
52
                }
1380
52
            }
1381
10
        }
1382
10.9k
    }
1383
1384
    // Update filter statistics if this row group was not filtered
1385
    // The statistics will be updated in _init_row_groups when filter_group is true
1386
34.8k
    return Status::OK();
1387
41.3k
}
1388
1389
225k
int64_t ParquetReader::_get_column_start_offset(const tparquet::ColumnMetaData& column) const {
1390
225k
    return has_dict_page(column) ? column.dictionary_page_offset : column.data_page_offset;
1391
225k
}
1392
1393
33.8k
void ParquetReader::_collect_profile() {
1394
33.8k
    if (_profile == nullptr) {
1395
0
        return;
1396
0
    }
1397
1398
33.8k
    if (_current_group_reader != nullptr) {
1399
27.8k
        _current_group_reader->collect_profile_before_close();
1400
27.8k
    }
1401
33.8k
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups, _reader_statistics.filtered_row_groups);
1402
33.8k
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_min_max,
1403
33.8k
                   _reader_statistics.filtered_row_groups_by_min_max);
1404
33.8k
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_bloom_filter,
1405
33.8k
                   _reader_statistics.filtered_row_groups_by_bloom_filter);
1406
33.8k
    COUNTER_UPDATE(_parquet_profile.to_read_row_groups, _reader_statistics.read_row_groups);
1407
33.8k
    COUNTER_UPDATE(_parquet_profile.total_row_groups, _total_groups);
1408
33.8k
    COUNTER_UPDATE(_parquet_profile.filtered_group_rows, _reader_statistics.filtered_group_rows);
1409
33.8k
    COUNTER_UPDATE(_parquet_profile.filtered_page_rows, _reader_statistics.filtered_page_rows);
1410
33.8k
    COUNTER_UPDATE(_parquet_profile.lazy_read_filtered_rows,
1411
33.8k
                   _reader_statistics.lazy_read_filtered_rows);
1412
33.8k
    COUNTER_UPDATE(_parquet_profile.filtered_bytes, _reader_statistics.filtered_bytes);
1413
33.8k
    COUNTER_UPDATE(_parquet_profile.raw_rows_read, _reader_statistics.read_rows);
1414
33.8k
    COUNTER_UPDATE(_parquet_profile.column_read_time, _reader_statistics.column_read_time);
1415
33.8k
    COUNTER_UPDATE(_parquet_profile.parse_meta_time, _reader_statistics.parse_meta_time);
1416
33.8k
    COUNTER_UPDATE(_parquet_profile.parse_footer_time, _reader_statistics.parse_footer_time);
1417
33.8k
    COUNTER_UPDATE(_parquet_profile.file_reader_create_time,
1418
33.8k
                   _reader_statistics.file_reader_create_time);
1419
33.8k
    COUNTER_UPDATE(_parquet_profile.open_file_num, _reader_statistics.open_file_num);
1420
33.8k
    COUNTER_UPDATE(_parquet_profile.page_index_filter_time,
1421
33.8k
                   _reader_statistics.page_index_filter_time);
1422
33.8k
    COUNTER_UPDATE(_parquet_profile.read_page_index_time, _reader_statistics.read_page_index_time);
1423
33.8k
    COUNTER_UPDATE(_parquet_profile.parse_page_index_time,
1424
33.8k
                   _reader_statistics.parse_page_index_time);
1425
33.8k
    COUNTER_UPDATE(_parquet_profile.row_group_filter_time,
1426
33.8k
                   _reader_statistics.row_group_filter_time);
1427
33.8k
    COUNTER_UPDATE(_parquet_profile.file_footer_read_calls,
1428
33.8k
                   _reader_statistics.file_footer_read_calls);
1429
33.8k
    COUNTER_UPDATE(_parquet_profile.file_footer_hit_cache,
1430
33.8k
                   _reader_statistics.file_footer_hit_cache);
1431
1432
33.8k
    COUNTER_UPDATE(_parquet_profile.skip_page_header_num, _column_statistics.skip_page_header_num);
1433
33.8k
    COUNTER_UPDATE(_parquet_profile.parse_page_header_num,
1434
33.8k
                   _column_statistics.parse_page_header_num);
1435
33.8k
    COUNTER_UPDATE(_parquet_profile.predicate_filter_time,
1436
33.8k
                   _reader_statistics.predicate_filter_time);
1437
33.8k
    COUNTER_UPDATE(_parquet_profile.dict_filter_rewrite_time,
1438
33.8k
                   _reader_statistics.dict_filter_rewrite_time);
1439
33.8k
    COUNTER_UPDATE(_parquet_profile.bloom_filter_read_time,
1440
33.8k
                   _reader_statistics.bloom_filter_read_time);
1441
33.8k
    COUNTER_UPDATE(_parquet_profile.page_index_read_calls,
1442
33.8k
                   _column_statistics.page_index_read_calls);
1443
33.8k
    COUNTER_UPDATE(_parquet_profile.decompress_time, _column_statistics.decompress_time);
1444
33.8k
    COUNTER_UPDATE(_parquet_profile.decompress_cnt, _column_statistics.decompress_cnt);
1445
33.8k
    COUNTER_UPDATE(_parquet_profile.page_read_counter, _column_statistics.page_read_counter);
1446
33.8k
    COUNTER_UPDATE(_parquet_profile.page_cache_write_counter,
1447
33.8k
                   _column_statistics.page_cache_write_counter);
1448
33.8k
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_write_counter,
1449
33.8k
                   _column_statistics.page_cache_compressed_write_counter);
1450
33.8k
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_write_counter,
1451
33.8k
                   _column_statistics.page_cache_decompressed_write_counter);
1452
33.8k
    COUNTER_UPDATE(_parquet_profile.page_cache_hit_counter,
1453
33.8k
                   _column_statistics.page_cache_hit_counter);
1454
33.8k
    COUNTER_UPDATE(_parquet_profile.page_cache_missing_counter,
1455
33.8k
                   _column_statistics.page_cache_missing_counter);
1456
33.8k
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_hit_counter,
1457
33.8k
                   _column_statistics.page_cache_compressed_hit_counter);
1458
33.8k
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_hit_counter,
1459
33.8k
                   _column_statistics.page_cache_decompressed_hit_counter);
1460
33.8k
    COUNTER_UPDATE(_parquet_profile.decode_header_time, _column_statistics.decode_header_time);
1461
33.8k
    COUNTER_UPDATE(_parquet_profile.read_page_header_time,
1462
33.8k
                   _column_statistics.read_page_header_time);
1463
33.8k
    COUNTER_UPDATE(_parquet_profile.decode_value_time, _column_statistics.decode_value_time);
1464
33.8k
    COUNTER_UPDATE(_parquet_profile.decode_dict_time, _column_statistics.decode_dict_time);
1465
33.8k
    COUNTER_UPDATE(_parquet_profile.decode_level_time, _column_statistics.decode_level_time);
1466
33.8k
    COUNTER_UPDATE(_parquet_profile.decode_null_map_time, _column_statistics.decode_null_map_time);
1467
33.8k
}
1468
1469
33.8k
void ParquetReader::_collect_profile_before_close() {
1470
33.8k
    _collect_profile();
1471
33.8k
}
1472
1473
#include "common/compile_check_end.h"
1474
} // namespace doris