Coverage Report

Created: 2026-05-08 13:07

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/format/parquet/vparquet_reader.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/parquet/vparquet_reader.h"
19
20
#include <gen_cpp/Metrics_types.h>
21
#include <gen_cpp/PlanNodes_types.h>
22
#include <gen_cpp/parquet_types.h>
23
#include <glog/logging.h>
24
25
#include <algorithm>
26
#include <functional>
27
#include <utility>
28
29
#include "common/config.h"
30
#include "common/status.h"
31
#include "core/block/block.h"
32
#include "core/block/column_with_type_and_name.h"
33
#include "core/column/column.h"
34
#include "core/data_type/define_primitive_type.h"
35
#include "core/typeid_cast.h"
36
#include "core/types.h"
37
#include "exec/scan/file_scanner.h"
38
#include "exprs/vbloom_predicate.h"
39
#include "exprs/vdirect_in_predicate.h"
40
#include "exprs/vexpr.h"
41
#include "exprs/vexpr_context.h"
42
#include "exprs/vin_predicate.h"
43
#include "exprs/vruntimefilter_wrapper.h"
44
#include "exprs/vslot_ref.h"
45
#include "exprs/vtopn_pred.h"
46
#include "format/column_type_convert.h"
47
#include "format/parquet/parquet_block_split_bloom_filter.h"
48
#include "format/parquet/parquet_common.h"
49
#include "format/parquet/parquet_predicate.h"
50
#include "format/parquet/parquet_thrift_util.h"
51
#include "format/parquet/schema_desc.h"
52
#include "format/parquet/vparquet_file_metadata.h"
53
#include "format/parquet/vparquet_group_reader.h"
54
#include "format/parquet/vparquet_page_index.h"
55
#include "information_schema/schema_scanner.h"
56
#include "io/file_factory.h"
57
#include "io/fs/buffered_reader.h"
58
#include "io/fs/file_reader.h"
59
#include "io/fs/file_reader_writer_fwd.h"
60
#include "io/fs/tracing_file_reader.h"
61
#include "runtime/descriptors.h"
62
#include "util/slice.h"
63
#include "util/string_util.h"
64
#include "util/timezone_utils.h"
65
66
namespace cctz {
67
class time_zone;
68
} // namespace cctz
69
namespace doris {
70
class RowDescriptor;
71
class RuntimeState;
72
class SlotDescriptor;
73
class TupleDescriptor;
74
namespace io {
75
struct IOContext;
76
enum class FileCachePolicy : uint8_t;
77
} // namespace io
78
class Block;
79
} // namespace doris
80
81
namespace doris {
82
83
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
84
                             const TFileRangeDesc& range, size_t batch_size,
85
                             const cctz::time_zone* ctz, io::IOContext* io_ctx, RuntimeState* state,
86
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
87
38.1k
        : _profile(profile),
88
38.1k
          _scan_params(params),
89
38.1k
          _scan_range(range),
90
38.1k
          _batch_size(std::max(batch_size, 1UL)),
91
38.1k
          _range_start_offset(range.start_offset),
92
38.1k
          _range_size(range.size),
93
38.1k
          _ctz(ctz),
94
38.1k
          _io_ctx(io_ctx),
95
38.1k
          _state(state),
96
38.1k
          _enable_lazy_mat(enable_lazy_mat),
97
          _enable_filter_by_min_max(
98
38.1k
                  state == nullptr ? true
99
38.1k
                                   : state->query_options().enable_parquet_filter_by_min_max),
100
          _enable_filter_by_bloom_filter(
101
38.1k
                  state == nullptr ? true
102
38.1k
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
103
38.1k
    _meta_cache = meta_cache;
104
38.1k
    _init_profile();
105
38.1k
    _init_system_properties();
106
38.1k
    _init_file_description();
107
38.1k
}
108
109
75.1k
void ParquetReader::set_batch_size(size_t batch_size) {
110
75.1k
    if (_batch_size == batch_size) {
111
38.2k
        return;
112
38.2k
    }
113
36.9k
    _batch_size = batch_size;
114
36.9k
}
115
116
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
117
                             const TFileRangeDesc& range, size_t batch_size,
118
                             const cctz::time_zone* ctz,
119
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
120
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
121
0
        : _profile(profile),
122
0
          _scan_params(params),
123
0
          _scan_range(range),
124
0
          _batch_size(std::max(batch_size, 1UL)),
125
0
          _range_start_offset(range.start_offset),
126
0
          _range_size(range.size),
127
0
          _ctz(ctz),
128
0
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
129
0
          _io_ctx_holder(std::move(io_ctx_holder)),
130
0
          _state(state),
131
0
          _enable_lazy_mat(enable_lazy_mat),
132
          _enable_filter_by_min_max(
133
0
                  state == nullptr ? true
134
0
                                   : state->query_options().enable_parquet_filter_by_min_max),
135
          _enable_filter_by_bloom_filter(
136
0
                  state == nullptr ? true
137
0
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
138
0
    _meta_cache = meta_cache;
139
0
    _init_profile();
140
0
    _init_system_properties();
141
0
    _init_file_description();
142
0
}
143
144
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
145
                             io::IOContext* io_ctx, RuntimeState* state, FileMetaCache* meta_cache,
146
                             bool enable_lazy_mat)
147
5
        : _profile(nullptr),
148
5
          _scan_params(params),
149
5
          _scan_range(range),
150
5
          _io_ctx(io_ctx),
151
5
          _state(state),
152
5
          _enable_lazy_mat(enable_lazy_mat),
153
          _enable_filter_by_min_max(
154
5
                  state == nullptr ? true
155
5
                                   : state->query_options().enable_parquet_filter_by_min_max),
156
          _enable_filter_by_bloom_filter(
157
5
                  state == nullptr ? true
158
5
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
159
5
    _meta_cache = meta_cache;
160
5
    _init_system_properties();
161
5
    _init_file_description();
162
5
}
163
164
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
165
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
166
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
167
2.42k
        : _profile(nullptr),
168
2.42k
          _scan_params(params),
169
2.42k
          _scan_range(range),
170
2.42k
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
171
2.42k
          _io_ctx_holder(std::move(io_ctx_holder)),
172
2.42k
          _state(state),
173
2.42k
          _enable_lazy_mat(enable_lazy_mat),
174
          _enable_filter_by_min_max(
175
2.42k
                  state == nullptr ? true
176
2.42k
                                   : state->query_options().enable_parquet_filter_by_min_max),
177
          _enable_filter_by_bloom_filter(
178
2.42k
                  state == nullptr ? true
179
2.42k
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
180
2.42k
    _meta_cache = meta_cache;
181
2.42k
    _init_system_properties();
182
2.42k
    _init_file_description();
183
2.42k
}
184
185
40.5k
ParquetReader::~ParquetReader() {
186
40.5k
    _close_internal();
187
40.5k
}
188
189
#ifdef BE_TEST
190
// for unit test
191
void ParquetReader::set_file_reader(io::FileReaderSPtr file_reader) {
192
    _file_reader = file_reader;
193
    _tracing_file_reader = file_reader;
194
}
195
#endif
196
197
38.1k
void ParquetReader::_init_profile() {
198
38.1k
    if (_profile != nullptr) {
199
38.1k
        static const char* parquet_profile = "ParquetReader";
200
38.1k
        ADD_TIMER_WITH_LEVEL(_profile, parquet_profile, 1);
201
202
38.1k
        _parquet_profile.filtered_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
203
38.1k
                _profile, "RowGroupsFiltered", TUnit::UNIT, parquet_profile, 1);
204
38.1k
        _parquet_profile.filtered_row_groups_by_min_max = ADD_CHILD_COUNTER_WITH_LEVEL(
205
38.1k
                _profile, "RowGroupsFilteredByMinMax", TUnit::UNIT, parquet_profile, 1);
206
38.1k
        _parquet_profile.filtered_row_groups_by_bloom_filter = ADD_CHILD_COUNTER_WITH_LEVEL(
207
38.1k
                _profile, "RowGroupsFilteredByBloomFilter", TUnit::UNIT, parquet_profile, 1);
208
38.1k
        _parquet_profile.to_read_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
209
38.1k
                _profile, "RowGroupsReadNum", TUnit::UNIT, parquet_profile, 1);
210
38.1k
        _parquet_profile.total_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
211
38.1k
                _profile, "RowGroupsTotalNum", TUnit::UNIT, parquet_profile, 1);
212
38.1k
        _parquet_profile.filtered_group_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
213
38.1k
                _profile, "FilteredRowsByGroup", TUnit::UNIT, parquet_profile, 1);
214
38.1k
        _parquet_profile.filtered_page_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
215
38.1k
                _profile, "FilteredRowsByPage", TUnit::UNIT, parquet_profile, 1);
216
38.1k
        _parquet_profile.lazy_read_filtered_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
217
38.1k
                _profile, "FilteredRowsByLazyRead", TUnit::UNIT, parquet_profile, 1);
218
38.1k
        _parquet_profile.filtered_bytes = ADD_CHILD_COUNTER_WITH_LEVEL(
219
38.1k
                _profile, "FilteredBytes", TUnit::BYTES, parquet_profile, 1);
220
38.1k
        _parquet_profile.raw_rows_read = ADD_CHILD_COUNTER_WITH_LEVEL(
221
38.1k
                _profile, "RawRowsRead", TUnit::UNIT, parquet_profile, 1);
222
38.1k
        _parquet_profile.column_read_time =
223
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ColumnReadTime", parquet_profile, 1);
224
38.1k
        _parquet_profile.parse_meta_time =
225
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseMetaTime", parquet_profile, 1);
226
38.1k
        _parquet_profile.parse_footer_time =
227
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseFooterTime", parquet_profile, 1);
228
38.1k
        _parquet_profile.file_reader_create_time =
229
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "FileReaderCreateTime", parquet_profile, 1);
230
38.1k
        _parquet_profile.open_file_num =
231
38.1k
                ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "FileNum", TUnit::UNIT, parquet_profile, 1);
232
38.1k
        _parquet_profile.page_index_read_calls =
233
38.1k
                ADD_COUNTER_WITH_LEVEL(_profile, "PageIndexReadCalls", TUnit::UNIT, 1);
234
38.1k
        _parquet_profile.page_index_filter_time =
235
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexFilterTime", parquet_profile, 1);
236
38.1k
        _parquet_profile.read_page_index_time =
237
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexReadTime", parquet_profile, 1);
238
38.1k
        _parquet_profile.parse_page_index_time =
239
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexParseTime", parquet_profile, 1);
240
38.1k
        _parquet_profile.row_group_filter_time =
241
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "RowGroupFilterTime", parquet_profile, 1);
242
38.1k
        _parquet_profile.file_footer_read_calls =
243
38.1k
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterReadCalls", TUnit::UNIT, 1);
244
38.1k
        _parquet_profile.file_footer_hit_cache =
245
38.1k
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterHitCache", TUnit::UNIT, 1);
246
38.1k
        _parquet_profile.decompress_time =
247
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecompressTime", parquet_profile, 1);
248
38.1k
        _parquet_profile.decompress_cnt = ADD_CHILD_COUNTER_WITH_LEVEL(
249
38.1k
                _profile, "DecompressCount", TUnit::UNIT, parquet_profile, 1);
250
38.1k
        _parquet_profile.page_read_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
251
38.1k
                _profile, "PageReadCount", TUnit::UNIT, parquet_profile, 1);
252
38.1k
        _parquet_profile.page_cache_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
253
38.1k
                _profile, "PageCacheWriteCount", TUnit::UNIT, parquet_profile, 1);
254
38.1k
        _parquet_profile.page_cache_compressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
255
38.1k
                _profile, "PageCacheCompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
256
38.1k
        _parquet_profile.page_cache_decompressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
257
38.1k
                _profile, "PageCacheDecompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
258
38.1k
        _parquet_profile.page_cache_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
259
38.1k
                _profile, "PageCacheHitCount", TUnit::UNIT, parquet_profile, 1);
260
38.1k
        _parquet_profile.page_cache_missing_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
261
38.1k
                _profile, "PageCacheMissingCount", TUnit::UNIT, parquet_profile, 1);
262
38.1k
        _parquet_profile.page_cache_compressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
263
38.1k
                _profile, "PageCacheCompressedHitCount", TUnit::UNIT, parquet_profile, 1);
264
38.1k
        _parquet_profile.page_cache_decompressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
265
38.1k
                _profile, "PageCacheDecompressedHitCount", TUnit::UNIT, parquet_profile, 1);
266
38.1k
        _parquet_profile.decode_header_time =
267
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderDecodeTime", parquet_profile, 1);
268
38.1k
        _parquet_profile.read_page_header_time =
269
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderReadTime", parquet_profile, 1);
270
38.1k
        _parquet_profile.decode_value_time =
271
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeValueTime", parquet_profile, 1);
272
38.1k
        _parquet_profile.decode_dict_time =
273
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeDictTime", parquet_profile, 1);
274
38.1k
        _parquet_profile.decode_level_time =
275
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeLevelTime", parquet_profile, 1);
276
38.1k
        _parquet_profile.decode_null_map_time =
277
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeNullMapTime", parquet_profile, 1);
278
38.1k
        _parquet_profile.skip_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
279
38.1k
                _profile, "SkipPageHeaderNum", TUnit::UNIT, parquet_profile, 1);
280
38.1k
        _parquet_profile.parse_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
281
38.1k
                _profile, "ParsePageHeaderNum", TUnit::UNIT, parquet_profile, 1);
282
38.1k
        _parquet_profile.predicate_filter_time =
283
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PredicateFilterTime", parquet_profile, 1);
284
38.1k
        _parquet_profile.dict_filter_rewrite_time =
285
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DictFilterRewriteTime", parquet_profile, 1);
286
38.1k
        _parquet_profile.convert_time =
287
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ConvertTime", parquet_profile, 1);
288
38.1k
        _parquet_profile.bloom_filter_read_time =
289
38.1k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "BloomFilterReadTime", parquet_profile, 1);
290
38.1k
    }
291
38.1k
}
292
293
34.5k
Status ParquetReader::close() {
294
34.5k
    _close_internal();
295
34.5k
    return Status::OK();
296
34.5k
}
297
298
75.1k
void ParquetReader::_close_internal() {
299
75.1k
    if (!_closed) {
300
40.6k
        _closed = true;
301
40.6k
    }
302
75.1k
}
303
304
76.8k
Status ParquetReader::_open_file() {
305
76.8k
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
306
0
        return Status::EndOfFile("stop");
307
0
    }
308
76.8k
    if (_file_reader == nullptr) {
309
39.2k
        SCOPED_RAW_TIMER(&_reader_statistics.file_reader_create_time);
310
39.2k
        ++_reader_statistics.open_file_num;
311
39.2k
        _file_description.mtime =
312
39.2k
                _scan_range.__isset.modification_time ? _scan_range.modification_time : 0;
313
39.2k
        io::FileReaderOptions reader_options =
314
39.2k
                FileFactory::get_reader_options(_state, _file_description);
315
39.2k
        _file_reader = DORIS_TRY(io::DelegateReader::create_file_reader(
316
39.2k
                _profile, _system_properties, _file_description, reader_options,
317
39.2k
                io::DelegateReader::AccessMode::RANDOM, _io_ctx));
318
39.2k
        _tracing_file_reader = _io_ctx ? std::make_shared<io::TracingFileReader>(
319
39.2k
                                                 _file_reader, _io_ctx->file_reader_stats)
320
39.2k
                                       : _file_reader;
321
39.2k
    }
322
323
76.8k
    if (_file_metadata == nullptr) {
324
39.3k
        SCOPED_RAW_TIMER(&_reader_statistics.parse_footer_time);
325
39.3k
        if (_tracing_file_reader->size() <= sizeof(PARQUET_VERSION_NUMBER)) {
326
            // Some systems may generate a parquet file with only 4 bytes: PAR1
327
            // Such a file should be considered an empty file.
328
0
            return Status::EndOfFile("open file failed, empty parquet file {} with size: {}",
329
0
                                     _scan_range.path, _tracing_file_reader->size());
330
0
        }
331
39.3k
        size_t meta_size = 0;
332
39.3k
        bool enable_mapping_varbinary = _scan_params.__isset.enable_mapping_varbinary
333
39.3k
                                                ? _scan_params.enable_mapping_varbinary
334
39.3k
                                                : false;
335
39.3k
        bool enable_mapping_timestamp_tz = _scan_params.__isset.enable_mapping_timestamp_tz
336
39.3k
                                                   ? _scan_params.enable_mapping_timestamp_tz
337
39.3k
                                                   : false;
338
39.3k
        if (_meta_cache == nullptr) {
339
            // wrap _file_metadata with unique ptr, so that it can be released finally.
340
2.46k
            RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
341
2.46k
                                                &meta_size, _io_ctx, enable_mapping_varbinary,
342
2.46k
                                                enable_mapping_timestamp_tz));
343
2.46k
            _file_metadata = _file_metadata_ptr.get();
344
            // parse magic number & parse meta data
345
2.46k
            _reader_statistics.file_footer_read_calls += 1;
346
36.8k
        } else {
347
36.8k
            const auto& file_meta_cache_key =
348
36.8k
                    FileMetaCache::get_key(_tracing_file_reader, _file_description);
349
36.8k
            if (!_meta_cache->lookup(file_meta_cache_key, &_meta_cache_handle)) {
350
8.80k
                RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
351
8.80k
                                                    &meta_size, _io_ctx, enable_mapping_varbinary,
352
8.80k
                                                    enable_mapping_timestamp_tz));
353
                // _file_metadata_ptr.release() : move control of _file_metadata to _meta_cache_handle
354
8.80k
                _meta_cache->insert(file_meta_cache_key, _file_metadata_ptr.release(),
355
8.80k
                                    &_meta_cache_handle);
356
8.80k
                _file_metadata = _meta_cache_handle.data<FileMetaData>();
357
8.80k
                _reader_statistics.file_footer_read_calls += 1;
358
28.0k
            } else {
359
28.0k
                _reader_statistics.file_footer_hit_cache++;
360
28.0k
            }
361
36.8k
            _file_metadata = _meta_cache_handle.data<FileMetaData>();
362
36.8k
        }
363
364
39.3k
        if (_file_metadata == nullptr) {
365
0
            return Status::InternalError("failed to get file meta data: {}",
366
0
                                         _file_description.path);
367
0
        }
368
39.3k
    }
369
76.8k
    return Status::OK();
370
76.8k
}
371
372
36.0k
Status ParquetReader::get_file_metadata_schema(const FieldDescriptor** ptr) {
373
36.0k
    RETURN_IF_ERROR(_open_file());
374
36.0k
    DCHECK(_file_metadata != nullptr);
375
36.0k
    *ptr = &_file_metadata->schema();
376
36.0k
    return Status::OK();
377
36.0k
}
378
379
40.5k
void ParquetReader::_init_system_properties() {
380
40.5k
    if (_scan_range.__isset.file_type) {
381
        // for compatibility
382
37.9k
        _system_properties.system_type = _scan_range.file_type;
383
37.9k
    } else {
384
2.63k
        _system_properties.system_type = _scan_params.file_type;
385
2.63k
    }
386
40.5k
    _system_properties.properties = _scan_params.properties;
387
40.5k
    _system_properties.hdfs_params = _scan_params.hdfs_params;
388
40.5k
    if (_scan_params.__isset.broker_addresses) {
389
24
        _system_properties.broker_addresses.assign(_scan_params.broker_addresses.begin(),
390
24
                                                   _scan_params.broker_addresses.end());
391
24
    }
392
40.5k
}
393
394
40.5k
void ParquetReader::_init_file_description() {
395
40.5k
    _file_description.path = _scan_range.path;
396
18.4E
    _file_description.file_size = _scan_range.__isset.file_size ? _scan_range.file_size : -1;
397
40.5k
    if (_scan_range.__isset.fs_name) {
398
17.0k
        _file_description.fs_name = _scan_range.fs_name;
399
17.0k
    }
400
40.5k
    if (_scan_range.__isset.file_cache_admission) {
401
35.7k
        _file_description.file_cache_admission = _scan_range.file_cache_admission;
402
35.7k
    }
403
40.5k
}
404
405
3.14k
Status ParquetReader::on_before_init_reader(ReaderInitContext* ctx) {
406
3.14k
    _column_descs = ctx->column_descs;
407
3.14k
    _fill_col_name_to_block_idx = ctx->col_name_to_block_idx;
408
3.14k
    RETURN_IF_ERROR(
409
3.14k
            _extract_partition_values(*ctx->range, ctx->tuple_descriptor, _fill_partition_values));
410
13.3k
    for (auto& desc : *ctx->column_descs) {
411
13.3k
        if (desc.category == ColumnCategory::REGULAR ||
412
13.3k
            desc.category == ColumnCategory::GENERATED) {
413
13.2k
            ctx->column_names.push_back(desc.name);
414
13.2k
        } else if (desc.category == ColumnCategory::SYNTHESIZED &&
415
54
                   desc.name.starts_with(BeConsts::GLOBAL_ROWID_COL)) {
416
52
            auto topn_row_id_column_iter = _create_topn_row_id_column_iterator();
417
52
            this->register_synthesized_column_handler(
418
52
                    desc.name,
419
52
                    [iter = std::move(topn_row_id_column_iter), this, &desc](
420
353
                            Block* block, size_t rows) -> Status {
421
353
                        return fill_topn_row_id(iter, desc.name, block, rows);
422
353
                    });
423
52
            continue;
424
52
        }
425
13.3k
    }
426
427
    // Build table_info_node from Parquet file metadata with case-insensitive recursive matching.
428
    // File is already opened by init_reader before this hook, so metadata is available.
429
    // tuple_descriptor may be null in unit tests that only set column_descs.
430
3.14k
    if (ctx->tuple_descriptor != nullptr) {
431
3.14k
        const FieldDescriptor* field_desc = nullptr;
432
3.14k
        RETURN_IF_ERROR(get_file_metadata_schema(&field_desc));
433
3.14k
        RETURN_IF_ERROR(TableSchemaChangeHelper::BuildTableInfoUtil::by_parquet_name(
434
3.14k
                ctx->tuple_descriptor, *field_desc, ctx->table_info_node));
435
3.14k
    }
436
437
3.14k
    return Status::OK();
438
3.14k
}
439
440
36.9k
Status ParquetReader::_open_file_reader(ReaderInitContext* /*ctx*/) {
441
36.9k
    return _open_file();
442
36.9k
}
443
444
36.7k
Status ParquetReader::_do_init_reader(ReaderInitContext* base_ctx) {
445
36.7k
    auto* ctx = checked_context_cast<ParquetInitContext>(base_ctx);
446
36.7k
    _col_name_to_block_idx = base_ctx->col_name_to_block_idx;
447
36.7k
    _tuple_descriptor = ctx->tuple_descriptor;
448
36.7k
    _row_descriptor = ctx->row_descriptor;
449
36.7k
    _colname_to_slot_id = ctx->colname_to_slot_id;
450
36.7k
    _not_single_slot_filter_conjuncts = ctx->not_single_slot_filter_conjuncts;
451
36.7k
    _slot_id_to_filter_conjuncts = ctx->slot_id_to_filter_conjuncts;
452
36.7k
    _filter_groups = ctx->filter_groups;
453
36.7k
    _table_info_node_ptr = base_ctx->table_info_node;
454
36.7k
    _column_ids = base_ctx->column_ids;
455
36.7k
    _filter_column_ids = base_ctx->filter_column_ids;
456
457
    // _open_file_reader (called by init_reader NVI before hooks) must have opened the file.
458
18.4E
    DCHECK(_file_metadata != nullptr)
459
18.4E
            << "ParquetReader::_do_init_reader called without _open_file_reader";
460
36.7k
    _t_metadata = &(_file_metadata->to_thrift());
461
462
36.7k
    SCOPED_RAW_TIMER(&_reader_statistics.parse_meta_time);
463
36.7k
    _total_groups = _t_metadata->row_groups.size();
464
36.7k
    if (_total_groups == 0) {
465
12
        return Status::EndOfFile("init reader failed, empty parquet file: " + _scan_range.path);
466
12
    }
467
36.7k
    _current_row_group_index = RowGroupReader::RowGroupIndex {-1, 0, 0};
468
469
    // Compute missing columns and file↔table column mapping.
470
    // This runs in _do_init_reader (not on_before_init_reader) because table-format readers
471
    // (Iceberg, Paimon, Hive, Hudi) override on_before_init_reader completely.
472
36.7k
    if (has_column_descs()) {
473
34.4k
        _fill_missing_cols.clear();
474
34.4k
        _fill_missing_defaults.clear();
475
125k
        for (const auto& col_name : base_ctx->column_names) {
476
125k
            if (!_table_info_node_ptr->children_column_exists(col_name)) {
477
4.19k
                _fill_missing_cols.insert(col_name);
478
4.19k
            }
479
125k
        }
480
34.4k
        if (_column_descs && !_fill_missing_cols.empty()) {
481
13.1k
            for (const auto& desc : *_column_descs) {
482
13.1k
                if (_fill_missing_cols.contains(desc.name) &&
483
13.1k
                    !_fill_partition_values.contains(desc.name)) {
484
4.18k
                    _fill_missing_defaults[desc.name] = desc.default_expr;
485
4.18k
                }
486
13.1k
            }
487
3.03k
        }
488
34.4k
    }
489
    // Resolve file-column ↔ table-column mapping in file-schema order.
490
    // _init_read_columns handles both normal path (missing cols populated above)
491
    // and standalone path (_fill_missing_cols empty, _table_info_node_ptr may be null).
492
36.7k
    _init_read_columns(base_ctx->column_names);
493
494
    // build column predicates for column lazy read
495
36.8k
    if (ctx->conjuncts != nullptr) {
496
36.8k
        _lazy_read_ctx.conjuncts = *ctx->conjuncts;
497
36.8k
    }
498
36.8k
    if (ctx->slot_id_to_predicates != nullptr) {
499
36.8k
        _lazy_read_ctx.slot_id_to_predicates = *ctx->slot_id_to_predicates;
500
36.8k
    }
501
502
    // ---- Inlined set_fill_columns logic (partition/missing/synthesized classification) ----
503
504
    // 1. Collect predicate columns from conjuncts for lazy materialization
505
36.7k
    std::unordered_map<std::string, std::pair<uint32_t, int>> predicate_columns;
506
36.7k
    _collect_predicate_columns_from_conjuncts(predicate_columns);
507
508
    // 2. Classify read/partition/missing/synthesized columns into lazy vs predicate groups
509
36.7k
    _classify_columns_for_lazy_read(predicate_columns, _fill_partition_values,
510
36.7k
                                    _fill_missing_defaults);
511
512
    // 3. Populate col_names vectors for ColumnProcessor path
513
36.7k
    for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
514
359
        _lazy_read_ctx.predicate_partition_col_names.emplace_back(kv.first);
515
359
    }
516
36.7k
    for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
517
1.44k
        _lazy_read_ctx.predicate_missing_col_names.emplace_back(kv.first);
518
1.44k
    }
519
36.7k
    for (auto& kv : _lazy_read_ctx.partition_columns) {
520
4.26k
        _lazy_read_ctx.partition_col_names.emplace_back(kv.first);
521
4.26k
    }
522
36.7k
    for (auto& kv : _lazy_read_ctx.missing_columns) {
523
4.07k
        _lazy_read_ctx.missing_col_names.emplace_back(kv.first);
524
4.07k
    }
525
526
36.7k
    if (_filter_groups && (_total_groups == 0 || _t_metadata->num_rows == 0 || _range_size < 0)) {
527
4
        return Status::EndOfFile("No row group to read");
528
4
    }
529
530
36.7k
    return Status::OK();
531
36.7k
}
532
533
36.8k
void ParquetReader::_init_read_columns(const std::vector<std::string>& column_names) {
534
    // Build file_col_name → table_col_name map, skipping missing columns.
535
    // Must iterate file schema in physical order so that _generate_random_access_ranges
536
    // sees monotonically increasing chunk offsets.
537
36.8k
    auto schema_desc = _file_metadata->schema();
538
36.8k
    std::map<std::string, std::string> required_file_columns;
539
129k
    for (const auto& col_name : column_names) {
540
129k
        if (_fill_missing_cols.contains(col_name)) {
541
4.19k
            continue;
542
4.19k
        }
543
125k
        std::string file_col = col_name;
544
125k
        if (_table_info_node_ptr && _table_info_node_ptr->children_column_exists(col_name)) {
545
125k
            file_col = _table_info_node_ptr->children_file_column_name(col_name);
546
125k
        }
547
125k
        required_file_columns[file_col] = col_name;
548
125k
    }
549
341k
    for (int i = 0; i < schema_desc.size(); ++i) {
550
305k
        const auto& name = schema_desc.get_column(i)->name;
551
305k
        if (required_file_columns.contains(name)) {
552
125k
            _read_file_columns.emplace_back(name);
553
125k
            _read_table_columns.emplace_back(required_file_columns[name]);
554
125k
            _read_table_columns_set.insert(required_file_columns[name]);
555
125k
        }
556
305k
    }
557
36.8k
}
558
559
15.9k
bool ParquetReader::_exists_in_file(const std::string& expr_name) const {
560
    // `_read_table_columns_set` is used to ensure that only columns actually read are subject to min-max filtering.
561
    // This primarily handles cases where partition columns also exist in a file. The reason it's not modified
562
    // in `_table_info_node_ptr` is that Iceberg and Hudi have inconsistent requirements for this node;
563
    // Iceberg partition evolution needs to read partition columns from a file.
564
    // Hudi with `hoodie.datasource.write.drop.partition.columns=false` does not need to read partition columns from a file.
565
15.9k
    return _table_info_node_ptr->children_column_exists(expr_name) &&
566
15.9k
           _read_table_columns_set.contains(expr_name);
567
15.9k
}
568
569
15.3k
bool ParquetReader::_type_matches(const int cid) const {
570
15.3k
    auto* slot = _tuple_descriptor->slots()[cid];
571
15.3k
    auto table_col_type = remove_nullable(slot->type());
572
573
15.3k
    const auto& file_col_name = _table_info_node_ptr->children_file_column_name(slot->col_name());
574
15.3k
    const auto& file_col_type =
575
15.3k
            remove_nullable(_file_metadata->schema().get_column(file_col_name)->data_type);
576
577
15.3k
    return (table_col_type->get_primitive_type() == file_col_type->get_primitive_type()) &&
578
15.3k
           !is_complex_type(table_col_type->get_primitive_type());
579
15.3k
}
580
581
// Walks every pushed-down conjunct, records each referenced slot into
// `predicate_columns` (keyed by expression name, valued by (column_id, slot_id)),
// and assembles the single-column predicates that qualify for min-max/bloom
// push-down into `_push_down_predicates`.
void ParquetReader::_collect_predicate_columns_from_conjuncts(
        std::unordered_map<std::string, std::pair<uint32_t, int>>& predicate_columns) {
    // Recursively visit an expression tree and collect every slot reference.
    std::function<void(VExpr * expr)> visit_slot = [&](VExpr* expr) {
        if (expr->is_slot_ref()) {
            VSlotRef* slot_ref = static_cast<VSlotRef*>(expr);
            auto expr_name = slot_ref->expr_name();
            predicate_columns.emplace(expr_name,
                                      std::make_pair(slot_ref->column_id(), slot_ref->slot_id()));
            // Column 0 participates in a predicate, so the first block column is
            // materialized normally and must not be resized lazily.
            if (slot_ref->column_id() == 0) {
                _lazy_read_ctx.resize_first_column = false;
            }
            return;
        }
        for (auto& child : expr->children()) {
            visit_slot(child.get());
        }
    };

    for (const auto& conjunct : _lazy_read_ctx.conjuncts) {
        auto expr = conjunct->root();
        if (expr->is_rf_wrapper()) {
            // Runtime-filter wrappers hide the real predicate; visit the wrapped impl.
            VRuntimeFilterWrapper* runtime_filter = assert_cast<VRuntimeFilterWrapper*>(expr.get());
            auto filter_impl = runtime_filter->get_impl();
            visit_slot(filter_impl.get());
        } else {
            visit_slot(expr.get());
        }
    }

    if (!_lazy_read_ctx.slot_id_to_predicates.empty()) {
        // AND together every qualifying single-column predicate; the combined
        // predicate is later used for row-group/page pruning.
        auto and_pred = AndBlockColumnPredicate::create_unique();
        for (const auto& entry : _lazy_read_ctx.slot_id_to_predicates) {
            for (const auto& pred : entry.second) {
                // Parquet shares _push_down_predicates for row-group/page min-max pruning and
                // bloom-filter evaluation, so this flag currently gates both predicate paths.
                if (!has_column_optimization(pred->col_name(), ColumnOptimizationTypes::MIN_MAX)) {
                    continue;
                }
                // Skip columns not actually read from the file, and columns whose
                // table/file primitive types differ (e.g. after schema change).
                if (!_exists_in_file(pred->col_name()) || !_type_matches(pred->column_id())) {
                    continue;
                }
                and_pred->add_column_predicate(
                        SingleColumnBlockPredicate::create_unique(pred->clone(pred->column_id())));
            }
        }
        if (and_pred->num_of_column_predicate() > 0) {
            _push_down_predicates.push_back(std::move(and_pred));
        }
    }
}
631
632
// Splits every column this reader touches into the buckets consumed by lazy
// materialization:
//   - predicate columns (read in phase 1, used to filter rows),
//   - lazy-read columns (read in phase 2, only for surviving rows),
//   - partition / missing columns, each further split by whether a predicate
//     references them.
// Finally decides whether lazy read is possible at all; if not, the
// predicate-partition/missing buckets are folded back into the plain buckets.
void ParquetReader::_classify_columns_for_lazy_read(
        const std::unordered_map<std::string, std::pair<uint32_t, int>>&
                predicate_conjuncts_columns,
        const std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>&
                partition_columns,
        const std::unordered_map<std::string, VExprContextSPtr>& missing_columns) {
    const FieldDescriptor& schema = _file_metadata->schema();
    // Local copy: generated columns are appended below without mutating the input.
    auto predicate_columns = predicate_conjuncts_columns;
#ifndef BE_TEST
    for (const auto& [col_name, _] : _generated_col_handlers) {
        // Resolve the slot id for the generated column by name.
        int slot_id = -1;
        for (auto slot : _tuple_descriptor->slots()) {
            if (slot->col_name() == col_name) {
                slot_id = slot->id();
                break;
            }
        }
        DCHECK(slot_id != -1) << "slot id should not be -1 for generated column: " << col_name;
        auto column_index = _row_descriptor->get_column_id(slot_id);
        if (column_index == 0) {
            _lazy_read_ctx.resize_first_column = false;
        }
        // assume generated columns are only used for predicate push down.
        predicate_columns.emplace(col_name, std::make_pair(column_index, slot_id));
    }

    for (const auto& [col_name, _] : _synthesized_col_handlers) {
        // Resolve the slot id for the synthesized column by name.
        int slot_id = -1;
        for (auto slot : _tuple_descriptor->slots()) {
            if (slot->col_name() == col_name) {
                slot_id = slot->id();
                break;
            }
        }
        DCHECK(slot_id != -1) << "slot id should not be -1 for synthesized column: " << col_name;
        auto column_index = _row_descriptor->get_column_id(slot_id);
        if (column_index == 0) {
            _lazy_read_ctx.resize_first_column = false;
        }
        // synthesized columns always fill data on first phase.
        _lazy_read_ctx.all_predicate_col_ids.emplace_back(column_index);
    }
#endif
    // Classify each column read from the file as predicate vs lazy-read.
    for (auto& read_table_col : _read_table_columns) {
        _lazy_read_ctx.all_read_columns.emplace_back(read_table_col);

        auto file_column_name = _table_info_node_ptr->children_file_column_name(read_table_col);
        PrimitiveType column_type =
                schema.get_column(file_column_name)->data_type->get_primitive_type();
        if (is_complex_type(column_type)) {
            _lazy_read_ctx.has_complex_type = true;
        }
        // Without any predicate columns there is nothing to lazy-read against,
        // so no column gets bucketed at all.
        if (predicate_columns.size() > 0) {
            auto iter = predicate_columns.find(read_table_col);
            if (iter == predicate_columns.end()) {
                _lazy_read_ctx.lazy_read_columns.emplace_back(read_table_col);
            } else {
                _lazy_read_ctx.predicate_columns.first.emplace_back(iter->first);
                _lazy_read_ctx.predicate_columns.second.emplace_back(iter->second.second);
                _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
            }
        }
    }

    // Partition columns: separate those referenced by predicates.
    for (auto& kv : partition_columns) {
        auto iter = predicate_columns.find(kv.first);
        if (iter == predicate_columns.end()) {
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
        } else {
            _lazy_read_ctx.predicate_partition_columns.emplace(kv.first, kv.second);
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
        }
    }

    // Missing columns (not present in the file): separate those referenced by predicates.
    for (auto& kv : missing_columns) {
        auto iter = predicate_columns.find(kv.first);
        if (iter != predicate_columns.end()) {
            //For check missing column :   missing column == xx, missing column is null,missing column is not null.
            if (_slot_id_to_filter_conjuncts->find(iter->second.second) !=
                _slot_id_to_filter_conjuncts->end()) {
                for (auto& ctx : _slot_id_to_filter_conjuncts->find(iter->second.second)->second) {
                    _lazy_read_ctx.missing_columns_conjuncts.emplace_back(ctx);
                }
            }
            _lazy_read_ctx.predicate_missing_columns.emplace(kv.first, kv.second);
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
        } else {
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
        }
    }

    // Lazy read pays off only when there exist both predicate columns (phase 1)
    // and non-predicate columns that could be deferred (phase 2).
    if (_enable_lazy_mat && _lazy_read_ctx.predicate_columns.first.size() > 0 &&
        _lazy_read_ctx.lazy_read_columns.size() > 0) {
        _lazy_read_ctx.can_lazy_read = true;
    }

    // Without lazy read, the predicate-only partition/missing buckets are moot:
    // merge them back so they are filled like ordinary partition/missing columns.
    if (!_lazy_read_ctx.can_lazy_read) {
        for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
        }
        for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
        }
    }
}
737
738
// init file reader and file metadata for parsing schema
Status ParquetReader::init_schema_reader() {
    // Opens the file and parses the parquet footer; on success _file_metadata is valid.
    RETURN_IF_ERROR(_open_file());
    // Cache a pointer to the thrift form of the footer.
    // NOTE(review): taking the address assumes to_thrift() returns a reference to
    // storage owned by _file_metadata (not a temporary) — appears consistent with
    // other uses of _t_metadata in this file, but confirm against FileMetaData.
    _t_metadata = &(_file_metadata->to_thrift());
    return Status::OK();
}
744
745
// Exposes the file's parsed schema: appends one (name, nullable type) pair per
// top-level column to the output vectors. Also records the row-group count.
//
// @param col_names out: column names in physical schema order (appended).
// @param col_types out: corresponding data types, wrapped nullable (appended).
// @return Status::OK() always; callers rely on the Status interface.
Status ParquetReader::get_parsed_schema(std::vector<std::string>* col_names,
                                        std::vector<DataTypePtr>* col_types) {
    _total_groups = _t_metadata->row_groups.size();
    // Bind by const reference: the previous `auto` copied the whole schema
    // descriptor on every call. Sibling functions in this file already bind
    // `_file_metadata->schema()` by reference.
    const auto& schema_desc = _file_metadata->schema();
    for (int i = 0; i < schema_desc.size(); ++i) {
        const auto* column = schema_desc.get_column(i);
        col_names->emplace_back(column->name);
        // All columns are reported nullable regardless of the file's repetition level.
        col_types->emplace_back(make_nullable(column->data_type));
    }
    return Status::OK();
}
756
757
// Fills `name_to_type` with every column in the file schema, mapping the
// column name to its parquet-derived data type.
Status ParquetReader::_get_columns_impl(
        std::unordered_map<std::string, DataTypePtr>* name_to_type) {
    const auto& schema_desc = _file_metadata->schema();

    std::unordered_set<std::string> column_names;
    schema_desc.get_column_names(&column_names);

    for (const auto& col_name : column_names) {
        name_to_type->emplace(col_name, schema_desc.get_column(col_name)->data_type);
    }
    return Status::OK();
}
768
769
80.5k
// Reads the next batch of rows into `block`. Advances to the next row group when
// the current one is exhausted, enforces a per-batch memory cap on load paths,
// and folds per-row-group statistics into the reader totals when a group ends.
//
// @param block     out: destination block for the batch.
// @param read_rows out: number of rows produced (0 on EOF).
// @param eof       out: true when the whole file has been consumed.
Status ParquetReader::_do_get_next_block(Block* block, size_t* read_rows, bool* eof) {
    if (_current_group_reader == nullptr || _row_group_eof) {
        Status st = _next_row_group_reader();
        // END_OF_FILE from the group iterator is expected; only real errors propagate.
        if (!st.ok() && !st.is<ErrorCode::END_OF_FILE>()) {
            return st;
        }
        if (_current_group_reader == nullptr || _row_group_eof || st.is<ErrorCode::END_OF_FILE>()) {
            _current_group_reader.reset(nullptr);
            _row_group_eof = true;
            *read_rows = 0;
            *eof = true;
            return Status::OK();
        }
    }

    // Limit memory per batch for load paths.
    // _load_bytes_per_row is updated after each batch so the *next* call pre-shrinks _batch_size
    // before reading, ensuring the current batch is already within the limit (from call 2 onward).
    const int64_t max_block_bytes =
            (_state != nullptr && _state->query_type() == TQueryType::LOAD &&
             config::load_reader_max_block_bytes > 0)
                    ? config::load_reader_max_block_bytes
                    : 0;
    if (max_block_bytes > 0 && _load_bytes_per_row > 0) {
        // At least one row per batch, otherwise reading could never progress.
        _batch_size = std::max((size_t)1,
                               (size_t)((int64_t)max_block_bytes / (int64_t)_load_bytes_per_row));
    }

    SCOPED_RAW_TIMER(&_reader_statistics.column_read_time);
    Status batch_st =
            _current_group_reader->next_batch(block, _batch_size, read_rows, &_row_group_eof);
    if (batch_st.is<ErrorCode::END_OF_FILE>()) {
        // The group reader signalled EOF itself: discard any partial data and finish.
        block->clear_column_data();
        _current_group_reader.reset(nullptr);
        *read_rows = 0;
        *eof = true;
        return Status::OK();
    }

    if (!batch_st.ok()) {
        return Status::InternalError("Read parquet file {} failed, reason = {}", _scan_range.path,
                                     batch_st.to_string());
    }

    // Refresh the bytes-per-row estimate used to size the next batch (load paths only).
    if (max_block_bytes > 0 && *read_rows > 0) {
        _load_bytes_per_row = block->bytes() / *read_rows;
    }

    if (_row_group_eof) {
        // The current row group is done: merge its statistics into the reader totals
        // before the group reader is replaced on the next call.
        auto column_st = _current_group_reader->merged_column_statistics();
        _column_statistics.merge(column_st);
        _reader_statistics.lazy_read_filtered_rows +=
                _current_group_reader->lazy_read_filtered_rows();
        _reader_statistics.predicate_filter_time += _current_group_reader->predicate_filter_time();
        _reader_statistics.dict_filter_rewrite_time +=
                _current_group_reader->dict_filter_rewrite_time();
        if (_io_ctx) {
            _io_ctx->condition_cache_filtered_rows +=
                    _current_group_reader->condition_cache_filtered_rows();
        }

        // Only report EOF to the caller when this was the last row group.
        if (_current_row_group_index.row_group_id + 1 == _total_groups) {
            *eof = true;
        } else {
            *eof = false;
        }
    }
    return Status::OK();
}
838
839
// Builds the position-delete context for one row group: the slice of the global
// sorted delete-row list [_delete_rows) whose row positions fall inside
// [first_row, last_row) of this group. `_delete_rows_index` advances
// monotonically, so earlier entries are never rescanned.
//
// @param row_group       thrift metadata of the group (for its row count).
// @param row_group_index global first/last row positions of the group.
RowGroupReader::PositionDeleteContext ParquetReader::_get_position_delete_ctx(
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index) {
    if (_delete_rows == nullptr) {
        // No delete file: every row in the group is live.
        return RowGroupReader::PositionDeleteContext(row_group.num_rows, row_group_index.first_row);
    }
    // Use data() instead of &(*_delete_rows)[0]: operator[] on an empty vector is
    // undefined behavior, while data() is well-defined (and identical otherwise).
    const int64_t* delete_rows = _delete_rows->data();
    const int64_t* delete_rows_end = delete_rows + _delete_rows->size();
    // First deleted position >= first_row, searching only past what earlier groups consumed.
    const int64_t* start_pos = std::lower_bound(delete_rows + _delete_rows_index, delete_rows_end,
                                                row_group_index.first_row);
    int64_t start_index = start_pos - delete_rows;
    // First deleted position >= last_row, i.e. one past this group's slice.
    const int64_t* end_pos = std::lower_bound(start_pos, delete_rows_end, row_group_index.last_row);
    int64_t end_index = end_pos - delete_rows;
    _delete_rows_index = end_index;
    return RowGroupReader::PositionDeleteContext(*_delete_rows, row_group.num_rows,
                                                 row_group_index.first_row, start_index, end_index);
}
855
856
44.6k
// Advances to the next row group that (a) belongs to this reader's scan range and
// (b) survives min-max/bloom pruning, then constructs and initializes a
// RowGroupReader for it. Returns EndOfFile when no group remains.
Status ParquetReader::_next_row_group_reader() {
    if (_current_group_reader != nullptr) {
        // Flush the finished group's profile counters before replacing it.
        _current_group_reader->collect_profile_before_close();
    }

    RowRanges candidate_row_ranges;
    while (++_current_row_group_index.row_group_id < _total_groups) {
        const auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
        // Maintain the running global row interval [first_row, last_row) of the group.
        _current_row_group_index.first_row = _current_row_group_index.last_row;
        _current_row_group_index.last_row = _current_row_group_index.last_row + row_group.num_rows;

        // Skip groups whose byte midpoint falls outside this reader's scan range.
        if (_filter_groups && _is_misaligned_range_group(row_group)) {
            continue;
        }

        candidate_row_ranges.clear();
        // The range of lines to be read is determined by the push down predicate.
        RETURN_IF_ERROR(_process_min_max_bloom_filter(
                _current_row_group_index, row_group, _push_down_predicates, &candidate_row_ranges));

        // Recursively sums the compressed size of a (possibly nested) field:
        // leaves report their chunk size, group nodes sum their children.
        std::function<int64_t(const FieldSchema*)> column_compressed_size =
                [&row_group, &column_compressed_size](const FieldSchema* field) -> int64_t {
            if (field->physical_column_index >= 0) {
                int parquet_col_id = field->physical_column_index;
                if (row_group.columns[parquet_col_id].__isset.meta_data) {
                    return row_group.columns[parquet_col_id].meta_data.total_compressed_size;
                }
                return 0;
            }
            int64_t size = 0;
            for (const FieldSchema& child : field->children) {
                size += column_compressed_size(&child);
            }
            return size;
        };
        int64_t group_size = 0; // only calculate the needed columns
        for (auto& read_col : _read_file_columns) {
            const FieldSchema* field = _file_metadata->schema().get_column(read_col);
            group_size += column_compressed_size(field);
        }

        _reader_statistics.read_rows += candidate_row_ranges.count();
        if (_io_ctx) {
            _io_ctx->file_reader_stats->read_rows += candidate_row_ranges.count();
        }

        if (candidate_row_ranges.count() != 0) {
            // need read this row group.
            _reader_statistics.read_row_groups++;
            _reader_statistics.filtered_page_rows +=
                    row_group.num_rows - candidate_row_ranges.count();
            break;
        } else {
            // this row group be filtered.
            _reader_statistics.filtered_row_groups++;
            _reader_statistics.filtered_bytes += group_size;
            _reader_statistics.filtered_group_rows += row_group.num_rows;
        }
    }

    if (_current_row_group_index.row_group_id == _total_groups) {
        _row_group_eof = true;
        _current_group_reader.reset(nullptr);
        return Status::EndOfFile("No next RowGroupReader");
    }

    // process page index and generate the ranges to read
    auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];

    RowGroupReader::PositionDeleteContext position_delete_ctx =
            _get_position_delete_ctx(row_group, _current_row_group_index);
    io::FileReaderSPtr group_file_reader;
    if (typeid_cast<io::InMemoryFileReader*>(_file_reader.get())) {
        // InMemoryFileReader has the ability to merge small IO
        group_file_reader = _file_reader;
    } else {
        size_t avg_io_size = 0;
        const std::vector<io::PrefetchRange> io_ranges =
                _generate_random_access_ranges(_current_row_group_index, &avg_io_size);
        int64_t merged_read_slice_size = -1;
        if (_state != nullptr && _state->query_options().__isset.merge_read_slice_size) {
            merged_read_slice_size = _state->query_options().merge_read_slice_size;
        }
        // The underlying page reader will prefetch data in column.
        // Using both MergeRangeFileReader and BufferedStreamReader simultaneously would waste a lot of memory.
        group_file_reader =
                avg_io_size < io::MergeRangeFileReader::SMALL_IO
                        ? std::make_shared<io::MergeRangeFileReader>(
                                  _profile, _file_reader, io_ranges, merged_read_slice_size)
                        : _file_reader;
    }
    // Wrap with a tracing reader when an IO context is present so per-file stats are recorded.
    _current_group_reader.reset(new RowGroupReader(
            _io_ctx ? std::make_shared<io::TracingFileReader>(group_file_reader,
                                                              _io_ctx->file_reader_stats)
                    : group_file_reader,
            _read_table_columns, _current_row_group_index.row_group_id, row_group, _ctz, _io_ctx,
            position_delete_ctx, _lazy_read_ctx, _state, _column_ids, _filter_column_ids));
    _row_group_eof = false;

    _current_group_reader->set_current_row_group_idx(_current_row_group_index);
    _current_group_reader->set_col_name_to_block_idx(_col_name_to_block_idx);
    if (_condition_cache_ctx) {
        _current_group_reader->set_condition_cache_context(_condition_cache_ctx);
    }
    _current_group_reader->set_table_format_reader(this);

    _current_group_reader->_table_info_node_ptr = _table_info_node_ptr;
    return _current_group_reader->init(_file_metadata->schema(), candidate_row_ranges, _col_offsets,
                                       _tuple_descriptor, _row_descriptor, _colname_to_slot_id,
                                       _not_single_slot_filter_conjuncts,
                                       _slot_id_to_filter_conjuncts);
}
968
969
// Collects the byte ranges (one per leaf column chunk) that will be read for the
// given row group, recursing into array/map/struct children. Chunks are visited
// in _read_file_columns order; the DCHECK asserts that their file offsets are
// monotonically increasing, which downstream merge logic relies on.
//
// @param group       index of the row group to scan.
// @param avg_io_size out: average compressed chunk size (unchanged if no ranges).
// @return one PrefetchRange per selected leaf column chunk.
std::vector<io::PrefetchRange> ParquetReader::_generate_random_access_ranges(
        const RowGroupReader::RowGroupIndex& group, size_t* avg_io_size) {
    std::vector<io::PrefetchRange> result;
    int64_t last_chunk_end = -1;
    size_t total_io_size = 0;
    // Recursive descent over the field schema; only fields selected by _column_ids
    // (or all fields when _column_ids is empty) contribute ranges.
    std::function<void(const FieldSchema*, const tparquet::RowGroup&)> scalar_range =
            [&](const FieldSchema* field, const tparquet::RowGroup& row_group) {
                if (_column_ids.empty() ||
                    _column_ids.find(field->get_column_id()) != _column_ids.end()) {
                    if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
                        scalar_range(&field->children[0], row_group);
                    } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
                        // Map: recurse into both key and value children.
                        scalar_range(&field->children[0], row_group);
                        scalar_range(&field->children[1], row_group);
                    } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
                        for (int i = 0; i < field->children.size(); ++i) {
                            scalar_range(&field->children[i], row_group);
                        }
                    } else {
                        // Leaf column: its chunk starts at the dictionary page when
                        // one exists, otherwise at the first data page.
                        const tparquet::ColumnChunk& chunk =
                                row_group.columns[field->physical_column_index];
                        auto& chunk_meta = chunk.meta_data;
                        int64_t chunk_start = has_dict_page(chunk_meta)
                                                      ? chunk_meta.dictionary_page_offset
                                                      : chunk_meta.data_page_offset;
                        int64_t chunk_end = chunk_start + chunk_meta.total_compressed_size;
                        DCHECK_GE(chunk_start, last_chunk_end);
                        result.emplace_back(chunk_start, chunk_end);
                        total_io_size += chunk_meta.total_compressed_size;
                        last_chunk_end = chunk_end;
                    }
                }
            };
    const tparquet::RowGroup& row_group = _t_metadata->row_groups[group.row_group_id];
    for (const auto& read_col : _read_file_columns) {
        const FieldSchema* field = _file_metadata->schema().get_column(read_col);
        scalar_range(field, row_group);
    }
    if (!result.empty()) {
        *avg_io_size = total_io_size / result.size();
    }
    return result;
}
1012
1013
114k
// A row group is assigned to this reader iff the midpoint of its byte span
// lies inside [_range_start_offset, _range_start_offset + _range_size); any
// other group belongs to a different scan range and is "misaligned" here.
bool ParquetReader::_is_misaligned_range_group(const tparquet::RowGroup& row_group) const {
    const int64_t group_begin = _get_column_start_offset(row_group.columns.front().meta_data);

    const auto& tail_meta = row_group.columns.back().meta_data;
    const int64_t group_end =
            _get_column_start_offset(tail_meta) + tail_meta.total_compressed_size;

    const int64_t midpoint = group_begin + (group_end - group_begin) / 2;
    return midpoint < _range_start_offset ||
           midpoint >= _range_start_offset + _range_size;
}
1026
1027
5.68k
int64_t ParquetReader::get_total_rows() const {
1028
5.68k
    if (!_t_metadata) return 0;
1029
5.68k
    if (!_filter_groups) return _t_metadata->num_rows;
1030
5.68k
    int64_t total = 0;
1031
10.7k
    for (const auto& rg : _t_metadata->row_groups) {
1032
10.7k
        if (!_is_misaligned_range_group(rg)) {
1033
8.06k
            total += rg.num_rows;
1034
8.06k
        }
1035
10.7k
    }
1036
5.68k
    return total;
1037
5.68k
}
1038
1039
8.70k
void ParquetReader::set_condition_cache_context(std::shared_ptr<ConditionCacheContext> ctx) {
1040
8.70k
    _condition_cache_ctx = std::move(ctx);
1041
8.70k
    if (!_condition_cache_ctx || !_t_metadata || !_filter_groups) {
1042
0
        return;
1043
0
    }
1044
    // Find the first assigned row group to compute base_granule.
1045
8.70k
    int64_t first_row = 0;
1046
8.70k
    for (const auto& rg : _t_metadata->row_groups) {
1047
8.69k
        if (!_is_misaligned_range_group(rg)) {
1048
8.68k
            _condition_cache_ctx->base_granule = first_row / ConditionCacheContext::GRANULE_SIZE;
1049
8.68k
            return;
1050
8.68k
        }
1051
10
        first_row += rg.num_rows;
1052
10
    }
1053
8.70k
}
1054
1055
Status ParquetReader::_process_page_index_filter(
1056
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index,
1057
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1058
35.0k
        RowRanges* candidate_row_ranges) {
1059
35.0k
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
1060
4
        return Status::EndOfFile("stop");
1061
4
    }
1062
1063
35.0k
    std::function<void()> read_whole_row_group = [&]() {
1064
30.0k
        candidate_row_ranges->add(RowRange {0, row_group.num_rows});
1065
30.0k
    };
1066
1067
    // Check if the page index is available and if it exists.
1068
35.0k
    PageIndex page_index;
1069
35.0k
    if (!config::enable_parquet_page_index || _colname_to_slot_id == nullptr ||
1070
35.1k
        !page_index.check_and_get_page_index_ranges(row_group.columns)) {
1071
17.5k
        read_whole_row_group();
1072
17.5k
        return Status::OK();
1073
17.5k
    }
1074
1075
17.4k
    std::vector<int> parquet_col_ids;
1076
63.0k
    for (size_t idx = 0; idx < _read_table_columns.size(); idx++) {
1077
45.5k
        const auto& read_table_col = _read_table_columns[idx];
1078
45.5k
        const auto& read_file_col = _read_file_columns[idx];
1079
45.5k
        if (!_colname_to_slot_id->contains(read_table_col)) {
1080
582
            continue;
1081
582
        }
1082
44.9k
        auto* field = _file_metadata->schema().get_column(read_file_col);
1083
1084
75.5k
        std::function<void(FieldSchema * field)> f = [&](FieldSchema* field) {
1085
75.5k
            if (!_column_ids.empty() &&
1086
75.5k
                _column_ids.find(field->get_column_id()) == _column_ids.end()) {
1087
2.79k
                return;
1088
2.79k
            }
1089
1090
72.7k
            if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
1091
4.29k
                f(&field->children[0]);
1092
68.4k
            } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
1093
3.46k
                f(&field->children[0]);
1094
3.46k
                f(&field->children[1]);
1095
64.9k
            } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
1096
27.3k
                for (int i = 0; i < field->children.size(); ++i) {
1097
19.4k
                    f(&field->children[i]);
1098
19.4k
                }
1099
57.0k
            } else {
1100
57.0k
                int parquet_col_id = field->physical_column_index;
1101
57.1k
                if (parquet_col_id >= 0) {
1102
57.1k
                    parquet_col_ids.push_back(parquet_col_id);
1103
57.1k
                }
1104
57.0k
            }
1105
72.7k
        };
1106
1107
44.9k
        f(field);
1108
44.9k
    }
1109
1110
17.5k
    auto parse_offset_index = [&]() -> Status {
1111
17.5k
        std::vector<uint8_t> off_index_buff(page_index._offset_index_size);
1112
17.5k
        Slice res(off_index_buff.data(), page_index._offset_index_size);
1113
17.5k
        size_t bytes_read = 0;
1114
17.5k
        {
1115
17.5k
            SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1116
17.5k
            RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._offset_index_start, res,
1117
17.5k
                                                          &bytes_read, _io_ctx));
1118
17.5k
        }
1119
17.5k
        _column_statistics.page_index_read_calls++;
1120
17.5k
        _col_offsets.clear();
1121
1122
57.2k
        for (auto parquet_col_id : parquet_col_ids) {
1123
57.2k
            auto& chunk = row_group.columns[parquet_col_id];
1124
57.2k
            if (chunk.offset_index_length == 0) [[unlikely]] {
1125
0
                continue;
1126
0
            }
1127
57.2k
            tparquet::OffsetIndex offset_index;
1128
57.2k
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1129
57.2k
            RETURN_IF_ERROR(
1130
57.2k
                    page_index.parse_offset_index(chunk, off_index_buff.data(), &offset_index));
1131
57.2k
            _col_offsets[parquet_col_id] = offset_index;
1132
57.2k
        }
1133
17.5k
        return Status::OK();
1134
17.5k
    };
1135
1136
    // from https://github.com/apache/doris/pull/55795
1137
17.4k
    RETURN_IF_ERROR(parse_offset_index());
1138
1139
    // Check if page index is needed for min-max filter.
1140
17.4k
    if (!_enable_filter_by_min_max || push_down_pred.empty()) {
1141
12.5k
        read_whole_row_group();
1142
12.5k
        return Status::OK();
1143
12.5k
    }
1144
1145
    // read column index.
1146
4.94k
    std::vector<uint8_t> col_index_buff(page_index._column_index_size);
1147
4.94k
    size_t bytes_read = 0;
1148
4.94k
    Slice result(col_index_buff.data(), page_index._column_index_size);
1149
4.94k
    {
1150
4.94k
        SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1151
4.94k
        RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._column_index_start, result,
1152
4.94k
                                                      &bytes_read, _io_ctx));
1153
4.94k
    }
1154
4.94k
    _column_statistics.page_index_read_calls++;
1155
1156
4.94k
    SCOPED_RAW_TIMER(&_reader_statistics.page_index_filter_time);
1157
1158
    // Construct a cacheable page index structure to avoid repeatedly reading the page index of the same column.
1159
4.94k
    ParquetPredicate::CachedPageIndexStat cached_page_index;
1160
4.94k
    cached_page_index.ctz = _ctz;
1161
4.94k
    std::function<bool(ParquetPredicate::PageIndexStat**, int)> get_stat_func =
1162
4.94k
            [&](ParquetPredicate::PageIndexStat** ans, const int cid) -> bool {
1163
4.54k
        if (cached_page_index.stats.contains(cid)) {
1164
1.92k
            *ans = &cached_page_index.stats[cid];
1165
1.92k
            return (*ans)->available;
1166
1.92k
        }
1167
2.62k
        cached_page_index.stats.emplace(cid, ParquetPredicate::PageIndexStat {});
1168
2.62k
        auto& sig_stat = cached_page_index.stats[cid];
1169
1170
2.62k
        auto* slot = _tuple_descriptor->slots()[cid];
1171
2.62k
        if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1172
            // table column not exist in file, may be schema change.
1173
0
            return false;
1174
0
        }
1175
1176
2.62k
        const auto& file_col_name =
1177
2.62k
                _table_info_node_ptr->children_file_column_name(slot->col_name());
1178
2.62k
        const FieldSchema* col_schema = _file_metadata->schema().get_column(file_col_name);
1179
2.62k
        int parquet_col_id = col_schema->physical_column_index;
1180
1181
2.62k
        if (parquet_col_id < 0) {
1182
            // complex type, not support page index yet.
1183
0
            return false;
1184
0
        }
1185
2.62k
        if (!_col_offsets.contains(parquet_col_id)) {
1186
            // If the file contains partition columns and the query applies filters on those
1187
            // partition columns, then reading the page index is unnecessary.
1188
0
            return false;
1189
0
        }
1190
1191
2.62k
        auto& column_chunk = row_group.columns[parquet_col_id];
1192
2.63k
        if (column_chunk.column_index_length == 0 || column_chunk.offset_index_length == 0) {
1193
            // column no page index.
1194
0
            return false;
1195
0
        }
1196
1197
2.62k
        tparquet::ColumnIndex column_index;
1198
2.62k
        {
1199
2.62k
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1200
2.62k
            RETURN_IF_ERROR(page_index.parse_column_index(column_chunk, col_index_buff.data(),
1201
2.62k
                                                          &column_index));
1202
2.62k
        }
1203
2.62k
        const int64_t num_of_pages = column_index.null_pages.size();
1204
2.62k
        if (num_of_pages <= 0) [[unlikely]] {
1205
            // no page. (maybe this row group no data.)
1206
0
            return false;
1207
0
        }
1208
2.62k
        DCHECK_EQ(column_index.min_values.size(), column_index.max_values.size());
1209
2.62k
        if (!column_index.__isset.null_counts) {
1210
            // not set null or null counts;
1211
0
            return false;
1212
0
        }
1213
1214
2.62k
        auto& offset_index = _col_offsets[parquet_col_id];
1215
2.62k
        const auto& page_locations = offset_index.page_locations;
1216
1217
2.62k
        sig_stat.col_schema = col_schema;
1218
2.62k
        sig_stat.num_of_pages = num_of_pages;
1219
2.62k
        sig_stat.encoded_min_value = column_index.min_values;
1220
2.62k
        sig_stat.encoded_max_value = column_index.max_values;
1221
2.62k
        sig_stat.is_all_null.resize(num_of_pages);
1222
2.62k
        sig_stat.has_null.resize(num_of_pages);
1223
2.62k
        sig_stat.ranges.resize(num_of_pages);
1224
1225
22.6k
        for (int page_id = 0; page_id < num_of_pages; page_id++) {
1226
19.9k
            sig_stat.is_all_null[page_id] = column_index.null_pages[page_id];
1227
19.9k
            sig_stat.has_null[page_id] = column_index.null_counts[page_id] > 0;
1228
1229
19.9k
            int64_t from = page_locations[page_id].first_row_index;
1230
19.9k
            int64_t to = 0;
1231
19.9k
            if (page_id == page_locations.size() - 1) {
1232
2.63k
                to = row_group_index.last_row;
1233
17.3k
            } else {
1234
17.3k
                to = page_locations[page_id + 1].first_row_index;
1235
17.3k
            }
1236
19.9k
            sig_stat.ranges[page_id] = RowRange {from, to};
1237
19.9k
        }
1238
1239
2.62k
        sig_stat.available = true;
1240
2.62k
        *ans = &sig_stat;
1241
2.62k
        return true;
1242
2.62k
    };
1243
4.94k
    cached_page_index.row_group_range = {0, row_group.num_rows};
1244
4.94k
    cached_page_index.get_stat_func = get_stat_func;
1245
1246
4.94k
    candidate_row_ranges->add({0, row_group.num_rows});
1247
4.99k
    for (const auto& predicate : push_down_pred) {
1248
4.99k
        RowRanges tmp_row_range;
1249
4.99k
        if (!predicate->evaluate_and(&cached_page_index, &tmp_row_range)) {
1250
            // no need read this row group.
1251
22
            candidate_row_ranges->clear();
1252
22
            return Status::OK();
1253
22
        }
1254
4.97k
        RowRanges::ranges_intersection(*candidate_row_ranges, tmp_row_range, candidate_row_ranges);
1255
4.97k
    }
1256
4.92k
    return Status::OK();
1257
4.94k
}
1258
1259
// Computes the candidate row ranges of one row group after applying min-max /
// bloom-filter pruning (or row-id selection when reading by explicit row ids).
//
// @param row_group_index  global first/last row offsets of this row group.
// @param row_group        thrift metadata of the row group being evaluated.
// @param push_down_pred   conjunctive predicates pushed down from the scanner.
// @param row_ranges       output: ranges (relative to the row group) to read;
//                         left empty when the whole group is filtered out.
Status ParquetReader::_process_min_max_bloom_filter(
        const RowGroupReader::RowGroupIndex& row_group_index, const tparquet::RowGroup& row_group,
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
        RowRanges* row_ranges) {
    SCOPED_RAW_TIMER(&_reader_statistics.row_group_filter_time);
    if (!_filter_groups) {
        // No row group filtering is needed;
        // for example, Iceberg reads position delete files.
        row_ranges->add({0, row_group.num_rows});
        return Status::OK();
    }

    if (_read_by_rows) {
        // Row-id driven read: emit one single-row range per requested global row
        // id that falls inside this row group. `_row_ids` is consumed front to
        // back, so it is assumed to be sorted ascending — TODO confirm at caller.
        auto group_start = row_group_index.first_row;
        auto group_end = row_group_index.last_row;

        while (!_row_ids.empty()) {
            auto v = _row_ids.front();
            if (v < group_start) {
                // Row id belongs to an earlier row group: discard it and keep
                // scanning. BUG FIX: the previous code `continue`d without
                // popping, leaving the same id at the front forever and turning
                // this loop into an infinite spin (branch had 0 coverage).
                _row_ids.pop_front();
                continue;
            } else if (v < group_end) {
                // Inside this group: convert to a group-relative [v, v+1) range.
                row_ranges->add(RowRange {v - group_start, v - group_start + 1});
                _row_ids.pop_front();
            } else {
                // Belongs to a later row group; leave it for the next call.
                break;
            }
        }
    } else {
        bool filter_this_row_group = false;
        bool filtered_by_min_max = false;
        bool filtered_by_bloom_filter = false;
        // First try to eliminate the whole row group via column statistics
        // (min-max) and/or bloom filters.
        RETURN_IF_ERROR(_process_column_stat_filter(row_group, push_down_pred,
                                                    &filter_this_row_group, &filtered_by_min_max,
                                                    &filtered_by_bloom_filter));
        // Update statistics based on which filter type eliminated the group.
        if (filter_this_row_group) {
            if (filtered_by_min_max) {
                _reader_statistics.filtered_row_groups_by_min_max++;
            }
            if (filtered_by_bloom_filter) {
                _reader_statistics.filtered_row_groups_by_bloom_filter++;
            }
        }

        if (!filter_this_row_group) {
            // The group survived; refine the ranges at page granularity using
            // the page index.
            RETURN_IF_ERROR(_process_page_index_filter(row_group, row_group_index, push_down_pred,
                                                       row_ranges));
        }
    }

    return Status::OK();
}
1311
1312
// Evaluates pushed-down predicates against row-group level column statistics
// (min/max) and, when available, split-block bloom filters, to decide whether
// the whole row group can be skipped.
//
// @param row_group                thrift metadata of the row group.
// @param push_down_pred           pushed-down predicates.
//                                 NOTE(review): the loop below iterates the
//                                 member `_push_down_predicates`, not this
//                                 parameter — presumably they are the same
//                                 list; confirm whether the parameter is
//                                 intentionally unused.
// @param filter_group             output: set to true when the group can be skipped.
// @param filtered_by_min_max      output: true when min-max stats caused the skip.
// @param filtered_by_bloom_filter output: true when a bloom filter caused the skip.
Status ParquetReader::_process_column_stat_filter(
        const tparquet::RowGroup& row_group,
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
        bool* filter_group, bool* filtered_by_min_max, bool* filtered_by_bloom_filter) {
    // Initialize output parameters before any early return so callers always
    // get deterministic values.
    *filtered_by_min_max = false;
    *filtered_by_bloom_filter = false;

    // If both filters are disabled, skip filtering
    if (!_enable_filter_by_min_max && !_enable_filter_by_bloom_filter) {
        return Status::OK();
    }

    // Cache bloom filters for each column to avoid reading the same bloom filter multiple times
    // when there are multiple predicates on the same column
    std::unordered_map<int, std::unique_ptr<ParquetBlockSplitBloomFilter>> bloom_filter_cache;

    for (const auto& predicate : _push_down_predicates) {
        // Loads the min/max statistics of column `cid` into `stat`; returns
        // false when stats are unavailable (missing column, disabled, etc.).
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_stat_func =
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
                    // Check if min-max filter is enabled
                    if (!_enable_filter_by_min_max) {
                        return false;
                    }
                    auto* slot = _tuple_descriptor->slots()[cid];
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
                        return false;
                    }
                    const auto& file_col_name =
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
                    const FieldSchema* col_schema =
                            _file_metadata->schema().get_column(file_col_name);
                    int parquet_col_id = col_schema->physical_column_index;
                    // Borrow the metadata: copying tparquet::ColumnMetaData would
                    // duplicate its (potentially large) statistics strings.
                    const auto& meta_data = row_group.columns[parquet_col_id].meta_data;
                    stat->col_schema = col_schema;
                    return ParquetPredicate::read_column_stats(col_schema, meta_data,
                                                               &_ignored_stats,
                                                               _t_metadata->created_by, stat)
                            .ok();
                };
        // Loads (or fetches from cache) the bloom filter of column `cid` into
        // `stat->bloom_filter`; returns false when no usable filter exists.
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_bloom_filter_func =
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
                    auto* slot = _tuple_descriptor->slots()[cid];
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
                        return false;
                    }
                    const auto& file_col_name =
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
                    const FieldSchema* col_schema =
                            _file_metadata->schema().get_column(file_col_name);
                    int parquet_col_id = col_schema->physical_column_index;
                    // Borrow; see note in get_stat_func about avoiding a copy.
                    const auto& meta_data = row_group.columns[parquet_col_id].meta_data;
                    if (!meta_data.__isset.bloom_filter_offset) {
                        return false;
                    }
                    auto primitive_type =
                            remove_nullable(col_schema->data_type)->get_primitive_type();
                    if (!ParquetPredicate::bloom_filter_supported(primitive_type)) {
                        return false;
                    }

                    // Check if bloom filter is enabled
                    if (!_enable_filter_by_bloom_filter) {
                        return false;
                    }

                    // Check cache first
                    auto cache_iter = bloom_filter_cache.find(parquet_col_id);
                    if (cache_iter != bloom_filter_cache.end()) {
                        // Bloom filter already loaded for this column, reuse it
                        stat->bloom_filter = std::move(cache_iter->second);
                        bloom_filter_cache.erase(cache_iter);
                        return stat->bloom_filter != nullptr;
                    }

                    if (!stat->bloom_filter) {
                        SCOPED_RAW_TIMER(&_reader_statistics.bloom_filter_read_time);
                        auto st = ParquetPredicate::read_bloom_filter(
                                meta_data, _tracing_file_reader, _io_ctx, stat);
                        if (!st.ok()) {
                            // Best-effort: a broken/unreadable bloom filter must
                            // not fail the query, only disable this filter.
                            LOG(WARNING) << "Failed to read bloom filter for column "
                                         << col_schema->name << " in file " << _scan_range.path
                                         << ", status: " << st.to_string();
                            stat->bloom_filter.reset();
                            return false;
                        }
                    }
                    return stat->bloom_filter != nullptr;
                };
        ParquetPredicate::ColumnStat stat;
        stat.ctz = _ctz;
        stat.get_stat_func = &get_stat_func;
        stat.get_bloom_filter_func = &get_bloom_filter_func;

        if (!predicate->evaluate_and(&stat)) {
            *filter_group = true;

            // Track which filter was used for filtering
            // If bloom filter was loaded, it means bloom filter was used
            if (stat.bloom_filter) {
                *filtered_by_bloom_filter = true;
            }
            // If col_schema was set but no bloom filter, it means min-max stats were used
            if (stat.col_schema && !stat.bloom_filter) {
                *filtered_by_min_max = true;
            }

            return Status::OK();
        }

        // After evaluating, if the bloom filter was used, cache it for subsequent
        // predicates. The physical column id comes straight from the schema the
        // filter was loaded for, so no scan over the tuple slots is needed
        // (previous code looped over all slots re-resolving each schema just to
        // pointer-compare it against stat.col_schema).
        if (stat.bloom_filter && stat.col_schema != nullptr) {
            bloom_filter_cache[stat.col_schema->physical_column_index] =
                    std::move(stat.bloom_filter);
        }
    }

    // Update filter statistics if this row group was not filtered
    // The statistics will be updated in _init_row_groups when filter_group is true
    return Status::OK();
}
1445
1446
229k
// Returns the file offset where a column chunk's pages begin: the dictionary
// page when the chunk has one, otherwise the first data page.
int64_t ParquetReader::_get_column_start_offset(const tparquet::ColumnMetaData& column) const {
    if (has_dict_page(column)) {
        return column.dictionary_page_offset;
    }
    return column.data_page_offset;
}
1449
1450
34.5k
void ParquetReader::_collect_profile() {
1451
34.5k
    if (_profile == nullptr) {
1452
0
        return;
1453
0
    }
1454
1455
34.5k
    if (_current_group_reader != nullptr) {
1456
28.2k
        _current_group_reader->collect_profile_before_close();
1457
28.2k
    }
1458
34.5k
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups, _reader_statistics.filtered_row_groups);
1459
34.5k
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_min_max,
1460
34.5k
                   _reader_statistics.filtered_row_groups_by_min_max);
1461
34.5k
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_bloom_filter,
1462
34.5k
                   _reader_statistics.filtered_row_groups_by_bloom_filter);
1463
34.5k
    COUNTER_UPDATE(_parquet_profile.to_read_row_groups, _reader_statistics.read_row_groups);
1464
34.5k
    COUNTER_UPDATE(_parquet_profile.total_row_groups, _total_groups);
1465
34.5k
    COUNTER_UPDATE(_parquet_profile.filtered_group_rows, _reader_statistics.filtered_group_rows);
1466
34.5k
    COUNTER_UPDATE(_parquet_profile.filtered_page_rows, _reader_statistics.filtered_page_rows);
1467
34.5k
    COUNTER_UPDATE(_parquet_profile.lazy_read_filtered_rows,
1468
34.5k
                   _reader_statistics.lazy_read_filtered_rows);
1469
34.5k
    COUNTER_UPDATE(_parquet_profile.filtered_bytes, _reader_statistics.filtered_bytes);
1470
34.5k
    COUNTER_UPDATE(_parquet_profile.raw_rows_read, _reader_statistics.read_rows);
1471
34.5k
    COUNTER_UPDATE(_parquet_profile.column_read_time, _reader_statistics.column_read_time);
1472
34.5k
    COUNTER_UPDATE(_parquet_profile.parse_meta_time, _reader_statistics.parse_meta_time);
1473
34.5k
    COUNTER_UPDATE(_parquet_profile.parse_footer_time, _reader_statistics.parse_footer_time);
1474
34.5k
    COUNTER_UPDATE(_parquet_profile.file_reader_create_time,
1475
34.5k
                   _reader_statistics.file_reader_create_time);
1476
34.5k
    COUNTER_UPDATE(_parquet_profile.open_file_num, _reader_statistics.open_file_num);
1477
34.5k
    COUNTER_UPDATE(_parquet_profile.page_index_filter_time,
1478
34.5k
                   _reader_statistics.page_index_filter_time);
1479
34.5k
    COUNTER_UPDATE(_parquet_profile.read_page_index_time, _reader_statistics.read_page_index_time);
1480
34.5k
    COUNTER_UPDATE(_parquet_profile.parse_page_index_time,
1481
34.5k
                   _reader_statistics.parse_page_index_time);
1482
34.5k
    COUNTER_UPDATE(_parquet_profile.row_group_filter_time,
1483
34.5k
                   _reader_statistics.row_group_filter_time);
1484
34.5k
    COUNTER_UPDATE(_parquet_profile.file_footer_read_calls,
1485
34.5k
                   _reader_statistics.file_footer_read_calls);
1486
34.5k
    COUNTER_UPDATE(_parquet_profile.file_footer_hit_cache,
1487
34.5k
                   _reader_statistics.file_footer_hit_cache);
1488
1489
34.5k
    COUNTER_UPDATE(_parquet_profile.skip_page_header_num, _column_statistics.skip_page_header_num);
1490
34.5k
    COUNTER_UPDATE(_parquet_profile.parse_page_header_num,
1491
34.5k
                   _column_statistics.parse_page_header_num);
1492
34.5k
    COUNTER_UPDATE(_parquet_profile.predicate_filter_time,
1493
34.5k
                   _reader_statistics.predicate_filter_time);
1494
34.5k
    COUNTER_UPDATE(_parquet_profile.dict_filter_rewrite_time,
1495
34.5k
                   _reader_statistics.dict_filter_rewrite_time);
1496
34.5k
    COUNTER_UPDATE(_parquet_profile.convert_time, _column_statistics.convert_time);
1497
34.5k
    COUNTER_UPDATE(_parquet_profile.bloom_filter_read_time,
1498
34.5k
                   _reader_statistics.bloom_filter_read_time);
1499
34.5k
    COUNTER_UPDATE(_parquet_profile.page_index_read_calls,
1500
34.5k
                   _column_statistics.page_index_read_calls);
1501
34.5k
    COUNTER_UPDATE(_parquet_profile.decompress_time, _column_statistics.decompress_time);
1502
34.5k
    COUNTER_UPDATE(_parquet_profile.decompress_cnt, _column_statistics.decompress_cnt);
1503
34.5k
    COUNTER_UPDATE(_parquet_profile.page_read_counter, _column_statistics.page_read_counter);
1504
34.5k
    COUNTER_UPDATE(_parquet_profile.page_cache_write_counter,
1505
34.5k
                   _column_statistics.page_cache_write_counter);
1506
34.5k
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_write_counter,
1507
34.5k
                   _column_statistics.page_cache_compressed_write_counter);
1508
34.5k
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_write_counter,
1509
34.5k
                   _column_statistics.page_cache_decompressed_write_counter);
1510
34.5k
    COUNTER_UPDATE(_parquet_profile.page_cache_hit_counter,
1511
34.5k
                   _column_statistics.page_cache_hit_counter);
1512
34.5k
    COUNTER_UPDATE(_parquet_profile.page_cache_missing_counter,
1513
34.5k
                   _column_statistics.page_cache_missing_counter);
1514
34.5k
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_hit_counter,
1515
34.5k
                   _column_statistics.page_cache_compressed_hit_counter);
1516
34.5k
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_hit_counter,
1517
34.5k
                   _column_statistics.page_cache_decompressed_hit_counter);
1518
34.5k
    COUNTER_UPDATE(_parquet_profile.decode_header_time, _column_statistics.decode_header_time);
1519
34.5k
    COUNTER_UPDATE(_parquet_profile.read_page_header_time,
1520
34.5k
                   _column_statistics.read_page_header_time);
1521
34.5k
    COUNTER_UPDATE(_parquet_profile.decode_value_time, _column_statistics.decode_value_time);
1522
34.5k
    COUNTER_UPDATE(_parquet_profile.decode_dict_time, _column_statistics.decode_dict_time);
1523
34.5k
    COUNTER_UPDATE(_parquet_profile.decode_level_time, _column_statistics.decode_level_time);
1524
34.5k
    COUNTER_UPDATE(_parquet_profile.decode_null_map_time, _column_statistics.decode_null_map_time);
1525
34.5k
}
1526
1527
34.5k
void ParquetReader::_collect_profile_before_close() {
1528
34.5k
    _collect_profile();
1529
34.5k
}
1530
1531
} // namespace doris