Coverage Report

Created: 2026-03-17 00:04

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/format/parquet/vparquet_reader.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/parquet/vparquet_reader.h"
19
20
#include <gen_cpp/Metrics_types.h>
21
#include <gen_cpp/PlanNodes_types.h>
22
#include <gen_cpp/parquet_types.h>
23
#include <glog/logging.h>
24
25
#include <algorithm>
26
#include <functional>
27
#include <utility>
28
29
#include "common/status.h"
30
#include "core/block/block.h"
31
#include "core/block/column_with_type_and_name.h"
32
#include "core/column/column.h"
33
#include "core/data_type/define_primitive_type.h"
34
#include "core/typeid_cast.h"
35
#include "core/types.h"
36
#include "exec/scan/file_scanner.h"
37
#include "exprs/vbloom_predicate.h"
38
#include "exprs/vdirect_in_predicate.h"
39
#include "exprs/vexpr.h"
40
#include "exprs/vexpr_context.h"
41
#include "exprs/vin_predicate.h"
42
#include "exprs/vruntimefilter_wrapper.h"
43
#include "exprs/vslot_ref.h"
44
#include "exprs/vtopn_pred.h"
45
#include "format/column_type_convert.h"
46
#include "format/parquet/parquet_block_split_bloom_filter.h"
47
#include "format/parquet/parquet_common.h"
48
#include "format/parquet/parquet_predicate.h"
49
#include "format/parquet/parquet_thrift_util.h"
50
#include "format/parquet/schema_desc.h"
51
#include "format/parquet/vparquet_file_metadata.h"
52
#include "format/parquet/vparquet_group_reader.h"
53
#include "format/parquet/vparquet_page_index.h"
54
#include "information_schema/schema_scanner.h"
55
#include "io/file_factory.h"
56
#include "io/fs/buffered_reader.h"
57
#include "io/fs/file_reader.h"
58
#include "io/fs/file_reader_writer_fwd.h"
59
#include "io/fs/tracing_file_reader.h"
60
#include "runtime/descriptors.h"
61
#include "util/slice.h"
62
#include "util/string_util.h"
63
#include "util/timezone_utils.h"
64
65
namespace cctz {
66
class time_zone;
67
} // namespace cctz
68
namespace doris {
69
class RowDescriptor;
70
class RuntimeState;
71
class SlotDescriptor;
72
class TupleDescriptor;
73
namespace io {
74
struct IOContext;
75
enum class FileCachePolicy : uint8_t;
76
} // namespace io
77
class Block;
78
} // namespace doris
79
80
namespace doris {
81
82
#include "common/compile_check_begin.h"
83
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
84
                             const TFileRangeDesc& range, size_t batch_size,
85
                             const cctz::time_zone* ctz, io::IOContext* io_ctx, RuntimeState* state,
86
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
87
94
        : _profile(profile),
88
94
          _scan_params(params),
89
94
          _scan_range(range),
90
94
          _batch_size(std::max(batch_size, _MIN_BATCH_SIZE)),
91
94
          _range_start_offset(range.start_offset),
92
94
          _range_size(range.size),
93
94
          _ctz(ctz),
94
94
          _io_ctx(io_ctx),
95
94
          _state(state),
96
94
          _enable_lazy_mat(enable_lazy_mat),
97
          _enable_filter_by_min_max(
98
94
                  state == nullptr ? true
99
94
                                   : state->query_options().enable_parquet_filter_by_min_max),
100
          _enable_filter_by_bloom_filter(
101
94
                  state == nullptr ? true
102
94
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
103
94
    _meta_cache = meta_cache;
104
94
    _init_profile();
105
94
    _init_system_properties();
106
94
    _init_file_description();
107
94
}
108
109
ParquetReader::ParquetReader(RuntimeProfile* profile, const TFileScanRangeParams& params,
110
                             const TFileRangeDesc& range, size_t batch_size,
111
                             const cctz::time_zone* ctz,
112
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
113
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
114
0
        : _profile(profile),
115
0
          _scan_params(params),
116
0
          _scan_range(range),
117
0
          _batch_size(std::max(batch_size, _MIN_BATCH_SIZE)),
118
0
          _range_start_offset(range.start_offset),
119
0
          _range_size(range.size),
120
0
          _ctz(ctz),
121
0
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
122
0
          _io_ctx_holder(std::move(io_ctx_holder)),
123
0
          _state(state),
124
0
          _enable_lazy_mat(enable_lazy_mat),
125
          _enable_filter_by_min_max(
126
0
                  state == nullptr ? true
127
0
                                   : state->query_options().enable_parquet_filter_by_min_max),
128
          _enable_filter_by_bloom_filter(
129
0
                  state == nullptr ? true
130
0
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
131
0
    _meta_cache = meta_cache;
132
0
    _init_profile();
133
0
    _init_system_properties();
134
0
    _init_file_description();
135
0
}
136
137
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
138
                             io::IOContext* io_ctx, RuntimeState* state, FileMetaCache* meta_cache,
139
                             bool enable_lazy_mat)
140
0
        : _profile(nullptr),
141
0
          _scan_params(params),
142
0
          _scan_range(range),
143
0
          _io_ctx(io_ctx),
144
0
          _state(state),
145
0
          _enable_lazy_mat(enable_lazy_mat),
146
          _enable_filter_by_min_max(
147
0
                  state == nullptr ? true
148
0
                                   : state->query_options().enable_parquet_filter_by_min_max),
149
          _enable_filter_by_bloom_filter(
150
0
                  state == nullptr ? true
151
0
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
152
0
    _meta_cache = meta_cache;
153
0
    _init_system_properties();
154
0
    _init_file_description();
155
0
}
156
157
ParquetReader::ParquetReader(const TFileScanRangeParams& params, const TFileRangeDesc& range,
158
                             std::shared_ptr<io::IOContext> io_ctx_holder, RuntimeState* state,
159
                             FileMetaCache* meta_cache, bool enable_lazy_mat)
160
0
        : _profile(nullptr),
161
0
          _scan_params(params),
162
0
          _scan_range(range),
163
0
          _io_ctx(io_ctx_holder ? io_ctx_holder.get() : nullptr),
164
0
          _io_ctx_holder(std::move(io_ctx_holder)),
165
0
          _state(state),
166
0
          _enable_lazy_mat(enable_lazy_mat),
167
          _enable_filter_by_min_max(
168
0
                  state == nullptr ? true
169
0
                                   : state->query_options().enable_parquet_filter_by_min_max),
170
          _enable_filter_by_bloom_filter(
171
0
                  state == nullptr ? true
172
0
                                   : state->query_options().enable_parquet_filter_by_bloom_filter) {
173
0
    _meta_cache = meta_cache;
174
0
    _init_system_properties();
175
0
    _init_file_description();
176
0
}
177
178
94
ParquetReader::~ParquetReader() {
179
94
    _close_internal();
180
94
}
181
182
#ifdef BE_TEST
183
// for unit test
184
66
void ParquetReader::set_file_reader(io::FileReaderSPtr file_reader) {
185
66
    _file_reader = file_reader;
186
66
    _tracing_file_reader = file_reader;
187
66
}
188
#endif
189
190
94
void ParquetReader::_init_profile() {
191
94
    if (_profile != nullptr) {
192
48
        static const char* parquet_profile = "ParquetReader";
193
48
        ADD_TIMER_WITH_LEVEL(_profile, parquet_profile, 1);
194
195
48
        _parquet_profile.filtered_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
196
48
                _profile, "RowGroupsFiltered", TUnit::UNIT, parquet_profile, 1);
197
48
        _parquet_profile.filtered_row_groups_by_min_max = ADD_CHILD_COUNTER_WITH_LEVEL(
198
48
                _profile, "RowGroupsFilteredByMinMax", TUnit::UNIT, parquet_profile, 1);
199
48
        _parquet_profile.filtered_row_groups_by_bloom_filter = ADD_CHILD_COUNTER_WITH_LEVEL(
200
48
                _profile, "RowGroupsFilteredByBloomFilter", TUnit::UNIT, parquet_profile, 1);
201
48
        _parquet_profile.to_read_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
202
48
                _profile, "RowGroupsReadNum", TUnit::UNIT, parquet_profile, 1);
203
48
        _parquet_profile.total_row_groups = ADD_CHILD_COUNTER_WITH_LEVEL(
204
48
                _profile, "RowGroupsTotalNum", TUnit::UNIT, parquet_profile, 1);
205
48
        _parquet_profile.filtered_group_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
206
48
                _profile, "FilteredRowsByGroup", TUnit::UNIT, parquet_profile, 1);
207
48
        _parquet_profile.filtered_page_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
208
48
                _profile, "FilteredRowsByPage", TUnit::UNIT, parquet_profile, 1);
209
48
        _parquet_profile.lazy_read_filtered_rows = ADD_CHILD_COUNTER_WITH_LEVEL(
210
48
                _profile, "FilteredRowsByLazyRead", TUnit::UNIT, parquet_profile, 1);
211
48
        _parquet_profile.filtered_bytes = ADD_CHILD_COUNTER_WITH_LEVEL(
212
48
                _profile, "FilteredBytes", TUnit::BYTES, parquet_profile, 1);
213
48
        _parquet_profile.raw_rows_read = ADD_CHILD_COUNTER_WITH_LEVEL(
214
48
                _profile, "RawRowsRead", TUnit::UNIT, parquet_profile, 1);
215
48
        _parquet_profile.column_read_time =
216
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ColumnReadTime", parquet_profile, 1);
217
48
        _parquet_profile.parse_meta_time =
218
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseMetaTime", parquet_profile, 1);
219
48
        _parquet_profile.parse_footer_time =
220
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ParseFooterTime", parquet_profile, 1);
221
48
        _parquet_profile.file_reader_create_time =
222
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "FileReaderCreateTime", parquet_profile, 1);
223
48
        _parquet_profile.open_file_num =
224
48
                ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "FileNum", TUnit::UNIT, parquet_profile, 1);
225
48
        _parquet_profile.page_index_read_calls =
226
48
                ADD_COUNTER_WITH_LEVEL(_profile, "PageIndexReadCalls", TUnit::UNIT, 1);
227
48
        _parquet_profile.page_index_filter_time =
228
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexFilterTime", parquet_profile, 1);
229
48
        _parquet_profile.read_page_index_time =
230
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexReadTime", parquet_profile, 1);
231
48
        _parquet_profile.parse_page_index_time =
232
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageIndexParseTime", parquet_profile, 1);
233
48
        _parquet_profile.row_group_filter_time =
234
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "RowGroupFilterTime", parquet_profile, 1);
235
48
        _parquet_profile.file_footer_read_calls =
236
48
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterReadCalls", TUnit::UNIT, 1);
237
48
        _parquet_profile.file_footer_hit_cache =
238
48
                ADD_COUNTER_WITH_LEVEL(_profile, "FileFooterHitCache", TUnit::UNIT, 1);
239
48
        _parquet_profile.decompress_time =
240
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecompressTime", parquet_profile, 1);
241
48
        _parquet_profile.decompress_cnt = ADD_CHILD_COUNTER_WITH_LEVEL(
242
48
                _profile, "DecompressCount", TUnit::UNIT, parquet_profile, 1);
243
48
        _parquet_profile.page_read_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
244
48
                _profile, "PageReadCount", TUnit::UNIT, parquet_profile, 1);
245
48
        _parquet_profile.page_cache_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
246
48
                _profile, "PageCacheWriteCount", TUnit::UNIT, parquet_profile, 1);
247
48
        _parquet_profile.page_cache_compressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
248
48
                _profile, "PageCacheCompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
249
48
        _parquet_profile.page_cache_decompressed_write_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
250
48
                _profile, "PageCacheDecompressedWriteCount", TUnit::UNIT, parquet_profile, 1);
251
48
        _parquet_profile.page_cache_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
252
48
                _profile, "PageCacheHitCount", TUnit::UNIT, parquet_profile, 1);
253
48
        _parquet_profile.page_cache_missing_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
254
48
                _profile, "PageCacheMissingCount", TUnit::UNIT, parquet_profile, 1);
255
48
        _parquet_profile.page_cache_compressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
256
48
                _profile, "PageCacheCompressedHitCount", TUnit::UNIT, parquet_profile, 1);
257
48
        _parquet_profile.page_cache_decompressed_hit_counter = ADD_CHILD_COUNTER_WITH_LEVEL(
258
48
                _profile, "PageCacheDecompressedHitCount", TUnit::UNIT, parquet_profile, 1);
259
48
        _parquet_profile.decode_header_time =
260
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderDecodeTime", parquet_profile, 1);
261
48
        _parquet_profile.read_page_header_time =
262
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PageHeaderReadTime", parquet_profile, 1);
263
48
        _parquet_profile.decode_value_time =
264
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeValueTime", parquet_profile, 1);
265
48
        _parquet_profile.decode_dict_time =
266
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeDictTime", parquet_profile, 1);
267
48
        _parquet_profile.decode_level_time =
268
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeLevelTime", parquet_profile, 1);
269
48
        _parquet_profile.decode_null_map_time =
270
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DecodeNullMapTime", parquet_profile, 1);
271
48
        _parquet_profile.skip_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
272
48
                _profile, "SkipPageHeaderNum", TUnit::UNIT, parquet_profile, 1);
273
48
        _parquet_profile.parse_page_header_num = ADD_CHILD_COUNTER_WITH_LEVEL(
274
48
                _profile, "ParsePageHeaderNum", TUnit::UNIT, parquet_profile, 1);
275
48
        _parquet_profile.predicate_filter_time =
276
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "PredicateFilterTime", parquet_profile, 1);
277
48
        _parquet_profile.dict_filter_rewrite_time =
278
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "DictFilterRewriteTime", parquet_profile, 1);
279
48
        _parquet_profile.bloom_filter_read_time =
280
48
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "BloomFilterReadTime", parquet_profile, 1);
281
48
    }
282
94
}
283
284
14
Status ParquetReader::close() {
285
14
    _close_internal();
286
14
    return Status::OK();
287
14
}
288
289
108
void ParquetReader::_close_internal() {
290
108
    if (!_closed) {
291
94
        _closed = true;
292
94
    }
293
108
}
294
295
95
Status ParquetReader::_open_file() {
296
95
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
297
0
        return Status::EndOfFile("stop");
298
0
    }
299
95
    if (_file_reader == nullptr) {
300
14
        SCOPED_RAW_TIMER(&_reader_statistics.file_reader_create_time);
301
14
        ++_reader_statistics.open_file_num;
302
14
        _file_description.mtime =
303
14
                _scan_range.__isset.modification_time ? _scan_range.modification_time : 0;
304
14
        io::FileReaderOptions reader_options =
305
14
                FileFactory::get_reader_options(_state, _file_description);
306
14
        _file_reader = DORIS_TRY(io::DelegateReader::create_file_reader(
307
14
                _profile, _system_properties, _file_description, reader_options,
308
14
                io::DelegateReader::AccessMode::RANDOM, _io_ctx));
309
14
        _tracing_file_reader = _io_ctx ? std::make_shared<io::TracingFileReader>(
310
14
                                                 _file_reader, _io_ctx->file_reader_stats)
311
14
                                       : _file_reader;
312
14
    }
313
314
95
    if (_file_metadata == nullptr) {
315
80
        SCOPED_RAW_TIMER(&_reader_statistics.parse_footer_time);
316
80
        if (_tracing_file_reader->size() <= sizeof(PARQUET_VERSION_NUMBER)) {
317
            // Some system may generate parquet file with only 4 bytes: PAR1
318
            // Should consider it as empty file.
319
0
            return Status::EndOfFile("open file failed, empty parquet file {} with size: {}",
320
0
                                     _scan_range.path, _tracing_file_reader->size());
321
0
        }
322
80
        size_t meta_size = 0;
323
80
        bool enable_mapping_varbinary = _scan_params.__isset.enable_mapping_varbinary
324
80
                                                ? _scan_params.enable_mapping_varbinary
325
80
                                                : false;
326
80
        bool enable_mapping_timestamp_tz = _scan_params.__isset.enable_mapping_timestamp_tz
327
80
                                                   ? _scan_params.enable_mapping_timestamp_tz
328
80
                                                   : false;
329
80
        if (_meta_cache == nullptr) {
330
            // wrap _file_metadata with unique ptr, so that it can be released finally.
331
46
            RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
332
46
                                                &meta_size, _io_ctx, enable_mapping_varbinary,
333
46
                                                enable_mapping_timestamp_tz));
334
46
            _file_metadata = _file_metadata_ptr.get();
335
            // parse magic number & parse meta data
336
46
            _reader_statistics.file_footer_read_calls += 1;
337
46
        } else {
338
34
            const auto& file_meta_cache_key =
339
34
                    FileMetaCache::get_key(_tracing_file_reader, _file_description);
340
34
            if (!_meta_cache->lookup(file_meta_cache_key, &_meta_cache_handle)) {
341
22
                RETURN_IF_ERROR(parse_thrift_footer(_tracing_file_reader, &_file_metadata_ptr,
342
22
                                                    &meta_size, _io_ctx, enable_mapping_varbinary,
343
22
                                                    enable_mapping_timestamp_tz));
344
                // _file_metadata_ptr.release() : move control of _file_metadata to _meta_cache_handle
345
22
                _meta_cache->insert(file_meta_cache_key, _file_metadata_ptr.release(),
346
22
                                    &_meta_cache_handle);
347
22
                _file_metadata = _meta_cache_handle.data<FileMetaData>();
348
22
                _reader_statistics.file_footer_read_calls += 1;
349
22
            } else {
350
12
                _reader_statistics.file_footer_hit_cache++;
351
12
            }
352
34
            _file_metadata = _meta_cache_handle.data<FileMetaData>();
353
34
        }
354
355
80
        if (_file_metadata == nullptr) {
356
0
            return Status::InternalError("failed to get file meta data: {}",
357
0
                                         _file_description.path);
358
0
        }
359
80
    }
360
95
    return Status::OK();
361
95
}
362
363
33
Status ParquetReader::get_file_metadata_schema(const FieldDescriptor** ptr) {
364
33
    RETURN_IF_ERROR(_open_file());
365
33
    DCHECK(_file_metadata != nullptr);
366
33
    *ptr = &_file_metadata->schema();
367
33
    return Status::OK();
368
33
}
369
370
94
void ParquetReader::_init_system_properties() {
371
94
    if (_scan_range.__isset.file_type) {
372
        // for compatibility
373
0
        _system_properties.system_type = _scan_range.file_type;
374
94
    } else {
375
94
        _system_properties.system_type = _scan_params.file_type;
376
94
    }
377
94
    _system_properties.properties = _scan_params.properties;
378
94
    _system_properties.hdfs_params = _scan_params.hdfs_params;
379
94
    if (_scan_params.__isset.broker_addresses) {
380
0
        _system_properties.broker_addresses.assign(_scan_params.broker_addresses.begin(),
381
0
                                                   _scan_params.broker_addresses.end());
382
0
    }
383
94
}
384
385
94
void ParquetReader::_init_file_description() {
386
94
    _file_description.path = _scan_range.path;
387
94
    _file_description.file_size = _scan_range.__isset.file_size ? _scan_range.file_size : -1;
388
94
    if (_scan_range.__isset.fs_name) {
389
0
        _file_description.fs_name = _scan_range.fs_name;
390
0
    }
391
94
}
392
393
Status ParquetReader::init_reader(
394
        const std::vector<ColumnDescriptor>& column_descs,
395
        std::unordered_map<std::string, uint32_t>* col_name_to_block_idx,
396
        const VExprContextSPtrs& conjuncts,
397
        phmap::flat_hash_map<int, std::vector<std::shared_ptr<ColumnPredicate>>>&
398
                slot_id_to_predicates,
399
        const TupleDescriptor* tuple_descriptor, const RowDescriptor* row_descriptor,
400
        const std::unordered_map<std::string, int>* colname_to_slot_id,
401
        const VExprContextSPtrs* not_single_slot_filter_conjuncts,
402
24
        const std::unordered_map<int, VExprContextSPtrs>* slot_id_to_filter_conjuncts) {
403
    // Store essential params early so hooks can access them
404
24
    _tuple_descriptor = tuple_descriptor;
405
24
    _row_descriptor = row_descriptor;
406
407
    // Open file first so hooks can access file metadata
408
24
    RETURN_IF_ERROR(_open_file());
409
410
    // Hook: let subclasses customize column selection and schema mapping
411
24
    std::vector<std::string> column_names;
412
24
    std::shared_ptr<TableSchemaChangeHelper::Node> table_info_node =
413
24
            TableSchemaChangeHelper::ConstNode::get_instance();
414
24
    std::set<uint64_t> column_ids;
415
24
    std::set<uint64_t> filter_column_ids;
416
417
24
    RETURN_IF_ERROR(on_before_init_columns(column_descs, column_names, table_info_node, column_ids,
418
24
                                           filter_column_ids));
419
420
    // Core init
421
24
    RETURN_IF_ERROR(_do_init_reader(
422
24
            column_names, col_name_to_block_idx, conjuncts, slot_id_to_predicates, tuple_descriptor,
423
24
            row_descriptor, colname_to_slot_id, not_single_slot_filter_conjuncts,
424
24
            slot_id_to_filter_conjuncts, table_info_node, column_ids, filter_column_ids));
425
426
    // Hook: let subclasses do post-init work (e.g. read delete files)
427
24
    RETURN_IF_ERROR(on_after_init());
428
429
    // Auto-detect SYNTHESIZED columns
430
74
    for (const auto& desc : column_descs) {
431
74
        if (desc.category == ColumnCategory::SYNTHESIZED) {
432
0
            _lazy_read_ctx.predicate_synthesized_col_names.push_back(desc.name);
433
0
        }
434
74
    }
435
436
24
    return Status::OK();
437
24
}
438
439
Status ParquetReader::on_before_init_columns(
440
        const std::vector<ColumnDescriptor>& column_descs, std::vector<std::string>& column_names,
441
        std::shared_ptr<TableSchemaChangeHelper::Node>& /*table_info_node*/,
442
9
        std::set<uint64_t>& /*column_ids*/, std::set<uint64_t>& /*filter_column_ids*/) {
443
    // Default: extract REGULAR + INTERNAL column names from descriptors
444
21
    for (const auto& desc : column_descs) {
445
21
        if (desc.category == ColumnCategory::REGULAR || desc.category == ColumnCategory::INTERNAL) {
446
21
            column_names.push_back(desc.name);
447
21
        }
448
21
    }
449
9
    return Status::OK();
450
9
}
451
452
Status ParquetReader::_do_init_reader(
453
        const std::vector<std::string>& all_column_names,
454
        std::unordered_map<std::string, uint32_t>* col_name_to_block_idx,
455
        const VExprContextSPtrs& conjuncts,
456
        phmap::flat_hash_map<int, std::vector<std::shared_ptr<ColumnPredicate>>>&
457
                slot_id_to_predicates,
458
        const TupleDescriptor* tuple_descriptor, const RowDescriptor* row_descriptor,
459
        const std::unordered_map<std::string, int>* colname_to_slot_id,
460
        const VExprContextSPtrs* not_single_slot_filter_conjuncts,
461
        const std::unordered_map<int, VExprContextSPtrs>* slot_id_to_filter_conjuncts,
462
        std::shared_ptr<TableSchemaChangeHelper::Node> table_info_node_ptr,
463
62
        const std::set<uint64_t>& column_ids, const std::set<uint64_t>& filter_column_ids) {
464
62
    _col_name_to_block_idx = col_name_to_block_idx;
465
62
    _tuple_descriptor = tuple_descriptor;
466
62
    _row_descriptor = row_descriptor;
467
62
    _colname_to_slot_id = colname_to_slot_id;
468
62
    _not_single_slot_filter_conjuncts = not_single_slot_filter_conjuncts;
469
62
    _slot_id_to_filter_conjuncts = slot_id_to_filter_conjuncts;
470
62
    _table_info_node_ptr = table_info_node_ptr;
471
62
    _column_ids = column_ids;
472
62
    _filter_column_ids = filter_column_ids;
473
474
    // _open_file() is called by init_reader template method before hooks.
475
    // For standalone _do_init_reader callers (tvf, load, etc.), open the file here if not already opened.
476
62
    if (_file_metadata == nullptr) {
477
38
        RETURN_IF_ERROR(_open_file());
478
38
    }
479
62
    _t_metadata = &(_file_metadata->to_thrift());
480
62
    if (_file_metadata == nullptr) {
481
0
        return Status::InternalError("failed to init parquet reader, please open reader first");
482
0
    }
483
484
62
    SCOPED_RAW_TIMER(&_reader_statistics.parse_meta_time);
485
62
    _total_groups = _t_metadata->row_groups.size();
486
62
    if (_total_groups == 0) {
487
0
        return Status::EndOfFile("init reader failed, empty parquet file: " + _scan_range.path);
488
0
    }
489
62
    _current_row_group_index = RowGroupReader::RowGroupIndex {-1, 0, 0};
490
491
62
    _table_column_names = &all_column_names;
492
62
    auto schema_desc = _file_metadata->schema();
493
494
62
    std::map<std::string, std::string> required_file_columns; //file column -> table column
495
406
    for (auto table_column_name : all_column_names) {
496
406
        if (_table_info_node_ptr->children_column_exists(table_column_name)) {
497
406
            required_file_columns.emplace(
498
406
                    _table_info_node_ptr->children_file_column_name(table_column_name),
499
406
                    table_column_name);
500
406
        } else {
501
0
            _missing_cols.emplace_back(table_column_name);
502
0
        }
503
406
    }
504
695
    for (int i = 0; i < schema_desc.size(); ++i) {
505
633
        const auto& name = schema_desc.get_column(i)->name;
506
633
        if (required_file_columns.contains(name)) {
507
401
            _read_file_columns.emplace_back(name);
508
401
            _read_table_columns.emplace_back(required_file_columns[name]);
509
401
            _read_table_columns_set.insert(required_file_columns[name]);
510
401
        }
511
633
    }
512
    // build column predicates for column lazy read
513
62
    _lazy_read_ctx.conjuncts = conjuncts;
514
62
    _lazy_read_ctx.slot_id_to_predicates = slot_id_to_predicates;
515
62
    return Status::OK();
516
62
}
517
518
0
bool ParquetReader::_exists_in_file(const std::string& expr_name) const {
519
    // `_read_table_columns_set` is used to ensure that only columns actually read are subject to min-max filtering.
520
    // This primarily handles cases where partition columns also exist in a file. The reason it's not modified
521
    // in `_table_info_node_ptr` is that Iceberg、Hudi has inconsistent requirements for this node;
522
    // Iceberg partition evolution need read partition columns from a file.
523
    // hudi set `hoodie.datasource.write.drop.partition.columns=false` not need read partition columns from a file.
524
0
    return _table_info_node_ptr->children_column_exists(expr_name) &&
525
0
           _read_table_columns_set.contains(expr_name);
526
0
}
527
528
0
bool ParquetReader::_type_matches(const int cid) const {
529
0
    auto* slot = _tuple_descriptor->slots()[cid];
530
0
    auto table_col_type = remove_nullable(slot->type());
531
532
0
    const auto& file_col_name = _table_info_node_ptr->children_file_column_name(slot->col_name());
533
0
    const auto& file_col_type =
534
0
            remove_nullable(_file_metadata->schema().get_column(file_col_name)->data_type);
535
536
0
    return (table_col_type->get_primitive_type() == file_col_type->get_primitive_type()) &&
537
0
           !is_complex_type(table_col_type->get_primitive_type());
538
0
}
539
540
Status ParquetReader::set_fill_columns(
541
        const std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>&
542
                partition_columns,
543
35
        const std::unordered_map<std::string, VExprContextSPtr>& missing_columns) {
544
35
    LOG(INFO) << "[DEBUG ParquetReader::set_fill_columns] partition_columns.size="
545
35
              << partition_columns.size() << " missing_columns.size=" << missing_columns.size();
546
35
    for (const auto& kv : partition_columns) {
547
5
        LOG(INFO) << "[DEBUG ParquetReader::set_fill_columns] partition col: " << kv.first
548
5
                  << " value=" << std::get<0>(kv.second);
549
5
    }
550
    // Store in TableFormatReader for on_fill_partition/missing_columns hooks
551
35
    set_fill_column_data(partition_columns, missing_columns, _col_name_to_block_idx);
552
553
    // std::unordered_map<column_name, std::pair<col_id, slot_id>>
554
35
    std::unordered_map<std::string, std::pair<uint32_t, int>> predicate_columns;
555
556
    // TODO(gabriel): we should try to clear too much structs which are used to represent conjuncts and predicates.
557
    // visit_slot for lazy mat.
558
39
    std::function<void(VExpr * expr)> visit_slot = [&](VExpr* expr) {
559
39
        if (expr->is_slot_ref()) {
560
13
            VSlotRef* slot_ref = static_cast<VSlotRef*>(expr);
561
13
            auto expr_name = slot_ref->expr_name();
562
13
            predicate_columns.emplace(expr_name,
563
13
                                      std::make_pair(slot_ref->column_id(), slot_ref->slot_id()));
564
13
            if (slot_ref->column_id() == 0) {
565
3
                _lazy_read_ctx.resize_first_column = false;
566
3
            }
567
13
            return;
568
13
        }
569
26
        for (auto& child : expr->children()) {
570
26
            visit_slot(child.get());
571
26
        }
572
26
    };
573
35
    for (const auto& conjunct : _lazy_read_ctx.conjuncts) {
574
13
        auto expr = conjunct->root();
575
576
13
        if (expr->is_rf_wrapper()) {
577
            // REF: src/runtime_filter/runtime_filter_consumer.cpp
578
0
            VRuntimeFilterWrapper* runtime_filter = assert_cast<VRuntimeFilterWrapper*>(expr.get());
579
580
0
            auto filter_impl = runtime_filter->get_impl();
581
0
            visit_slot(filter_impl.get());
582
13
        } else {
583
13
            visit_slot(expr.get());
584
13
        }
585
13
    }
586
35
    if (!_lazy_read_ctx.slot_id_to_predicates.empty()) {
587
0
        auto and_pred = AndBlockColumnPredicate::create_unique();
588
0
        for (const auto& entry : _lazy_read_ctx.slot_id_to_predicates) {
589
0
            for (const auto& pred : entry.second) {
590
0
                if (!_exists_in_file(pred->col_name()) || !_type_matches(pred->column_id())) {
591
0
                    continue;
592
0
                }
593
0
                and_pred->add_column_predicate(
594
0
                        SingleColumnBlockPredicate::create_unique(pred->clone(pred->column_id())));
595
0
            }
596
0
        }
597
0
        if (and_pred->num_of_column_predicate() > 0) {
598
0
            _push_down_predicates.push_back(std::move(and_pred));
599
0
        }
600
0
    }
601
602
35
    const FieldDescriptor& schema = _file_metadata->schema();
603
604
104
    for (auto& read_table_col : _read_table_columns) {
605
104
        _lazy_read_ctx.all_read_columns.emplace_back(read_table_col);
606
607
104
        auto file_column_name = _table_info_node_ptr->children_file_column_name(read_table_col);
608
104
        PrimitiveType column_type =
609
104
                schema.get_column(file_column_name)->data_type->get_primitive_type();
610
104
        if (is_complex_type(column_type)) {
611
2
            _lazy_read_ctx.has_complex_type = true;
612
2
        }
613
104
        if (predicate_columns.size() > 0) {
614
12
            auto iter = predicate_columns.find(read_table_col);
615
12
            if (iter == predicate_columns.end()) {
616
4
                _lazy_read_ctx.lazy_read_columns.emplace_back(read_table_col);
617
8
            } else {
618
8
                _lazy_read_ctx.predicate_columns.first.emplace_back(iter->first);
619
8
                _lazy_read_ctx.predicate_columns.second.emplace_back(iter->second.second);
620
8
                _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
621
8
            }
622
12
        }
623
104
    }
624
35
    if (_row_id_column_iterator_pair.first != nullptr) {
625
5
        _lazy_read_ctx.all_predicate_col_ids.emplace_back(_row_id_column_iterator_pair.second);
626
5
    }
627
628
35
    for (auto& kv : partition_columns) {
629
5
        auto iter = predicate_columns.find(kv.first);
630
5
        if (iter == predicate_columns.end()) {
631
0
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
632
5
        } else {
633
5
            _lazy_read_ctx.predicate_partition_columns.emplace(kv.first, kv.second);
634
5
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
635
5
        }
636
5
    }
637
638
35
    for (auto& kv : missing_columns) {
639
0
        auto iter = predicate_columns.find(kv.first);
640
0
        if (iter == predicate_columns.end()) {
641
0
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
642
0
        } else {
643
            //For check missing column :   missing column == xx, missing column is null,missing column is not null.
644
0
            if (_slot_id_to_filter_conjuncts->find(iter->second.second) !=
645
0
                _slot_id_to_filter_conjuncts->end()) {
646
0
                for (auto& ctx : _slot_id_to_filter_conjuncts->find(iter->second.second)->second) {
647
0
                    _lazy_read_ctx.missing_columns_conjuncts.emplace_back(ctx);
648
0
                }
649
0
            }
650
651
0
            _lazy_read_ctx.predicate_missing_columns.emplace(kv.first, kv.second);
652
0
            _lazy_read_ctx.all_predicate_col_ids.emplace_back(iter->second.first);
653
0
        }
654
0
    }
655
656
35
    if (_enable_lazy_mat && _lazy_read_ctx.predicate_columns.first.size() > 0 &&
657
35
        _lazy_read_ctx.lazy_read_columns.size() > 0) {
658
2
        _lazy_read_ctx.can_lazy_read = true;
659
2
    }
660
661
35
    if (!_lazy_read_ctx.can_lazy_read) {
662
33
        for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
663
3
            _lazy_read_ctx.partition_columns.emplace(kv.first, kv.second);
664
3
        }
665
33
        for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
666
0
            _lazy_read_ctx.missing_columns.emplace(kv.first, kv.second);
667
0
        }
668
33
    }
669
670
    // Populate col_names vectors for ColumnProcessor path.
671
35
    for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
672
5
        _lazy_read_ctx.predicate_partition_col_names.emplace_back(kv.first);
673
5
    }
674
35
    for (auto& kv : _lazy_read_ctx.predicate_missing_columns) {
675
0
        _lazy_read_ctx.predicate_missing_col_names.emplace_back(kv.first);
676
0
    }
677
35
    for (auto& kv : _lazy_read_ctx.partition_columns) {
678
3
        _lazy_read_ctx.partition_col_names.emplace_back(kv.first);
679
3
    }
680
35
    for (auto& kv : _lazy_read_ctx.missing_columns) {
681
0
        _lazy_read_ctx.missing_col_names.emplace_back(kv.first);
682
0
    }
683
684
35
    if (_filter_groups && (_total_groups == 0 || _t_metadata->num_rows == 0 || _range_size < 0)) {
685
0
        return Status::EndOfFile("No row group to read");
686
0
    }
687
35
    _fill_all_columns = true;
688
35
    return Status::OK();
689
35
}
690
691
// init file reader and file metadata for parsing schema
692
0
Status ParquetReader::init_schema_reader() {
693
0
    RETURN_IF_ERROR(_open_file());
694
0
    _t_metadata = &(_file_metadata->to_thrift());
695
0
    return Status::OK();
696
0
}
697
698
Status ParquetReader::get_parsed_schema(std::vector<std::string>* col_names,
699
0
                                        std::vector<DataTypePtr>* col_types) {
700
0
    _total_groups = _t_metadata->row_groups.size();
701
0
    auto schema_desc = _file_metadata->schema();
702
0
    for (int i = 0; i < schema_desc.size(); ++i) {
703
        // Get the Column Reader for the boolean column
704
0
        col_names->emplace_back(schema_desc.get_column(i)->name);
705
0
        col_types->emplace_back(make_nullable(schema_desc.get_column(i)->data_type));
706
0
    }
707
0
    return Status::OK();
708
0
}
709
710
Status ParquetReader::get_columns(std::unordered_map<std::string, DataTypePtr>* name_to_type,
711
14
                                  std::unordered_set<std::string>* missing_cols) {
712
14
    const auto& schema_desc = _file_metadata->schema();
713
14
    std::unordered_set<std::string> column_names;
714
14
    schema_desc.get_column_names(&column_names);
715
210
    for (auto& name : column_names) {
716
210
        auto field = schema_desc.get_column(name);
717
210
        name_to_type->emplace(name, field->data_type);
718
210
    }
719
14
    for (auto& col : _missing_cols) {
720
0
        missing_cols->insert(col);
721
0
    }
722
14
    return Status::OK();
723
14
}
724
725
47
Status ParquetReader::get_next_block(Block* block, size_t* read_rows, bool* eof) {
726
47
    RETURN_IF_ERROR(_do_get_next_block(block, read_rows, eof));
727
47
    return Status::OK();
728
47
}
729
730
47
Status ParquetReader::_do_get_next_block(Block* block, size_t* read_rows, bool* eof) {
731
47
    if (_current_group_reader == nullptr || _row_group_eof) {
732
35
        Status st = _next_row_group_reader();
733
35
        if (!st.ok() && !st.is<ErrorCode::END_OF_FILE>()) {
734
0
            return st;
735
0
        }
736
35
        if (_current_group_reader == nullptr || _row_group_eof || st.is<ErrorCode::END_OF_FILE>()) {
737
0
            _current_group_reader.reset(nullptr);
738
0
            _row_group_eof = true;
739
0
            *read_rows = 0;
740
0
            *eof = true;
741
0
            return Status::OK();
742
0
        }
743
35
    }
744
47
    if (_push_down_agg_type == TPushAggOp::type::COUNT) {
745
0
        auto rows = std::min(_current_group_reader->get_remaining_rows(), (int64_t)_batch_size);
746
747
0
        _current_group_reader->set_remaining_rows(_current_group_reader->get_remaining_rows() -
748
0
                                                  rows);
749
0
        auto mutate_columns = block->mutate_columns();
750
0
        for (auto& col : mutate_columns) {
751
0
            col->resize(rows);
752
0
        }
753
0
        block->set_columns(std::move(mutate_columns));
754
755
0
        *read_rows = rows;
756
0
        if (_current_group_reader->get_remaining_rows() == 0) {
757
0
            _current_group_reader.reset(nullptr);
758
0
        }
759
760
0
        return Status::OK();
761
0
    }
762
763
47
    SCOPED_RAW_TIMER(&_reader_statistics.column_read_time);
764
47
    Status batch_st =
765
47
            _current_group_reader->next_batch(block, _batch_size, read_rows, &_row_group_eof);
766
47
    if (batch_st.is<ErrorCode::END_OF_FILE>()) {
767
0
        block->clear_column_data();
768
0
        _current_group_reader.reset(nullptr);
769
0
        *read_rows = 0;
770
0
        *eof = true;
771
0
        return Status::OK();
772
0
    }
773
774
47
    if (!batch_st.ok()) {
775
0
        return Status::InternalError("Read parquet file {} failed, reason = {}", _scan_range.path,
776
0
                                     batch_st.to_string());
777
0
    }
778
779
47
    if (_row_group_eof) {
780
35
        auto column_st = _current_group_reader->merged_column_statistics();
781
35
        _column_statistics.merge(column_st);
782
35
        _reader_statistics.lazy_read_filtered_rows +=
783
35
                _current_group_reader->lazy_read_filtered_rows();
784
35
        _reader_statistics.predicate_filter_time += _current_group_reader->predicate_filter_time();
785
35
        _reader_statistics.dict_filter_rewrite_time +=
786
35
                _current_group_reader->dict_filter_rewrite_time();
787
35
        if (_current_row_group_index.row_group_id + 1 == _total_groups) {
788
35
            *eof = true;
789
35
        } else {
790
0
            *eof = false;
791
0
        }
792
35
    }
793
47
    return Status::OK();
794
47
}
795
796
RowGroupReader::PositionDeleteContext ParquetReader::_get_position_delete_ctx(
797
35
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index) {
798
35
    if (_delete_rows == nullptr) {
799
35
        return RowGroupReader::PositionDeleteContext(row_group.num_rows, row_group_index.first_row);
800
35
    }
801
0
    const int64_t* delete_rows = &(*_delete_rows)[0];
802
0
    const int64_t* delete_rows_end = delete_rows + _delete_rows->size();
803
0
    const int64_t* start_pos = std::lower_bound(delete_rows + _delete_rows_index, delete_rows_end,
804
0
                                                row_group_index.first_row);
805
0
    int64_t start_index = start_pos - delete_rows;
806
0
    const int64_t* end_pos = std::lower_bound(start_pos, delete_rows_end, row_group_index.last_row);
807
0
    int64_t end_index = end_pos - delete_rows;
808
0
    _delete_rows_index = end_index;
809
0
    return RowGroupReader::PositionDeleteContext(*_delete_rows, row_group.num_rows,
810
0
                                                 row_group_index.first_row, start_index, end_index);
811
35
}
812
813
35
Status ParquetReader::_next_row_group_reader() {
814
35
    if (_current_group_reader != nullptr) {
815
0
        _current_group_reader->collect_profile_before_close();
816
0
    }
817
818
35
    RowRanges candidate_row_ranges;
819
35
    while (++_current_row_group_index.row_group_id < _total_groups) {
820
35
        const auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
821
35
        _current_row_group_index.first_row = _current_row_group_index.last_row;
822
35
        _current_row_group_index.last_row = _current_row_group_index.last_row + row_group.num_rows;
823
824
35
        if (_filter_groups && _is_misaligned_range_group(row_group)) {
825
0
            continue;
826
0
        }
827
828
35
        candidate_row_ranges.clear();
829
        // The range of lines to be read is determined by the push down predicate.
830
35
        RETURN_IF_ERROR(_process_min_max_bloom_filter(
831
35
                _current_row_group_index, row_group, _push_down_predicates, &candidate_row_ranges));
832
833
35
        std::function<int64_t(const FieldSchema*)> column_compressed_size =
834
134
                [&row_group, &column_compressed_size](const FieldSchema* field) -> int64_t {
835
134
            if (field->physical_column_index >= 0) {
836
120
                int parquet_col_id = field->physical_column_index;
837
120
                if (row_group.columns[parquet_col_id].__isset.meta_data) {
838
120
                    return row_group.columns[parquet_col_id].meta_data.total_compressed_size;
839
120
                }
840
0
                return 0;
841
120
            }
842
14
            int64_t size = 0;
843
30
            for (const FieldSchema& child : field->children) {
844
30
                size += column_compressed_size(&child);
845
30
            }
846
14
            return size;
847
134
        };
848
35
        int64_t group_size = 0; // only calculate the needed columns
849
104
        for (auto& read_col : _read_file_columns) {
850
104
            const FieldSchema* field = _file_metadata->schema().get_column(read_col);
851
104
            group_size += column_compressed_size(field);
852
104
        }
853
854
35
        _reader_statistics.read_rows += candidate_row_ranges.count();
855
35
        if (_io_ctx) {
856
14
            _io_ctx->file_reader_stats->read_rows += candidate_row_ranges.count();
857
14
        }
858
859
35
        if (candidate_row_ranges.count() != 0) {
860
            // need read this row group.
861
35
            _reader_statistics.read_row_groups++;
862
35
            _reader_statistics.filtered_page_rows +=
863
35
                    row_group.num_rows - candidate_row_ranges.count();
864
35
            break;
865
35
        } else {
866
            // this row group be filtered.
867
0
            _reader_statistics.filtered_row_groups++;
868
0
            _reader_statistics.filtered_bytes += group_size;
869
0
            _reader_statistics.filtered_group_rows += row_group.num_rows;
870
0
        }
871
35
    }
872
873
35
    if (_current_row_group_index.row_group_id == _total_groups) {
874
0
        _row_group_eof = true;
875
0
        _current_group_reader.reset(nullptr);
876
0
        return Status::EndOfFile("No next RowGroupReader");
877
0
    }
878
879
    // process page index and generate the ranges to read
880
35
    auto& row_group = _t_metadata->row_groups[_current_row_group_index.row_group_id];
881
882
35
    RowGroupReader::PositionDeleteContext position_delete_ctx =
883
35
            _get_position_delete_ctx(row_group, _current_row_group_index);
884
35
    io::FileReaderSPtr group_file_reader;
885
35
    if (typeid_cast<io::InMemoryFileReader*>(_file_reader.get())) {
886
        // InMemoryFileReader has the ability to merge small IO
887
0
        group_file_reader = _file_reader;
888
35
    } else {
889
35
        size_t avg_io_size = 0;
890
35
        const std::vector<io::PrefetchRange> io_ranges =
891
35
                _generate_random_access_ranges(_current_row_group_index, &avg_io_size);
892
35
        int64_t merged_read_slice_size = -1;
893
35
        if (_state != nullptr && _state->query_options().__isset.merge_read_slice_size) {
894
25
            merged_read_slice_size = _state->query_options().merge_read_slice_size;
895
25
        }
896
        // The underlying page reader will prefetch data in column.
897
        // Using both MergeRangeFileReader and BufferedStreamReader simultaneously would waste a lot of memory.
898
35
        group_file_reader =
899
35
                avg_io_size < io::MergeRangeFileReader::SMALL_IO
900
35
                        ? std::make_shared<io::MergeRangeFileReader>(
901
35
                                  _profile, _file_reader, io_ranges, merged_read_slice_size)
902
35
                        : _file_reader;
903
35
    }
904
35
    _current_group_reader.reset(new RowGroupReader(
905
35
            _io_ctx ? std::make_shared<io::TracingFileReader>(group_file_reader,
906
14
                                                              _io_ctx->file_reader_stats)
907
35
                    : group_file_reader,
908
35
            _read_table_columns, _current_row_group_index.row_group_id, row_group, _ctz, _io_ctx,
909
35
            position_delete_ctx, _lazy_read_ctx, _state, _column_ids, _filter_column_ids));
910
35
    _row_group_eof = false;
911
912
35
    _current_group_reader->set_current_row_group_idx(_current_row_group_index);
913
35
    _current_group_reader->set_row_id_column_iterator(_row_id_column_iterator_pair);
914
35
    _current_group_reader->set_col_name_to_block_idx(_col_name_to_block_idx);
915
35
    _current_group_reader->set_table_format_reader(this);
916
917
35
    _current_group_reader->_table_info_node_ptr = _table_info_node_ptr;
918
35
    return _current_group_reader->init(_file_metadata->schema(), candidate_row_ranges, _col_offsets,
919
35
                                       _tuple_descriptor, _row_descriptor, _colname_to_slot_id,
920
35
                                       _not_single_slot_filter_conjuncts,
921
35
                                       _slot_id_to_filter_conjuncts);
922
35
}
923
924
std::vector<io::PrefetchRange> ParquetReader::_generate_random_access_ranges(
925
35
        const RowGroupReader::RowGroupIndex& group, size_t* avg_io_size) {
926
35
    std::vector<io::PrefetchRange> result;
927
35
    int64_t last_chunk_end = -1;
928
35
    size_t total_io_size = 0;
929
35
    std::function<void(const FieldSchema*, const tparquet::RowGroup&)> scalar_range =
930
132
            [&](const FieldSchema* field, const tparquet::RowGroup& row_group) {
931
132
                if (_column_ids.empty() ||
932
132
                    _column_ids.find(field->get_column_id()) != _column_ids.end()) {
933
128
                    if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
934
2
                        scalar_range(&field->children[0], row_group);
935
126
                    } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
936
0
                        scalar_range(&field->children[0], row_group);
937
0
                        scalar_range(&field->children[1], row_group);
938
126
                    } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
939
37
                        for (int i = 0; i < field->children.size(); ++i) {
940
26
                            scalar_range(&field->children[i], row_group);
941
26
                        }
942
115
                    } else {
943
115
                        const tparquet::ColumnChunk& chunk =
944
115
                                row_group.columns[field->physical_column_index];
945
115
                        auto& chunk_meta = chunk.meta_data;
946
115
                        int64_t chunk_start = has_dict_page(chunk_meta)
947
115
                                                      ? chunk_meta.dictionary_page_offset
948
115
                                                      : chunk_meta.data_page_offset;
949
115
                        int64_t chunk_end = chunk_start + chunk_meta.total_compressed_size;
950
115
                        DCHECK_GE(chunk_start, last_chunk_end);
951
115
                        result.emplace_back(chunk_start, chunk_end);
952
115
                        total_io_size += chunk_meta.total_compressed_size;
953
115
                        last_chunk_end = chunk_end;
954
115
                    }
955
128
                }
956
132
            };
957
35
    const tparquet::RowGroup& row_group = _t_metadata->row_groups[group.row_group_id];
958
104
    for (const auto& read_col : _read_file_columns) {
959
104
        const FieldSchema* field = _file_metadata->schema().get_column(read_col);
960
104
        scalar_range(field, row_group);
961
104
    }
962
35
    if (!result.empty()) {
963
34
        *avg_io_size = total_io_size / result.size();
964
34
    }
965
35
    return result;
966
35
}
967
968
35
bool ParquetReader::_is_misaligned_range_group(const tparquet::RowGroup& row_group) {
969
35
    int64_t start_offset = _get_column_start_offset(row_group.columns[0].meta_data);
970
971
35
    auto& last_column = row_group.columns[row_group.columns.size() - 1].meta_data;
972
35
    int64_t end_offset = _get_column_start_offset(last_column) + last_column.total_compressed_size;
973
974
35
    int64_t row_group_mid = start_offset + (end_offset - start_offset) / 2;
975
35
    if (!(row_group_mid >= _range_start_offset &&
976
35
          row_group_mid < _range_start_offset + _range_size)) {
977
0
        return true;
978
0
    }
979
35
    return false;
980
35
}
981
982
Status ParquetReader::_process_page_index_filter(
983
        const tparquet::RowGroup& row_group, const RowGroupReader::RowGroupIndex& row_group_index,
984
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
985
16
        RowRanges* candidate_row_ranges) {
986
16
    if (UNLIKELY(_io_ctx && _io_ctx->should_stop)) {
987
0
        return Status::EndOfFile("stop");
988
0
    }
989
990
16
    std::function<void()> read_whole_row_group = [&]() {
991
16
        candidate_row_ranges->add(RowRange {0, row_group.num_rows});
992
16
    };
993
994
    // Check if the page index is available and if it exists.
995
16
    PageIndex page_index;
996
16
    if (!config::enable_parquet_page_index || _colname_to_slot_id == nullptr ||
997
16
        !page_index.check_and_get_page_index_ranges(row_group.columns)) {
998
16
        read_whole_row_group();
999
16
        return Status::OK();
1000
16
    }
1001
1002
0
    std::vector<int> parquet_col_ids;
1003
0
    for (size_t idx = 0; idx < _read_table_columns.size(); idx++) {
1004
0
        const auto& read_table_col = _read_table_columns[idx];
1005
0
        const auto& read_file_col = _read_file_columns[idx];
1006
0
        if (!_colname_to_slot_id->contains(read_table_col)) {
1007
0
            continue;
1008
0
        }
1009
0
        auto* field = _file_metadata->schema().get_column(read_file_col);
1010
1011
0
        std::function<void(FieldSchema * field)> f = [&](FieldSchema* field) {
1012
0
            if (!_column_ids.empty() &&
1013
0
                _column_ids.find(field->get_column_id()) == _column_ids.end()) {
1014
0
                return;
1015
0
            }
1016
1017
0
            if (field->data_type->get_primitive_type() == TYPE_ARRAY) {
1018
0
                f(&field->children[0]);
1019
0
            } else if (field->data_type->get_primitive_type() == TYPE_MAP) {
1020
0
                f(&field->children[0]);
1021
0
                f(&field->children[1]);
1022
0
            } else if (field->data_type->get_primitive_type() == TYPE_STRUCT) {
1023
0
                for (int i = 0; i < field->children.size(); ++i) {
1024
0
                    f(&field->children[i]);
1025
0
                }
1026
0
            } else {
1027
0
                int parquet_col_id = field->physical_column_index;
1028
0
                if (parquet_col_id >= 0) {
1029
0
                    parquet_col_ids.push_back(parquet_col_id);
1030
0
                }
1031
0
            }
1032
0
        };
1033
1034
0
        f(field);
1035
0
    }
1036
1037
0
    auto parse_offset_index = [&]() -> Status {
1038
0
        std::vector<uint8_t> off_index_buff(page_index._offset_index_size);
1039
0
        Slice res(off_index_buff.data(), page_index._offset_index_size);
1040
0
        size_t bytes_read = 0;
1041
0
        {
1042
0
            SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1043
0
            RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._offset_index_start, res,
1044
0
                                                          &bytes_read, _io_ctx));
1045
0
        }
1046
0
        _column_statistics.page_index_read_calls++;
1047
0
        _col_offsets.clear();
1048
1049
0
        for (auto parquet_col_id : parquet_col_ids) {
1050
0
            auto& chunk = row_group.columns[parquet_col_id];
1051
0
            if (chunk.offset_index_length == 0) [[unlikely]] {
1052
0
                continue;
1053
0
            }
1054
0
            tparquet::OffsetIndex offset_index;
1055
0
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1056
0
            RETURN_IF_ERROR(
1057
0
                    page_index.parse_offset_index(chunk, off_index_buff.data(), &offset_index));
1058
0
            _col_offsets[parquet_col_id] = offset_index;
1059
0
        }
1060
0
        return Status::OK();
1061
0
    };
1062
1063
    // from https://github.com/apache/doris/pull/55795
1064
0
    RETURN_IF_ERROR(parse_offset_index());
1065
1066
    // Check if page index is needed for min-max filter.
1067
0
    if (!_enable_filter_by_min_max || push_down_pred.empty()) {
1068
0
        read_whole_row_group();
1069
0
        return Status::OK();
1070
0
    }
1071
1072
    // read column index.
1073
0
    std::vector<uint8_t> col_index_buff(page_index._column_index_size);
1074
0
    size_t bytes_read = 0;
1075
0
    Slice result(col_index_buff.data(), page_index._column_index_size);
1076
0
    {
1077
0
        SCOPED_RAW_TIMER(&_reader_statistics.read_page_index_time);
1078
0
        RETURN_IF_ERROR(_tracing_file_reader->read_at(page_index._column_index_start, result,
1079
0
                                                      &bytes_read, _io_ctx));
1080
0
    }
1081
0
    _column_statistics.page_index_read_calls++;
1082
1083
0
    SCOPED_RAW_TIMER(&_reader_statistics.page_index_filter_time);
1084
1085
    // Construct a cacheable page index structure to avoid repeatedly reading the page index of the same column.
1086
0
    ParquetPredicate::CachedPageIndexStat cached_page_index;
1087
0
    cached_page_index.ctz = _ctz;
1088
0
    std::function<bool(ParquetPredicate::PageIndexStat**, int)> get_stat_func =
1089
0
            [&](ParquetPredicate::PageIndexStat** ans, const int cid) -> bool {
1090
0
        if (cached_page_index.stats.contains(cid)) {
1091
0
            *ans = &cached_page_index.stats[cid];
1092
0
            return (*ans)->available;
1093
0
        }
1094
0
        cached_page_index.stats.emplace(cid, ParquetPredicate::PageIndexStat {});
1095
0
        auto& sig_stat = cached_page_index.stats[cid];
1096
1097
0
        auto* slot = _tuple_descriptor->slots()[cid];
1098
0
        if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1099
            // table column not exist in file, may be schema change.
1100
0
            return false;
1101
0
        }
1102
1103
0
        const auto& file_col_name =
1104
0
                _table_info_node_ptr->children_file_column_name(slot->col_name());
1105
0
        const FieldSchema* col_schema = _file_metadata->schema().get_column(file_col_name);
1106
0
        int parquet_col_id = col_schema->physical_column_index;
1107
1108
0
        if (parquet_col_id < 0) {
1109
            // complex type, not support page index yet.
1110
0
            return false;
1111
0
        }
1112
0
        if (!_col_offsets.contains(parquet_col_id)) {
1113
            // If the file contains partition columns and the query applies filters on those
1114
            // partition columns, then reading the page index is unnecessary.
1115
0
            return false;
1116
0
        }
1117
1118
0
        auto& column_chunk = row_group.columns[parquet_col_id];
1119
0
        if (column_chunk.column_index_length == 0 || column_chunk.offset_index_length == 0) {
1120
            // column no page index.
1121
0
            return false;
1122
0
        }
1123
1124
0
        tparquet::ColumnIndex column_index;
1125
0
        {
1126
0
            SCOPED_RAW_TIMER(&_reader_statistics.parse_page_index_time);
1127
0
            RETURN_IF_ERROR(page_index.parse_column_index(column_chunk, col_index_buff.data(),
1128
0
                                                          &column_index));
1129
0
        }
1130
0
        const int64_t num_of_pages = column_index.null_pages.size();
1131
0
        if (num_of_pages <= 0) [[unlikely]] {
1132
            // no page. (maybe this row group no data.)
1133
0
            return false;
1134
0
        }
1135
0
        DCHECK_EQ(column_index.min_values.size(), column_index.max_values.size());
1136
0
        if (!column_index.__isset.null_counts) {
1137
            // not set null or null counts;
1138
0
            return false;
1139
0
        }
1140
1141
0
        auto& offset_index = _col_offsets[parquet_col_id];
1142
0
        const auto& page_locations = offset_index.page_locations;
1143
1144
0
        sig_stat.col_schema = col_schema;
1145
0
        sig_stat.num_of_pages = num_of_pages;
1146
0
        sig_stat.encoded_min_value = column_index.min_values;
1147
0
        sig_stat.encoded_max_value = column_index.max_values;
1148
0
        sig_stat.is_all_null.resize(num_of_pages);
1149
0
        sig_stat.has_null.resize(num_of_pages);
1150
0
        sig_stat.ranges.resize(num_of_pages);
1151
1152
0
        for (int page_id = 0; page_id < num_of_pages; page_id++) {
1153
0
            sig_stat.is_all_null[page_id] = column_index.null_pages[page_id];
1154
0
            sig_stat.has_null[page_id] = column_index.null_counts[page_id] > 0;
1155
1156
0
            int64_t from = page_locations[page_id].first_row_index;
1157
0
            int64_t to = 0;
1158
0
            if (page_id == page_locations.size() - 1) {
1159
0
                to = row_group_index.last_row;
1160
0
            } else {
1161
0
                to = page_locations[page_id + 1].first_row_index;
1162
0
            }
1163
0
            sig_stat.ranges[page_id] = RowRange {from, to};
1164
0
        }
1165
1166
0
        sig_stat.available = true;
1167
0
        *ans = &sig_stat;
1168
0
        return true;
1169
0
    };
1170
0
    cached_page_index.row_group_range = {0, row_group.num_rows};
1171
0
    cached_page_index.get_stat_func = get_stat_func;
1172
1173
0
    candidate_row_ranges->add({0, row_group.num_rows});
1174
0
    for (const auto& predicate : push_down_pred) {
1175
0
        RowRanges tmp_row_range;
1176
0
        if (!predicate->evaluate_and(&cached_page_index, &tmp_row_range)) {
1177
            // no need read this row group.
1178
0
            candidate_row_ranges->clear();
1179
0
            return Status::OK();
1180
0
        }
1181
0
        RowRanges::ranges_intersection(*candidate_row_ranges, tmp_row_range, candidate_row_ranges);
1182
0
    }
1183
0
    return Status::OK();
1184
0
}
1185
1186
Status ParquetReader::_process_min_max_bloom_filter(
1187
        const RowGroupReader::RowGroupIndex& row_group_index, const tparquet::RowGroup& row_group,
1188
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1189
35
        RowRanges* row_ranges) {
1190
35
    SCOPED_RAW_TIMER(&_reader_statistics.row_group_filter_time);
1191
35
    if (!_filter_groups) {
1192
        // No row group filtering is needed;
1193
        // for example, Iceberg reads position delete files.
1194
0
        row_ranges->add({0, row_group.num_rows});
1195
0
        return Status::OK();
1196
0
    }
1197
1198
35
    if (_read_by_rows) {
1199
19
        auto group_start = row_group_index.first_row;
1200
19
        auto group_end = row_group_index.last_row;
1201
1202
47
        while (!_row_ids.empty()) {
1203
28
            auto v = _row_ids.front();
1204
28
            if (v < group_start) {
1205
0
                continue;
1206
28
            } else if (v < group_end) {
1207
28
                row_ranges->add(RowRange {v - group_start, v - group_start + 1});
1208
28
                _row_ids.pop_front();
1209
28
            } else {
1210
0
                break;
1211
0
            }
1212
28
        }
1213
19
    } else {
1214
16
        bool filter_this_row_group = false;
1215
16
        bool filtered_by_min_max = false;
1216
16
        bool filtered_by_bloom_filter = false;
1217
16
        RETURN_IF_ERROR(_process_column_stat_filter(row_group, push_down_pred,
1218
16
                                                    &filter_this_row_group, &filtered_by_min_max,
1219
16
                                                    &filtered_by_bloom_filter));
1220
        // Update statistics based on filter type
1221
16
        if (filter_this_row_group) {
1222
0
            if (filtered_by_min_max) {
1223
0
                _reader_statistics.filtered_row_groups_by_min_max++;
1224
0
            }
1225
0
            if (filtered_by_bloom_filter) {
1226
0
                _reader_statistics.filtered_row_groups_by_bloom_filter++;
1227
0
            }
1228
0
        }
1229
1230
16
        if (!filter_this_row_group) {
1231
16
            RETURN_IF_ERROR(_process_page_index_filter(row_group, row_group_index, push_down_pred,
1232
16
                                                       row_ranges));
1233
16
        }
1234
16
    }
1235
1236
35
    return Status::OK();
1237
35
}
1238
1239
Status ParquetReader::_process_column_stat_filter(
1240
        const tparquet::RowGroup& row_group,
1241
        const std::vector<std::unique_ptr<MutilColumnBlockPredicate>>& push_down_pred,
1242
18
        bool* filter_group, bool* filtered_by_min_max, bool* filtered_by_bloom_filter) {
1243
    // If both filters are disabled, skip filtering
1244
18
    if (!_enable_filter_by_min_max && !_enable_filter_by_bloom_filter) {
1245
0
        return Status::OK();
1246
0
    }
1247
1248
    // Cache bloom filters for each column to avoid reading the same bloom filter multiple times
1249
    // when there are multiple predicates on the same column
1250
18
    std::unordered_map<int, std::unique_ptr<ParquetBlockSplitBloomFilter>> bloom_filter_cache;
1251
1252
    // Initialize output parameters
1253
18
    *filtered_by_min_max = false;
1254
18
    *filtered_by_bloom_filter = false;
1255
1256
18
    for (const auto& predicate : _push_down_predicates) {
1257
2
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_stat_func =
1258
4
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
1259
                    // Check if min-max filter is enabled
1260
4
                    if (!_enable_filter_by_min_max) {
1261
0
                        return false;
1262
0
                    }
1263
4
                    auto* slot = _tuple_descriptor->slots()[cid];
1264
4
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1265
0
                        return false;
1266
0
                    }
1267
4
                    const auto& file_col_name =
1268
4
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1269
4
                    const FieldSchema* col_schema =
1270
4
                            _file_metadata->schema().get_column(file_col_name);
1271
4
                    int parquet_col_id = col_schema->physical_column_index;
1272
4
                    auto meta_data = row_group.columns[parquet_col_id].meta_data;
1273
4
                    stat->col_schema = col_schema;
1274
4
                    return ParquetPredicate::read_column_stats(col_schema, meta_data,
1275
4
                                                               &_ignored_stats,
1276
4
                                                               _t_metadata->created_by, stat)
1277
4
                            .ok();
1278
4
                };
1279
2
        std::function<bool(ParquetPredicate::ColumnStat*, int)> get_bloom_filter_func =
1280
2
                [&](ParquetPredicate::ColumnStat* stat, const int cid) {
1281
0
                    auto* slot = _tuple_descriptor->slots()[cid];
1282
0
                    if (!_table_info_node_ptr->children_column_exists(slot->col_name())) {
1283
0
                        return false;
1284
0
                    }
1285
0
                    const auto& file_col_name =
1286
0
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1287
0
                    const FieldSchema* col_schema =
1288
0
                            _file_metadata->schema().get_column(file_col_name);
1289
0
                    int parquet_col_id = col_schema->physical_column_index;
1290
0
                    auto meta_data = row_group.columns[parquet_col_id].meta_data;
1291
0
                    if (!meta_data.__isset.bloom_filter_offset) {
1292
0
                        return false;
1293
0
                    }
1294
0
                    auto primitive_type =
1295
0
                            remove_nullable(col_schema->data_type)->get_primitive_type();
1296
0
                    if (!ParquetPredicate::bloom_filter_supported(primitive_type)) {
1297
0
                        return false;
1298
0
                    }
1299
1300
                    // Check if bloom filter is enabled
1301
0
                    if (!_enable_filter_by_bloom_filter) {
1302
0
                        return false;
1303
0
                    }
1304
1305
                    // Check cache first
1306
0
                    auto cache_iter = bloom_filter_cache.find(parquet_col_id);
1307
0
                    if (cache_iter != bloom_filter_cache.end()) {
1308
                        // Bloom filter already loaded for this column, reuse it
1309
0
                        stat->bloom_filter = std::move(cache_iter->second);
1310
0
                        bloom_filter_cache.erase(cache_iter);
1311
0
                        return stat->bloom_filter != nullptr;
1312
0
                    }
1313
1314
0
                    if (!stat->bloom_filter) {
1315
0
                        SCOPED_RAW_TIMER(&_reader_statistics.bloom_filter_read_time);
1316
0
                        auto st = ParquetPredicate::read_bloom_filter(
1317
0
                                meta_data, _tracing_file_reader, _io_ctx, stat);
1318
0
                        if (!st.ok()) {
1319
0
                            LOG(WARNING) << "Failed to read bloom filter for column "
1320
0
                                         << col_schema->name << " in file " << _scan_range.path
1321
0
                                         << ", status: " << st.to_string();
1322
0
                            stat->bloom_filter.reset();
1323
0
                            return false;
1324
0
                        }
1325
0
                    }
1326
0
                    return stat->bloom_filter != nullptr;
1327
0
                };
1328
2
        ParquetPredicate::ColumnStat stat;
1329
2
        stat.ctz = _ctz;
1330
2
        stat.get_stat_func = &get_stat_func;
1331
2
        stat.get_bloom_filter_func = &get_bloom_filter_func;
1332
1333
2
        if (!predicate->evaluate_and(&stat)) {
1334
1
            *filter_group = true;
1335
1336
            // Track which filter was used for filtering
1337
            // If bloom filter was loaded, it means bloom filter was used
1338
1
            if (stat.bloom_filter) {
1339
0
                *filtered_by_bloom_filter = true;
1340
0
            }
1341
            // If col_schema was set but no bloom filter, it means min-max stats were used
1342
1
            if (stat.col_schema && !stat.bloom_filter) {
1343
1
                *filtered_by_min_max = true;
1344
1
            }
1345
1346
1
            return Status::OK();
1347
1
        }
1348
1349
        // After evaluating, if the bloom filter was used, cache it for subsequent predicates
1350
1
        if (stat.bloom_filter) {
1351
            // Find the column id for caching
1352
0
            for (auto* slot : _tuple_descriptor->slots()) {
1353
0
                if (_table_info_node_ptr->children_column_exists(slot->col_name())) {
1354
0
                    const auto& file_col_name =
1355
0
                            _table_info_node_ptr->children_file_column_name(slot->col_name());
1356
0
                    const FieldSchema* col_schema =
1357
0
                            _file_metadata->schema().get_column(file_col_name);
1358
0
                    int parquet_col_id = col_schema->physical_column_index;
1359
0
                    if (stat.col_schema == col_schema) {
1360
0
                        bloom_filter_cache[parquet_col_id] = std::move(stat.bloom_filter);
1361
0
                        break;
1362
0
                    }
1363
0
                }
1364
0
            }
1365
0
        }
1366
1
    }
1367
1368
    // Update filter statistics if this row group was not filtered
1369
    // The statistics will be updated in _init_row_groups when filter_group is true
1370
17
    return Status::OK();
1371
18
}
1372
1373
70
int64_t ParquetReader::_get_column_start_offset(const tparquet::ColumnMetaData& column) {
1374
70
    return has_dict_page(column) ? column.dictionary_page_offset : column.data_page_offset;
1375
70
}
1376
1377
14
void ParquetReader::_collect_profile() {
1378
14
    if (_profile == nullptr) {
1379
0
        return;
1380
0
    }
1381
1382
14
    if (_current_group_reader != nullptr) {
1383
14
        _current_group_reader->collect_profile_before_close();
1384
14
    }
1385
14
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups, _reader_statistics.filtered_row_groups);
1386
14
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_min_max,
1387
14
                   _reader_statistics.filtered_row_groups_by_min_max);
1388
14
    COUNTER_UPDATE(_parquet_profile.filtered_row_groups_by_bloom_filter,
1389
14
                   _reader_statistics.filtered_row_groups_by_bloom_filter);
1390
14
    COUNTER_UPDATE(_parquet_profile.to_read_row_groups, _reader_statistics.read_row_groups);
1391
14
    COUNTER_UPDATE(_parquet_profile.total_row_groups, _total_groups);
1392
14
    COUNTER_UPDATE(_parquet_profile.filtered_group_rows, _reader_statistics.filtered_group_rows);
1393
14
    COUNTER_UPDATE(_parquet_profile.filtered_page_rows, _reader_statistics.filtered_page_rows);
1394
14
    COUNTER_UPDATE(_parquet_profile.lazy_read_filtered_rows,
1395
14
                   _reader_statistics.lazy_read_filtered_rows);
1396
14
    COUNTER_UPDATE(_parquet_profile.filtered_bytes, _reader_statistics.filtered_bytes);
1397
14
    COUNTER_UPDATE(_parquet_profile.raw_rows_read, _reader_statistics.read_rows);
1398
14
    COUNTER_UPDATE(_parquet_profile.column_read_time, _reader_statistics.column_read_time);
1399
14
    COUNTER_UPDATE(_parquet_profile.parse_meta_time, _reader_statistics.parse_meta_time);
1400
14
    COUNTER_UPDATE(_parquet_profile.parse_footer_time, _reader_statistics.parse_footer_time);
1401
14
    COUNTER_UPDATE(_parquet_profile.file_reader_create_time,
1402
14
                   _reader_statistics.file_reader_create_time);
1403
14
    COUNTER_UPDATE(_parquet_profile.open_file_num, _reader_statistics.open_file_num);
1404
14
    COUNTER_UPDATE(_parquet_profile.page_index_filter_time,
1405
14
                   _reader_statistics.page_index_filter_time);
1406
14
    COUNTER_UPDATE(_parquet_profile.read_page_index_time, _reader_statistics.read_page_index_time);
1407
14
    COUNTER_UPDATE(_parquet_profile.parse_page_index_time,
1408
14
                   _reader_statistics.parse_page_index_time);
1409
14
    COUNTER_UPDATE(_parquet_profile.row_group_filter_time,
1410
14
                   _reader_statistics.row_group_filter_time);
1411
14
    COUNTER_UPDATE(_parquet_profile.file_footer_read_calls,
1412
14
                   _reader_statistics.file_footer_read_calls);
1413
14
    COUNTER_UPDATE(_parquet_profile.file_footer_hit_cache,
1414
14
                   _reader_statistics.file_footer_hit_cache);
1415
1416
14
    COUNTER_UPDATE(_parquet_profile.skip_page_header_num, _column_statistics.skip_page_header_num);
1417
14
    COUNTER_UPDATE(_parquet_profile.parse_page_header_num,
1418
14
                   _column_statistics.parse_page_header_num);
1419
14
    COUNTER_UPDATE(_parquet_profile.predicate_filter_time,
1420
14
                   _reader_statistics.predicate_filter_time);
1421
14
    COUNTER_UPDATE(_parquet_profile.dict_filter_rewrite_time,
1422
14
                   _reader_statistics.dict_filter_rewrite_time);
1423
14
    COUNTER_UPDATE(_parquet_profile.bloom_filter_read_time,
1424
14
                   _reader_statistics.bloom_filter_read_time);
1425
14
    COUNTER_UPDATE(_parquet_profile.page_index_read_calls,
1426
14
                   _column_statistics.page_index_read_calls);
1427
14
    COUNTER_UPDATE(_parquet_profile.decompress_time, _column_statistics.decompress_time);
1428
14
    COUNTER_UPDATE(_parquet_profile.decompress_cnt, _column_statistics.decompress_cnt);
1429
14
    COUNTER_UPDATE(_parquet_profile.page_read_counter, _column_statistics.page_read_counter);
1430
14
    COUNTER_UPDATE(_parquet_profile.page_cache_write_counter,
1431
14
                   _column_statistics.page_cache_write_counter);
1432
14
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_write_counter,
1433
14
                   _column_statistics.page_cache_compressed_write_counter);
1434
14
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_write_counter,
1435
14
                   _column_statistics.page_cache_decompressed_write_counter);
1436
14
    COUNTER_UPDATE(_parquet_profile.page_cache_hit_counter,
1437
14
                   _column_statistics.page_cache_hit_counter);
1438
14
    COUNTER_UPDATE(_parquet_profile.page_cache_missing_counter,
1439
14
                   _column_statistics.page_cache_missing_counter);
1440
14
    COUNTER_UPDATE(_parquet_profile.page_cache_compressed_hit_counter,
1441
14
                   _column_statistics.page_cache_compressed_hit_counter);
1442
14
    COUNTER_UPDATE(_parquet_profile.page_cache_decompressed_hit_counter,
1443
14
                   _column_statistics.page_cache_decompressed_hit_counter);
1444
14
    COUNTER_UPDATE(_parquet_profile.decode_header_time, _column_statistics.decode_header_time);
1445
14
    COUNTER_UPDATE(_parquet_profile.read_page_header_time,
1446
14
                   _column_statistics.read_page_header_time);
1447
14
    COUNTER_UPDATE(_parquet_profile.decode_value_time, _column_statistics.decode_value_time);
1448
14
    COUNTER_UPDATE(_parquet_profile.decode_dict_time, _column_statistics.decode_dict_time);
1449
14
    COUNTER_UPDATE(_parquet_profile.decode_level_time, _column_statistics.decode_level_time);
1450
14
    COUNTER_UPDATE(_parquet_profile.decode_null_map_time, _column_statistics.decode_null_map_time);
1451
14
}
1452
1453
14
void ParquetReader::_collect_profile_before_close() {
1454
14
    _collect_profile();
1455
14
}
1456
1457
#include "common/compile_check_end.h"
1458
} // namespace doris