Coverage Report

Created: 2026-05-13 17:40

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/format/parquet/vparquet_group_reader.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/parquet/vparquet_group_reader.h"
19
20
#include <gen_cpp/Exprs_types.h>
21
#include <gen_cpp/Opcodes_types.h>
22
#include <gen_cpp/Types_types.h>
23
#include <gen_cpp/parquet_types.h>
24
#include <string.h>
25
26
#include <algorithm>
27
#include <boost/iterator/iterator_facade.hpp>
28
#include <memory>
29
#include <ostream>
30
31
#include "common/config.h"
32
#include "common/logging.h"
33
#include "common/object_pool.h"
34
#include "common/status.h"
35
#include "core/assert_cast.h"
36
#include "core/block/block.h"
37
#include "core/block/column_with_type_and_name.h"
38
#include "core/column/column_const.h"
39
#include "core/column/column_nullable.h"
40
#include "core/column/column_string.h"
41
#include "core/column/column_vector.h"
42
#include "core/custom_allocator.h"
43
#include "core/data_type/data_type.h"
44
#include "core/data_type/data_type_string.h"
45
#include "core/data_type/define_primitive_type.h"
46
#include "core/pod_array.h"
47
#include "core/types.h"
48
#include "exprs/create_predicate_function.h"
49
#include "exprs/hybrid_set.h"
50
#include "exprs/vdirect_in_predicate.h"
51
#include "exprs/vectorized_fn_call.h"
52
#include "exprs/vexpr.h"
53
#include "exprs/vexpr_context.h"
54
#include "exprs/vliteral.h"
55
#include "exprs/vslot_ref.h"
56
#include "format/parquet/schema_desc.h"
57
#include "format/parquet/vparquet_column_reader.h"
58
#include "format/table/iceberg_reader.h"
59
#include "runtime/descriptors.h"
60
#include "runtime/runtime_state.h"
61
#include "runtime/thread_context.h"
62
#include "storage/segment/column_reader.h"
63
64
namespace cctz {
65
class time_zone;
66
} // namespace cctz
67
namespace doris {
68
class RuntimeState;
69
70
namespace io {
71
struct IOContext;
72
} // namespace io
73
} // namespace doris
74
75
namespace doris {
76
77
const std::vector<int64_t> RowGroupReader::NO_DELETE = {};
78
static constexpr uint32_t MAX_DICT_CODE_PREDICATE_TO_REWRITE = std::numeric_limits<uint32_t>::max();
79
80
RowGroupReader::RowGroupReader(io::FileReaderSPtr file_reader,
81
                               const std::vector<std::string>& read_columns,
82
                               const int32_t row_group_id, const tparquet::RowGroup& row_group,
83
                               const cctz::time_zone* ctz, io::IOContext* io_ctx,
84
                               const PositionDeleteContext& position_delete_ctx,
85
                               const LazyReadContext& lazy_read_ctx, RuntimeState* state,
86
                               const std::set<uint64_t>& column_ids,
87
                               const std::set<uint64_t>& filter_column_ids)
88
38
        : _file_reader(file_reader),
89
38
          _read_table_columns(read_columns),
90
38
          _row_group_id(row_group_id),
91
38
          _row_group_meta(row_group),
92
38
          _remaining_rows(row_group.num_rows),
93
38
          _ctz(ctz),
94
38
          _io_ctx(io_ctx),
95
38
          _position_delete_ctx(position_delete_ctx),
96
38
          _lazy_read_ctx(lazy_read_ctx),
97
38
          _state(state),
98
38
          _obj_pool(new ObjectPool()),
99
38
          _column_ids(column_ids),
100
38
          _filter_column_ids(filter_column_ids) {}
101
102
38
RowGroupReader::~RowGroupReader() {
103
38
    if (_obj_pool != nullptr) {
104
38
        _obj_pool->clear();
105
38
    }
106
38
}
107
108
Status RowGroupReader::init(
109
        const FieldDescriptor& schema, RowRanges& row_ranges,
110
        std::unordered_map<int, tparquet::OffsetIndex>& col_offsets,
111
        const TupleDescriptor* tuple_descriptor, const RowDescriptor* row_descriptor,
112
        const std::unordered_map<std::string, int>* colname_to_slot_id,
113
        const VExprContextSPtrs* not_single_slot_filter_conjuncts,
114
38
        const std::unordered_map<int, VExprContextSPtrs>* slot_id_to_filter_conjuncts) {
115
38
    _tuple_descriptor = tuple_descriptor;
116
38
    _row_descriptor = row_descriptor;
117
38
    _col_name_to_slot_id = colname_to_slot_id;
118
38
    _slot_id_to_filter_conjuncts = slot_id_to_filter_conjuncts;
119
38
    _read_ranges = row_ranges;
120
38
    _filter_read_ranges_by_condition_cache();
121
38
    _remaining_rows = _read_ranges.count();
122
123
38
    if (_read_table_columns.empty()) {
124
        // Query task that only select columns in path.
125
1
        return Status::OK();
126
1
    }
127
37
    const size_t MAX_GROUP_BUF_SIZE = config::parquet_rowgroup_max_buffer_mb << 20;
128
37
    const size_t MAX_COLUMN_BUF_SIZE = config::parquet_column_max_buffer_mb << 20;
129
37
    size_t max_buf_size =
130
37
            std::min(MAX_COLUMN_BUF_SIZE, MAX_GROUP_BUF_SIZE / _read_table_columns.size());
131
108
    for (const auto& read_table_col : _read_table_columns) {
132
108
        auto read_file_col = _table_info_node_ptr->children_file_column_name(read_table_col);
133
108
        auto* field = schema.get_column(read_file_col);
134
108
        std::unique_ptr<ParquetColumnReader> reader;
135
108
        RETURN_IF_ERROR(ParquetColumnReader::create(
136
108
                _file_reader, field, _row_group_meta, _read_ranges, _ctz, _io_ctx, reader,
137
108
                max_buf_size, col_offsets, _state, false, _column_ids, _filter_column_ids));
138
108
        if (reader == nullptr) {
139
0
            VLOG_DEBUG << "Init row group(" << _row_group_id << ") reader failed";
140
0
            return Status::Corruption("Init row group reader failed");
141
0
        }
142
108
        _column_readers[read_table_col] = std::move(reader);
143
108
    }
144
145
37
    bool disable_dict_filter = false;
146
37
    if (not_single_slot_filter_conjuncts != nullptr && !not_single_slot_filter_conjuncts->empty()) {
147
0
        disable_dict_filter = true;
148
0
        _filter_conjuncts.insert(_filter_conjuncts.end(), not_single_slot_filter_conjuncts->begin(),
149
0
                                 not_single_slot_filter_conjuncts->end());
150
0
    }
151
152
    // Check if single slot can be filtered by dict.
153
37
    if (_slot_id_to_filter_conjuncts && !_slot_id_to_filter_conjuncts->empty()) {
154
6
        const std::vector<std::string>& predicate_col_names =
155
6
                _lazy_read_ctx.predicate_columns.first;
156
6
        const std::vector<int>& predicate_col_slot_ids = _lazy_read_ctx.predicate_columns.second;
157
14
        for (size_t i = 0; i < predicate_col_names.size(); ++i) {
158
8
            const std::string& predicate_col_name = predicate_col_names[i];
159
8
            int slot_id = predicate_col_slot_ids[i];
160
161
8
            if (!_table_format_reader->has_column_optimization(
162
8
                        predicate_col_name,
163
8
                        TableFormatReader::ColumnOptimizationTypes::DICT_FILTER)) {
164
                // Row-lineage style generated columns cannot participate in dict filtering.
165
0
                if (_slot_id_to_filter_conjuncts->find(slot_id) !=
166
0
                    _slot_id_to_filter_conjuncts->end()) {
167
0
                    for (auto& ctx : _slot_id_to_filter_conjuncts->at(slot_id)) {
168
0
                        _filter_conjuncts.push_back(ctx);
169
0
                    }
170
0
                }
171
0
                continue;
172
0
            }
173
174
8
            auto predicate_file_col_name =
175
8
                    _table_info_node_ptr->children_file_column_name(predicate_col_name);
176
8
            auto field = schema.get_column(predicate_file_col_name);
177
8
            if (!disable_dict_filter && !_lazy_read_ctx.has_complex_type &&
178
8
                _can_filter_by_dict(
179
8
                        slot_id, _row_group_meta.columns[field->physical_column_index].meta_data)) {
180
2
                _dict_filter_cols.emplace_back(std::make_pair(predicate_col_name, slot_id));
181
6
            } else {
182
6
                if (_slot_id_to_filter_conjuncts->find(slot_id) !=
183
6
                    _slot_id_to_filter_conjuncts->end()) {
184
6
                    for (auto& ctx : _slot_id_to_filter_conjuncts->at(slot_id)) {
185
6
                        _filter_conjuncts.push_back(ctx);
186
6
                    }
187
6
                }
188
6
            }
189
8
        }
190
        // Add predicate_partition_columns in _slot_id_to_filter_conjuncts(single slot conjuncts)
191
        // to _filter_conjuncts, others should be added from not_single_slot_filter_conjuncts.
192
6
        for (auto& kv : _lazy_read_ctx.predicate_partition_columns) {
193
4
            auto& [value, slot_desc] = kv.second;
194
4
            auto iter = _slot_id_to_filter_conjuncts->find(slot_desc->id());
195
4
            if (iter != _slot_id_to_filter_conjuncts->end()) {
196
4
                for (auto& ctx : iter->second) {
197
4
                    _filter_conjuncts.push_back(ctx);
198
4
                }
199
4
            }
200
4
        }
201
        //For check missing column :   missing column == xx, missing column is null,missing column is not null.
202
6
        _filter_conjuncts.insert(_filter_conjuncts.end(),
203
6
                                 _lazy_read_ctx.missing_columns_conjuncts.begin(),
204
6
                                 _lazy_read_ctx.missing_columns_conjuncts.end());
205
6
        RETURN_IF_ERROR(_rewrite_dict_predicates());
206
6
    }
207
    // _state is nullptr in some ut.
208
37
    if (_state && _state->enable_adjust_conjunct_order_by_cost()) {
209
8
        std::ranges::sort(_filter_conjuncts, [](const auto& a, const auto& b) {
210
8
            return a->execute_cost() < b->execute_cost();
211
8
        });
212
8
    }
213
37
    return Status::OK();
214
37
}
215
216
bool RowGroupReader::_can_filter_by_dict(int slot_id,
217
8
                                         const tparquet::ColumnMetaData& column_metadata) {
218
8
    SlotDescriptor* slot = nullptr;
219
8
    const std::vector<SlotDescriptor*>& slots = _tuple_descriptor->slots();
220
14
    for (auto each : slots) {
221
14
        if (each->id() == slot_id) {
222
8
            slot = each;
223
8
            break;
224
8
        }
225
14
    }
226
8
    if (!is_string_type(slot->type()->get_primitive_type()) &&
227
8
        !is_var_len_object(slot->type()->get_primitive_type())) {
228
6
        return false;
229
6
    }
230
2
    if (column_metadata.type != tparquet::Type::BYTE_ARRAY) {
231
0
        return false;
232
0
    }
233
234
2
    if (!is_dictionary_encoded(column_metadata)) {
235
0
        return false;
236
0
    }
237
238
2
    if (_slot_id_to_filter_conjuncts->find(slot_id) == _slot_id_to_filter_conjuncts->end()) {
239
0
        return false;
240
0
    }
241
242
    // TODO: The current implementation of dictionary filtering does not take into account
243
    //  the implementation of NULL values because the dictionary itself does not contain
244
    //  NULL value encoding. As a result, many NULL-related functions or expressions
245
    //  cannot work properly, such as is null, is not null, coalesce, etc.
246
    //  Here we check if the predicate expr is IN or BINARY_PRED.
247
    //  Implementation of NULL value dictionary filtering will be carried out later.
248
2
    return std::ranges::all_of(_slot_id_to_filter_conjuncts->at(slot_id), [&](const auto& ctx) {
249
2
        return (ctx->root()->node_type() == TExprNodeType::IN_PRED ||
250
2
                ctx->root()->node_type() == TExprNodeType::BINARY_PRED) &&
251
2
               ctx->root()->children()[0]->node_type() == TExprNodeType::SLOT_REF;
252
2
    });
253
2
}
254
255
// This function is copied from
256
// https://github.com/apache/impala/blob/master/be/src/exec/parquet/hdfs-parquet-scanner.cc#L1717
257
2
bool RowGroupReader::is_dictionary_encoded(const tparquet::ColumnMetaData& column_metadata) {
258
    // The Parquet spec allows for column chunks to have mixed encodings
259
    // where some data pages are dictionary-encoded and others are plain
260
    // encoded. For example, a Parquet file writer might start writing
261
    // a column chunk as dictionary encoded, but it will switch to plain
262
    // encoding if the dictionary grows too large.
263
    //
264
    // In order for dictionary filters to skip the entire row group,
265
    // the conjuncts must be evaluated on column chunks that are entirely
266
    // encoded with the dictionary encoding. There are two checks
267
    // available to verify this:
268
    // 1. The encoding_stats field on the column chunk metadata provides
269
    //    information about the number of data pages written in each
270
    //    format. This allows for a specific check of whether all the
271
    //    data pages are dictionary encoded.
272
    // 2. The encodings field on the column chunk metadata lists the
273
    //    encodings used. If this list contains the dictionary encoding
274
    //    and does not include unexpected encodings (i.e. encodings not
275
    //    associated with definition/repetition levels), then it is entirely
276
    //    dictionary encoded.
277
2
    if (column_metadata.__isset.encoding_stats) {
278
        // Condition #1 above
279
4
        for (const tparquet::PageEncodingStats& enc_stat : column_metadata.encoding_stats) {
280
4
            if (enc_stat.page_type == tparquet::PageType::DATA_PAGE &&
281
4
                (enc_stat.encoding != tparquet::Encoding::PLAIN_DICTIONARY &&
282
2
                 enc_stat.encoding != tparquet::Encoding::RLE_DICTIONARY) &&
283
4
                enc_stat.count > 0) {
284
0
                return false;
285
0
            }
286
4
        }
287
2
    } else {
288
        // Condition #2 above
289
0
        bool has_dict_encoding = false;
290
0
        bool has_nondict_encoding = false;
291
0
        for (const tparquet::Encoding::type& encoding : column_metadata.encodings) {
292
0
            if (encoding == tparquet::Encoding::PLAIN_DICTIONARY ||
293
0
                encoding == tparquet::Encoding::RLE_DICTIONARY) {
294
0
                has_dict_encoding = true;
295
0
            }
296
297
            // RLE and BIT_PACKED are used for repetition/definition levels
298
0
            if (encoding != tparquet::Encoding::PLAIN_DICTIONARY &&
299
0
                encoding != tparquet::Encoding::RLE_DICTIONARY &&
300
0
                encoding != tparquet::Encoding::RLE && encoding != tparquet::Encoding::BIT_PACKED) {
301
0
                has_nondict_encoding = true;
302
0
                break;
303
0
            }
304
0
        }
305
        // Not entirely dictionary encoded if:
306
        // 1. No dictionary encoding listed
307
        // OR
308
        // 2. Some non-dictionary encoding is listed
309
0
        if (!has_dict_encoding || has_nondict_encoding) {
310
0
            return false;
311
0
        }
312
0
    }
313
314
2
    return true;
315
2
}
316
317
Status RowGroupReader::next_batch(Block* block, size_t batch_size, size_t* read_rows,
318
98
                                  bool* batch_eof) {
319
98
    if (_is_row_group_filtered) {
320
2
        *read_rows = 0;
321
2
        *batch_eof = true;
322
2
        return Status::OK();
323
2
    }
324
325
    // Process external table query task that select columns are all from path.
326
96
    if (_read_table_columns.empty()) {
327
11
        bool modify_row_ids = false;
328
11
        RETURN_IF_ERROR(_read_empty_batch(batch_size, read_rows, batch_eof, &modify_row_ids));
329
330
11
        DCHECK(_table_format_reader);
331
11
        RETURN_IF_ERROR(_table_format_reader->on_fill_partition_columns(
332
11
                block, *read_rows, _lazy_read_ctx.partition_col_names));
333
11
        RETURN_IF_ERROR(_table_format_reader->on_fill_missing_columns(
334
11
                block, *read_rows, _lazy_read_ctx.missing_col_names));
335
11
        if (_table_format_reader->has_synthesized_column_handlers()) {
336
0
            RETURN_IF_ERROR(_get_current_batch_row_id(*read_rows));
337
0
        }
338
11
        RETURN_IF_ERROR(_table_format_reader->fill_synthesized_columns(block, *read_rows));
339
11
        RETURN_IF_ERROR(_table_format_reader->fill_generated_columns(block, *read_rows));
340
11
        Status st = VExprContext::filter_block(_lazy_read_ctx.conjuncts, block, block->columns());
341
11
        *read_rows = block->rows();
342
11
        return st;
343
11
    }
344
85
    if (_lazy_read_ctx.can_lazy_read) {
345
        // call _do_lazy_read recursively when current batch is skipped
346
12
        return _do_lazy_read(block, batch_size, read_rows, batch_eof);
347
73
    } else {
348
73
        FilterMap filter_map;
349
73
        int64_t batch_base_row = _total_read_rows;
350
73
        RETURN_IF_ERROR((_read_column_data(block, _lazy_read_ctx.all_read_columns, batch_size,
351
73
                                           read_rows, batch_eof, filter_map)));
352
73
        DCHECK(_table_format_reader);
353
73
        RETURN_IF_ERROR(_table_format_reader->on_fill_partition_columns(
354
73
                block, *read_rows, _lazy_read_ctx.partition_col_names));
355
73
        RETURN_IF_ERROR(_table_format_reader->on_fill_missing_columns(
356
73
                block, *read_rows, _lazy_read_ctx.missing_col_names));
357
358
73
        if (_table_format_reader->has_synthesized_column_handlers() ||
359
73
            _table_format_reader->has_generated_column_handlers()) {
360
5
            RETURN_IF_ERROR(_get_current_batch_row_id(*read_rows));
361
5
        }
362
73
        RETURN_IF_ERROR(_table_format_reader->fill_synthesized_columns(block, *read_rows));
363
73
        RETURN_IF_ERROR(_table_format_reader->fill_generated_columns(block, *read_rows));
364
365
73
#ifndef NDEBUG
366
207
        for (auto col : *block) {
367
207
            col.column->sanity_check();
368
207
            DCHECK(block->rows() == col.column->size())
369
0
                    << absl::Substitute("block rows = $0 , column rows = $1, col name = $2",
370
0
                                        block->rows(), col.column->size(), col.name);
371
207
        }
372
73
#endif
373
374
73
        if (block->rows() == 0) {
375
0
            RETURN_IF_ERROR(_convert_dict_cols_to_string_cols(block));
376
0
            *read_rows = block->rows();
377
0
#ifndef NDEBUG
378
0
            for (auto col : *block) {
379
0
                col.column->sanity_check();
380
0
                DCHECK(block->rows() == col.column->size())
381
0
                        << absl::Substitute("block rows = $0 , column rows = $1, col name = $2",
382
0
                                            block->rows(), col.column->size(), col.name);
383
0
            }
384
0
#endif
385
0
            return Status::OK();
386
0
        }
387
73
        {
388
73
            SCOPED_RAW_TIMER(&_predicate_filter_time);
389
73
            RETURN_IF_ERROR(_build_pos_delete_filter(*read_rows));
390
391
73
            std::vector<uint32_t> columns_to_filter;
392
73
            int column_to_keep = block->columns();
393
73
            columns_to_filter.resize(column_to_keep);
394
280
            for (uint32_t i = 0; i < column_to_keep; ++i) {
395
207
                columns_to_filter[i] = i;
396
207
            }
397
73
            if (!_lazy_read_ctx.conjuncts.empty()) {
398
22
                std::vector<IColumn::Filter*> filters;
399
22
                if (_position_delete_ctx.has_filter) {
400
0
                    filters.push_back(_pos_delete_filter_ptr.get());
401
0
                }
402
22
                IColumn::Filter result_filter(block->rows(), 1);
403
22
                bool can_filter_all = false;
404
405
22
                {
406
22
                    RETURN_IF_ERROR_OR_CATCH_EXCEPTION(VExprContext::execute_conjuncts(
407
22
                            _filter_conjuncts, &filters, block, &result_filter, &can_filter_all));
408
22
                }
409
410
                // Condition cache MISS: mark granules with surviving rows (non-lazy path)
411
22
                if (!can_filter_all) {
412
11
                    _mark_condition_cache_granules(result_filter.data(), block->rows(),
413
11
                                                   batch_base_row);
414
11
                }
415
416
22
                if (can_filter_all) {
417
33
                    for (auto& col : columns_to_filter) {
418
33
                        std::move(*block->get_by_position(col).column).assume_mutable()->clear();
419
33
                    }
420
11
                    Block::erase_useless_column(block, column_to_keep);
421
11
                    RETURN_IF_ERROR(_convert_dict_cols_to_string_cols(block));
422
11
                    return Status::OK();
423
11
                }
424
425
11
                RETURN_IF_CATCH_EXCEPTION(
426
11
                        Block::filter_block_internal(block, columns_to_filter, result_filter));
427
11
                Block::erase_useless_column(block, column_to_keep);
428
51
            } else {
429
51
                RETURN_IF_CATCH_EXCEPTION(
430
51
                        RETURN_IF_ERROR(_filter_block(block, column_to_keep, columns_to_filter)));
431
51
            }
432
62
            RETURN_IF_ERROR(_convert_dict_cols_to_string_cols(block));
433
62
        }
434
62
#ifndef NDEBUG
435
174
        for (auto col : *block) {
436
174
            col.column->sanity_check();
437
174
            DCHECK(block->rows() == col.column->size())
438
0
                    << absl::Substitute("block rows = $0 , column rows = $1, col name = $2",
439
0
                                        block->rows(), col.column->size(), col.name);
440
174
        }
441
62
#endif
442
62
        *read_rows = block->rows();
443
62
        return Status::OK();
444
62
    }
445
85
}
446
447
// Maps each batch row to its global parquet file position via _read_ranges, then marks
448
// the corresponding condition cache granule as true if the filter indicates the row survived.
449
// batch_seq_start is the number of rows already read sequentially before this batch
450
// (i.e., _total_read_rows before the batch started).
451
void RowGroupReader::_mark_condition_cache_granules(const uint8_t* filter_data, size_t num_rows,
452
22
                                                    int64_t batch_seq_start) {
453
22
    if (!_condition_cache_ctx || _condition_cache_ctx->is_hit) {
454
22
        return;
455
22
    }
456
0
    auto& cache = *_condition_cache_ctx->filter_result;
457
0
    for (size_t i = 0; i < num_rows; i++) {
458
0
        if (filter_data[i]) {
459
            // row-group-relative position of this row
460
0
            int64_t rg_pos = _read_ranges.get_row_index_by_pos(batch_seq_start + i);
461
            // global row number in the parquet file
462
0
            size_t granule = (_current_row_group_idx.first_row + rg_pos) /
463
0
                             ConditionCacheContext::GRANULE_SIZE;
464
0
            size_t cache_idx = granule - _condition_cache_ctx->base_granule;
465
0
            if (cache_idx < cache.size()) {
466
0
                cache[cache_idx] = true;
467
0
            }
468
0
        }
469
0
    }
470
0
}
471
472
// On condition cache HIT, removes row ranges whose granules have no surviving rows from
473
// _read_ranges BEFORE column readers are created. This makes ParquetColumnReader skip I/O
474
// entirely for false-granule rows — both predicate and lazy columns — via its existing
475
// page/row-skipping infrastructure.
476
38
void RowGroupReader::_filter_read_ranges_by_condition_cache() {
477
38
    if (!_condition_cache_ctx || !_condition_cache_ctx->is_hit) {
478
38
        return;
479
38
    }
480
0
    auto& filter_result = *_condition_cache_ctx->filter_result;
481
0
    if (filter_result.empty()) {
482
0
        return;
483
0
    }
484
485
0
    auto old_row_count = _read_ranges.count();
486
0
    _read_ranges =
487
0
            filter_ranges_by_cache(_read_ranges, filter_result, _current_row_group_idx.first_row,
488
0
                                   _condition_cache_ctx->base_granule);
489
0
    _is_row_group_filtered = _read_ranges.is_empty();
490
0
    _condition_cache_filtered_rows += old_row_count - _read_ranges.count();
491
0
}
492
493
// Filters read_ranges by removing rows whose cache granule is false.
494
//
495
// Cache index i maps to global granule (base_granule + i), which covers global file
496
// rows [(base_granule+i)*GS, (base_granule+i+1)*GS). Since read_ranges uses
497
// row-group-relative indices and first_row is the global position of the row group's
498
// first row, global granule g maps to row-group-relative range:
499
//   [max(0, g*GS - first_row), max(0, (g+1)*GS - first_row))
500
//
501
// We build a RowRanges of all false-granule regions (in row-group-relative coordinates),
502
// then subtract from read_ranges via ranges_exception.
503
//
504
// Granules beyond cache.size() are kept conservatively (assumed true).
505
//
506
// When base_granule > 0, the cache only covers granules starting from base_granule.
507
// This happens when a Parquet file is split across multiple scan ranges and this reader
508
// only processes row groups starting at a non-zero offset in the file.
509
RowRanges RowGroupReader::filter_ranges_by_cache(const RowRanges& read_ranges,
510
                                                 const std::vector<bool>& cache, int64_t first_row,
511
21
                                                 int64_t base_granule) {
512
21
    constexpr int64_t GS = ConditionCacheContext::GRANULE_SIZE;
513
21
    RowRanges filtered_ranges;
514
515
138
    for (size_t i = 0; i < cache.size(); i++) {
516
117
        if (!cache[i]) {
517
64
            int64_t global_granule = base_granule + static_cast<int64_t>(i);
518
64
            int64_t rg_from = std::max(static_cast<int64_t>(0), global_granule * GS - first_row);
519
64
            int64_t rg_to =
520
64
                    std::max(static_cast<int64_t>(0), (global_granule + 1) * GS - first_row);
521
64
            if (rg_from < rg_to) {
522
16
                filtered_ranges.add(RowRange(rg_from, rg_to));
523
16
            }
524
64
        }
525
117
    }
526
527
21
    RowRanges result;
528
21
    RowRanges::ranges_exception(read_ranges, filtered_ranges, &result);
529
21
    return result;
530
21
}
531
532
Status RowGroupReader::_read_column_data(Block* block,
533
                                         const std::vector<std::string>& table_columns,
534
                                         size_t batch_size, size_t* read_rows, bool* batch_eof,
535
106
                                         FilterMap& filter_map) {
536
106
    size_t batch_read_rows = 0;
537
106
    bool has_eof = false;
538
213
    for (auto& read_col_name : table_columns) {
539
213
        uint32_t block_pos = 0;
540
213
        RETURN_IF_ERROR(_get_block_column_pos(*block, read_col_name, &block_pos));
541
213
        auto reader_iter = _column_readers.find(read_col_name);
542
213
        if (reader_iter == _column_readers.end() || reader_iter->second == nullptr) {
543
0
            return Status::InternalError("Column reader for '{}' not found in parquet row group",
544
0
                                         read_col_name);
545
0
        }
546
547
213
        auto& column_with_type_and_name = block->safe_get_by_position(block_pos);
548
213
        auto& column_ptr = column_with_type_and_name.column;
549
213
        auto& column_type = column_with_type_and_name.type;
550
213
        bool is_dict_filter = false;
551
213
        for (auto& _dict_filter_col : _dict_filter_cols) {
552
0
            if (_dict_filter_col.first == read_col_name) {
553
0
                MutableColumnPtr dict_column = ColumnInt32::create();
554
0
                if (column_type->is_nullable()) {
555
0
                    block->get_by_position(block_pos).type =
556
0
                            std::make_shared<DataTypeNullable>(std::make_shared<DataTypeInt32>());
557
0
                    block->replace_by_position(
558
0
                            block_pos,
559
0
                            ColumnNullable::create(std::move(dict_column),
560
0
                                                   ColumnUInt8::create(dict_column->size(), 0)));
561
0
                } else {
562
0
                    block->get_by_position(block_pos).type = std::make_shared<DataTypeInt32>();
563
0
                    block->replace_by_position(block_pos, std::move(dict_column));
564
0
                }
565
0
                is_dict_filter = true;
566
0
                break;
567
0
            }
568
0
        }
569
570
213
        size_t col_read_rows = 0;
571
213
        bool col_eof = false;
572
        // Should reset _filter_map_index to 0 when reading next column.
573
        //        select_vector.reset();
574
213
        reader_iter->second->reset_filter_map_index();
575
489
        while (!col_eof && col_read_rows < batch_size) {
576
276
            size_t loop_rows = 0;
577
276
            RETURN_IF_ERROR(reader_iter->second->read_column_data(
578
276
                    column_ptr, column_type, _table_info_node_ptr->get_children_node(read_col_name),
579
276
                    filter_map, batch_size - col_read_rows, &loop_rows, &col_eof, is_dict_filter));
580
276
            VLOG_DEBUG << "[RowGroupReader] column '" << read_col_name
581
0
                       << "' loop_rows=" << loop_rows << " col_read_rows_so_far=" << col_read_rows
582
0
                       << std::endl;
583
276
            col_read_rows += loop_rows;
584
276
        }
585
213
        VLOG_DEBUG << "[RowGroupReader] column '" << read_col_name
586
0
                   << "' read_rows=" << col_read_rows << std::endl;
587
213
        if (batch_read_rows > 0 && batch_read_rows != col_read_rows) {
588
0
            LOG(WARNING) << "[RowGroupReader] Mismatched read rows among parquet columns. "
589
0
                            "previous_batch_read_rows="
590
0
                         << batch_read_rows << ", current_column='" << read_col_name
591
0
                         << "', current_col_read_rows=" << col_read_rows;
592
0
            return Status::Corruption("Can't read the same number of rows among parquet columns");
593
0
        }
594
213
        batch_read_rows = col_read_rows;
595
596
213
#ifndef NDEBUG
597
213
        column_ptr->sanity_check();
598
213
#endif
599
213
        if (col_eof) {
600
103
            has_eof = true;
601
103
        }
602
213
    }
603
604
106
    *read_rows = batch_read_rows;
605
106
    *batch_eof = has_eof;
606
607
106
    return Status::OK();
608
106
}
609
610
// Lazy-materialization read path.
// Reads only the predicate columns first, evaluates the filter conjuncts on
// them, and reads the remaining (lazy) columns only for batches where some
// rows survive. Batches that are filtered out entirely are accumulated in
// `_cached_filtered_rows` so the lazy columns can skip them in one pass later.
// @param block      destination block; holds predicate, lazy and temp columns
// @param batch_size max rows to read per predicate-column batch
// @param read_rows  out: number of rows left in `block` after filtering
// @param batch_eof  out: true when the predicate columns hit end of row group
Status RowGroupReader::_do_lazy_read(Block* block, size_t batch_size, size_t* read_rows,
                                     bool* batch_eof) {
    // Stays null until at least one predicate batch has been read; used below
    // to detect the "no rows at all" case.
    std::unique_ptr<FilterMap> filter_map_ptr = nullptr;
    size_t pre_read_rows;
    bool pre_eof;
    std::vector<uint32_t> columns_to_filter;
    uint32_t origin_column_num = block->columns();
    // Filter every column currently in the block (identity index list).
    columns_to_filter.resize(origin_column_num);
    for (uint32_t i = 0; i < origin_column_num; ++i) {
        columns_to_filter[i] = i;
    }
    IColumn::Filter result_filter;
    // Total predicate rows scanned in this call, across fully-filtered batches.
    size_t pre_raw_read_rows = 0;
    // Keep reading predicate batches until one has surviving rows (or EOF).
    while (!_state->is_cancelled()) {
        // read predicate columns
        pre_read_rows = 0;
        pre_eof = false;
        FilterMap filter_map; // empty map: predicate columns are read unfiltered
        int64_t batch_base_row = _total_read_rows;
        RETURN_IF_ERROR(_read_column_data(block, _lazy_read_ctx.predicate_columns.first, batch_size,
                                          &pre_read_rows, &pre_eof, filter_map));
        if (pre_read_rows == 0) {
            DCHECK_EQ(pre_eof, true);
            break;
        }
        pre_raw_read_rows += pre_read_rows;

        DCHECK(_table_format_reader);
        // Partition/missing/synthesized/generated predicate columns must be
        // populated before conjunct evaluation sees the block.
        RETURN_IF_ERROR(_table_format_reader->on_fill_partition_columns(
                block, pre_read_rows, _lazy_read_ctx.predicate_partition_col_names));
        RETURN_IF_ERROR(_table_format_reader->on_fill_missing_columns(
                block, pre_read_rows, _lazy_read_ctx.predicate_missing_col_names));
        if (_table_format_reader->has_synthesized_column_handlers() ||
            _table_format_reader->has_generated_column_handlers()) {
            // Row ids are only needed when synthesized/generated columns exist.
            RETURN_IF_ERROR(_get_current_batch_row_id(pre_read_rows));
        }
        RETURN_IF_ERROR(_table_format_reader->fill_synthesized_columns(block, pre_read_rows));
        RETURN_IF_ERROR(_table_format_reader->fill_generated_columns(block, pre_read_rows));
        RETURN_IF_ERROR(_build_pos_delete_filter(pre_read_rows));

#ifndef NDEBUG
        for (auto col : *block) {
            if (col.column->size() == 0) { // lazy read column.
                continue;
            }
            col.column->sanity_check();
            DCHECK(pre_read_rows == col.column->size())
                    << absl::Substitute("pre_read_rows = $0 , column rows = $1, col name = $2",
                                        pre_read_rows, col.column->size(), col.name);
        }
#endif

        bool can_filter_all = false;
        {
            SCOPED_RAW_TIMER(&_predicate_filter_time);

            // generate filter vector
            if (_lazy_read_ctx.resize_first_column) {
                // VExprContext.execute has an optimization, the filtering is executed when block->rows() > 0
                // The following process may be tricky and time-consuming, but we have no other way.
                block->get_by_position(0).column->assume_mutable()->resize(pre_read_rows);
            }
            result_filter.assign(pre_read_rows, static_cast<unsigned char>(1));
            std::vector<IColumn::Filter*> filters;
            if (_position_delete_ctx.has_filter) {
                // Position deletes are AND-ed into the conjunct result.
                filters.push_back(_pos_delete_filter_ptr.get());
            }

            VExprContextSPtrs filter_contexts;
            for (auto& conjunct : _filter_conjuncts) {
                filter_contexts.emplace_back(conjunct);
            }

            {
                RETURN_IF_ERROR(VExprContext::execute_conjuncts(filter_contexts, &filters, block,
                                                                &result_filter, &can_filter_all));
            }

            // Condition cache MISS: mark granules with surviving rows
            if (!can_filter_all) {
                _mark_condition_cache_granules(result_filter.data(), pre_read_rows, batch_base_row);
            }

            if (_lazy_read_ctx.resize_first_column) {
                // We have to clean the first column to insert right data.
                block->get_by_position(0).column->assume_mutable()->clear();
            }
        }

        const uint8_t* __restrict filter_map_data = result_filter.data();
        filter_map_ptr = std::make_unique<FilterMap>();
        RETURN_IF_ERROR(filter_map_ptr->init(filter_map_data, pre_read_rows, can_filter_all));
        if (filter_map_ptr->filter_all()) {
            {
                SCOPED_RAW_TIMER(&_predicate_filter_time);
                // Whole batch rejected: clear all predicate-side columns so the
                // next loop iteration starts from empty columns again.
                for (const auto& col : _lazy_read_ctx.predicate_columns.first) {
                    // clean block to read predicate columns
                    uint32_t block_pos = 0;
                    RETURN_IF_ERROR(_get_block_column_pos(*block, col, &block_pos));
                    block->get_by_position(block_pos).column->assume_mutable()->clear();
                }
                for (const auto& col : _lazy_read_ctx.predicate_partition_columns) {
                    uint32_t block_pos = 0;
                    RETURN_IF_ERROR(_get_block_column_pos(*block, col.first, &block_pos));
                    block->get_by_position(block_pos).column->assume_mutable()->clear();
                }
                for (const auto& col : _lazy_read_ctx.predicate_missing_columns) {
                    uint32_t block_pos = 0;
                    RETURN_IF_ERROR(_get_block_column_pos(*block, col.first, &block_pos));
                    block->get_by_position(block_pos).column->assume_mutable()->clear();
                }
                RETURN_IF_ERROR(_table_format_reader->clear_synthesized_columns(block));
                RETURN_IF_ERROR(_table_format_reader->clear_generated_columns(block));
                Block::erase_useless_column(block, origin_column_num);
            }

            if (!pre_eof) {
                // If continuous batches are skipped, we can cache them to skip a whole page
                _cached_filtered_rows += pre_read_rows;
                if (pre_raw_read_rows >= config::doris_scanner_row_num) {
                    // Yield back to the caller with an empty batch to bound the
                    // amount of work done in one call.
                    *read_rows = 0;
                    RETURN_IF_ERROR(_convert_dict_cols_to_string_cols(block));
                    return Status::OK();
                }
            } else { // pre_eof
                // If filter_map_ptr->filter_all() and pre_eof, we can skip whole row group.
                *read_rows = 0;
                *batch_eof = true;
                _lazy_read_filtered_rows += (pre_read_rows + _cached_filtered_rows);
                RETURN_IF_ERROR(_convert_dict_cols_to_string_cols(block));
                return Status::OK();
            }
        } else {
            // Some rows survived: proceed to the lazy-column read below.
            break;
        }
    }
    if (_state->is_cancelled()) {
        return Status::Cancelled("cancelled");
    }

    if (filter_map_ptr == nullptr) {
        // Loop exited without ever reading a batch (pre_read_rows == 0 on the
        // first iteration): nothing to produce.
        DCHECK_EQ(pre_read_rows + _cached_filtered_rows, 0);
        *read_rows = 0;
        *batch_eof = true;
        return Status::OK();
    }

    FilterMap& filter_map = *filter_map_ptr;
    // Owns the merged filter buffer when cached rows must be prepended;
    // must outlive the _read_column_data call below.
    DorisUniqueBufferPtr<uint8_t> rebuild_filter_map = nullptr;
    if (_cached_filtered_rows != 0) {
        RETURN_IF_ERROR(_rebuild_filter_map(filter_map, rebuild_filter_map, pre_read_rows));
        pre_read_rows += _cached_filtered_rows;
        _cached_filtered_rows = 0;
    }

    // lazy read columns
    size_t lazy_read_rows;
    bool lazy_eof;
    RETURN_IF_ERROR(_read_column_data(block, _lazy_read_ctx.lazy_read_columns, pre_read_rows,
                                      &lazy_read_rows, &lazy_eof, filter_map));

    if (pre_read_rows != lazy_read_rows) {
        return Status::Corruption("Can't read the same number of rows when doing lazy read");
    }
    // pre_eof ^ lazy_eof
    // we set pre_read_rows as batch_size for lazy read columns, so pre_eof != lazy_eof

    // filter data in predicate columns, and remove filter column
    {
        SCOPED_RAW_TIMER(&_predicate_filter_time);
        if (filter_map.has_filter()) {
            // Lazy columns were read pre-filtered; only the predicate columns
            // still contain rejected rows and need filtering here.
            RETURN_IF_CATCH_EXCEPTION(Block::filter_block_internal(
                    block, _lazy_read_ctx.all_predicate_col_ids, result_filter));
            Block::erase_useless_column(block, origin_column_num);

        } else {
            Block::erase_useless_column(block, origin_column_num);
        }
    }

    RETURN_IF_ERROR(_convert_dict_cols_to_string_cols(block));

    // Derive the surviving row count from whichever column is non-empty, and
    // assert (debug only) that all non-empty columns agree.
    size_t column_num = block->columns();
    size_t column_size = 0;
    for (int i = 0; i < column_num; ++i) {
        size_t cz = block->get_by_position(i).column->size();
        if (column_size != 0 && cz != 0) {
            DCHECK_EQ(column_size, cz);
        }
        if (cz != 0) {
            column_size = cz;
        }
    }
    _lazy_read_filtered_rows += pre_read_rows - column_size;
    *read_rows = column_size;

    *batch_eof = pre_eof;
    DCHECK(_table_format_reader);
    // Fill non-predicate partition/missing columns for the surviving rows.
    RETURN_IF_ERROR(_table_format_reader->on_fill_partition_columns(
            block, column_size, _lazy_read_ctx.partition_col_names));
    RETURN_IF_ERROR(_table_format_reader->on_fill_missing_columns(
            block, column_size, _lazy_read_ctx.missing_col_names));
#ifndef NDEBUG
    for (auto col : *block) {
        col.column->sanity_check();
        DCHECK(block->rows() == col.column->size())
                << absl::Substitute("block rows = $0 , column rows = $1, col name = $2",
                                    block->rows(), col.column->size(), col.name);
    }
#endif
    return Status::OK();
}
822
823
// Prepend `_cached_filtered_rows` zero entries (rows already rejected by
// earlier, fully-filtered batches) in front of the current batch's filter
// map, so the subsequent lazy-column read skips the cached rows as well.
// `filter_map_data` receives ownership of the merged buffer, which must
// outlive `filter_map`.
Status RowGroupReader::_rebuild_filter_map(FilterMap& filter_map,
                                           DorisUniqueBufferPtr<uint8_t>& filter_map_data,
                                           size_t pre_read_rows) const {
    if (_cached_filtered_rows == 0) {
        // Nothing cached; the existing filter map already covers every row.
        return Status::OK();
    }
    const size_t merged_rows = _cached_filtered_rows + pre_read_rows;
    if (filter_map.filter_all()) {
        // Everything is rejected anyway; just widen the all-filtered map.
        RETURN_IF_ERROR(filter_map.init(nullptr, merged_rows, true));
        return Status::OK();
    }

    filter_map_data = make_unique_buffer<uint8_t>(merged_rows);
    uint8_t* merged = filter_map_data.get();
    // All cached rows were filtered out: mark their slots 0.
    std::fill_n(merged, _cached_filtered_rows, static_cast<uint8_t>(0));
    const uint8_t* current = filter_map.filter_map_data();
    if (current == nullptr) {
        // No per-row data and not filter_all(): every current row is kept.
        std::fill_n(merged + _cached_filtered_rows, pre_read_rows, static_cast<uint8_t>(1));
    } else {
        std::copy_n(current, pre_read_rows, merged + _cached_filtered_rows);
    }
    RETURN_IF_ERROR(filter_map.init(merged, merged_rows, false));
    return Status::OK();
}
852
853
// Materialize partition-column values into the block.
// Each entry maps a column name to (textual partition value, slot descriptor);
// the value is deserialized through the slot's serde and replicated `rows`
// times into the corresponding block column.
Status RowGroupReader::_fill_partition_columns(
        Block* block, size_t rows,
        const std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>&
                partition_columns) {
    DataTypeSerDe::FormatOptions format_options;
    for (const auto& [col_name, value_and_slot] : partition_columns) {
        uint32_t pos = 0;
        RETURN_IF_ERROR(_get_block_column_pos(*block, col_name, &pos));
        auto column_ptr = block->get_by_position(pos).column;
        // The column is owned by the (mutable) block, so dropping const here
        // only restores mutability we already have.
        auto* writable = const_cast<IColumn*>(column_ptr.get());
        const auto& [value, slot_desc] = value_and_slot;
        auto serde = slot_desc->get_data_type_ptr()->get_serde();
        Slice raw(value.data(), value.size());
        uint64_t written = 0;
        // Be careful when reading empty rows from parquet row groups.
        Status st = serde->deserialize_column_from_fixed_json(*writable, raw, rows, &written,
                                                              format_options);
        if (!st.ok()) {
            return Status::InternalError("Failed to fill partition column: {}={}",
                                         slot_desc->col_name(), value);
        }
        if (written != rows) {
            return Status::InternalError(
                    "Failed to fill partition column: {}={} ."
                    "Number of rows expected to be written : {}, number of rows actually written : "
                    "{}",
                    slot_desc->col_name(), value, written, rows);
        }
    }
    return Status::OK();
}
885
886
// Fill columns absent from the parquet file.
// For each entry: a null expression context means "no default" and the column
// is filled with `rows` nulls; otherwise the default-value expression is
// evaluated and its (possibly const) result is expanded to `rows` rows and
// placed into the block at the column's position.
Status RowGroupReader::_fill_missing_columns(
        Block* block, size_t rows,
        const std::unordered_map<std::string, VExprContextSPtr>& missing_columns) {
    for (const auto& kv : missing_columns) {
        uint32_t block_pos = 0;
        RETURN_IF_ERROR(_get_block_column_pos(*block, kv.first, &block_pos));
        if (kv.second == nullptr) {
            // no default column, fill with null
            auto mutable_column = block->get_by_position(block_pos).column->assume_mutable();
            auto* nullable_column = assert_cast<ColumnNullable*>(mutable_column.get());
            nullable_column->insert_many_defaults(rows);
        } else {
            // fill with default value
            const auto& ctx = kv.second;
            ColumnPtr result_column_ptr;
            // PT1 => dest primitive type
            RETURN_IF_ERROR(ctx->execute(block, result_column_ptr));
            // use_count() == 1 means execute() produced a fresh column (not a
            // reference into the block), which is safe to resize in place.
            if (result_column_ptr->use_count() == 1) {
                // call resize because the first column of _src_block_ptr may not be filled by reader,
                // so _src_block_ptr->rows() may return wrong result, cause the column created by `ctx->execute()`
                // has only one row.
                auto mutable_column = result_column_ptr->assume_mutable();
                mutable_column->resize(rows);
                // result_column_ptr maybe a ColumnConst, convert it to a normal column
                result_column_ptr = result_column_ptr->convert_to_full_column_if_const();
                auto origin_column_type = block->get_by_position(block_pos).type;
                bool is_nullable = origin_column_type->is_nullable();
                // Wrap in a nullable column when the declared type requires it.
                block->replace_by_position(block_pos, is_nullable ? make_nullable(result_column_ptr)
                                                                  : result_column_ptr);
            }
        }
    }
    return Status::OK();
}
920
921
// Resolve `column_name` to its position inside `block` using the
// name -> index map held by the reader. Validates that the map exists, the
// name is present, and the mapped index is in range before writing the
// result to `position`.
Status RowGroupReader::_get_block_column_pos(const Block& block, const std::string& column_name,
                                             uint32_t* position) const {
    if (_col_name_to_block_idx == nullptr) {
        return Status::InternalError(
                "Column name to block index map is not set when reading parquet column '{}', "
                "block: "
                "{}",
                column_name, block.dump_structure());
    }
    const auto it = _col_name_to_block_idx->find(column_name);
    if (it == _col_name_to_block_idx->end()) {
        return Status::InternalError("Column '{}' not found in block index map, block: {}",
                                     column_name, block.dump_structure());
    }
    // Guard against a stale map pointing past the end of the block.
    if (it->second >= block.columns()) {
        return Status::InternalError(
                "Column '{}' maps to invalid block position {}, block columns: {}, block: {}",
                column_name, it->second, block.columns(), block.dump_structure());
    }
    *position = it->second;
    return Status::OK();
}
943
944
// "Empty" batch read: advance row accounting without materializing any file
// columns (used when no real columns need reading). With position deletes,
// counts how many deletes fall inside [start_row_id, end_row_id) and
// subtracts them from *read_rows; otherwise simply drains `_remaining_rows`.
// Sets *modify_row_ids (and rebuilds `_current_batch_row_ids`) when
// synthesized/generated column handlers need per-row ids.
Status RowGroupReader::_read_empty_batch(size_t batch_size, size_t* read_rows, bool* batch_eof,
                                         bool* modify_row_ids) {
    *modify_row_ids = false;
    if (_position_delete_ctx.has_filter) {
        int64_t start_row_id = _position_delete_ctx.current_row_id;
        int64_t end_row_id = std::min(_position_delete_ctx.current_row_id + (int64_t)batch_size,
                                      _position_delete_ctx.last_row_id);
        int64_t num_delete_rows = 0;
        // Remembers where this batch's deletes start, for the row-id pass below.
        auto before_index = _position_delete_ctx.index;
        // delete_rows is consumed in order; each entry is classified as
        // before / inside / after the current batch window.
        while (_position_delete_ctx.index < _position_delete_ctx.end_index) {
            const int64_t& delete_row_id =
                    _position_delete_ctx.delete_rows[_position_delete_ctx.index];
            if (delete_row_id < start_row_id) {
                // Stale delete from before this batch: skip it entirely.
                _position_delete_ctx.index++;
                before_index = _position_delete_ctx.index;
            } else if (delete_row_id < end_row_id) {
                num_delete_rows++;
                _position_delete_ctx.index++;
            } else { // delete_row_id >= end_row_id
                break;
            }
        }
        *read_rows = end_row_id - start_row_id - num_delete_rows;
        _position_delete_ctx.current_row_id = end_row_id;
        *batch_eof = _position_delete_ctx.current_row_id == _position_delete_ctx.last_row_id;

        if (_table_format_reader->has_synthesized_column_handlers() ||
            _table_format_reader->has_generated_column_handlers()) {
            *modify_row_ids = true;
            _current_batch_row_ids.clear();
            _current_batch_row_ids.resize(*read_rows);
            size_t idx = 0;
            // Emit every row id in the window except the deleted ones,
            // walking the deletes consumed above via before_index.
            for (auto id = start_row_id; id < end_row_id; id++) {
                if (before_index < _position_delete_ctx.index &&
                    id == _position_delete_ctx.delete_rows[before_index]) {
                    before_index++;
                    continue;
                }
                _current_batch_row_ids[idx++] = (rowid_t)id;
            }
        }
    } else {
        // No position deletes: just count down the remaining rows.
        if (batch_size < _remaining_rows) {
            *read_rows = batch_size;
            _remaining_rows -= batch_size;
            *batch_eof = false;
        } else {
            *read_rows = _remaining_rows;
            _remaining_rows = 0;
            *batch_eof = true;
        }
        if (_table_format_reader->has_synthesized_column_handlers() ||
            _table_format_reader->has_generated_column_handlers()) {
            *modify_row_ids = true;
            RETURN_IF_ERROR(_get_current_batch_row_id(*read_rows));
        }
    }
    _total_read_rows += *read_rows;
    return Status::OK();
}
1004
1005
5
// Compute the row ids of the `read_rows` rows the current batch covers,
// walking this row group's read ranges starting from the `_total_read_rows`
// already consumed, and offsetting by the row group's first row so the ids
// are file-absolute. Results go into `_current_batch_row_ids`.
Status RowGroupReader::_get_current_batch_row_id(size_t read_rows) {
    _current_batch_row_ids.clear();
    _current_batch_row_ids.resize(read_rows);

    int64_t idx = 0;
    int64_t read_range_rows = 0; // rows covered by the ranges visited so far
    for (size_t range_idx = 0; range_idx < _read_ranges.range_size(); range_idx++) {
        auto range = _read_ranges.get_range(range_idx);
        if (read_rows == 0) {
            break; // the whole batch has been mapped
        }
        // Skip ranges fully consumed by previous batches.
        if (read_range_rows + (range.to() - range.from()) > _total_read_rows) {
            // First unread row inside this range.
            int64_t fi =
                    std::max(_total_read_rows, read_range_rows) - read_range_rows + range.from();
            // Rows available in this range, clamped to what the batch still needs.
            size_t len = std::min(read_rows, (size_t)(std::max(range.to(), fi) - fi));

            read_rows -= len;

            // Fixed: `auto i = 0` deduced `int`, causing a signed/unsigned
            // comparison against the size_t `len`.
            for (size_t i = 0; i < len; i++) {
                _current_batch_row_ids[idx++] =
                        (rowid_t)(fi + i + _current_row_group_idx.first_row);
            }
        }
        read_range_rows += range.to() - range.from();
    }
    return Status::OK();
}
1032
1033
95
// Build `_pos_delete_filter_ptr` (1 = keep, 0 = position-deleted) for the
// next `read_rows` rows. Each pending delete row id is mapped through the
// row group's read ranges to an index within the current batch; deletes
// beyond the batch stop the scan. Always advances `_total_read_rows` by
// `read_rows` before returning.
Status RowGroupReader::_build_pos_delete_filter(size_t read_rows) {
    if (!_position_delete_ctx.has_filter) {
        // No deletes for this row group: drop any stale filter and just
        // account the rows.
        _pos_delete_filter_ptr.reset(nullptr);
        _total_read_rows += read_rows;
        return Status::OK();
    }
    // Start from all-keep and punch out deleted positions below.
    _pos_delete_filter_ptr.reset(new IColumn::Filter(read_rows, 1));
    auto* __restrict _pos_delete_filter_data = _pos_delete_filter_ptr->data();
    while (_position_delete_ctx.index < _position_delete_ctx.end_index) {
        // Delete row id relative to the start of this row group.
        const int64_t delete_row_index_in_row_group =
                _position_delete_ctx.delete_rows[_position_delete_ctx.index] -
                _position_delete_ctx.first_row_id;
        int64_t read_range_rows = 0;
        size_t remaining_read_rows = _total_read_rows + read_rows;
        for (size_t range_idx = 0; range_idx < _read_ranges.range_size(); range_idx++) {
            auto range = _read_ranges.get_range(range_idx);
            if (delete_row_index_in_row_group < range.from()) {
                // Delete falls in a gap we never read: discard it and move on
                // to the next delete entry.
                ++_position_delete_ctx.index;
                break;
            } else if (delete_row_index_in_row_group < range.to()) {
                // Delete lands inside this range; translate to a batch index.
                int64_t index = (delete_row_index_in_row_group - range.from()) + read_range_rows -
                                _total_read_rows;
                if (index > read_rows - 1) {
                    // Belongs to a later batch: stop here, the delete stays
                    // pending for next time.
                    _total_read_rows += read_rows;
                    return Status::OK();
                }
                _pos_delete_filter_data[index] = 0;
                ++_position_delete_ctx.index;
                break;
            } else { // delete_row >= range.last_row
            }

            int64_t range_size = range.to() - range.from();
            // Don't search next range when there is no remaining_read_rows.
            if (remaining_read_rows <= range_size) {
                _total_read_rows += read_rows;
                return Status::OK();
            } else {
                remaining_read_rows -= range_size;
                read_range_rows += range_size;
            }
        }
    }
    _total_read_rows += read_rows;
    return Status::OK();
}
1079
1080
// need exception safety
1081
Status RowGroupReader::_filter_block(Block* block, int column_to_keep,
1082
51
                                     const std::vector<uint32_t>& columns_to_filter) {
1083
51
    if (_pos_delete_filter_ptr) {
1084
0
        RETURN_IF_CATCH_EXCEPTION(
1085
0
                Block::filter_block_internal(block, columns_to_filter, (*_pos_delete_filter_ptr)));
1086
0
    }
1087
51
    Block::erase_useless_column(block, column_to_keep);
1088
1089
51
    return Status::OK();
1090
51
}
1091
1092
6
// For each dictionary-encoded filter column, evaluate the column's conjuncts
// directly against the dictionary values. If no dict value passes, the whole
// row group is filtered. Otherwise the string predicates are rewritten into
// integer predicates on the dict codes (unless too many codes survive, in
// which case the original conjuncts are kept and applied row-wise).
Status RowGroupReader::_rewrite_dict_predicates() {
    SCOPED_RAW_TIMER(&_dict_filter_rewrite_time);
    // Manual iterator loop: entries may be erased mid-iteration (see below).
    for (auto it = _dict_filter_cols.begin(); it != _dict_filter_cols.end();) {
        std::string& dict_filter_col_name = it->first;
        int slot_id = it->second;
        // 1. Get dictionary values to a string column.
        MutableColumnPtr dict_value_column = ColumnString::create();
        bool has_dict = false;
        RETURN_IF_ERROR(_column_readers[dict_filter_col_name]->read_dict_values_to_column(
                dict_value_column, &has_dict));
#ifndef NDEBUG
        dict_value_column->sanity_check();
#endif
        size_t dict_value_column_size = dict_value_column->size();
        DCHECK(has_dict);
        // 2. Build a temp block from the dict string column, then execute conjuncts and filter block.
        // 2.1 Build a temp block from the dict string column to match the conjuncts executing.
        Block temp_block;
        int dict_pos = -1;
        int index = 0;
        for (const auto slot_desc : _tuple_descriptor->slots()) {
            if (slot_desc->id() == slot_id) {
                auto data_type = slot_desc->get_data_type_ptr();
                if (data_type->is_nullable()) {
                    temp_block.insert(
                            {ColumnNullable::create(
                                     std::move(
                                             dict_value_column), // NOLINT(bugprone-use-after-move)
                                     ColumnUInt8::create(dict_value_column_size, 0)),
                             std::make_shared<DataTypeNullable>(std::make_shared<DataTypeString>()),
                             ""});
                } else {
                    temp_block.insert(
                            {std::move(dict_value_column), std::make_shared<DataTypeString>(), ""});
                }
                dict_pos = index;

            } else {
                // Other slots get empty placeholder columns so the conjuncts'
                // slot references resolve by position.
                temp_block.insert(ColumnWithTypeAndName(slot_desc->get_empty_mutable_column(),
                                                        slot_desc->get_data_type_ptr(),
                                                        slot_desc->col_name()));
            }
            ++index;
        }

        // 2.2 Execute conjuncts.
        VExprContextSPtrs ctxs;
        auto iter = _slot_id_to_filter_conjuncts->find(slot_id);
        if (iter != _slot_id_to_filter_conjuncts->end()) {
            for (auto& ctx : iter->second) {
                ctxs.push_back(ctx);
            }
        } else {
            std::stringstream msg;
            msg << "_slot_id_to_filter_conjuncts: slot_id [" << slot_id << "] not found";
            return Status::NotFound(msg.str());
        }

        if (dict_pos != 0) {
            // VExprContext.execute has an optimization, the filtering is executed when block->rows() > 0
            // The following process may be tricky and time-consuming, but we have no other way.
            temp_block.get_by_position(0).column->assume_mutable()->resize(dict_value_column_size);
        }
        IColumn::Filter result_filter(temp_block.rows(), 1);
        bool can_filter_all;
        {
            RETURN_IF_ERROR(VExprContext::execute_conjuncts(ctxs, nullptr, &temp_block,
                                                            &result_filter, &can_filter_all));
        }
        if (dict_pos != 0) {
            // We have to clean the first column to insert right data.
            temp_block.get_by_position(0).column->assume_mutable()->clear();
        }

        // If can_filter_all = true, can filter this row group.
        if (can_filter_all) {
            _is_row_group_filtered = true;
            return Status::OK();
        }

        // 3. Get dict codes.
        // The dict code of a value is its index in the dictionary column, so
        // the surviving filter positions are the surviving codes.
        std::vector<int32_t> dict_codes;
        for (size_t i = 0; i < result_filter.size(); ++i) {
            if (result_filter[i]) {
                dict_codes.emplace_back(i);
            }
        }

        // About Performance: if dict_column size is too large, it will generate a large IN filter.
        if (dict_codes.size() > MAX_DICT_CODE_PREDICATE_TO_REWRITE) {
            // Give up on rewriting this column: keep its original conjuncts
            // for row-level evaluation instead.
            it = _dict_filter_cols.erase(it);
            for (auto& ctx : ctxs) {
                _filter_conjuncts.push_back(ctx);
            }
            continue;
        }

        // 4. Rewrite conjuncts.
        RETURN_IF_ERROR(_rewrite_dict_conjuncts(
                dict_codes, slot_id, temp_block.get_by_position(dict_pos).column->is_nullable()));
        ++it;
    }
    return Status::OK();
}
1196
1197
Status RowGroupReader::_rewrite_dict_conjuncts(std::vector<int32_t>& dict_codes, int slot_id,
1198
0
                                               bool is_nullable) {
1199
0
    VExprSPtr root;
1200
0
    if (dict_codes.size() == 1) {
1201
0
        {
1202
0
            TFunction fn;
1203
0
            TFunctionName fn_name;
1204
0
            fn_name.__set_db_name("");
1205
0
            fn_name.__set_function_name("eq");
1206
0
            fn.__set_name(fn_name);
1207
0
            fn.__set_binary_type(TFunctionBinaryType::BUILTIN);
1208
0
            std::vector<TTypeDesc> arg_types;
1209
0
            arg_types.push_back(create_type_desc(PrimitiveType::TYPE_INT));
1210
0
            arg_types.push_back(create_type_desc(PrimitiveType::TYPE_INT));
1211
0
            fn.__set_arg_types(arg_types);
1212
0
            fn.__set_ret_type(create_type_desc(PrimitiveType::TYPE_BOOLEAN));
1213
0
            fn.__set_has_var_args(false);
1214
1215
0
            TExprNode texpr_node;
1216
0
            texpr_node.__set_type(create_type_desc(PrimitiveType::TYPE_BOOLEAN));
1217
0
            texpr_node.__set_node_type(TExprNodeType::BINARY_PRED);
1218
0
            texpr_node.__set_opcode(TExprOpcode::EQ);
1219
0
            texpr_node.__set_fn(fn);
1220
0
            texpr_node.__set_num_children(2);
1221
0
            texpr_node.__set_is_nullable(is_nullable);
1222
0
            root = VectorizedFnCall::create_shared(texpr_node);
1223
0
        }
1224
0
        {
1225
0
            SlotDescriptor* slot = nullptr;
1226
0
            const std::vector<SlotDescriptor*>& slots = _tuple_descriptor->slots();
1227
0
            for (auto each : slots) {
1228
0
                if (each->id() == slot_id) {
1229
0
                    slot = each;
1230
0
                    break;
1231
0
                }
1232
0
            }
1233
0
            root->add_child(VSlotRef::create_shared(slot));
1234
0
        }
1235
0
        {
1236
0
            TExprNode texpr_node;
1237
0
            texpr_node.__set_node_type(TExprNodeType::INT_LITERAL);
1238
0
            texpr_node.__set_type(create_type_desc(TYPE_INT));
1239
0
            TIntLiteral int_literal;
1240
0
            int_literal.__set_value(dict_codes[0]);
1241
0
            texpr_node.__set_int_literal(int_literal);
1242
0
            texpr_node.__set_is_nullable(is_nullable);
1243
0
            root->add_child(VLiteral::create_shared(texpr_node));
1244
0
        }
1245
0
    } else {
1246
0
        {
1247
0
            TTypeDesc type_desc = create_type_desc(PrimitiveType::TYPE_BOOLEAN);
1248
0
            TExprNode node;
1249
0
            node.__set_type(type_desc);
1250
0
            node.__set_node_type(TExprNodeType::IN_PRED);
1251
0
            node.in_predicate.__set_is_not_in(false);
1252
0
            node.__set_opcode(TExprOpcode::FILTER_IN);
1253
            // VDirectInPredicate assumes is_nullable = false.
1254
0
            node.__set_is_nullable(false);
1255
1256
0
            std::shared_ptr<HybridSetBase> hybrid_set(
1257
0
                    create_set(PrimitiveType::TYPE_INT, dict_codes.size(), false));
1258
0
            for (int j = 0; j < dict_codes.size(); ++j) {
1259
0
                hybrid_set->insert(&dict_codes[j]);
1260
0
            }
1261
0
            root = VDirectInPredicate::create_shared(node, hybrid_set);
1262
0
        }
1263
0
        {
1264
0
            SlotDescriptor* slot = nullptr;
1265
0
            const std::vector<SlotDescriptor*>& slots = _tuple_descriptor->slots();
1266
0
            for (auto each : slots) {
1267
0
                if (each->id() == slot_id) {
1268
0
                    slot = each;
1269
0
                    break;
1270
0
                }
1271
0
            }
1272
0
            root->add_child(VSlotRef::create_shared(slot));
1273
0
        }
1274
0
    }
1275
0
    VExprContextSPtr rewritten_conjunct_ctx = VExprContext::create_shared(root);
1276
0
    RETURN_IF_ERROR(rewritten_conjunct_ctx->prepare(_state, *_row_descriptor));
1277
0
    RETURN_IF_ERROR(rewritten_conjunct_ctx->open(_state));
1278
0
    _dict_filter_conjuncts.push_back(rewritten_conjunct_ctx);
1279
0
    _filter_conjuncts.push_back(rewritten_conjunct_ctx);
1280
0
    return Status::OK();
1281
0
}
1282
1283
85
Status RowGroupReader::_convert_dict_cols_to_string_cols(Block* block) {
1284
85
    for (auto& dict_filter_cols : _dict_filter_cols) {
1285
0
        uint32_t block_pos = 0;
1286
0
        RETURN_IF_ERROR(_get_block_column_pos(*block, dict_filter_cols.first, &block_pos));
1287
0
        auto reader_iter = _column_readers.find(dict_filter_cols.first);
1288
0
        if (reader_iter == _column_readers.end() || reader_iter->second == nullptr) {
1289
0
            return Status::InternalError("Column reader for '{}' not found in parquet row group",
1290
0
                                         dict_filter_cols.first);
1291
0
        }
1292
0
        ColumnWithTypeAndName& column_with_type_and_name = block->get_by_position(block_pos);
1293
0
        const ColumnPtr& column = column_with_type_and_name.column;
1294
0
        if (const auto* nullable_column = check_and_get_column<ColumnNullable>(*column)) {
1295
0
            const ColumnPtr& nested_column = nullable_column->get_nested_column_ptr();
1296
0
            const auto* dict_column = assert_cast<const ColumnInt32*>(nested_column.get());
1297
0
            DCHECK(dict_column);
1298
1299
0
            auto string_column = DORIS_TRY(
1300
0
                    reader_iter->second->convert_dict_column_to_string_column(dict_column));
1301
1302
0
            column_with_type_and_name.type =
1303
0
                    std::make_shared<DataTypeNullable>(std::make_shared<DataTypeString>());
1304
0
            block->replace_by_position(
1305
0
                    block_pos, ColumnNullable::create(std::move(string_column),
1306
0
                                                      nullable_column->get_null_map_column_ptr()));
1307
0
        } else {
1308
0
            const auto* dict_column = assert_cast<const ColumnInt32*>(column.get());
1309
0
            auto string_column = DORIS_TRY(
1310
0
                    reader_iter->second->convert_dict_column_to_string_column(dict_column));
1311
1312
0
            column_with_type_and_name.type = std::make_shared<DataTypeString>();
1313
0
            block->replace_by_position(block_pos, std::move(string_column));
1314
0
        }
1315
0
    }
1316
85
    return Status::OK();
1317
85
}
1318
1319
38
ParquetColumnReader::ColumnStatistics RowGroupReader::merged_column_statistics() {
1320
38
    ParquetColumnReader::ColumnStatistics st;
1321
108
    for (auto& reader : _column_readers) {
1322
108
        auto ost = reader.second->column_statistics();
1323
108
        st.merge(ost);
1324
108
    }
1325
38
    return st;
1326
38
}
1327
1328
} // namespace doris