Coverage Report

Created: 2026-05-17 12:14

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/storage/segment/segment_iterator.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "storage/segment/segment_iterator.h"
19
20
#include <assert.h>
21
#include <gen_cpp/Exprs_types.h>
22
#include <gen_cpp/Opcodes_types.h>
23
#include <gen_cpp/Types_types.h>
24
#include <gen_cpp/olap_file.pb.h>
25
26
#include <algorithm>
27
#include <boost/iterator/iterator_facade.hpp>
28
#include <cstdint>
29
#include <memory>
30
#include <numeric>
31
#include <set>
32
#include <unordered_map>
33
#include <utility>
34
#include <vector>
35
36
#include "cloud/config.h"
37
#include "common/compiler_util.h" // IWYU pragma: keep
38
#include "common/config.h"
39
#include "common/consts.h"
40
#include "common/exception.h"
41
#include "common/logging.h"
42
#include "common/metrics/doris_metrics.h"
43
#include "common/object_pool.h"
44
#include "common/status.h"
45
#include "core/assert_cast.h"
46
#include "core/block/column_with_type_and_name.h"
47
#include "core/column/column.h"
48
#include "core/column/column_const.h"
49
#include "core/column/column_nothing.h"
50
#include "core/column/column_nullable.h"
51
#include "core/column/column_string.h"
52
#include "core/column/column_variant.h"
53
#include "core/column/column_vector.h"
54
#include "core/data_type/data_type.h"
55
#include "core/data_type/data_type_factory.hpp"
56
#include "core/data_type/data_type_number.h"
57
#include "core/data_type/define_primitive_type.h"
58
#include "core/field.h"
59
#include "core/string_ref.h"
60
#include "core/typeid_cast.h"
61
#include "core/types.h"
62
#include "exprs/function/array/function_array_index.h"
63
#include "exprs/vexpr.h"
64
#include "exprs/vexpr_context.h"
65
#include "exprs/virtual_slot_ref.h"
66
#include "exprs/vliteral.h"
67
#include "exprs/vslot_ref.h"
68
#include "io/cache/cached_remote_file_reader.h"
69
#include "io/fs/file_reader.h"
70
#include "io/io_common.h"
71
#include "runtime/query_context.h"
72
#include "runtime/runtime_predicate.h"
73
#include "runtime/runtime_state.h"
74
#include "runtime/thread_context.h"
75
#include "storage/compaction/collection_similarity.h"
76
#include "storage/field.h"
77
#include "storage/id_manager.h"
78
#include "storage/index/ann/ann_index.h"
79
#include "storage/index/ann/ann_index_iterator.h"
80
#include "storage/index/ann/ann_index_reader.h"
81
#include "storage/index/ann/ann_topn_runtime.h"
82
#include "storage/index/index_file_reader.h"
83
#include "storage/index/index_iterator.h"
84
#include "storage/index/index_query_context.h"
85
#include "storage/index/index_reader_helper.h"
86
#include "storage/index/indexed_column_reader.h"
87
#include "storage/index/inverted/inverted_index_reader.h"
88
#include "storage/index/ordinal_page_index.h"
89
#include "storage/index/primary_key_index.h"
90
#include "storage/index/short_key_index.h"
91
#include "storage/iterators.h"
92
#include "storage/olap_common.h"
93
#include "storage/predicate/bloom_filter_predicate.h"
94
#include "storage/predicate/column_predicate.h"
95
#include "storage/predicate/like_column_predicate.h"
96
#include "storage/schema.h"
97
#include "storage/segment/column_reader.h"
98
#include "storage/segment/column_reader_cache.h"
99
#include "storage/segment/condition_cache.h"
100
#include "storage/segment/row_ranges.h"
101
#include "storage/segment/segment.h"
102
#include "storage/segment/segment_prefetcher.h"
103
#include "storage/segment/variant/variant_column_reader.h"
104
#include "storage/segment/virtual_column_iterator.h"
105
#include "storage/tablet/tablet_schema.h"
106
#include "storage/types.h"
107
#include "storage/utils.h"
108
#include "util/concurrency_stats.h"
109
#include "util/defer_op.h"
110
#include "util/simd/bits.h"
111
112
namespace doris {
113
using namespace ErrorCode;
114
namespace segment_v2 {
115
116
2.83k
SegmentIterator::~SegmentIterator() = default;
117
118
2.82k
void SegmentIterator::_init_row_bitmap_by_condition_cache() {
119
    // Only consult the condition cache when there are column predicates or pushed-down exprs to evaluate
120
2.82k
    if (!_col_predicates.empty() ||
121
2.82k
        (_enable_common_expr_pushdown && !_remaining_conjunct_roots.empty())) {
122
0
        if (_opts.condition_cache_digest) {
123
0
            auto* condition_cache = ConditionCache::instance();
124
0
            ConditionCache::CacheKey cache_key(_opts.rowset_id, _segment->id(),
125
0
                                               _opts.condition_cache_digest);
126
127
            // Increment search count when digest != 0
128
0
            DorisMetrics::instance()->condition_cache_search_count->increment(1);
129
130
0
            ConditionCacheHandle handle;
131
0
            _find_condition_cache = condition_cache->lookup(cache_key, &handle);
132
133
            // Increment hit count if cache lookup is successful
134
0
            if (_find_condition_cache) {
135
0
                DorisMetrics::instance()->condition_cache_hit_count->increment(1);
136
0
                if (_opts.runtime_state) {
137
0
                    VLOG_DEBUG << "Condition cache hit, query id: "
138
0
                               << print_id(_opts.runtime_state->query_id())
139
0
                               << ", segment id: " << _segment->id()
140
0
                               << ", cache digest: " << _opts.condition_cache_digest
141
0
                               << ", rowset id: " << _opts.rowset_id.to_string();
142
0
                }
143
0
            }
144
145
0
            auto num_rows = _segment->num_rows();
146
0
            if (_find_condition_cache) {
147
0
                const auto& filter_result = *(handle.get_filter_result());
148
0
                int64_t filtered_blocks = 0;
149
0
                for (int i = 0; i < filter_result.size(); i++) {
150
0
                    if (!filter_result[i]) {
151
0
                        _row_bitmap.removeRange(
152
0
                                i * CONDITION_CACHE_OFFSET,
153
0
                                i * CONDITION_CACHE_OFFSET + CONDITION_CACHE_OFFSET);
154
0
                        filtered_blocks++;
155
0
                    }
156
0
                }
157
                // Record condition_cache hit segment number
158
0
                _opts.stats->condition_cache_hit_seg_nums++;
159
                // Record rows filtered by condition cache hit
160
0
                _opts.stats->condition_cache_filtered_rows +=
161
0
                        filtered_blocks * SegmentIterator::CONDITION_CACHE_OFFSET;
162
0
            } else {
163
0
                _condition_cache = std::make_shared<std::vector<bool>>(
164
0
                        num_rows / CONDITION_CACHE_OFFSET + 1, false);
165
0
            }
166
0
        }
167
2.82k
    } else {
168
2.82k
        _opts.condition_cache_digest = 0;
169
2.82k
    }
170
2.82k
}
171
172
// A fast range iterator for roaring bitmap. Output ranges use closed-open form, like [from, to).
173
// Example:
174
//   input bitmap:  [0 1 4 5 6 7 10 15 16 17 18 19]
175
//   output ranges: [0,2), [4,8), [10,11), [15,20) (when max_range_size=10)
176
//   output ranges: [0,2), [4,7), [7,8), [10,11), [15,18), [18,20) (when max_range_size=3)
177
class SegmentIterator::BitmapRangeIterator {
178
public:
179
0
    BitmapRangeIterator() = default;
180
2.82k
    virtual ~BitmapRangeIterator() = default;
181
182
2.82k
    explicit BitmapRangeIterator(const roaring::Roaring& bitmap) {
183
2.82k
        roaring_init_iterator(&bitmap.roaring, &_iter);
184
2.82k
    }
185
186
0
    bool has_more_range() const { return !_eof; }
187
188
6.50k
    [[nodiscard]] static uint32_t get_batch_size() { return kBatchSize; }
189
190
    // read next range into [*from, *to) whose size <= max_range_size.
191
    // return false when there is no more range.
192
0
    virtual bool next_range(const uint32_t max_range_size, uint32_t* from, uint32_t* to) {
193
0
        if (_eof) {
194
0
            return false;
195
0
        }
196
197
0
        *from = _buf[_buf_pos];
198
0
        uint32_t range_size = 0;
199
0
        uint32_t expect_val = _buf[_buf_pos]; // this initial value just make first batch valid
200
201
        // if array is contiguous sequence then the following conditions need to be met :
202
        // a_0: x
203
        // a_1: x+1
204
        // a_2: x+2
205
        // ...
206
        // a_p: x+p
207
        // so we can just use (a_p-a_0)-p to check conditions
208
        // and should notice the previous batch needs to be continuous with the current batch
209
0
        while (!_eof && range_size + _buf_size - _buf_pos <= max_range_size &&
210
0
               expect_val == _buf[_buf_pos] &&
211
0
               _buf[_buf_size - 1] - _buf[_buf_pos] == _buf_size - 1 - _buf_pos) {
212
0
            range_size += _buf_size - _buf_pos;
213
0
            expect_val = _buf[_buf_size - 1] + 1;
214
0
            _read_next_batch();
215
0
        }
216
217
        // ensure the remaining range does not extend into the next batch
218
0
        if (!_eof && range_size < max_range_size && expect_val == _buf[_buf_pos]) {
219
0
            do {
220
0
                _buf_pos++;
221
0
                range_size++;
222
0
            } while (range_size < max_range_size && _buf[_buf_pos] == _buf[_buf_pos - 1] + 1);
223
0
        }
224
0
        *to = *from + range_size;
225
0
        return true;
226
0
    }
227
228
    // read batch_size of rowids from roaring bitmap into buf array
229
12.9k
    virtual uint32_t read_batch_rowids(rowid_t* buf, uint32_t batch_size) {
230
12.9k
        return roaring::api::roaring_read_uint32_iterator(&_iter, buf, batch_size);
231
12.9k
    }
232
233
private:
234
0
    void _read_next_batch() {
235
0
        _buf_pos = 0;
236
0
        _buf_size = roaring::api::roaring_read_uint32_iterator(&_iter, _buf, kBatchSize);
237
0
        _eof = (_buf_size == 0);
238
0
    }
239
240
    static const uint32_t kBatchSize = 256;
241
    roaring::api::roaring_uint32_iterator_t _iter;
242
    uint32_t _buf[kBatchSize];
243
    uint32_t _buf_pos = 0;
244
    uint32_t _buf_size = 0;
245
    bool _eof = false;
246
};
247
248
// A backward range iterator for roaring bitmap. Output ranges use closed-open form, like [from, to).
249
// Example:
250
//   input bitmap:  [0 1 4 5 6 7 10 15 16 17 18 19]
251
//   output ranges: [15,20), [10,11), [4,8), [0,2) (when max_range_size=10)
252
//   output ranges: [17,20), [15,17), [10,11), [5,8), [4, 5), [0,2) (when max_range_size=3)
253
class SegmentIterator::BackwardBitmapRangeIterator : public SegmentIterator::BitmapRangeIterator {
254
public:
255
0
    explicit BackwardBitmapRangeIterator(const roaring::Roaring& bitmap) {
256
0
        roaring_init_iterator_last(&bitmap.roaring, &_riter);
257
0
        _rowid_count = cast_set<uint32_t>(roaring_bitmap_get_cardinality(&bitmap.roaring));
258
0
        _rowid_left = _rowid_count;
259
0
    }
260
261
0
    bool has_more_range() const { return !_riter.has_value; }
262
263
    // read next range into [*from, *to) whose size <= max_range_size.
264
    // return false when there is no more range.
265
0
    bool next_range(const uint32_t max_range_size, uint32_t* from, uint32_t* to) override {
266
0
        if (!_riter.has_value) {
267
0
            return false;
268
0
        }
269
270
0
        uint32_t range_size = 0;
271
0
        *to = _riter.current_value + 1;
272
273
0
        do {
274
0
            *from = _riter.current_value;
275
0
            range_size++;
276
0
            roaring_previous_uint32_iterator(&_riter);
277
0
        } while (range_size < max_range_size && _riter.has_value &&
278
0
                 _riter.current_value + 1 == *from);
279
280
0
        return true;
281
0
    }
282
    /**
283
     * Reads a batch of row IDs from a roaring bitmap, starting from the end and moving backwards.
284
     * This function retrieves the last `batch_size` row IDs from the bitmap and stores them in the provided buffer.
285
     * It updates the internal state to track how many row IDs are left to read in subsequent calls.
286
     *
287
     * The row IDs are read in reverse order, but stored in the buffer maintaining their original order in the bitmap.
288
     *
289
     * Example:
290
     *   input bitmap: [0 1 4 5 6 7 10 15 16 17 18 19]
291
     *   If the bitmap has 12 elements and batch_size is set to 5, the function will first read [15, 16, 17, 18, 19]
292
     *   into the buffer, leaving 7 elements left. In the next call with batch_size 5, it will read [4, 5, 6, 7, 10].
293
     *
294
     */
295
0
    uint32_t read_batch_rowids(rowid_t* buf, uint32_t batch_size) override {
296
0
        if (!_riter.has_value || _rowid_left == 0) {
297
0
            return 0;
298
0
        }
299
300
0
        if (_rowid_count <= batch_size) {
301
0
            roaring_bitmap_to_uint32_array(_riter.parent,
302
0
                                           buf); // Fill 'buf' with '_rowid_count' elements.
303
0
            uint32_t num_read = _rowid_left;     // Save the number of row IDs read.
304
0
            _rowid_left = 0;                     // No row IDs left after this operation.
305
0
            return num_read;                     // Return the number of row IDs read.
306
0
        }
307
308
0
        uint32_t read_size = std::min(batch_size, _rowid_left);
309
0
        uint32_t num_read = 0; // Counter for the number of row IDs read.
310
311
        // Read row IDs into the buffer in reverse order.
312
0
        while (num_read < read_size && _riter.has_value) {
313
0
            buf[read_size - num_read - 1] = _riter.current_value;
314
0
            num_read++;
315
0
            _rowid_left--; // Decrement the count of remaining row IDs.
316
0
            roaring_previous_uint32_iterator(&_riter);
317
0
        }
318
319
        // Return the actual number of row IDs read.
320
0
        return num_read;
321
0
    }
322
323
private:
324
    roaring::api::roaring_uint32_iterator_t _riter;
325
    uint32_t _rowid_count;
326
    uint32_t _rowid_left;
327
};
328
329
SegmentIterator::SegmentIterator(std::shared_ptr<Segment> segment, SchemaSPtr schema)
330
2.83k
        : _segment(std::move(segment)),
331
2.83k
          _schema(schema),
332
2.83k
          _column_iterators(_schema->num_columns()),
333
2.83k
          _index_iterators(_schema->num_columns()),
334
2.83k
          _cur_rowid(0),
335
2.83k
          _lazy_materialization_read(false),
336
2.83k
          _lazy_inited(false),
337
2.83k
          _inited(false),
338
2.83k
          _pool(new ObjectPool) {}
339
340
5.50k
Status SegmentIterator::init(const StorageReadOptions& opts) {
341
5.50k
    auto status = _init_impl(opts);
342
5.50k
    if (!status.ok()) {
343
0
        _segment->update_healthy_status(status);
344
0
    }
345
5.50k
    return status;
346
5.50k
}
347
348
2.82k
std::unique_ptr<AdaptiveBlockSizePredictor> SegmentIterator::_make_block_size_predictor() const {
349
2.82k
    if (!config::enable_adaptive_batch_size || _opts.preferred_block_size_bytes == 0) {
350
0
        return nullptr;
351
0
    }
352
353
    // Collect per-column raw byte metadata from the segment footer for the columns
354
    // this iterator will actually output (defined by _schema, which is built from
355
    // _opts.return_columns).
356
2.82k
    std::vector<AdaptiveBlockSizePredictor::ColumnMetadata> col_metadata;
357
2.82k
    uint32_t seg_rows = _segment->num_rows();
358
2.82k
    uint64_t total_raw_bytes = 0;
359
2.82k
    double metadata_hint_bytes_per_row = 0.0;
360
2.82k
    if (seg_rows > 0) {
361
2.82k
        const auto& ts = _segment->tablet_schema();
362
2.82k
        if (ts) {
363
7.05k
            for (ColumnId cid : _schema->column_ids()) {
364
7.05k
                if (static_cast<size_t>(cid) < ts->num_columns()) {
365
7.05k
                    int32_t uid = ts->column(cid).unique_id();
366
7.05k
                    uint64_t raw_bytes = _segment->column_raw_data_bytes(uid);
367
7.05k
                    if (uid >= 0 && raw_bytes > 0) {
368
6.94k
                        total_raw_bytes += raw_bytes;
369
6.94k
                    }
370
7.05k
                }
371
7.05k
            }
372
2.82k
            metadata_hint_bytes_per_row = total_raw_bytes / static_cast<double>(seg_rows);
373
2.82k
        }
374
2.82k
    }
375
376
2.82k
    return std::make_unique<AdaptiveBlockSizePredictor>(
377
2.82k
            _opts.preferred_block_size_bytes, metadata_hint_bytes_per_row,
378
2.82k
            AdaptiveBlockSizePredictor::kDefaultProbeRows, _opts.block_row_max);
379
2.82k
}
380
381
5.50k
Status SegmentIterator::_init_impl(const StorageReadOptions& opts) {
382
    // get file handle from file descriptor of segment
383
5.50k
    if (_inited) {
384
2.68k
        return Status::OK();
385
2.68k
    }
386
2.82k
    _opts = opts;
387
2.82k
    SCOPED_RAW_TIMER(&_opts.stats->segment_iterator_init_timer_ns);
388
2.82k
    _inited = true;
389
2.82k
    _file_reader = _segment->_file_reader;
390
2.82k
    _col_predicates.clear();
391
392
2.82k
    for (const auto& predicate : opts.column_predicates) {
393
0
        if (!_segment->can_apply_predicate_safely(predicate->column_id(), *_schema,
394
0
                                                  _opts.target_cast_type_for_variants, _opts)) {
395
0
            continue;
396
0
        }
397
0
        _col_predicates.emplace_back(predicate);
398
0
    }
399
2.82k
    _tablet_id = opts.tablet_id;
400
    // Read options will not change, so that just resize here
401
2.82k
    _block_rowids.resize(_opts.block_row_max);
402
403
    // Adaptive batch size: snapshot the initial row limit and create predictor if enabled.
404
2.82k
    _initial_block_row_max = _opts.block_row_max;
405
2.82k
    _block_size_predictor = _make_block_size_predictor();
406
407
2.82k
    _remaining_conjunct_roots = opts.remaining_conjunct_roots;
408
409
2.82k
    if (_schema->rowid_col_idx() > 0) {
410
0
        _record_rowids = true;
411
0
    }
412
413
2.82k
    _virtual_column_exprs = _opts.virtual_column_exprs;
414
2.82k
    _vir_cid_to_idx_in_block = _opts.vir_cid_to_idx_in_block;
415
2.82k
    _score_runtime = _opts.score_runtime;
416
2.82k
    _ann_topn_runtime = _opts.ann_topn_runtime;
417
418
2.82k
    if (opts.output_columns != nullptr) {
419
1.30k
        _output_columns = *(opts.output_columns);
420
1.30k
    }
421
422
2.82k
    _storage_name_and_type.resize(_schema->columns().size());
423
2.82k
    auto storage_format = _opts.tablet_schema->get_inverted_index_storage_format();
424
25.8k
    for (int i = 0; i < _schema->columns().size(); ++i) {
425
23.0k
        const StorageField* col = _schema->column(i);
426
23.0k
        if (col) {
427
7.05k
            auto storage_type = _segment->get_data_type_of(col->get_desc(), _opts);
428
7.05k
            if (storage_type == nullptr) {
429
0
                storage_type = DataTypeFactory::instance().create_data_type(col->get_desc(),
430
0
                                                                            col->is_nullable());
431
0
            }
432
            // Currently, when writing a lucene index, the field of the document is column_name, and the column name is
433
            // bound to the index field. Since version 1.2, the data file storage has been changed from column_name to
434
            // column_unique_id, allowing the column name to be changed. Due to current limitations, previous inverted
435
            // index data cannot be used after Doris changes the column name. Column names also support Unicode
436
            // characters, which may cause other problems with indexing in non-ASCII characters.
437
            // After consideration, it was decided to change the field name from column_name to column_unique_id in
438
            // format V2, while format V1 continues to use column_name.
439
7.05k
            std::string field_name;
440
7.05k
            if (storage_format == InvertedIndexStorageFormatPB::V1) {
441
4.37k
                field_name = col->name();
442
4.37k
            } else {
443
2.68k
                if (col->is_extracted_column()) {
444
                    // variant sub col
445
                    // field_name format: parent_unique_id.sub_col_name
446
0
                    field_name = std::to_string(col->parent_unique_id()) + "." + col->name();
447
2.68k
                } else {
448
2.68k
                    field_name = std::to_string(col->unique_id());
449
2.68k
                }
450
2.68k
            }
451
7.05k
            _storage_name_and_type[i] = std::make_pair(field_name, storage_type);
452
7.05k
            if (int32_t uid = col->get_unique_id(); !_variant_sparse_column_cache.contains(uid)) {
453
7.05k
                DCHECK(uid >= 0);
454
7.05k
                _variant_sparse_column_cache.emplace(uid,
455
7.05k
                                                     std::make_unique<PathToBinaryColumnCache>());
456
7.05k
            }
457
7.05k
        }
458
23.0k
    }
459
460
2.82k
    RETURN_IF_ERROR(init_iterators());
461
462
2.82k
    RETURN_IF_ERROR(_construct_compound_expr_context());
463
2.82k
    _enable_common_expr_pushdown = !_common_expr_ctxs_push_down.empty();
464
2.82k
    VLOG_DEBUG << fmt::format(
465
0
            "Segment iterator init, virtual_column_exprs size: {}, "
466
0
            "_vir_cid_to_idx_in_block size: {}, common_expr_pushdown size: {}",
467
0
            _opts.virtual_column_exprs.size(), _opts.vir_cid_to_idx_in_block.size(),
468
0
            _common_expr_ctxs_push_down.size());
469
2.82k
    _initialize_predicate_results();
470
2.82k
    return Status::OK();
471
2.82k
}
472
473
2.82k
void SegmentIterator::_initialize_predicate_results() {
474
    // Initialize from _col_predicates
475
2.82k
    for (auto pred : _col_predicates) {
476
0
        int cid = pred->column_id();
477
0
        _column_predicate_index_exec_status[cid][pred] = false;
478
0
    }
479
480
2.82k
    _calculate_expr_in_remaining_conjunct_root();
481
2.82k
}
482
483
2.82k
Status SegmentIterator::init_iterators() {
484
2.82k
    RETURN_IF_ERROR(_init_return_column_iterators());
485
2.82k
    RETURN_IF_ERROR(_init_index_iterators());
486
2.82k
    return Status::OK();
487
2.82k
}
488
489
12.9k
Status SegmentIterator::_lazy_init(Block* block) {
490
12.9k
    if (_lazy_inited) {
491
10.1k
        return Status::OK();
492
10.1k
    }
493
2.82k
    SCOPED_RAW_TIMER(&_opts.stats->block_init_ns);
494
2.82k
    DorisMetrics::instance()->segment_read_total->increment(1);
495
2.82k
    _row_bitmap.addRange(0, _segment->num_rows());
496
2.82k
    _init_row_bitmap_by_condition_cache();
497
498
    // z-order can not use prefix index
499
2.82k
    if (_segment->_tablet_schema->sort_type() != SortType::ZORDER &&
500
2.82k
        _segment->_tablet_schema->cluster_key_uids().empty()) {
501
2.82k
        RETURN_IF_ERROR(_get_row_ranges_by_keys());
502
2.82k
    }
503
2.82k
    RETURN_IF_ERROR(_get_row_ranges_by_column_conditions());
504
2.82k
    RETURN_IF_ERROR(_vec_init_lazy_materialization());
505
    // Remove rows that have been marked deleted
506
2.82k
    if (_opts.delete_bitmap.count(segment_id()) > 0 &&
507
2.82k
        _opts.delete_bitmap.at(segment_id()) != nullptr) {
508
25
        size_t pre_size = _row_bitmap.cardinality();
509
25
        _row_bitmap -= *(_opts.delete_bitmap.at(segment_id()));
510
25
        _opts.stats->rows_del_by_bitmap += (pre_size - _row_bitmap.cardinality());
511
25
        VLOG_DEBUG << "read on segment: " << segment_id() << ", delete bitmap cardinality: "
512
0
                   << _opts.delete_bitmap.at(segment_id())->cardinality() << ", "
513
0
                   << _opts.stats->rows_del_by_bitmap << " rows deleted by bitmap";
514
25
    }
515
516
2.82k
    if (!_opts.row_ranges.is_empty()) {
517
0
        _row_bitmap &= RowRanges::ranges_to_roaring(_opts.row_ranges);
518
0
    }
519
520
2.82k
    _prepare_score_column_materialization();
521
522
2.82k
    RETURN_IF_ERROR(_apply_ann_topn_predicate());
523
524
2.82k
    if (_opts.read_orderby_key_reverse) {
525
0
        _range_iter.reset(new BackwardBitmapRangeIterator(_row_bitmap));
526
2.82k
    } else {
527
2.82k
        _range_iter.reset(new BitmapRangeIterator(_row_bitmap));
528
2.82k
    }
529
530
    // Reserve columns for _initial_block_row_max (the original max before any adaptive
531
    // prediction) because the predictor may increase block_row_max on subsequent batches
532
    // up to this ceiling. Using the current (possibly reduced) _opts.block_row_max would
533
    // cause heap-buffer-overflow if a later prediction is larger.
534
2.82k
    auto nrows_reserve_limit =
535
2.82k
            std::min(_row_bitmap.cardinality(), uint64_t(_initial_block_row_max));
536
2.82k
    if (_lazy_materialization_read || _opts.record_rowids || _is_need_expr_eval) {
537
896
        _block_rowids.resize(_initial_block_row_max);
538
896
    }
539
2.82k
    _current_return_columns.resize(_schema->columns().size());
540
541
2.82k
    _vec_init_char_column_id(block);
542
9.87k
    for (size_t i = 0; i < _schema->column_ids().size(); i++) {
543
7.05k
        ColumnId cid = _schema->column_ids()[i];
544
7.05k
        const auto* column_desc = _schema->column(cid);
545
7.05k
        if (_is_pred_column[cid]) {
546
467
            auto storage_column_type = _storage_name_and_type[cid].second;
547
            // Char type is special , since char type's computational datatype is same with string,
548
            // both are DataTypeString, but DataTypeString only return FieldType::OLAP_FIELD_TYPE_STRING
549
            // in get_storage_field_type.
550
467
            RETURN_IF_CATCH_EXCEPTION(
551
                    // Here, cid will not go out of bounds
552
                    // because the size of _current_return_columns equals _schema->tablet_columns().size()
553
467
                    _current_return_columns[cid] = Schema::get_predicate_column_ptr(
554
467
                            _is_char_type[cid] ? FieldType::OLAP_FIELD_TYPE_CHAR
555
467
                                               : storage_column_type->get_storage_field_type(),
556
467
                            storage_column_type->is_nullable(), _opts.io_ctx.reader_type));
557
467
            _current_return_columns[cid]->set_rowset_segment_id(
558
467
                    {_segment->rowset_id(), _segment->id()});
559
467
            _current_return_columns[cid]->reserve(nrows_reserve_limit);
560
6.58k
        } else if (i >= block->columns()) {
561
            // This column needs to be scanned, but doesn't need to be returned upward. (delete sign)
562
            // if i >= block->columns means the column and not the pred_column means `column i` is
563
            // a delete condition column. but the column is not effective in the segment. so we just
564
            // create a column to hold the data.
565
            // a. origin data -> b. delete condition -> c. new load data
566
            // the segment of c do not effective delete condition, but it still need read the column
567
            // to match the schema.
568
            // TODO: skip read the not effective delete column to speed up segment read.
569
0
            _current_return_columns[cid] = Schema::get_data_type_ptr(*column_desc)->create_column();
570
0
            _current_return_columns[cid]->reserve(nrows_reserve_limit);
571
0
        }
572
7.05k
    }
573
574
    // An additional delete-condition column may be materialized at the end of the block;
575
    // after _output_column_by_sel_idx it will be erased, so we do not need to filter it;
576
    // so erase it from _columns_to_filter in the first next_batch.
577
    // Eg:
578
    //      `delete from table where a = 10;`
579
    //      `select b from table;`
580
    // column a is only effective inside the segment iterator; the block from the query engine only contains column b,
581
    // so no need to filter a column by expr.
582
2.82k
    for (auto it = _columns_to_filter.begin(); it != _columns_to_filter.end();) {
583
0
        if (*it >= block->columns()) {
584
0
            it = _columns_to_filter.erase(it);
585
0
        } else {
586
0
            ++it;
587
0
        }
588
0
    }
589
590
2.82k
    _lazy_inited = true;
591
592
2.82k
    _init_segment_prefetchers();
593
594
2.82k
    return Status::OK();
595
2.82k
}
596
597
2.82k
// Set up file-cache prefetchers for this segment's column iterators.
// Only active in cloud mode, and only for query / compaction reader types;
// everything else returns early. Prefetching is further gated by the
// per-reader-type enable flag and a non-empty row bitmap.
void SegmentIterator::_init_segment_prefetchers() {
    SCOPED_RAW_TIMER(&_opts.stats->segment_iterator_init_segment_prefetchers_timer_ns);
    if (!config::is_cloud_mode()) {
        return;
    }
    static std::vector<ReaderType> supported_reader_types {
            ReaderType::READER_QUERY, ReaderType::READER_BASE_COMPACTION,
            ReaderType::READER_CUMULATIVE_COMPACTION, ReaderType::READER_FULL_COMPACTION};
    if (std::ranges::none_of(supported_reader_types,
                             [&](ReaderType t) { return _opts.io_ctx.reader_type == t; })) {
        return;
    }
    // Initialize segment prefetcher for predicate and non-predicate columns
    bool is_query = (_opts.io_ctx.reader_type == ReaderType::READER_QUERY);
    bool enable_prefetch = is_query ? config::enable_query_segment_file_cache_prefetch
                                    : config::enable_compaction_segment_file_cache_prefetch;
    LOG_IF(INFO, config::enable_segment_prefetch_verbose_log) << fmt::format(
            "[verbose] SegmentIterator _init_segment_prefetchers, is_query={}, enable_prefetch={}, "
            "_row_bitmap.isEmpty()={}, row_bitmap.cardinality()={}, tablet={}, rowset={}, "
            "segment={}, predicate_column_ids={}, common_expr_column_ids={}",
            is_query, enable_prefetch, _row_bitmap.isEmpty(), _row_bitmap.cardinality(),
            _opts.tablet_id, _opts.rowset_id.to_string(), segment_id(),
            fmt::join(_predicate_column_ids, ","), fmt::join(_common_expr_column_ids, ","));
    if (enable_prefetch && !_row_bitmap.isEmpty()) {
        // window_size is 1 + the configured block count, so with a non-negative
        // config value the `window_size > 0` guard below always holds.
        int window_size =
                1 + (is_query ? config::query_segment_file_cache_prefetch_block_size
                              : config::compaction_segment_file_cache_prefetch_block_size);
        LOG_IF(INFO, config::enable_segment_prefetch_verbose_log) << fmt::format(
                "[verbose] SegmentIterator prefetch config: window_size={}", window_size);
        if (window_size > 0 &&
            !_column_iterators.empty()) { // ensure init_iterators has been called
            SegmentPrefetcherConfig prefetch_config(window_size,
                                                    config::file_cache_each_block_size);
            // Phase 1: create a prefetcher on every existing column iterator.
            // Init failures are logged (verbose mode only) and ignored: the
            // column simply reads without prefetch.
            for (auto cid : _schema->column_ids()) {
                auto& column_iter = _column_iterators[cid];
                if (column_iter == nullptr) {
                    continue;
                }
                const auto* tablet_column = _schema->column(cid);
                SegmentPrefetchParams params {
                        .config = prefetch_config,
                        .read_options = _opts,
                };
                LOG_IF(INFO, config::enable_segment_prefetch_verbose_log) << fmt::format(
                        "[verbose] SegmentIterator init_segment_prefetchers, "
                        "tablet={}, rowset={}, segment={}, column_id={}, col_name={}, type={}",
                        _opts.tablet_id, _opts.rowset_id.to_string(), segment_id(), cid,
                        tablet_column->name(), tablet_column->type());
                Status st = column_iter->init_prefetcher(params);
                if (!st.ok()) {
                    LOG_IF(WARNING, config::enable_segment_prefetch_verbose_log) << fmt::format(
                            "[verbose] failed to init prefetcher for column_id={}, "
                            "tablet={}, rowset={}, segment={}, error={}",
                            cid, _opts.tablet_id, _opts.rowset_id.to_string(), segment_id(),
                            st.to_string());
                }
            }

            // Phase 2: decide how each prefetcher builds its block list.
            // for compaction, it's guaranteed that all rows are read, so we can prefetch all data blocks
            PrefetcherInitMethod init_method = (is_query && _row_bitmap.cardinality() < num_rows())
                                                       ? PrefetcherInitMethod::FROM_ROWIDS
                                                       : PrefetcherInitMethod::ALL_DATA_BLOCKS;
            // Collect prefetchers grouped by the init method each iterator chose.
            std::map<PrefetcherInitMethod, std::vector<SegmentPrefetcher*>> prefetchers;
            for (const auto& column_iter : _column_iterators) {
                if (column_iter != nullptr) {
                    column_iter->collect_prefetchers(prefetchers, init_method);
                }
            }
            for (auto& [method, prefetcher_vec] : prefetchers) {
                if (method == PrefetcherInitMethod::ALL_DATA_BLOCKS) {
                    for (auto* prefetcher : prefetcher_vec) {
                        prefetcher->build_all_data_blocks();
                    }
                } else if (method == PrefetcherInitMethod::FROM_ROWIDS && !prefetcher_vec.empty()) {
                    // Derive the blocks to fetch from the surviving row ids.
                    SegmentPrefetcher::build_blocks_by_rowids(_row_bitmap, prefetcher_vec);
                }
            }
        }
    }
}
677
678
2.82k
/// Narrow `_row_bitmap` to the rows covered by `_opts.key_ranges` using the
/// segment's short-key/primary-key index.
///
/// For each key range, the upper bound ordinal is resolved first; the lower
/// bound is only looked up when the upper bound leaves a non-empty prefix
/// (upper_rowid > 0). The union of all per-range [lower, upper) spans is then
/// intersected with `_row_bitmap`, and the number of rows removed is recorded
/// in `rows_key_range_filtered`.
///
/// Returns OK (possibly without doing anything) when the bitmap is already
/// empty, there are no key ranges, or the schema holds no key column.
Status SegmentIterator::_get_row_ranges_by_keys() {
    SCOPED_RAW_TIMER(&_opts.stats->generate_row_ranges_by_keys_ns);
    DorisMetrics::instance()->segment_row_total->increment(num_rows());

    // fast path for empty segment or empty key ranges
    if (_row_bitmap.isEmpty() || _opts.key_ranges.empty()) {
        return Status::OK();
    }

    // Read & seek key columns is a waste of time when no key column in _schema.
    // (std::ranges form, consistent with the ranges usage elsewhere in this file.)
    if (std::ranges::none_of(_schema->columns(), [&](const StorageField* col) {
            return col && _opts.tablet_schema->column_by_uid(col->unique_id()).is_key();
        })) {
        return Status::OK();
    }

    RowRanges result_ranges;
    for (auto& key_range : _opts.key_ranges) {
        rowid_t lower_rowid = 0;
        rowid_t upper_rowid = num_rows();
        RETURN_IF_ERROR(_prepare_seek(key_range));
        if (key_range.upper_key != nullptr) {
            // If client want to read upper_bound, the include_upper is true. So we
            // should get the first ordinal at which key is larger than upper_bound.
            // So we call _lookup_ordinal with include_upper's negate
            RETURN_IF_ERROR(_lookup_ordinal(*key_range.upper_key, !key_range.include_upper,
                                            num_rows(), &upper_rowid));
        }
        if (upper_rowid > 0 && key_range.lower_key != nullptr) {
            RETURN_IF_ERROR(_lookup_ordinal(*key_range.lower_key, key_range.include_lower,
                                            upper_rowid, &lower_rowid));
        }
        auto row_range = RowRanges::create_single(lower_rowid, upper_rowid);
        RowRanges::ranges_union(result_ranges, row_range, &result_ranges);
    }
    // Apply the union of all key ranges and account for the filtered rows.
    size_t pre_size = _row_bitmap.cardinality();
    _row_bitmap &= RowRanges::ranges_to_roaring(result_ranges);
    _opts.stats->rows_key_range_filtered += (pre_size - _row_bitmap.cardinality());

    return Status::OK();
}
720
721
// Set up environment for the following seek.
722
0
// Set up environment for the following seek.
//
// Builds (lazily, on first call) the seek schema and seek block from the union
// of the columns referenced by the range's lower and upper keys, and creates a
// column iterator for every seek column that does not already have one.
Status SegmentIterator::_prepare_seek(const StorageReadOptions::KeyRange& key_range) {
    std::vector<const StorageField*> key_fields;
    std::set<uint32_t> column_set;
    if (key_range.lower_key != nullptr) {
        for (auto cid : key_range.lower_key->schema()->column_ids()) {
            column_set.emplace(cid);
            key_fields.emplace_back(key_range.lower_key->column_schema(cid));
        }
    }
    if (key_range.upper_key != nullptr) {
        for (auto cid : key_range.upper_key->schema()->column_ids()) {
            // insert(...).second is true only for columns not already added by
            // the lower key — single lookup instead of count() + emplace().
            if (column_set.insert(cid).second) {
                key_fields.emplace_back(key_range.upper_key->column_schema(cid));
            }
        }
    }
    if (!_seek_schema) {
        // Schema constructors accept a vector of TabletColumnPtr. Convert
        // StorageField pointers to TabletColumnPtr by copying their descriptors.
        std::vector<TabletColumnPtr> cols;
        cols.reserve(key_fields.size());
        for (const StorageField* f : key_fields) {
            cols.emplace_back(std::make_shared<TabletColumn>(f->get_desc()));
        }
        _seek_schema = std::make_unique<Schema>(cols, cols.size());
    }
    // todo(wb) need refactor here, when using pk to search, _seek_block is useless
    if (_seek_block.empty()) {
        _seek_block.resize(_seek_schema->num_column_ids());
        int i = 0;
        for (auto cid : _seek_schema->column_ids()) {
            auto column_desc = _seek_schema->column(cid);
            _seek_block[i] = Schema::get_column_by_field(*column_desc);
            i++;
        }
    }

    // create used column iterator
    for (auto cid : _seek_schema->column_ids()) {
        if (_column_iterators[cid] == nullptr) {
            // TODO: Do we need this?
            if (_virtual_column_exprs.contains(cid)) {
                _column_iterators[cid] = std::make_unique<VirtualColumnIterator>();
                continue;
            }

            RETURN_IF_ERROR(_segment->new_column_iterator(_opts.tablet_schema->column(cid),
                                                          &_column_iterators[cid], &_opts,
                                                          &_variant_sparse_column_cache));
            ColumnIteratorOptions iter_opts {
                    .use_page_cache = _opts.use_page_cache,
                    .file_reader = _file_reader.get(),
                    .stats = _opts.stats,
                    .io_ctx = _opts.io_ctx,
            };
            RETURN_IF_ERROR(_column_iterators[cid]->init(iter_opts));
        }
    }

    return Status::OK();
}
784
785
2.82k
/// Narrow `_row_bitmap` with column-level conditions: inverted/expr indexes
/// first, then zone map / bloom filter / dict based row ranges.
///
/// BUG FIX: in the loop that erases fully index-evaluated push-down contexts,
/// the original code advanced the iterator only in the `else` branch. When a
/// context was fully evaluated (`all_expr_inverted_index_evaluated()` true)
/// but `get_index_result_for_expr` returned nullptr, the iterator was neither
/// erased nor incremented, spinning forever on the same element. The loop now
/// erases-and-continues on success and unconditionally increments otherwise.
Status SegmentIterator::_get_row_ranges_by_column_conditions() {
    SCOPED_RAW_TIMER(&_opts.stats->generate_row_ranges_by_column_conditions_ns);
    if (_row_bitmap.isEmpty()) {
        return Status::OK();
    }

    {
        if (_opts.runtime_state &&
            _opts.runtime_state->query_options().enable_inverted_index_query &&
            (has_index_in_iterators() || !_common_expr_ctxs_push_down.empty())) {
            SCOPED_RAW_TIMER(&_opts.stats->inverted_index_filter_timer);
            size_t input_rows = _row_bitmap.cardinality();
            // Only apply column-level inverted index if we have iterators
            if (has_index_in_iterators()) {
                RETURN_IF_ERROR(_apply_inverted_index());
            }
            // Always apply expr-level index (e.g., search expressions) if we have common_expr_pushdown
            // This allows search expressions with variant subcolumns to be evaluated even when
            // the segment doesn't have all subcolumns
            RETURN_IF_ERROR(_apply_index_expr());
            for (auto it = _common_expr_ctxs_push_down.begin();
                 it != _common_expr_ctxs_push_down.end();) {
                if ((*it)->all_expr_inverted_index_evaluated()) {
                    const auto* result = (*it)->get_index_context()->get_index_result_for_expr(
                            (*it)->root().get());
                    if (result != nullptr) {
                        // Fold the index result into the row bitmap and drop the
                        // now-redundant conjunct so it is not re-evaluated on rows.
                        _row_bitmap &= *result->get_data_bitmap();
                        auto root = (*it)->root();
                        auto iter_find = std::find(_remaining_conjunct_roots.begin(),
                                                   _remaining_conjunct_roots.end(), root);
                        if (iter_find != _remaining_conjunct_roots.end()) {
                            _remaining_conjunct_roots.erase(iter_find);
                        }
                        it = _common_expr_ctxs_push_down.erase(it);
                        continue;
                    }
                }
                // Not evaluated, or evaluated without a usable result: keep the
                // context and move on (previously looped forever in the latter case).
                ++it;
            }
            // A non-zero digest is only meaningful while push-down exprs remain.
            _opts.condition_cache_digest =
                    _common_expr_ctxs_push_down.empty() ? 0 : _opts.condition_cache_digest;
            _opts.stats->rows_inverted_index_filtered += (input_rows - _row_bitmap.cardinality());
            // Columns whose every condition was satisfied by the index no longer
            // need their data pages read.
            for (auto cid : _schema->column_ids()) {
                bool result_true = _check_all_conditions_passed_inverted_index_for_column(cid);
                if (result_true) {
                    _need_read_data_indices[cid] = false;
                }
            }
        }
    }

    DBUG_EXECUTE_IF("segment_iterator.inverted_index.filtered_rows", {
        LOG(INFO) << "Debug Point: segment_iterator.inverted_index.filtered_rows: "
                  << _opts.stats->rows_inverted_index_filtered;
        auto filtered_rows = DebugPoints::instance()->get_debug_param_or_default<int32_t>(
                "segment_iterator.inverted_index.filtered_rows", "filtered_rows", -1);
        if (filtered_rows != _opts.stats->rows_inverted_index_filtered) {
            return Status::Error<ErrorCode::INTERNAL_ERROR>(
                    "filtered_rows: {} not equal to expected: {}",
                    _opts.stats->rows_inverted_index_filtered, filtered_rows);
        }
    })

    DBUG_EXECUTE_IF("segment_iterator.apply_inverted_index", {
        LOG(INFO) << "Debug Point: segment_iterator.apply_inverted_index";
        if (!_common_expr_ctxs_push_down.empty() || !_col_predicates.empty()) {
            return Status::Error<ErrorCode::INTERNAL_ERROR>(
                    "it is failed to apply inverted index, common_expr_ctxs_push_down: {}, "
                    "col_predicates: {}",
                    _common_expr_ctxs_push_down.size(), _col_predicates.size());
        }
    })

    if (!_row_bitmap.isEmpty() &&
        (!_opts.topn_filter_source_node_ids.empty() || !_opts.col_id_to_predicates.empty() ||
         _opts.delete_condition_predicates->num_of_column_predicate() > 0)) {
        RowRanges condition_row_ranges = RowRanges::create_single(_segment->num_rows());
        RETURN_IF_ERROR(_get_row_ranges_from_conditions(&condition_row_ranges));
        size_t pre_size = _row_bitmap.cardinality();
        _row_bitmap &= RowRanges::ranges_to_roaring(condition_row_ranges);
        _opts.stats->rows_conditions_filtered += (pre_size - _row_bitmap.cardinality());
    }

    DBUG_EXECUTE_IF("bloom_filter_must_filter_data", {
        if (_opts.stats->rows_bf_filtered == 0) {
            return Status::Error<ErrorCode::INTERNAL_ERROR>(
                    "Bloom filter did not filter the data.");
        }
    })

    // TODO(hkp): calculate filter rate to decide whether to
    // use zone map/bloom filter/secondary index or not.
    return Status::OK();
}
879
880
0
bool SegmentIterator::_column_has_ann_index(int32_t cid) {
881
0
    bool has_ann_index = _index_iterators[cid] != nullptr &&
882
0
                         _index_iterators[cid]->get_reader(AnnIndexReaderType::ANN);
883
884
0
    return has_ann_index;
885
0
}
886
887
2.82k
/// Try to answer the pushed-down ANN topn with the column's ANN index.
///
/// The index path is only taken when: the source column has an ANN index, no
/// other predicates/push-down exprs remain, the sort direction matches the
/// metric (desc for IP, asc for l2/cosine), the runtime and index metrics
/// agree, at least 30% of the segment's rows survive so far, and the index
/// loads successfully. Any other case falls back to brute-force search via
/// `fall_back_to_brute_force` (the bookkeeping that was previously copy-pasted
/// at every exit: re-enable data-page reads for the source column and count
/// the fallback). On success the search result is handed to the destination
/// virtual column iterator and index-only scan is enabled for the source cid.
Status SegmentIterator::_apply_ann_topn_predicate() {
    if (_ann_topn_runtime == nullptr) {
        return Status::OK();
    }

    VLOG_DEBUG << fmt::format("Try apply ann topn: {}", _ann_topn_runtime->debug_string());
    size_t src_col_idx = _ann_topn_runtime->get_src_column_idx();
    ColumnId src_cid = _schema->column_id(src_col_idx);
    IndexIterator* ann_index_iterator = _index_iterators[src_cid].get();

    // Shared exit for every "can not use the ANN index" case.
    auto fall_back_to_brute_force = [&]() -> Status {
        // Disable index-only scan on ann indexed column.
        _need_read_data_indices[src_cid] = true;
        _opts.stats->ann_fall_back_brute_force_cnt += 1;
        return Status::OK();
    };

    bool has_ann_index = _column_has_ann_index(src_cid);
    bool has_common_expr_push_down = !_common_expr_ctxs_push_down.empty();
    bool has_column_predicate = std::any_of(_is_pred_column.begin(), _is_pred_column.end(),
                                            [](bool is_pred) { return is_pred; });
    if (!has_ann_index || has_common_expr_push_down || has_column_predicate) {
        VLOG_DEBUG << fmt::format(
                "Ann topn can not be evaluated by ann index, has_ann_index: {}, "
                "has_common_expr_push_down: {}, has_column_predicate: {}",
                has_ann_index, has_common_expr_push_down, has_column_predicate);
        return fall_back_to_brute_force();
    }

    // Process asc & desc according to the type of metric
    auto index_reader = ann_index_iterator->get_reader(AnnIndexReaderType::ANN);
    auto ann_index_reader = dynamic_cast<AnnIndexReader*>(index_reader.get());
    DCHECK(ann_index_reader != nullptr);
    if (ann_index_reader->get_metric_type() == AnnIndexMetric::IP) {
        // Inner product: larger is better, only desc topn can use the index.
        if (_ann_topn_runtime->is_asc()) {
            VLOG_DEBUG << fmt::format(
                    "Asc topn for inner product can not be evaluated by ann index");
            return fall_back_to_brute_force();
        }
    } else {
        // l2/cosine: smaller is better, only asc topn can use the index.
        if (!_ann_topn_runtime->is_asc()) {
            VLOG_DEBUG << fmt::format("Desc topn for l2/cosine can not be evaluated by ann index");
            return fall_back_to_brute_force();
        }
    }

    if (ann_index_reader->get_metric_type() != _ann_topn_runtime->get_metric_type()) {
        VLOG_DEBUG << fmt::format(
                "Ann topn metric type {} not match index metric type {}, can not be evaluated by "
                "ann index",
                metric_to_string(_ann_topn_runtime->get_metric_type()),
                metric_to_string(ann_index_reader->get_metric_type()));
        return fall_back_to_brute_force();
    }

    size_t pre_size = _row_bitmap.cardinality();
    size_t rows_of_segment = _segment->num_rows();
    if (static_cast<double>(pre_size) < static_cast<double>(rows_of_segment) * 0.3) {
        VLOG_DEBUG << fmt::format(
                "Ann topn predicate input rows {} < 30% of segment rows {}, will not use ann index "
                "to "
                "filter",
                pre_size, rows_of_segment);
        return fall_back_to_brute_force();
    }
    IColumn::MutablePtr result_column;
    std::shared_ptr<std::vector<uint64_t>> result_row_ids;
    segment_v2::AnnIndexStats ann_index_stats;

    // Try to load ANN index before search
    auto ann_index_iterator_casted =
            dynamic_cast<segment_v2::AnnIndexIterator*>(ann_index_iterator);
    if (ann_index_iterator_casted == nullptr) {
        VLOG_DEBUG << "Failed to cast index iterator to AnnIndexIterator, fallback to brute force";
        return fall_back_to_brute_force();
    }

    // Track load index timing
    {
        SCOPED_TIMER(&(ann_index_stats.load_index_costs_ns));
        if (!ann_index_iterator_casted->try_load_index()) {
            VLOG_DEBUG << "Failed to load ANN index, fallback to brute force search";
            return fall_back_to_brute_force();
        }
        double load_costs_ms =
                static_cast<double>(ann_index_stats.load_index_costs_ns.value()) / 1000000.0;
        DorisMetrics::instance()->ann_index_load_costs_ms->increment(
                static_cast<int64_t>(load_costs_ms));
    }

    // Result cache defaults to on when the query option is absent/unset.
    bool enable_ann_index_result_cache =
            !_opts.runtime_state ||
            !_opts.runtime_state->query_options().__isset.enable_ann_index_result_cache ||
            _opts.runtime_state->query_options().enable_ann_index_result_cache;
    RETURN_IF_ERROR(_ann_topn_runtime->evaluate_vector_ann_search(
            ann_index_iterator_casted, &_row_bitmap, rows_of_segment, enable_ann_index_result_cache,
            result_column, result_row_ids, ann_index_stats));

    VLOG_DEBUG << fmt::format("Ann topn filtered {} - {} = {} rows", pre_size,
                              _row_bitmap.cardinality(), pre_size - _row_bitmap.cardinality());

    // Propagate per-search stats into the reader-level counters.
    int64_t rows_filtered = pre_size - _row_bitmap.cardinality();
    _opts.stats->rows_ann_index_topn_filtered += rows_filtered;
    _opts.stats->ann_index_load_ns += ann_index_stats.load_index_costs_ns.value();
    _opts.stats->ann_topn_search_ns += ann_index_stats.search_costs_ns.value();
    _opts.stats->ann_ivf_on_disk_load_ns += ann_index_stats.ivf_on_disk_load_costs_ns.value();
    _opts.stats->ann_ivf_on_disk_cache_hit_cnt += ann_index_stats.ivf_on_disk_cache_hit_cnt.value();
    _opts.stats->ann_ivf_on_disk_cache_miss_cnt +=
            ann_index_stats.ivf_on_disk_cache_miss_cnt.value();
    _opts.stats->ann_index_topn_engine_search_ns += ann_index_stats.engine_search_ns.value();
    _opts.stats->ann_index_topn_result_process_ns +=
            ann_index_stats.result_process_costs_ns.value();
    _opts.stats->ann_index_topn_engine_convert_ns += ann_index_stats.engine_convert_ns.value();
    _opts.stats->ann_index_topn_engine_prepare_ns += ann_index_stats.engine_prepare_ns.value();
    _opts.stats->ann_index_topn_search_cnt += 1;
    _opts.stats->ann_index_cache_hits += ann_index_stats.topn_cache_hits.value();
    // Hand the distances/row ids to the destination virtual column iterator so
    // the distance column is materialized without re-computation.
    const size_t dst_col_idx = _ann_topn_runtime->get_dest_column_idx();
    ColumnIterator* column_iter = _column_iterators[_schema->column_id(dst_col_idx)].get();
    DCHECK(column_iter != nullptr);
    VirtualColumnIterator* virtual_column_iter = dynamic_cast<VirtualColumnIterator*>(column_iter);
    DCHECK(virtual_column_iter != nullptr);
    VLOG_DEBUG << fmt::format(
            "Virtual column iterator, column_idx {}, is materialized with {} rows", dst_col_idx,
            result_row_ids->size());
    // reference count of result_column should be 1, so move will not issue any data copy.
    virtual_column_iter->prepare_materialization(std::move(result_column), result_row_ids);

    _need_read_data_indices[src_cid] = false;
    VLOG_DEBUG << fmt::format(
            "Enable ANN index-only scan for src column cid {} (skip reading data pages)", src_cid);

    return Status::OK();
}
1031
1032
467
/// Shrink `condition_row_ranges` with the segment's per-column metadata, in
/// order of increasing cost: dictionary (query reader only), bloom filter,
/// then zone map. Stats record how many rows each stage removed.
///
/// Fix: the zone-map section intersected `condition_row_ranges` with the same
/// `zone_map_row_ranges` twice; intersection is idempotent, so the second pass
/// did no work and `rows_stats_rp_filtered` was always incremented by zero.
/// The dead second intersection has been removed.
/// NOTE(review): if a topn runtime-predicate zone-map pass was meant to feed
/// rows_stats_rp_filtered here, it was lost before this revision — restore it
/// from upstream rather than re-adding the duplicate intersection. TODO confirm.
Status SegmentIterator::_get_row_ranges_from_conditions(RowRanges* condition_row_ranges) {
    std::set<int32_t> cids;
    for (auto& entry : _opts.col_id_to_predicates) {
        cids.insert(entry.first);
    }

    {
        SCOPED_RAW_TIMER(&_opts.stats->generate_row_ranges_by_dict_ns);
        /// Low cardinality optimization is currently not very stable, so to prevent data corruption,
        /// we are temporarily disabling its use in data compaction.
        // TODO: enable it in not only ReaderTyper::READER_QUERY but also other reader types.
        if (_opts.io_ctx.reader_type == ReaderType::READER_QUERY) {
            RowRanges dict_row_ranges = RowRanges::create_single(num_rows());
            for (auto cid : cids) {
                if (!_segment->can_apply_predicate_safely(
                            cid, *_schema, _opts.target_cast_type_for_variants, _opts)) {
                    continue;
                }
                DCHECK(_opts.col_id_to_predicates.count(cid) > 0);
                RETURN_IF_ERROR(_column_iterators[cid]->get_row_ranges_by_dict(
                        _opts.col_id_to_predicates.at(cid).get(), &dict_row_ranges));

                if (dict_row_ranges.is_empty()) {
                    break;
                }
            }

            // Dict filtering can only prove the whole segment empty; partial
            // dict ranges are not intersected in.
            if (dict_row_ranges.is_empty()) {
                RowRanges::ranges_intersection(*condition_row_ranges, dict_row_ranges,
                                               condition_row_ranges);
                _opts.stats->segment_dict_filtered++;
                _opts.stats->filtered_segment_number++;
                return Status::OK();
            }
        }
    }

    size_t pre_size = 0;
    {
        SCOPED_RAW_TIMER(&_opts.stats->generate_row_ranges_by_bf_ns);
        // first filter data by bloom filter index
        // bloom filter index only use CondColumn
        RowRanges bf_row_ranges = RowRanges::create_single(num_rows());
        for (auto& cid : cids) {
            DCHECK(_opts.col_id_to_predicates.count(cid) > 0);
            if (!_segment->can_apply_predicate_safely(cid, *_schema,
                                                      _opts.target_cast_type_for_variants, _opts)) {
                continue;
            }
            // get row ranges by bf index of this column,
            RowRanges column_bf_row_ranges = RowRanges::create_single(num_rows());
            RETURN_IF_ERROR(_column_iterators[cid]->get_row_ranges_by_bloom_filter(
                    _opts.col_id_to_predicates.at(cid).get(), &column_bf_row_ranges));
            RowRanges::ranges_intersection(bf_row_ranges, column_bf_row_ranges, &bf_row_ranges);
        }

        pre_size = condition_row_ranges->count();
        RowRanges::ranges_intersection(*condition_row_ranges, bf_row_ranges, condition_row_ranges);
        _opts.stats->rows_bf_filtered += (pre_size - condition_row_ranges->count());
    }

    {
        SCOPED_RAW_TIMER(&_opts.stats->generate_row_ranges_by_zonemap_ns);
        RowRanges zone_map_row_ranges = RowRanges::create_single(num_rows());
        // second filter data by zone map
        for (const auto& cid : cids) {
            DCHECK(_opts.col_id_to_predicates.count(cid) > 0);
            if (!_segment->can_apply_predicate_safely(cid, *_schema,
                                                      _opts.target_cast_type_for_variants, _opts)) {
                continue;
            }
            // do not check zonemap if predicate does not support zonemap
            if (!_opts.col_id_to_predicates.at(cid)->support_zonemap()) {
                VLOG_DEBUG << "skip zonemap for column " << cid;
                continue;
            }
            // get row ranges by zone map of this column,
            RowRanges column_row_ranges = RowRanges::create_single(num_rows());
            RETURN_IF_ERROR(_column_iterators[cid]->get_row_ranges_by_zone_map(
                    _opts.col_id_to_predicates.at(cid).get(),
                    _opts.del_predicates_for_zone_map.count(cid) > 0
                            ? &(_opts.del_predicates_for_zone_map.at(cid))
                            : nullptr,
                    &column_row_ranges));
            // intersect different columns's row ranges to get final row ranges by zone map
            RowRanges::ranges_intersection(zone_map_row_ranges, column_row_ranges,
                                           &zone_map_row_ranges);
        }

        pre_size = condition_row_ranges->count();
        RowRanges::ranges_intersection(*condition_row_ranges, zone_map_row_ranges,
                                       condition_row_ranges);
        _opts.stats->rows_stats_filtered += (pre_size - condition_row_ranges->count());
    }

    return Status::OK();
}
1134
1135
0
// True when `node_type` is one of the literal expression node kinds whose
// value can be folded directly into a predicate.
bool SegmentIterator::_is_literal_node(const TExprNodeType::type& node_type) {
    return node_type == TExprNodeType::BOOL_LITERAL ||
           node_type == TExprNodeType::INT_LITERAL ||
           node_type == TExprNodeType::LARGE_INT_LITERAL ||
           node_type == TExprNodeType::FLOAT_LITERAL ||
           node_type == TExprNodeType::DECIMAL_LITERAL ||
           node_type == TExprNodeType::STRING_LITERAL ||
           node_type == TExprNodeType::DATE_LITERAL ||
           node_type == TExprNodeType::TIMEV2_LITERAL;
}
1150
1151
0
// Walk `expr` depth-first and record every column it references: slot refs
// mark the column as a common-expr column, virtual slot refs are followed
// into their backing virtual-column expression.
Status SegmentIterator::_extract_common_expr_columns(const VExprSPtr& expr) {
    // Visit children first so nested slot references are collected.
    for (const auto& child : expr->children()) {
        RETURN_IF_ERROR(_extract_common_expr_columns(child));
    }

    const auto node_type = expr->node_type();
    if (node_type == TExprNodeType::SLOT_REF) {
        auto slot_expr = std::dynamic_pointer_cast<doris::VSlotRef>(expr);
        const auto cid = _schema->column_id(slot_expr->column_id());
        _is_common_expr_column[cid] = true;
        _common_expr_columns.insert(cid);
    } else if (node_type == TExprNodeType::VIRTUAL_SLOT_REF) {
        auto virtual_slot_ref = std::dynamic_pointer_cast<VirtualSlotRef>(expr);
        RETURN_IF_ERROR(_extract_common_expr_columns(virtual_slot_ref->get_virtual_column_expr()));
    }

    return Status::OK();
}
1170
1171
0
bool SegmentIterator::_check_apply_by_inverted_index(std::shared_ptr<ColumnPredicate> pred) {
1172
0
    if (_opts.runtime_state && !_opts.runtime_state->query_options().enable_inverted_index_query) {
1173
0
        return false;
1174
0
    }
1175
0
    auto pred_column_id = pred->column_id();
1176
0
    if (_index_iterators[pred_column_id] == nullptr) {
1177
        //this column without inverted index
1178
0
        return false;
1179
0
    }
1180
1181
0
    if (_inverted_index_not_support_pred_type(pred->type())) {
1182
0
        return false;
1183
0
    }
1184
1185
0
    if (pred->type() == PredicateType::IN_LIST || pred->type() == PredicateType::NOT_IN_LIST) {
1186
        // in_list or not_in_list predicate produced by runtime filter
1187
0
        if (pred->is_runtime_filter()) {
1188
0
            return false;
1189
0
        }
1190
0
    }
1191
1192
    // UNTOKENIZED strings exceed ignore_above, they are written as null, causing range query errors
1193
0
    if (PredicateTypeTraits::is_range(pred->type()) &&
1194
0
        !IndexReaderHelper::has_bkd_index(_index_iterators[pred_column_id].get())) {
1195
0
        return false;
1196
0
    }
1197
1198
    // Function filter no apply inverted index
1199
0
    if (dynamic_cast<LikeColumnPredicate<TYPE_CHAR>*>(pred.get()) != nullptr ||
1200
0
        dynamic_cast<LikeColumnPredicate<TYPE_STRING>*>(pred.get()) != nullptr) {
1201
0
        return false;
1202
0
    }
1203
1204
0
    bool handle_by_fulltext = _column_has_fulltext_index(pred_column_id);
1205
0
    if (handle_by_fulltext) {
1206
        // when predicate is leafNode of andNode,
1207
        // can apply 'match query' and 'equal query' and 'list query' for fulltext index.
1208
0
        return pred->type() == PredicateType::MATCH || pred->type() == PredicateType::IS_NULL ||
1209
0
               pred->type() == PredicateType::IS_NOT_NULL ||
1210
0
               PredicateTypeTraits::is_equal_or_list(pred->type());
1211
0
    }
1212
1213
0
    return true;
1214
0
}
1215
1216
// TODO: optimization when all expr can not evaluate by inverted/ann index,
1217
0
// TODO: optimization when all expr can not evaluate by inverted/ann index,
Status SegmentIterator::_apply_index_expr() {
    // The ANN result cache defaults to enabled unless the session option is
    // present and explicitly false.
    const bool ann_result_cache_enabled =
            !_opts.runtime_state ||
            !_opts.runtime_state->query_options().__isset.enable_ann_index_result_cache ||
            _opts.runtime_state->query_options().enable_ann_index_result_cache;

    // Errors that mean "skip the index here and evaluate the expression later".
    auto can_fall_back = [this](const Status& status) {
        return _downgrade_without_index(status) ||
               status.code() == ErrorCode::NOT_IMPLEMENTED_ERROR;
    };

    // Evaluate inverted index for the pushed-down common expressions.
    for (const auto& expr_ctx : _common_expr_ctxs_push_down) {
        Status status = expr_ctx->evaluate_inverted_index(num_rows());
        if (status.ok() || can_fall_back(status)) {
            continue;
        }
        // other code is not to be handled, we should just break
        LOG(WARNING) << "failed to evaluate inverted index for expr_ctx: "
                     << expr_ctx->root()->debug_string()
                     << ", error msg: " << status.to_string();
        return status;
    }

    // Evaluate inverted index for virtual column MATCH expressions (projections).
    // Unlike common exprs which filter rows, these only compute index result bitmaps
    // for later materialization via fast_execute().
    for (auto& [cid, expr_ctx] : _virtual_column_exprs) {
        if (expr_ctx->get_index_context() == nullptr) {
            continue;
        }
        Status status = expr_ctx->evaluate_inverted_index(num_rows());
        if (status.ok() || can_fall_back(status)) {
            continue;
        }
        LOG(WARNING) << "failed to evaluate inverted index for virtual column expr: "
                     << expr_ctx->root()->debug_string()
                     << ", error msg: " << status.to_string();
        return status;
    }

    // Apply ann range search and fold its statistics into the query profile.
    for (const auto& expr_ctx : _common_expr_ctxs_push_down) {
        segment_v2::AnnIndexStats ann_index_stats;
        const size_t rows_before = _row_bitmap.cardinality();
        RETURN_IF_ERROR(expr_ctx->evaluate_ann_range_search(
                _index_iterators, _schema->column_ids(), _column_iterators,
                _common_expr_to_slotref_map, _row_bitmap, ann_index_stats,
                ann_result_cache_enabled));
        auto* stats = _opts.stats;
        stats->rows_ann_index_range_filtered += (rows_before - _row_bitmap.cardinality());
        stats->ann_index_load_ns += ann_index_stats.load_index_costs_ns.value();
        stats->ann_index_range_search_ns += ann_index_stats.search_costs_ns.value();
        stats->ann_ivf_on_disk_load_ns += ann_index_stats.ivf_on_disk_load_costs_ns.value();
        stats->ann_ivf_on_disk_cache_hit_cnt += ann_index_stats.ivf_on_disk_cache_hit_cnt.value();
        stats->ann_ivf_on_disk_cache_miss_cnt += ann_index_stats.ivf_on_disk_cache_miss_cnt.value();
        stats->ann_range_engine_search_ns += ann_index_stats.engine_search_ns.value();
        stats->ann_range_result_convert_ns += ann_index_stats.result_process_costs_ns.value();
        stats->ann_range_engine_convert_ns += ann_index_stats.engine_convert_ns.value();
        stats->ann_range_pre_process_ns += ann_index_stats.engine_prepare_ns.value();
        stats->ann_fall_back_brute_force_cnt += ann_index_stats.fall_back_brute_force_cnt;
        stats->ann_index_range_cache_hits += ann_index_stats.range_cache_hits.value();
    }

    // Drop exprs fully answered by the ann range search from the push-down list.
    for (auto it = _common_expr_ctxs_push_down.begin(); it != _common_expr_ctxs_push_down.end();) {
        if ((*it)->root()->ann_range_search_executedd()) {
            _opts.stats->ann_index_range_search_cnt++;
            it = _common_expr_ctxs_push_down.erase(it);
        } else {
            ++it;
        }
    }
    // TODO:Do we need to remove these expr root from _remaining_conjunct_roots?

    return Status::OK();
}
1292
1293
0
bool SegmentIterator::_downgrade_without_index(Status res, bool need_remaining) {
    const bool is_fallback =
            _opts.runtime_state->query_options().enable_fallback_on_missing_inverted_index;

    // 1. INVERTED_INDEX_FILE_NOT_FOUND means index file has not been built,
    //    usually occurs when creating a new index, queries can be downgraded
    //    without index.
    // 2. INVERTED_INDEX_BYPASS means the hit of condition by index
    //    has reached the optimal limit, downgrade without index query can
    //    improve query performance.
    // 3. INVERTED_INDEX_EVALUATE_SKIPPED means the inverted index is not
    //    suitable for executing this predicate, skipped it and filter data
    //    by function later.
    // 4. INVERTED_INDEX_NO_TERMS means the column has fulltext index,
    //    but the column condition value no terms in specified parser,
    //    such as: where A = '' and B = ','
    //    the predicate of A and B need downgrade without index query.
    // 5. INVERTED_INDEX_FILE_CORRUPTED means the index file is corrupted,
    //    such as when index segment files are not generated
    // above case can downgrade without index query
    bool can_downgrade = false;
    switch (res.code()) {
    case ErrorCode::INVERTED_INDEX_FILE_NOT_FOUND:
        can_downgrade = is_fallback;
        break;
    case ErrorCode::INVERTED_INDEX_BYPASS:
    case ErrorCode::INVERTED_INDEX_EVALUATE_SKIPPED:
    case ErrorCode::INVERTED_INDEX_FILE_CORRUPTED:
        can_downgrade = true;
        break;
    case ErrorCode::INVERTED_INDEX_NO_TERMS:
        can_downgrade = need_remaining;
        break;
    default:
        break;
    }
    if (!can_downgrade) {
        return false;
    }

    _opts.stats->inverted_index_downgrade_count++;
    // BYPASS is an expected optimization, so log it at debug verbosity only.
    if (!res.is<ErrorCode::INVERTED_INDEX_BYPASS>()) {
        LOG(INFO) << "will downgrade without index to evaluate predicate, because of res: "
                  << res;
    } else {
        VLOG_DEBUG << "will downgrade without index to evaluate predicate, because of res: "
                   << res;
    }
    return true;
}
1329
1330
0
bool SegmentIterator::_column_has_fulltext_index(int32_t cid) {
1331
0
    bool has_fulltext_index =
1332
0
            _index_iterators[cid] != nullptr &&
1333
0
            _index_iterators[cid]->get_reader(InvertedIndexReaderType::FULLTEXT) &&
1334
0
            _index_iterators[cid]->get_reader(InvertedIndexReaderType::STRING_TYPE) == nullptr;
1335
1336
0
    return has_fulltext_index;
1337
0
}
1338
1339
0
inline bool SegmentIterator::_inverted_index_not_support_pred_type(const PredicateType& type) {
1340
0
    return type == PredicateType::BF || type == PredicateType::BITMAP_FILTER;
1341
0
}
1342
1343
// Tries to evaluate one column predicate through the inverted index,
// shrinking _row_bitmap on success. Predicates the index cannot (fully)
// answer are appended to remaining_predicates for later row-level
// evaluation; *continue_apply is cleared once the bitmap becomes empty.
Status SegmentIterator::_apply_inverted_index_on_column_predicate(
        std::shared_ptr<ColumnPredicate> pred,
        std::vector<std::shared_ptr<ColumnPredicate>>& remaining_predicates, bool* continue_apply) {
    if (!_check_apply_by_inverted_index(pred)) {
        remaining_predicates.emplace_back(pred);
        return Status::OK();
    }

    const auto cid = pred->column_id();
    // Fulltext equal/list predicates are only approximated by the index and
    // must be re-checked on the remaining rows afterwards.
    const bool need_remaining_after_evaluate =
            _column_has_fulltext_index(cid) && PredicateTypeTraits::is_equal_or_list(pred->type());

    Status res = pred->evaluate(_storage_name_and_type[cid], _index_iterators[cid].get(),
                                num_rows(), &_row_bitmap);
    if (!res.ok()) {
        if (_downgrade_without_index(res, need_remaining_after_evaluate)) {
            // Recoverable: fall back to row-level evaluation.
            remaining_predicates.emplace_back(pred);
            return Status::OK();
        }
        LOG(WARNING) << "failed to evaluate index"
                     << ", column predicate type: " << pred->pred_type_string(pred->type())
                     << ", error msg: " << res;
        return res;
    }

    if (_row_bitmap.isEmpty()) {
        // all rows have been pruned, no need to process further predicates
        *continue_apply = false;
    }

    if (need_remaining_after_evaluate) {
        remaining_predicates.emplace_back(pred);
        return Status::OK();
    }
    if (!pred->is_runtime_filter()) {
        _column_predicate_index_exec_status[cid][pred] = true;
    }
    return Status::OK();
}
1380
1381
27.1k
// Decides whether column data must actually be read from storage, or the
// column can be satisfied entirely by its index result. Returns true
// (read the data) unless every condition for the skip optimization holds.
bool SegmentIterator::_need_read_data(ColumnId cid) {
    if (_opts.runtime_state && !_opts.runtime_state->query_options().enable_no_need_read_data_opt) {
        return true;
    }
    // only support DUP_KEYS and UNIQUE_KEYS with MOW
    const auto keys_type = _opts.tablet_schema->keys_type();
    const bool supported_model =
            keys_type == KeysType::DUP_KEYS ||
            (keys_type == KeysType::UNIQUE_KEYS && _opts.enable_unique_key_merge_on_write);
    if (!supported_model) {
        return true;
    }
    // this is a virtual column, we always need to read data
    if (_vir_cid_to_idx_in_block.contains(cid)) {
        return true;
    }

    // if there is a delete predicate, we always need to read data
    if (_has_delete_predicate(cid)) {
        return true;
    }
    if (_output_columns.count(-1)) {
        // if _output_columns contains -1, it means that the light
        // weight schema change may not be enabled or other reasons
        // caused the column unique_id not be set, to prevent errors
        // occurring, return true here that column data needs to be read
        return true;
    }

    const auto& column = _opts.tablet_schema->column(cid);
    // Different subcolumns may share the same parent_unique_id, so we choose to abandon this optimization.
    if (column.is_extracted_column() &&
        _opts.push_down_agg_type_opt != TPushAggOp::COUNT_ON_INDEX) {
        return true;
    }
    int32_t unique_id = column.unique_id();
    if (unique_id < 0) {
        unique_id = column.parent_unique_id();
    }

    // Check the following conditions:
    // 1. If the column represented by the unique ID is an inverted index column (indicated by '_need_read_data_indices.count(unique_id) > 0 && !_need_read_data_indices[unique_id]')
    //    and it's not marked for projection in '_output_columns'.
    // 2. Or, if the column is an inverted index column and it's marked for projection in '_output_columns',
    //    and the operation is a push down of the 'COUNT_ON_INDEX' aggregation function.
    // If any of the above conditions are met, log a debug message indicating that there's no need to read data for the indexed column.
    // Then, return false.
    const bool index_answers_column =
            _need_read_data_indices.contains(cid) && !_need_read_data_indices[cid];
    if (index_answers_column &&
        (!_output_columns.contains(unique_id) ||
         (_output_columns.count(unique_id) == 1 &&
          _opts.push_down_agg_type_opt == TPushAggOp::COUNT_ON_INDEX))) {
        VLOG_DEBUG << "SegmentIterator no need read data for column: "
                   << _opts.tablet_schema->column_by_uid(unique_id).name();
        return false;
    }
    return true;
}
1435
1436
0
// Runs every column predicate through the inverted index where possible.
// Predicates the index cannot answer stay in _col_predicates for row-level
// evaluation; the loop stops early once the row bitmap is fully pruned.
Status SegmentIterator::_apply_inverted_index() {
    std::vector<std::shared_ptr<ColumnPredicate>> remaining_predicates;
    std::set<std::shared_ptr<ColumnPredicate>> no_need_to_pass_column_predicate_set;

    for (const auto& pred : _col_predicates) {
        if (no_need_to_pass_column_predicate_set.contains(pred)) {
            continue;
        }
        bool continue_apply = true;
        RETURN_IF_ERROR(_apply_inverted_index_on_column_predicate(pred, remaining_predicates,
                                                                  &continue_apply));
        if (!continue_apply) {
            break;
        }
    }

    _col_predicates = std::move(remaining_predicates);
    return Status::OK();
}
1456
1457
/**
 * @brief Checks if all conditions related to a specific column have passed in both
 * `_column_predicate_index_exec_status` and `_common_expr_index_exec_status`.
 *
 * This function first checks the conditions in `_column_predicate_index_exec_status`
 * for the given `ColumnId`. If all conditions pass, it sets `default_return` to `true`.
 * It then checks the conditions in `_common_expr_index_exec_status` for the same column.
 *
 * The function returns `true` if all conditions in both maps pass. If any condition fails
 * in either map, the function immediately returns `false`. If the column does not exist
 * in one of the maps, the function returns `default_return`.
 *
 * @param cid The ColumnId of the column to check.
 * @param default_return The default value to return if the column is not found in the status maps.
 * @return true if all conditions in both status maps pass, or if the column is not found
 *         and `default_return` is true.
 * @return false if any condition in either status map fails, or if the column is not found
 *         and `default_return` is false.
 */
1476
bool SegmentIterator::_check_all_conditions_passed_inverted_index_for_column(ColumnId cid,
1477
3
                                                                             bool default_return) {
1478
3
    auto pred_it = _column_predicate_index_exec_status.find(cid);
1479
3
    if (pred_it != _column_predicate_index_exec_status.end()) {
1480
2
        const auto& pred_map = pred_it->second;
1481
2
        bool pred_passed = std::all_of(pred_map.begin(), pred_map.end(),
1482
2
                                       [](const auto& pred_entry) { return pred_entry.second; });
1483
2
        if (!pred_passed) {
1484
1
            return false;
1485
1
        } else {
1486
1
            default_return = true;
1487
1
        }
1488
2
    }
1489
1490
2
    auto expr_it = _common_expr_index_exec_status.find(cid);
1491
2
    if (expr_it != _common_expr_index_exec_status.end()) {
1492
0
        const auto& expr_map = expr_it->second;
1493
0
        return std::all_of(expr_map.begin(), expr_map.end(),
1494
0
                           [](const auto& expr_entry) { return expr_entry.second; });
1495
0
    }
1496
2
    return default_return;
1497
2
}
1498
1499
2.82k
// Creates the column iterator for every returned column: synthesized
// iterators for rowid / global-rowid / virtual columns, storage-backed
// iterators for everything else.
//
// Fix: del_cond_id_set and tmp_is_pred_column were rebuilt inside the
// per-column loop even though they do not depend on the loop variable —
// O(columns x predicates) wasted work plus a redundant get_all_column_ids
// call per column. They are now computed once before the loop.
Status SegmentIterator::_init_return_column_iterators() {
    SCOPED_RAW_TIMER(&_opts.stats->segment_iterator_init_return_column_iterators_timer_ns);
    if (_cur_rowid >= num_rows()) {
        return Status::OK();
    }

    // Columns referenced by predicates (including delete conditions) must read
    // their last page to detect full dict encoding; compute this map once.
    std::set<ColumnId> del_cond_id_set;
    _opts.delete_condition_predicates->get_all_column_ids(del_cond_id_set);
    std::vector<bool> tmp_is_pred_column;
    tmp_is_pred_column.resize(_schema->columns().size(), false);
    for (auto predicate : _col_predicates) {
        auto p_cid = predicate->column_id();
        tmp_is_pred_column[p_cid] = true;
    }
    // handle delete_condition
    for (auto d_cid : del_cond_id_set) {
        tmp_is_pred_column[d_cid] = true;
    }

    for (auto cid : _schema->column_ids()) {
        // Row-id column is synthesized from tablet/rowset/segment ids.
        if (_schema->column(cid)->name() == BeConsts::ROWID_COL) {
            _column_iterators[cid].reset(
                    new RowIdColumnIterator(_opts.tablet_id, _opts.rowset_id, _segment->id()));
            continue;
        }

        // Global row-id column additionally encodes a backend-wide file mapping id.
        if (_schema->column(cid)->name().starts_with(BeConsts::GLOBAL_ROWID_COL)) {
            auto& id_file_map = _opts.runtime_state->get_id_file_map();
            uint32_t file_id = id_file_map->get_file_mapping_id(std::make_shared<FileMapping>(
                    _opts.tablet_id, _opts.rowset_id, _segment->id()));
            _column_iterators[cid].reset(new RowIdColumnIteratorV2(
                    IdManager::ID_VERSION, BackendOptions::get_backend_id(), file_id));
            continue;
        }

        // Virtual columns are materialized later from their expressions.
        if (_schema->column(cid)->name().starts_with(BeConsts::VIRTUAL_COLUMN_PREFIX)) {
            _column_iterators[cid] = std::make_unique<VirtualColumnIterator>();
            continue;
        }

        if (_column_iterators[cid] == nullptr) {
            RETURN_IF_ERROR(_segment->new_column_iterator(_opts.tablet_schema->column(cid),
                                                          &_column_iterators[cid], &_opts,
                                                          &_variant_sparse_column_cache));
            ColumnIteratorOptions iter_opts {
                    .use_page_cache = _opts.use_page_cache,
                    // If the col is predicate column, then should read the last page to check
                    // if the column is full dict encoding
                    .is_predicate_column = tmp_is_pred_column[cid],
                    .file_reader = _file_reader.get(),
                    .stats = _opts.stats,
                    .io_ctx = _opts.io_ctx,
            };
            RETURN_IF_ERROR(_column_iterators[cid]->init(iter_opts));
        }
    }

#ifndef NDEBUG
    // Sanity check: every virtual column must have received a VirtualColumnIterator.
    for (auto pair : _vir_cid_to_idx_in_block) {
        ColumnId vir_col_cid = pair.first;
        DCHECK(_column_iterators[vir_col_cid] != nullptr)
                << "Virtual column iterator for " << vir_col_cid << " should not be null";
        ColumnIterator* column_iter = _column_iterators[vir_col_cid].get();
        DCHECK(dynamic_cast<VirtualColumnIterator*>(column_iter) != nullptr)
                << "Virtual column iterator for " << vir_col_cid
                << " should be VirtualColumnIterator";
    }
#endif
    return Status::OK();
}
1569
1570
2.82k
// Builds the shared IndexQueryContext and creates an index iterator for every
// column that has an inverted index (first pass) or an ANN index (second pass).
Status SegmentIterator::_init_index_iterators() {
    SCOPED_RAW_TIMER(&_opts.stats->segment_iterator_init_index_iterators_timer_ns);
    if (_cur_rowid >= num_rows()) {
        return Status::OK();
    }

    _index_query_context = std::make_shared<IndexQueryContext>();
    _index_query_context->io_ctx = &_opts.io_ctx;
    _index_query_context->stats = _opts.stats;
    _index_query_context->runtime_state = _opts.runtime_state;

    if (_score_runtime) {
        _index_query_context->collection_statistics = _opts.collection_statistics;
        _index_query_context->collection_similarity = std::make_shared<CollectionSimilarity>();
        _index_query_context->query_limit = _score_runtime->get_limit();
        _index_query_context->is_asc = _score_runtime->is_asc();
    }

    // Inverted index iterators.
    for (auto cid : _schema->column_ids()) {
        // Use segment's own index_meta, for compatibility with future indexing needs to default to lowercase.
        if (_index_iterators[cid] != nullptr) {
            continue;
        }
        // In the _opts.tablet_schema, the sub-column type information for the variant is FieldType::OLAP_FIELD_TYPE_VARIANT.
        // This is because the sub-column is created in create_materialized_variant_column.
        // We use this column to locate the metadata for the inverted index, which requires a unique_id and path.
        const auto& column = _opts.tablet_schema->column(cid);
        std::vector<const TabletIndex*> index_metas;
        // Keep shared_ptr alive to prevent use-after-free when accessing raw pointers
        TabletIndexes index_metas_holder;
        // If the column is an extracted column, we need to find the sub-column in the parent column reader.
        std::shared_ptr<ColumnReader> column_reader;
        if (column.is_extracted_column()) {
            if (!_segment->_column_reader_cache->get_column_reader(
                        column.parent_unique_id(), &column_reader, _opts.stats) ||
                column_reader == nullptr) {
                continue;
            }
            auto* variant_reader = assert_cast<VariantColumnReader*>(column_reader.get());
            DataTypePtr data_type = _storage_name_and_type[cid].second;
            if (data_type != nullptr &&
                data_type->get_primitive_type() == PrimitiveType::TYPE_VARIANT) {
                DataTypePtr inferred_type;
                Status st = variant_reader->infer_data_type_for_path(
                        &inferred_type, column, _opts, _segment->_column_reader_cache.get());
                if (st.ok() && inferred_type != nullptr) {
                    data_type = inferred_type;
                }
            }
            index_metas_holder = variant_reader->find_subcolumn_tablet_indexes(column, data_type);
            // Extract raw pointers from shared_ptr for iteration
            for (const auto& index_ptr : index_metas_holder) {
                index_metas.push_back(index_ptr.get());
            }
        } else {
            // Non-extracted columns: the tablet schema knows the inverted index metadata directly.
            index_metas = _segment->_tablet_schema->inverted_indexs(column);
        }
        for (const auto* index_meta : index_metas) {
            RETURN_IF_ERROR(_segment->new_index_iterator(column, index_meta, _opts,
                                                         &_index_iterators[cid]));
        }
        if (_index_iterators[cid] != nullptr) {
            _index_iterators[cid]->set_context(_index_query_context);
        }
    }

    // Ann index iterators (only for columns still without an iterator).
    for (auto cid : _schema->column_ids()) {
        if (_index_iterators[cid] != nullptr) {
            continue;
        }
        const auto& column = _opts.tablet_schema->column(cid);
        const auto* ann_meta = _segment->_tablet_schema->ann_index(column);
        if (ann_meta == nullptr) {
            continue;
        }
        RETURN_IF_ERROR(
                _segment->new_index_iterator(column, ann_meta, _opts, &_index_iterators[cid]));
        if (_index_iterators[cid] != nullptr) {
            _index_iterators[cid]->set_context(_index_query_context);
        }
    }

    return Status::OK();
}
1657
1658
// Resolves a key to a row ordinal, preferring the primary key index on
// unique-key tables that have one, otherwise the short key index.
Status SegmentIterator::_lookup_ordinal(const RowCursor& key, bool is_include, rowid_t upper_bound,
                                        rowid_t* rowid) {
    const bool use_pk_index = _segment->_tablet_schema->keys_type() == UNIQUE_KEYS &&
                              _segment->get_primary_key_index() != nullptr;
    if (use_pk_index) {
        return _lookup_ordinal_from_pk_index(key, is_include, rowid);
    }
    return _lookup_ordinal_from_sk_index(key, is_include, upper_bound, rowid);
}
1666
1667
// look up one key to get its ordinal at which can get data by using short key index.
1668
// 'upper_bound' is defined the max ordinal the function will search.
1669
// We use upper_bound to reduce search times.
1670
// If we find a valid ordinal, it will be set in rowid and with Status::OK()
1671
// If we can not find a valid key in this segment, we will set rowid to upper_bound
1672
// Otherwise return error.
1673
// 1. get [start, end) ordinal through short key index
1674
// 2. binary search to find exact ordinal that match the input condition
1675
// Make is_include template to reduce branch
1676
// look up one key to get its ordinal at which can get data by using short key index.
// 'upper_bound' is defined the max ordinal the function will search.
// We use upper_bound to reduce search times.
// If we find a valid ordinal, it will be set in rowid and with Status::OK()
// If we can not find a valid key in this segment, we will set rowid to upper_bound
// Otherwise return error.
// 1. get [lo, hi) ordinal range through the short key index
// 2. binary search inside it for the exact ordinal matching the condition
Status SegmentIterator::_lookup_ordinal_from_sk_index(const RowCursor& key, bool is_include,
                                                      rowid_t upper_bound, rowid_t* rowid) {
    const ShortKeyIndexDecoder* sk_index_decoder = _segment->get_short_key_index();
    DCHECK(sk_index_decoder != nullptr);

    std::string index_key;
    key.encode_key_with_padding(&index_key, _segment->_tablet_schema->num_short_key_columns(),
                                is_include);

    const auto& key_col_ids = key.schema()->column_ids();

    // Clone the key once and pad CHAR fields to storage format before the binary search.
    // _seek_block holds storage-format data where CHAR is zero-padded to column length,
    // while RowCursor holds CHAR in compute format (unpadded). Padding once here avoids
    // repeated allocation inside the comparison loop.
    RowCursor padded_key = key.clone();
    padded_key.pad_char_fields();

    ssize_t start_block_id = 0;
    if (auto start_iter = sk_index_decoder->lower_bound(index_key); start_iter.valid()) {
        // Because previous block may contain this key, so we should set rowid to
        // last block's first row.
        start_block_id = start_iter.ordinal();
        if (start_block_id > 0) {
            start_block_id--;
        }
    } else {
        // When we don't find a valid index item, which means all short key is
        // smaller than input key, this means that this key may exist in the last
        // row block. so we set the rowid to first row of last row block.
        start_block_id = sk_index_decoder->num_items() - 1;
    }
    rowid_t lo = cast_set<rowid_t>(start_block_id) * sk_index_decoder->num_rows_per_block();

    rowid_t hi = upper_bound;
    if (auto end_iter = sk_index_decoder->upper_bound(index_key); end_iter.valid()) {
        hi = cast_set<rowid_t>(end_iter.ordinal()) * sk_index_decoder->num_rows_per_block();
    }

    // binary search to find the exact key
    while (lo < hi) {
        rowid_t mid = (lo + hi) / 2;
        RETURN_IF_ERROR(_seek_and_peek(mid));
        int cmp = _compare_short_key_with_seek_block(padded_key, key_col_ids);
        // cmp > 0 always moves right; on an exact match, lower-bound semantics
        // (is_include) search left while upper-bound semantics move right.
        if (cmp > 0 || (cmp == 0 && !is_include)) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }

    *rowid = lo;
    return Status::OK();
}
1740
1741
// Resolves a key to a row ordinal through the primary key index of a
// unique-key table, honoring inclusive/exclusive bounds and stripping the
// sequence column from the comparison when the table has one.
Status SegmentIterator::_lookup_ordinal_from_pk_index(const RowCursor& key, bool is_include,
                                                      rowid_t* rowid) {
    DCHECK(_segment->_tablet_schema->keys_type() == UNIQUE_KEYS);
    const PrimaryKeyIndexReader* pk_index_reader = _segment->get_primary_key_index();
    DCHECK(pk_index_reader != nullptr);

    std::string index_key;
    key.encode_key_with_padding<true>(&index_key, _segment->_tablet_schema->num_key_columns(),
                                      is_include);

    // Fast paths: the key falls outside the segment's [min_key, max_key] range.
    if (index_key < _segment->min_key()) {
        *rowid = 0;
        return Status::OK();
    }
    if (index_key > _segment->max_key()) {
        *rowid = num_rows();
        return Status::OK();
    }

    bool exact_match = false;

    std::unique_ptr<segment_v2::IndexedColumnIterator> index_iterator;
    RETURN_IF_ERROR(pk_index_reader->new_iterator(&index_iterator, _opts.stats));

    Status status = index_iterator->seek_at_or_after(&index_key, &exact_match);
    if (UNLIKELY(!status.ok())) {
        *rowid = num_rows();
        // No entry at or after the key: the lookup lands past the segment end.
        if (status.is<ENTRY_NOT_FOUND>()) {
            return Status::OK();
        }
        return status;
    }
    *rowid = cast_set<rowid_t>(index_iterator->get_current_ordinal());

    // The sequence column needs to be removed from primary key index when comparing key
    bool has_seq_col = _segment->_tablet_schema->has_sequence_col();
    // Used to get key range from primary key index,
    // for mow with cluster key table, we should get key range from short key index.
    DCHECK(_segment->_tablet_schema->cluster_key_uids().empty());

    // if full key is exact_match, the primary key without sequence column should also the same
    if (has_seq_col && !exact_match) {
        size_t seq_col_length =
                _segment->_tablet_schema->column(_segment->_tablet_schema->sequence_col_idx())
                        .length() +
                1;
        auto index_type = DataTypeFactory::instance().create_data_type(
                _segment->_pk_index_reader->type(), 1, 0);
        auto index_column = index_type->create_column();
        size_t num_to_read = 1;
        size_t num_read = num_to_read;
        RETURN_IF_ERROR(index_iterator->next_batch(&num_read, index_column));
        DCHECK(num_to_read == num_read);

        Slice sought_key(index_column->get_data_at(0).data, index_column->get_data_at(0).size);
        Slice sought_key_without_seq(sought_key.get_data(),
                                     sought_key.get_size() - seq_col_length);

        // compare key
        if (Slice(index_key).compare(sought_key_without_seq) == 0) {
            exact_match = true;
        }
    }

    // find the key in primary key index, and the is_include is false, so move
    // to the next row.
    if (exact_match && !is_include) {
        *rowid += 1;
    }
    return Status::OK();
}
1810
1811
// seek to the row and load that row to _key_cursor
1812
0
Status SegmentIterator::_seek_and_peek(rowid_t rowid) {
    {
        // Account this seek in the block-init statistics and time it.
        _opts.stats->block_init_seek_num += 1;
        SCOPED_RAW_TIMER(&_opts.stats->block_init_seek_ns);
        RETURN_IF_ERROR(_seek_columns(_seek_schema->column_ids(), rowid));
    }

    // Reset _seek_block so its columns can be reused; clearing column by
    // column keeps the memory handling simple (row-based reuse).
    for (auto& seek_col : _seek_block) {
        seek_col->clear();
    }

    // Peek exactly one row at the sought position into _seek_block.
    constexpr size_t kRowsToPeek = 1;
    RETURN_IF_ERROR(_read_columns(_seek_schema->column_ids(), _seek_block, kRowsToPeek));
    return Status::OK();
}
1828
1829
0
Status SegmentIterator::_seek_columns(const std::vector<ColumnId>& column_ids, rowid_t pos) {
    // Position every column iterator whose data will actually be read at
    // ordinal `pos`; columns that never need data are left untouched.
    for (ColumnId cid : column_ids) {
        if (_need_read_data(cid)) {
            RETURN_IF_ERROR(_column_iterators[cid]->seek_to_ordinal(pos));
        }
    }
    return Status::OK();
}
1838
1839
/* ---------------------- for vectorization implementation  ---------------------- */
1840
1841
/**
1842
 *  For storage layer data type, can be measured from two perspectives:
1843
 *  1 Whether the type can be read in a fast way(batch read using SIMD)
1844
 *    Such as integer type and float type, this type can be read in SIMD way.
1845
 *    For the type string/bitmap/hll, they can not be read in batch way, so read this type data is slow.
1846
 *   If a type can be read fast, we can try to eliminate Lazy Materialization, because we think for this type, seek cost > read cost.
1847
 *   This is an estimate, if we want more precise cost, statistics collection is necessary(this is a todo).
1848
 *   In short, when the returned non-pred columns contain string/hll/bitmap, we use Lazy Materialization.
1849
 *   Otherwise, we disable it.
1850
 *
1851
 *   When Lazy Materialization enable, we need to read column at least two times.
1852
 *   First time to read Pred col, second time to read non-pred.
1853
 *   Here's an interesting question to research, whether read Pred col once is the best plan.
1854
 *   (why not read Pred col twice or more?)
1855
 *
1856
 *   When Lazy Materialization disable, we just need to read once.
1857
 *
1858
 *
1859
 *  2 Whether the predicate type can be evaluated in a fast way (using SIMD to eval pred)
1860
 *    Such as integer type and float type, they can be eval fast.
1861
 *    But for BloomFilter/string/date, they eval slow.
1862
 *    If a type can be eval fast, we use vectorization to eval it.
1863
 *    Otherwise, we use short-circuit to eval it.
1864
 *
1865
 *
1866
 */
1867
1868
// todo(wb) need a UT here
1869
2.82k
// Decide, for every column read by this query, whether it is read eagerly
// (predicate / first-read columns) or lazily (non-predicate columns read only
// for surviving rows). Also classifies each column predicate as
// vectorized-eval vs short-circuit-eval and records delete-condition columns.
//
// Fills: _is_pred_column, _vec_pred_column_ids, _short_cir_pred_column_ids,
//        _schema_block_id_map, _is_common_expr_column, _columns_to_filter,
//        _non_predicate_columns, _common_expr_column_ids, _predicate_column_ids,
//        _lazy_materialization_read, _is_need_vec_eval/_is_need_short_eval/_is_need_expr_eval.
Status SegmentIterator::_vec_init_lazy_materialization() {
    _is_pred_column.resize(_schema->columns().size(), false);

    // including short/vec/delete pred
    std::set<ColumnId> pred_column_ids;
    _lazy_materialization_read = false;

    std::set<ColumnId> del_cond_id_set;
    _opts.delete_condition_predicates->get_all_column_ids(del_cond_id_set);

    // Record range / bloom-filter delete predicates separately so later stages
    // can treat them differently.
    std::set<std::shared_ptr<const ColumnPredicate>> delete_predicate_set {};
    _opts.delete_condition_predicates->get_all_column_predicate(delete_predicate_set);
    for (auto predicate : delete_predicate_set) {
        if (PredicateTypeTraits::is_range(predicate->type())) {
            _delete_range_column_ids.push_back(predicate->column_id());
        } else if (PredicateTypeTraits::is_bloom_filter(predicate->type())) {
            _delete_bloom_filter_column_ids.push_back(predicate->column_id());
        }
    }

    // Step1: extract columns that can be lazy materialization
    if (!_col_predicates.empty() || !del_cond_id_set.empty()) {
        std::set<ColumnId> short_cir_pred_col_id_set; // using set for distinct cid
        std::set<ColumnId> vec_pred_col_id_set;

        for (auto predicate : _col_predicates) {
            auto cid = predicate->column_id();
            _is_pred_column[cid] = true;
            pred_column_ids.insert(cid);

            // check pred using short eval or vec eval
            if (_can_evaluated_by_vectorized(predicate)) {
                vec_pred_col_id_set.insert(cid);
                _pre_eval_block_predicate.push_back(predicate);
            } else {
                short_cir_pred_col_id_set.insert(cid);
                _short_cir_eval_predicate.push_back(predicate);
            }
            if (predicate->is_runtime_filter()) {
                _filter_info_id.push_back(predicate);
            }
        }

        // handle delete_condition: delete predicates are always evaluated via
        // the short-circuit path.
        if (!del_cond_id_set.empty()) {
            short_cir_pred_col_id_set.insert(del_cond_id_set.begin(), del_cond_id_set.end());
            pred_column_ids.insert(del_cond_id_set.begin(), del_cond_id_set.end());

            for (auto cid : del_cond_id_set) {
                _is_pred_column[cid] = true;
            }
        }

        _vec_pred_column_ids.assign(vec_pred_col_id_set.cbegin(), vec_pred_col_id_set.cend());
        _short_cir_pred_column_ids.assign(short_cir_pred_col_id_set.cbegin(),
                                          short_cir_pred_col_id_set.cend());
    }

    if (!_vec_pred_column_ids.empty()) {
        _is_need_vec_eval = true;
    }
    if (!_short_cir_pred_column_ids.empty()) {
        _is_need_short_eval = true;
    }

    // ColumnId to column index in block
    // ColumnId will contain all columns in tablet schema, including virtual columns and global rowid column,
    _schema_block_id_map.resize(_schema->columns().size(), -1);
    // Use cols read by query to initialize _schema_block_id_map.
    // We need to know the index of each column in the block.
    // There is an assumption here that the columns in the block are in the same order as in the read schema.
    // TODO: A problem is that, delete condition columns will exist in _schema->column_ids but not in block if
    // delete column is not read by the query.
    for (int i = 0; i < _schema->num_column_ids(); i++) {
        auto cid = _schema->column_id(i);
        _schema_block_id_map[cid] = i;
    }

    // Step2: extract columns that can execute expr context
    _is_common_expr_column.resize(_schema->columns().size(), false);
    if (_enable_common_expr_pushdown && !_remaining_conjunct_roots.empty()) {
        for (auto expr : _remaining_conjunct_roots) {
            RETURN_IF_ERROR(_extract_common_expr_columns(expr));
        }
        if (!_common_expr_columns.empty()) {
            _is_need_expr_eval = true;
            for (auto cid : _schema->column_ids()) {
                // pred column also needs to be filtered by expr, exclude additional delete condition column.
                // if delete condition column not in the block, no filter is needed
                // and will be removed from _columns_to_filter in the first next_batch.
                if (_is_common_expr_column[cid] || _is_pred_column[cid]) {
                    auto loc = _schema_block_id_map[cid];
                    _columns_to_filter.push_back(loc);
                }
            }

            // Virtual columns are filtered by their position in the block.
            for (auto pair : _vir_cid_to_idx_in_block) {
                _columns_to_filter.push_back(cast_set<ColumnId>(pair.second));
            }
        }
    }

    // Step 3: fill non predicate columns and second read column
    // if _schema columns size equal to pred_column_ids size, lazy_materialization_read is false,
    // all columns are lazy materialization columns without non predicate column.
    // If common expr pushdown exists, and expr column is not contained in lazy materialization columns,
    // add to second read column, which will be read after lazy materialization
    if (_schema->column_ids().size() > pred_column_ids.size()) {
        // pred_column_ids maybe empty, so that could not set _lazy_materialization_read = true here
        // has to check there is at least one predicate column
        for (auto cid : _schema->column_ids()) {
            if (!_is_pred_column[cid]) {
                if (_is_need_vec_eval || _is_need_short_eval) {
                    _lazy_materialization_read = true;
                }
                if (_is_common_expr_column[cid]) {
                    _common_expr_column_ids.push_back(cid);
                } else {
                    _non_predicate_columns.push_back(cid);
                }
            }
        }
    }

    // Step 4: fill first read columns
    if (_lazy_materialization_read) {
        // insert pred cid to first_read_columns
        for (auto cid : pred_column_ids) {
            _predicate_column_ids.push_back(cid);
        }
    } else if (!_is_need_vec_eval && !_is_need_short_eval && !_is_need_expr_eval) {
        // No predicates at all: every schema column is read in the first pass.
        for (int i = 0; i < _schema->num_column_ids(); i++) {
            auto cid = _schema->column_id(i);
            _predicate_column_ids.push_back(cid);
        }
    } else {
        if (_is_need_vec_eval || _is_need_short_eval) {
            // TODO To refactor, because we suppose lazy materialization is better performance.
            // pred exits, but we can eliminate lazy materialization
            // insert pred/non-pred cid to first read columns
            std::set<ColumnId> pred_id_set;
            pred_id_set.insert(_short_cir_pred_column_ids.begin(),
                               _short_cir_pred_column_ids.end());
            pred_id_set.insert(_vec_pred_column_ids.begin(), _vec_pred_column_ids.end());

            DCHECK(_common_expr_column_ids.empty());
            // _non_predicate_column_ids must be empty. Otherwise _lazy_materialization_read must not false.
            for (int i = 0; i < _schema->num_column_ids(); i++) {
                auto cid = _schema->column_id(i);
                if (pred_id_set.find(cid) != pred_id_set.end()) {
                    _predicate_column_ids.push_back(cid);
                }
            }
        } else if (_is_need_expr_eval) {
            DCHECK(!_is_need_vec_eval && !_is_need_short_eval);
            for (auto cid : _common_expr_columns) {
                _predicate_column_ids.push_back(cid);
            }
        }
    }

    VLOG_DEBUG << fmt::format(
            "Lazy materialization init end. "
            "lazy_materialization_read: {}, "
            "_col_predicates size: {}, "
            "_cols_read_by_column_predicate: [{}], "
            "_non_predicate_columns: [{}], "
            "_cols_read_by_common_expr: [{}], "
            "columns_to_filter: [{}], "
            "_schema_block_id_map: [{}]",
            _lazy_materialization_read, _col_predicates.size(),
            fmt::join(_predicate_column_ids, ","), fmt::join(_non_predicate_columns, ","),
            fmt::join(_common_expr_column_ids, ","), fmt::join(_columns_to_filter, ","),
            fmt::join(_schema_block_id_map, ","));
    return Status::OK();
}
2045
2046
0
bool SegmentIterator::_can_evaluated_by_vectorized(std::shared_ptr<ColumnPredicate> predicate) {
2047
0
    auto cid = predicate->column_id();
2048
0
    FieldType field_type = _schema->column(cid)->type();
2049
0
    if (field_type == FieldType::OLAP_FIELD_TYPE_VARIANT) {
2050
        // Use variant cast dst type
2051
0
        field_type = _opts.target_cast_type_for_variants[_schema->column(cid)->name()]
2052
0
                             ->get_storage_field_type();
2053
0
    }
2054
0
    switch (predicate->type()) {
2055
0
    case PredicateType::EQ:
2056
0
    case PredicateType::NE:
2057
0
    case PredicateType::LE:
2058
0
    case PredicateType::LT:
2059
0
    case PredicateType::GE:
2060
0
    case PredicateType::GT: {
2061
0
        if (field_type == FieldType::OLAP_FIELD_TYPE_VARCHAR ||
2062
0
            field_type == FieldType::OLAP_FIELD_TYPE_CHAR ||
2063
0
            field_type == FieldType::OLAP_FIELD_TYPE_STRING) {
2064
0
            return config::enable_low_cardinality_optimize &&
2065
0
                   _opts.io_ctx.reader_type == ReaderType::READER_QUERY &&
2066
0
                   _column_iterators[cid]->is_all_dict_encoding();
2067
0
        } else if (field_type == FieldType::OLAP_FIELD_TYPE_DECIMAL) {
2068
0
            return false;
2069
0
        }
2070
0
        return true;
2071
0
    }
2072
0
    default:
2073
0
        return false;
2074
0
    }
2075
0
}
2076
2077
7.06k
bool SegmentIterator::_has_char_type(const StorageField& column_desc) {
2078
7.06k
    switch (column_desc.type()) {
2079
0
    case FieldType::OLAP_FIELD_TYPE_CHAR:
2080
0
        return true;
2081
2
    case FieldType::OLAP_FIELD_TYPE_ARRAY:
2082
2
        return _has_char_type(*column_desc.get_sub_field(0));
2083
2
    case FieldType::OLAP_FIELD_TYPE_MAP:
2084
2
        return _has_char_type(*column_desc.get_sub_field(0)) ||
2085
2
               _has_char_type(*column_desc.get_sub_field(1));
2086
0
    case FieldType::OLAP_FIELD_TYPE_STRUCT:
2087
0
        for (int idx = 0; idx < column_desc.get_sub_field_count(); ++idx) {
2088
0
            if (_has_char_type(*column_desc.get_sub_field(idx))) {
2089
0
                return true;
2090
0
            }
2091
0
        }
2092
0
        return false;
2093
7.05k
    default:
2094
7.05k
        return false;
2095
7.06k
    }
2096
7.06k
};
2097
2098
2.82k
// Record which block positions hold CHAR-bearing columns (_char_type_idx) and
// which schema cids are plain CHAR columns (_is_char_type). Idempotent: a
// non-empty _char_type_idx means a previous call already did the work.
void SegmentIterator::_vec_init_char_column_id(Block* block) {
    if (!_char_type_idx.empty()) {
        return;
    }
    _is_char_type.resize(_schema->columns().size(), false);
    for (size_t idx = 0; idx < _schema->num_column_ids(); idx++) {
        const ColumnId cid = _schema->column_id(idx);
        const StorageField* field = _schema->column(cid);

        // The additional deleted filter condition will be in the materialized
        // column at the end of the block. After _output_column_by_sel_idx it is
        // erased, so we do not need to shrink it — only track positions that
        // actually exist in the block.
        if (idx < block->columns() && _has_char_type(*field)) {
            _char_type_idx.emplace_back(idx);
        }

        _is_char_type[cid] = (field->type() == FieldType::OLAP_FIELD_TYPE_CHAR);
    }
}
2120
2121
bool SegmentIterator::_prune_column(ColumnId cid, MutableColumnPtr& column, bool fill_defaults,
2122
27.1k
                                    size_t num_of_defaults) {
2123
27.1k
    if (_need_read_data(cid)) {
2124
27.1k
        return false;
2125
27.1k
    }
2126
0
    if (!fill_defaults) {
2127
0
        return true;
2128
0
    }
2129
0
    if (column->is_nullable()) {
2130
0
        auto nullable_col_ptr = reinterpret_cast<ColumnNullable*>(column.get());
2131
0
        nullable_col_ptr->get_null_map_column().insert_many_defaults(num_of_defaults);
2132
0
        nullable_col_ptr->get_nested_column_ptr()->insert_many_defaults(num_of_defaults);
2133
0
    } else {
2134
        // assert(column->is_const());
2135
0
        column->insert_many_defaults(num_of_defaults);
2136
0
    }
2137
0
    return true;
2138
0
}
2139
2140
// Read `nrows` rows of every column in `column_ids` from the current iterator
// positions into `column_block`. Pruned columns are padded with defaults
// instead of being read. Fails if a column yields fewer rows than requested.
Status SegmentIterator::_read_columns(const std::vector<ColumnId>& column_ids,
                                      MutableColumns& column_block, size_t nrows) {
    for (ColumnId cid : column_ids) {
        auto& dst = column_block[cid];
        size_t rows_fetched = nrows;
        if (_prune_column(cid, dst, true, rows_fetched)) {
            // Column not needed; it was padded with defaults above.
            continue;
        }
        RETURN_IF_ERROR(_column_iterators[cid]->next_batch(&rows_fetched, dst));
        if (nrows != rows_fetched) {
            return Status::Error<ErrorCode::INTERNAL_ERROR>("nrows({}) != rows_read({})", nrows,
                                                            rows_fetched);
        }
    }
    return Status::OK();
}
2156
2157
// Prepare `current_columns` for the next batch read: clear the reusable block
// columns, and for each schema column decide whether to reuse the block's
// column, keep a privately-owned column (pred columns / columns past the
// block's end), or recreate a column with the storage-layer type when it
// differs from the schema type. Virtual columns get a ColumnNothing stub.
// All columns are reserved to `nrows_read_limit` rows up front.
Status SegmentIterator::_init_current_block(Block* block,
                                            std::vector<MutableColumnPtr>& current_columns,
                                            uint32_t nrows_read_limit) {
    block->clear_column_data(_schema->num_column_ids());

    for (size_t i = 0; i < _schema->num_column_ids(); i++) {
        auto cid = _schema->column_id(i);
        const auto* column_desc = _schema->column(cid);

        auto file_column_type = _storage_name_and_type[cid].second;
        auto expected_type = Schema::get_data_type_ptr(*column_desc);
        if (!_is_pred_column[cid] && !file_column_type->equals(*expected_type)) {
            // The storage layer type is different from schema needed type, so we use storage
            // type to read columns instead of schema type for safety
            VLOG_DEBUG << fmt::format(
                    "Recreate column with expected type {}, file column type {}, col_name {}, "
                    "col_path {}",
                    block->get_by_position(i).type->get_name(), file_column_type->get_name(),
                    column_desc->name(),
                    column_desc->path() == nullptr ? "" : column_desc->path()->get_path());
            // TODO reuse
            current_columns[cid] = file_column_type->create_column();
            current_columns[cid]->reserve(nrows_read_limit);
        } else {
            // the column in block must clear() here to insert new data
            if (_is_pred_column[cid] ||
                i >= block->columns()) { //todo(wb) maybe we can release it after output block
                // Pred columns (and columns beyond the block) are owned by
                // _current_return_columns across batches; they must already exist.
                if (current_columns[cid].get() == nullptr) {
                    return Status::InternalError(
                            "SegmentIterator meet invalid column, id={}, name={}", cid,
                            _schema->column(cid)->name());
                }
                current_columns[cid]->clear();
            } else { // non-predicate column
                // Take ownership of the block's column and reuse its capacity.
                current_columns[cid] = std::move(*block->get_by_position(i).column).mutate();
                current_columns[cid]->reserve(nrows_read_limit);
            }
        }
    }

    // Virtual columns start as ColumnNothing placeholders; they are
    // materialized later (e.g. by common expression evaluation).
    for (auto entry : _virtual_column_exprs) {
        auto cid = entry.first;
        current_columns[cid] = ColumnNothing::create(0);
        current_columns[cid]->reserve(nrows_read_limit);
    }

    return Status::OK();
}
2205
2206
10.1k
// Move the lazily-read non-predicate columns from _current_return_columns into
// the caller's block at the positions recorded in _schema_block_id_map.
// Columns whose position lies beyond the block (delete-predicate-only columns)
// are skipped; virtual columns already materialized in the block are left as-is.
Status SegmentIterator::_output_non_pred_columns(Block* block) {
    SCOPED_RAW_TIMER(&_opts.stats->output_col_ns);
    VLOG_DEBUG << fmt::format(
            "Output non-predicate columns, _non_predicate_columns: [{}], "
            "_schema_block_id_map: [{}]",
            fmt::join(_non_predicate_columns, ","), fmt::join(_schema_block_id_map, ","));
    // Convert storage-typed columns back to the schema-expected types first.
    RETURN_IF_ERROR(_convert_to_expected_type(_non_predicate_columns));
    for (auto cid : _non_predicate_columns) {
        auto loc = _schema_block_id_map[cid];
        // Whether a delete predicate column gets output depends on how the caller builds
        // the block passed to next_batch(). Both calling paths now build the block with
        // only the output schema (return_columns), so delete predicate columns are skipped:
        //
        // 1) VMergeIterator path: block_reset() builds _block using the output schema
        //    (return_columns only), e.g. block has 2 columns {c1, c2}.
        //    Here loc=2 for delete predicate c3, block->columns()=2, so loc < block->columns()
        //    is false, and c3 is skipped.
        //
        // 2) VUnionIterator path: the caller's block is built with only return_columns
        //    (output schema), e.g. block has 2 columns {c1, c2}.
        //    Here loc=2 for c3, block->columns()=2, so loc < block->columns() is false,
        //    and c3 is skipped — same behavior as the VMergeIterator path.
        if (loc < block->columns()) {
            // A virtual column that is still ColumnNothing in the block has not
            // been materialized yet and must be replaced; a normal (non-virtual)
            // column is always replaced.
            bool column_in_block_is_nothing = check_and_get_column<const ColumnNothing>(
                    block->get_by_position(loc).column.get());
            bool column_is_normal = !_vir_cid_to_idx_in_block.contains(cid);
            bool return_column_is_nothing =
                    check_and_get_column<const ColumnNothing>(_current_return_columns[cid].get());
            VLOG_DEBUG << fmt::format(
                    "Cid {} loc {}, column_in_block_is_nothing {}, column_is_normal {}, "
                    "return_column_is_nothing {}",
                    cid, loc, column_in_block_is_nothing, column_is_normal,
                    return_column_is_nothing);

            if (column_in_block_is_nothing || column_is_normal) {
                block->replace_by_position(loc, std::move(_current_return_columns[cid]));
                VLOG_DEBUG << fmt::format(
                        "Output non-predicate column, cid: {}, loc: {}, col_name: {}, rows {}", cid,
                        loc, _schema->column(cid)->name(),
                        block->get_by_position(loc).column->size());
            }
            // Means virtual column in block has been materialized(maybe by common expr).
            // so do nothing here.
        }
    }
    return Status::OK();
}
2253
2254
/**
2255
 * Reads columns by their index, handling both continuous and discontinuous rowid scenarios.
2256
 *
2257
 * This function is designed to read a specified number of rows (up to nrows_read_limit)
2258
 * from the segment iterator, dealing with both continuous and discontinuous rowid arrays.
2259
 * It operates as follows:
2260
 *
2261
 * 1. Reads a batch of rowids (up to the specified limit), and checks if they are continuous.
2262
 *    Continuous here means that the rowids form an unbroken sequence (e.g., 1, 2, 3, 4...).
2263
 *
2264
 * 2. For each column that needs to be read (identified by _predicate_column_ids):
2265
 *    - If the rowids are continuous, the function uses seek_to_ordinal and next_batch
2266
 *      for efficient reading.
2267
 *    - If the rowids are not continuous, the function processes them in smaller batches
2268
 *      (each of size up to 256). Each batch is checked for internal continuity:
2269
 *        a. If a batch is continuous, uses seek_to_ordinal and next_batch for that batch.
2270
 *        b. If a batch is not continuous, uses read_by_rowids for individual rowids in the batch.
2271
 *
2272
 * This approach optimizes reading performance by leveraging batch processing for continuous
2273
 * rowid sequences and handling discontinuities gracefully in smaller chunks.
2274
 */
2275
12.9k
// Read up to `nrows_read_limit` rows of every first-read (predicate) column.
// Rowids come from _range_iter; when they are fully continuous the columns are
// read with one seek + next_batch, otherwise they are processed in sub-batches
// that are each either bulk-read (if internally continuous) or fetched via
// read_by_rowids. `nrows_read` returns the number of rowids actually consumed.
Status SegmentIterator::_read_columns_by_index(uint32_t nrows_read_limit, uint16_t& nrows_read) {
    SCOPED_RAW_TIMER(&_opts.stats->predicate_column_read_ns);

    nrows_read = (uint16_t)_range_iter->read_batch_rowids(_block_rowids.data(), nrows_read_limit);
    // Continuous means the whole batch forms an unbroken rowid sequence.
    bool is_continuous = (nrows_read > 1) &&
                         (_block_rowids[nrows_read - 1] - _block_rowids[0] == nrows_read - 1);
    VLOG_DEBUG << fmt::format(
            "nrows_read from range iterator: {}, is_continuous {}, _cols_read_by_column_predicate "
            "[{}]",
            nrows_read, is_continuous, fmt::join(_predicate_column_ids, ","));

    LOG_IF(INFO, config::enable_segment_prefetch_verbose_log) << fmt::format(
            "[verbose] SegmentIterator::_read_columns_by_index read {} rowids, continuous: {}, "
            "rowids: [{}...{}]",
            nrows_read, is_continuous, nrows_read > 0 ? _block_rowids[0] : 0,
            nrows_read > 0 ? _block_rowids[nrows_read - 1] : 0);
    for (auto cid : _predicate_column_ids) {
        auto& column = _current_return_columns[cid];
        VLOG_DEBUG << fmt::format("Reading column {}, col_name {}", cid,
                                  _schema->column(cid)->name());
        if (!_virtual_column_exprs.contains(cid)) {
            // Key columns whose values can be synthesized, and prunable
            // columns, are skipped entirely.
            if (_no_need_read_key_data(cid, column, nrows_read)) {
                VLOG_DEBUG << fmt::format("Column {} no need to read.", cid);
                continue;
            }
            if (_prune_column(cid, column, true, nrows_read)) {
                VLOG_DEBUG << fmt::format("Column {} is pruned. No need to read data.", cid);
                continue;
            }
            DBUG_EXECUTE_IF("segment_iterator._read_columns_by_index", {
                auto col_name = _opts.tablet_schema->column(cid).name();
                auto debug_col_name =
                        DebugPoints::instance()->get_debug_param_or_default<std::string>(
                                "segment_iterator._read_columns_by_index", "column_name", "");
                if (debug_col_name.empty() && col_name != "__DORIS_DELETE_SIGN__") {
                    return Status::Error<ErrorCode::INTERNAL_ERROR>(
                            "does not need to read data, {}", col_name);
                }
                if (debug_col_name.find(col_name) != std::string::npos) {
                    return Status::Error<ErrorCode::INTERNAL_ERROR>(
                            "does not need to read data, {}", col_name);
                }
            })
        }

        if (is_continuous) {
            // Whole batch in one seek + bulk read.
            size_t rows_read = nrows_read;
            _opts.stats->predicate_column_read_seek_num += 1;
            // The timer is only scoped in when profiling is enabled to avoid
            // its overhead on the default path.
            if (_opts.runtime_state && _opts.runtime_state->enable_profile()) {
                SCOPED_RAW_TIMER(&_opts.stats->predicate_column_read_seek_ns);
                RETURN_IF_ERROR(_column_iterators[cid]->seek_to_ordinal(_block_rowids[0]));
            } else {
                RETURN_IF_ERROR(_column_iterators[cid]->seek_to_ordinal(_block_rowids[0]));
            }
            RETURN_IF_ERROR(_column_iterators[cid]->next_batch(&rows_read, column));
            if (rows_read != nrows_read) {
                return Status::Error<ErrorCode::INTERNAL_ERROR>("nrows({}) != rows_read({})",
                                                                nrows_read, rows_read);
            }
        } else {
            // Discontinuous batch: process in sub-batches, bulk-reading any
            // sub-batch that happens to be internally continuous.
            const uint32_t batch_size = _range_iter->get_batch_size();
            uint32_t processed = 0;
            while (processed < nrows_read) {
                uint32_t current_batch_size = std::min(batch_size, nrows_read - processed);
                bool batch_continuous = (current_batch_size > 1) &&
                                        (_block_rowids[processed + current_batch_size - 1] -
                                                 _block_rowids[processed] ==
                                         current_batch_size - 1);

                if (batch_continuous) {
                    size_t rows_read = current_batch_size;
                    _opts.stats->predicate_column_read_seek_num += 1;
                    if (_opts.runtime_state && _opts.runtime_state->enable_profile()) {
                        SCOPED_RAW_TIMER(&_opts.stats->predicate_column_read_seek_ns);
                        RETURN_IF_ERROR(
                                _column_iterators[cid]->seek_to_ordinal(_block_rowids[processed]));
                    } else {
                        RETURN_IF_ERROR(
                                _column_iterators[cid]->seek_to_ordinal(_block_rowids[processed]));
                    }
                    RETURN_IF_ERROR(_column_iterators[cid]->next_batch(&rows_read, column));
                    if (rows_read != current_batch_size) {
                        return Status::Error<ErrorCode::INTERNAL_ERROR>(
                                "batch nrows({}) != rows_read({})", current_batch_size, rows_read);
                    }
                } else {
                    RETURN_IF_ERROR(_column_iterators[cid]->read_by_rowids(
                            &_block_rowids[processed], current_batch_size, column));
                }
                processed += current_batch_size;
            }
        }
    }

    return Status::OK();
}
2371
void SegmentIterator::_replace_version_col_if_needed(const std::vector<ColumnId>& column_ids,
2372
14.3k
                                                     size_t num_rows) {
2373
    // Only the rowset with single version need to replace the version column.
2374
    // Doris can't determine the version before publish_version finished, so
2375
    // we can't write data to __DORIS_VERSION_COL__ in segment writer, the value
2376
    // is 0 by default.
2377
    // So we need to replace the value to real version while reading.
2378
14.3k
    if (_opts.version.first != _opts.version.second) {
2379
6.49k
        return;
2380
6.49k
    }
2381
7.81k
    int32_t version_idx = _schema->version_col_idx();
2382
7.81k
    if (std::ranges::find(column_ids, version_idx) == column_ids.end()) {
2383
7.81k
        return;
2384
7.81k
    }
2385
2386
0
    const auto* column_desc = _schema->column(version_idx);
2387
0
    auto column = Schema::get_data_type_ptr(*column_desc)->create_column();
2388
0
    DCHECK(_schema->column(version_idx)->type() == FieldType::OLAP_FIELD_TYPE_BIGINT);
2389
0
    auto* col_ptr = assert_cast<ColumnInt64*>(column.get());
2390
0
    for (size_t j = 0; j < num_rows; j++) {
2391
0
        col_ptr->insert_value(_opts.version.second);
2392
0
    }
2393
0
    _current_return_columns[version_idx] = std::move(column);
2394
0
    VLOG_DEBUG << "replaced version column in segment iterator, version_col_idx:" << version_idx;
2395
0
}
2396
2397
// Evaluates all pre-evaluation (vectorized) block predicates over the first
// `selected_size` rows of the current batch. Writes the indexes of rows that
// survive every predicate into `sel_rowid_idx` and returns the survivor count.
uint16_t SegmentIterator::_evaluate_vectorization_predicate(uint16_t* sel_rowid_idx,
                                                            uint16_t selected_size) {
    SCOPED_RAW_TIMER(&_opts.stats->vec_cond_ns);
    // Detect whether every predicate has degenerated to always-true; always-true
    // predicates still get their filter-info counters updated.
    bool all_pred_always_true = true;
    for (const auto& pred : _pre_eval_block_predicate) {
        if (!pred->always_true()) {
            all_pred_always_true = false;
        } else {
            pred->update_filter_info(0, 0, selected_size);
        }
    }

    const uint16_t original_size = selected_size;
    //If all predicates are always_true, then return directly.
    if (all_pred_always_true || !_is_need_vec_eval) {
        // Identity selection: every row index passes through unchanged.
        for (uint16_t i = 0; i < original_size; ++i) {
            sel_rowid_idx[i] = i;
        }
        // All preds are always_true, so return immediately and update the profile statistics here.
        _opts.stats->vec_cond_input_rows += original_size;
        return original_size;
    }

    // Evaluate predicates into a per-row boolean flag array: the first active
    // predicate writes the flags, subsequent ones AND into them.
    _ret_flags.resize(original_size);
    DCHECK(!_pre_eval_block_predicate.empty());
    bool is_first = true;
    for (auto& pred : _pre_eval_block_predicate) {
        if (pred->always_true()) {
            continue;
        }
        auto column_id = pred->column_id();
        auto& column = _current_return_columns[column_id];
        if (is_first) {
            pred->evaluate_vec(*column, original_size, (bool*)_ret_flags.data());
            is_first = false;
        } else {
            pred->evaluate_and_vec(*column, original_size, (bool*)_ret_flags.data());
        }
    }

    uint16_t new_size = 0;

    // Compact the boolean flags into sel_rowid_idx one SIMD-mask-width chunk at a
    // time: skip all-zero chunks, bulk-copy all-ones chunks, otherwise visit only
    // the set bits of the chunk's bitmask.
    uint16_t sel_pos = 0;
    const uint16_t sel_end = sel_pos + selected_size;
    static constexpr size_t SIMD_BYTES = simd::bits_mask_length();
    const uint16_t sel_end_simd = sel_pos + selected_size / SIMD_BYTES * SIMD_BYTES;

    while (sel_pos < sel_end_simd) {
        auto mask = simd::bytes_mask_to_bits_mask(_ret_flags.data() + sel_pos);
        if (0 == mask) {
            //pass
        } else if (simd::bits_mask_all() == mask) {
            for (uint16_t i = 0; i < SIMD_BYTES; i++) {
                sel_rowid_idx[new_size++] = sel_pos + i;
            }
        } else {
            simd::iterate_through_bits_mask(
                    [&](const int bit_pos) {
                        sel_rowid_idx[new_size++] = sel_pos + (uint16_t)bit_pos;
                    },
                    mask);
        }
        sel_pos += SIMD_BYTES;
    }

    // Scalar tail: handle the remainder that did not fill a whole SIMD chunk.
    for (; sel_pos < sel_end; sel_pos++) {
        if (_ret_flags[sel_pos]) {
            sel_rowid_idx[new_size++] = sel_pos;
        }
    }

    _opts.stats->vec_cond_input_rows += original_size;
    _opts.stats->rows_vec_cond_filtered += original_size - new_size;
    return new_size;
}
2472
2473
uint16_t SegmentIterator::_evaluate_short_circuit_predicate(uint16_t* vec_sel_rowid_idx,
2474
1.69k
                                                            uint16_t selected_size) {
2475
1.69k
    SCOPED_RAW_TIMER(&_opts.stats->short_cond_ns);
2476
1.69k
    if (!_is_need_short_eval) {
2477
0
        return selected_size;
2478
0
    }
2479
2480
1.69k
    uint16_t original_size = selected_size;
2481
1.69k
    for (auto predicate : _short_cir_eval_predicate) {
2482
0
        auto column_id = predicate->column_id();
2483
0
        auto& short_cir_column = _current_return_columns[column_id];
2484
0
        selected_size = predicate->evaluate(*short_cir_column, vec_sel_rowid_idx, selected_size);
2485
0
    }
2486
2487
1.69k
    _opts.stats->short_circuit_cond_input_rows += original_size;
2488
1.69k
    _opts.stats->rows_short_circuit_cond_filtered += original_size - selected_size;
2489
2490
    // evaluate delete condition
2491
1.69k
    original_size = selected_size;
2492
1.69k
    selected_size = _opts.delete_condition_predicates->evaluate(_current_return_columns,
2493
1.69k
                                                                vec_sel_rowid_idx, selected_size);
2494
1.69k
    _opts.stats->rows_vec_del_cond_filtered += original_size - selected_size;
2495
1.69k
    return selected_size;
2496
1.69k
}
2497
2498
1
// Truncates every already-materialized column in `block` down to at most
// `rows` rows; columns that are absent or already short enough are untouched.
static void shrink_materialized_block_columns(Block* block, size_t rows) {
    for (auto& col_entry : *block) {
        if (!col_entry.column) {
            continue;
        }
        if (col_entry.column->size() > rows) {
            col_entry.column = col_entry.column->shrink(rows);
        }
    }
}
2505
2506
static void slice_materialized_block_columns(Block* block, size_t offset, size_t rows,
2507
1
                                             size_t original_rows) {
2508
1
    for (auto& entry : *block) {
2509
1
        if (!entry.column || entry.column->size() == 0) {
2510
0
            continue;
2511
0
        }
2512
1
        DORIS_CHECK(entry.column->size() == original_rows);
2513
1
        entry.column = entry.column->cut(offset, rows);
2514
1
    }
2515
1
}
2516
2517
1.69k
// Trims the current selection (and any materialized block columns) so the
// iterator never hands back more rows than the pushed-down read limit allows.
Status SegmentIterator::_apply_read_limit_to_selected_rows(Block* block, uint16_t& selected_size) {
    // read_limit == 0 means no limit was pushed down.
    if (_opts.read_limit == 0) {
        return Status::OK();
    }
    DORIS_CHECK(_rows_returned <= _opts.read_limit);
    const size_t budget = _opts.read_limit - _rows_returned;
    if (budget == 0) {
        // Limit already satisfied: drop the whole selection.
        selected_size = 0;
        shrink_materialized_block_columns(block, 0);
        return Status::OK();
    }
    if (selected_size <= budget) {
        return Status::OK();
    }
    if (_opts.read_orderby_key_reverse) {
        // Reverse-key reads keep the TAIL of the selection (the rows that will
        // come first after the later row-order reversal), so shift the tail
        // indexes to the front of _sel_rowid_idx and slice the block to match.
        const auto size_before_trim = selected_size;
        const auto tail_offset = size_before_trim - budget;
        for (size_t i = 0; i < budget; ++i) {
            _sel_rowid_idx[i] = _sel_rowid_idx[tail_offset + i];
        }
        selected_size = cast_set<uint16_t>(budget);
        slice_materialized_block_columns(block, tail_offset, budget, size_before_trim);
        return Status::OK();
    }
    // Forward reads keep the HEAD of the selection.
    selected_size = cast_set<uint16_t>(budget);
    shrink_materialized_block_columns(block, selected_size);
    return Status::OK();
}
2544
2545
// Lazily materializes the given columns for the selected rows only.
// Translates the selection (`sel_rowid_idx` indexes into `rowid_vector`) into a
// dense rowid list, then point-reads each column in `read_column_ids` at those
// rowids. When `init_condition_cache` is true, also marks the condition-cache
// buckets that cover every fetched rowid.
Status SegmentIterator::_read_columns_by_rowids(std::vector<ColumnId>& read_column_ids,
                                                std::vector<rowid_t>& rowid_vector,
                                                uint16_t* sel_rowid_idx, size_t select_size,
                                                MutableColumns* mutable_columns,
                                                bool init_condition_cache) {
    SCOPED_RAW_TIMER(&_opts.stats->lazy_read_ns);
    std::vector<rowid_t> rowids(select_size);

    if (init_condition_cache) {
        DCHECK(_condition_cache);
        auto& condition_cache = *_condition_cache;
        // Gather rowids and record the condition-cache bucket for each in one pass.
        for (size_t i = 0; i < select_size; ++i) {
            rowids[i] = rowid_vector[sel_rowid_idx[i]];
            condition_cache[rowids[i] / SegmentIterator::CONDITION_CACHE_OFFSET] = true;
        }
    } else {
        // Plain gather: resolve the selection into absolute rowids.
        for (size_t i = 0; i < select_size; ++i) {
            rowids[i] = rowid_vector[sel_rowid_idx[i]];
        }
    }

    for (auto cid : read_column_ids) {
        auto& colunm = (*mutable_columns)[cid];
        // Skip columns whose data can be synthesized without touching storage.
        if (_no_need_read_key_data(cid, colunm, select_size)) {
            continue;
        }
        if (_prune_column(cid, colunm, true, select_size)) {
            continue;
        }

        // Debug hook: force an error if this read path touches a column that a
        // test has declared should not need its data read.
        DBUG_EXECUTE_IF("segment_iterator._read_columns_by_index", {
            auto debug_col_name = DebugPoints::instance()->get_debug_param_or_default<std::string>(
                    "segment_iterator._read_columns_by_index", "column_name", "");
            if (debug_col_name.empty()) {
                return Status::Error<ErrorCode::INTERNAL_ERROR>("does not need to read data");
            }
            auto col_name = _opts.tablet_schema->column(cid).name();
            if (debug_col_name.find(col_name) != std::string::npos) {
                return Status::Error<ErrorCode::INTERNAL_ERROR>("does not need to read data, {}",
                                                                debug_col_name);
            }
        })

        if (_current_return_columns[cid].get() == nullptr) {
            return Status::InternalError(
                    "SegmentIterator meet invalid column, return columns size {}, cid {}",
                    _current_return_columns.size(), cid);
        }
        RETURN_IF_ERROR(_column_iterators[cid]->read_by_rowids(rowids.data(), select_size,
                                                               _current_return_columns[cid]));
    }

    return Status::OK();
}
2599
2600
12.9k
// Public batch entry point: wraps _next_batch_internal with adaptive batch-size
// bookkeeping, EOF-path cleanup (virtual column replacement, condition-cache
// insertion), optional reverse-order permutation, and health-status reporting.
Status SegmentIterator::next_batch(Block* block) {
    // Replace virtual columns with ColumnNothing at the beginning of each next_batch call.
    _init_virtual_columns(block);
    auto status = [&]() {
        RETURN_IF_CATCH_EXCEPTION({
            // Adaptive batch size: predict how many rows this batch should read.
            if (_block_size_predictor) {
                auto predicted = static_cast<uint32_t>(_block_size_predictor->predict_next_rows());
                // Never exceed the configured initial batch cap.
                _opts.block_row_max = std::min(predicted, _initial_block_row_max);
                _opts.stats->adaptive_batch_size_predict_min_rows =
                        std::min(_opts.stats->adaptive_batch_size_predict_min_rows,
                                 static_cast<int64_t>(predicted));
                _opts.stats->adaptive_batch_size_predict_max_rows =
                        std::max(_opts.stats->adaptive_batch_size_predict_max_rows,
                                 static_cast<int64_t>(predicted));
            } else {
                // No predictor — record the fixed batch size using min/max so we don't
                // clobber values already accumulated by other segment iterators that
                // share the same OlapReaderStatistics.
                _opts.stats->adaptive_batch_size_predict_min_rows =
                        std::min(_opts.stats->adaptive_batch_size_predict_min_rows,
                                 static_cast<int64_t>(_opts.block_row_max));
                _opts.stats->adaptive_batch_size_predict_max_rows =
                        std::max(_opts.stats->adaptive_batch_size_predict_max_rows,
                                 static_cast<int64_t>(_opts.block_row_max));
            }

            auto res = _next_batch_internal(block);

            if (res.is<END_OF_FILE>()) {
                // Since we have a type check at the caller.
                // So a replacement of nothing column with real column is needed.
                const auto& idx_to_datatype = _opts.vir_col_idx_to_type;
                for (const auto& pair : _vir_cid_to_idx_in_block) {
                    size_t idx = pair.second;
                    auto type = idx_to_datatype.find(idx)->second;
                    block->replace_by_position(idx, type->create_column());
                }

                // On EOF, publish the condition cache built during this scan if it
                // wasn't already found in the cache at startup.
                if (_opts.condition_cache_digest && !_find_condition_cache) {
                    auto* condition_cache = ConditionCache::instance();
                    ConditionCache::CacheKey cache_key(_opts.rowset_id, _segment->id(),
                                                       _opts.condition_cache_digest);
                    VLOG_DEBUG << "Condition cache insert, query id: "
                               << print_id(_opts.runtime_state->query_id())
                               << ", rowset id: " << _opts.rowset_id.to_string()
                               << ", segment id: " << _segment->id()
                               << ", cache digest: " << _opts.condition_cache_digest;
                    condition_cache->insert(cache_key, std::move(_condition_cache));
                }
                return res;
            }

            RETURN_IF_ERROR(res);
            // reverse block row order if read_orderby_key_reverse is true for key topn
            // it should be processed for all success _next_batch_internal
            if (_opts.read_orderby_key_reverse) {
                size_t num_rows = block->rows();
                if (num_rows == 0) {
                    return Status::OK();
                }
                size_t num_columns = block->columns();
                // Build the reversing permutation [n-1, n-2, ..., 0] and apply it
                // to every column.
                IColumn::Permutation permutation;
                for (size_t i = 0; i < num_rows; ++i) permutation.emplace_back(num_rows - 1 - i);

                for (size_t i = 0; i < num_columns; ++i)
                    block->get_by_position(i).column =
                            block->get_by_position(i).column->permute(permutation, num_rows);
            }

            RETURN_IF_ERROR(block->check_type_and_column());

            // Adaptive batch size: update EWMA estimate from the completed batch.
            // block->bytes() is accurate here: predicates have been applied and non-predicate
            // columns have been filled for surviving rows by _next_batch_internal.
            if (_block_size_predictor && block->rows() > 0) {
                _block_size_predictor->update(*block);
            }

            return Status::OK();
        });
    }();

    // if rows read by batch is 0, will return end of file, we should not remove segment cache in this situation.
    if (!status.ok() && !status.is<END_OF_FILE>()) {
        _segment->update_healthy_status(status);
    }
    return status;
}
2689
2690
12.9k
// Casts each materialized column from its on-disk (file) type to the type the
// schema expects. Columns that are absent, already converted this batch, or
// predicate columns are skipped; _converted_column_ids guards against
// converting the same column twice within one batch.
//
// Fix: the VLOG_DEBUG format string said "fom" instead of "from".
Status SegmentIterator::_convert_to_expected_type(const std::vector<ColumnId>& col_ids) {
    for (ColumnId i : col_ids) {
        if (!_current_return_columns[i] || _converted_column_ids[i] || _is_pred_column[i]) {
            continue;
        }
        const StorageField* field_type = _schema->column(i);
        DataTypePtr expected_type = Schema::get_data_type_ptr(*field_type);
        DataTypePtr file_column_type = _storage_name_and_type[i].second;
        // Only convert when the stored type actually differs from the schema type.
        if (!file_column_type->equals(*expected_type)) {
            ColumnPtr expected;
            ColumnPtr original = _current_return_columns[i]->assume_mutable()->get_ptr();
            RETURN_IF_ERROR(variant_util::cast_column({original, file_column_type, ""},
                                                      expected_type, &expected));
            _current_return_columns[i] = expected->assume_mutable();
            _converted_column_ids[i] = true;
            VLOG_DEBUG << fmt::format(
                    "Convert {} from file column type {} to {}, num_rows {}",
                    field_type->path() == nullptr ? "" : field_type->path()->get_path(),
                    file_column_type->get_name(), expected_type->get_name(),
                    _current_return_columns[i]->size());
        }
    }
    return Status::OK();
}
2714
2715
// Copies the rows selected by `sel_rowid_idx` from the input column into the
// output column via filter_by_selector. Fails (with a warning log) when the
// two columns disagree on nullability.
Status SegmentIterator::copy_column_data_by_selector(IColumn* input_col_ptr,
                                                     MutableColumnPtr& output_col,
                                                     uint16_t* sel_rowid_idx, uint16_t select_size,
                                                     size_t batch_size) {
    // Both sides must agree on nullability; otherwise filter_by_selector would
    // write into a column of the wrong layout.
    const bool nullability_matches = output_col->is_nullable() == input_col_ptr->is_nullable();
    if (!nullability_matches) {
        LOG(WARNING) << "nullable mismatch for output_column: " << output_col->dump_structure()
                     << " input_column: " << input_col_ptr->dump_structure()
                     << " select_size: " << select_size;
        return Status::RuntimeError("copy_column_data_by_selector nullable mismatch");
    }
    // NOTE(review): batch_size is unused here; reservation is sized by the
    // selected row count only — confirm whether the parameter can be dropped.
    output_col->reserve(select_size);
    return input_col_ptr->filter_by_selector(sel_rowid_idx, select_size, output_col.get());
}
2728
2729
12.9k
// Core per-batch read pipeline:
//   read predicate columns -> evaluate vectorized + short-circuit predicates ->
//   output predicate columns for survivors -> evaluate common exprs ->
//   apply read limit -> lazily read non-predicate columns -> output columns ->
//   materialize virtual columns -> final debug checks.
Status SegmentIterator::_next_batch_internal(Block* block) {
    SCOPED_CONCURRENCY_COUNT(ConcurrencyStatsManager::instance().segment_iterator_next_batch);

    bool is_mem_reuse = block->mem_reuse();
    DCHECK(is_mem_reuse);

    RETURN_IF_ERROR(_lazy_init(block));

    SCOPED_RAW_TIMER(&_opts.stats->block_load_ns);

    // Limit already reached on a previous batch: nothing more to produce.
    if (_opts.read_limit > 0 && _rows_returned >= _opts.read_limit) {
        return _process_eof(block);
    }

    // If the row bitmap size is smaller than nrows_read_limit, there's no need to reserve that many column rows.
    uint32_t nrows_read_limit =
            std::min(cast_set<uint32_t>(_row_bitmap.cardinality()), _opts.block_row_max);
    if (_can_opt_limit_reads()) {
        // No SegmentIterator-side conjunct remains to be evaluated, so LIMIT is equivalent before
        // and after filtering. Cap the first read directly; this is the no-conjunct fast path that
        // avoids reading rows past the pushed-down local LIMIT.
        size_t cap = (_opts.read_limit > _rows_returned) ? (_opts.read_limit - _rows_returned) : 0;
        if (cap < nrows_read_limit) {
            nrows_read_limit = static_cast<uint32_t>(cap);
        }
    }
    // Debug hook: a test can assert the topn optimization capped the read to 1 row.
    DBUG_EXECUTE_IF("segment_iterator.topn_opt_1", {
        if (nrows_read_limit != 1) {
            return Status::Error<ErrorCode::INTERNAL_ERROR>(
                    "topn opt 1 execute failed: nrows_read_limit={}, "
                    "_opts.read_limit={}",
                    nrows_read_limit, _opts.read_limit);
        }
    })

    RETURN_IF_ERROR(_init_current_block(block, _current_return_columns, nrows_read_limit));
    _converted_column_ids.assign(_schema->columns().size(), false);

    _selected_size = 0;
    RETURN_IF_ERROR(_read_columns_by_index(nrows_read_limit, _selected_size));
    _replace_version_col_if_needed(_predicate_column_ids, _selected_size);

    _opts.stats->blocks_load += 1;
    _opts.stats->raw_rows_read += _selected_size;

    // No rows left in the bitmap: end of segment.
    if (_selected_size == 0) {
        return _process_eof(block);
    }

    if (_is_need_vec_eval || _is_need_short_eval || _is_need_expr_eval) {
        _sel_rowid_idx.resize(_selected_size);

        if (_is_need_vec_eval || _is_need_short_eval) {
            _convert_dict_code_for_predicate_if_necessary();

            // step 1: evaluate vectorization predicate
            _selected_size =
                    _evaluate_vectorization_predicate(_sel_rowid_idx.data(), _selected_size);

            // step 2: evaluate short circuit predicate
            // todo(wb) research whether need to read short predicate after vectorization evaluation
            //          to reduce cost of read short circuit columns.
            //          In SSB test, it make no difference; So need more scenarios to test
            _selected_size =
                    _evaluate_short_circuit_predicate(_sel_rowid_idx.data(), _selected_size);
            VLOG_DEBUG << fmt::format("After evaluate predicates, selected size: {} ",
                                      _selected_size);
            if (_selected_size > 0) {
                // step 3.1: output short circuit and predicate column
                // when lazy materialization enables, _predicate_column_ids = distinct(_short_cir_pred_column_ids + _vec_pred_column_ids)
                // see _vec_init_lazy_materialization
                // todo(wb) need to tell input columnids from output columnids
                RETURN_IF_ERROR(_output_column_by_sel_idx(block, _predicate_column_ids,
                                                          _sel_rowid_idx.data(), _selected_size));

                // step 3.2: read remaining expr column and evaluate it.
                if (_is_need_expr_eval) {
                    // The predicate column contains the remaining expr column, no need second read.
                    if (_common_expr_column_ids.size() > 0) {
                        SCOPED_RAW_TIMER(&_opts.stats->non_predicate_read_ns);
                        RETURN_IF_ERROR(_read_columns_by_rowids(
                                _common_expr_column_ids, _block_rowids, _sel_rowid_idx.data(),
                                _selected_size, &_current_return_columns));
                        _replace_version_col_if_needed(_common_expr_column_ids, _selected_size);
                        RETURN_IF_ERROR(_process_columns(_common_expr_column_ids, block));
                    }

                    DCHECK(block->columns() > _schema_block_id_map[*_common_expr_columns.begin()]);
                    RETURN_IF_ERROR(
                            _process_common_expr(_sel_rowid_idx.data(), _selected_size, block));
                }
            } else {
                // Everything was filtered out: swap virtual-column placeholders
                // for empty real-typed columns before producing an empty block.
                _fill_column_nothing();
                if (_is_need_expr_eval) {
                    RETURN_IF_ERROR(_process_columns(_common_expr_column_ids, block));
                }
            }
        } else if (_is_need_expr_eval) {
            DCHECK(!_predicate_column_ids.empty());
            RETURN_IF_ERROR(_process_columns(_predicate_column_ids, block));
            // first read all rows are insert block, initialize sel_rowid_idx to all rows.
            for (uint16_t i = 0; i < _selected_size; ++i) {
                _sel_rowid_idx[i] = i;
            }
            RETURN_IF_ERROR(_process_common_expr(_sel_rowid_idx.data(), _selected_size, block));
        }

        RETURN_IF_ERROR(_apply_read_limit_to_selected_rows(block, _selected_size));

        // step4: read non_predicate column
        if (_selected_size > 0) {
            if (!_non_predicate_columns.empty()) {
                RETURN_IF_ERROR(_read_columns_by_rowids(
                        _non_predicate_columns, _block_rowids, _sel_rowid_idx.data(),
                        _selected_size, &_current_return_columns,
                        _opts.condition_cache_digest && !_find_condition_cache));
                _replace_version_col_if_needed(_non_predicate_columns, _selected_size);
            } else {
                // No lazy columns to read, but the condition-cache buckets of the
                // surviving rows still need to be marked.
                if (_opts.condition_cache_digest && !_find_condition_cache) {
                    auto& condition_cache = *_condition_cache;
                    for (size_t i = 0; i < _selected_size; ++i) {
                        auto rowid = _block_rowids[_sel_rowid_idx[i]];
                        condition_cache[rowid / SegmentIterator::CONDITION_CACHE_OFFSET] = true;
                    }
                }
            }
        }
    }

    // step5: output columns
    RETURN_IF_ERROR(_output_non_pred_columns(block));
    // Convert inverted index bitmaps to result columns for virtual column exprs
    // (e.g., MATCH projections). This must run before _materialization_of_virtual_column
    // so that fast_execute() can find the pre-computed result columns.
    if (!_virtual_column_exprs.empty()) {
        bool use_sel = _is_need_vec_eval || _is_need_short_eval || _is_need_expr_eval;
        uint16_t* sel_rowid_idx = use_sel ? _sel_rowid_idx.data() : nullptr;
        std::vector<VExprContext*> vir_ctxs;
        vir_ctxs.reserve(_virtual_column_exprs.size());
        for (auto& [cid, ctx] : _virtual_column_exprs) {
            vir_ctxs.push_back(ctx.get());
        }
        _output_index_result_column(vir_ctxs, sel_rowid_idx, _selected_size, block);
    }
    RETURN_IF_ERROR(_materialization_of_virtual_column(block));
    // shrink char_type suffix zero data
    block->shrink_char_type_column_suffix_zero(_char_type_idx);
    if (_opts.read_limit > 0) {
        _rows_returned += block->rows();
    }
    return _check_output_block(block);
}
2881
2882
0
// Converts the given columns to their schema-expected types, then moves each
// one into the output block at its mapped position.
Status SegmentIterator::_process_columns(const std::vector<ColumnId>& column_ids, Block* block) {
    RETURN_IF_ERROR(_convert_to_expected_type(column_ids));
    for (auto column_id : column_ids) {
        auto block_pos = _schema_block_id_map[column_id];
        block->replace_by_position(block_pos, std::move(_current_return_columns[column_id]));
    }
    return Status::OK();
}
2890
2891
30
void SegmentIterator::_fill_column_nothing() {
2892
    // If column_predicate filters out all rows, the corresponding column in _current_return_columns[cid] must be a ColumnNothing.
2893
    // Because:
2894
    // 1. Before each batch, _init_return_columns is called to initialize _current_return_columns, and virtual columns in _current_return_columns are initialized as ColumnNothing.
2895
    // 2. When select_size == 0, the read method of VirtualColumnIterator will definitely not be called, so the corresponding Column remains a ColumnNothing
2896
30
    for (const auto pair : _vir_cid_to_idx_in_block) {
2897
0
        auto cid = pair.first;
2898
0
        auto pos = pair.second;
2899
0
        [[maybe_unused]] const auto* nothing_col =
2900
0
                assert_cast<const ColumnNothing*>(_current_return_columns[cid].get());
2901
0
        _current_return_columns[cid] = _opts.vir_col_idx_to_type[pos]->create_column();
2902
0
    }
2903
30
}
2904
2905
10.1k
// Debug-build sanity check on the outgoing block: every column must be present,
// must not be a leftover ColumnNothing placeholder (unless the block is empty),
// and must have exactly block->rows() rows. Compiled out in release builds.
Status SegmentIterator::_check_output_block(Block* block) {
#ifndef NDEBUG
    size_t rows = block->rows();
    size_t idx = 0;
    for (const auto& entry : *block) {
        if (!entry.column) {
            // Null column pointer — a position was never filled.
            return Status::InternalError(
                    "Column in idx {} is null, block columns {}, normal_columns {}, "
                    "virtual_columns {}",
                    idx, block->columns(), _schema->num_column_ids(), _virtual_column_exprs.size());
        } else if (check_and_get_column<ColumnNothing>(entry.column.get())) {
            // A ColumnNothing placeholder is only acceptable in an empty block.
            if (rows > 0) {
                std::vector<std::string> vcid_to_idx;
                for (const auto& pair : _vir_cid_to_idx_in_block) {
                    vcid_to_idx.push_back(fmt::format("{}-{}", pair.first, pair.second));
                }
                std::string vir_cid_to_idx_in_block_msg =
                        fmt::format("_vir_cid_to_idx_in_block:[{}]", fmt::join(vcid_to_idx, ","));
                return Status::InternalError(
                        "Column in idx {} is nothing, block columns {}, normal_columns {}, "
                        "vir_cid_to_idx_in_block_msg {}",
                        idx, block->columns(), _schema->num_column_ids(),
                        vir_cid_to_idx_in_block_msg);
            }
        } else if (entry.column->size() != rows) {
            // Row-count mismatch across columns in the same block.
            return Status::InternalError(
                    "Unmatched size {}, expected {}, column: {}, type: {}, idx_in_block: {}, "
                    "block: {}",
                    entry.column->size(), rows, entry.column->get_name(), entry.type->get_name(),
                    idx, block->dump_structure());
        }
        idx++;
    }
#endif
    return Status::OK();
}
2941
2942
0
Status SegmentIterator::_process_column_predicate() {
    // Intentionally a no-op hook; always succeeds.
    return Status::OK();
}
2945
2946
2.81k
Status SegmentIterator::_process_eof(Block* block) {
    // On end-of-segment: convert the locally accumulated return columns to
    // their expected schema types, hand the non-predicate ones back to the
    // caller's block, then release iterator state early.
    RETURN_IF_ERROR(_convert_to_expected_type(_schema->column_ids()));
    const auto num_cols = block->columns();
    for (size_t idx = 0; idx < num_cols; ++idx) {
        auto cid = _schema->column_id(idx);
        if (_is_pred_column[cid]) {
            continue;
        }
        block->replace_by_position(idx, std::move(_current_return_columns[cid]));
    }
    block->clear_column_data();
    // Clear and release iterators' memory footprint in advance of the caller
    // observing EOF.
    _column_iterators.clear();
    _index_iterators.clear();
    return Status::EndOfFile("no more data in segment");
}
2961
2962
// Evaluates pushed-down common expressions over the current batch and shrinks
// the row selection accordingly. Returns OK, or the first error produced while
// executing the expressions.
//
// NOTE(review): the parameters `sel_rowid_idx`/`selected_size` are shadowed by
// the members `_sel_rowid_idx`/`_selected_size` in most calls below —
// presumably callers always pass the member data, but confirm against callers.
Status SegmentIterator::_process_common_expr(uint16_t* sel_rowid_idx, uint16_t& selected_size,
                                             Block* block) {
    // Here we just use col0 as row_number indicator. when reach here, we will calculate the predicates first.
    //  then use the result to reduce our data read(that is, expr push down). there's now row in block means the first
    //  column is not in common expr. so it's safe to replace it temporarily to provide correct `selected_size`.
    VLOG_DEBUG << fmt::format("Execute common expr. block rows {}, selected size {}", block->rows(),
                              _selected_size);

    // If the block's row count disagrees with the selection, temporarily swap
    // column 0 for a default-valued const column sized to the selection so the
    // expression framework sees a consistent row count.
    bool need_mock_col = block->rows() != selected_size;
    MutableColumnPtr col0;
    if (need_mock_col) {
        col0 = std::move(*block->get_by_position(0).column).mutate();
        block->replace_by_position(
                0, block->get_by_position(0).type->create_column_const_with_default_value(
                           _selected_size));
    }

    // Collect raw context pointers for the index-result pre-pass.
    std::vector<VExprContext*> common_ctxs;
    common_ctxs.reserve(_common_expr_ctxs_push_down.size());
    for (auto& ctx : _common_expr_ctxs_push_down) {
        common_ctxs.push_back(ctx.get());
    }
    _output_index_result_column(common_ctxs, _sel_rowid_idx.data(), _selected_size, block);
    block->shrink_char_type_column_suffix_zero(_char_type_idx);
    RETURN_IF_ERROR(_execute_common_expr(_sel_rowid_idx.data(), _selected_size, block));

    // Restore the real first column if it was mocked above.
    if (need_mock_col) {
        block->replace_by_position(0, std::move(col0));
    }

    VLOG_DEBUG << fmt::format("Execute common expr end. block rows {}, selected size {}",
                              block->rows(), _selected_size);
    return Status::OK();
}
2996
2997
// Runs the pushed-down conjuncts against `block` and compacts `sel_rowid_idx`
// to the rows that pass, updating `selected_size` in place. Also maintains the
// expression-filter statistics (input rows / filtered rows / elapsed time).
Status SegmentIterator::_execute_common_expr(uint16_t* sel_rowid_idx, uint16_t& selected_size,
                                             Block* block) {
    SCOPED_RAW_TIMER(&_opts.stats->expr_filter_ns);
    DCHECK(!_remaining_conjunct_roots.empty());
    DCHECK(block->rows() != 0);
    // Remember the column count before conjunct execution so temporary result
    // columns appended by the expressions can be distinguished.
    int prev_columns = block->columns();
    uint16_t original_size = selected_size;
    _opts.stats->expr_cond_input_rows += original_size;

    IColumn::Filter filter;
    RETURN_IF_ERROR(VExprContext::execute_conjuncts_and_filter_block(
            _common_expr_ctxs_push_down, block, _columns_to_filter, prev_columns, filter));

    // Compact the selection vector according to the computed filter.
    selected_size = _evaluate_common_expr_filter(sel_rowid_idx, selected_size, filter);
    _opts.stats->rows_expr_cond_filtered += original_size - selected_size;
    return Status::OK();
}
3014
3015
// Compacts `sel_rowid_idx` in place, keeping only entries whose corresponding
// byte in `filter` is non-zero, and returns the new selection count.
// Uses a SIMD byte->bit mask pass over full groups, then a scalar tail.
uint16_t SegmentIterator::_evaluate_common_expr_filter(uint16_t* sel_rowid_idx,
                                                       uint16_t selected_size,
                                                       const IColumn::Filter& filter) {
    size_t count = filter.size() - simd::count_zero_num((int8_t*)filter.data(), filter.size());
    if (count == 0) {
        // Every row was filtered out; nothing survives.
        return 0;
    } else {
        const UInt8* filt_pos = filter.data();

        uint16_t new_size = 0;
        uint32_t sel_pos = 0;
        const uint32_t sel_end = selected_size;
        static constexpr size_t SIMD_BYTES = simd::bits_mask_length();
        // Largest multiple of SIMD_BYTES that fits in the selection.
        const uint32_t sel_end_simd = sel_pos + selected_size / SIMD_BYTES * SIMD_BYTES;

        while (sel_pos < sel_end_simd) {
            auto mask = simd::bytes_mask_to_bits_mask(filt_pos + sel_pos);
            if (0 == mask) {
                //pass
            } else if (simd::bits_mask_all() == mask) {
                // Whole group survives: copy unconditionally.
                for (uint32_t i = 0; i < SIMD_BYTES; i++) {
                    sel_rowid_idx[new_size++] = sel_rowid_idx[sel_pos + i];
                }
            } else {
                // Mixed group: copy only entries whose mask bit is set.
                simd::iterate_through_bits_mask(
                        [&](const size_t bit_pos) {
                            sel_rowid_idx[new_size++] = sel_rowid_idx[sel_pos + bit_pos];
                        },
                        mask);
            }
            sel_pos += SIMD_BYTES;
        }

        // Scalar tail for the remaining (< SIMD_BYTES) entries.
        for (; sel_pos < sel_end; sel_pos++) {
            if (filt_pos[sel_pos]) {
                sel_rowid_idx[new_size++] = sel_rowid_idx[sel_pos];
            }
        }
        return new_size;
    }
}
3056
3057
// For every expression context that carries an index execution context,
// materializes its inverted-index result bitmaps into per-row UInt8 result
// columns (optionally wrapped in a ColumnNullable when a null bitmap exists
// and the expression's type is nullable), and stores them back on the index
// context so expression evaluation can reuse the index results.
void SegmentIterator::_output_index_result_column(const std::vector<VExprContext*>& expr_ctxs,
                                                  uint16_t* sel_rowid_idx, uint16_t select_size,
                                                  Block* block) {
    SCOPED_RAW_TIMER(&_opts.stats->output_index_result_column_timer);
    if (block->rows() == 0) {
        return;
    }
    for (auto* expr_ctx_ptr : expr_ctxs) {
        auto index_ctx = expr_ctx_ptr->get_index_context();
        if (index_ctx == nullptr) {
            continue;
        }
        for (auto& inverted_index_result_bitmap_for_expr : index_ctx->get_index_result_bitmap()) {
            const auto* expr = inverted_index_result_bitmap_for_expr.first;
            const auto& result_bitmap = inverted_index_result_bitmap_for_expr.second;
            const auto& index_result_bitmap = result_bitmap.get_data_bitmap();
            // One match flag per block row, initialized to "no match".
            auto index_result_column = ColumnUInt8::create();
            ColumnUInt8::Container& vec_match_pred = index_result_column->get_data();
            vec_match_pred.resize(block->rows());
            std::fill(vec_match_pred.begin(), vec_match_pred.end(), 0);

            const auto& null_bitmap = result_bitmap.get_null_bitmap();
            bool has_null_bitmap = null_bitmap != nullptr && !null_bitmap->isEmpty();
            bool expr_returns_nullable = expr->data_type()->is_nullable();

            // Only build a null map when the index produced nulls AND the
            // expression's result type can represent them.
            ColumnUInt8::MutablePtr null_map_column = nullptr;
            ColumnUInt8::Container* null_map_data = nullptr;
            if (has_null_bitmap && expr_returns_nullable) {
                null_map_column = ColumnUInt8::create();
                auto& null_map_vec = null_map_column->get_data();
                null_map_vec.resize(block->rows());
                std::fill(null_map_vec.begin(), null_map_vec.end(), 0);
                null_map_data = &null_map_column->get_data();
            }

            // BulkContext speeds up repeated roaring-bitmap membership tests.
            roaring::BulkContext bulk_context;
            for (uint32_t i = 0; i < select_size; i++) {
                // sel_rowid_idx may be null, meaning the selection is the
                // identity over the first select_size rows.
                auto rowid = sel_rowid_idx ? _block_rowids[sel_rowid_idx[i]] : _block_rowids[i];
                if (index_result_bitmap) {
                    vec_match_pred[i] = index_result_bitmap->containsBulk(bulk_context, rowid);
                }
                // A null entry overrides any match: mark null, clear match.
                if (null_map_data != nullptr && null_bitmap->contains(rowid)) {
                    (*null_map_data)[i] = 1;
                    vec_match_pred[i] = 0;
                }
            }

            DCHECK(block->rows() == vec_match_pred.size());

            if (null_map_column) {
                index_ctx->set_index_result_column_for_expr(
                        expr, ColumnNullable::create(std::move(index_result_column),
                                                     std::move(null_map_column)));
            } else {
                index_ctx->set_index_result_column_for_expr(expr, std::move(index_result_column));
            }
        }
    }
}
3116
3117
1.69k
void SegmentIterator::_convert_dict_code_for_predicate_if_necessary() {
3118
1.69k
    for (auto predicate : _short_cir_eval_predicate) {
3119
0
        _convert_dict_code_for_predicate_if_necessary_impl(predicate);
3120
0
    }
3121
3122
1.69k
    for (auto predicate : _pre_eval_block_predicate) {
3123
0
        _convert_dict_code_for_predicate_if_necessary_impl(predicate);
3124
0
    }
3125
3126
1.69k
    for (auto column_id : _delete_range_column_ids) {
3127
1.55k
        _current_return_columns[column_id].get()->convert_dict_codes_if_necessary();
3128
1.55k
    }
3129
3130
1.69k
    for (auto column_id : _delete_bloom_filter_column_ids) {
3131
0
        _current_return_columns[column_id].get()->initialize_hash_values_for_runtime_filter();
3132
0
    }
3133
1.69k
}
3134
3135
void SegmentIterator::_convert_dict_code_for_predicate_if_necessary_impl(
3136
0
        std::shared_ptr<ColumnPredicate> predicate) {
3137
0
    auto& column = _current_return_columns[predicate->column_id()];
3138
0
    auto* col_ptr = column.get();
3139
3140
0
    if (PredicateTypeTraits::is_range(predicate->type())) {
3141
0
        col_ptr->convert_dict_codes_if_necessary();
3142
0
    } else if (PredicateTypeTraits::is_bloom_filter(predicate->type())) {
3143
0
        col_ptr->initialize_hash_values_for_runtime_filter();
3144
0
    }
3145
0
}
3146
3147
2.93k
Status SegmentIterator::current_block_row_locations(std::vector<RowLocation>* block_row_locations) {
    // Reports the (segment id, row id) location of every row in the current
    // batch. When any predicate/expression evaluation ran, rows are addressed
    // indirectly through the selection index array.
    DCHECK(_opts.record_rowids);
    DCHECK_GE(_block_rowids.size(), _selected_size);
    block_row_locations->resize(_selected_size);
    const uint32_t sid = segment_id();
    const bool has_selection = _is_need_vec_eval || _is_need_short_eval || _is_need_expr_eval;
    if (has_selection) {
        for (size_t i = 0; i < _selected_size; ++i) {
            (*block_row_locations)[i] = RowLocation(sid, _block_rowids[_sel_rowid_idx[i]]);
        }
    } else {
        for (size_t i = 0; i < _selected_size; ++i) {
            (*block_row_locations)[i] = RowLocation(sid, _block_rowids[i]);
        }
    }
    return Status::OK();
}
3163
3164
2.82k
// Builds the segment-local IndexExecContext and clones the pushed-down common
// expression contexts and virtual-column expression contexts so each clone is
// wired to this segment's index iterators rather than to shared state.
Status SegmentIterator::_construct_compound_expr_context() {
    ColumnIteratorOptions iter_opts {
            .use_page_cache = _opts.use_page_cache,
            .file_reader = _file_reader.get(),
            .stats = _opts.stats,
            .io_ctx = _opts.io_ctx,
    };
    auto inverted_index_context = std::make_shared<IndexExecContext>(
            _schema->column_ids(), _index_iterators, _storage_name_and_type,
            _common_expr_index_exec_status, _score_runtime, _segment.get(), iter_opts);
    inverted_index_context->set_index_query_context(_index_query_context);
    // Clone each pushed-down context so the per-segment index context can be
    // attached without mutating the shared originals.
    for (const auto& expr_ctx : _opts.common_expr_ctxs_push_down) {
        VExprContextSPtr context;
        // _ann_range_search_runtime will do deep copy.
        RETURN_IF_ERROR(expr_ctx->clone(_opts.runtime_state, context));
        context->set_index_context(inverted_index_context);
        _common_expr_ctxs_push_down.emplace_back(context);
    }
    // Clone virtual column exprs before setting IndexExecContext, because
    // IndexExecContext holds segment-specific index iterator references.
    // Without cloning, shared VExprContext would be overwritten per-segment
    // and could point to the wrong segment's context.
    for (auto& [cid, expr_ctx] : _virtual_column_exprs) {
        VExprContextSPtr context;
        RETURN_IF_ERROR(expr_ctx->clone(_opts.runtime_state, context));
        context->set_index_context(inverted_index_context);
        // Replace the map entry with the segment-local clone.
        expr_ctx = context;
    }
    return Status::OK();
}
3194
3195
2.82k
// Walks each pushed-down conjunct tree and records, per column id, which expr
// nodes reference that column via a slot ref. Populates
// _common_expr_index_exec_status (per-column, per-expr "index evaluated" flag,
// initialized false) and _common_expr_to_slotref_map (per root context, column
// id -> referencing expr node). Virtual slot refs are expanded into their
// underlying expression trees so the real columns inside are tracked too.
void SegmentIterator::_calculate_expr_in_remaining_conjunct_root() {
    for (const auto& root_expr_ctx : _common_expr_ctxs_push_down) {
        const auto& root_expr = root_expr_ctx->root();
        if (root_expr == nullptr) {
            continue;
        }
        _common_expr_to_slotref_map[root_expr_ctx.get()] = std::unordered_map<ColumnId, VExpr*>();

        // Iterative pre-order traversal of the conjunct tree.
        std::stack<VExprSPtr> stack;
        stack.emplace(root_expr);

        while (!stack.empty()) {
            const auto& expr = stack.top();
            stack.pop();

            for (const auto& child : expr->children()) {
                if (child->is_virtual_slot_ref()) {
                    // Expand virtual slot ref to its underlying expression tree and
                    // collect real slot refs used inside. We still associate those
                    // slot refs with the current parent expr node for inverted index
                    // tracking, just like normal slot refs.
                    auto* vir_slot_ref = assert_cast<VirtualSlotRef*>(child.get());
                    auto vir_expr = vir_slot_ref->get_virtual_column_expr();
                    if (vir_expr) {
                        // NOTE(review): only CHILDREN of vir_expr are inspected;
                        // if vir_expr itself were a bare slot ref it would not be
                        // recorded — confirm that cannot occur.
                        std::stack<VExprSPtr> vir_stack;
                        vir_stack.emplace(vir_expr);

                        while (!vir_stack.empty()) {
                            const auto& vir_node = vir_stack.top();
                            vir_stack.pop();

                            for (const auto& vir_child : vir_node->children()) {
                                if (vir_child->is_slot_ref()) {
                                    auto* inner_slot_ref = assert_cast<VSlotRef*>(vir_child.get());
                                    _common_expr_index_exec_status[_schema->column_id(
                                            inner_slot_ref->column_id())][expr.get()] = false;
                                    _common_expr_to_slotref_map[root_expr_ctx.get()]
                                                               [inner_slot_ref->column_id()] =
                                                                       expr.get();
                                }

                                if (!vir_child->children().empty()) {
                                    vir_stack.emplace(vir_child);
                                }
                            }
                        }
                    }
                }
                // Example: CAST(v['a'] AS VARCHAR) MATCH 'hello', do not add CAST expr to index tracking.
                auto expr_without_cast = VExpr::expr_without_cast(child);
                if (expr_without_cast->is_slot_ref() && expr->op() != TExprOpcode::CAST) {
                    auto* column_slot_ref = assert_cast<VSlotRef*>(expr_without_cast.get());
                    _common_expr_index_exec_status[_schema->column_id(column_slot_ref->column_id())]
                                                  [expr.get()] = false;
                    _common_expr_to_slotref_map[root_expr_ctx.get()][column_slot_ref->column_id()] =
                            expr.get();
                }
            }

            // Push non-leaf children in reverse so they pop in source order.
            const auto& children = expr->children();
            for (int i = cast_set<int>(children.size()) - 1; i >= 0; --i) {
                if (!children[i]->children().empty()) {
                    stack.emplace(children[i]);
                }
            }
        }
    }
}
3263
3264
bool SegmentIterator::_no_need_read_key_data(ColumnId cid, MutableColumnPtr& column,
3265
27.1k
                                             size_t nrows_read) {
3266
27.1k
    if (_opts.runtime_state && !_opts.runtime_state->query_options().enable_no_need_read_data_opt) {
3267
0
        return false;
3268
0
    }
3269
3270
27.1k
    if (!((_opts.tablet_schema->keys_type() == KeysType::DUP_KEYS ||
3271
27.1k
           (_opts.tablet_schema->keys_type() == KeysType::UNIQUE_KEYS &&
3272
11.1k
            _opts.enable_unique_key_merge_on_write)))) {
3273
7.58k
        return false;
3274
7.58k
    }
3275
3276
19.5k
    if (_opts.push_down_agg_type_opt != TPushAggOp::COUNT_ON_INDEX) {
3277
19.5k
        return false;
3278
19.5k
    }
3279
3280
0
    if (!_opts.tablet_schema->column(cid).is_key()) {
3281
0
        return false;
3282
0
    }
3283
3284
0
    if (_has_delete_predicate(cid)) {
3285
0
        return false;
3286
0
    }
3287
3288
0
    if (!_check_all_conditions_passed_inverted_index_for_column(cid)) {
3289
0
        return false;
3290
0
    }
3291
3292
0
    if (column->is_nullable()) {
3293
0
        auto* nullable_col_ptr = reinterpret_cast<ColumnNullable*>(column.get());
3294
0
        nullable_col_ptr->get_null_map_column().insert_many_defaults(nrows_read);
3295
0
        nullable_col_ptr->get_nested_column_ptr()->insert_many_defaults(nrows_read);
3296
0
    } else {
3297
0
        column->insert_many_defaults(nrows_read);
3298
0
    }
3299
3300
0
    return true;
3301
0
}
3302
3303
19.5k
bool SegmentIterator::_has_delete_predicate(ColumnId cid) {
    // Collect all column ids referenced by delete conditions and check
    // whether this column is among them.
    std::set<uint32_t> delete_column_ids;
    _opts.delete_condition_predicates->get_all_column_ids(delete_column_ids);
    return delete_column_ids.find(cid) != delete_column_ids.end();
}
3308
3309
12.9k
// Returns true when the raw read itself may be capped at the query LIMIT:
// a limit is set, no predicate/expr/delete filtering remains, and every read
// column either is the delete sign or had all its conditions satisfied by
// inverted indexes.
bool SegmentIterator::_can_opt_limit_reads() {
    if (_opts.read_limit == 0) {
        return false;
    }

    // If SegmentIterator still needs to evaluate predicates/common exprs, LIMIT must be applied to
    // post-filter rows by _apply_read_limit_to_selected_rows(); capping the raw read here could
    // return fewer rows than the query LIMIT.
    if (_is_need_vec_eval || _is_need_short_eval || _is_need_expr_eval) {
        return false;
    }

    if (_opts.delete_condition_predicates->num_of_column_predicate() > 0) {
        return false;
    }

    bool all_true = std::ranges::all_of(_schema->column_ids(), [this](auto cid) {
        // The delete sign column never blocks the optimization.
        if (cid == _opts.tablet_schema->delete_sign_idx()) {
            return true;
        }
        if (_check_all_conditions_passed_inverted_index_for_column(cid, true)) {
            return true;
        }
        return false;
    });

    DBUG_EXECUTE_IF("segment_iterator.topn_opt_1", {
        LOG(INFO) << "col_predicates: " << _col_predicates.size() << ", all_true: " << all_true;
    })

    // Debug-only fault injection.
    // NOTE(review): this block returns a Status from a bool-returning
    // function; it only expands under debug points — confirm it compiles as
    // intended in debug builds.
    DBUG_EXECUTE_IF("segment_iterator.topn_opt_2", {
        if (all_true) {
            return Status::Error<ErrorCode::INTERNAL_ERROR>("topn opt 2 execute failed");
        }
    })

    return all_true;
}
3347
3348
// Before get next batch. make sure all virtual columns in block has type ColumnNothing.
3349
12.9k
void SegmentIterator::_init_virtual_columns(Block* block) {
3350
12.9k
    for (const auto& pair : _vir_cid_to_idx_in_block) {
3351
0
        auto& col_with_type_and_name = block->get_by_position(pair.second);
3352
0
        col_with_type_and_name.column = ColumnNothing::create(0);
3353
0
        col_with_type_and_name.type = _opts.vir_col_idx_to_type[pair.second];
3354
0
    }
3355
12.9k
}
3356
3357
10.1k
// Materializes any virtual column that is still a ColumnNothing placeholder by
// executing its expression against `block`. Returns InternalError when a
// virtual column slot is out of range or null.
Status SegmentIterator::_materialization_of_virtual_column(Block* block) {
    // Some expr can not process empty block, such as function `element_at`.
    // So materialize virtual column in advance to avoid errors.
    if (block->rows() == 0) {
        // Empty batch: install empty columns of the declared types instead of
        // running the expressions.
        for (const auto& pair : _vir_cid_to_idx_in_block) {
            auto& col_with_type_and_name = block->get_by_position(pair.second);
            col_with_type_and_name.column = _opts.vir_col_idx_to_type[pair.second]->create_column();
            col_with_type_and_name.type = _opts.vir_col_idx_to_type[pair.second];
        }
        return Status::OK();
    }

    for (const auto& cid_and_expr : _virtual_column_exprs) {
        auto cid = cid_and_expr.first;
        auto column_expr = cid_and_expr.second;
        size_t idx_in_block = _vir_cid_to_idx_in_block[cid];
        // Validate the slot before touching it.
        if (block->columns() <= idx_in_block) {
            return Status::InternalError(
                    "Virtual column index {} is out of range, block columns {}, "
                    "virtual columns size {}, virtual column expr {}",
                    idx_in_block, block->columns(), _vir_cid_to_idx_in_block.size(),
                    column_expr->root()->debug_string());
        } else if (block->get_by_position(idx_in_block).column.get() == nullptr) {
            return Status::InternalError(
                    "Virtual column index {} is null, block columns {}, virtual columns size {}, "
                    "virtual column expr {}",
                    idx_in_block, block->columns(), _vir_cid_to_idx_in_block.size(),
                    column_expr->root()->debug_string());
        }
        block->shrink_char_type_column_suffix_zero(_char_type_idx);
        // Only ColumnNothing placeholders still need materialization; already
        // materialized virtual columns are left untouched.
        if (check_and_get_column<const ColumnNothing>(
                    block->get_by_position(idx_in_block).column.get())) {
            VLOG_DEBUG << fmt::format("Virtual column is doing materialization, cid {}, col idx {}",
                                      cid, idx_in_block);
            ColumnPtr result_column;
            RETURN_IF_ERROR(column_expr->execute(block, result_column));

            block->replace_by_position(idx_in_block, std::move(result_column));
            if (block->get_by_position(idx_in_block).column->size() == 0) {
                LOG_WARNING("Result of expr column {} is empty. cid {}, idx_in_block {}",
                            column_expr->root()->debug_string(), cid, idx_in_block);
            }
        }
    }
    return Status::OK();
}
3403
3404
2.82k
// When a score runtime is present, computes BM25 scores for the rows in
// _row_bitmap (optionally limited to a top-N and filtered by a score range)
// and hands the resulting column plus row-id mapping to the destination
// VirtualColumnIterator for later materialization. No-op otherwise.
void SegmentIterator::_prepare_score_column_materialization() {
    if (_score_runtime == nullptr) {
        return;
    }

    // Optional score-range filter built from the pushed-down range info.
    ScoreRangeFilterPtr filter;
    if (_score_runtime->has_score_range_filter()) {
        const auto& range_info = _score_runtime->get_score_range_info();
        filter = std::make_shared<ScoreRangeFilter>(range_info->op, range_info->threshold);
    }

    IColumn::MutablePtr result_column;
    auto result_row_ids = std::make_unique<std::vector<uint64_t>>();
    // Top-N scoring is only valid when nothing else filters rows afterwards;
    // otherwise score every row in the bitmap.
    if (_score_runtime->get_limit() > 0 && _col_predicates.empty() &&
        _common_expr_ctxs_push_down.empty()) {
        OrderType order_type = _score_runtime->is_asc() ? OrderType::ASC : OrderType::DESC;
        _index_query_context->collection_similarity->get_topn_bm25_scores(
                &_row_bitmap, result_column, result_row_ids, order_type,
                _score_runtime->get_limit(), filter);
    } else {
        _index_query_context->collection_similarity->get_bm25_scores(&_row_bitmap, result_column,
                                                                     result_row_ids, filter);
    }
    // Hand the scores to the destination virtual column's iterator.
    // NOTE(review): dynamic_cast result is dereferenced without a null check —
    // presumably the destination column always uses a VirtualColumnIterator;
    // confirm.
    const size_t dst_col_idx = _score_runtime->get_dest_column_idx();
    auto* column_iter = _column_iterators[_schema->column_id(dst_col_idx)].get();
    auto* virtual_column_iter = dynamic_cast<VirtualColumnIterator*>(column_iter);
    virtual_column_iter->prepare_materialization(
            std::move(result_column),
            std::shared_ptr<std::vector<uint64_t>>(std::move(result_row_ids)));
}
3434
3435
} // namespace segment_v2
3436
} // namespace doris