Coverage Report

Created: 2026-04-10 04:05

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/storage/rowset/beta_rowset_reader.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "storage/rowset/beta_rowset_reader.h"
19
20
#include <stddef.h>
21
22
#include <algorithm>
23
#include <memory>
24
#include <ostream>
25
#include <roaring/roaring.hh>
26
#include <set>
27
#include <string>
28
#include <unordered_map>
29
#include <utility>
30
31
#include "common/logging.h"
32
#include "common/status.h"
33
#include "core/block/block.h"
34
#include "io/io_common.h"
35
#include "runtime/descriptors.h"
36
#include "runtime/runtime_profile.h"
37
#include "storage/cache/schema_cache.h"
38
#include "storage/delete/delete_handler.h"
39
#include "storage/iterator/vgeneric_iterators.h"
40
#include "storage/olap_define.h"
41
#include "storage/predicate/block_column_predicate.h"
42
#include "storage/predicate/column_predicate.h"
43
#include "storage/row_cursor.h"
44
#include "storage/rowset/rowset_meta.h"
45
#include "storage/rowset/rowset_reader_context.h"
46
#include "storage/schema.h"
47
#include "storage/segment/lazy_init_segment_iterator.h"
48
#include "storage/segment/segment.h"
49
#include "storage/tablet/tablet_meta.h"
50
#include "storage/tablet/tablet_schema.h"
51
52
namespace doris {
53
using namespace ErrorCode;
54
55
BetaRowsetReader::BetaRowsetReader(BetaRowsetSharedPtr rowset)
56
910
        : _read_context(nullptr), _rowset(std::move(rowset)), _stats(&_owned_stats) {
57
910
    _rowset->acquire();
58
910
}
59
60
941
void BetaRowsetReader::reset_read_options() {
61
941
    _read_options.delete_condition_predicates = AndBlockColumnPredicate::create_shared();
62
941
    _read_options.column_predicates.clear();
63
941
    _read_options.col_id_to_predicates.clear();
64
941
    _read_options.del_predicates_for_zone_map.clear();
65
941
    _read_options.key_ranges.clear();
66
941
}
67
68
0
RowsetReaderSharedPtr BetaRowsetReader::clone() {
69
0
    return RowsetReaderSharedPtr(new BetaRowsetReader(_rowset));
70
0
}
71
72
0
void BetaRowsetReader::update_profile(RuntimeProfile* profile) {
73
0
    if (_iterator != nullptr) {
74
0
        _iterator->update_profile(profile);
75
0
    }
76
0
}
77
78
Status BetaRowsetReader::get_segment_iterators(RowsetReaderContext* read_context,
79
                                               std::vector<RowwiseIteratorUPtr>* out_iters,
80
1.18k
                                               bool use_cache) {
81
1.18k
    _read_context = read_context;
82
    // The segment iterator is created with its own statistics,
83
    // and the member variable '_stats' is initialized by '_stats(&_owned_stats)'.
84
    // The choice of statistics used depends on the workload of the rowset reader.
85
    // For instance, if it's for query, the get_segment_iterators function
86
    // will receive one valid read_context with corresponding valid statistics,
87
    // and we will use those statistics.
88
    // However, for compaction or schema change workloads,
89
    // the read_context passed to the function will have null statistics,
90
    // and in such cases we will try to use the beta rowset reader's own statistics.
91
1.18k
    if (_read_context->stats != nullptr) {
92
1.09k
        _stats = _read_context->stats;
93
1.09k
    }
94
1.18k
    SCOPED_RAW_TIMER(&_stats->rowset_reader_get_segment_iterators_timer_ns);
95
96
1.18k
    RETURN_IF_ERROR(_rowset->load());
97
98
    // convert RowsetReaderContext to StorageReadOptions
99
1.18k
    _read_options.block_row_max = read_context->batch_size;
100
1.18k
    _read_options.stats = _stats;
101
1.18k
    _read_options.push_down_agg_type_opt = _read_context->push_down_agg_type_opt;
102
1.18k
    _read_options.remaining_conjunct_roots = _read_context->remaining_conjunct_roots;
103
1.18k
    _read_options.common_expr_ctxs_push_down = _read_context->common_expr_ctxs_push_down;
104
1.18k
    _read_options.virtual_column_exprs = _read_context->virtual_column_exprs;
105
106
1.18k
    _read_options.all_access_paths = _read_context->all_access_paths;
107
1.18k
    _read_options.predicate_access_paths = _read_context->predicate_access_paths;
108
109
1.18k
    _read_options.ann_topn_runtime = _read_context->ann_topn_runtime;
110
1.18k
    _read_options.vir_cid_to_idx_in_block = _read_context->vir_cid_to_idx_in_block;
111
1.18k
    _read_options.vir_col_idx_to_type = _read_context->vir_col_idx_to_type;
112
1.18k
    _read_options.score_runtime = _read_context->score_runtime;
113
1.18k
    _read_options.collection_statistics = _read_context->collection_statistics;
114
1.18k
    _read_options.rowset_id = _rowset->rowset_id();
115
1.18k
    _read_options.version = _rowset->version();
116
1.18k
    _read_options.tablet_id = _rowset->rowset_meta()->tablet_id();
117
1.18k
    _read_options.topn_limit = _topn_limit;
118
1.18k
    if (_read_context->lower_bound_keys != nullptr) {
119
1.08k
        for (int i = 0; i < _read_context->lower_bound_keys->size(); ++i) {
120
0
            _read_options.key_ranges.emplace_back(&_read_context->lower_bound_keys->at(i),
121
0
                                                  _read_context->is_lower_keys_included->at(i),
122
0
                                                  &_read_context->upper_bound_keys->at(i),
123
0
                                                  _read_context->is_upper_keys_included->at(i));
124
0
        }
125
1.08k
    }
126
127
    // delete_handler is always set, but it may not be initialized, so it will return empty conditions
128
    // or predicates when it is not inited.
129
1.18k
    if (_read_context->delete_handler != nullptr) {
130
1.08k
        _read_context->delete_handler->get_delete_conditions_after_version(
131
1.08k
                _rowset->end_version(), _read_options.delete_condition_predicates.get(),
132
1.08k
                &_read_options.del_predicates_for_zone_map);
133
1.08k
    }
134
135
1.18k
    std::vector<uint32_t> read_columns;
136
1.18k
    std::set<uint32_t> read_columns_set;
137
1.18k
    std::set<uint32_t> delete_columns_set;
138
4.90k
    for (int i = 0; i < _read_context->return_columns->size(); ++i) {
139
3.71k
        read_columns.push_back(_read_context->return_columns->at(i));
140
3.71k
        read_columns_set.insert(_read_context->return_columns->at(i));
141
3.71k
    }
142
1.18k
    _read_options.delete_condition_predicates->get_all_column_ids(delete_columns_set);
143
1.18k
    for (auto cid : delete_columns_set) {
144
406
        if (read_columns_set.find(cid) == read_columns_set.end()) {
145
254
            read_columns.push_back(cid);
146
254
        }
147
406
    }
148
    // disable condition cache if you have delete condition
149
1.18k
    _read_context->condition_cache_digest =
150
1.18k
            delete_columns_set.empty() ? _read_context->condition_cache_digest : 0;
151
    // create segment iterators
152
1.18k
    VLOG_NOTICE << "read columns size: " << read_columns.size();
153
1.18k
    _input_schema = std::make_shared<Schema>(_read_context->tablet_schema->columns(), read_columns);
154
    // output_schema only contains return_columns (excludes extra columns like delete-predicate columns).
155
    // It is used by merge/union iterators to determine how many columns to copy to the output block.
156
1.18k
    _output_schema = std::make_shared<Schema>(_read_context->tablet_schema->columns(),
157
1.18k
                                              *(_read_context->return_columns));
158
1.18k
    if (_read_context->predicates != nullptr) {
159
1.08k
        _read_options.column_predicates.insert(_read_options.column_predicates.end(),
160
1.08k
                                               _read_context->predicates->begin(),
161
1.08k
                                               _read_context->predicates->end());
162
1.08k
        for (auto pred : *(_read_context->predicates)) {
163
0
            if (_read_options.col_id_to_predicates.count(pred->column_id()) < 1) {
164
0
                _read_options.col_id_to_predicates.insert(
165
0
                        {pred->column_id(), AndBlockColumnPredicate::create_shared()});
166
0
            }
167
0
            _read_options.col_id_to_predicates[pred->column_id()]->add_column_predicate(
168
0
                    SingleColumnBlockPredicate::create_unique(pred));
169
0
        }
170
1.08k
    }
171
172
    // Take a delete-bitmap for each segment, the bitmap contains all deletes
173
    // until the max read version, which is read_context->version.second
174
1.18k
    if (_read_context->delete_bitmap != nullptr) {
175
5
        {
176
5
            SCOPED_RAW_TIMER(&_stats->delete_bitmap_get_agg_ns);
177
5
            RowsetId rowset_id = rowset()->rowset_id();
178
39
            for (uint32_t seg_id = 0; seg_id < rowset()->num_segments(); ++seg_id) {
179
34
                auto d = _read_context->delete_bitmap->get_agg(
180
34
                        {rowset_id, seg_id, _read_context->version.second});
181
34
                if (d->isEmpty()) {
182
11
                    continue; // Empty delete bitmap for the segment
183
11
                }
184
23
                VLOG_TRACE << "Get the delete bitmap for rowset: " << rowset_id.to_string()
185
0
                           << ", segment id:" << seg_id << ", size:" << d->cardinality();
186
23
                _read_options.delete_bitmap.emplace(seg_id, std::move(d));
187
23
            }
188
5
        }
189
5
    }
190
191
1.18k
    if (_should_push_down_value_predicates()) {
192
        // sequence mapping currently only support merge on read, so can not push down value predicates
193
603
        if (_read_context->value_predicates != nullptr &&
194
603
            !read_context->tablet_schema->has_seq_map()) {
195
538
            _read_options.column_predicates.insert(_read_options.column_predicates.end(),
196
538
                                                   _read_context->value_predicates->begin(),
197
538
                                                   _read_context->value_predicates->end());
198
538
            for (auto pred : *(_read_context->value_predicates)) {
199
0
                if (_read_options.col_id_to_predicates.count(pred->column_id()) < 1) {
200
0
                    _read_options.col_id_to_predicates.insert(
201
0
                            {pred->column_id(), AndBlockColumnPredicate::create_shared()});
202
0
                }
203
0
                _read_options.col_id_to_predicates[pred->column_id()]->add_column_predicate(
204
0
                        SingleColumnBlockPredicate::create_unique(pred));
205
0
            }
206
538
        }
207
603
    }
208
1.18k
    _read_options.use_page_cache = _read_context->use_page_cache;
209
1.18k
    _read_options.tablet_schema = _read_context->tablet_schema;
210
1.18k
    _read_options.enable_unique_key_merge_on_write =
211
1.18k
            _read_context->enable_unique_key_merge_on_write;
212
1.18k
    _read_options.record_rowids = _read_context->record_rowids;
213
1.18k
    _read_options.topn_filter_source_node_ids = _read_context->topn_filter_source_node_ids;
214
1.18k
    _read_options.topn_filter_target_node_id = _read_context->topn_filter_target_node_id;
215
1.18k
    _read_options.read_orderby_key_reverse = _read_context->read_orderby_key_reverse;
216
1.18k
    _read_options.read_orderby_key_columns = _read_context->read_orderby_key_columns;
217
1.18k
    _read_options.io_ctx.reader_type = _read_context->reader_type;
218
1.18k
    _read_options.io_ctx.file_cache_stats = &_stats->file_cache_stats;
219
1.18k
    _read_options.runtime_state = _read_context->runtime_state;
220
1.18k
    _read_options.output_columns = _read_context->output_columns;
221
1.18k
    _read_options.io_ctx.reader_type = _read_context->reader_type;
222
1.18k
    _read_options.io_ctx.is_disposable = _read_context->reader_type != ReaderType::READER_QUERY;
223
1.18k
    _read_options.target_cast_type_for_variants = _read_context->target_cast_type_for_variants;
224
1.18k
    if (_read_context->runtime_state != nullptr) {
225
0
        _read_options.io_ctx.query_id = &_read_context->runtime_state->query_id();
226
0
        _read_options.io_ctx.read_file_cache =
227
0
                _read_context->runtime_state->query_options().enable_file_cache;
228
0
        _read_options.io_ctx.is_disposable =
229
0
                _read_context->runtime_state->query_options().disable_file_cache;
230
0
    }
231
232
1.18k
    if (_read_context->condition_cache_digest) {
233
0
        for (const auto& key_range : _read_options.key_ranges) {
234
0
            _read_context->condition_cache_digest =
235
0
                    key_range.get_digest(_read_context->condition_cache_digest);
236
0
        }
237
0
        _read_options.condition_cache_digest = _read_context->condition_cache_digest;
238
0
    }
239
240
1.18k
    _read_options.io_ctx.expiration_time = read_context->ttl_seconds;
241
242
1.18k
    bool enable_segment_cache = true;
243
1.18k
    auto* state = read_context->runtime_state;
244
1.18k
    if (state != nullptr) {
245
0
        enable_segment_cache = state->query_options().__isset.enable_segment_cache
246
0
                                       ? state->query_options().enable_segment_cache
247
0
                                       : true;
248
0
    }
249
    // When reader type is for query, session variable `enable_segment_cache` should be respected.
250
1.18k
    bool should_use_cache = use_cache || (_read_context->reader_type == ReaderType::READER_QUERY &&
251
1.18k
                                          enable_segment_cache);
252
253
1.18k
    auto segment_count = _rowset->num_segments();
254
1.18k
    auto [seg_start, seg_end] = _segment_offsets;
255
    // If seg_start == seg_end, it means that the segments of a rowset is not
256
    // split scanned by multiple scanners, and the rowset reader is used to read the whole rowset.
257
1.18k
    if (seg_start == seg_end) {
258
1.18k
        seg_start = 0;
259
1.18k
        seg_end = segment_count;
260
1.18k
    }
261
1.18k
    if (_read_context->record_rowids && _read_context->rowid_conversion) {
262
        // init segment rowid map for rowid conversion
263
396
        std::vector<uint32_t> segment_rows;
264
396
        RETURN_IF_ERROR(_rowset->get_segment_num_rows(&segment_rows, should_use_cache, _stats));
265
396
        RETURN_IF_ERROR(_read_context->rowid_conversion->init_segment_map(rowset()->rowset_id(),
266
396
                                                                          segment_rows));
267
396
    }
268
269
6.79k
    for (int64_t i = seg_start; i < seg_end; i++) {
270
5.60k
        SCOPED_RAW_TIMER(&_stats->rowset_reader_create_iterators_timer_ns);
271
5.60k
        std::unique_ptr<RowwiseIterator> iter;
272
273
        /// For iterators, we don't need to initialize them all at once when creating them.
274
        /// Instead, we should initialize each iterator separately when really using them.
275
        /// This optimization minimizes the lifecycle of resources like column readers
276
        /// and prevents excessive memory consumption, especially for wide tables.
277
5.60k
        if (_segment_row_ranges.empty()) {
278
5.60k
            _read_options.row_ranges.clear();
279
5.60k
            iter = std::make_unique<LazyInitSegmentIterator>(_rowset, i, should_use_cache,
280
5.60k
                                                             _input_schema, _read_options);
281
5.60k
        } else {
282
0
            DCHECK_EQ(seg_end - seg_start, _segment_row_ranges.size());
283
0
            auto local_options = _read_options;
284
0
            local_options.row_ranges = _segment_row_ranges[i - seg_start];
285
0
            if (local_options.condition_cache_digest) {
286
0
                local_options.condition_cache_digest =
287
0
                        local_options.row_ranges.get_digest(local_options.condition_cache_digest);
288
0
            }
289
0
            iter = std::make_unique<LazyInitSegmentIterator>(_rowset, i, should_use_cache,
290
0
                                                             _input_schema, local_options);
291
0
        }
292
293
5.60k
        if (iter->empty()) {
294
0
            continue;
295
0
        }
296
5.60k
        out_iters->push_back(std::move(iter));
297
5.60k
    }
298
299
1.18k
    return Status::OK();
300
1.18k
}
301
302
247
Status BetaRowsetReader::init(RowsetReaderContext* read_context, const RowSetSplits& rs_splits) {
303
247
    _read_context = read_context;
304
247
    _read_context->rowset_id = _rowset->rowset_id();
305
247
    _segment_offsets = rs_splits.segment_offsets;
306
247
    _segment_row_ranges = rs_splits.segment_row_ranges;
307
247
    return Status::OK();
308
247
}
309
310
6.74k
Status BetaRowsetReader::_init_iterator_once() {
311
6.74k
    return _init_iter_once.call([this] { return _init_iterator(); });
312
6.74k
}
313
314
247
Status BetaRowsetReader::_init_iterator() {
315
247
    std::vector<RowwiseIteratorUPtr> iterators;
316
247
    RETURN_IF_ERROR(get_segment_iterators(_read_context, &iterators));
317
318
247
    SCOPED_RAW_TIMER(&_stats->rowset_reader_init_iterators_timer_ns);
319
320
247
    if (_read_context->merged_rows == nullptr) {
321
103
        _read_context->merged_rows = &_merged_rows;
322
103
    }
323
    // merge or union segment iterator
324
247
    if (is_merge_iterator()) {
325
8
        auto sequence_loc = -1;
326
8
        if (_read_context->sequence_id_idx != -1) {
327
0
            for (int loc = 0; loc < _read_context->return_columns->size(); loc++) {
328
0
                if (_read_context->return_columns->at(loc) == _read_context->sequence_id_idx) {
329
0
                    sequence_loc = loc;
330
0
                    break;
331
0
                }
332
0
            }
333
0
        }
334
8
        _iterator = new_merge_iterator(std::move(iterators), sequence_loc, _read_context->is_unique,
335
8
                                       _read_context->read_orderby_key_reverse,
336
8
                                       _read_context->merged_rows, _output_schema);
337
239
    } else {
338
239
        if (_read_context->read_orderby_key_reverse) {
339
            // reverse iterators to read backward for ORDER BY key DESC
340
0
            std::reverse(iterators.begin(), iterators.end());
341
0
        }
342
239
        _iterator = new_union_iterator(std::move(iterators), _output_schema);
343
239
    }
344
345
247
    auto s = _iterator->init(_read_options);
346
247
    if (!s.ok()) {
347
0
        LOG(WARNING) << "failed to init iterator: " << s.to_string();
348
0
        _iterator.reset();
349
0
        return Status::Error<ROWSET_READER_INIT>(s.to_string());
350
0
    }
351
247
    return Status::OK();
352
247
}
353
354
1.18k
bool BetaRowsetReader::_should_push_down_value_predicates() const {
355
    // if unique table with rowset [0-x] or [0-1] [2-y] [...],
356
    // value column predicates can be pushdown on rowset [0-x] or [2-y], [2-y]
357
    // must be compaction, not overlapping and don't have sequence column
358
1.18k
    return _rowset->keys_type() == UNIQUE_KEYS &&
359
1.18k
           (((_rowset->start_version() == 0 || _rowset->start_version() == 2) &&
360
673
             !_rowset->_rowset_meta->is_segments_overlapping() &&
361
673
             _read_context->sequence_id_idx == -1) ||
362
673
            _read_context->enable_unique_key_merge_on_write ||
363
673
            _read_context->enable_mor_value_predicate_pushdown);
364
1.18k
}
365
} // namespace doris