be/src/storage/segment/column_reader.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "storage/segment/column_reader.h" |
19 | | |
20 | | #include <assert.h> |
21 | | #include <gen_cpp/Descriptors_types.h> |
22 | | #include <gen_cpp/segment_v2.pb.h> |
23 | | #include <glog/logging.h> |
24 | | |
25 | | #include <algorithm> |
26 | | #include <memory> |
27 | | #include <ostream> |
28 | | #include <set> |
29 | | #include <utility> |
30 | | |
31 | | #include "common/compiler_util.h" // IWYU pragma: keep |
32 | | #include "common/status.h" |
33 | | #include "core/assert_cast.h" |
34 | | #include "core/binary_cast.hpp" |
35 | | #include "core/column/column.h" |
36 | | #include "core/column/column_array.h" |
37 | | #include "core/column/column_map.h" |
38 | | #include "core/column/column_nullable.h" |
39 | | #include "core/column/column_struct.h" |
40 | | #include "core/column/column_vector.h" |
41 | | #include "core/data_type/data_type_agg_state.h" |
42 | | #include "core/data_type/data_type_factory.hpp" |
43 | | #include "core/data_type/data_type_nullable.h" |
44 | | #include "core/data_type/define_primitive_type.h" |
45 | | #include "core/decimal12.h" |
46 | | #include "core/string_ref.h" |
47 | | #include "core/types.h" |
48 | | #include "core/value/decimalv2_value.h" |
49 | | #include "core/value/vdatetime_value.h" //for VecDateTime |
50 | | #include "io/fs/file_reader.h" |
51 | | #include "storage/index/ann/ann_index_reader.h" |
52 | | #include "storage/index/bloom_filter/bloom_filter.h" |
53 | | #include "storage/index/bloom_filter/bloom_filter_index_reader.h" |
54 | | #include "storage/index/index_file_reader.h" |
55 | | #include "storage/index/index_reader.h" |
56 | | #include "storage/index/inverted/analyzer/analyzer.h" |
57 | | #include "storage/index/inverted/inverted_index_reader.h" |
58 | | #include "storage/index/zone_map/zone_map_index.h" |
59 | | #include "storage/iterators.h" |
60 | | #include "storage/olap_common.h" |
61 | | #include "storage/predicate/block_column_predicate.h" |
62 | | #include "storage/predicate/column_predicate.h" |
63 | | #include "storage/segment/binary_dict_page.h" // for BinaryDictPageDecoder |
64 | | #include "storage/segment/binary_plain_page.h" |
65 | | #include "storage/segment/column_meta_accessor.h" |
66 | | #include "storage/segment/encoding_info.h" // for EncodingInfo |
67 | | #include "storage/segment/page_decoder.h" |
68 | | #include "storage/segment/page_handle.h" // for PageHandle |
69 | | #include "storage/segment/page_io.h" |
70 | | #include "storage/segment/page_pointer.h" // for PagePointer |
71 | | #include "storage/segment/row_ranges.h" |
72 | | #include "storage/segment/segment.h" |
73 | | #include "storage/segment/segment_file_access_range_builder.h" |
74 | | #include "storage/segment/variant/variant_column_reader.h" |
75 | | #include "storage/tablet/tablet_schema.h" |
76 | | #include "storage/types.h" // for TypeInfo |
77 | | #include "util/bitmap.h" |
78 | | #include "util/block_compression.h" |
79 | | #include "util/concurrency_stats.h" |
80 | | #include "util/rle_encoding.h" // for RleDecoder |
81 | | #include "util/slice.h" |
82 | | |
83 | | namespace doris::segment_v2 { |
84 | | #include "storage/segment/column_reader.h" |
85 | | |
86 | 32 | inline bool read_as_string(PrimitiveType type) { |
87 | 32 | return type == PrimitiveType::TYPE_STRING || type == PrimitiveType::INVALID_TYPE || |
88 | 32 | type == PrimitiveType::TYPE_BITMAP || type == PrimitiveType::TYPE_FIXED_LENGTH_OBJECT; |
89 | 32 | } |
90 | | |
91 | | Status ColumnReader::create_array(const ColumnReaderOptions& opts, const ColumnMetaPB& meta, |
92 | | const io::FileReaderSPtr& file_reader, |
93 | 2.26k | std::shared_ptr<ColumnReader>* reader) { |
94 | 2.26k | DCHECK(meta.children_columns_size() == 2 || meta.children_columns_size() == 3); |
95 | | |
96 | 2.26k | std::shared_ptr<ColumnReader> item_reader; |
97 | 2.26k | RETURN_IF_ERROR(ColumnReader::create(opts, meta.children_columns(0), |
98 | 2.26k | meta.children_columns(0).num_rows(), file_reader, |
99 | 2.26k | &item_reader)); |
100 | | |
101 | 2.26k | std::shared_ptr<ColumnReader> offset_reader; |
102 | 2.26k | RETURN_IF_ERROR(ColumnReader::create(opts, meta.children_columns(1), |
103 | 2.26k | meta.children_columns(1).num_rows(), file_reader, |
104 | 2.26k | &offset_reader)); |
105 | | |
106 | 2.26k | std::shared_ptr<ColumnReader> null_reader; |
107 | 2.26k | if (meta.is_nullable()) { |
108 | 2.04k | RETURN_IF_ERROR(ColumnReader::create(opts, meta.children_columns(2), |
109 | 2.04k | meta.children_columns(2).num_rows(), file_reader, |
110 | 2.04k | &null_reader)); |
111 | 2.04k | } |
112 | | |
113 | | // The num rows of the array reader equals to the num rows of the length reader. |
114 | 2.26k | uint64_t array_num_rows = meta.children_columns(1).num_rows(); |
115 | 2.26k | std::shared_ptr<ColumnReader> array_reader( |
116 | 2.26k | new ColumnReader(opts, meta, array_num_rows, file_reader)); |
117 | | // array reader do not need to init |
118 | 2.26k | array_reader->_sub_readers.resize(meta.children_columns_size()); |
119 | 2.26k | array_reader->_sub_readers[0] = std::move(item_reader); |
120 | 2.26k | array_reader->_sub_readers[1] = std::move(offset_reader); |
121 | 2.26k | if (meta.is_nullable()) { |
122 | 2.04k | array_reader->_sub_readers[2] = std::move(null_reader); |
123 | 2.04k | } |
124 | 2.26k | array_reader->_meta_type = FieldType::OLAP_FIELD_TYPE_ARRAY; |
125 | 2.26k | *reader = std::move(array_reader); |
126 | 2.26k | return Status::OK(); |
127 | 2.26k | } |
128 | | |
129 | | Status ColumnReader::create_map(const ColumnReaderOptions& opts, const ColumnMetaPB& meta, |
130 | | const io::FileReaderSPtr& file_reader, |
131 | 1.61k | std::shared_ptr<ColumnReader>* reader) { |
132 | | // map reader now has 3 sub readers for key, value, offsets(scalar), null(scala) |
133 | 1.61k | DCHECK(meta.children_columns_size() == 3 || meta.children_columns_size() == 4); |
134 | 1.61k | std::shared_ptr<ColumnReader> key_reader; |
135 | 1.61k | RETURN_IF_ERROR(ColumnReader::create(opts, meta.children_columns(0), |
136 | 1.61k | meta.children_columns(0).num_rows(), file_reader, |
137 | 1.61k | &key_reader)); |
138 | 1.61k | std::shared_ptr<ColumnReader> val_reader; |
139 | 1.61k | RETURN_IF_ERROR(ColumnReader::create(opts, meta.children_columns(1), |
140 | 1.61k | meta.children_columns(1).num_rows(), file_reader, |
141 | 1.61k | &val_reader)); |
142 | 1.61k | std::shared_ptr<ColumnReader> offset_reader; |
143 | 1.61k | RETURN_IF_ERROR(ColumnReader::create(opts, meta.children_columns(2), |
144 | 1.61k | meta.children_columns(2).num_rows(), file_reader, |
145 | 1.61k | &offset_reader)); |
146 | 1.61k | std::shared_ptr<ColumnReader> null_reader; |
147 | 1.61k | if (meta.is_nullable()) { |
148 | 1.21k | RETURN_IF_ERROR(ColumnReader::create(opts, meta.children_columns(3), |
149 | 1.21k | meta.children_columns(3).num_rows(), file_reader, |
150 | 1.21k | &null_reader)); |
151 | 1.21k | } |
152 | | |
153 | | // The num rows of the map reader equals to the num rows of the length reader. |
154 | 1.61k | uint64_t map_num_rows = meta.children_columns(2).num_rows(); |
155 | 1.61k | std::shared_ptr<ColumnReader> map_reader( |
156 | 1.61k | new ColumnReader(opts, meta, map_num_rows, file_reader)); |
157 | 1.61k | map_reader->_sub_readers.resize(meta.children_columns_size()); |
158 | | |
159 | 1.61k | map_reader->_sub_readers[0] = std::move(key_reader); |
160 | 1.61k | map_reader->_sub_readers[1] = std::move(val_reader); |
161 | 1.61k | map_reader->_sub_readers[2] = std::move(offset_reader); |
162 | 1.61k | if (meta.is_nullable()) { |
163 | 1.21k | map_reader->_sub_readers[3] = std::move(null_reader); |
164 | 1.21k | } |
165 | 1.61k | map_reader->_meta_type = FieldType::OLAP_FIELD_TYPE_MAP; |
166 | 1.61k | *reader = std::move(map_reader); |
167 | 1.61k | return Status::OK(); |
168 | 1.61k | } |
169 | | |
170 | | Status ColumnReader::create_struct(const ColumnReaderOptions& opts, const ColumnMetaPB& meta, |
171 | | uint64_t num_rows, const io::FileReaderSPtr& file_reader, |
172 | 402 | std::shared_ptr<ColumnReader>* reader) { |
173 | | // not support empty struct |
174 | 402 | DCHECK(meta.children_columns_size() >= 1); |
175 | | // create struct column reader |
176 | 402 | std::shared_ptr<ColumnReader> struct_reader( |
177 | 402 | new ColumnReader(opts, meta, num_rows, file_reader)); |
178 | 402 | struct_reader->_sub_readers.reserve(meta.children_columns_size()); |
179 | | // now we support struct column can add the children columns according to the schema-change behavior |
180 | 3.40k | for (int i = 0; i < meta.children_columns_size(); i++) { |
181 | 3.00k | std::shared_ptr<ColumnReader> sub_reader; |
182 | 3.00k | RETURN_IF_ERROR(ColumnReader::create(opts, meta.children_columns(i), |
183 | 3.00k | meta.children_columns(i).num_rows(), file_reader, |
184 | 3.00k | &sub_reader)); |
185 | 3.00k | struct_reader->_sub_readers.push_back(std::move(sub_reader)); |
186 | 3.00k | } |
187 | 402 | struct_reader->_meta_type = FieldType::OLAP_FIELD_TYPE_STRUCT; |
188 | 402 | *reader = std::move(struct_reader); |
189 | 402 | return Status::OK(); |
190 | 402 | } |
191 | | |
192 | | Status ColumnReader::create_agg_state(const ColumnReaderOptions& opts, const ColumnMetaPB& meta, |
193 | | uint64_t num_rows, const io::FileReaderSPtr& file_reader, |
194 | 32 | std::shared_ptr<ColumnReader>* reader) { |
195 | 32 | if (!meta.has_function_name()) { // meet old version ColumnMetaPB |
196 | 0 | std::shared_ptr<ColumnReader> reader_local( |
197 | 0 | new ColumnReader(opts, meta, num_rows, file_reader)); |
198 | 0 | RETURN_IF_ERROR(reader_local->init(&meta)); |
199 | 0 | *reader = std::move(reader_local); |
200 | 0 | return Status::OK(); |
201 | 0 | } |
202 | | |
203 | 32 | auto data_type = DataTypeFactory::instance().create_data_type(meta); |
204 | 32 | const auto* agg_state_type = assert_cast<const DataTypeAggState*>(data_type.get()); |
205 | 32 | agg_state_type->check_function_compatibility(opts.be_exec_version); |
206 | 32 | auto type = agg_state_type->get_serialized_type()->get_primitive_type(); |
207 | | |
208 | 32 | if (read_as_string(type)) { |
209 | 32 | std::shared_ptr<ColumnReader> reader_local( |
210 | 32 | new ColumnReader(opts, meta, num_rows, file_reader)); |
211 | 32 | RETURN_IF_ERROR(reader_local->init(&meta)); |
212 | 32 | *reader = std::move(reader_local); |
213 | 32 | return Status::OK(); |
214 | 32 | } else if (type == PrimitiveType::TYPE_MAP) { |
215 | 0 | return create_map(opts, meta, file_reader, reader); |
216 | 0 | } else if (type == PrimitiveType::TYPE_ARRAY) { |
217 | 0 | return create_array(opts, meta, file_reader, reader); |
218 | 0 | } else if (type == PrimitiveType::TYPE_STRUCT) { |
219 | 0 | return create_struct(opts, meta, num_rows, file_reader, reader); |
220 | 0 | } |
221 | | |
222 | 0 | return Status::InternalError("Not supported type: {}, serialized type: {}", |
223 | 0 | agg_state_type->get_name(), int(type)); |
224 | 32 | } |
225 | | |
226 | 407 | bool ColumnReader::is_compaction_reader_type(ReaderType type) { |
227 | 407 | return type == ReaderType::READER_BASE_COMPACTION || |
228 | 407 | type == ReaderType::READER_CUMULATIVE_COMPACTION || |
229 | 407 | type == ReaderType::READER_COLD_DATA_COMPACTION || |
230 | 407 | type == ReaderType::READER_SEGMENT_COMPACTION || |
231 | 407 | type == ReaderType::READER_FULL_COMPACTION; |
232 | 407 | } |
233 | | |
234 | | Status ColumnReader::create(const ColumnReaderOptions& opts, const ColumnMetaPB& meta, |
235 | | uint64_t num_rows, const io::FileReaderSPtr& file_reader, |
236 | 8.24M | std::shared_ptr<ColumnReader>* reader) { |
237 | 8.24M | if (is_scalar_type((FieldType)meta.type())) { |
238 | 8.23M | std::shared_ptr<ColumnReader> reader_local( |
239 | 8.23M | new ColumnReader(opts, meta, num_rows, file_reader)); |
240 | 8.23M | RETURN_IF_ERROR(reader_local->init(&meta)); |
241 | 8.23M | *reader = std::move(reader_local); |
242 | 8.23M | return Status::OK(); |
243 | 8.23M | } else { |
244 | 5.69k | auto type = (FieldType)meta.type(); |
245 | 5.69k | switch (type) { |
246 | 32 | case FieldType::OLAP_FIELD_TYPE_AGG_STATE: { |
247 | 32 | return create_agg_state(opts, meta, num_rows, file_reader, reader); |
248 | 0 | } |
249 | 402 | case FieldType::OLAP_FIELD_TYPE_STRUCT: { |
250 | 402 | return create_struct(opts, meta, num_rows, file_reader, reader); |
251 | 0 | } |
252 | 2.26k | case FieldType::OLAP_FIELD_TYPE_ARRAY: { |
253 | 2.26k | return create_array(opts, meta, file_reader, reader); |
254 | 0 | } |
255 | 1.61k | case FieldType::OLAP_FIELD_TYPE_MAP: { |
256 | 1.61k | return create_map(opts, meta, file_reader, reader); |
257 | 0 | } |
258 | 397 | case FieldType::OLAP_FIELD_TYPE_VARIANT: { |
259 | | // Read variant only root data using a single ColumnReader |
260 | 397 | std::shared_ptr<ColumnReader> reader_local( |
261 | 397 | new ColumnReader(opts, meta, num_rows, file_reader)); |
262 | 397 | RETURN_IF_ERROR(reader_local->init(&meta)); |
263 | 397 | *reader = std::move(reader_local); |
264 | 397 | return Status::OK(); |
265 | 397 | } |
266 | 0 | default: |
267 | 0 | return Status::NotSupported("unsupported type for ColumnReader: {}", |
268 | 0 | std::to_string(int(type))); |
269 | 5.69k | } |
270 | 5.69k | } |
271 | 8.24M | } |
272 | | |
// Default constructor: all members keep their in-class defaults.
ColumnReader::ColumnReader() = default;
274 | | |
// Constructs a reader for one column of a segment, caching the parts of the
// protobuf meta needed later; the ColumnMetaPB itself is not retained.
ColumnReader::ColumnReader(const ColumnReaderOptions& opts, const ColumnMetaPB& meta,
                           uint64_t num_rows, io::FileReaderSPtr file_reader)
        : _use_index_page_cache(!config::disable_storage_page_cache),
          _opts(opts),
          _num_rows(num_rows),
          _file_reader(std::move(file_reader)),
          _file_reader_factory(opts.file_reader_factory),
          _dict_encoding_type(UNKNOWN_DICT_ENCODING) {
    // The factory is stored separately from _opts because _opts is copied into
    // page-read options and metadata helpers. Keeping the factory out of _opts
    // avoids retaining a ColumnReaderCache-capturing lambda in code paths that
    // only need immutable column metadata.
    _opts.file_reader_factory = nullptr;
    _meta_length = meta.length();
    _meta_type = (FieldType)meta.type();
    if (_meta_type == FieldType::OLAP_FIELD_TYPE_ARRAY) {
        // For arrays, remember the item child's type; _load_index() uses it
        // when building an inverted index reader over array items.
        _meta_children_column_type = (FieldType)meta.children_columns(0).type();
    }
    _data_type = DataTypeFactory::instance().create_data_type(meta);
    _meta_is_nullable = meta.is_nullable();
    _meta_dict_page = meta.dict_page();
    _meta_compression = meta.compression();
}
298 | | |
299 | 7.73M | Result<io::FileReaderSPtr> ColumnReader::_new_data_file_reader() const { |
300 | | // This is intentionally called by FileColumnIterator::init() instead of by |
301 | | // ColumnReader::create(). A cached ColumnReader can be shared by many scan |
302 | | // iterators, while every FileColumnIterator needs its own data reader when |
303 | | // cache-block prefetch is enabled. Returning the shared reader in the |
304 | | // fallback path keeps the old behavior for local files and for scans that do |
305 | | // not request cache-aware prefetch. |
306 | 7.73M | if (_file_reader_factory) { |
307 | 7.73M | return _file_reader_factory(); |
308 | 7.73M | } |
309 | 18.4E | return _file_reader; |
310 | 7.73M | } |
311 | | |
// Out-of-line defaulted destructor: unique_ptr members (e.g. the index
// readers) are destroyed in this TU, where their complete types are visible.
ColumnReader::~ColumnReader() = default;
313 | | |
314 | 8.23M | int64_t ColumnReader::get_metadata_size() const { |
315 | 8.23M | return sizeof(ColumnReader) + (_segment_zone_map ? _segment_zone_map->ByteSizeLong() : 0); |
316 | 8.23M | } |
317 | | |
#ifdef BE_TEST
/// This function is only used in UT to verify the correctness of data read from zone map
/// See UT case 'SegCompactionMoWTest.SegCompactionInterleaveWithBig_ooooOOoOooooooooO'
/// be/test/olap/segcompaction_mow_test.cpp
void ColumnReader::check_data_by_zone_map_for_test(const MutableColumnPtr& dst) const {
    // No zone map recorded for this column: nothing to verify against.
    if (!_segment_zone_map) {
        return;
    }

    const auto rows = dst->size();
    if (rows == 0) {
        return;
    }

    // Only TYPE_INT columns are checked (the cast below is int-specific).
    FieldType type = _type_info->type();

    if (type != FieldType::OLAP_FIELD_TYPE_INT) {
        return;
    }

    // Unwrap the nullable wrapper so the concrete vector column can be inspected.
    auto* non_nullable_column =
            dst->is_nullable()
                    ? assert_cast<ColumnNullable*>(dst.get())->get_nested_column_ptr().get()
                    : dst.get();

    /// `PredicateColumnType<TYPE_INT>` does not support `void get(size_t n, Field& res)`,
    /// So here only check `CoumnVector<TYPE_INT>`
    if (check_and_get_column<ColumnVector<TYPE_INT>>(non_nullable_column) == nullptr) {
        return;
    }

    ZoneMap zone_map;
    THROW_IF_ERROR(ZoneMap::from_proto(*_segment_zone_map, _data_type, zone_map));

    // Skip columns containing NULLs: the per-row DCHECKs below assume non-null.
    if (zone_map.has_null) {
        return;
    }

    // Every value read back must lie inside [min, max] recorded by the
    // segment-level zone map.
    for (size_t i = 0; i != rows; ++i) {
        Field field;
        dst->get(i, field);
        DCHECK(!field.is_null());
        const auto v = field.get<TYPE_INT>();
        DCHECK_GE(v, zone_map.min_value.get<TYPE_INT>());
        DCHECK_LE(v, zone_map.max_value.get<TYPE_INT>());
    }
}
#endif
366 | | |
// One-time initialization from the column's protobuf metadata: resolves the
// type/encoding info and constructs the (lazily loaded) index readers that
// `meta` describes. Returns Corruption on an unknown index type or a missing
// ordinal index for a non-empty column.
Status ColumnReader::init(const ColumnMetaPB* meta) {
    _type_info = get_type_info(meta);

    if (meta->has_be_exec_version()) {
        _be_exec_version = meta->be_exec_version();
    }

    if (_type_info == nullptr) {
        return Status::NotSupported("unsupported typeinfo, type={}", meta->type());
    }
    RETURN_IF_ERROR(EncodingInfo::get(_type_info->type(), meta->encoding(), {}, &_encoding_info));

    // Construct one reader per index recorded in the meta. Only the reader
    // objects are built here; their pages load on first use.
    for (int i = 0; i < meta->indexes_size(); i++) {
        const auto& index_meta = meta->indexes(i);
        switch (index_meta.type()) {
        case BITMAP_INDEX:
            break;
        case ORDINAL_INDEX:
            _ordinal_index.reset(
                    new OrdinalIndexReader(_file_reader, _num_rows, index_meta.ordinal_index()));
            break;
        case ZONE_MAP_INDEX:
            // Keep a copy of the segment-level zone map so predicate pruning
            // can run without loading the per-page zone map index.
            _segment_zone_map =
                    std::make_unique<ZoneMapPB>(index_meta.zone_map_index().segment_zone_map());
            _zone_map_index.reset(new ZoneMapIndexReader(
                    _file_reader, index_meta.zone_map_index().page_zone_maps()));
            break;
        case BLOOM_FILTER_INDEX:
            _bloom_filter_index.reset(
                    new BloomFilterIndexReader(_file_reader, index_meta.bloom_filter_index()));
            break;
        case NESTED_OFFSETS_INDEX:
            break;
        default:
            return Status::Corruption("Bad file {}: invalid column index type {}",
                                      _file_reader->path().native(), index_meta.type());
        }
    }
    update_metadata_size();

    // ArrayColumnWriter writes a single empty array and flushes. In this scenario,
    // the item writer doesn't write any data and the corresponding ordinal index is empty.
    if (_ordinal_index == nullptr && !is_empty()) {
        return Status::Corruption("Bad file {}: missing ordinal index for column {}",
                                  _file_reader->path().native(), meta->column_id());
    }

    return Status::OK();
}
416 | | |
417 | | Status ColumnReader::new_index_iterator(const std::shared_ptr<IndexFileReader>& index_file_reader, |
418 | | const TabletIndex* index_meta, |
419 | 2.56k | std::unique_ptr<IndexIterator>* iterator) { |
420 | 2.56k | RETURN_IF_ERROR(_load_index(index_file_reader, index_meta)); |
421 | 2.56k | { |
422 | 2.56k | std::shared_lock<std::shared_mutex> rlock(_load_index_lock); |
423 | 2.56k | auto iter = _index_readers.find(index_meta->index_id()); |
424 | 2.56k | if (iter != _index_readers.end()) { |
425 | 2.56k | if (iter->second != nullptr) { |
426 | 2.56k | RETURN_IF_ERROR(iter->second->new_iterator(iterator)); |
427 | 2.56k | } |
428 | 2.56k | } |
429 | 2.56k | } |
430 | 2.56k | return Status::OK(); |
431 | 2.56k | } |
432 | | |
433 | | Status ColumnReader::read_page(const ColumnIteratorOptions& iter_opts, const PagePointer& pp, |
434 | | PageHandle* handle, Slice* page_body, PageFooterPB* footer, |
435 | 147k | BlockCompressionCodec* codec, bool is_dict_page) const { |
436 | 147k | SCOPED_CONCURRENCY_COUNT(ConcurrencyStatsManager::instance().column_reader_read_page); |
437 | 147k | iter_opts.sanity_check(); |
438 | 147k | PageReadOptions opts(iter_opts.io_ctx); |
439 | 147k | opts.verify_checksum = _opts.verify_checksum; |
440 | 147k | opts.use_page_cache = iter_opts.use_page_cache; |
441 | 147k | opts.kept_in_memory = _opts.kept_in_memory; |
442 | 147k | opts.type = iter_opts.type; |
443 | 147k | opts.file_reader = iter_opts.file_reader; |
444 | 147k | opts.page_pointer = pp; |
445 | 147k | opts.codec = codec; |
446 | 147k | opts.stats = iter_opts.stats; |
447 | 147k | opts.encoding_info = _encoding_info; |
448 | 147k | opts.is_dict_page = is_dict_page; |
449 | | |
450 | 147k | return PageIO::read_and_decompress_page(opts, handle, page_body, footer); |
451 | 147k | } |
452 | | |
453 | | Status ColumnReader::get_row_ranges_by_zone_map( |
454 | | const AndBlockColumnPredicate* col_predicates, |
455 | | const std::vector<std::shared_ptr<const ColumnPredicate>>* delete_predicates, |
456 | 3.74k | RowRanges* row_ranges, const ColumnIteratorOptions& iter_opts) { |
457 | 3.74k | std::vector<uint32_t> page_indexes; |
458 | 3.74k | RETURN_IF_ERROR( |
459 | 3.74k | _get_filtered_pages(col_predicates, delete_predicates, &page_indexes, iter_opts)); |
460 | 3.74k | RETURN_IF_ERROR(_calculate_row_ranges(page_indexes, row_ranges, iter_opts)); |
461 | 3.74k | return Status::OK(); |
462 | 3.74k | } |
463 | | |
464 | 7.05k | Status ColumnReader::next_batch_of_zone_map(size_t* n, MutableColumnPtr& dst) const { |
465 | 7.05k | if (_segment_zone_map == nullptr) { |
466 | 0 | return Status::InternalError("segment zonemap not exist"); |
467 | 0 | } |
468 | | // TODO: this work to get min/max value seems should only do once |
469 | 7.05k | ZoneMap zone_map; |
470 | 7.05k | RETURN_IF_ERROR(ZoneMap::from_proto(*_segment_zone_map, _data_type, zone_map)); |
471 | | |
472 | 7.05k | dst->reserve(*n); |
473 | 7.05k | if (!zone_map.has_not_null) { |
474 | 1.01k | assert_cast<ColumnNullable&>(*dst).insert_many_defaults(*n); |
475 | 1.01k | return Status::OK(); |
476 | 1.01k | } |
477 | 6.03k | dst->insert(zone_map.max_value); |
478 | 12.1k | for (int i = 1; i < *n; ++i) { |
479 | 6.06k | dst->insert(zone_map.min_value); |
480 | 6.06k | } |
481 | 6.03k | return Status::OK(); |
482 | 7.05k | } |
483 | | |
484 | | Status ColumnReader::match_condition(const AndBlockColumnPredicate* col_predicates, |
485 | 505k | bool* matched) const { |
486 | 505k | *matched = true; |
487 | 505k | if (_zone_map_index == nullptr) { |
488 | 0 | return Status::OK(); |
489 | 0 | } |
490 | 505k | ZoneMap zone_map; |
491 | 505k | RETURN_IF_ERROR(ZoneMap::from_proto(*_segment_zone_map, _data_type, zone_map)); |
492 | | |
493 | 505k | *matched = _zone_map_match_condition(zone_map, col_predicates); |
494 | 505k | return Status::OK(); |
495 | 505k | } |
496 | | |
497 | | Status ColumnReader::prune_predicates_by_zone_map( |
498 | | std::vector<std::shared_ptr<ColumnPredicate>>& predicates, const int column_id, |
499 | 480k | bool* pruned) const { |
500 | 480k | *pruned = false; |
501 | 480k | if (_zone_map_index == nullptr) { |
502 | 0 | return Status::OK(); |
503 | 0 | } |
504 | | |
505 | 480k | ZoneMap zone_map; |
506 | 480k | RETURN_IF_ERROR(ZoneMap::from_proto(*_segment_zone_map, _data_type, zone_map)); |
507 | 480k | if (zone_map.pass_all) { |
508 | 0 | return Status::OK(); |
509 | 0 | } |
510 | | |
511 | 967k | for (auto it = predicates.begin(); it != predicates.end();) { |
512 | 486k | auto predicate = *it; |
513 | 486k | if (predicate->column_id() == column_id && predicate->is_always_true(zone_map)) { |
514 | 36 | *pruned = true; |
515 | 36 | it = predicates.erase(it); |
516 | 486k | } else { |
517 | 486k | ++it; |
518 | 486k | } |
519 | 486k | } |
520 | 480k | return Status::OK(); |
521 | 480k | } |
522 | | |
523 | | bool ColumnReader::_zone_map_match_condition(const ZoneMap& zone_map, |
524 | 506k | const AndBlockColumnPredicate* col_predicates) const { |
525 | 506k | if (zone_map.pass_all) { |
526 | 0 | return true; |
527 | 0 | } |
528 | | |
529 | 506k | return col_predicates->evaluate_and(zone_map); |
530 | 506k | } |
531 | | |
532 | | Status ColumnReader::_get_filtered_pages( |
533 | | const AndBlockColumnPredicate* col_predicates, |
534 | | const std::vector<std::shared_ptr<const ColumnPredicate>>* delete_predicates, |
535 | 3.75k | std::vector<uint32_t>* page_indexes, const ColumnIteratorOptions& iter_opts) { |
536 | 3.75k | RETURN_IF_ERROR(_load_zone_map_index(_use_index_page_cache, _opts.kept_in_memory, iter_opts)); |
537 | | |
538 | 3.75k | const std::vector<ZoneMapPB>& zone_maps = _zone_map_index->page_zone_maps(); |
539 | 3.75k | size_t page_size = _zone_map_index->num_pages(); |
540 | 7.50k | for (size_t i = 0; i < page_size; ++i) { |
541 | 3.75k | if (zone_maps[i].pass_all()) { |
542 | 3.34k | page_indexes->push_back(cast_set<uint32_t>(i)); |
543 | 3.34k | } else { |
544 | 408 | segment_v2::ZoneMap zone_map; |
545 | 408 | RETURN_IF_ERROR(ZoneMap::from_proto(zone_maps[i], _data_type, zone_map)); |
546 | 408 | if (_zone_map_match_condition(zone_map, col_predicates)) { |
547 | 406 | bool should_read = true; |
548 | 406 | if (delete_predicates != nullptr) { |
549 | 0 | for (auto del_pred : *delete_predicates) { |
550 | | // TODO: Both `min_value` and `max_value` should be 0 or neither should be 0. |
551 | | // So nullable only need to judge once. |
552 | 0 | if (del_pred->evaluate_del(zone_map)) { |
553 | 0 | should_read = false; |
554 | 0 | break; |
555 | 0 | } |
556 | 0 | } |
557 | 0 | } |
558 | 406 | if (should_read) { |
559 | 406 | page_indexes->push_back(cast_set<uint32_t>(i)); |
560 | 406 | } |
561 | 406 | } |
562 | 408 | } |
563 | 3.75k | } |
564 | 3.75k | VLOG(1) << "total-pages: " << page_size << " not-filtered-pages: " << page_indexes->size() |
565 | 4 | << " filtered-percent:" |
566 | 4 | << 1.0 - (static_cast<double>(page_indexes->size()) / |
567 | 4 | (static_cast<double>(page_size) * 1.0)); |
568 | 3.75k | return Status::OK(); |
569 | 3.75k | } |
570 | | |
571 | | Status ColumnReader::_calculate_row_ranges(const std::vector<uint32_t>& page_indexes, |
572 | | RowRanges* row_ranges, |
573 | 3.74k | const ColumnIteratorOptions& iter_opts) { |
574 | 3.74k | row_ranges->clear(); |
575 | 3.74k | RETURN_IF_ERROR(_load_ordinal_index(_use_index_page_cache, _opts.kept_in_memory, iter_opts)); |
576 | 3.75k | for (auto i : page_indexes) { |
577 | 3.75k | ordinal_t page_first_id = _ordinal_index->get_first_ordinal(i); |
578 | 3.75k | ordinal_t page_last_id = _ordinal_index->get_last_ordinal(i); |
579 | 3.75k | RowRanges page_row_ranges(RowRanges::create_single(page_first_id, page_last_id + 1)); |
580 | 3.75k | RowRanges::ranges_union(*row_ranges, page_row_ranges, row_ranges); |
581 | 3.75k | } |
582 | 3.74k | return Status::OK(); |
583 | 3.74k | } |
584 | | |
585 | | Status ColumnReader::get_row_ranges_by_bloom_filter(const AndBlockColumnPredicate* col_predicates, |
586 | | RowRanges* row_ranges, |
587 | 0 | const ColumnIteratorOptions& iter_opts) { |
588 | 0 | RETURN_IF_ERROR(_load_ordinal_index(_use_index_page_cache, _opts.kept_in_memory, iter_opts)); |
589 | 0 | RETURN_IF_ERROR( |
590 | 0 | _load_bloom_filter_index(_use_index_page_cache, _opts.kept_in_memory, iter_opts)); |
591 | 0 | RowRanges bf_row_ranges; |
592 | 0 | std::unique_ptr<BloomFilterIndexIterator> bf_iter; |
593 | 0 | RETURN_IF_ERROR(_bloom_filter_index->new_iterator(&bf_iter, iter_opts.stats)); |
594 | 0 | size_t range_size = row_ranges->range_size(); |
595 | | // get covered page ids |
596 | 0 | std::set<uint32_t> page_ids; |
597 | 0 | for (int i = 0; i < range_size; ++i) { |
598 | 0 | int64_t from = row_ranges->get_range_from(i); |
599 | 0 | int64_t idx = from; |
600 | 0 | int64_t to = row_ranges->get_range_to(i); |
601 | 0 | auto iter = _ordinal_index->seek_at_or_before(from); |
602 | 0 | while (idx < to && iter.valid()) { |
603 | 0 | page_ids.insert(iter.page_index()); |
604 | 0 | idx = iter.last_ordinal() + 1; |
605 | 0 | iter.next(); |
606 | 0 | } |
607 | 0 | } |
608 | 0 | for (auto& pid : page_ids) { |
609 | 0 | std::unique_ptr<BloomFilter> bf; |
610 | 0 | RETURN_IF_ERROR(bf_iter->read_bloom_filter(pid, &bf)); |
611 | 0 | if (col_predicates->evaluate_and(bf.get())) { |
612 | 0 | bf_row_ranges.add(RowRange(_ordinal_index->get_first_ordinal(pid), |
613 | 0 | _ordinal_index->get_last_ordinal(pid) + 1)); |
614 | 0 | } |
615 | 0 | } |
616 | 0 | RowRanges::ranges_intersection(*row_ranges, bf_row_ranges, row_ranges); |
617 | 0 | return Status::OK(); |
618 | 0 | } |
619 | | |
620 | | Status ColumnReader::_load_ordinal_index(bool use_page_cache, bool kept_in_memory, |
621 | 105k | const ColumnIteratorOptions& iter_opts) { |
622 | 105k | if (!_ordinal_index) { |
623 | 0 | return Status::InternalError("ordinal_index not inited"); |
624 | 0 | } |
625 | 105k | return _ordinal_index->load(use_page_cache, kept_in_memory, iter_opts.stats); |
626 | 105k | } |
627 | | |
628 | | Status ColumnReader::_load_zone_map_index(bool use_page_cache, bool kept_in_memory, |
629 | 3.75k | const ColumnIteratorOptions& iter_opts) { |
630 | 3.75k | if (_zone_map_index != nullptr) { |
631 | 3.75k | return _zone_map_index->load(use_page_cache, kept_in_memory, iter_opts.stats); |
632 | 3.75k | } |
633 | 0 | return Status::OK(); |
634 | 3.75k | } |
635 | | |
// Creates (at most once per index id) the IndexReader matching `index_meta`
// and caches it in _index_readers. Serialized by the exclusive
// _load_index_lock; new_index_iterator() reads the map under a shared lock.
Status ColumnReader::_load_index(const std::shared_ptr<IndexFileReader>& index_file_reader,
                                 const TabletIndex* index_meta) {
    std::unique_lock<std::shared_mutex> wlock(_load_index_lock);

    if (index_meta == nullptr) {
        return Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
                "Failed to load inverted index: index metadata is null");
    }

    // Already materialized by a previous call — nothing to do.
    auto it = _index_readers.find(index_meta->index_id());
    if (it != _index_readers.end()) {
        return Status::OK();
    }

    bool should_analyzer =
            inverted_index::InvertedIndexAnalyzer::should_analyzer(index_meta->properties());

    // For array columns the index is built over the item type (cached by the
    // constructor), not the array type itself.
    FieldType type;
    if (_meta_type == FieldType::OLAP_FIELD_TYPE_ARRAY) {
        type = _meta_children_column_type;
    } else {
        type = _type_info->type();
    }

    // ANN indexes have a dedicated reader and skip the type dispatch below.
    if (index_meta->index_type() == IndexType::ANN) {
        _index_readers[index_meta->index_id()] =
                std::make_shared<AnnIndexReader>(index_meta, index_file_reader);
        return Status::OK();
    }

    IndexReaderPtr index_reader;

    // Pick the reader by column type: full-text vs plain string inverted index
    // for strings (depending on analyzer properties), BKD for numerics.
    // CLucene constructors may throw, so each creation is wrapped in try/catch
    // and converted to a Status.
    if (is_string_type(type)) {
        if (should_analyzer) {
            try {
                index_reader = FullTextIndexReader::create_shared(index_meta, index_file_reader);
            } catch (const CLuceneError& e) {
                return Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
                        "create FullTextIndexReader error: {}", e.what());
            }
        } else {
            try {
                index_reader =
                        StringTypeInvertedIndexReader::create_shared(index_meta, index_file_reader);
            } catch (const CLuceneError& e) {
                return Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
                        "create StringTypeInvertedIndexReader error: {}", e.what());
            }
        }
    } else if (is_numeric_type(type)) {
        try {
            index_reader = BkdIndexReader::create_shared(index_meta, index_file_reader);
        } catch (const CLuceneError& e) {
            return Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
                    "create BkdIndexReader error: {}", e.what());
        }
    } else {
        return Status::Error<ErrorCode::INVERTED_INDEX_NOT_SUPPORTED>(
                "Field type {} is not supported for inverted index", type);
    }
    _index_readers[index_meta->index_id()] = index_reader;
    return Status::OK();
}
699 | | |
700 | 3.66k | bool ColumnReader::has_bloom_filter_index(bool ngram) const { |
701 | 3.67k | if (_bloom_filter_index == nullptr) return false; |
702 | | |
703 | 18.4E | if (ngram) { |
704 | 0 | return _bloom_filter_index->algorithm() == BloomFilterAlgorithmPB::NGRAM_BLOOM_FILTER; |
705 | 18.4E | } else { |
706 | 18.4E | return _bloom_filter_index->algorithm() != BloomFilterAlgorithmPB::NGRAM_BLOOM_FILTER; |
707 | 18.4E | } |
708 | 18.4E | } |
709 | | |
710 | | Status ColumnReader::_load_bloom_filter_index(bool use_page_cache, bool kept_in_memory, |
711 | 0 | const ColumnIteratorOptions& iter_opts) { |
712 | 0 | if (_bloom_filter_index != nullptr) { |
713 | 0 | return _bloom_filter_index->load(use_page_cache, kept_in_memory, iter_opts.stats); |
714 | 0 | } |
715 | 0 | return Status::OK(); |
716 | 0 | } |
717 | | |
718 | | Status ColumnReader::seek_at_or_before(ordinal_t ordinal, OrdinalPageIndexIterator* iter, |
719 | 102k | const ColumnIteratorOptions& iter_opts) { |
720 | 102k | RETURN_IF_ERROR(_load_ordinal_index(_use_index_page_cache, _opts.kept_in_memory, iter_opts)); |
721 | 102k | *iter = _ordinal_index->seek_at_or_before(ordinal); |
722 | 102k | if (!iter->valid()) { |
723 | 0 | return Status::NotFound("Failed to seek to ordinal {}, ", ordinal); |
724 | 0 | } |
725 | 102k | return Status::OK(); |
726 | 102k | } |
727 | | |
// Expose the ordinal index reader after lazily loading its pages.
// `reader` is an out-parameter aliasing the reader owned by this ColumnReader;
// the caller must not use it beyond this object's lifetime.
Status ColumnReader::get_ordinal_index_reader(OrdinalIndexReader*& reader,
                                              OlapReaderStatistics* index_load_stats) {
    // A null ordinal index indicates a construction bug, so fail fast (CHECK)
    // rather than returning a recoverable error.
    CHECK(_ordinal_index) << fmt::format("ordinal index is null for column reader of type {}",
                                         std::to_string(int(_meta_type)));
    RETURN_IF_ERROR(
            _ordinal_index->load(_use_index_page_cache, _opts.kept_in_memory, index_load_stats));
    reader = _ordinal_index.get();
    return Status::OK();
}
737 | | |
// Convenience overload: create an iterator with no storage read options.
Status ColumnReader::new_iterator(ColumnIteratorUPtr* iterator, const TabletColumn* tablet_column) {
    return new_iterator(iterator, tablet_column, nullptr);
}
741 | | |
742 | | Status ColumnReader::new_iterator(ColumnIteratorUPtr* iterator, const TabletColumn* tablet_column, |
743 | 7.73M | const StorageReadOptions* opt) { |
744 | 7.73M | if (is_empty()) { |
745 | 996 | *iterator = std::make_unique<EmptyFileColumnIterator>(); |
746 | 996 | return Status::OK(); |
747 | 996 | } |
748 | 7.73M | if (is_scalar_type(_meta_type)) { |
749 | 7.72M | if (is_string_type(_meta_type)) { |
750 | 4.80M | *iterator = std::make_unique<StringFileColumnIterator>(shared_from_this()); |
751 | 4.80M | } else { |
752 | 2.92M | *iterator = std::make_unique<FileColumnIterator>(shared_from_this()); |
753 | 2.92M | } |
754 | 18.4E | (*iterator)->set_column_name(tablet_column ? tablet_column->name() : ""); |
755 | 7.72M | return Status::OK(); |
756 | 7.72M | } else { |
757 | 4.47k | auto type = _meta_type; |
758 | 4.47k | switch (type) { |
759 | 32 | case FieldType::OLAP_FIELD_TYPE_AGG_STATE: { |
760 | 32 | return new_agg_state_iterator(iterator); |
761 | 0 | } |
762 | 402 | case FieldType::OLAP_FIELD_TYPE_STRUCT: { |
763 | 402 | return new_struct_iterator(iterator, tablet_column); |
764 | 0 | } |
765 | 2.23k | case FieldType::OLAP_FIELD_TYPE_ARRAY: { |
766 | 2.23k | return new_array_iterator(iterator, tablet_column); |
767 | 0 | } |
768 | 1.40k | case FieldType::OLAP_FIELD_TYPE_MAP: { |
769 | 1.40k | return new_map_iterator(iterator, tablet_column); |
770 | 0 | } |
771 | 0 | default: |
772 | 0 | return Status::NotSupported("unsupported type to create iterator: {}", |
773 | 0 | std::to_string(int(type))); |
774 | 4.47k | } |
775 | 4.47k | } |
776 | 7.73M | } |
777 | | |
778 | 32 | Status ColumnReader::new_agg_state_iterator(ColumnIteratorUPtr* iterator) { |
779 | 32 | *iterator = std::make_unique<FileColumnIterator>(shared_from_this()); |
780 | 32 | return Status::OK(); |
781 | 32 | } |
782 | | |
783 | | Status ColumnReader::new_array_iterator(ColumnIteratorUPtr* iterator, |
784 | 2.23k | const TabletColumn* tablet_column) { |
785 | 2.23k | ColumnIteratorUPtr item_iterator; |
786 | 2.23k | RETURN_IF_ERROR(_sub_readers[0]->new_iterator( |
787 | 2.23k | &item_iterator, tablet_column && tablet_column->get_subtype_count() > 0 |
788 | 2.23k | ? &tablet_column->get_sub_column(0) |
789 | 2.23k | : nullptr)); |
790 | | |
791 | 2.23k | item_iterator->set_column_name(tablet_column ? tablet_column->get_sub_column(0).name() : ""); |
792 | | |
793 | 2.23k | ColumnIteratorUPtr offset_iterator; |
794 | 2.23k | RETURN_IF_ERROR(_sub_readers[1]->new_iterator(&offset_iterator, nullptr)); |
795 | 2.23k | auto* file_iter = static_cast<FileColumnIterator*>(offset_iterator.release()); |
796 | 2.23k | OffsetFileColumnIteratorUPtr ofcIter = std::make_unique<OffsetFileColumnIterator>( |
797 | 2.23k | std::unique_ptr<FileColumnIterator>(file_iter)); |
798 | | |
799 | 2.23k | ColumnIteratorUPtr null_iterator; |
800 | 2.23k | if (is_nullable()) { |
801 | 2.01k | RETURN_IF_ERROR(_sub_readers[2]->new_iterator(&null_iterator, nullptr)); |
802 | 2.01k | } |
803 | 2.23k | *iterator = std::make_unique<ArrayFileColumnIterator>(shared_from_this(), std::move(ofcIter), |
804 | 2.23k | std::move(item_iterator), |
805 | 2.23k | std::move(null_iterator)); |
806 | 2.23k | return Status::OK(); |
807 | 2.23k | } |
808 | | |
809 | | Status ColumnReader::new_map_iterator(ColumnIteratorUPtr* iterator, |
810 | 1.40k | const TabletColumn* tablet_column) { |
811 | 1.40k | ColumnIteratorUPtr key_iterator; |
812 | 1.40k | RETURN_IF_ERROR(_sub_readers[0]->new_iterator( |
813 | 1.40k | &key_iterator, tablet_column && tablet_column->get_subtype_count() > 1 |
814 | 1.40k | ? &tablet_column->get_sub_column(0) |
815 | 1.40k | : nullptr)); |
816 | 1.40k | key_iterator->set_column_name(tablet_column ? tablet_column->get_sub_column(0).name() : ""); |
817 | 1.40k | ColumnIteratorUPtr val_iterator; |
818 | 1.40k | RETURN_IF_ERROR(_sub_readers[1]->new_iterator( |
819 | 1.40k | &val_iterator, tablet_column && tablet_column->get_subtype_count() > 1 |
820 | 1.40k | ? &tablet_column->get_sub_column(1) |
821 | 1.40k | : nullptr)); |
822 | 1.40k | val_iterator->set_column_name(tablet_column ? tablet_column->get_sub_column(1).name() : ""); |
823 | 1.40k | ColumnIteratorUPtr offsets_iterator; |
824 | 1.40k | RETURN_IF_ERROR(_sub_readers[2]->new_iterator(&offsets_iterator, nullptr)); |
825 | 1.40k | auto* file_iter = static_cast<FileColumnIterator*>(offsets_iterator.release()); |
826 | 1.40k | OffsetFileColumnIteratorUPtr ofcIter = std::make_unique<OffsetFileColumnIterator>( |
827 | 1.40k | std::unique_ptr<FileColumnIterator>(file_iter)); |
828 | | |
829 | 1.40k | ColumnIteratorUPtr null_iterator; |
830 | 1.40k | if (is_nullable()) { |
831 | 1.21k | RETURN_IF_ERROR(_sub_readers[3]->new_iterator(&null_iterator, nullptr)); |
832 | 1.21k | } |
833 | 1.40k | *iterator = std::make_unique<MapFileColumnIterator>( |
834 | 1.40k | shared_from_this(), std::move(null_iterator), std::move(ofcIter), |
835 | 1.40k | std::move(key_iterator), std::move(val_iterator)); |
836 | 1.40k | return Status::OK(); |
837 | 1.40k | } |
838 | | |
839 | | Status ColumnReader::new_struct_iterator(ColumnIteratorUPtr* iterator, |
840 | 400 | const TabletColumn* tablet_column) { |
841 | 400 | std::vector<ColumnIteratorUPtr> sub_column_iterators; |
842 | 400 | size_t child_size = is_nullable() ? _sub_readers.size() - 1 : _sub_readers.size(); |
843 | 400 | size_t tablet_column_size = tablet_column ? tablet_column->get_sub_columns().size() : 0; |
844 | 400 | sub_column_iterators.reserve(child_size); |
845 | | |
846 | 3.00k | for (uint64_t i = 0; i < child_size; i++) { |
847 | 2.60k | ColumnIteratorUPtr sub_column_iterator; |
848 | 2.60k | RETURN_IF_ERROR(_sub_readers[i]->new_iterator( |
849 | 2.60k | &sub_column_iterator, tablet_column ? &tablet_column->get_sub_column(i) : nullptr)); |
850 | 2.60k | sub_column_iterator->set_column_name(tablet_column ? tablet_column->get_sub_column(i).name() |
851 | 2.60k | : ""); |
852 | 2.60k | sub_column_iterators.emplace_back(std::move(sub_column_iterator)); |
853 | 2.60k | } |
854 | | |
855 | | // create default_iterator for schema-change behavior which increase column |
856 | 400 | for (size_t i = child_size; i < tablet_column_size; i++) { |
857 | 0 | TabletColumn column = tablet_column->get_sub_column(i); |
858 | 0 | ColumnIteratorUPtr it; |
859 | 0 | RETURN_IF_ERROR(Segment::new_default_iterator(column, &it)); |
860 | 0 | it->set_column_name(column.name()); |
861 | 0 | sub_column_iterators.emplace_back(std::move(it)); |
862 | 0 | } |
863 | | |
864 | 400 | ColumnIteratorUPtr null_iterator; |
865 | 402 | if (is_nullable()) { |
866 | 402 | RETURN_IF_ERROR(_sub_readers[child_size]->new_iterator(&null_iterator, nullptr)); |
867 | 402 | } |
868 | 400 | *iterator = std::make_unique<StructFileColumnIterator>( |
869 | 400 | shared_from_this(), std::move(null_iterator), std::move(sub_column_iterators)); |
870 | 400 | return Status::OK(); |
871 | 400 | } |
872 | | |
// Strip this iterator's own column name from the front of each access path and
// return the remaining sub-paths for child iterators.
// - A path that becomes empty after stripping targets this column itself: mark
//   this column as needed (set_need_to_read) and drop the path from the result.
// - A path with components left is kept so a sub iterator can consume it.
// Returns an error if any path is empty or does not start with _column_name.
Result<TColumnAccessPaths> ColumnIterator::_get_sub_access_paths(
        const TColumnAccessPaths& access_paths) {
    TColumnAccessPaths sub_access_paths = access_paths;
    for (auto it = sub_access_paths.begin(); it != sub_access_paths.end();) {
        TColumnAccessPath& name_path = *it;
        if (name_path.data_access_path.path.empty()) {
            return ResultError(
                    Status::InternalError("Invalid access path for struct column: path is empty"));
        }

        // The first path component must name this column (case-insensitive).
        if (!StringCaseEqual()(name_path.data_access_path.path[0], _column_name)) {
            return ResultError(Status::InternalError(
                    R"(Invalid access path for column: expected name "{}", got "{}")", _column_name,
                    name_path.data_access_path.path[0]));
        }

        // Consume this column's own name component.
        name_path.data_access_path.path.erase(name_path.data_access_path.path.begin());
        if (!name_path.data_access_path.path.empty()) {
            // Deeper components remain: keep the path for sub iterators.
            ++it;
        } else {
            // The path ends here, so the whole column must be read; remove it.
            set_need_to_read();
            it = sub_access_paths.erase(it);
        }
    }
    return sub_access_paths;
}
899 | | |
900 | | ///====================== MapFileColumnIterator ============================//// |
901 | | MapFileColumnIterator::MapFileColumnIterator(std::shared_ptr<ColumnReader> reader, |
902 | | ColumnIteratorUPtr null_iterator, |
903 | | OffsetFileColumnIteratorUPtr offsets_iterator, |
904 | | ColumnIteratorUPtr key_iterator, |
905 | | ColumnIteratorUPtr val_iterator) |
906 | 1.40k | : _map_reader(reader), |
907 | 1.40k | _offsets_iterator(std::move(offsets_iterator)), |
908 | 1.40k | _key_iterator(std::move(key_iterator)), |
909 | 1.40k | _val_iterator(std::move(val_iterator)) { |
910 | 1.40k | if (_map_reader->is_nullable()) { |
911 | 1.21k | _null_iterator = std::move(null_iterator); |
912 | 1.21k | } |
913 | 1.40k | } |
914 | | |
915 | 1.39k | Status MapFileColumnIterator::init(const ColumnIteratorOptions& opts) { |
916 | 1.39k | if (_reading_flag == ReadingFlag::SKIP_READING) { |
917 | 0 | DLOG(INFO) << "Map column iterator column " << _column_name << " skip reading."; |
918 | 0 | return Status::OK(); |
919 | 0 | } |
920 | 1.39k | RETURN_IF_ERROR(_key_iterator->init(opts)); |
921 | 1.39k | RETURN_IF_ERROR(_val_iterator->init(opts)); |
922 | 1.39k | RETURN_IF_ERROR(_offsets_iterator->init(opts)); |
923 | 1.39k | if (_map_reader->is_nullable()) { |
924 | 1.21k | RETURN_IF_ERROR(_null_iterator->init(opts)); |
925 | 1.21k | } |
926 | 1.39k | return Status::OK(); |
927 | 1.39k | } |
928 | | |
// Seek all sub iterators to map row `ord`. The null map and offsets are seeked
// by row ordinal; the key/value iterators are seeked to the element ordinal
// obtained by peeking the offsets page at the new position.
Status MapFileColumnIterator::seek_to_ordinal(ordinal_t ord) {
    if (_reading_flag == ReadingFlag::SKIP_READING) {
        DLOG(INFO) << "Map column iterator column " << _column_name << " skip reading.";
        return Status::OK();
    }

    if (read_null_map_only()) {
        // In NULL_MAP_ONLY mode, only seek the null iterator; skip offset/key/val iterators
        if (_map_reader->is_nullable() && _null_iterator) {
            RETURN_IF_ERROR(_null_iterator->seek_to_ordinal(ord));
        }
        return Status::OK();
    }

    if (_map_reader->is_nullable()) {
        RETURN_IF_ERROR(_null_iterator->seek_to_ordinal(ord));
    }
    RETURN_IF_ERROR(_offsets_iterator->seek_to_ordinal(ord));
    if (read_offset_only()) {
        // In OFFSET_ONLY mode, key/value iterators are SKIP_READING, no need to seek them
        return Status::OK();
    }
    // Translate the row ordinal into an element ordinal via the offsets page:
    // peek (without consuming) the first offset at the current position, then
    // position both element iterators there.
    ordinal_t offset = 0;
    RETURN_IF_ERROR(_offsets_iterator->_peek_one_offset(&offset));
    RETURN_IF_ERROR(_key_iterator->seek_to_ordinal(offset));
    RETURN_IF_ERROR(_val_iterator->seek_to_ordinal(offset));
    return Status::OK();
}
958 | | |
959 | | Status MapFileColumnIterator::init_cache_block_prefetch( |
960 | 0 | const SegmentCacheBlockPrefetchParams& params) { |
961 | 0 | RETURN_IF_ERROR(_offsets_iterator->init_cache_block_prefetch(params)); |
962 | 0 | if (_map_reader->is_nullable()) { |
963 | 0 | RETURN_IF_ERROR(_null_iterator->init_cache_block_prefetch(params)); |
964 | 0 | } |
965 | 0 | RETURN_IF_ERROR(_key_iterator->init_cache_block_prefetch(params)); |
966 | 0 | RETURN_IF_ERROR(_val_iterator->init_cache_block_prefetch(params)); |
967 | 0 | return Status::OK(); |
968 | 0 | } |
969 | | |
970 | | void MapFileColumnIterator::collect_cache_block_prefetch_iterators( |
971 | | std::map<FileAccessRangeBuildMethod, std::vector<ColumnIterator*>>& iterators, |
972 | 0 | FileAccessRangeBuildMethod init_method) { |
973 | 0 | _offsets_iterator->collect_cache_block_prefetch_iterators(iterators, init_method); |
974 | 0 | if (_map_reader->is_nullable()) { |
975 | 0 | _null_iterator->collect_cache_block_prefetch_iterators(iterators, init_method); |
976 | 0 | } |
977 | | // the actual data pages to read of key/value column depends on the read result of offset column, |
978 | | // so we can't init prefetch blocks according to rowids, just prefetch all data blocks here. |
979 | 0 | _key_iterator->collect_cache_block_prefetch_iterators( |
980 | 0 | iterators, FileAccessRangeBuildMethod::ALL_DATA_PAGES); |
981 | 0 | _val_iterator->collect_cache_block_prefetch_iterators( |
982 | 0 | iterators, FileAccessRangeBuildMethod::ALL_DATA_PAGES); |
983 | 0 | } |
984 | | |
// Read up to *n consecutive map rows into `dst`, advancing all sub iterators.
// On return *n holds the number of rows actually read. `dst` may be a plain
// ColumnMap or a ColumnNullable wrapping one. Honors the meta-only reading
// modes: SKIP_READING fills defaults, NULL_MAP_ONLY reads only the null map,
// OFFSET_ONLY reads offsets and fills key/value with defaults.
Status MapFileColumnIterator::next_batch(size_t* n, MutableColumnPtr& dst, bool* has_null) {
    if (_reading_flag == ReadingFlag::SKIP_READING) {
        DLOG(INFO) << "Map column iterator column " << _column_name << " skip reading.";
        dst->insert_many_defaults(*n);
        return Status::OK();
    }

    if (read_null_map_only()) {
        // NULL_MAP_ONLY mode: read null map, fill nested ColumnMap with empty defaults
        DORIS_CHECK(dst->is_nullable());
        auto& nullable_col = assert_cast<ColumnNullable&>(*dst);
        auto null_map_ptr = nullable_col.get_null_map_column_ptr();
        size_t num_read = *n;
        if (_null_iterator) {
            bool null_signs_has_null = false;
            RETURN_IF_ERROR(
                    _null_iterator->next_batch(&num_read, null_map_ptr, &null_signs_has_null));
        } else {
            // schema-change: column became nullable but old segment has no null data
            auto& null_map = assert_cast<ColumnUInt8&, TypeCheckOnRelease::DISABLE>(*null_map_ptr);
            null_map.insert_many_vals(0, num_read);
        }
        DCHECK(num_read == *n);
        // fill nested ColumnMap with empty (zero-element) maps
        auto& column_map = assert_cast<ColumnMap&, TypeCheckOnRelease::DISABLE>(
                nullable_col.get_nested_column());
        column_map.insert_many_defaults(num_read);
        *has_null = true;
        return Status::OK();
    }

    // Unwrap the nullable layer (if any) to reach the ColumnMap.
    auto& column_map = assert_cast<ColumnMap&, TypeCheckOnRelease::DISABLE>(
            dst->is_nullable() ? static_cast<ColumnNullable&>(*dst).get_nested_column() : *dst);
    auto column_offsets_ptr = column_map.get_offsets_column().assume_mutable();
    bool offsets_has_null = false;
    // Remember where the newly appended offsets start, so _calculate_offsets
    // can convert them from segment-absolute to column-relative values.
    ssize_t start = column_offsets_ptr->size();
    RETURN_IF_ERROR(_offsets_iterator->next_batch(n, column_offsets_ptr, &offsets_has_null));
    if (*n == 0) {
        return Status::OK();
    }
    auto& column_offsets = static_cast<ColumnArray::ColumnOffsets&>(*column_offsets_ptr);
    RETURN_IF_ERROR(_offsets_iterator->_calculate_offsets(start, column_offsets));
    DCHECK(column_offsets.get_data().back() >= column_offsets.get_data()[start - 1]);
    // Total number of key/value elements spanned by the rows just read.
    // NOTE: index [start - 1] may be -1 when the column was empty; the PODArray
    // used here supports that (see the original "-1 is valid" remark).
    size_t num_items =
            column_offsets.get_data().back() - column_offsets.get_data()[start - 1]; // -1 is valid
    auto key_ptr = column_map.get_keys().assume_mutable();
    auto val_ptr = column_map.get_values().assume_mutable();

    if (num_items > 0) {
        if (read_offset_only()) {
            // OFFSET_ONLY mode: skip reading actual key/value data, fill with defaults
            key_ptr->insert_many_defaults(num_items);
            val_ptr->insert_many_defaults(num_items);
        } else {
            size_t num_read = num_items;
            bool key_has_null = false;
            bool val_has_null = false;
            RETURN_IF_ERROR(_key_iterator->next_batch(&num_read, key_ptr, &key_has_null));
            RETURN_IF_ERROR(_val_iterator->next_batch(&num_read, val_ptr, &val_has_null));
            DCHECK(num_read == num_items);
        }

        // Publish the (possibly reallocated) key/value columns back into the map.
        column_map.get_keys_ptr() = std::move(key_ptr);
        column_map.get_values_ptr() = std::move(val_ptr);
    }

    if (dst->is_nullable()) {
        size_t num_read = *n;
        auto null_map_ptr = static_cast<ColumnNullable&>(*dst).get_null_map_column_ptr();
        // Linked schema change from not-null to nullable does not rewrite the
        // segment data or its footer meta, so `dst` may be nullable while the
        // old segment has no null-map data. In that case fill the null map
        // with zeros (all not-null) by default.
        if (_null_iterator) {
            bool null_signs_has_null = false;
            RETURN_IF_ERROR(
                    _null_iterator->next_batch(&num_read, null_map_ptr, &null_signs_has_null));
        } else {
            auto& null_map = assert_cast<ColumnUInt8&, TypeCheckOnRelease::DISABLE>(*null_map_ptr);
            null_map.insert_many_vals(0, num_read);
        }
        DCHECK(num_read == *n);
    }
    return Status::OK();
}
1070 | | |
// Read the map rows identified by `rowids` (ascending, `count` entries) into
// `dst`. Strategy: bulk-read the null map and each row's [start, next_start)
// element range from the offsets column, coalesce adjacent element ranges into
// runs, then read each run of key/value elements with one seek + next_batch.
Status MapFileColumnIterator::read_by_rowids(const rowid_t* rowids, const size_t count,
                                             MutableColumnPtr& dst) {
    if (_reading_flag == ReadingFlag::SKIP_READING) {
        DLOG(INFO) << "File column iterator column " << _column_name << " skip reading.";
        dst->insert_many_defaults(count);
        return Status::OK();
    }

    if (read_null_map_only()) {
        // NULL_MAP_ONLY mode: read null map by rowids, fill nested ColumnMap with empty defaults
        DORIS_CHECK(dst->is_nullable());
        auto& nullable_col = assert_cast<ColumnNullable&>(*dst);
        if (_null_iterator) {
            auto null_map_ptr = nullable_col.get_null_map_column_ptr();
            RETURN_IF_ERROR(_null_iterator->read_by_rowids(rowids, count, null_map_ptr));
        } else {
            // schema-change: column became nullable but old segment has no null data
            auto null_map_ptr = nullable_col.get_null_map_column_ptr();
            auto& null_map = assert_cast<ColumnUInt8&, TypeCheckOnRelease::DISABLE>(*null_map_ptr);
            null_map.insert_many_vals(0, count);
        }
        // fill nested ColumnMap with empty (zero-element) maps
        auto& column_map = assert_cast<ColumnMap&, TypeCheckOnRelease::DISABLE>(
                nullable_col.get_nested_column());
        column_map.insert_many_defaults(count);
        return Status::OK();
    }

    if (count == 0) {
        return Status::OK();
    }
    // resolve ColumnMap and nullable wrapper
    const auto* column_map = check_and_get_column<ColumnMap>(
            dst->is_nullable() ? static_cast<ColumnNullable&>(*dst).get_nested_column() : *dst);
    auto offsets_ptr = column_map->get_offsets_column().assume_mutable();
    auto& offsets = static_cast<ColumnArray::ColumnOffsets&>(*offsets_ptr);
    // `base` is the element count already present in dst; appended offsets
    // continue the prefix sum from here.
    size_t base = offsets.get_data().empty() ? 0 : offsets.get_data().back();

    // 1. bulk read null-map if nullable
    std::vector<uint8_t> null_mask; // 0: not null, 1: null
    if (_map_reader->is_nullable()) {
        // For nullable map columns, the destination column must also be nullable.
        if (UNLIKELY(!dst->is_nullable())) {
            return Status::InternalError(
                    "unexpected non-nullable destination column for nullable map reader");
        }
        auto null_map_ptr = static_cast<ColumnNullable&>(*dst).get_null_map_column_ptr();
        size_t null_before = null_map_ptr->size();
        RETURN_IF_ERROR(_null_iterator->read_by_rowids(rowids, count, null_map_ptr));
        // extract a light-weight view to decide element reads
        auto& null_map_col = assert_cast<ColumnUInt8&>(*null_map_ptr);
        null_mask.reserve(count);
        for (size_t i = 0; i < count; ++i) {
            null_mask.push_back(null_map_col.get_element(null_before + i));
        }
    } else if (dst->is_nullable()) {
        // Linked schema change from not-null to nullable does not rewrite the
        // segment data or its footer meta, so `dst` may be nullable while the
        // old data is not; fill the null map with zeros (all not-null).
        auto null_map_ptr = static_cast<ColumnNullable&>(*dst).get_null_map_column_ptr();
        auto& null_map = assert_cast<ColumnUInt8&>(*null_map_ptr);
        null_map.insert_many_vals(0, count);
    }

    // 2. bulk read start ordinals for requested rows
    MutableColumnPtr starts_col = ColumnOffset64::create();
    starts_col->reserve(count);
    RETURN_IF_ERROR(_offsets_iterator->read_by_rowids(rowids, count, starts_col));

    // 3. bulk read next-start ordinals for rowid+1 (within bounds)
    std::vector<rowid_t> next_rowids(count);
    for (size_t i = 0; i < count; ++i) {
        uint64_t nr = rowids[i] + 1;
        next_rowids[i] = nr < _map_reader->num_rows() ? static_cast<rowid_t>(nr)
                                                      : static_cast<rowid_t>(0); // placeholder
    }
    MutableColumnPtr next_starts_col = ColumnOffset64::create();
    next_starts_col->reserve(count);
    // read for all; we'll fix out-of-bound cases below
    RETURN_IF_ERROR(_offsets_iterator->read_by_rowids(next_rowids.data(), count, next_starts_col));

    // 4. fix next_start for rows whose next_rowid is out-of-bound (rowid == num_rows-1)
    for (size_t i = 0; i < count; ++i) {
        if (rowids[i] + 1 >= _map_reader->num_rows()) {
            // seek to the last row and consume one to move decoder to end-of-page,
            // then peek page-tail sentinel next_array_item_ordinal as next_start
            RETURN_IF_ERROR(_offsets_iterator->seek_to_ordinal(rowids[i]));
            size_t one = 1;
            bool has_null_unused = false;
            MutableColumnPtr tmp = ColumnOffset64::create();
            RETURN_IF_ERROR(_offsets_iterator->next_batch(&one, tmp, &has_null_unused));
            ordinal_t ns = 0;
            RETURN_IF_ERROR(_offsets_iterator->_peek_one_offset(&ns));
            // overwrite with sentinel
            assert_cast<ColumnOffset64&, TypeCheckOnRelease::DISABLE>(*next_starts_col)
                    .get_data()[i] = ns;
        }
    }

    // 5. compute sizes and append offsets prefix-sum
    auto& starts_data = assert_cast<ColumnOffset64&>(*starts_col).get_data();
    auto& next_starts_data = assert_cast<ColumnOffset64&>(*next_starts_col).get_data();
    std::vector<size_t> sizes(count, 0);
    size_t acc = base;
    // NOTE(review): .back() is taken here without the empty() guard used for
    // `base` above — presumably safe because the PODArray tolerates index -1
    // (see next_batch's "-1 is valid"); confirm against the PODArray contract.
    const auto original_size = offsets.get_data().back();
    offsets.get_data().reserve(offsets.get_data().size() + count);
    for (size_t i = 0; i < count; ++i) {
        size_t sz = static_cast<size_t>(next_starts_data[i] - starts_data[i]);
        if (_map_reader->is_nullable() && !null_mask.empty() && null_mask[i]) {
            sz = 0; // null rows do not consume elements
        }
        sizes[i] = sz;
        acc += sz;
        offsets.get_data().push_back(acc);
    }

    // 6. read key/value elements for non-empty sizes
    // NOTE(review): unlike next_batch, keys_ptr/vals_ptr are not re-assigned
    // into column_map afterwards — verify assume_mutable aliases in place here.
    auto keys_ptr = column_map->get_keys().assume_mutable();
    auto vals_ptr = column_map->get_values().assume_mutable();

    // Coalesce consecutive element ranges into runs: a row whose start equals
    // the previous run's end extends that run; otherwise the pending run is
    // flushed (seek + next_batch) and a new run begins.
    size_t this_run = sizes[0];
    auto start_idx = starts_data[0];
    auto last_idx = starts_data[0] + this_run;
    for (size_t i = 1; i < count; ++i) {
        size_t sz = sizes[i];
        if (sz == 0) {
            continue;
        }
        auto start = static_cast<ordinal_t>(starts_data[i]);
        if (start != last_idx) {
            size_t n = this_run;
            bool dummy_has_null = false;

            if (this_run != 0) {
                if (_key_iterator->reading_flag() != ReadingFlag::SKIP_READING) {
                    RETURN_IF_ERROR(_key_iterator->seek_to_ordinal(start_idx));
                    RETURN_IF_ERROR(_key_iterator->next_batch(&n, keys_ptr, &dummy_has_null));
                    DCHECK(n == this_run);
                }

                if (_val_iterator->reading_flag() != ReadingFlag::SKIP_READING) {
                    n = this_run;
                    RETURN_IF_ERROR(_val_iterator->seek_to_ordinal(start_idx));
                    RETURN_IF_ERROR(_val_iterator->next_batch(&n, vals_ptr, &dummy_has_null));
                    DCHECK(n == this_run);
                }
            }
            start_idx = start;
            this_run = sz;
            last_idx = start + sz;
            continue;
        }

        this_run += sz;
        last_idx += sz;
    }

    // Flush the final pending run; skipped sub iterators get defaults instead.
    size_t n = this_run;
    const size_t total_count = offsets.get_data().back() - original_size;
    bool dummy_has_null = false;
    if (_key_iterator->reading_flag() != ReadingFlag::SKIP_READING) {
        if (this_run != 0) {
            RETURN_IF_ERROR(_key_iterator->seek_to_ordinal(start_idx));
            RETURN_IF_ERROR(_key_iterator->next_batch(&n, keys_ptr, &dummy_has_null));
            DCHECK(n == this_run);
        }
    } else {
        keys_ptr->insert_many_defaults(total_count);
    }

    if (_val_iterator->reading_flag() != ReadingFlag::SKIP_READING) {
        if (this_run != 0) {
            n = this_run;
            RETURN_IF_ERROR(_val_iterator->seek_to_ordinal(start_idx));
            RETURN_IF_ERROR(_val_iterator->next_batch(&n, vals_ptr, &dummy_has_null));
            DCHECK(n == this_run);
        }
    } else {
        vals_ptr->insert_many_defaults(total_count);
    }

    return Status::OK();
}
1255 | | |
1256 | 226 | void MapFileColumnIterator::set_need_to_read() { |
1257 | 226 | set_reading_flag(ReadingFlag::NEED_TO_READ); |
1258 | 226 | _key_iterator->set_need_to_read(); |
1259 | 226 | _val_iterator->set_need_to_read(); |
1260 | 226 | } |
1261 | | |
1262 | 226 | void MapFileColumnIterator::remove_pruned_sub_iterators() { |
1263 | 226 | _key_iterator->remove_pruned_sub_iterators(); |
1264 | 226 | _val_iterator->remove_pruned_sub_iterators(); |
1265 | 226 | } |
1266 | | |
1267 | | Status MapFileColumnIterator::set_access_paths(const TColumnAccessPaths& all_access_paths, |
1268 | 216 | const TColumnAccessPaths& predicate_access_paths) { |
1269 | 216 | if (all_access_paths.empty()) { |
1270 | 0 | return Status::OK(); |
1271 | 0 | } |
1272 | | |
1273 | 216 | if (!predicate_access_paths.empty()) { |
1274 | 0 | set_reading_flag(ReadingFlag::READING_FOR_PREDICATE); |
1275 | 0 | DLOG(INFO) << "Map column iterator set sub-column " << _column_name |
1276 | 0 | << " to READING_FOR_PREDICATE"; |
1277 | 0 | } |
1278 | | |
1279 | 216 | auto sub_all_access_paths = DORIS_TRY(_get_sub_access_paths(all_access_paths)); |
1280 | 216 | auto sub_predicate_access_paths = DORIS_TRY(_get_sub_access_paths(predicate_access_paths)); |
1281 | | |
1282 | 216 | if (sub_all_access_paths.empty()) { |
1283 | 216 | return Status::OK(); |
1284 | 216 | } |
1285 | | |
1286 | | // Check for meta-only modes (OFFSET_ONLY or NULL_MAP_ONLY) |
1287 | 0 | _check_and_set_meta_read_mode(sub_all_access_paths); |
1288 | 0 | if (read_offset_only()) { |
1289 | 0 | _key_iterator->set_reading_flag(ReadingFlag::SKIP_READING); |
1290 | 0 | _val_iterator->set_reading_flag(ReadingFlag::SKIP_READING); |
1291 | 0 | DLOG(INFO) << "Map column iterator set column " << _column_name |
1292 | 0 | << " to OFFSET_ONLY reading mode, key/value columns set to SKIP_READING"; |
1293 | 0 | return Status::OK(); |
1294 | 0 | } |
1295 | 0 | if (read_null_map_only()) { |
1296 | 0 | _key_iterator->set_reading_flag(ReadingFlag::SKIP_READING); |
1297 | 0 | _val_iterator->set_reading_flag(ReadingFlag::SKIP_READING); |
1298 | 0 | DLOG(INFO) << "Map column iterator set column " << _column_name |
1299 | 0 | << " to NULL_MAP_ONLY reading mode, key/value columns set to SKIP_READING"; |
1300 | 0 | return Status::OK(); |
1301 | 0 | } |
1302 | | |
1303 | 0 | TColumnAccessPaths key_all_access_paths; |
1304 | 0 | TColumnAccessPaths val_all_access_paths; |
1305 | 0 | TColumnAccessPaths key_predicate_access_paths; |
1306 | 0 | TColumnAccessPaths val_predicate_access_paths; |
1307 | |
|
1308 | 2 | for (auto paths : sub_all_access_paths) { |
1309 | 2 | if (paths.data_access_path.path[0] == ACCESS_ALL) { |
1310 | | // ACCESS_ALL means element_at(map, key) style access: the key column must be |
1311 | | // fully read so that the runtime can match the requested key, while any sub-path |
1312 | | // qualifiers (e.g. OFFSET) apply only to the value column. |
1313 | | // For key: create a path with just the column name (= full data access). |
1314 | 1 | TColumnAccessPath key_path; |
1315 | 1 | key_path.__set_type(paths.type); |
1316 | 1 | TDataAccessPath key_data_path; |
1317 | 1 | key_data_path.__set_path({_key_iterator->column_name()}); |
1318 | 1 | key_path.__set_data_access_path(key_data_path); |
1319 | 1 | key_all_access_paths.emplace_back(std::move(key_path)); |
1320 | | // For value: pass the full sub-path so qualifiers like OFFSET propagate. |
1321 | 1 | paths.data_access_path.path[0] = _val_iterator->column_name(); |
1322 | 1 | val_all_access_paths.emplace_back(paths); |
1323 | 1 | } else if (paths.data_access_path.path[0] == ACCESS_MAP_KEYS) { |
1324 | 1 | paths.data_access_path.path[0] = _key_iterator->column_name(); |
1325 | 1 | key_all_access_paths.emplace_back(paths); |
1326 | 1 | } else if (paths.data_access_path.path[0] == ACCESS_MAP_VALUES) { |
1327 | 0 | paths.data_access_path.path[0] = _val_iterator->column_name(); |
1328 | 0 | val_all_access_paths.emplace_back(paths); |
1329 | 0 | } |
1330 | 2 | } |
1331 | 0 | const auto need_read_keys = !key_all_access_paths.empty(); |
1332 | 0 | const auto need_read_values = !val_all_access_paths.empty(); |
1333 | |
|
1334 | 0 | for (auto paths : sub_predicate_access_paths) { |
1335 | 0 | if (paths.data_access_path.path[0] == ACCESS_ALL) { |
1336 | | // Same logic as above: key needs full data, value gets the sub-path. |
1337 | 0 | TColumnAccessPath key_path; |
1338 | 0 | key_path.__set_type(paths.type); |
1339 | 0 | TDataAccessPath key_data_path; |
1340 | 0 | key_data_path.__set_path({_key_iterator->column_name()}); |
1341 | 0 | key_path.__set_data_access_path(key_data_path); |
1342 | 0 | key_predicate_access_paths.emplace_back(std::move(key_path)); |
1343 | 0 | paths.data_access_path.path[0] = _val_iterator->column_name(); |
1344 | 0 | val_predicate_access_paths.emplace_back(paths); |
1345 | 0 | } else if (paths.data_access_path.path[0] == ACCESS_MAP_KEYS) { |
1346 | 0 | paths.data_access_path.path[0] = _key_iterator->column_name(); |
1347 | 0 | key_predicate_access_paths.emplace_back(paths); |
1348 | 0 | } else if (paths.data_access_path.path[0] == ACCESS_MAP_VALUES) { |
1349 | 0 | paths.data_access_path.path[0] = _val_iterator->column_name(); |
1350 | 0 | val_predicate_access_paths.emplace_back(paths); |
1351 | 0 | } |
1352 | 0 | } |
1353 | |
|
1354 | 2 | if (need_read_keys) { |
1355 | 2 | _key_iterator->set_reading_flag(ReadingFlag::NEED_TO_READ); |
1356 | 2 | RETURN_IF_ERROR( |
1357 | 2 | _key_iterator->set_access_paths(key_all_access_paths, key_predicate_access_paths)); |
1358 | 18.4E | } else { |
1359 | 18.4E | _key_iterator->set_reading_flag(ReadingFlag::SKIP_READING); |
1360 | 18.4E | DLOG(INFO) << "Map column iterator set key column to SKIP_READING"; |
1361 | 18.4E | } |
1362 | | |
1363 | 1 | if (need_read_values) { |
1364 | 1 | _val_iterator->set_reading_flag(ReadingFlag::NEED_TO_READ); |
1365 | 1 | RETURN_IF_ERROR( |
1366 | 1 | _val_iterator->set_access_paths(val_all_access_paths, val_predicate_access_paths)); |
1367 | 18.4E | } else { |
1368 | 18.4E | _val_iterator->set_reading_flag(ReadingFlag::SKIP_READING); |
1369 | 18.4E | DLOG(INFO) << "Map column iterator set value column to SKIP_READING"; |
1370 | 18.4E | } |
1371 | 0 | return Status::OK(); |
1372 | 0 | } |
1373 | | |
1374 | | //////////////////////////////////////////////////////////////////////////////// |
1375 | | |
1376 | | StructFileColumnIterator::StructFileColumnIterator( |
1377 | | std::shared_ptr<ColumnReader> reader, ColumnIteratorUPtr null_iterator, |
1378 | | std::vector<ColumnIteratorUPtr>&& sub_column_iterators) |
1379 | 406 | : _struct_reader(reader), _sub_column_iterators(std::move(sub_column_iterators)) { |
1380 | 406 | if (_struct_reader->is_nullable()) { |
1381 | 402 | _null_iterator = std::move(null_iterator); |
1382 | 402 | } |
1383 | 406 | } |
1384 | | |
1385 | 400 | Status StructFileColumnIterator::init(const ColumnIteratorOptions& opts) { |
1386 | 400 | if (_reading_flag == ReadingFlag::SKIP_READING) { |
1387 | 0 | DLOG(INFO) << "Struct column iterator column " << _column_name << " skip reading."; |
1388 | 0 | return Status::OK(); |
1389 | 0 | } |
1390 | | |
1391 | 2.58k | for (auto& column_iterator : _sub_column_iterators) { |
1392 | 2.58k | RETURN_IF_ERROR(column_iterator->init(opts)); |
1393 | 2.58k | } |
1394 | 402 | if (_struct_reader->is_nullable()) { |
1395 | 402 | RETURN_IF_ERROR(_null_iterator->init(opts)); |
1396 | 402 | } |
1397 | 400 | return Status::OK(); |
1398 | 400 | } |
1399 | | |
// Reads the next *n rows into dst. Three modes:
//   SKIP_READING  — read nothing, fill dst with defaults.
//   NULL_MAP_ONLY — read only the null map; the nested ColumnStruct gets defaults.
//   normal        — read every sub-column, then the null map (if dst is nullable).
//
// @param n        in: rows requested; out: rows actually read (per sub-iterators)
// @param dst      destination column (ColumnStruct, possibly wrapped in ColumnNullable)
// @param has_null set to true in NULL_MAP_ONLY mode; otherwise left untouched here
Status StructFileColumnIterator::next_batch(size_t* n, MutableColumnPtr& dst, bool* has_null) {
    if (_reading_flag == ReadingFlag::SKIP_READING) {
        DLOG(INFO) << "Struct column iterator column " << _column_name << " skip reading.";
        dst->insert_many_defaults(*n);
        return Status::OK();
    }

    if (read_null_map_only()) {
        // NULL_MAP_ONLY mode: read null map, fill nested ColumnStruct with empty defaults
        DORIS_CHECK(dst->is_nullable());
        auto& nullable_col = assert_cast<ColumnNullable&>(*dst);
        auto null_map_ptr = nullable_col.get_null_map_column_ptr();
        size_t num_read = *n;
        if (_null_iterator) {
            bool null_signs_has_null = false;
            RETURN_IF_ERROR(
                    _null_iterator->next_batch(&num_read, null_map_ptr, &null_signs_has_null));
        } else {
            // schema-change: column became nullable but old segment has no null data
            auto& null_map = assert_cast<ColumnUInt8&, TypeCheckOnRelease::DISABLE>(*null_map_ptr);
            null_map.insert_many_vals(0, num_read);
        }
        DCHECK(num_read == *n);
        // fill nested ColumnStruct with defaults to maintain consistent column sizes
        auto& column_struct = assert_cast<ColumnStruct&, TypeCheckOnRelease::DISABLE>(
                nullable_col.get_nested_column());
        column_struct.insert_many_defaults(num_read);
        *has_null = true;
        return Status::OK();
    }

    // Normal path: unwrap the nullable layer (if any) to reach the struct column.
    auto& column_struct = assert_cast<ColumnStruct&, TypeCheckOnRelease::DISABLE>(
            dst->is_nullable() ? static_cast<ColumnNullable&>(*dst).get_nested_column() : *dst);
    // Read each struct field from its own sub-iterator; all fields must yield *n rows.
    for (size_t i = 0; i < column_struct.tuple_size(); i++) {
        size_t num_read = *n;
        auto sub_column_ptr = column_struct.get_column(i).assume_mutable();
        bool column_has_null = false;
        RETURN_IF_ERROR(
                _sub_column_iterators[i]->next_batch(&num_read, sub_column_ptr, &column_has_null));
        DCHECK(num_read == *n);
        column_struct.get_column_ptr(i) = std::move(sub_column_ptr);
    }

    if (dst->is_nullable()) {
        size_t num_read = *n;
        auto null_map_ptr = static_cast<ColumnNullable&>(*dst).get_null_map_column_ptr();
        // in not-null to null linked-schemachange mode,
        // actually we do not change dat data include meta in footer,
        // so may dst from changed meta which is nullable but old data is not nullable,
        // if so, we should set null_map to all null by default
        if (_null_iterator) {
            bool null_signs_has_null = false;
            RETURN_IF_ERROR(
                    _null_iterator->next_batch(&num_read, null_map_ptr, &null_signs_has_null));
        } else {
            auto& null_map = assert_cast<ColumnUInt8&, TypeCheckOnRelease::DISABLE>(*null_map_ptr);
            null_map.insert_many_vals(0, num_read);
        }
        DCHECK(num_read == *n);
    }

    return Status::OK();
}
1463 | | |
1464 | 402 | Status StructFileColumnIterator::seek_to_ordinal(ordinal_t ord) { |
1465 | 402 | if (_reading_flag == ReadingFlag::SKIP_READING) { |
1466 | 0 | DLOG(INFO) << "Struct column iterator column " << _column_name << " skip reading."; |
1467 | 0 | return Status::OK(); |
1468 | 0 | } |
1469 | | |
1470 | 402 | if (read_null_map_only()) { |
1471 | | // In NULL_MAP_ONLY mode, only seek the null iterator; skip all sub-column iterators |
1472 | 0 | if (_struct_reader->is_nullable() && _null_iterator) { |
1473 | 0 | RETURN_IF_ERROR(_null_iterator->seek_to_ordinal(ord)); |
1474 | 0 | } |
1475 | 0 | return Status::OK(); |
1476 | 0 | } |
1477 | | |
1478 | 2.60k | for (auto& column_iterator : _sub_column_iterators) { |
1479 | 2.60k | RETURN_IF_ERROR(column_iterator->seek_to_ordinal(ord)); |
1480 | 2.60k | } |
1481 | 402 | if (_struct_reader->is_nullable()) { |
1482 | 402 | RETURN_IF_ERROR(_null_iterator->seek_to_ordinal(ord)); |
1483 | 402 | } |
1484 | 402 | return Status::OK(); |
1485 | 402 | } |
1486 | | |
1487 | | Status StructFileColumnIterator::init_cache_block_prefetch( |
1488 | 0 | const SegmentCacheBlockPrefetchParams& params) { |
1489 | 0 | for (auto& column_iterator : _sub_column_iterators) { |
1490 | 0 | RETURN_IF_ERROR(column_iterator->init_cache_block_prefetch(params)); |
1491 | 0 | } |
1492 | 0 | if (_struct_reader->is_nullable()) { |
1493 | 0 | RETURN_IF_ERROR(_null_iterator->init_cache_block_prefetch(params)); |
1494 | 0 | } |
1495 | 0 | return Status::OK(); |
1496 | 0 | } |
1497 | | |
1498 | | void StructFileColumnIterator::collect_cache_block_prefetch_iterators( |
1499 | | std::map<FileAccessRangeBuildMethod, std::vector<ColumnIterator*>>& iterators, |
1500 | 0 | FileAccessRangeBuildMethod init_method) { |
1501 | 0 | for (auto& column_iterator : _sub_column_iterators) { |
1502 | 0 | column_iterator->collect_cache_block_prefetch_iterators(iterators, init_method); |
1503 | 0 | } |
1504 | 0 | if (_struct_reader->is_nullable()) { |
1505 | 0 | _null_iterator->collect_cache_block_prefetch_iterators(iterators, init_method); |
1506 | 0 | } |
1507 | 0 | } |
1508 | | |
// Reads the rows identified by `rowids` into dst, coalescing consecutive
// ascending ids into runs so each run costs one seek plus one batch read.
//
// @param rowids row ids to fetch
// @param count  number of entries in rowids
// @param dst    destination column, appended to in rowid order
Status StructFileColumnIterator::read_by_rowids(const rowid_t* rowids, const size_t count,
                                                MutableColumnPtr& dst) {
    if (_reading_flag == ReadingFlag::SKIP_READING) {
        DLOG(INFO) << "Struct column iterator column " << _column_name << " skip reading.";
        dst->insert_many_defaults(count);
        return Status::OK();
    }

    if (count == 0) {
        return Status::OK();
    }

    size_t this_run = 1;        // length of the current consecutive run
    auto start_idx = rowids[0]; // first rowid of the current run
    auto last_idx = rowids[0];  // last rowid seen so far
    for (size_t i = 1; i < count; ++i) {
        if (last_idx == rowids[i] - 1) {
            // still consecutive: extend the run
            last_idx = rowids[i];
            this_run++;
            continue;
        }
        // run broken: flush the accumulated run with one seek + one batch read
        RETURN_IF_ERROR(seek_to_ordinal(start_idx));
        size_t num_read = this_run;
        RETURN_IF_ERROR(next_batch(&num_read, dst));
        DCHECK_EQ(num_read, this_run);

        start_idx = rowids[i];
        last_idx = rowids[i];
        this_run = 1;
    }

    // flush the final run
    RETURN_IF_ERROR(seek_to_ordinal(start_idx));
    size_t num_read = this_run;
    RETURN_IF_ERROR(next_batch(&num_read, dst));
    DCHECK_EQ(num_read, this_run);
    return Status::OK();
}
1546 | | |
// Marks this struct column and every field sub-column as required reads,
// recursing so nested collections are flagged too.
void StructFileColumnIterator::set_need_to_read() {
    set_reading_flag(ReadingFlag::NEED_TO_READ);
    for (auto& sub_iterator : _sub_column_iterators) {
        sub_iterator->set_need_to_read();
    }
}
1553 | | |
1554 | 342 | void StructFileColumnIterator::remove_pruned_sub_iterators() { |
1555 | 2.74k | for (auto it = _sub_column_iterators.begin(); it != _sub_column_iterators.end();) { |
1556 | 2.39k | auto& sub_iterator = *it; |
1557 | 2.39k | if (sub_iterator->reading_flag() == ReadingFlag::SKIP_READING) { |
1558 | 0 | DLOG(INFO) << "Struct column iterator remove pruned sub-column " |
1559 | 0 | << sub_iterator->column_name(); |
1560 | 0 | it = _sub_column_iterators.erase(it); |
1561 | 2.39k | } else { |
1562 | 2.39k | sub_iterator->remove_pruned_sub_iterators(); |
1563 | 2.39k | ++it; |
1564 | 2.39k | } |
1565 | 2.39k | } |
1566 | 342 | } |
1567 | | |
// Routes access paths to the matching field sub-columns by name; sub-columns
// that no path mentions are flagged SKIP_READING so their pages are never read.
// An empty sub-path list (after extracting this column's own segment) means
// "no pruning": every sub-column is read.
Status StructFileColumnIterator::set_access_paths(
        const TColumnAccessPaths& all_access_paths,
        const TColumnAccessPaths& predicate_access_paths) {
    if (all_access_paths.empty()) {
        return Status::OK();
    }

    if (!predicate_access_paths.empty()) {
        set_reading_flag(ReadingFlag::READING_FOR_PREDICATE);
        DLOG(INFO) << "Struct column iterator set sub-column " << _column_name
                   << " to READING_FOR_PREDICATE";
    }
    auto sub_all_access_paths = DORIS_TRY(_get_sub_access_paths(all_access_paths));
    auto sub_predicate_access_paths = DORIS_TRY(_get_sub_access_paths(predicate_access_paths));

    // Check for NULL_MAP_ONLY mode: only read null map, skip all sub-columns
    _check_and_set_meta_read_mode(sub_all_access_paths);
    if (read_null_map_only()) {
        for (auto& sub_iterator : _sub_column_iterators) {
            sub_iterator->set_reading_flag(ReadingFlag::SKIP_READING);
        }
        DLOG(INFO) << "Struct column iterator set column " << _column_name
                   << " to NULL_MAP_ONLY reading mode, all sub-columns set to SKIP_READING";
        return Status::OK();
    }

    const auto no_sub_column_to_skip = sub_all_access_paths.empty();
    const auto no_predicate_sub_column = sub_predicate_access_paths.empty();

    for (auto& sub_iterator : _sub_column_iterators) {
        const auto name = sub_iterator->column_name();
        bool need_to_read = no_sub_column_to_skip;
        TColumnAccessPaths sub_all_access_paths_of_this;
        if (!need_to_read) {
            // Collect the paths addressed to this sub-column.
            // NOTE(review): this match is case-sensitive (==) while the predicate
            // loop below uses StringCaseEqual — confirm which convention is intended.
            for (const auto& paths : sub_all_access_paths) {
                if (paths.data_access_path.path[0] == name) {
                    sub_all_access_paths_of_this.emplace_back(paths);
                }
            }
            need_to_read = !sub_all_access_paths_of_this.empty();
        }

        if (!need_to_read) {
            // NOTE(review): this sets the *parent* struct's flag inside the per-sub
            // loop, so the final parent flag is whatever the last iteration set (and
            // it overrides READING_FOR_PREDICATE set above) — verify this is intended.
            set_reading_flag(ReadingFlag::SKIP_READING);
            sub_iterator->set_reading_flag(ReadingFlag::SKIP_READING);
            DLOG(INFO) << "Struct column iterator set sub-column " << name << " to SKIP_READING";
            continue;
        }
        set_reading_flag(ReadingFlag::NEED_TO_READ);
        sub_iterator->set_reading_flag(ReadingFlag::NEED_TO_READ);

        TColumnAccessPaths sub_predicate_access_paths_of_this;

        if (!no_predicate_sub_column) {
            for (const auto& paths : sub_predicate_access_paths) {
                if (StringCaseEqual()(paths.data_access_path.path[0], name)) {
                    sub_predicate_access_paths_of_this.emplace_back(paths);
                }
            }
        }

        // Recurse so nested collections can prune their own children.
        RETURN_IF_ERROR(sub_iterator->set_access_paths(sub_all_access_paths_of_this,
                                                       sub_predicate_access_paths_of_this));
    }
    return Status::OK();
}
1634 | | |
1635 | | //////////////////////////////////////////////////////////////////////////////// |
// Initializes the wrapped ordinal iterator and pre-allocates the single-row
// scratch column reused by _peek_one_offset().
Status OffsetFileColumnIterator::init(const ColumnIteratorOptions& opts) {
    RETURN_IF_ERROR(_offset_iterator->init(opts));
    // allocate peek tmp column once
    _peek_tmp_col = ColumnOffset64::create();
    return Status::OK();
}
1642 | | |
1643 | 3.78k | Status OffsetFileColumnIterator::next_batch(size_t* n, MutableColumnPtr& dst, bool* has_null) { |
1644 | 3.78k | RETURN_IF_ERROR(_offset_iterator->next_batch(n, dst, has_null)); |
1645 | 3.78k | return Status::OK(); |
1646 | 3.78k | } |
1647 | | |
1648 | 7.29k | Status OffsetFileColumnIterator::_peek_one_offset(ordinal_t* offset) { |
1649 | 7.29k | if (_offset_iterator->get_current_page()->has_remaining()) { |
1650 | 3.66k | PageDecoder* offset_page_decoder = _offset_iterator->get_current_page()->data_decoder.get(); |
1651 | 3.66k | size_t n = 1; |
1652 | 3.66k | _peek_tmp_col->clear(); |
1653 | 3.66k | RETURN_IF_ERROR(offset_page_decoder->peek_next_batch(&n, _peek_tmp_col)); // not null |
1654 | 3.66k | DCHECK(_peek_tmp_col->size() == 1); |
1655 | 3.66k | *offset = |
1656 | 3.66k | assert_cast<const ColumnOffset64*, TypeCheckOnRelease::DISABLE>(_peek_tmp_col.get()) |
1657 | 3.66k | ->get_element(0); |
1658 | 3.66k | } else { |
1659 | 3.63k | *offset = _offset_iterator->get_current_page()->next_array_item_ordinal; |
1660 | 3.63k | } |
1661 | 7.29k | return Status::OK(); |
1662 | 7.29k | } |
1663 | | |
// Offsets wrap a single ordinal iterator; just forward the prefetch params.
Status OffsetFileColumnIterator::init_cache_block_prefetch(
        const SegmentCacheBlockPrefetchParams& params) {
    return _offset_iterator->init_cache_block_prefetch(params);
}
1668 | | |
// Offsets wrap a single ordinal iterator; just forward the collection call.
void OffsetFileColumnIterator::collect_cache_block_prefetch_iterators(
        std::map<FileAccessRangeBuildMethod, std::vector<ColumnIterator*>>& iterators,
        FileAccessRangeBuildMethod init_method) {
    _offset_iterator->collect_cache_block_prefetch_iterators(iterators, init_method);
}
1674 | | |
1675 | | /** |
1676 | | * first_storage_offset read from page should smaller than next_storage_offset which here call _peek_one_offset from page, |
1677 | | and first_column_offset is keep in memory data which is different dimension with (first_storage_offset and next_storage_offset) |
1678 | | eg. step1. read page: first_storage_offset = 16382 |
1679 | | step2. read page below with _peek_one_offset(&last_offset): last_offset = 16387 |
1680 | | step3. first_offset = 126 which is calculate in column offsets |
1681 | | for loop column offsets element in size |
1682 | | we can calculate from first_storage_offset to next_storage_offset one by one to fill with offsets_data in memory column offsets |
1683 | | * @param start |
1684 | | * @param column_offsets |
1685 | | * @return |
1686 | | */ |
1687 | | Status OffsetFileColumnIterator::_calculate_offsets(ssize_t start, |
1688 | 3.51k | ColumnArray::ColumnOffsets& column_offsets) { |
1689 | 3.51k | ordinal_t next_storage_offset = 0; |
1690 | 3.51k | RETURN_IF_ERROR(_peek_one_offset(&next_storage_offset)); |
1691 | | |
1692 | | // calculate real offsets |
1693 | 3.51k | auto& offsets_data = column_offsets.get_data(); |
1694 | 3.51k | ordinal_t first_column_offset = offsets_data[start - 1]; // -1 is valid |
1695 | 3.51k | ordinal_t first_storage_offset = offsets_data[start]; |
1696 | 3.51k | DCHECK(next_storage_offset >= first_storage_offset); |
1697 | 1.80M | for (ssize_t i = start; i < offsets_data.size() - 1; ++i) { |
1698 | 1.80M | offsets_data[i] = first_column_offset + (offsets_data[i + 1] - first_storage_offset); |
1699 | 1.80M | } |
1700 | | // last offset |
1701 | 3.51k | offsets_data[offsets_data.size() - 1] = |
1702 | 3.51k | first_column_offset + (next_storage_offset - first_storage_offset); |
1703 | 3.51k | return Status::OK(); |
1704 | 3.51k | } |
1705 | | |
1706 | | //////////////////////////////////////////////////////////////////////////////// |
1707 | | ArrayFileColumnIterator::ArrayFileColumnIterator(std::shared_ptr<ColumnReader> reader, |
1708 | | OffsetFileColumnIteratorUPtr offset_reader, |
1709 | | ColumnIteratorUPtr item_iterator, |
1710 | | ColumnIteratorUPtr null_iterator) |
1711 | 2.24k | : _array_reader(reader), |
1712 | 2.24k | _offset_iterator(std::move(offset_reader)), |
1713 | 2.24k | _item_iterator(std::move(item_iterator)) { |
1714 | 2.24k | if (_array_reader->is_nullable()) { |
1715 | 2.01k | _null_iterator = std::move(null_iterator); |
1716 | 2.01k | } |
1717 | 2.24k | } |
1718 | | |
1719 | 2.23k | Status ArrayFileColumnIterator::init(const ColumnIteratorOptions& opts) { |
1720 | 2.23k | if (_reading_flag == ReadingFlag::SKIP_READING) { |
1721 | 0 | DLOG(INFO) << "Array column iterator column " << _column_name << " skip readking."; |
1722 | 0 | return Status::OK(); |
1723 | 0 | } |
1724 | | |
1725 | 2.23k | RETURN_IF_ERROR(_offset_iterator->init(opts)); |
1726 | 2.23k | RETURN_IF_ERROR(_item_iterator->init(opts)); |
1727 | 2.23k | if (_array_reader->is_nullable()) { |
1728 | 2.01k | RETURN_IF_ERROR(_null_iterator->init(opts)); |
1729 | 2.01k | } |
1730 | 2.23k | return Status::OK(); |
1731 | 2.23k | } |
1732 | | |
// Positions the item iterator at the first item of the array row the offset
// iterator currently points to. The `ord` parameter is unused here: the target
// item ordinal is derived by peeking the current offset page instead.
Status ArrayFileColumnIterator::_seek_by_offsets(ordinal_t ord) {
    if (read_offset_only()) {
        // In OFFSET_ONLY mode, item iterator is SKIP_READING, no need to seek it
        return Status::OK();
    }
    // using offsets info
    ordinal_t offset = 0;
    RETURN_IF_ERROR(_offset_iterator->_peek_one_offset(&offset));
    RETURN_IF_ERROR(_item_iterator->seek_to_ordinal(offset));
    return Status::OK();
}
1744 | | |
1745 | 2.28k | Status ArrayFileColumnIterator::seek_to_ordinal(ordinal_t ord) { |
1746 | 2.28k | if (_reading_flag == ReadingFlag::SKIP_READING) { |
1747 | 0 | DLOG(INFO) << "Array column iterator column " << _column_name << " skip reading."; |
1748 | 0 | return Status::OK(); |
1749 | 0 | } |
1750 | | |
1751 | 2.28k | if (read_null_map_only()) { |
1752 | | // In NULL_MAP_ONLY mode, only seek the null iterator; skip offset and item iterators |
1753 | 0 | if (_array_reader->is_nullable() && _null_iterator) { |
1754 | 0 | RETURN_IF_ERROR(_null_iterator->seek_to_ordinal(ord)); |
1755 | 0 | } |
1756 | 0 | return Status::OK(); |
1757 | 0 | } |
1758 | | |
1759 | 2.28k | RETURN_IF_ERROR(_offset_iterator->seek_to_ordinal(ord)); |
1760 | 2.28k | if (_array_reader->is_nullable()) { |
1761 | 2.06k | RETURN_IF_ERROR(_null_iterator->seek_to_ordinal(ord)); |
1762 | 2.06k | } |
1763 | 2.28k | return _seek_by_offsets(ord); |
1764 | 2.28k | } |
1765 | | |
// Reads the next *n array rows into dst. Three modes:
//   SKIP_READING  — read nothing, fill dst with defaults.
//   NULL_MAP_ONLY — read only the null map; nested ColumnArray gets empty arrays.
//   normal        — read offsets, rebase them, read the item data they span,
//                   then the null map (if dst is nullable).
Status ArrayFileColumnIterator::next_batch(size_t* n, MutableColumnPtr& dst, bool* has_null) {
    if (_reading_flag == ReadingFlag::SKIP_READING) {
        DLOG(INFO) << "Array column iterator column " << _column_name << " skip reading.";
        dst->insert_many_defaults(*n);
        return Status::OK();
    }

    if (read_null_map_only()) {
        // NULL_MAP_ONLY mode: read null map, fill nested ColumnArray with empty defaults
        DORIS_CHECK(dst->is_nullable());
        auto& nullable_col = assert_cast<ColumnNullable&>(*dst);
        auto null_map_ptr = nullable_col.get_null_map_column_ptr();
        size_t num_read = *n;
        if (_null_iterator) {
            bool null_signs_has_null = false;
            RETURN_IF_ERROR(
                    _null_iterator->next_batch(&num_read, null_map_ptr, &null_signs_has_null));
        } else {
            // schema-change: column became nullable but old segment has no null data
            auto& null_map = assert_cast<ColumnUInt8&, TypeCheckOnRelease::DISABLE>(*null_map_ptr);
            null_map.insert_many_vals(0, num_read);
        }
        DCHECK(num_read == *n);
        // fill nested ColumnArray with empty (zero-length) arrays
        auto& column_array = assert_cast<ColumnArray&, TypeCheckOnRelease::DISABLE>(
                nullable_col.get_nested_column());
        column_array.insert_many_defaults(num_read);
        *has_null = true;
        return Status::OK();
    }

    // Unwrap the nullable layer (if any) to reach the array column.
    const auto* column_array = check_and_get_column<ColumnArray>(
            dst->is_nullable() ? static_cast<ColumnNullable&>(*dst).get_nested_column() : *dst);

    // Step 1: read raw storage offsets for *n rows, then rebase them into
    // in-memory offsets starting at `start` (see _calculate_offsets).
    bool offsets_has_null = false;
    auto column_offsets_ptr = column_array->get_offsets_column().assume_mutable();
    ssize_t start = column_offsets_ptr->size();
    RETURN_IF_ERROR(_offset_iterator->next_batch(n, column_offsets_ptr, &offsets_has_null));
    if (*n == 0) {
        return Status::OK();
    }
    auto& column_offsets = static_cast<ColumnArray::ColumnOffsets&>(*column_offsets_ptr);
    RETURN_IF_ERROR(_offset_iterator->_calculate_offsets(start, column_offsets));
    // Step 2: the rebased offsets tell us how many items the batch spans.
    size_t num_items =
            column_offsets.get_data().back() - column_offsets.get_data()[start - 1]; // -1 is valid
    auto column_items_ptr = column_array->get_data().assume_mutable();
    if (num_items > 0) {
        if (read_offset_only()) {
            // OFFSET_ONLY mode: skip reading actual item data, fill with defaults
            column_items_ptr->insert_many_defaults(num_items);
        } else {
            size_t num_read = num_items;
            bool items_has_null = false;
            RETURN_IF_ERROR(
                    _item_iterator->next_batch(&num_read, column_items_ptr, &items_has_null));
            DCHECK(num_read == num_items);
        }
    }

    // Step 3: read the null map last so its size matches the rows just produced.
    if (dst->is_nullable()) {
        auto null_map_ptr = static_cast<ColumnNullable&>(*dst).get_null_map_column_ptr();
        size_t num_read = *n;
        // in not-null to null linked-schemachange mode,
        // actually we do not change dat data include meta in footer,
        // so may dst from changed meta which is nullable but old data is not nullable,
        // if so, we should set null_map to all null by default
        if (_null_iterator) {
            bool null_signs_has_null = false;
            RETURN_IF_ERROR(
                    _null_iterator->next_batch(&num_read, null_map_ptr, &null_signs_has_null));
        } else {
            auto& null_map = assert_cast<ColumnUInt8&, TypeCheckOnRelease::DISABLE>(*null_map_ptr);
            null_map.insert_many_vals(0, num_read);
        }
        DCHECK(num_read == *n);
    }

    return Status::OK();
}
1845 | | |
// Forwards cache-block prefetch parameters to the offset, item, and (when the
// array is nullable) null iterators.
Status ArrayFileColumnIterator::init_cache_block_prefetch(
        const SegmentCacheBlockPrefetchParams& params) {
    RETURN_IF_ERROR(_offset_iterator->init_cache_block_prefetch(params));
    RETURN_IF_ERROR(_item_iterator->init_cache_block_prefetch(params));
    if (_array_reader->is_nullable()) {
        RETURN_IF_ERROR(_null_iterator->init_cache_block_prefetch(params));
    }
    return Status::OK();
}
1855 | | |
// Gathers the leaf iterators that participate in cache-block prefetch.
// The item iterator is forced to ALL_DATA_PAGES because its read range is only
// known after the offset column has been decoded.
void ArrayFileColumnIterator::collect_cache_block_prefetch_iterators(
        std::map<FileAccessRangeBuildMethod, std::vector<ColumnIterator*>>& iterators,
        FileAccessRangeBuildMethod init_method) {
    _offset_iterator->collect_cache_block_prefetch_iterators(iterators, init_method);
    // the actual data pages to read of item column depends on the read result of offset column,
    // so we can't init prefetch blocks according to rowids, just prefetch all data blocks here.
    _item_iterator->collect_cache_block_prefetch_iterators(
            iterators, FileAccessRangeBuildMethod::ALL_DATA_PAGES);
    if (_array_reader->is_nullable()) {
        _null_iterator->collect_cache_block_prefetch_iterators(iterators, init_method);
    }
}
1868 | | |
1869 | | Status ArrayFileColumnIterator::read_by_rowids(const rowid_t* rowids, const size_t count, |
1870 | 1.46k | MutableColumnPtr& dst) { |
1871 | 1.46k | if (_reading_flag == ReadingFlag::SKIP_READING) { |
1872 | 0 | DLOG(INFO) << "Array column iterator column " << _column_name << " skip reading."; |
1873 | 0 | dst->insert_many_defaults(count); |
1874 | 0 | return Status::OK(); |
1875 | 0 | } |
1876 | | |
1877 | 2.93k | for (size_t i = 0; i < count; ++i) { |
1878 | | // TODO(cambyszju): now read array one by one, need optimize later |
1879 | 1.46k | RETURN_IF_ERROR(seek_to_ordinal(rowids[i])); |
1880 | 1.46k | size_t num_read = 1; |
1881 | 1.46k | RETURN_IF_ERROR(next_batch(&num_read, dst)); |
1882 | 1.46k | } |
1883 | 1.46k | return Status::OK(); |
1884 | 1.46k | } |
1885 | | |
// Marks this array column and its item sub-column as required reads.
// Only the item iterator is propagated: init()/seek/next_batch always drive the
// offset and null iterators together with the array itself.
void ArrayFileColumnIterator::set_need_to_read() {
    set_reading_flag(ReadingFlag::NEED_TO_READ);
    _item_iterator->set_need_to_read();
}
1890 | | |
// Recursively removes pruned (SKIP_READING) descendants below the item iterator;
// the item iterator itself is never removed here.
void ArrayFileColumnIterator::remove_pruned_sub_iterators() {
    _item_iterator->remove_pruned_sub_iterators();
}
1894 | | |
1895 | | Status ArrayFileColumnIterator::set_access_paths(const TColumnAccessPaths& all_access_paths, |
1896 | 1.60k | const TColumnAccessPaths& predicate_access_paths) { |
1897 | 1.60k | if (all_access_paths.empty()) { |
1898 | 0 | return Status::OK(); |
1899 | 0 | } |
1900 | | |
1901 | 1.60k | if (!predicate_access_paths.empty()) { |
1902 | 0 | set_reading_flag(ReadingFlag::READING_FOR_PREDICATE); |
1903 | 0 | DLOG(INFO) << "Array column iterator set sub-column " << _column_name |
1904 | 0 | << " to READING_FOR_PREDICATE"; |
1905 | 0 | } |
1906 | | |
1907 | 1.60k | auto sub_all_access_paths = DORIS_TRY(_get_sub_access_paths(all_access_paths)); |
1908 | 1.60k | auto sub_predicate_access_paths = DORIS_TRY(_get_sub_access_paths(predicate_access_paths)); |
1909 | | |
1910 | | // Check for meta-only modes (OFFSET_ONLY or NULL_MAP_ONLY) |
1911 | 1.60k | _check_and_set_meta_read_mode(sub_all_access_paths); |
1912 | 1.60k | if (read_offset_only()) { |
1913 | 0 | _item_iterator->set_reading_flag(ReadingFlag::SKIP_READING); |
1914 | 0 | DLOG(INFO) << "Array column iterator set column " << _column_name |
1915 | 0 | << " to OFFSET_ONLY reading mode, item column set to SKIP_READING"; |
1916 | 0 | return Status::OK(); |
1917 | 0 | } |
1918 | 1.60k | if (read_null_map_only()) { |
1919 | 0 | _item_iterator->set_reading_flag(ReadingFlag::SKIP_READING); |
1920 | 0 | DLOG(INFO) << "Array column iterator set column " << _column_name |
1921 | 0 | << " to NULL_MAP_ONLY reading mode, item column set to SKIP_READING"; |
1922 | 0 | return Status::OK(); |
1923 | 0 | } |
1924 | | |
1925 | 1.60k | const auto no_sub_column_to_skip = sub_all_access_paths.empty(); |
1926 | 1.60k | const auto no_predicate_sub_column = sub_predicate_access_paths.empty(); |
1927 | | |
1928 | 1.60k | if (!no_sub_column_to_skip) { |
1929 | 1 | for (auto& path : sub_all_access_paths) { |
1930 | 1 | if (path.data_access_path.path[0] == ACCESS_ALL) { |
1931 | 1 | path.data_access_path.path[0] = _item_iterator->column_name(); |
1932 | 1 | } |
1933 | 1 | } |
1934 | 1 | } |
1935 | | |
1936 | 1.60k | if (!no_predicate_sub_column) { |
1937 | 0 | for (auto& path : sub_predicate_access_paths) { |
1938 | 0 | if (path.data_access_path.path[0] == ACCESS_ALL) { |
1939 | 0 | path.data_access_path.path[0] = _item_iterator->column_name(); |
1940 | 0 | } |
1941 | 0 | } |
1942 | 0 | } |
1943 | | |
1944 | 1.61k | if (!no_sub_column_to_skip || !no_predicate_sub_column) { |
1945 | 1 | _item_iterator->set_reading_flag(ReadingFlag::NEED_TO_READ); |
1946 | 1 | RETURN_IF_ERROR( |
1947 | 1 | _item_iterator->set_access_paths(sub_all_access_paths, sub_predicate_access_paths)); |
1948 | 1 | } |
1949 | 1.60k | return Status::OK(); |
1950 | 1.60k | } |
1951 | | |
1952 | | //////////////////////////////////////////////////////////////////////////////// |
1953 | | // StringFileColumnIterator implementation |
1954 | | //////////////////////////////////////////////////////////////////////////////// |
1955 | | |
// String-specific iterator: delegates construction to FileColumnIterator;
// string behavior (OFFSET_ONLY handling) is layered on via init()/set_access_paths().
StringFileColumnIterator::StringFileColumnIterator(std::shared_ptr<ColumnReader> reader)
        : FileColumnIterator(std::move(reader)) {}
1958 | | |
1959 | 4.81M | Status StringFileColumnIterator::init(const ColumnIteratorOptions& opts) { |
1960 | 4.81M | if (read_offset_only()) { |
1961 | | // Propagate only_read_offsets to the FileColumnIterator's options |
1962 | 18 | auto modified_opts = opts; |
1963 | 18 | modified_opts.only_read_offsets = true; |
1964 | 18 | return FileColumnIterator::init(modified_opts); |
1965 | 18 | } |
1966 | 4.81M | return FileColumnIterator::init(opts); |
1967 | 4.81M | } |
1968 | | |
// Apply access paths to a string column iterator. May switch the iterator into
// OFFSET_ONLY (length()-style queries) or NULL_MAP_ONLY mode; rejects
// OFFSET_ONLY on CHAR columns, which cannot support it (see comment below).
Status StringFileColumnIterator::set_access_paths(
        const TColumnAccessPaths& all_access_paths,
        const TColumnAccessPaths& predicate_access_paths) {
    // Empty means "read the full column": nothing to configure.
    if (all_access_paths.empty()) {
        return Status::OK();
    }

    if (!predicate_access_paths.empty()) {
        set_reading_flag(ReadingFlag::READING_FOR_PREDICATE);
    }

    // Strip the column name from path[0] before checking for meta-only modes.
    // Raw paths look like ["col_name", "OFFSET"] or ["col_name", "NULL"].
    auto sub_all_access_paths = DORIS_TRY(_get_sub_access_paths(all_access_paths));
    _check_and_set_meta_read_mode(sub_all_access_paths);
    // OFFSET_ONLY mode is fundamentally incompatible with CHAR columns:
    // CHAR is stored padded to its declared length (see
    // OlapColumnDataConvertorChar::clone_and_padding), so the per-row length
    // recorded in dict word info / page headers is always the padded length
    // (e.g. 25 for CHAR(25)) — never the logical length expected by length().
    // Recovering the logical length requires scanning the chars buffer with
    // strnlen() (shrink_padding_chars), which OFFSET_ONLY by definition skips.
    // There is no partial-benefit path: any optimization that still produces
    // the correct length() result must read the chars buffer in full.
    //
    // FE (NestedColumnPruning) already filters CHAR slots out of the
    // OFFSET-only access plan, so reaching this branch means an FE/BE
    // contract violation. Fail loudly instead of silently falling back.
    if (read_offset_only() && get_reader() != nullptr &&
        get_reader()->get_meta_type() == FieldType::OLAP_FIELD_TYPE_CHAR) {
        return Status::InternalError(
                "OFFSET_ONLY access path is not supported on CHAR column '{}': CHAR is stored "
                "padded so the per-row length information available without reading the chars "
                "buffer is always the padded length, not the logical length. The FE planner "
                "must not emit an OFFSET access path for CHAR columns.",
                _column_name);
    }
    if (read_offset_only()) {
        DLOG(INFO) << "String column iterator set column " << _column_name
                   << " to OFFSET_ONLY reading mode";
    } else if (read_null_map_only()) {
        DLOG(INFO) << "String column iterator set column " << _column_name
                   << " to NULL_MAP_ONLY reading mode";
    }

    return Status::OK();
}
2016 | | |
2017 | | //////////////////////////////////////////////////////////////////////////////// |
2018 | | |
2019 | 7.73M | FileColumnIterator::FileColumnIterator(std::shared_ptr<ColumnReader> reader) : _reader(reader) {} |
2020 | | |
2021 | 1.97k | void ColumnIterator::_check_and_set_meta_read_mode(const TColumnAccessPaths& sub_all_access_paths) { |
2022 | 1.97k | for (const auto& path : sub_all_access_paths) { |
2023 | 24 | if (!path.data_access_path.path.empty()) { |
2024 | 24 | if (StringCaseEqual()(path.data_access_path.path[0], ACCESS_OFFSET)) { |
2025 | 19 | _read_mode = ReadMode::OFFSET_ONLY; |
2026 | 19 | return; |
2027 | 19 | } else if (StringCaseEqual()(path.data_access_path.path[0], ACCESS_NULL)) { |
2028 | 0 | _read_mode = ReadMode::NULL_MAP_ONLY; |
2029 | 0 | return; |
2030 | 0 | } |
2031 | 24 | } |
2032 | 24 | } |
2033 | 1.95k | _read_mode = ReadMode::DEFAULT; |
2034 | 1.95k | } |
2035 | | |
// Prepare the iterator for reading: install the per-iterator data file reader,
// resolve the compression codec, and (for query reads of dict-encoded
// predicate columns) determine whether every page is dictionary-encoded so
// predicates can later be rewritten against the dictionary.
// Note the ordering is load-bearing: _opts must be copied and its file_reader
// installed before any page read (including the seek below).
Status FileColumnIterator::init(const ColumnIteratorOptions& opts) {
    if (_reading_flag == ReadingFlag::SKIP_READING) {
        DLOG(INFO) << "File column iterator column " << _column_name << " skip reading.";
        return Status::OK();
    }

    _opts = opts;
    // Install the physical data reader for this iterator before any page read.
    // When cache-block prefetch is enabled, this call opens a dedicated
    // CacheBlockAwarePrefetchRemoteReader for this iterator. All subsequent
    // PageIO reads use _opts.file_reader, so data-page and dict-page read_at()
    // calls advance only this iterator's single prefetch pattern. Other columns
    // or another scan over the same column have their own FileColumnIterator and
    // therefore their own reader/pattern state.
    _data_file_reader = DORIS_TRY(_reader->_new_data_file_reader());
    _opts.file_reader = _data_file_reader.get();
    if (!_opts.use_page_cache) {
        _reader->disable_index_meta_cache();
    }
    RETURN_IF_ERROR(get_block_compression_codec(_reader->get_compression(), &_compress_codec));
    if (config::enable_low_cardinality_optimize &&
        opts.io_ctx.reader_type == ReaderType::READER_QUERY &&
        _reader->encoding_info()->encoding() == DICT_ENCODING) {
        auto dict_encoding_type = _reader->get_dict_encoding_type();
        // Only if the column is a predicate column, then we need check the all dict encoding flag
        // because we could rewrite the predciate to accelarate query speed. But if it is not a
        // predicate column, then it is useless. And it has a bad impact on cold read(first time read)
        // because it will load the column's ordinal index and zonemap index and maybe other indices.
        // it has bad impact on primary key query. For example, select * from table where pk = 1, and
        // the table has 2000 columns.
        if (dict_encoding_type == ColumnReader::UNKNOWN_DICT_ENCODING && opts.is_predicate_column) {
            // Probe the last page: per the writer's layout, if the final page is
            // still dict-encoded then the whole column is (the result is cached
            // on the reader so only the first iterator pays for the seek).
            RETURN_IF_ERROR(seek_to_ordinal(_reader->num_rows() - 1));
            _is_all_dict_encoding = _page.is_dict_encoding;
            _reader->set_dict_encoding_type(_is_all_dict_encoding
                                                    ? ColumnReader::ALL_DICT_ENCODING
                                                    : ColumnReader::PARTIAL_DICT_ENCODING);
        } else {
            _is_all_dict_encoding = dict_encoding_type == ColumnReader::ALL_DICT_ENCODING;
        }
    }
    return Status::OK();
}
2078 | | |
2079 | 7.74M | FileColumnIterator::~FileColumnIterator() = default; |
2080 | | |
2081 | 157k | Status FileColumnIterator::seek_to_ordinal(ordinal_t ord) { |
2082 | 157k | if (_reading_flag == ReadingFlag::SKIP_READING) { |
2083 | 0 | DLOG(INFO) << "File column iterator column " << _column_name << " skip reading."; |
2084 | 0 | return Status::OK(); |
2085 | 0 | } |
2086 | | |
2087 | | // if current page contains this row, we don't need to seek |
2088 | 157k | if (!_page || !_page.contains(ord) || !_page_iter.valid()) { |
2089 | 102k | RETURN_IF_ERROR(_reader->seek_at_or_before(ord, &_page_iter, _opts)); |
2090 | 102k | RETURN_IF_ERROR(_read_data_page(_page_iter)); |
2091 | 102k | } |
2092 | 157k | RETURN_IF_ERROR(_seek_to_pos_in_page(&_page, ord - _page.first_ordinal)); |
2093 | 157k | _current_ordinal = ord; |
2094 | 157k | return Status::OK(); |
2095 | 157k | } |
2096 | | |
// Rewind to the first ordinal of the currently loaded page.
Status FileColumnIterator::seek_to_page_start() {
    return seek_to_ordinal(_page.first_ordinal);
}
2100 | | |
2101 | 159k | Status FileColumnIterator::_seek_to_pos_in_page(ParsedPage* page, ordinal_t offset_in_page) const { |
2102 | 159k | if (page->offset_in_page == offset_in_page) { |
2103 | | // fast path, do nothing |
2104 | 120k | return Status::OK(); |
2105 | 120k | } |
2106 | | |
2107 | 39.4k | ordinal_t pos_in_data = offset_in_page; |
2108 | 39.4k | if (_page.has_null) { |
2109 | 7.27k | ordinal_t offset_in_data = 0; |
2110 | 7.27k | ordinal_t skips = offset_in_page; |
2111 | | |
2112 | 7.27k | if (offset_in_page > page->offset_in_page) { |
2113 | | // forward, reuse null bitmap |
2114 | 3.04k | skips = offset_in_page - page->offset_in_page; |
2115 | 3.04k | offset_in_data = page->data_decoder->current_index(); |
2116 | 4.22k | } else { |
2117 | | // rewind null bitmap, and |
2118 | 4.22k | page->null_decoder = RleDecoder<bool>((const uint8_t*)page->null_bitmap.data, |
2119 | 4.22k | cast_set<int>(page->null_bitmap.size), 1); |
2120 | 4.22k | } |
2121 | | |
2122 | 7.27k | auto skip_nulls = page->null_decoder.Skip(skips); |
2123 | 7.27k | pos_in_data = offset_in_data + skips - skip_nulls; |
2124 | 7.27k | } |
2125 | | |
2126 | 39.4k | RETURN_IF_ERROR(page->data_decoder->seek_to_position_in_page(pos_in_data)); |
2127 | 39.4k | page->offset_in_page = offset_in_page; |
2128 | 39.4k | return Status::OK(); |
2129 | 39.4k | } |
2130 | | |
// Delegate zone-map-based batch production to the underlying ColumnReader.
Status FileColumnIterator::next_batch_of_zone_map(size_t* n, MutableColumnPtr& dst) {
    return _reader->next_batch_of_zone_map(n, dst);
}
2134 | | |
// Read up to *n sequential rows starting at the current ordinal into `dst`.
// On return *n holds the number of rows actually read (less at end of column),
// and *has_null reports whether any read row was null.
// Three modes: NULL_MAP_ONLY (decode only the null bitmap, fill nested column
// with defaults), SKIP_READING (emit defaults only), and the normal full read.
Status FileColumnIterator::next_batch(size_t* n, MutableColumnPtr& dst, bool* has_null) {
    if (read_null_map_only()) {
        DLOG(INFO) << "File column iterator column " << _column_name
                   << " in NULL_MAP_ONLY mode, reading only null map.";
        DORIS_CHECK(dst->is_nullable());
        auto& nullable_col = assert_cast<ColumnNullable&>(*dst);
        auto& null_map_data = nullable_col.get_null_map_data();

        size_t remaining = *n;
        *has_null = false;
        while (remaining > 0) {
            // Cross page boundaries as needed; eos ends the read early.
            if (!_page.has_remaining()) {
                bool eos = false;
                RETURN_IF_ERROR(_load_next_page(&eos));
                if (eos) {
                    break;
                }
            }

            size_t nrows_in_page = std::min(remaining, _page.remaining());
            size_t nrows_to_read = nrows_in_page;
            if (_page.has_null) {
                // Consume the null bitmap run by run, appending 0/1 bytes to
                // the null map without touching the data decoder at all.
                while (nrows_to_read > 0) {
                    bool is_null = false;
                    size_t this_run = _page.null_decoder.GetNextRun(&is_null, nrows_to_read);
                    const size_t cur_size = null_map_data.size();
                    null_map_data.resize(cur_size + this_run);
                    memset(null_map_data.data() + cur_size, is_null ? 1 : 0, this_run);
                    if (is_null) {
                        *has_null = true;
                    }
                    nrows_to_read -= this_run;
                    _page.offset_in_page += this_run;
                    _current_ordinal += this_run;
                }
            } else {
                // Page has no nulls: append a zero-filled run in one shot.
                const size_t cur_size = null_map_data.size();
                null_map_data.resize(cur_size + nrows_to_read);
                memset(null_map_data.data() + cur_size, 0, nrows_to_read);
                _page.offset_in_page += nrows_to_read;
                _current_ordinal += nrows_to_read;
            }
            remaining -= nrows_in_page;
        }
        *n -= remaining;
        // Keep the nested column's size in lock-step with the null map.
        nullable_col.get_nested_column().insert_many_defaults(*n);
        return Status::OK();
    }

    if (_reading_flag == ReadingFlag::SKIP_READING) {
        DLOG(INFO) << "File column iterator column " << _column_name << " skip reading.";
        dst->insert_many_defaults(*n);
        return Status::OK();
    }

    // curr_size is captured only to account bytes_read at the end.
    size_t curr_size = dst->byte_size();
    dst->reserve(*n);
    size_t remaining = *n;
    *has_null = false;
    while (remaining > 0) {
        if (!_page.has_remaining()) {
            bool eos = false;
            RETURN_IF_ERROR(_load_next_page(&eos));
            if (eos) {
                break;
            }
        }

        // number of rows to be read from this page
        size_t nrows_in_page = std::min(remaining, _page.remaining());
        size_t nrows_to_read = nrows_in_page;
        if (_page.has_null) {
            // Alternate between null runs (insert defaults) and non-null runs
            // (decode from the data page) as dictated by the RLE null bitmap.
            while (nrows_to_read > 0) {
                bool is_null = false;
                size_t this_run = _page.null_decoder.GetNextRun(&is_null, nrows_to_read);
                // we use num_rows only for CHECK
                size_t num_rows = this_run;
                if (!is_null) {
                    RETURN_IF_ERROR(_page.data_decoder->next_batch(&num_rows, dst));
                    DCHECK_EQ(this_run, num_rows);
                } else {
                    *has_null = true;
                    auto* null_col = check_and_get_column<ColumnNullable>(dst.get());
                    if (null_col != nullptr) {
                        null_col->insert_many_defaults(this_run);
                    } else {
                        return Status::InternalError("unexpected column type in column reader");
                    }
                }

                nrows_to_read -= this_run;
                _page.offset_in_page += this_run;
                _current_ordinal += this_run;
            }
        } else {
            RETURN_IF_ERROR(_page.data_decoder->next_batch(&nrows_to_read, dst));
            DCHECK_EQ(nrows_to_read, nrows_in_page);

            _page.offset_in_page += nrows_to_read;
            _current_ordinal += nrows_to_read;
        }
        remaining -= nrows_in_page;
    }
    *n -= remaining;
    _opts.stats->bytes_read += (dst->byte_size() - curr_size) + BitmapSize(*n);

#ifdef BE_TEST
    _reader->check_data_by_zone_map_for_test(dst);
#endif
    return Status::OK();
}
2246 | | |
2247 | | Status FileColumnIterator::read_by_rowids(const rowid_t* rowids, const size_t count, |
2248 | 48.9k | MutableColumnPtr& dst) { |
2249 | 48.9k | if (read_null_map_only()) { |
2250 | 0 | DLOG(INFO) << "File column iterator column " << _column_name |
2251 | 0 | << " in NULL_MAP_ONLY mode, reading only null map by rowids."; |
2252 | |
|
2253 | 0 | DORIS_CHECK(dst->is_nullable()); |
2254 | 0 | auto& nullable_col = assert_cast<ColumnNullable&>(*dst); |
2255 | 0 | auto& null_map_data = nullable_col.get_null_map_data(); |
2256 | 0 | const size_t base_size = null_map_data.size(); |
2257 | 0 | null_map_data.resize(base_size + count); |
2258 | |
|
2259 | 0 | nullable_col.get_nested_column().insert_many_defaults(count); |
2260 | |
|
2261 | 0 | size_t remaining = count; |
2262 | 0 | size_t total_read_count = 0; |
2263 | 0 | size_t nrows_to_read = 0; |
2264 | 0 | while (remaining > 0) { |
2265 | 0 | RETURN_IF_ERROR(seek_to_ordinal(rowids[total_read_count])); |
2266 | | |
2267 | 0 | nrows_to_read = std::min(remaining, _page.remaining()); |
2268 | |
|
2269 | 0 | if (_page.has_null) { |
2270 | 0 | size_t already_read = 0; |
2271 | 0 | while ((nrows_to_read - already_read) > 0) { |
2272 | 0 | bool is_null = false; |
2273 | 0 | size_t this_run = std::min(nrows_to_read - already_read, _page.remaining()); |
2274 | 0 | if (UNLIKELY(this_run == 0)) { |
2275 | 0 | break; |
2276 | 0 | } |
2277 | 0 | this_run = _page.null_decoder.GetNextRun(&is_null, this_run); |
2278 | |
|
2279 | 0 | size_t offset = total_read_count + already_read; |
2280 | 0 | size_t this_read_count = 0; |
2281 | 0 | rowid_t current_ordinal_in_page = |
2282 | 0 | cast_set<uint32_t>(_page.offset_in_page + _page.first_ordinal); |
2283 | 0 | for (size_t i = 0; i < this_run; ++i) { |
2284 | 0 | if (rowids[offset + i] - current_ordinal_in_page >= this_run) { |
2285 | 0 | break; |
2286 | 0 | } |
2287 | 0 | this_read_count++; |
2288 | 0 | } |
2289 | |
|
2290 | 0 | if (this_read_count > 0) { |
2291 | 0 | memset(null_map_data.data() + base_size + offset, is_null ? 1 : 0, |
2292 | 0 | this_read_count); |
2293 | 0 | } |
2294 | |
|
2295 | 0 | already_read += this_read_count; |
2296 | 0 | _page.offset_in_page += this_run; |
2297 | 0 | } |
2298 | |
|
2299 | 0 | nrows_to_read = already_read; |
2300 | 0 | total_read_count += nrows_to_read; |
2301 | 0 | remaining -= nrows_to_read; |
2302 | 0 | } else { |
2303 | 0 | memset(null_map_data.data() + base_size + total_read_count, 0, nrows_to_read); |
2304 | 0 | total_read_count += nrows_to_read; |
2305 | 0 | remaining -= nrows_to_read; |
2306 | 0 | } |
2307 | 0 | } |
2308 | | |
2309 | 0 | null_map_data.resize(base_size + total_read_count); |
2310 | 0 | nullable_col.get_nested_column().insert_many_defaults(total_read_count); |
2311 | 0 | return Status::OK(); |
2312 | 0 | } |
2313 | | |
2314 | 48.9k | if (_reading_flag == ReadingFlag::SKIP_READING) { |
2315 | 0 | DLOG(INFO) << "File column iterator column " << _column_name << " skip reading."; |
2316 | 0 | dst->insert_many_defaults(count); |
2317 | 0 | return Status::OK(); |
2318 | 0 | } |
2319 | | |
2320 | 48.9k | size_t remaining = count; |
2321 | 48.9k | size_t total_read_count = 0; |
2322 | 48.9k | size_t nrows_to_read = 0; |
2323 | 98.4k | while (remaining > 0) { |
2324 | 49.4k | RETURN_IF_ERROR(seek_to_ordinal(rowids[total_read_count])); |
2325 | | |
2326 | | // number of rows to be read from this page |
2327 | 49.4k | nrows_to_read = std::min(remaining, _page.remaining()); |
2328 | | |
2329 | 49.4k | if (_page.has_null) { |
2330 | 5.07k | size_t already_read = 0; |
2331 | 17.5k | while ((nrows_to_read - already_read) > 0) { |
2332 | 12.5k | bool is_null = false; |
2333 | 12.5k | size_t this_run = std::min(nrows_to_read - already_read, _page.remaining()); |
2334 | 12.5k | if (UNLIKELY(this_run == 0)) { |
2335 | 0 | break; |
2336 | 0 | } |
2337 | 12.5k | this_run = _page.null_decoder.GetNextRun(&is_null, this_run); |
2338 | 12.5k | size_t offset = total_read_count + already_read; |
2339 | 12.5k | size_t this_read_count = 0; |
2340 | 12.5k | rowid_t current_ordinal_in_page = |
2341 | 12.5k | cast_set<uint32_t>(_page.offset_in_page + _page.first_ordinal); |
2342 | 114k | for (size_t i = 0; i < this_run; ++i) { |
2343 | 103k | if (rowids[offset + i] - current_ordinal_in_page >= this_run) { |
2344 | 1.00k | break; |
2345 | 1.00k | } |
2346 | 102k | this_read_count++; |
2347 | 102k | } |
2348 | | |
2349 | 12.5k | auto origin_index = _page.data_decoder->current_index(); |
2350 | 12.5k | if (this_read_count > 0) { |
2351 | 11.8k | if (is_null) { |
2352 | 7.98k | auto* null_col = check_and_get_column<ColumnNullable>(dst.get()); |
2353 | 7.98k | if (UNLIKELY(null_col == nullptr)) { |
2354 | 0 | return Status::InternalError("unexpected column type in column reader"); |
2355 | 0 | } |
2356 | | |
2357 | 7.98k | null_col->insert_many_defaults(this_read_count); |
2358 | 7.98k | } else { |
2359 | 3.83k | size_t read_count = this_read_count; |
2360 | | |
2361 | | // ordinal in nullable columns' data buffer maybe be not continuously(the data doesn't contain null value), |
2362 | | // so we need use `page_start_off_in_decoder` to calculate the actual offset in `data_decoder` |
2363 | 3.83k | size_t page_start_off_in_decoder = |
2364 | 3.83k | _page.first_ordinal + _page.offset_in_page - origin_index; |
2365 | 3.83k | RETURN_IF_ERROR(_page.data_decoder->read_by_rowids( |
2366 | 3.83k | &rowids[offset], page_start_off_in_decoder, &read_count, dst)); |
2367 | 3.83k | DCHECK_EQ(read_count, this_read_count); |
2368 | 3.83k | } |
2369 | 11.8k | } |
2370 | | |
2371 | 12.5k | if (!is_null) { |
2372 | 4.00k | RETURN_IF_ERROR( |
2373 | 4.00k | _page.data_decoder->seek_to_position_in_page(origin_index + this_run)); |
2374 | 4.00k | } |
2375 | | |
2376 | 12.5k | already_read += this_read_count; |
2377 | 12.5k | _page.offset_in_page += this_run; |
2378 | 12.5k | DCHECK(_page.offset_in_page <= _page.num_rows); |
2379 | 12.5k | } |
2380 | | |
2381 | 5.07k | nrows_to_read = already_read; |
2382 | 5.07k | total_read_count += nrows_to_read; |
2383 | 5.07k | remaining -= nrows_to_read; |
2384 | 44.4k | } else { |
2385 | 44.4k | RETURN_IF_ERROR(_page.data_decoder->read_by_rowids( |
2386 | 44.4k | &rowids[total_read_count], _page.first_ordinal, &nrows_to_read, dst)); |
2387 | 44.4k | total_read_count += nrows_to_read; |
2388 | 44.4k | remaining -= nrows_to_read; |
2389 | 44.4k | } |
2390 | 49.4k | } |
2391 | 48.9k | return Status::OK(); |
2392 | 48.9k | } |
2393 | | |
// Advance to the next data page via the ordinal index. Sets *eos=true (leaving
// the decoded page untouched) when the index is exhausted; otherwise decodes
// the new page, positions its decoders at offset 0, and sets *eos=false.
// *eos is intentionally written only on success paths.
Status FileColumnIterator::_load_next_page(bool* eos) {
    _page_iter.next();
    if (!_page_iter.valid()) {
        *eos = true;
        return Status::OK();
    }

    RETURN_IF_ERROR(_read_data_page(_page_iter));
    RETURN_IF_ERROR(_seek_to_pos_in_page(&_page, 0));
    *eos = false;
    return Status::OK();
}
2406 | | |
// Read and decode the data page referenced by `iter` into _page. For
// dict-encoded columns this also lazily loads the dictionary page on first use
// and wires the dictionary into the page's decoder.
Status FileColumnIterator::_read_data_page(const OrdinalPageIndexIterator& iter) {
    LOG_IF(INFO, config::enable_segment_prefetch_verbose_log) << fmt::format(
            "[verbose] FileColumnIterator::_read_data_page page_offset={}, has_prefetch_pattern={}",
            iter.page().offset,
            _cache_block_prefetch_reader != nullptr &&
                    _cache_block_prefetch_reader->has_read_pattern());

    PageHandle handle;
    Slice page_body;
    PageFooterPB footer;
    _opts.type = DATA_PAGE;
    PageDecoderOptions decoder_opts;
    // Forward the OFFSET_ONLY hint so the decoder can skip the chars buffer.
    decoder_opts.only_read_offsets = _opts.only_read_offsets;
    RETURN_IF_ERROR(
            _reader->read_page(_opts, iter.page(), &handle, &page_body, &footer, _compress_codec));
    // parse data page
    auto st = ParsedPage::create(std::move(handle), page_body, footer.data_page_footer(),
                                 _reader->encoding_info(), iter.page(), iter.page_index(), &_page,
                                 decoder_opts);
    if (!st.ok()) {
        LOG(WARNING) << "failed to create ParsedPage, file=" << _opts.file_reader->path().native()
                     << ", page_offset=" << iter.page().offset << ", page_size=" << iter.page().size
                     << ", page_index=" << iter.page_index() << ", error=" << st;
        return st;
    }

    // dictionary page is read when the first data page that uses it is read,
    // this is to optimize the memory usage: when there is no query on one column, we could
    // release the memory of dictionary page.
    // note that concurrent iterators for the same column won't repeatedly read dictionary page
    // because of page cache.
    if (_reader->encoding_info()->encoding() == DICT_ENCODING) {
        // NOTE(review): the cast presumes every decoder produced for a
        // DICT_ENCODING column is a BinaryDictPageDecoder.
        auto dict_page_decoder = reinterpret_cast<BinaryDictPageDecoder*>(_page.data_decoder.get());
        if (dict_page_decoder->is_dict_encoding()) {
            if (_dict_decoder == nullptr) {
                RETURN_IF_ERROR(_read_dict_data());
                CHECK_NOTNULL(_dict_decoder);
            }

            dict_page_decoder->set_dict_decoder(cast_set<uint32_t>(_dict_decoder->count()),
                                                _dict_word_info.get());
        }
    }
    return Status::OK();
}
2452 | | |
// Load and decode the column's dictionary page, then materialize the word
// table (_dict_word_info) used both by data-page decoding and by
// get_row_ranges_by_dict() predicate evaluation. Only valid for DICT_ENCODING
// columns (CHECK'd).
Status FileColumnIterator::_read_dict_data() {
    CHECK_EQ(_reader->encoding_info()->encoding(), DICT_ENCODING);
    // read dictionary page
    Slice dict_data;
    PageFooterPB dict_footer;
    _opts.type = INDEX_PAGE;

    RETURN_IF_ERROR(_reader->read_page(_opts, _reader->get_dict_page_pointer(), &_dict_page_handle,
                                       &dict_data, &dict_footer, _compress_codec, true));
    // Dictionary entries are decoded as VARCHAR regardless of the column type.
    const EncodingInfo* encoding_info;
    RETURN_IF_ERROR(EncodingInfo::get(FieldType::OLAP_FIELD_TYPE_VARCHAR,
                                      dict_footer.dict_page_footer().encoding(), {},
                                      &encoding_info));
    RETURN_IF_ERROR(encoding_info->create_page_decoder(dict_data, {}, _dict_decoder));
    RETURN_IF_ERROR(_dict_decoder->init());

    _dict_word_info.reset(new StringRef[_dict_decoder->count()]);
    RETURN_IF_ERROR(_dict_decoder->get_dict_word_info(_dict_word_info.get()));
    return Status::OK();
}
2473 | | |
2474 | | Status FileColumnIterator::get_row_ranges_by_zone_map( |
2475 | | const AndBlockColumnPredicate* col_predicates, |
2476 | | const std::vector<std::shared_ptr<const ColumnPredicate>>* delete_predicates, |
2477 | 3.75k | RowRanges* row_ranges) { |
2478 | 3.75k | if (_reader->has_zone_map()) { |
2479 | 3.75k | RETURN_IF_ERROR(_reader->get_row_ranges_by_zone_map(col_predicates, delete_predicates, |
2480 | 3.75k | row_ranges, _opts)); |
2481 | 3.75k | } |
2482 | 3.75k | return Status::OK(); |
2483 | 3.75k | } |
2484 | | |
2485 | | Status FileColumnIterator::get_row_ranges_by_bloom_filter( |
2486 | 3.75k | const AndBlockColumnPredicate* col_predicates, RowRanges* row_ranges) { |
2487 | 3.75k | if ((col_predicates->can_do_bloom_filter(false) && _reader->has_bloom_filter_index(false)) || |
2488 | 3.75k | (col_predicates->can_do_bloom_filter(true) && _reader->has_bloom_filter_index(true))) { |
2489 | 0 | RETURN_IF_ERROR(_reader->get_row_ranges_by_bloom_filter(col_predicates, row_ranges, _opts)); |
2490 | 0 | } |
2491 | 3.75k | return Status::OK(); |
2492 | 3.75k | } |
2493 | | |
2494 | | Status FileColumnIterator::get_row_ranges_by_dict(const AndBlockColumnPredicate* col_predicates, |
2495 | 3.97k | RowRanges* row_ranges) { |
2496 | 3.97k | if (!_is_all_dict_encoding) { |
2497 | 3.29k | return Status::OK(); |
2498 | 3.29k | } |
2499 | | |
2500 | 680 | if (!_dict_decoder) { |
2501 | 0 | RETURN_IF_ERROR(_read_dict_data()); |
2502 | 0 | CHECK_NOTNULL(_dict_decoder); |
2503 | 0 | } |
2504 | | |
2505 | 680 | if (!col_predicates->evaluate_and(_dict_word_info.get(), _dict_decoder->count())) { |
2506 | 108 | row_ranges->clear(); |
2507 | 108 | } |
2508 | 680 | return Status::OK(); |
2509 | 680 | } |
2510 | | |
// Set up cache-block-aware prefetch for this iterator, if its data file reader
// supports it. A no-op (returns OK) when the reader is not cache-block aware.
// On success, records the prefetch policy/direction and builds the access
// range builder used later by install_cache_block_prefetch_pattern().
Status FileColumnIterator::init_cache_block_prefetch(
        const SegmentCacheBlockPrefetchParams& params) {
    // _data_file_reader is iterator-local. If it is cache-block aware, the
    // pattern installed below belongs only to this iterator. That is why
    // FileColumnIterator no longer has to keep a pattern handle or call prefetch
    // before every page read: CacheBlockAwarePrefetchRemoteReader::read_at()
    // observes the actual file offset used by PageIO and advances the single
    // pattern itself.
    _cache_block_prefetch_reader =
            std::dynamic_pointer_cast<io::CacheBlockAwarePrefetchRemoteReader>(_data_file_reader);
    if (!_cache_block_prefetch_reader) {
        return Status::OK();
    }

    OrdinalIndexReader* ordinal_index = nullptr;
    RETURN_IF_ERROR(_reader->get_ordinal_index_reader(ordinal_index, params.read_options.stats));
    _cache_block_prefetch_policy = params.policy;
    // Reverse order-by reads prefetch backwards through the file.
    _cache_block_read_direction = params.read_options.read_orderby_key_reverse
                                          ? io::CacheBlockReadDirection::BACKWARD
                                          : io::CacheBlockReadDirection::FORWARD;
    _access_range_builder = std::make_unique<SegmentFileAccessRangeBuilder>(
            ordinal_index, _cache_block_read_direction);
    return Status::OK();
}
2535 | | |
2536 | | void FileColumnIterator::collect_cache_block_prefetch_iterators( |
2537 | | std::map<FileAccessRangeBuildMethod, std::vector<ColumnIterator*>>& iterators, |
2538 | 0 | FileAccessRangeBuildMethod init_method) { |
2539 | 0 | if (_access_range_builder) { |
2540 | 0 | iterators[init_method].emplace_back(this); |
2541 | 0 | } |
2542 | 0 | } |
2543 | | |
// Hand the pre-computed file access ranges to this iterator's cache-block
// prefetch reader as its (single) read pattern. Requires that
// init_cache_block_prefetch() already installed the reader (DCHECK'd).
Status FileColumnIterator::install_cache_block_prefetch_pattern(
        std::vector<io::FileAccessRange> ranges) {
    DCHECK(_cache_block_prefetch_reader != nullptr);
    // SegmentIterator builds these ranges once after index pruning. The ranges
    // describe the file offsets this physical iterator will read later. Because
    // the cache-aware reader is not shared, replacing its one pattern here cannot
    // disturb any sibling column iterator or another scan iterator.
    io::CacheBlockReadPattern pattern {
            .direction = _cache_block_read_direction,
            .ranges = std::move(ranges),
    };
    return _cache_block_prefetch_reader->set_read_pattern(std::move(pattern),
                                                          _cache_block_prefetch_policy);
}
2558 | | |
// Kick off an asynchronous touch of the prefetch reader's initial window.
// Requires a previously installed cache-block prefetch reader (DCHECK'd).
void FileColumnIterator::async_touch_cache_block_prefetch_initial_window() {
    DCHECK(_cache_block_prefetch_reader != nullptr);
    _cache_block_prefetch_reader->async_touch_initial_window(&_opts.io_ctx);
}
2563 | | |
2564 | 129 | Status DefaultValueColumnIterator::init(const ColumnIteratorOptions& opts) { |
2565 | 129 | _opts = opts; |
2566 | | // be consistent with segment v1 |
2567 | | // if _has_default_value, we should create default column iterator for this column, and |
2568 | | // "NULL" is a special default value which means the default value is null. |
2569 | 129 | if (_has_default_value) { |
2570 | 59 | if (_default_value == "NULL") { |
2571 | 3 | _default_value_field = Field::create_field<TYPE_NULL>(Null {}); |
2572 | 56 | } else { |
2573 | 56 | if (_type_info->type() == FieldType::OLAP_FIELD_TYPE_ARRAY) { |
2574 | 0 | if (_default_value != "[]") { |
2575 | 0 | return Status::NotSupported("Array default {} is unsupported", _default_value); |
2576 | 0 | } else { |
2577 | 0 | _default_value_field = Field::create_field<TYPE_ARRAY>(Array {}); |
2578 | 0 | return Status::OK(); |
2579 | 0 | } |
2580 | 56 | } else if (_type_info->type() == FieldType::OLAP_FIELD_TYPE_STRUCT) { |
2581 | 0 | return Status::NotSupported("STRUCT default type is unsupported"); |
2582 | 56 | } else if (_type_info->type() == FieldType::OLAP_FIELD_TYPE_MAP) { |
2583 | 0 | return Status::NotSupported("MAP default type is unsupported"); |
2584 | 0 | } |
2585 | 56 | const auto t = _type_info->type(); |
2586 | 56 | const auto serde = DataTypeFactory::instance() |
2587 | 56 | .create_data_type(t, _precision, _scale, _len) |
2588 | 56 | ->get_serde(); |
2589 | 56 | RETURN_IF_ERROR(serde->from_fe_string(_default_value, _default_value_field)); |
2590 | 56 | } |
2591 | 70 | } else if (_is_nullable) { |
2592 | 70 | _default_value_field = Field::create_field<TYPE_NULL>(Null {}); |
2593 | 70 | } else { |
2594 | 0 | return Status::InternalError( |
2595 | 0 | "invalid default value column for no default value and not nullable"); |
2596 | 0 | } |
2597 | 129 | return Status::OK(); |
2598 | 129 | } |
2599 | | |
// Append *n copies of the column default into dst. has_null reports whether
// the default itself is null (set in init(): explicit "NULL" default, or a
// nullable column with no default).
Status DefaultValueColumnIterator::next_batch(size_t* n, MutableColumnPtr& dst, bool* has_null) {
    *has_null = _default_value_field.is_null();
    _insert_many_default(dst, *n);
    return Status::OK();
}
2605 | | |
// Point reads on a default-value column: every row carries the same default,
// so the concrete rowids are irrelevant — only `count` matters.
Status DefaultValueColumnIterator::read_by_rowids(const rowid_t* rowids, const size_t count,
                                                  MutableColumnPtr& dst) {
    _insert_many_default(dst, count);
    return Status::OK();
}
2611 | | |
2612 | 55 | void DefaultValueColumnIterator::_insert_many_default(MutableColumnPtr& dst, size_t n) { |
2613 | 55 | if (_default_value_field.is_null()) { |
2614 | 29 | dst->insert_many_defaults(n); |
2615 | 29 | } else { |
2616 | 26 | dst = dst->convert_to_predicate_column_if_dictionary(); |
2617 | 26 | dst->insert_duplicate_fields(_default_value_field, n); |
2618 | 26 | } |
2619 | 55 | } |
2620 | | |
2621 | 0 | Status RowIdColumnIteratorV2::next_batch(size_t* n, MutableColumnPtr& dst, bool* has_null) { |
2622 | 0 | auto* string_column = assert_cast<ColumnString*, TypeCheckOnRelease::DISABLE>(dst.get()); |
2623 | |
|
2624 | 0 | for (uint32_t i = 0; i < *n; ++i) { |
2625 | 0 | uint32_t row_id = _current_rowid + i; |
2626 | 0 | GlobalRowLoacationV2 location(_version, _backend_id, _file_id, row_id); |
2627 | 0 | string_column->insert_data(reinterpret_cast<const char*>(&location), |
2628 | 0 | sizeof(GlobalRowLoacationV2)); |
2629 | 0 | } |
2630 | 0 | _current_rowid += *n; |
2631 | 0 | return Status::OK(); |
2632 | 0 | } |
2633 | | |
2634 | | Status RowIdColumnIteratorV2::read_by_rowids(const rowid_t* rowids, const size_t count, |
2635 | 39.8k | MutableColumnPtr& dst) { |
2636 | 39.8k | auto* string_column = assert_cast<ColumnString*>(dst.get()); |
2637 | | |
2638 | 18.1M | for (size_t i = 0; i < count; ++i) { |
2639 | 18.1M | uint32_t row_id = rowids[i]; |
2640 | 18.1M | GlobalRowLoacationV2 location(_version, _backend_id, _file_id, row_id); |
2641 | 18.1M | string_column->insert_data(reinterpret_cast<const char*>(&location), |
2642 | 18.1M | sizeof(GlobalRowLoacationV2)); |
2643 | 18.1M | } |
2644 | 39.8k | return Status::OK(); |
2645 | 39.8k | } |
2646 | | |
2647 | | } // namespace doris::segment_v2 |