be/src/storage/segment/segment_writer.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "storage/segment/segment_writer.h" |
19 | | |
20 | | #include <assert.h> |
21 | | #include <gen_cpp/segment_v2.pb.h> |
22 | | #include <parallel_hashmap/phmap.h> |
23 | | |
24 | | #include <algorithm> |
25 | | |
26 | | // IWYU pragma: no_include <opentelemetry/common/threadlocal.h> |
27 | | #include <crc32c/crc32c.h> |
28 | | |
29 | | #include "cloud/config.h" |
30 | | #include "common/cast_set.h" |
31 | | #include "common/compiler_util.h" // IWYU pragma: keep |
32 | | #include "common/config.h" |
33 | | #include "common/logging.h" // LOG |
34 | | #include "common/status.h" |
35 | | #include "core/block/block.h" |
36 | | #include "core/block/column_with_type_and_name.h" |
37 | | #include "core/column/column_nullable.h" |
38 | | #include "core/data_type/primitive_type.h" |
39 | | #include "core/field.h" |
40 | | #include "core/types.h" |
41 | | #include "core/value/vdatetime_value.h" |
42 | | #include "exec/common/variant_util.h" |
43 | | #include "io/cache/block_file_cache.h" |
44 | | #include "io/cache/block_file_cache_factory.h" |
45 | | #include "io/fs/file_system.h" |
46 | | #include "io/fs/file_writer.h" |
47 | | #include "io/fs/local_file_system.h" |
48 | | #include "runtime/exec_env.h" |
49 | | #include "runtime/memory/mem_tracker.h" |
50 | | #include "service/point_query_executor.h" |
51 | | #include "storage/data_dir.h" |
52 | | #include "storage/index/index_file_writer.h" |
53 | | #include "storage/index/index_writer.h" |
54 | | #include "storage/index/inverted/inverted_index_fs_directory.h" |
55 | | #include "storage/index/primary_key_index.h" |
56 | | #include "storage/index/short_key_index.h" |
57 | | #include "storage/iterator/olap_data_convertor.h" |
58 | | #include "storage/key_coder.h" |
59 | | #include "storage/olap_common.h" |
60 | | #include "storage/olap_define.h" |
61 | | #include "storage/partial_update_info.h" |
62 | | #include "storage/rowset/rowset_writer_context.h" // RowsetWriterContext |
63 | | #include "storage/rowset/segment_creator.h" |
64 | | #include "storage/segment/column_writer.h" // ColumnWriter |
65 | | #include "storage/segment/external_col_meta_util.h" |
66 | | #include "storage/segment/page_io.h" |
67 | | #include "storage/segment/page_pointer.h" |
68 | | #include "storage/segment/segment_loader.h" |
69 | | #include "storage/segment/variant/variant_ext_meta_writer.h" |
70 | | #include "storage/segment/variant_stats_calculator.h" |
71 | | #include "storage/storage_engine.h" |
72 | | #include "storage/tablet/tablet_schema.h" |
73 | | #include "storage/utils.h" |
74 | | #include "util/coding.h" |
75 | | #include "util/faststring.h" |
76 | | #include "util/jsonb/serialize.h" |
77 | | #include "util/simd/bits.h" |
78 | | namespace doris { |
79 | | namespace segment_v2 { |
80 | | |
81 | | using namespace ErrorCode; |
82 | | using namespace KeyConsts; |
83 | |
84 | | const char* k_segment_magic = "D0R1"; // 4-byte segment magic; length constant below must match |
85 | | const uint32_t k_segment_magic_length = 4; |
86 | | |
87 | 2.44k | inline std::string segment_mem_tracker_name(uint32_t segment_id) { // builds the per-segment MemTracker label |
88 | 2.44k | return "SegmentWriter:Segment-" + std::to_string(segment_id); |
89 | 2.44k | } |
90 | | |
91 | | SegmentWriter::SegmentWriter(io::FileWriter* file_writer, uint32_t segment_id, |
92 | | TabletSchemaSPtr tablet_schema, BaseTabletSPtr tablet, |
93 | | DataDir* data_dir, const SegmentWriterOptions& opts, |
94 | | IndexFileWriter* index_file_writer) |
95 | 2.44k | : _segment_id(segment_id), |
96 | 2.44k | _tablet_schema(std::move(tablet_schema)), |
97 | 2.44k | _tablet(std::move(tablet)), |
98 | 2.44k | _data_dir(data_dir), |
99 | 2.44k | _opts(opts), |
100 | 2.44k | _file_writer(file_writer), |
101 | 2.44k | _index_file_writer(index_file_writer), |
102 | 2.44k | _mem_tracker(std::make_unique<MemTracker>(segment_mem_tracker_name(segment_id))), |
103 | 2.44k | _mow_context(std::move(opts.mow_ctx)) { // caches key coders / index sizes; MOW tables also get seq and rowid coders |
104 | 2.44k | CHECK_NOTNULL(file_writer); // output writer must be supplied by the caller |
105 | 2.44k | _num_sort_key_columns = _tablet_schema->num_key_columns(); |
106 | 2.44k | _num_short_key_columns = _tablet_schema->num_short_key_columns(); |
107 | 2.44k | if (!_is_mow_with_cluster_key()) { // skipped for MOW+cluster keys: sort keys are redefined below |
108 | 2.44k | DCHECK(_num_sort_key_columns >= _num_short_key_columns) |
109 | 0 | << ", table_id=" << _tablet_schema->table_id() |
110 | 0 | << ", num_key_columns=" << _num_sort_key_columns |
111 | 0 | << ", num_short_key_columns=" << _num_short_key_columns |
112 | 0 | << ", cluster_key_columns=" << _tablet_schema->cluster_key_uids().size(); |
113 | 2.44k | } |
114 | 4.50k | for (size_t cid = 0; cid < _num_sort_key_columns; ++cid) { // one coder + encoded index size per sort-key column |
115 | 2.05k | const auto& column = _tablet_schema->column(cid); |
116 | 2.05k | _key_coders.push_back(get_key_coder(column.type())); |
117 | 2.05k | _key_index_size.push_back(cast_set<uint16_t>(column.index_length())); |
118 | 2.05k | } |
119 | 2.44k | if (_is_mow()) { |
120 | | // encode the sequence id into the primary key index |
121 | 69 | if (_tablet_schema->has_sequence_col()) { |
122 | 26 | const auto& column = _tablet_schema->column(_tablet_schema->sequence_col_idx()); |
123 | 26 | _seq_coder = get_key_coder(column.type()); |
124 | 26 | } |
125 | | // encode the rowid into the primary key index |
126 | 69 | if (_is_mow_with_cluster_key()) { |
127 | 0 | const auto* type_info = get_scalar_type_info<FieldType::OLAP_FIELD_TYPE_UNSIGNED_INT>(); |
128 | 0 | _rowid_coder = get_key_coder(type_info->type()); |
129 | | // primary keys keep the coders built above |
130 | 0 | _primary_key_coders.swap(_key_coders); |
131 | | // cluster keys become the sort keys from here on |
132 | 0 | _key_coders.clear(); |
133 | 0 | _key_index_size.clear(); |
134 | 0 | _num_sort_key_columns = _tablet_schema->cluster_key_uids().size(); |
135 | 0 | for (auto cid : _tablet_schema->cluster_key_uids()) { |
136 | 0 | const auto& column = _tablet_schema->column_by_uid(cid); |
137 | 0 | _key_coders.push_back(get_key_coder(column.type())); |
138 | 0 | _key_index_size.push_back(cast_set<uint16_t>(column.index_length())); |
139 | 0 | } |
140 | 0 | } |
141 | 69 | } |
142 | 2.44k | } |
143 | | |
144 | 2.44k | SegmentWriter::~SegmentWriter() { |
145 | 2.44k | _mem_tracker->release(_mem_tracker->consumption()); // give back everything still accounted to this tracker |
146 | 2.44k | } |
147 | | |
148 | | void SegmentWriter::init_column_meta(ColumnMetaPB* meta, uint32_t column_id, |
149 | 10.2k | const TabletColumn& column, TabletSchemaSPtr tablet_schema) { // fills the footer ColumnMetaPB from the tablet column, recursing into sub-columns |
150 | 10.2k | meta->set_column_id(column_id); |
151 | 10.2k | meta->set_type(int(column.type())); |
152 | 10.2k | meta->set_length(column.length()); |
153 | 10.2k | meta->set_encoding(DEFAULT_ENCODING); |
154 | 10.2k | meta->set_compression(_opts.compression_type); |
155 | 10.2k | meta->set_is_nullable(column.is_nullable()); |
156 | 10.2k | meta->set_default_value(column.default_value()); |
157 | 10.2k | meta->set_precision(column.precision()); |
158 | 10.2k | meta->set_frac(column.frac()); |
159 | 10.2k | if (column.has_path_info()) { // variant sub-column path, serialized with the parent's unique id |
160 | 288 | column.path_info_ptr()->to_protobuf(meta->mutable_column_path_info(), |
161 | 288 | column.parent_unique_id()); |
162 | 288 | } |
163 | 10.2k | meta->set_unique_id(column.unique_id()); |
164 | 10.3k | for (uint32_t i = 0; i < column.get_subtype_count(); ++i) { // children share the parent's column_id |
165 | 15 | init_column_meta(meta->add_children_columns(), column_id, column.get_sub_column(i), |
166 | 15 | tablet_schema); |
167 | 15 | } |
168 | 10.2k | meta->set_result_is_nullable(column.get_result_is_nullable()); |
169 | 10.2k | meta->set_function_name(column.get_aggregation_name()); |
170 | 10.2k | meta->set_be_exec_version(column.get_be_exec_version()); |
171 | 10.2k | if (column.is_variant_type()) { |
172 | 285 | meta->set_variant_max_subcolumns_count(column.variant_max_subcolumns_count()); |
173 | 285 | meta->set_variant_enable_doc_mode(column.variant_enable_doc_mode()); |
174 | 285 | } |
175 | 10.2k | } |
176 | | |
177 | 1.25k | Status SegmentWriter::init() { // convenience overload: every column of the schema, with key support |
178 | 1.25k | std::vector<uint32_t> column_ids; |
179 | 1.25k | auto column_cnt = cast_set<int>(_tablet_schema->num_columns()); |
180 | 8.12k | for (uint32_t i = 0; i < column_cnt; ++i) { |
181 | 6.86k | column_ids.emplace_back(i); |
182 | 6.86k | } |
183 | 1.25k | return init(column_ids, true); |
184 | 1.25k | } |
185 | | |
186 | | Status SegmentWriter::_create_column_writer(uint32_t cid, const TabletColumn& column, |
187 | 10.2k | const TabletSchemaSPtr& schema) { // builds footer meta, index options and page sizes for one column's writer |
188 | 10.2k | ColumnWriterOptions opts; |
189 | 10.2k | opts.meta = _footer.add_columns(); |
190 | |
191 | 10.2k | init_column_meta(opts.meta, cid, column, schema); |
192 | |
193 | | // now we create zone map for key columns in AGG_KEYS or all column in UNIQUE_KEYS or DUP_KEYS |
194 | | // except for columns whose type don't support zone map. |
195 | 10.2k | opts.need_zone_map = column.is_key() || schema->keys_type() != KeysType::AGG_KEYS; |
196 | 10.2k | opts.need_bloom_filter = column.is_bf_column(); |
197 | 10.2k | if (opts.need_bloom_filter) { |
198 | 4 | opts.bf_options.fpp = schema->has_bf_fpp() ? schema->bloom_filter_fpp() : 0.05; |
199 | 4 | } |
200 | 10.2k | auto* tablet_index = schema->get_ngram_bf_index(column.unique_id()); |
201 | 10.2k | if (tablet_index) { |
202 | 0 | opts.need_bloom_filter = true; |
203 | 0 | opts.is_ngram_bf_index = true; |
204 | | //narrow convert from int32_t to uint8_t and uint16_t which is dangerous |
205 | 0 | auto gram_size = tablet_index->get_gram_size(); |
206 | 0 | auto gram_bf_size = tablet_index->get_gram_bf_size(); |
207 | 0 | if (gram_size > 256 || gram_size < 1) { // NOTE(review): 256 passes this check yet exceeds uint8_t max (255) — confirm cast_set<uint8_t> handles it |
208 | 0 | return Status::NotSupported("Do not support ngram bloom filter for ngram_size: ", |
209 | 0 | gram_size); |
210 | 0 | } |
211 | 0 | if (gram_bf_size > 65535 || gram_bf_size < 64) { |
212 | 0 | return Status::NotSupported("Do not support ngram bloom filter for bf_size: ", |
213 | 0 | gram_bf_size); |
214 | 0 | } |
215 | 0 | opts.gram_size = cast_set<uint8_t>(gram_size); |
216 | 0 | opts.gram_bf_size = cast_set<uint16_t>(gram_bf_size); |
217 | 0 | } |
218 | |
219 | 10.2k | bool skip_inverted_index = false; |
220 | 10.2k | if (_opts.rowset_ctx != nullptr) { |
221 | | // skip write inverted index for index compaction column |
222 | 8.06k | skip_inverted_index = |
223 | 8.06k | _opts.rowset_ctx->columns_to_do_index_compaction.count(column.unique_id()) > 0; |
224 | 8.06k | } |
225 | | // skip write inverted index on load if skip_write_index_on_load is true |
226 | 10.2k | if (_opts.write_type == DataWriteType::TYPE_DIRECT && schema->skip_write_index_on_load()) { |
227 | 0 | skip_inverted_index = true; |
228 | 0 | } |
229 | | // inverted indexes for this column |
230 | 10.2k | if (!skip_inverted_index) { |
231 | 9.85k | auto inverted_indexs = schema->inverted_indexs(column); |
232 | 9.85k | if (!inverted_indexs.empty()) { |
233 | 2.18k | opts.inverted_indexes = inverted_indexs; |
234 | 2.18k | opts.need_inverted_index = true; |
235 | 2.18k | DCHECK(_index_file_writer != nullptr); |
236 | 2.18k | } |
237 | 9.85k | } |
238 | | // ANN index for this column |
239 | 10.2k | if (const auto& index = schema->ann_index(column); index != nullptr) { |
240 | 1 | opts.ann_index = index; |
241 | 1 | opts.need_ann_index = true; |
242 | 1 | DCHECK(_index_file_writer != nullptr); |
243 | 1 | } |
244 | |
245 | 10.2k | opts.index_file_writer = _index_file_writer; |
246 | |
247 | 10.2k | #define DISABLE_INDEX_IF_FIELD_TYPE(TYPE) \
248 | 92.4k | if (column.type() == FieldType::OLAP_FIELD_TYPE_##TYPE) { \
249 | 289 | opts.need_zone_map = false; \
250 | 289 | opts.need_bloom_filter = false; \
251 | 289 | }
252 | |
253 | 10.2k | DISABLE_INDEX_IF_FIELD_TYPE(STRUCT) |
254 | 10.2k | DISABLE_INDEX_IF_FIELD_TYPE(ARRAY) |
255 | 10.2k | DISABLE_INDEX_IF_FIELD_TYPE(JSONB) |
256 | 10.2k | DISABLE_INDEX_IF_FIELD_TYPE(AGG_STATE) |
257 | 10.2k | DISABLE_INDEX_IF_FIELD_TYPE(MAP) |
258 | 10.2k | DISABLE_INDEX_IF_FIELD_TYPE(BITMAP) |
259 | 10.2k | DISABLE_INDEX_IF_FIELD_TYPE(HLL) |
260 | 10.2k | DISABLE_INDEX_IF_FIELD_TYPE(QUANTILE_STATE) |
261 | 10.2k | DISABLE_INDEX_IF_FIELD_TYPE(VARIANT) |
262 | |
263 | 10.2k | #undef DISABLE_INDEX_IF_FIELD_TYPE |
264 | |
265 | 10.2k | int64_t storage_page_size = _tablet_schema->storage_page_size(); |
266 | | // storage_page_size must be between 4KB and 10MB. |
267 | 10.2k | if (storage_page_size >= 4096 && storage_page_size <= 10485760) { |
268 | 10.2k | opts.data_page_size = storage_page_size; |
269 | 10.2k | } |
270 | 10.2k | opts.dict_page_size = _tablet_schema->storage_dict_page_size(); |
271 | 10.2k | DBUG_EXECUTE_IF("VerticalSegmentWriter._create_column_writer.storage_page_size", { // test-only hook: verify the configured page size took effect |
272 | 10.2k | auto table_id = DebugPoints::instance()->get_debug_param_or_default<int64_t>( |
273 | 10.2k | "VerticalSegmentWriter._create_column_writer.storage_page_size", "table_id", |
274 | 10.2k | INT_MIN); |
275 | 10.2k | auto target_data_page_size = DebugPoints::instance()->get_debug_param_or_default<int64_t>( |
276 | 10.2k | "VerticalSegmentWriter._create_column_writer.storage_page_size", |
277 | 10.2k | "storage_page_size", INT_MIN); |
278 | 10.2k | if (table_id == INT_MIN || target_data_page_size == INT_MIN) { |
279 | 10.2k | return Status::Error<ErrorCode::INTERNAL_ERROR>( |
280 | 10.2k | "Debug point parameters missing: either 'table_id' or 'storage_page_size' not " |
281 | 10.2k | "set."); |
282 | 10.2k | } |
283 | 10.2k | if (table_id == _tablet_schema->table_id() && |
284 | 10.2k | opts.data_page_size != target_data_page_size) { |
285 | 10.2k | return Status::Error<ErrorCode::INTERNAL_ERROR>( |
286 | 10.2k | "Mismatch in 'storage_page_size': expected size does not match the current " |
287 | 10.2k | "data page size. " |
288 | 10.2k | "Expected: " + |
289 | 10.2k | std::to_string(target_data_page_size) + |
290 | 10.2k | ", Actual: " + std::to_string(opts.data_page_size) + "."); |
291 | 10.2k | } |
292 | 10.2k | }) |
293 | 10.2k | if (column.is_row_store_column()) { |
294 | | // smaller page size for row store column |
295 | 0 | auto page_size = _tablet_schema->row_store_page_size(); |
296 | 0 | opts.data_page_size = |
297 | 0 | (page_size > 0) ? page_size : segment_v2::ROW_STORE_PAGE_SIZE_DEFAULT_VALUE; |
298 | 0 | } |
299 | |
300 | 10.2k | opts.rowset_ctx = _opts.rowset_ctx; |
301 | 10.2k | opts.file_writer = _file_writer; |
302 | 10.2k | opts.compression_type = _opts.compression_type; |
303 | 10.2k | opts.footer = &_footer; |
304 | 10.2k | if (_opts.rowset_ctx != nullptr) { |
305 | 8.06k | opts.input_rs_readers = _opts.rowset_ctx->input_rs_readers; |
306 | 8.06k | } |
307 | 10.2k | opts.encoding_preference = {.integer_type_default_use_plain_encoding = |
308 | 10.2k | _tablet_schema->integer_type_default_use_plain_encoding(), |
309 | 10.2k | .binary_plain_encoding_default_impl = |
310 | 10.2k | _tablet_schema->binary_plain_encoding_default_impl()}; |
311 | |
312 | 10.2k | std::unique_ptr<ColumnWriter> writer; |
313 | 10.2k | RETURN_IF_ERROR(ColumnWriter::create(opts, &column, _file_writer, &writer)); |
314 | 10.2k | RETURN_IF_ERROR(writer->init()); |
315 | 10.2k | _column_writers.push_back(std::move(writer)); |
316 | |
317 | 10.2k | _olap_data_convertor->add_column_data_convertor(column); // keep convertor slots aligned with _column_writers |
318 | 10.2k | return Status::OK(); |
319 | 10.2k | } |
320 | | |
321 | 3.37k | Status SegmentWriter::init(const std::vector<uint32_t>& col_ids, bool has_key) { // sets up writers, convertor and (if keyed) the short-key or primary-key index builder |
322 | 3.37k | DCHECK(_column_writers.empty()); |
323 | 3.37k | DCHECK(_column_ids.empty()); |
324 | 3.37k | _has_key = has_key; |
325 | 3.37k | _column_writers.reserve(_tablet_schema->columns().size()); |
326 | 3.37k | _column_ids.insert(_column_ids.end(), col_ids.begin(), col_ids.end()); |
327 | 3.37k | _olap_data_convertor = std::make_unique<OlapBlockDataConvertor>(); |
328 | 3.37k | if (_opts.compression_type == UNKNOWN_COMPRESSION) { // fall back to the schema's compression when unset |
329 | 2.44k | _opts.compression_type = _tablet_schema->compression_type(); |
330 | 2.44k | } |
331 | |
332 | | // Vertical compaction calls init() multiple times against the same writer; the footer accumulates entries |
333 | | // across calls, so this init()'s slice of footer columns starts at the current size. |
334 | 3.37k | const int variant_stats_footer_offset = _footer.columns_size(); |
335 | 3.37k | RETURN_IF_ERROR(_create_writers(_tablet_schema, col_ids)); |
336 | |
337 | | // Initialize variant statistics calculator |
338 | 3.37k | _variant_stats_calculator = std::make_unique<VariantStatsCaculator>( |
339 | 3.37k | &_footer, _tablet_schema, col_ids, variant_stats_footer_offset); |
340 | |
341 | | // we don't need the short key index for unique key merge on write table. |
342 | 3.37k | if (_has_key) { |
343 | 2.44k | if (_is_mow()) { |
344 | 69 | size_t seq_col_length = 0; |
345 | 69 | if (_tablet_schema->has_sequence_col()) { // +1 presumably for a marker/null byte — confirm against PrimaryKeyIndexBuilder |
346 | 26 | seq_col_length = |
347 | 26 | _tablet_schema->column(_tablet_schema->sequence_col_idx()).length() + 1; |
348 | 26 | } |
349 | 69 | size_t rowid_length = 0; |
350 | 69 | if (_is_mow_with_cluster_key()) { // cluster-key MOW keeps a short-key index as well |
351 | 0 | rowid_length = PrimaryKeyIndexReader::ROW_ID_LENGTH; |
352 | 0 | _short_key_index_builder.reset( |
353 | 0 | new ShortKeyIndexBuilder(_segment_id, _opts.num_rows_per_block)); |
354 | 0 | } |
355 | 69 | _primary_key_index_builder.reset( |
356 | 69 | new PrimaryKeyIndexBuilder(_file_writer, seq_col_length, rowid_length)); |
357 | 69 | RETURN_IF_ERROR(_primary_key_index_builder->init()); |
358 | 2.37k | } else { |
359 | 2.37k | _short_key_index_builder.reset( |
360 | 2.37k | new ShortKeyIndexBuilder(_segment_id, _opts.num_rows_per_block)); |
361 | 2.37k | } |
362 | 2.44k | } |
363 | 3.37k | return Status::OK(); |
364 | 3.37k | } |
365 | | |
366 | | Status SegmentWriter::_create_writers(const TabletSchemaSPtr& tablet_schema, |
367 | 3.37k | const std::vector<uint32_t>& col_ids) { // one column writer per requested column id, in order |
368 | 3.37k | _olap_data_convertor->reserve(col_ids.size()); |
369 | 10.2k | for (auto& cid : col_ids) { |
370 | 10.2k | RETURN_IF_ERROR(_create_column_writer(cid, tablet_schema->column(cid), tablet_schema)); |
371 | 10.2k | } |
372 | 3.37k | return Status::OK(); |
373 | 3.37k | } |
374 | | |
375 | 800 | void SegmentWriter::_maybe_invalid_row_cache(const std::string& key) { |
376 | | // Just invalidate the row cache for simplicity, since the rowset is not visible yet. |
377 | | // If we updated/inserted into the cache and the load then failed, the rowset would stay |
378 | | // invisible while the cached data became visible, leading to inconsistency. |
379 | 800 | if (!config::disable_storage_row_cache && _tablet_schema->has_row_store_for_all_columns() && |
380 | 800 | _opts.write_type == DataWriteType::TYPE_DIRECT) { |
381 | | // invalidate cache |
382 | 0 | RowCache::instance()->erase({_opts.rowset_ctx->tablet_id, key}); |
383 | 0 | } |
384 | 800 | } |
385 | | |
386 | 6 | void SegmentWriter::_serialize_block_to_row_column(const Block& block) { // rebuilds the row-store column as JSONB from the whole block |
387 | 6 | if (block.rows() == 0) { |
388 | 0 | return; |
389 | 0 | } |
390 | 6 | MonotonicStopWatch watch; |
391 | 6 | watch.start(); |
392 | 6 | int row_column_id = 0; // NOTE(review): never assigned after init, so the log below always prints 0 — confirm intent |
393 | 12 | for (int i = 0; i < _tablet_schema->num_columns(); ++i) { |
394 | 6 | if (_tablet_schema->column(i).is_row_store_column()) { |
395 | 0 | auto* row_store_column = static_cast<ColumnString*>( |
396 | 0 | block.get_by_position(i).column->assume_mutable_ref().assume_mutable().get()); |
397 | 0 | row_store_column->clear(); // rewrite the column from scratch |
398 | 0 | DataTypeSerDeSPtrs serdes = create_data_type_serdes(block.get_data_types()); |
399 | 0 | JsonbSerializeUtil::block_to_jsonb(*_tablet_schema, block, *row_store_column, |
400 | 0 | cast_set<int>(_tablet_schema->num_columns()), serdes, |
401 | 0 | {_tablet_schema->row_columns_uids().begin(), |
402 | 0 | _tablet_schema->row_columns_uids().end()}); |
403 | 0 | break; |
404 | 0 | } |
405 | 6 | } |
406 | |
407 | 6 | VLOG_DEBUG << "serialize , num_rows:" << block.rows() << ", row_column_id:" << row_column_id |
408 | 0 | << ", total_byte_size:" << block.allocated_bytes() << ", serialize_cost(us)" |
409 | 0 | << watch.elapsed_time() / 1000; |
410 | 6 | } |
411 | | |
412 | | Status SegmentWriter::probe_key_for_mow( // looks up `key` in existing rowsets and updates stats + delete bitmap accordingly |
413 | | std::string key, std::size_t segment_pos, bool have_input_seq_column, bool have_delete_sign, |
414 | | const std::vector<RowsetSharedPtr>& specified_rowsets, |
415 | | std::vector<std::unique_ptr<SegmentCacheHandle>>& segment_caches, |
416 | | bool& has_default_or_nullable, std::vector<bool>& use_default_or_null_flag, |
417 | | const std::function<void(const RowLocation& loc)>& found_cb, |
418 | 0 | const std::function<Status()>& not_found_cb, PartialUpdateStats& stats) { |
419 | 0 | RowLocation loc; |
420 | | // hold a shared_ptr so the rowset cannot be deleted while we still reference loc |
421 | 0 | RowsetSharedPtr rowset; |
422 | 0 | auto st = _tablet->lookup_row_key( |
423 | 0 | key, _tablet_schema.get(), have_input_seq_column, specified_rowsets, &loc, |
424 | 0 | cast_set<uint32_t>(_mow_context->max_version), segment_caches, &rowset); |
425 | 0 | if (st.is<KEY_NOT_FOUND>()) { // brand-new key: fill with defaults/nulls |
426 | 0 | if (!have_delete_sign) { |
427 | 0 | RETURN_IF_ERROR(not_found_cb()); |
428 | 0 | } |
429 | 0 | ++stats.num_rows_new_added; |
430 | 0 | has_default_or_nullable = true; |
431 | 0 | use_default_or_null_flag.emplace_back(true); |
432 | 0 | return Status::OK(); |
433 | 0 | } |
434 | 0 | if (!st.ok() && !st.is<KEY_ALREADY_EXISTS>()) { |
435 | 0 | LOG(WARNING) << "failed to lookup row key, error: " << st; |
436 | 0 | return st; |
437 | 0 | } |
438 | |
439 | | // 1. if the delete sign is marked, it means that the value columns of the row will not |
440 | | // be read. So we don't need to read the missing values from the previous rows. |
441 | | // 2. the one exception is when there are sequence columns in the table, we need to read |
442 | | // the sequence columns, otherwise it may cause the merge-on-read based compaction |
443 | | // policy to produce incorrect results |
444 | | // TODO(bobhan1): only read seq col rather than all columns in this situation for |
445 | | // partial update and flexible partial update |
446 | |
447 | | // TODO(bobhan1): handle sequence column here |
448 | 0 | if (st.is<KEY_ALREADY_EXISTS>() || (have_delete_sign && !_tablet_schema->has_sequence_col())) { |
449 | 0 | has_default_or_nullable = true; |
450 | 0 | use_default_or_null_flag.emplace_back(true); |
451 | 0 | } else { |
452 | | // partial update should not contain invisible columns |
453 | 0 | use_default_or_null_flag.emplace_back(false); |
454 | 0 | _rsid_to_rowset.emplace(rowset->rowset_id(), rowset); |
455 | 0 | found_cb(loc); |
456 | 0 | } |
457 |
|
458 | 0 | if (st.is<KEY_ALREADY_EXISTS>()) { |
459 | | // even though the current row is marked deleted, we still read its missing columns |
460 | | // so that every column stays aligned |
461 | 0 | _mow_context->delete_bitmap->add( |
462 | 0 | {_opts.rowset_ctx->rowset_id, _segment_id, DeleteBitmap::TEMP_VERSION_COMMON}, |
463 | 0 | cast_set<uint32_t>(segment_pos)); |
464 | 0 | ++stats.num_rows_deleted; |
465 | 0 | } else { // mark the old location deleted: this segment's row supersedes it |
466 | 0 | _mow_context->delete_bitmap->add( |
467 | 0 | {loc.rowset_id, loc.segment_id, DeleteBitmap::TEMP_VERSION_COMMON}, loc.row_id); |
468 | 0 | ++stats.num_rows_updated; |
469 | 0 | } |
470 | 0 | return Status::OK(); |
471 | 0 | } |
472 | | |
473 | 0 | Status SegmentWriter::partial_update_preconditions_check(size_t row_pos) { // validates fixed partial-update invariants: DCHECKs in debug, InternalError in release |
474 | 0 | if (!_is_mow()) { |
475 | 0 | auto msg = fmt::format( |
476 | 0 | "Can only do partial update on merge-on-write unique table, but found: " |
477 | 0 | "keys_type={}, _opts.enable_unique_key_merge_on_write={}, tablet_id={}", |
478 | 0 | _tablet_schema->keys_type(), _opts.enable_unique_key_merge_on_write, |
479 | 0 | _tablet->tablet_id()); |
480 | 0 | DCHECK(false) << msg; |
481 | 0 | return Status::InternalError<false>(msg); |
482 | 0 | } |
483 | 0 | if (_opts.rowset_ctx->partial_update_info == nullptr) { |
484 | 0 | auto msg = |
485 | 0 | fmt::format("partial_update_info should not be nullptr, please check, tablet_id={}", |
486 | 0 | _tablet->tablet_id()); |
487 | 0 | DCHECK(false) << msg; |
488 | 0 | return Status::InternalError<false>(msg); |
489 | 0 | } |
490 | 0 | if (!_opts.rowset_ctx->partial_update_info->is_fixed_partial_update()) { |
491 | 0 | auto msg = fmt::format( |
492 | 0 | "in fixed partial update code, but update_mode={}, please check, tablet_id={}", |
493 | 0 | _opts.rowset_ctx->partial_update_info->update_mode(), _tablet->tablet_id()); |
494 | 0 | DCHECK(false) << msg; |
495 | 0 | return Status::InternalError<false>(msg); |
496 | 0 | } |
497 | 0 | if (row_pos != 0) { |
498 | 0 | auto msg = fmt::format("row_pos should be 0, but found {}, tablet_id={}", row_pos, |
499 | 0 | _tablet->tablet_id()); |
500 | 0 | DCHECK(false) << msg; |
501 | 0 | return Status::InternalError<false>(msg); |
502 | 0 | } |
503 | 0 | return Status::OK(); |
504 | 0 | } |
505 | | |
506 | | // for partial update, we should do following steps to fill content of block: |
507 | | // 1. set block data to data convertor, and get all key_column's converted slice |
508 | | // 2. get pk of input block, and read missing columns |
509 | | // 2.1 first find key location{rowset_id, segment_id, row_id} |
510 | | // 2.2 build read plan to read by batch |
511 | | // 2.3 fill block |
512 | | // 3. set columns to data convertor and then write all columns |
513 | | Status SegmentWriter::append_block_with_partial_content(const Block* block, size_t row_pos, |
514 | 0 | size_t num_rows) { |
515 | 0 | if (block->columns() < _tablet_schema->num_key_columns() || |
516 | 0 | block->columns() >= _tablet_schema->num_columns()) { |
517 | 0 | return Status::InvalidArgument( |
518 | 0 | fmt::format("illegal partial update block columns: {}, num key columns: {}, total " |
519 | 0 | "schema columns: {}", |
520 | 0 | block->columns(), _tablet_schema->num_key_columns(), |
521 | 0 | _tablet_schema->num_columns())); |
522 | 0 | } |
523 | 0 | RETURN_IF_ERROR(partial_update_preconditions_check(row_pos)); |
524 | | |
525 | | // find missing column cids |
526 | 0 | const auto& missing_cids = _opts.rowset_ctx->partial_update_info->missing_cids; |
527 | 0 | const auto& including_cids = _opts.rowset_ctx->partial_update_info->update_cids; |
528 | | |
529 | | // create full block and fill with input columns |
530 | 0 | auto full_block = _tablet_schema->create_block(); |
531 | 0 | size_t input_id = 0; |
532 | 0 | for (auto i : including_cids) { |
533 | 0 | full_block.replace_by_position(i, block->get_by_position(input_id++).column); |
534 | 0 | } |
535 | |
|
536 | 0 | if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION && |
537 | 0 | _tablet_schema->num_variant_columns() > 0) { |
538 | 0 | RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns( |
539 | 0 | full_block, *_tablet_schema, including_cids)); |
540 | 0 | } |
541 | 0 | RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns( |
542 | 0 | &full_block, row_pos, num_rows, including_cids)); |
543 | | |
544 | 0 | bool have_input_seq_column = false; |
545 | | // write including columns |
546 | 0 | std::vector<IOlapColumnDataAccessor*> key_columns; |
547 | 0 | IOlapColumnDataAccessor* seq_column = nullptr; |
548 | 0 | size_t segment_start_pos = 0; |
549 | 0 | for (auto cid : including_cids) { |
550 | | // here we get segment column row num before append data. |
551 | 0 | segment_start_pos = _column_writers[cid]->get_next_rowid(); |
552 | | // olap data convertor alway start from id = 0 |
553 | 0 | auto converted_result = _olap_data_convertor->convert_column_data(cid); |
554 | 0 | if (!converted_result.first.ok()) { |
555 | 0 | return converted_result.first; |
556 | 0 | } |
557 | 0 | if (cid < _num_sort_key_columns) { |
558 | 0 | key_columns.push_back(converted_result.second); |
559 | 0 | } else if (_tablet_schema->has_sequence_col() && |
560 | 0 | cid == _tablet_schema->sequence_col_idx()) { |
561 | 0 | seq_column = converted_result.second; |
562 | 0 | have_input_seq_column = true; |
563 | 0 | } |
564 | 0 | RETURN_IF_ERROR(_column_writers[cid]->append(converted_result.second->get_nullmap(), |
565 | 0 | converted_result.second->get_data(), |
566 | 0 | num_rows)); |
567 | 0 | } |
568 | | |
569 | 0 | bool has_default_or_nullable = false; |
570 | 0 | std::vector<bool> use_default_or_null_flag; |
571 | 0 | use_default_or_null_flag.reserve(num_rows); |
572 | 0 | const auto* delete_signs = |
573 | 0 | BaseTablet::get_delete_sign_column_data(full_block, row_pos + num_rows); |
574 | |
|
575 | 0 | const std::vector<RowsetSharedPtr>& specified_rowsets = _mow_context->rowset_ptrs; |
576 | 0 | std::vector<std::unique_ptr<SegmentCacheHandle>> segment_caches(specified_rowsets.size()); |
577 | |
|
578 | 0 | FixedReadPlan read_plan; |
579 | | |
580 | | // locate rows in base data |
581 | 0 | PartialUpdateStats stats; |
582 | |
|
583 | 0 | for (size_t block_pos = row_pos; block_pos < row_pos + num_rows; block_pos++) { |
584 | | // block segment |
585 | | // 2 -> 0 |
586 | | // 3 -> 1 |
587 | | // 4 -> 2 |
588 | | // 5 -> 3 |
589 | | // here row_pos = 2, num_rows = 4. |
590 | 0 | size_t delta_pos = block_pos - row_pos; |
591 | 0 | size_t segment_pos = segment_start_pos + delta_pos; |
592 | 0 | std::string key = _full_encode_keys(key_columns, delta_pos); |
593 | 0 | _maybe_invalid_row_cache(key); |
594 | 0 | if (have_input_seq_column) { |
595 | 0 | _encode_seq_column(seq_column, delta_pos, &key); |
596 | 0 | } |
597 | | // If the table have sequence column, and the include-cids don't contain the sequence |
598 | | // column, we need to update the primary key index builder at the end of this method. |
599 | | // At that time, we have a valid sequence column to encode the key with seq col. |
600 | 0 | if (!_tablet_schema->has_sequence_col() || have_input_seq_column) { |
601 | 0 | RETURN_IF_ERROR(_primary_key_index_builder->add_item(key)); |
602 | 0 | } |
603 | | |
604 | | // mark key with delete sign as deleted. |
605 | 0 | bool have_delete_sign = (delete_signs != nullptr && delete_signs[block_pos] != 0); |
606 | |
|
607 | 0 | auto not_found_cb = [&]() { |
608 | 0 | return _opts.rowset_ctx->partial_update_info->handle_new_key( |
609 | 0 | *_tablet_schema, [&]() -> std::string { |
610 | 0 | return block->dump_one_line(block_pos, |
611 | 0 | cast_set<int>(_num_sort_key_columns)); |
612 | 0 | }); |
613 | 0 | }; |
614 | 0 | auto update_read_plan = [&](const RowLocation& loc) { |
615 | 0 | read_plan.prepare_to_read(loc, segment_pos); |
616 | 0 | }; |
617 | 0 | RETURN_IF_ERROR(probe_key_for_mow(std::move(key), segment_pos, have_input_seq_column, |
618 | 0 | have_delete_sign, specified_rowsets, segment_caches, |
619 | 0 | has_default_or_nullable, use_default_or_null_flag, |
620 | 0 | update_read_plan, not_found_cb, stats)); |
621 | 0 | } |
622 | 0 | CHECK_EQ(use_default_or_null_flag.size(), num_rows); |
623 | |
|
624 | 0 | if (config::enable_merge_on_write_correctness_check) { |
625 | 0 | _tablet->add_sentinel_mark_to_delete_bitmap(_mow_context->delete_bitmap.get(), |
626 | 0 | *_mow_context->rowset_ids); |
627 | 0 | } |
628 | | |
629 | | // read to fill full block |
630 | 0 | RETURN_IF_ERROR(read_plan.fill_missing_columns( |
631 | 0 | _opts.rowset_ctx, _rsid_to_rowset, *_tablet_schema, full_block, |
632 | 0 | use_default_or_null_flag, has_default_or_nullable, |
633 | 0 | cast_set<uint32_t>(segment_start_pos), block)); |
634 | | |
635 | 0 | if (_tablet_schema->num_variant_columns() > 0) { |
636 | 0 | RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns( |
637 | 0 | full_block, *_tablet_schema, missing_cids)); |
638 | 0 | } |
639 | | |
640 | | // convert block to row store format |
641 | 0 | _serialize_block_to_row_column(full_block); |
642 | | |
643 | | // convert missing columns and send to column writer |
644 | 0 | RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns( |
645 | 0 | &full_block, row_pos, num_rows, missing_cids)); |
646 | 0 | for (auto cid : missing_cids) { |
647 | 0 | auto converted_result = _olap_data_convertor->convert_column_data(cid); |
648 | 0 | if (!converted_result.first.ok()) { |
649 | 0 | return converted_result.first; |
650 | 0 | } |
651 | 0 | if (_tablet_schema->has_sequence_col() && !have_input_seq_column && |
652 | 0 | cid == _tablet_schema->sequence_col_idx()) { |
653 | 0 | DCHECK_EQ(seq_column, nullptr); |
654 | 0 | seq_column = converted_result.second; |
655 | 0 | } |
656 | 0 | RETURN_IF_ERROR(_column_writers[cid]->append(converted_result.second->get_nullmap(), |
657 | 0 | converted_result.second->get_data(), |
658 | 0 | num_rows)); |
659 | 0 | } |
660 | 0 | _num_rows_updated += stats.num_rows_updated; |
661 | 0 | _num_rows_deleted += stats.num_rows_deleted; |
662 | 0 | _num_rows_new_added += stats.num_rows_new_added; |
663 | 0 | _num_rows_filtered += stats.num_rows_filtered; |
664 | 0 | if (_tablet_schema->has_sequence_col() && !have_input_seq_column) { |
665 | 0 | DCHECK_NE(seq_column, nullptr); |
666 | 0 | if (_num_rows_written != row_pos || |
667 | 0 | _primary_key_index_builder->num_rows() != _num_rows_written) { |
668 | 0 | return Status::InternalError( |
669 | 0 | "Correctness check failed, _num_rows_written: {}, row_pos: {}, primary key " |
670 | 0 | "index builder num rows: {}", |
671 | 0 | _num_rows_written, row_pos, _primary_key_index_builder->num_rows()); |
672 | 0 | } |
673 | 0 | RETURN_IF_ERROR( |
674 | 0 | _generate_primary_key_index(_key_coders, key_columns, seq_column, num_rows, false)); |
675 | 0 | } |
676 | | |
677 | 0 | _num_rows_written += num_rows; |
678 | 0 | DCHECK_EQ(_primary_key_index_builder->num_rows(), _num_rows_written) |
679 | 0 | << "primary key index builder num rows(" << _primary_key_index_builder->num_rows() |
680 | 0 | << ") not equal to segment writer's num rows written(" << _num_rows_written << ")"; |
681 | 0 | _olap_data_convertor->clear_source_content(); |
682 | |
|
683 | 0 | return Status::OK(); |
684 | 0 | } |
685 | | |
// Appends [row_pos, row_pos + num_rows) of `block` to this segment: converts each
// column to storage format, feeds the column writers, and maintains the short-key
// and/or primary-key index depending on the table model.
// Partial-update merge-on-write loads are redirected to
// append_block_with_partial_content() instead.
Status SegmentWriter::append_block(const Block* block, size_t row_pos, size_t num_rows) {
    if (_opts.rowset_ctx->partial_update_info &&
        _opts.rowset_ctx->partial_update_info->is_partial_update() &&
        _opts.write_type == DataWriteType::TYPE_DIRECT &&
        !_opts.rowset_ctx->is_transient_rowset_writer) {
        if (_opts.rowset_ctx->partial_update_info->is_fixed_partial_update()) {
            RETURN_IF_ERROR(append_block_with_partial_content(block, row_pos, num_rows));
        } else {
            return Status::NotSupported<false>(
                    "SegmentWriter doesn't support flexible partial update, please set "
                    "enable_vertical_segment_writer=true in be.conf on all BEs to use "
                    "VerticalSegmentWriter.");
        }
        return Status::OK();
    }
    // The block may legitimately carry more columns than we write (extra payload),
    // but never fewer.
    if (block->columns() < _column_writers.size()) {
        return Status::InternalError(
                "block->columns() < _column_writers.size(), block->columns()=" +
                std::to_string(block->columns()) +
                ", _column_writers.size()=" + std::to_string(_column_writers.size()) +
                ", _tablet_schema->dump_structure()=" + _tablet_schema->dump_structure());
    }
    CHECK(block->columns() >= _column_writers.size())
            << ", block->columns()=" << block->columns()
            << ", _column_writers.size()=" << _column_writers.size()
            << ", _tablet_schema->dump_structure()=" << _tablet_schema->dump_structure();
    // Row column should be filled here when it's a directly write from memtable
    // or it's schema change write(since column data type maybe changed, so we should reubild)
    if (_opts.write_type == DataWriteType::TYPE_DIRECT ||
        _opts.write_type == DataWriteType::TYPE_SCHEMA_CHANGE) {
        _serialize_block_to_row_column(*block);
    }

    if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION &&
        _tablet_schema->num_variant_columns() > 0) {
        RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns(
                const_cast<Block&>(*block), *_tablet_schema, _column_ids));
    }

    _olap_data_convertor->set_source_content(block, row_pos, num_rows);

    // find all row pos for short key indexes
    std::vector<size_t> short_key_pos;
    if (_has_key) {
        // We build a short key index every `_opts.num_rows_per_block` rows. Specifically, we
        // build a short key index using 1st rows for first block and `_short_key_row_pos - _row_count`
        // for next blocks.
        // Ensure we build a short key index using 1st rows only for the first block (ISSUE-9766).
        if (UNLIKELY(_short_key_row_pos == 0 && _num_rows_written == 0)) {
            short_key_pos.push_back(0);
        }
        // Positions are relative to this block: absolute block boundary minus rows
        // already written by previous append calls.
        while (_short_key_row_pos + _opts.num_rows_per_block < _num_rows_written + num_rows) {
            _short_key_row_pos += _opts.num_rows_per_block;
            short_key_pos.push_back(_short_key_row_pos - _num_rows_written);
        }
    }

    // convert column data from engine format to storage layer format
    std::vector<IOlapColumnDataAccessor*> key_columns;
    IOlapColumnDataAccessor* seq_column = nullptr;
    for (size_t id = 0; id < _column_writers.size(); ++id) {
        // olap data convertor alway start from id = 0
        auto converted_result = _olap_data_convertor->convert_column_data(id);
        if (!converted_result.first.ok()) {
            return converted_result.first;
        }
        auto cid = _column_ids[id];
        if (_has_key && cid < _tablet_schema->num_key_columns()) {
            key_columns.push_back(converted_result.second);
        } else if (_has_key && _tablet_schema->has_sequence_col() &&
                   cid == _tablet_schema->sequence_col_idx()) {
            seq_column = converted_result.second;
        }
        RETURN_IF_ERROR(_column_writers[id]->append(converted_result.second->get_nullmap(),
                                                    converted_result.second->get_data(), num_rows));
    }
    if (_opts.write_type == DataWriteType::TYPE_COMPACTION) {
        RETURN_IF_ERROR(
                _variant_stats_calculator->calculate_variant_stats(block, row_pos, num_rows));
    }
    if (_has_key) {
        if (_is_mow_with_cluster_key()) {
            // for now we don't need to query short key index for CLUSTER BY feature,
            // but we still write the index for future usage.
            // 1. generate primary key index, the key_columns is primary_key_columns
            RETURN_IF_ERROR(_generate_primary_key_index(_primary_key_coders, key_columns,
                                                        seq_column, num_rows, true));
            // 2. generate short key index (use cluster key)
            key_columns.clear();
            for (const auto& cid : _tablet_schema->cluster_key_uids()) {
                // find cluster key index in tablet schema
                auto cluster_key_index = _tablet_schema->field_index(cid);
                if (cluster_key_index == -1) {
                    return Status::InternalError(
                            "could not find cluster key column with unique_id=" +
                            std::to_string(cid) + " in tablet schema");
                }
                // Map the schema index back to this write's column id list to reuse
                // the already-converted data.
                bool found = false;
                for (auto i = 0; i < _column_ids.size(); ++i) {
                    if (_column_ids[i] == cluster_key_index) {
                        auto converted_result = _olap_data_convertor->convert_column_data(i);
                        if (!converted_result.first.ok()) {
                            return converted_result.first;
                        }
                        key_columns.push_back(converted_result.second);
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    return Status::InternalError(
                            "could not found cluster key column with unique_id=" +
                            std::to_string(cid) +
                            ", tablet schema index=" + std::to_string(cluster_key_index));
                }
            }
            RETURN_IF_ERROR(_generate_short_key_index(key_columns, num_rows, short_key_pos));
        } else if (_is_mow()) {
            RETURN_IF_ERROR(_generate_primary_key_index(_key_coders, key_columns, seq_column,
                                                        num_rows, false));
        } else {
            RETURN_IF_ERROR(_generate_short_key_index(key_columns, num_rows, short_key_pos));
        }
    }

    _num_rows_written += num_rows;
    _olap_data_convertor->clear_source_content();
    return Status::OK();
}
815 | | |
816 | 2.07k | int64_t SegmentWriter::max_row_to_add(size_t row_avg_size_in_bytes) { |
817 | 2.07k | auto segment_size = estimate_segment_size(); |
818 | 2.07k | if (segment_size >= MAX_SEGMENT_SIZE || _num_rows_written >= _opts.max_rows_per_segment) |
819 | 354 | [[unlikely]] { |
820 | 354 | return 0; |
821 | 354 | } |
822 | 1.72k | int64_t size_rows = ((int64_t)MAX_SEGMENT_SIZE - (int64_t)segment_size) / row_avg_size_in_bytes; |
823 | 1.72k | int64_t count_rows = (int64_t)_opts.max_rows_per_segment - _num_rows_written; |
824 | | |
825 | 1.72k | return std::min(size_rows, count_rows); |
826 | 2.07k | } |
827 | | |
828 | | std::string SegmentWriter::_full_encode_keys( |
829 | 6.69k | const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos, bool null_first) { |
830 | 6.69k | assert(_key_index_size.size() == _num_sort_key_columns); |
831 | 6.69k | assert(key_columns.size() == _num_sort_key_columns && |
832 | 6.69k | _key_coders.size() == _num_sort_key_columns); |
833 | 6.69k | return _full_encode_keys(_key_coders, key_columns, pos, null_first); |
834 | 6.69k | } |
835 | | |
836 | | std::string SegmentWriter::_full_encode_keys( |
837 | | const std::vector<const KeyCoder*>& key_coders, |
838 | 6.70k | const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos, bool null_first) { |
839 | 6.70k | assert(key_columns.size() == key_coders.size()); |
840 | | |
841 | 6.70k | std::string encoded_keys; |
842 | 6.70k | size_t cid = 0; |
843 | 6.70k | for (const auto& column : key_columns) { |
844 | 5.90k | auto field = column->get_data_at(pos); |
845 | 5.90k | if (UNLIKELY(!field)) { |
846 | 0 | if (null_first) { |
847 | 0 | encoded_keys.push_back(KEY_NULL_FIRST_MARKER); |
848 | 0 | } else { |
849 | 0 | encoded_keys.push_back(KEY_NORMAL_MARKER); |
850 | 0 | } |
851 | 0 | ++cid; |
852 | 0 | continue; |
853 | 0 | } |
854 | 5.90k | encoded_keys.push_back(KEY_NORMAL_MARKER); |
855 | 5.90k | DCHECK(key_coders[cid] != nullptr); |
856 | 5.90k | key_coders[cid]->full_encode_ascending(field, &encoded_keys); |
857 | 5.90k | ++cid; |
858 | 5.90k | } |
859 | 6.70k | return encoded_keys; |
860 | 6.70k | } |
861 | | |
862 | | void SegmentWriter::_encode_seq_column(const IOlapColumnDataAccessor* seq_column, size_t pos, |
863 | 0 | std::string* encoded_keys) { |
864 | 0 | auto field = seq_column->get_data_at(pos); |
865 | | // To facilitate the use of the primary key index, encode the seq column |
866 | | // to the minimum value of the corresponding length when the seq column |
867 | | // is null |
868 | 0 | if (UNLIKELY(!field)) { |
869 | 0 | encoded_keys->push_back(KEY_NULL_FIRST_MARKER); |
870 | 0 | size_t seq_col_length = _tablet_schema->column(_tablet_schema->sequence_col_idx()).length(); |
871 | 0 | encoded_keys->append(seq_col_length, KEY_MINIMAL_MARKER); |
872 | 0 | return; |
873 | 0 | } |
874 | 0 | encoded_keys->push_back(KEY_NORMAL_MARKER); |
875 | 0 | _seq_coder->full_encode_ascending(field, encoded_keys); |
876 | 0 | } |
877 | | |
878 | 0 | void SegmentWriter::_encode_rowid(const uint32_t rowid, std::string* encoded_keys) { |
879 | 0 | encoded_keys->push_back(KEY_NORMAL_MARKER); |
880 | 0 | _rowid_coder->full_encode_ascending(&rowid, encoded_keys); |
881 | 0 | } |
882 | | |
883 | | std::string SegmentWriter::_encode_keys(const std::vector<IOlapColumnDataAccessor*>& key_columns, |
884 | 13.0k | size_t pos) { |
885 | 13.0k | assert(key_columns.size() == _num_short_key_columns); |
886 | | |
887 | 13.0k | std::string encoded_keys; |
888 | 13.0k | size_t cid = 0; |
889 | 13.0k | for (const auto& column : key_columns) { |
890 | 10.0k | auto field = column->get_data_at(pos); |
891 | 10.0k | if (UNLIKELY(!field)) { |
892 | 0 | encoded_keys.push_back(KEY_NULL_FIRST_MARKER); |
893 | 0 | ++cid; |
894 | 0 | continue; |
895 | 0 | } |
896 | 10.0k | encoded_keys.push_back(KEY_NORMAL_MARKER); |
897 | 10.0k | _key_coders[cid]->encode_ascending(field, _key_index_size[cid], &encoded_keys); |
898 | 10.0k | ++cid; |
899 | 10.0k | } |
900 | 13.0k | return encoded_keys; |
901 | 13.0k | } |
902 | | |
903 | | // TODO(lingbin): Currently this function does not include the size of various indexes, |
904 | | // We should make this more precise. |
905 | | // NOTE: This function will be called when any row of data is added, so we need to |
906 | | // make this function efficient. |
907 | 2.40k | uint64_t SegmentWriter::estimate_segment_size() { |
908 | | // footer_size(4) + checksum(4) + segment_magic(4) |
909 | 2.40k | uint64_t size = 12; |
910 | 9.64k | for (auto& column_writer : _column_writers) { |
911 | 9.64k | size += column_writer->estimate_buffer_size(); |
912 | 9.64k | } |
913 | 2.40k | if (_is_mow_with_cluster_key()) { |
914 | 0 | size += _primary_key_index_builder->size() + _short_key_index_builder->size(); |
915 | 2.40k | } else if (_is_mow()) { |
916 | 0 | size += _primary_key_index_builder->size(); |
917 | 2.40k | } else { |
918 | 2.40k | size += _short_key_index_builder->size(); |
919 | 2.40k | } |
920 | | |
921 | | // update the mem_tracker of segment size |
922 | 2.40k | _mem_tracker->consume(size - _mem_tracker->consumption()); |
923 | 2.40k | return size; |
924 | 2.40k | } |
925 | | |
926 | 3.37k | Status SegmentWriter::finalize_columns_data() { |
927 | 3.37k | if (_has_key) { |
928 | 2.44k | _row_count = _num_rows_written; |
929 | 2.44k | } else { |
930 | 924 | DCHECK(_row_count == _num_rows_written) |
931 | 0 | << "_row_count != _num_rows_written:" << _row_count << " vs. " << _num_rows_written; |
932 | 924 | if (_row_count != _num_rows_written) { |
933 | 0 | std::stringstream ss; |
934 | 0 | ss << "_row_count != _num_rows_written:" << _row_count << " vs. " << _num_rows_written; |
935 | 0 | LOG(WARNING) << ss.str(); |
936 | 0 | return Status::InternalError(ss.str()); |
937 | 0 | } |
938 | 924 | } |
939 | 3.37k | _num_rows_written = 0; |
940 | | |
941 | 10.2k | for (auto& column_writer : _column_writers) { |
942 | 10.2k | RETURN_IF_ERROR(column_writer->finish()); |
943 | 10.2k | } |
944 | 3.37k | RETURN_IF_ERROR(_write_data()); |
945 | | |
946 | 3.37k | return Status::OK(); |
947 | 3.37k | } |
948 | | |
// Writes all per-column index structures (ordinal, zone map, inverted, ANN,
// bloom filter), then the segment-level key index appropriate for the table
// model, and finally resets the writers via clear().
// @param index_size out: bytes appended by this step. For merge-on-write the
//        primary key index size is accounted via the builder's disk_size(),
//        because its pages are interleaved with segment data.
Status SegmentWriter::finalize_columns_index(uint64_t* index_size) {
    uint64_t index_start = _file_writer->bytes_appended();
    RETURN_IF_ERROR(_write_ordinal_index());
    RETURN_IF_ERROR(_write_zone_map());
    RETURN_IF_ERROR(_write_inverted_index());
    RETURN_IF_ERROR(_write_ann_index());
    RETURN_IF_ERROR(_write_bloom_filter_index());

    *index_size = _file_writer->bytes_appended() - index_start;
    if (_has_key) {
        if (_is_mow_with_cluster_key()) {
            // Cluster-key mow: primary keys were buffered in memory (with row ids
            // appended) because rows are sorted by cluster key, not primary key.
            // 1. sort primary keys
            std::sort(_primary_keys.begin(), _primary_keys.end());
            // 2. write primary keys index
            std::string last_key;
            for (const auto& key : _primary_keys) {
                DCHECK(key.compare(last_key) > 0)
                        << "found duplicate key or key is not sorted! current key: " << key
                        << ", last key: " << last_key;
                RETURN_IF_ERROR(_primary_key_index_builder->add_item(key));
                last_key = key;
            }

            RETURN_IF_ERROR(_write_short_key_index());
            // Re-measure so the short key index is included before adding the
            // primary key index's own on-disk size.
            *index_size = _file_writer->bytes_appended() - index_start;
            RETURN_IF_ERROR(_write_primary_key_index());
            *index_size += _primary_key_index_builder->disk_size();
        } else if (_is_mow()) {
            RETURN_IF_ERROR(_write_primary_key_index());
            // IndexedColumnWriter write data pages mixed with segment data, we should use
            // the stat from primary key index builder.
            *index_size += _primary_key_index_builder->disk_size();
        } else {
            RETURN_IF_ERROR(_write_short_key_index());
            *index_size = _file_writer->bytes_appended() - index_start;
        }
    }
    // reset all column writers and data_conveter
    clear();

    return Status::OK();
}
991 | | |
992 | 2.44k | Status SegmentWriter::finalize_footer(uint64_t* segment_file_size) { |
993 | 2.44k | RETURN_IF_ERROR(_write_footer()); |
994 | | // finish |
995 | 2.44k | RETURN_IF_ERROR(_file_writer->close(true)); |
996 | 2.44k | *segment_file_size = _file_writer->bytes_appended(); |
997 | 2.44k | if (*segment_file_size == 0) { |
998 | 0 | return Status::Corruption("Bad segment, file size = 0"); |
999 | 0 | } |
1000 | 2.44k | return Status::OK(); |
1001 | 2.44k | } |
1002 | | |
// Fully finalizes the segment: data pages, index structures, then footer.
// @param segment_file_size out: total bytes of the finished segment file
// @param index_size        out: bytes attributed to index structures
Status SegmentWriter::finalize(uint64_t* segment_file_size, uint64_t* index_size) {
    MonotonicStopWatch timer;
    timer.start();
    // check disk capacity
    if (_data_dir != nullptr && _data_dir->reach_capacity_limit((int64_t)estimate_segment_size())) {
        return Status::Error<DISK_REACH_CAPACITY_LIMIT>("disk {} exceed capacity limit, path: {}",
                                                        _data_dir->path_hash(), _data_dir->path());
    }
    // write data
    RETURN_IF_ERROR(finalize_columns_data());
    // Get the index start before finalize_footer since this function would write new data.
    uint64_t index_start = _file_writer->bytes_appended();
    // write index
    RETURN_IF_ERROR(finalize_columns_index(index_size));
    // write footer
    RETURN_IF_ERROR(finalize_footer(segment_file_size));

    // Flag flushes slower than 5 seconds (value is nanoseconds).
    if (timer.elapsed_time() > 5000000000l) {
        LOG(INFO) << "segment flush consumes a lot time_ns " << timer.elapsed_time()
                  << ", segmemt_size " << *segment_file_size;
    }
    // When the cache type is not ttl(expiration time == 0), the data should be split into normal cache queue
    // and index cache queue
    if (auto* cache_builder = _file_writer->cache_builder(); cache_builder != nullptr &&
                                                            cache_builder->_expiration_time == 0 &&
                                                            config::is_cloud_mode()) {
        auto size = *index_size + *segment_file_size;
        auto holder = cache_builder->allocate_cache_holder(index_start, size, _tablet->tablet_id());
        // Reclassify the index byte range into the file cache's INDEX queue;
        // failures are deliberately ignored (best effort).
        for (auto& segment : holder->file_blocks) {
            static_cast<void>(segment->change_cache_type(io::FileCacheType::INDEX));
        }
    }
    return Status::OK();
}
1037 | | |
1038 | 3.39k | void SegmentWriter::clear() { |
1039 | 10.2k | for (auto& column_writer : _column_writers) { |
1040 | 10.2k | column_writer.reset(); |
1041 | 10.2k | } |
1042 | 3.39k | _column_writers.clear(); |
1043 | 3.39k | _column_ids.clear(); |
1044 | 3.39k | _olap_data_convertor.reset(); |
1045 | 3.39k | } |
1046 | | |
1047 | | // write column data to file one by one |
1048 | 3.37k | Status SegmentWriter::_write_data() { |
1049 | 10.2k | for (auto& column_writer : _column_writers) { |
1050 | 10.2k | RETURN_IF_ERROR(column_writer->write_data()); |
1051 | | |
1052 | 10.2k | auto* column_meta = column_writer->get_column_meta(); |
1053 | 10.2k | DCHECK(column_meta != nullptr); |
1054 | 10.2k | column_meta->set_compressed_data_bytes( |
1055 | 10.2k | (column_meta->has_compressed_data_bytes() ? column_meta->compressed_data_bytes() |
1056 | 10.2k | : 0) + |
1057 | 10.2k | column_writer->get_total_compressed_data_pages_bytes()); |
1058 | 10.2k | column_meta->set_uncompressed_data_bytes( |
1059 | 10.2k | (column_meta->has_uncompressed_data_bytes() ? column_meta->uncompressed_data_bytes() |
1060 | 10.2k | : 0) + |
1061 | 10.2k | column_writer->get_total_uncompressed_data_pages_bytes()); |
1062 | 10.2k | column_meta->set_raw_data_bytes( |
1063 | 10.2k | (column_meta->has_raw_data_bytes() ? column_meta->raw_data_bytes() : 0) + |
1064 | 10.2k | column_writer->get_raw_data_bytes()); |
1065 | 10.2k | } |
1066 | 3.37k | return Status::OK(); |
1067 | 3.37k | } |
1068 | | |
1069 | | // write ordinal index after data has been written |
1070 | 3.37k | Status SegmentWriter::_write_ordinal_index() { |
1071 | 10.2k | for (auto& column_writer : _column_writers) { |
1072 | 10.2k | RETURN_IF_ERROR(column_writer->write_ordinal_index()); |
1073 | 10.2k | } |
1074 | 3.37k | return Status::OK(); |
1075 | 3.37k | } |
1076 | | |
1077 | 3.37k | Status SegmentWriter::_write_zone_map() { |
1078 | 10.2k | for (auto& column_writer : _column_writers) { |
1079 | 10.2k | RETURN_IF_ERROR(column_writer->write_zone_map()); |
1080 | 10.2k | } |
1081 | 3.37k | return Status::OK(); |
1082 | 3.37k | } |
1083 | | |
1084 | 3.37k | Status SegmentWriter::_write_inverted_index() { |
1085 | 10.2k | for (auto& column_writer : _column_writers) { |
1086 | 10.2k | RETURN_IF_ERROR(column_writer->write_inverted_index()); |
1087 | 10.2k | } |
1088 | 3.37k | return Status::OK(); |
1089 | 3.37k | } |
1090 | | |
1091 | 3.37k | Status SegmentWriter::_write_ann_index() { |
1092 | 10.2k | for (auto& column_writer : _column_writers) { |
1093 | 10.2k | RETURN_IF_ERROR(column_writer->write_ann_index()); |
1094 | 10.2k | } |
1095 | 3.37k | return Status::OK(); |
1096 | 3.37k | } |
1097 | | |
1098 | 3.37k | Status SegmentWriter::_write_bloom_filter_index() { |
1099 | 10.2k | for (auto& column_writer : _column_writers) { |
1100 | 10.2k | RETURN_IF_ERROR(column_writer->write_bloom_filter_index()); |
1101 | 10.2k | } |
1102 | 3.37k | return Status::OK(); |
1103 | 3.37k | } |
1104 | | |
1105 | 2.37k | Status SegmentWriter::_write_short_key_index() { |
1106 | 2.37k | std::vector<Slice> body; |
1107 | 2.37k | PageFooterPB footer; |
1108 | 2.37k | RETURN_IF_ERROR(_short_key_index_builder->finalize(_row_count, &body, &footer)); |
1109 | 2.37k | PagePointer pp; |
1110 | | // short key index page is not compressed right now |
1111 | 2.37k | RETURN_IF_ERROR(PageIO::write_page(_file_writer, body, footer, &pp)); |
1112 | 2.37k | pp.to_proto(_footer.mutable_short_key_index_page()); |
1113 | 2.37k | return Status::OK(); |
1114 | 2.37k | } |
1115 | | |
1116 | 69 | Status SegmentWriter::_write_primary_key_index() { |
1117 | 69 | CHECK_EQ(_primary_key_index_builder->num_rows(), _row_count); |
1118 | 69 | return _primary_key_index_builder->finalize(_footer.mutable_primary_key_index_meta()); |
1119 | 69 | } |
1120 | | |
// Serializes and appends the segment footer. On-disk layout (must not change):
//   [optional external column meta] SegmentFooterPB, FooterPBSize(4),
//   FooterPBChecksum(4), MagicNumber(4)
Status SegmentWriter::_write_footer() {
    _footer.set_num_rows(_row_count);
    // Decide whether to externalize ColumnMetaPB by tablet default, and stamp footer version
    if (_tablet_schema->is_external_segment_column_meta_used()) {
        _footer.set_version(SEGMENT_FOOTER_VERSION_V3_EXT_COL_META);
        VLOG_DEBUG << "use external column meta";
        // External ColumnMetaPB writing (optional)
        RETURN_IF_ERROR(ExternalColMetaUtil::write_external_column_meta(
                _file_writer, &_footer, _opts.compression_type,
                [this](const std::vector<Slice>& slices) { return _write_raw_data(slices); }));
    }

    // Footer := SegmentFooterPB, FooterPBSize(4), FooterPBChecksum(4), MagicNumber(4)
    std::string footer_buf;
    VLOG_DEBUG << "footer " << _footer.DebugString();
    if (!_footer.SerializeToString(&footer_buf)) {
        return Status::InternalError("failed to serialize segment footer");
    }

    faststring fixed_buf;
    // footer's size
    put_fixed32_le(&fixed_buf, cast_set<uint32_t>(footer_buf.size()));
    // footer's checksum (CRC32C over the serialized footer bytes)
    uint32_t checksum = crc32c::Crc32c(footer_buf.data(), footer_buf.size());
    put_fixed32_le(&fixed_buf, checksum);
    // Append magic number. we don't write magic number in the header because
    // that will need an extra seek when reading
    fixed_buf.append(k_segment_magic, k_segment_magic_length);

    std::vector<Slice> slices {footer_buf, fixed_buf};
    return _write_raw_data(slices);
}
1153 | | |
1154 | 4.17k | Status SegmentWriter::_write_raw_data(const std::vector<Slice>& slices) { |
1155 | 4.17k | RETURN_IF_ERROR(_file_writer->appendv(&slices[0], slices.size())); |
1156 | 4.17k | return Status::OK(); |
1157 | 4.17k | } |
1158 | | |
1159 | 2.44k | Slice SegmentWriter::min_encoded_key() { |
1160 | 2.44k | return (_primary_key_index_builder == nullptr) ? Slice(_min_key.data(), _min_key.size()) |
1161 | 2.44k | : _primary_key_index_builder->min_key(); |
1162 | 2.44k | } |
1163 | 2.44k | Slice SegmentWriter::max_encoded_key() { |
1164 | 2.44k | return (_primary_key_index_builder == nullptr) ? Slice(_max_key.data(), _max_key.size()) |
1165 | 2.44k | : _primary_key_index_builder->max_key(); |
1166 | 2.44k | } |
1167 | | |
1168 | 203 | void SegmentWriter::set_min_max_key(const Slice& key) { |
1169 | 203 | if (UNLIKELY(_is_first_row)) { |
1170 | 5 | _min_key.append(key.get_data(), key.get_size()); |
1171 | 5 | _is_first_row = false; |
1172 | 5 | } |
1173 | 203 | if (key.compare(_max_key) > 0) { |
1174 | 203 | _max_key.clear(); |
1175 | 203 | _max_key.append(key.get_data(), key.get_size()); |
1176 | 203 | } |
1177 | 203 | } |
1178 | | |
1179 | 2.94k | void SegmentWriter::set_min_key(const Slice& key) { |
1180 | 2.94k | if (UNLIKELY(_is_first_row)) { |
1181 | 2.37k | _min_key.append(key.get_data(), key.get_size()); |
1182 | 2.37k | _is_first_row = false; |
1183 | 2.37k | } |
1184 | 2.94k | } |
1185 | | |
1186 | 2.94k | void SegmentWriter::set_max_key(const Slice& key) { |
1187 | 2.94k | _max_key.clear(); |
1188 | 2.94k | _max_key.append(key.get_data(), key.get_size()); |
1189 | 2.94k | } |
1190 | | |
1191 | 0 | void SegmentWriter::set_mow_context(std::shared_ptr<MowContext> mow_context) { |
1192 | 0 | _mow_context = mow_context; |
1193 | 0 | } |
1194 | | |
// Produces primary key index entries for `num_rows` rows.
// @param need_sort false: rows are already sorted by primary key (plain mow
//        table) — keys are added to the index builder immediately, and must be
//        strictly increasing. true: rows are sorted by cluster key instead, so
//        keys (suffixed with the row id for uniqueness) are buffered in
//        _primary_keys and sorted/written later in finalize_columns_index().
Status SegmentWriter::_generate_primary_key_index(
        const std::vector<const KeyCoder*>& primary_key_coders,
        const std::vector<IOlapColumnDataAccessor*>& primary_key_columns,
        IOlapColumnDataAccessor* seq_column, size_t num_rows, bool need_sort) {
    if (!need_sort) { // mow table without cluster key
        std::string last_key;
        for (size_t pos = 0; pos < num_rows; pos++) {
            // use _key_coders
            std::string key = _full_encode_keys(primary_key_columns, pos);
            _maybe_invalid_row_cache(key);
            if (_tablet_schema->has_sequence_col()) {
                _encode_seq_column(seq_column, pos, &key);
            }
            DCHECK(key.compare(last_key) > 0)
                    << "found duplicate key or key is not sorted! current key: " << key
                    << ", last key: " << last_key;
            RETURN_IF_ERROR(_primary_key_index_builder->add_item(key));
            last_key = std::move(key);
        }
    } else { // mow table with cluster key
        // generate primary keys in memory
        for (uint32_t pos = 0; pos < num_rows; pos++) {
            std::string key = _full_encode_keys(primary_key_coders, primary_key_columns, pos);
            _maybe_invalid_row_cache(key);
            if (_tablet_schema->has_sequence_col()) {
                _encode_seq_column(seq_column, pos, &key);
            }
            // Row id suffix keeps buffered keys unique and stable under the
            // later sort.
            _encode_rowid(pos + _num_rows_written, &key);
            _primary_keys_size += key.size();
            _primary_keys.emplace_back(std::move(key));
        }
    }
    return Status::OK();
}
1229 | | |
1230 | | Status SegmentWriter::_generate_short_key_index(std::vector<IOlapColumnDataAccessor*>& key_columns, |
1231 | | size_t num_rows, |
1232 | 2.94k | const std::vector<size_t>& short_key_pos) { |
1233 | | // use _key_coders |
1234 | 2.94k | set_min_key(_full_encode_keys(key_columns, 0)); |
1235 | 2.94k | set_max_key(_full_encode_keys(key_columns, num_rows - 1)); |
1236 | 2.94k | DCHECK(Slice(_max_key.data(), _max_key.size()) |
1237 | 0 | .compare(Slice(_min_key.data(), _min_key.size())) >= 0) |
1238 | 0 | << "key is not sorted! min key: " << _min_key << ", max key: " << _max_key; |
1239 | | |
1240 | 2.94k | key_columns.resize(_num_short_key_columns); |
1241 | 2.94k | std::string last_key; |
1242 | 13.0k | for (const auto pos : short_key_pos) { |
1243 | 13.0k | std::string key = _encode_keys(key_columns, pos); |
1244 | 13.0k | DCHECK(key.compare(last_key) >= 0) |
1245 | 0 | << "key is not sorted! current key: " << key << ", last key: " << last_key; |
1246 | 13.0k | RETURN_IF_ERROR(_short_key_index_builder->add_item(key)); |
1247 | 13.0k | last_key = std::move(key); |
1248 | 13.0k | } |
1249 | 2.94k | return Status::OK(); |
1250 | 2.94k | } |
1251 | | |
1252 | 281k | inline bool SegmentWriter::_is_mow() { |
1253 | 281k | return _tablet_schema->keys_type() == UNIQUE_KEYS && _opts.enable_unique_key_merge_on_write; |
1254 | 281k | } |
1255 | | |
1256 | 139k | inline bool SegmentWriter::_is_mow_with_cluster_key() { |
1257 | 139k | return _is_mow() && !_tablet_schema->cluster_key_uids().empty(); |
1258 | 139k | } |
1259 | | |
1260 | | } // namespace segment_v2 |
1261 | | } // namespace doris |