be/src/storage/segment/vertical_segment_writer.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "storage/segment/vertical_segment_writer.h" |
19 | | |
20 | | #include <crc32c/crc32c.h> |
21 | | #include <gen_cpp/olap_file.pb.h> |
22 | | #include <gen_cpp/segment_v2.pb.h> |
23 | | #include <parallel_hashmap/phmap.h> |
24 | | |
25 | | #include <cassert> |
26 | | #include <memory> |
27 | | #include <ostream> |
28 | | #include <string> |
29 | | #include <unordered_map> |
30 | | #include <unordered_set> |
31 | | #include <utility> |
32 | | |
33 | | #include "cloud/config.h" |
34 | | #include "common/cast_set.h" |
35 | | #include "common/compiler_util.h" // IWYU pragma: keep |
36 | | #include "common/config.h" |
37 | | #include "common/logging.h" // LOG |
38 | | #include "common/status.h" |
39 | | #include "core/assert_cast.h" |
40 | | #include "core/block/block.h" |
41 | | #include "core/block/column_with_type_and_name.h" |
42 | | #include "core/column/column_nullable.h" |
43 | | #include "core/column/column_vector.h" |
44 | | #include "core/data_type/data_type.h" |
45 | | #include "core/data_type/data_type_factory.hpp" |
46 | | #include "core/data_type/data_type_number.h" // IWYU pragma: keep |
47 | | #include "core/types.h" |
48 | | #include "exec/common/variant_util.h" |
49 | | #include "io/fs/file_writer.h" |
50 | | #include "io/fs/local_file_system.h" |
51 | | #include "runtime/exec_env.h" |
52 | | #include "runtime/memory/mem_tracker.h" |
53 | | #include "service/point_query_executor.h" |
54 | | #include "storage/data_dir.h" |
55 | | #include "storage/index/index_file_writer.h" |
56 | | #include "storage/index/inverted/inverted_index_desc.h" |
57 | | #include "storage/index/inverted/inverted_index_fs_directory.h" |
58 | | #include "storage/index/primary_key_index.h" |
59 | | #include "storage/index/short_key_index.h" |
60 | | #include "storage/iterator/olap_data_convertor.h" |
61 | | #include "storage/key_coder.h" |
62 | | #include "storage/olap_common.h" |
63 | | #include "storage/partial_update_info.h" |
64 | | #include "storage/row_cursor.h" // RowCursor // IWYU pragma: keep |
65 | | #include "storage/rowset/rowset_fwd.h" |
66 | | #include "storage/rowset/rowset_writer_context.h" // RowsetWriterContext |
67 | | #include "storage/rowset/segment_creator.h" |
68 | | #include "storage/segment/column_writer.h" // ColumnWriter |
69 | | #include "storage/segment/external_col_meta_util.h" |
70 | | #include "storage/segment/page_io.h" |
71 | | #include "storage/segment/page_pointer.h" |
72 | | #include "storage/segment/segment_loader.h" |
73 | | #include "storage/segment/variant/variant_ext_meta_writer.h" |
74 | | #include "storage/tablet/base_tablet.h" |
75 | | #include "storage/tablet/tablet_schema.h" |
76 | | #include "storage/utils.h" |
77 | | #include "util/coding.h" |
78 | | #include "util/debug_points.h" |
79 | | #include "util/faststring.h" |
80 | | #include "util/json/path_in_data.h" |
81 | | #include "util/jsonb/serialize.h" |
82 | | namespace doris::segment_v2 { |
83 | | |
84 | | using namespace ErrorCode; |
85 | | using namespace KeyConsts; |
86 | | |
87 | | static const char* k_segment_magic = "D0R1"; |
88 | | static const uint32_t k_segment_magic_length = 4; |
89 | | |
90 | 12 | inline std::string vertical_segment_writer_mem_tracker_name(uint32_t segment_id) { |
91 | 12 | return "VerticalSegmentWriter:Segment-" + std::to_string(segment_id); |
92 | 12 | } |
93 | | |
94 | | VerticalSegmentWriter::VerticalSegmentWriter(io::FileWriter* file_writer, uint32_t segment_id, |
95 | | TabletSchemaSPtr tablet_schema, BaseTabletSPtr tablet, |
96 | | DataDir* data_dir, |
97 | | const VerticalSegmentWriterOptions& opts, |
98 | | IndexFileWriter* index_file_writer) |
99 | 12 | : _segment_id(segment_id), |
100 | 12 | _tablet_schema(std::move(tablet_schema)), |
101 | 12 | _tablet(std::move(tablet)), |
102 | 12 | _data_dir(data_dir), |
103 | 12 | _opts(opts), |
104 | 12 | _file_writer(file_writer), |
105 | 12 | _index_file_writer(index_file_writer), |
106 | 12 | _mem_tracker(std::make_unique<MemTracker>( |
107 | 12 | vertical_segment_writer_mem_tracker_name(segment_id))), |
108 | 12 | _mow_context(std::move(opts.mow_ctx)), |
109 | 12 | _block_aggregator(*this) { |
110 | 12 | CHECK_NOTNULL(file_writer); |
111 | 12 | _num_sort_key_columns = _tablet_schema->num_key_columns(); |
112 | 12 | _num_short_key_columns = _tablet_schema->num_short_key_columns(); |
113 | 12 | if (!_is_mow_with_cluster_key()) { |
114 | 11 | DCHECK(_num_sort_key_columns >= _num_short_key_columns) |
115 | 0 | << ", table_id=" << _tablet_schema->table_id() |
116 | 0 | << ", num_key_columns=" << _num_sort_key_columns |
117 | 0 | << ", num_short_key_columns=" << _num_short_key_columns |
118 | 0 | << ", cluster_key_columns=" << _tablet_schema->cluster_key_uids().size(); |
119 | 11 | } |
120 | 43 | for (size_t cid = 0; cid < _num_sort_key_columns; ++cid) { |
121 | 31 | const auto& column = _tablet_schema->column(cid); |
122 | 31 | _key_coders.push_back(get_key_coder(column.type())); |
123 | 31 | _key_index_size.push_back(cast_set<uint16_t>(column.index_length())); |
124 | 31 | } |
125 | | // encode the sequence id into the primary key index |
126 | 12 | if (_is_mow()) { |
127 | 3 | if (_tablet_schema->has_sequence_col()) { |
128 | 3 | const auto& column = _tablet_schema->column(_tablet_schema->sequence_col_idx()); |
129 | 3 | _seq_coder = get_key_coder(column.type()); |
130 | 3 | } |
131 | | // encode the rowid into the primary key index |
132 | 3 | if (_is_mow_with_cluster_key()) { |
133 | 1 | _rowid_coder = get_key_coder(FieldType::OLAP_FIELD_TYPE_UNSIGNED_INT); |
134 | | // primary keys |
135 | 1 | _primary_key_coders.swap(_key_coders); |
136 | | // cluster keys |
137 | 1 | _key_coders.clear(); |
138 | 1 | _key_index_size.clear(); |
139 | 1 | _num_sort_key_columns = _tablet_schema->cluster_key_uids().size(); |
140 | 2 | for (auto cid : _tablet_schema->cluster_key_uids()) { |
141 | 2 | const auto& column = _tablet_schema->column_by_uid(cid); |
142 | 2 | _key_coders.push_back(get_key_coder(column.type())); |
143 | 2 | _key_index_size.push_back(cast_set<uint16_t>(column.index_length())); |
144 | 2 | } |
145 | 1 | } |
146 | 3 | } |
147 | 12 | } |
148 | | |
149 | 12 | VerticalSegmentWriter::~VerticalSegmentWriter() { |
150 | 12 | _mem_tracker->release(_mem_tracker->consumption()); |
151 | 12 | } |
152 | | |
153 | | void VerticalSegmentWriter::_init_column_meta(ColumnMetaPB* meta, uint32_t column_id, |
154 | 61 | const TabletColumn& column) { |
155 | 61 | meta->set_column_id(column_id); |
156 | 61 | meta->set_type(int(column.type())); |
157 | 61 | meta->set_length(cast_set<int32_t>(column.length())); |
158 | 61 | meta->set_encoding(DEFAULT_ENCODING); |
159 | 61 | meta->set_compression(_opts.compression_type); |
160 | 61 | meta->set_is_nullable(column.is_nullable()); |
161 | 61 | meta->set_default_value(column.default_value()); |
162 | 61 | meta->set_precision(column.precision()); |
163 | 61 | meta->set_frac(column.frac()); |
164 | 61 | if (column.has_path_info()) { |
165 | 0 | column.path_info_ptr()->to_protobuf(meta->mutable_column_path_info(), |
166 | 0 | column.parent_unique_id()); |
167 | 0 | } |
168 | 61 | meta->set_unique_id(column.unique_id()); |
169 | 61 | for (uint32_t i = 0; i < column.get_subtype_count(); ++i) { |
170 | 0 | _init_column_meta(meta->add_children_columns(), column_id, column.get_sub_column(i)); |
171 | 0 | } |
172 | 61 | if (column.is_variant_type()) { |
173 | 0 | meta->set_variant_max_subcolumns_count(column.variant_max_subcolumns_count()); |
174 | 0 | meta->set_variant_enable_doc_mode(column.variant_enable_doc_mode()); |
175 | 0 | } |
176 | 61 | meta->set_result_is_nullable(column.get_result_is_nullable()); |
177 | 61 | meta->set_function_name(column.get_aggregation_name()); |
178 | 61 | meta->set_be_exec_version(column.get_be_exec_version()); |
179 | 61 | } |
180 | | |
181 | | Status VerticalSegmentWriter::_create_column_writer(uint32_t cid, const TabletColumn& column, |
182 | 61 | const TabletSchemaSPtr& tablet_schema) { |
183 | 61 | ColumnWriterOptions opts; |
184 | 61 | opts.meta = _footer.add_columns(); |
185 | | |
186 | 61 | _init_column_meta(opts.meta, cid, column); |
187 | | |
188 | | // now we create zone map for key columns in AGG_KEYS or all column in UNIQUE_KEYS or DUP_KEYS |
189 | | // except for columns whose type don't support zone map. |
190 | 61 | opts.need_zone_map = column.is_key() || tablet_schema->keys_type() != KeysType::AGG_KEYS; |
191 | 61 | opts.need_bloom_filter = column.is_bf_column(); |
192 | 61 | if (opts.need_bloom_filter) { |
193 | 0 | opts.bf_options.fpp = |
194 | 0 | tablet_schema->has_bf_fpp() ? tablet_schema->bloom_filter_fpp() : 0.05; |
195 | 0 | } |
196 | 61 | auto* tablet_index = tablet_schema->get_ngram_bf_index(column.unique_id()); |
197 | 61 | if (tablet_index) { |
198 | 0 | opts.need_bloom_filter = true; |
199 | 0 | opts.is_ngram_bf_index = true; |
200 | | //narrow convert from int32_t to uint8_t and uint16_t which is dangerous |
201 | 0 | auto gram_size = tablet_index->get_gram_size(); |
202 | 0 | auto gram_bf_size = tablet_index->get_gram_bf_size(); |
203 | 0 | if (gram_size > 256 || gram_size < 1) { |
204 | 0 | return Status::NotSupported("Do not support ngram bloom filter for ngram_size: ", |
205 | 0 | gram_size); |
206 | 0 | } |
207 | 0 | if (gram_bf_size > 65535 || gram_bf_size < 64) { |
208 | 0 | return Status::NotSupported("Do not support ngram bloom filter for bf_size: ", |
209 | 0 | gram_bf_size); |
210 | 0 | } |
211 | 0 | opts.gram_size = cast_set<uint8_t>(gram_size); |
212 | 0 | opts.gram_bf_size = cast_set<uint16_t>(gram_bf_size); |
213 | 0 | } |
214 | | |
215 | 61 | bool skip_inverted_index = false; |
216 | 61 | if (_opts.rowset_ctx != nullptr) { |
217 | | // skip write inverted index for index compaction column |
218 | 61 | skip_inverted_index = |
219 | 61 | _opts.rowset_ctx->columns_to_do_index_compaction.contains(column.unique_id()); |
220 | 61 | } |
221 | | // skip write inverted index on load if skip_write_index_on_load is true |
222 | 61 | if (_opts.write_type == DataWriteType::TYPE_DIRECT && |
223 | 61 | tablet_schema->skip_write_index_on_load()) { |
224 | 0 | skip_inverted_index = true; |
225 | 0 | } |
226 | 61 | if (!skip_inverted_index) { |
227 | 61 | auto inverted_indexs = tablet_schema->inverted_indexs(column); |
228 | 61 | if (!inverted_indexs.empty()) { |
229 | 0 | opts.inverted_indexes = inverted_indexs; |
230 | 0 | opts.need_inverted_index = true; |
231 | 0 | DCHECK(_index_file_writer != nullptr); |
232 | 0 | } |
233 | 61 | } |
234 | 61 | opts.index_file_writer = _index_file_writer; |
235 | | |
236 | 61 | if (const auto& index = tablet_schema->ann_index(column); index != nullptr) { |
237 | 0 | opts.ann_index = index; |
238 | 0 | opts.need_ann_index = true; |
239 | 0 | DCHECK(_index_file_writer != nullptr); |
240 | 0 | opts.index_file_writer = _index_file_writer; |
241 | 0 | } |
242 | | |
243 | 61 | #define DISABLE_INDEX_IF_FIELD_TYPE(TYPE) \ |
244 | 549 | if (column.type() == FieldType::OLAP_FIELD_TYPE_##TYPE) { \ |
245 | 0 | opts.need_zone_map = false; \ |
246 | 0 | opts.need_bloom_filter = false; \ |
247 | 0 | } |
248 | | |
249 | 61 | DISABLE_INDEX_IF_FIELD_TYPE(STRUCT) |
250 | 61 | DISABLE_INDEX_IF_FIELD_TYPE(ARRAY) |
251 | 61 | DISABLE_INDEX_IF_FIELD_TYPE(JSONB) |
252 | 61 | DISABLE_INDEX_IF_FIELD_TYPE(AGG_STATE) |
253 | 61 | DISABLE_INDEX_IF_FIELD_TYPE(MAP) |
254 | 61 | DISABLE_INDEX_IF_FIELD_TYPE(BITMAP) |
255 | 61 | DISABLE_INDEX_IF_FIELD_TYPE(HLL) |
256 | 61 | DISABLE_INDEX_IF_FIELD_TYPE(QUANTILE_STATE) |
257 | 61 | DISABLE_INDEX_IF_FIELD_TYPE(VARIANT) |
258 | | |
259 | 61 | #undef DISABLE_INDEX_IF_FIELD_TYPE |
260 | | |
261 | 61 | #undef CHECK_FIELD_TYPE |
262 | | |
263 | 61 | int64_t storage_page_size = _tablet_schema->storage_page_size(); |
264 | | // storage_page_size must be between 4KB and 10MB. |
265 | 61 | if (storage_page_size >= 4096 && storage_page_size <= 10485760) { |
266 | 61 | opts.data_page_size = storage_page_size; |
267 | 61 | } |
268 | 61 | opts.dict_page_size = _tablet_schema->storage_dict_page_size(); |
269 | 61 | DBUG_EXECUTE_IF("VerticalSegmentWriter._create_column_writer.storage_page_size", { |
270 | 61 | auto table_id = DebugPoints::instance()->get_debug_param_or_default<int64_t>( |
271 | 61 | "VerticalSegmentWriter._create_column_writer.storage_page_size", "table_id", |
272 | 61 | INT_MIN); |
273 | 61 | auto target_data_page_size = DebugPoints::instance()->get_debug_param_or_default<int64_t>( |
274 | 61 | "VerticalSegmentWriter._create_column_writer.storage_page_size", |
275 | 61 | "storage_page_size", INT_MIN); |
276 | 61 | if (table_id == INT_MIN || target_data_page_size == INT_MIN) { |
277 | 61 | return Status::Error<ErrorCode::INTERNAL_ERROR>( |
278 | 61 | "Debug point parameters missing: either 'table_id' or 'storage_page_size' not " |
279 | 61 | "set."); |
280 | 61 | } |
281 | 61 | if (table_id == _tablet_schema->table_id() && |
282 | 61 | opts.data_page_size != target_data_page_size) { |
283 | 61 | return Status::Error<ErrorCode::INTERNAL_ERROR>( |
284 | 61 | "Mismatch in 'storage_page_size': expected size does not match the current " |
285 | 61 | "data page size. " |
286 | 61 | "Expected: " + |
287 | 61 | std::to_string(target_data_page_size) + |
288 | 61 | ", Actual: " + std::to_string(opts.data_page_size) + "."); |
289 | 61 | } |
290 | 61 | }) |
291 | 61 | if (column.is_row_store_column()) { |
292 | | // smaller page size for row store column |
293 | 0 | auto page_size = _tablet_schema->row_store_page_size(); |
294 | 0 | opts.data_page_size = |
295 | 0 | (page_size > 0) ? page_size : segment_v2::ROW_STORE_PAGE_SIZE_DEFAULT_VALUE; |
296 | 0 | } |
297 | | |
298 | 61 | opts.rowset_ctx = _opts.rowset_ctx; |
299 | 61 | opts.file_writer = _file_writer; |
300 | 61 | opts.compression_type = _opts.compression_type; |
301 | 61 | opts.footer = &_footer; |
302 | 61 | opts.input_rs_readers = _opts.rowset_ctx->input_rs_readers; |
303 | | |
304 | 61 | opts.encoding_preference = {.integer_type_default_use_plain_encoding = |
305 | 61 | _tablet_schema->integer_type_default_use_plain_encoding(), |
306 | 61 | .binary_plain_encoding_default_impl = |
307 | 61 | _tablet_schema->binary_plain_encoding_default_impl()}; |
308 | 61 | std::unique_ptr<ColumnWriter> writer; |
309 | 61 | RETURN_IF_ERROR(ColumnWriter::create(opts, &column, _file_writer, &writer)); |
310 | 61 | RETURN_IF_ERROR(writer->init()); |
311 | 61 | _column_writers[cid] = std::move(writer); |
312 | 61 | _olap_data_convertor->add_column_data_convertor_at(column, cid); |
313 | 61 | return Status::OK(); |
314 | 61 | }; |
315 | | |
316 | 12 | Status VerticalSegmentWriter::init() { |
317 | 12 | DCHECK(_column_writers.empty()); |
318 | 12 | if (_opts.compression_type == UNKNOWN_COMPRESSION) { |
319 | 0 | _opts.compression_type = _tablet_schema->compression_type(); |
320 | 0 | } |
321 | 12 | _olap_data_convertor = std::make_unique<OlapBlockDataConvertor>(); |
322 | 12 | _olap_data_convertor->resize(_tablet_schema->num_columns()); |
323 | 12 | _column_writers.resize(_tablet_schema->num_columns()); |
324 | | // we don't need the short key index for unique key merge on write table. |
325 | 12 | if (_is_mow()) { |
326 | 3 | size_t seq_col_length = 0; |
327 | 3 | if (_tablet_schema->has_sequence_col()) { |
328 | 3 | seq_col_length = |
329 | 3 | _tablet_schema->column(_tablet_schema->sequence_col_idx()).length() + 1; |
330 | 3 | } |
331 | 3 | size_t rowid_length = 0; |
332 | 3 | if (_is_mow_with_cluster_key()) { |
333 | 1 | rowid_length = PrimaryKeyIndexReader::ROW_ID_LENGTH; |
334 | 1 | _short_key_index_builder.reset( |
335 | 1 | new ShortKeyIndexBuilder(_segment_id, _opts.num_rows_per_block)); |
336 | 1 | } |
337 | 3 | _primary_key_index_builder.reset( |
338 | 3 | new PrimaryKeyIndexBuilder(_file_writer, seq_col_length, rowid_length)); |
339 | 3 | RETURN_IF_ERROR(_primary_key_index_builder->init()); |
340 | 9 | } else { |
341 | 9 | _short_key_index_builder.reset( |
342 | 9 | new ShortKeyIndexBuilder(_segment_id, _opts.num_rows_per_block)); |
343 | 9 | } |
344 | 12 | return Status::OK(); |
345 | 12 | } |
346 | | |
347 | 8 | void VerticalSegmentWriter::_maybe_invalid_row_cache(const std::string& key) const { |
348 | | // Just invalid row cache for simplicity, since the rowset is not visible at present. |
349 | | // If we update/insert cache, if load failed rowset will not be visible but cached data |
350 | | // will be visible, and lead to inconsistency. |
351 | 8 | if (!config::disable_storage_row_cache && _tablet_schema->has_row_store_for_all_columns() && |
352 | 8 | _opts.write_type == DataWriteType::TYPE_DIRECT) { |
353 | | // invalidate cache |
354 | 0 | RowCache::instance()->erase({_opts.rowset_ctx->tablet_id, key}); |
355 | 0 | } |
356 | 8 | } |
357 | | |
358 | 12 | void VerticalSegmentWriter::_serialize_block_to_row_column(const Block& block) { |
359 | 12 | if (block.rows() == 0) { |
360 | 0 | return; |
361 | 0 | } |
362 | 12 | MonotonicStopWatch watch; |
363 | 12 | watch.start(); |
364 | 12 | int row_column_id = 0; |
365 | 73 | for (int i = 0; i < _tablet_schema->num_columns(); ++i) { |
366 | 61 | if (_tablet_schema->column(i).is_row_store_column()) { |
367 | 0 | auto* row_store_column = static_cast<ColumnString*>( |
368 | 0 | block.get_by_position(i).column->assume_mutable_ref().assume_mutable().get()); |
369 | 0 | row_store_column->clear(); |
370 | 0 | DataTypeSerDeSPtrs serdes = create_data_type_serdes(block.get_data_types()); |
371 | 0 | std::unordered_set<int> row_store_cids_set(_tablet_schema->row_columns_uids().begin(), |
372 | 0 | _tablet_schema->row_columns_uids().end()); |
373 | 0 | JsonbSerializeUtil::block_to_jsonb(*_tablet_schema, block, *row_store_column, |
374 | 0 | cast_set<int>(_tablet_schema->num_columns()), serdes, |
375 | 0 | row_store_cids_set); |
376 | 0 | break; |
377 | 0 | } |
378 | 61 | } |
379 | | |
380 | 12 | VLOG_DEBUG << "serialize , num_rows:" << block.rows() << ", row_column_id:" << row_column_id |
381 | 0 | << ", total_byte_size:" << block.allocated_bytes() << ", serialize_cost(us)" |
382 | 0 | << watch.elapsed_time() / 1000; |
383 | 12 | } |
384 | | |
385 | | Status VerticalSegmentWriter::_probe_key_for_mow( |
386 | | std::string key, std::size_t segment_pos, bool have_input_seq_column, bool have_delete_sign, |
387 | | const std::vector<RowsetSharedPtr>& specified_rowsets, |
388 | | std::vector<std::unique_ptr<SegmentCacheHandle>>& segment_caches, |
389 | | bool& has_default_or_nullable, std::vector<bool>& use_default_or_null_flag, |
390 | | const std::function<void(const RowLocation& loc)>& found_cb, |
391 | 0 | const std::function<Status()>& not_found_cb, PartialUpdateStats& stats) { |
392 | 0 | RowLocation loc; |
393 | | // save rowset shared ptr so this rowset wouldn't delete |
394 | 0 | RowsetSharedPtr rowset; |
395 | 0 | auto st = _tablet->lookup_row_key(key, _tablet_schema.get(), have_input_seq_column, |
396 | 0 | specified_rowsets, &loc, _mow_context->max_version, |
397 | 0 | segment_caches, &rowset); |
398 | 0 | if (st.is<KEY_NOT_FOUND>()) { |
399 | 0 | if (!have_delete_sign) { |
400 | 0 | RETURN_IF_ERROR(not_found_cb()); |
401 | 0 | } |
402 | 0 | ++stats.num_rows_new_added; |
403 | 0 | has_default_or_nullable = true; |
404 | 0 | use_default_or_null_flag.emplace_back(true); |
405 | 0 | return Status::OK(); |
406 | 0 | } |
407 | 0 | if (!st.ok() && !st.is<KEY_ALREADY_EXISTS>()) { |
408 | 0 | LOG(WARNING) << "failed to lookup row key, error: " << st; |
409 | 0 | return st; |
410 | 0 | } |
411 | | |
412 | | // 1. if the delete sign is marked, it means that the value columns of the row will not |
413 | | // be read. So we don't need to read the missing values from the previous rows. |
414 | | // 2. the one exception is when there are sequence columns in the table, we need to read |
415 | | // the sequence columns, otherwise it may cause the merge-on-read based compaction |
416 | | // policy to produce incorrect results |
417 | | |
418 | | // 3. In flexible partial update, we may delete the existing rows before if there exists |
419 | | // insert after delete in one load. In this case, the insert should also be treated |
420 | | // as newly inserted rows, note that the sequence column value is filled in |
421 | | // BlockAggregator::aggregate_for_insert_after_delete() if this row doesn't specify the sequence column |
422 | 0 | if (st.is<KEY_ALREADY_EXISTS>() || (have_delete_sign && !_tablet_schema->has_sequence_col()) || |
423 | 0 | (_opts.rowset_ctx->partial_update_info->is_flexible_partial_update() && |
424 | 0 | _mow_context->delete_bitmap->contains( |
425 | 0 | {loc.rowset_id, loc.segment_id, DeleteBitmap::TEMP_VERSION_COMMON}, loc.row_id))) { |
426 | 0 | has_default_or_nullable = true; |
427 | 0 | use_default_or_null_flag.emplace_back(true); |
428 | 0 | } else { |
429 | | // partial update should not contain invisible columns |
430 | 0 | use_default_or_null_flag.emplace_back(false); |
431 | 0 | _rsid_to_rowset.emplace(rowset->rowset_id(), rowset); |
432 | 0 | found_cb(loc); |
433 | 0 | } |
434 | |
|
435 | 0 | if (st.is<KEY_ALREADY_EXISTS>()) { |
436 | | // although we need to mark delete current row, we still need to read missing columns |
437 | | // for this row, we need to ensure that each column is aligned |
438 | 0 | _mow_context->delete_bitmap->add( |
439 | 0 | {_opts.rowset_ctx->rowset_id, _segment_id, DeleteBitmap::TEMP_VERSION_COMMON}, |
440 | 0 | cast_set<uint32_t>(segment_pos)); |
441 | 0 | ++stats.num_rows_deleted; |
442 | 0 | } else { |
443 | 0 | _mow_context->delete_bitmap->add( |
444 | 0 | {loc.rowset_id, loc.segment_id, DeleteBitmap::TEMP_VERSION_COMMON}, loc.row_id); |
445 | 0 | ++stats.num_rows_updated; |
446 | 0 | } |
447 | 0 | return Status::OK(); |
448 | 0 | } |
449 | | |
450 | 61 | Status VerticalSegmentWriter::_finalize_column_writer_and_update_meta(size_t cid) { |
451 | 61 | RETURN_IF_ERROR(_column_writers[cid]->finish()); |
452 | 61 | RETURN_IF_ERROR(_column_writers[cid]->write_data()); |
453 | | |
454 | 61 | auto* column_meta = _column_writers[cid]->get_column_meta(); |
455 | 61 | column_meta->set_compressed_data_bytes( |
456 | 61 | _column_writers[cid]->get_total_compressed_data_pages_bytes()); |
457 | 61 | column_meta->set_uncompressed_data_bytes( |
458 | 61 | _column_writers[cid]->get_total_uncompressed_data_pages_bytes()); |
459 | 61 | column_meta->set_raw_data_bytes(_column_writers[cid]->get_raw_data_bytes()); |
460 | 61 | return Status::OK(); |
461 | 61 | } |
462 | | |
463 | | Status VerticalSegmentWriter::_partial_update_preconditions_check(size_t row_pos, |
464 | 0 | bool is_flexible_update) { |
465 | 0 | if (!_is_mow()) { |
466 | 0 | auto msg = fmt::format( |
467 | 0 | "Can only do partial update on merge-on-write unique table, but found: " |
468 | 0 | "keys_type={}, _opts.enable_unique_key_merge_on_write={}, tablet_id={}", |
469 | 0 | _tablet_schema->keys_type(), _opts.enable_unique_key_merge_on_write, |
470 | 0 | _tablet->tablet_id()); |
471 | 0 | DCHECK(false) << msg; |
472 | 0 | return Status::InternalError<false>(msg); |
473 | 0 | } |
474 | 0 | if (_opts.rowset_ctx->partial_update_info == nullptr) { |
475 | 0 | auto msg = |
476 | 0 | fmt::format("partial_update_info should not be nullptr, please check, tablet_id={}", |
477 | 0 | _tablet->tablet_id()); |
478 | 0 | DCHECK(false) << msg; |
479 | 0 | return Status::InternalError<false>(msg); |
480 | 0 | } |
481 | 0 | if (!is_flexible_update) { |
482 | 0 | if (!_opts.rowset_ctx->partial_update_info->is_fixed_partial_update()) { |
483 | 0 | auto msg = fmt::format( |
484 | 0 | "in fixed partial update code, but update_mode={}, please check, tablet_id={}", |
485 | 0 | _opts.rowset_ctx->partial_update_info->update_mode(), _tablet->tablet_id()); |
486 | 0 | DCHECK(false) << msg; |
487 | 0 | return Status::InternalError<false>(msg); |
488 | 0 | } |
489 | 0 | } else { |
490 | 0 | if (!_opts.rowset_ctx->partial_update_info->is_flexible_partial_update()) { |
491 | 0 | auto msg = fmt::format( |
492 | 0 | "in flexible partial update code, but update_mode={}, please check, " |
493 | 0 | "tablet_id={}", |
494 | 0 | _opts.rowset_ctx->partial_update_info->update_mode(), _tablet->tablet_id()); |
495 | 0 | DCHECK(false) << msg; |
496 | 0 | return Status::InternalError<false>(msg); |
497 | 0 | } |
498 | 0 | } |
499 | 0 | if (row_pos != 0) { |
500 | 0 | auto msg = fmt::format("row_pos should be 0, but found {}, tablet_id={}", row_pos, |
501 | 0 | _tablet->tablet_id()); |
502 | 0 | DCHECK(false) << msg; |
503 | 0 | return Status::InternalError<false>(msg); |
504 | 0 | } |
505 | 0 | return Status::OK(); |
506 | 0 | } |
507 | | |
508 | | // for partial update, we should do following steps to fill content of block: |
509 | | // 1. set block data to data convertor, and get all key_column's converted slice |
510 | | // 2. get pk of input block, and read missing columns |
511 | | // 2.1 first find key location{rowset_id, segment_id, row_id} |
512 | | // 2.2 build read plan to read by batch |
513 | | // 2.3 fill block |
514 | | // 3. set columns to data convertor and then write all columns |
515 | | Status VerticalSegmentWriter::_append_block_with_partial_content(RowsInBlock& data, |
516 | 0 | Block& full_block) { |
517 | 0 | DBUG_EXECUTE_IF("_append_block_with_partial_content.block", DBUG_BLOCK); |
518 | |
|
519 | 0 | RETURN_IF_ERROR(_partial_update_preconditions_check(data.row_pos, false)); |
520 | | // create full block and fill with input columns |
521 | 0 | full_block = _tablet_schema->create_block(); |
522 | 0 | const auto& including_cids = _opts.rowset_ctx->partial_update_info->update_cids; |
523 | 0 | size_t input_id = 0; |
524 | 0 | for (auto i : including_cids) { |
525 | 0 | full_block.replace_by_position(i, data.block->get_by_position(input_id++).column); |
526 | 0 | } |
527 | |
|
528 | 0 | if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION && |
529 | 0 | _tablet_schema->num_variant_columns() > 0) { |
530 | 0 | RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns( |
531 | 0 | full_block, *_tablet_schema, including_cids)); |
532 | 0 | } |
533 | 0 | bool have_input_seq_column = false; |
534 | | // write including columns |
535 | 0 | std::vector<IOlapColumnDataAccessor*> key_columns; |
536 | 0 | IOlapColumnDataAccessor* seq_column = nullptr; |
537 | 0 | uint32_t segment_start_pos = 0; |
538 | 0 | for (auto cid : including_cids) { |
539 | 0 | RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema)); |
540 | 0 | RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns( |
541 | 0 | &full_block, data.row_pos, data.num_rows, std::vector<uint32_t> {cid})); |
542 | | // here we get segment column row num before append data. |
543 | 0 | segment_start_pos = cast_set<uint32_t>(_column_writers[cid]->get_next_rowid()); |
544 | | // olap data convertor alway start from id = 0 |
545 | 0 | auto [status, column] = _olap_data_convertor->convert_column_data(cid); |
546 | 0 | if (!status.ok()) { |
547 | 0 | return status; |
548 | 0 | } |
549 | 0 | if (cid < _num_sort_key_columns) { |
550 | 0 | key_columns.push_back(column); |
551 | 0 | } else if (_tablet_schema->has_sequence_col() && |
552 | 0 | cid == _tablet_schema->sequence_col_idx()) { |
553 | 0 | seq_column = column; |
554 | 0 | have_input_seq_column = true; |
555 | 0 | } |
556 | 0 | RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(), |
557 | 0 | data.num_rows)); |
558 | 0 | RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid)); |
559 | | // Don't clear source content for key columns and sequence column here, |
560 | | // as they will be used later in _full_encode_keys() and _generate_primary_key_index(). |
561 | | // They will be cleared at the end of this method. |
562 | 0 | bool is_key_column = (cid < _num_sort_key_columns); |
563 | 0 | bool is_seq_column = (_tablet_schema->has_sequence_col() && |
564 | 0 | cid == _tablet_schema->sequence_col_idx() && have_input_seq_column); |
565 | 0 | if (!is_key_column && !is_seq_column) { |
566 | 0 | _olap_data_convertor->clear_source_content(cid); |
567 | 0 | } |
568 | 0 | } |
569 | | |
570 | 0 | bool has_default_or_nullable = false; |
571 | 0 | std::vector<bool> use_default_or_null_flag; |
572 | 0 | use_default_or_null_flag.reserve(data.num_rows); |
573 | 0 | const auto* delete_signs = |
574 | 0 | BaseTablet::get_delete_sign_column_data(full_block, data.row_pos + data.num_rows); |
575 | |
|
576 | 0 | DBUG_EXECUTE_IF("VerticalSegmentWriter._append_block_with_partial_content.sleep", |
577 | 0 | { sleep(60); }) |
578 | 0 | const std::vector<RowsetSharedPtr>& specified_rowsets = _mow_context->rowset_ptrs; |
579 | 0 | std::vector<std::unique_ptr<SegmentCacheHandle>> segment_caches(specified_rowsets.size()); |
580 | |
|
581 | 0 | FixedReadPlan read_plan; |
582 | | |
583 | | // locate rows in base data |
584 | 0 | PartialUpdateStats stats; |
585 | |
|
586 | 0 | for (size_t block_pos = data.row_pos; block_pos < data.row_pos + data.num_rows; block_pos++) { |
587 | | // block segment |
588 | | // 2 -> 0 |
589 | | // 3 -> 1 |
590 | | // 4 -> 2 |
591 | | // 5 -> 3 |
592 | | // here row_pos = 2, num_rows = 4. |
593 | 0 | size_t delta_pos = block_pos - data.row_pos; |
594 | 0 | size_t segment_pos = segment_start_pos + delta_pos; |
595 | 0 | std::string key = _full_encode_keys(key_columns, delta_pos); |
596 | 0 | _maybe_invalid_row_cache(key); |
597 | 0 | if (have_input_seq_column) { |
598 | 0 | _encode_seq_column(seq_column, delta_pos, &key); |
599 | 0 | } |
600 | | // If the table have sequence column, and the include-cids don't contain the sequence |
601 | | // column, we need to update the primary key index builder at the end of this method. |
602 | | // At that time, we have a valid sequence column to encode the key with seq col. |
603 | 0 | if (!_tablet_schema->has_sequence_col() || have_input_seq_column) { |
604 | 0 | RETURN_IF_ERROR(_primary_key_index_builder->add_item(key)); |
605 | 0 | } |
606 | | |
607 | | // mark key with delete sign as deleted. |
608 | 0 | bool have_delete_sign = (delete_signs != nullptr && delete_signs[block_pos] != 0); |
609 | |
|
610 | 0 | auto not_found_cb = [&]() { |
611 | 0 | return _opts.rowset_ctx->partial_update_info->handle_new_key( |
612 | 0 | *_tablet_schema, [&]() -> std::string { |
613 | 0 | return data.block->dump_one_line(block_pos, |
614 | 0 | cast_set<int>(_num_sort_key_columns)); |
615 | 0 | }); |
616 | 0 | }; |
617 | 0 | auto update_read_plan = [&](const RowLocation& loc) { |
618 | 0 | read_plan.prepare_to_read(loc, segment_pos); |
619 | 0 | }; |
620 | 0 | RETURN_IF_ERROR(_probe_key_for_mow(std::move(key), segment_pos, have_input_seq_column, |
621 | 0 | have_delete_sign, specified_rowsets, segment_caches, |
622 | 0 | has_default_or_nullable, use_default_or_null_flag, |
623 | 0 | update_read_plan, not_found_cb, stats)); |
624 | 0 | } |
625 | 0 | CHECK_EQ(use_default_or_null_flag.size(), data.num_rows); |
626 | |
|
627 | 0 | if (config::enable_merge_on_write_correctness_check) { |
628 | 0 | _tablet->add_sentinel_mark_to_delete_bitmap(_mow_context->delete_bitmap.get(), |
629 | 0 | *_mow_context->rowset_ids); |
630 | 0 | } |
631 | | |
632 | | // read to fill full_block |
633 | 0 | RETURN_IF_ERROR(read_plan.fill_missing_columns( |
634 | 0 | _opts.rowset_ctx, _rsid_to_rowset, *_tablet_schema, full_block, |
635 | 0 | use_default_or_null_flag, has_default_or_nullable, segment_start_pos, data.block)); |
636 | | |
637 | 0 | if (_tablet_schema->num_variant_columns() > 0) { |
638 | 0 | RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns( |
639 | 0 | full_block, *_tablet_schema, _opts.rowset_ctx->partial_update_info->missing_cids)); |
640 | 0 | } |
641 | | |
642 | | // row column should be filled here |
643 | | // convert block to row store format |
644 | 0 | _serialize_block_to_row_column(full_block); |
645 | | |
646 | | // convert missing columns and send to column writer |
647 | 0 | const auto& missing_cids = _opts.rowset_ctx->partial_update_info->missing_cids; |
648 | 0 | for (auto cid : missing_cids) { |
649 | 0 | RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema)); |
650 | 0 | RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns( |
651 | 0 | &full_block, data.row_pos, data.num_rows, std::vector<uint32_t> {cid})); |
652 | 0 | auto [status, column] = _olap_data_convertor->convert_column_data(cid); |
653 | 0 | if (!status.ok()) { |
654 | 0 | return status; |
655 | 0 | } |
656 | 0 | if (_tablet_schema->has_sequence_col() && !have_input_seq_column && |
657 | 0 | cid == _tablet_schema->sequence_col_idx()) { |
658 | 0 | DCHECK_EQ(seq_column, nullptr); |
659 | 0 | seq_column = column; |
660 | 0 | } |
661 | 0 | RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(), |
662 | 0 | data.num_rows)); |
663 | 0 | RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid)); |
664 | | // Don't clear source content for sequence column here if it will be used later |
665 | | // in _generate_primary_key_index(). It will be cleared at the end of this method. |
666 | 0 | bool is_seq_column = (_tablet_schema->has_sequence_col() && !have_input_seq_column && |
667 | 0 | cid == _tablet_schema->sequence_col_idx()); |
668 | 0 | if (!is_seq_column) { |
669 | 0 | _olap_data_convertor->clear_source_content(cid); |
670 | 0 | } |
671 | 0 | } |
672 | | |
673 | 0 | _num_rows_updated += stats.num_rows_updated; |
674 | 0 | _num_rows_deleted += stats.num_rows_deleted; |
675 | 0 | _num_rows_new_added += stats.num_rows_new_added; |
676 | 0 | _num_rows_filtered += stats.num_rows_filtered; |
677 | 0 | if (_tablet_schema->has_sequence_col() && !have_input_seq_column) { |
678 | 0 | DCHECK_NE(seq_column, nullptr); |
679 | 0 | if (_num_rows_written != data.row_pos || |
680 | 0 | _primary_key_index_builder->num_rows() != _num_rows_written) { |
681 | 0 | return Status::InternalError( |
682 | 0 | "Correctness check failed, _num_rows_written: {}, row_pos: {}, primary key " |
683 | 0 | "index builder num rows: {}", |
684 | 0 | _num_rows_written, data.row_pos, _primary_key_index_builder->num_rows()); |
685 | 0 | } |
686 | 0 | RETURN_IF_ERROR(_generate_primary_key_index(_key_coders, key_columns, seq_column, |
687 | 0 | data.num_rows, false)); |
688 | 0 | } |
689 | | |
690 | 0 | _num_rows_written += data.num_rows; |
691 | 0 | DCHECK_EQ(_primary_key_index_builder->num_rows(), _num_rows_written) |
692 | 0 | << "primary key index builder num rows(" << _primary_key_index_builder->num_rows() |
693 | 0 | << ") not equal to segment writer's num rows written(" << _num_rows_written << ")"; |
694 | 0 | _olap_data_convertor->clear_source_content(); |
695 | 0 | return Status::OK(); |
696 | 0 | } |
697 | | |
// Writes one batch of rows for a *flexible* partial update on a merge-on-write
// table. Unlike fixed partial update, every row may provide its own subset of
// columns, described by a per-row skip bitmap. The method aggregates duplicate
// keys, writes key columns, probes old rowsets to plan reads for skipped
// cells, materializes the full rows into `full_block`, writes all remaining
// columns, and finally builds the primary key index.
// @param data       rows to append (block, row_pos, num_rows); num_rows may
//                   shrink if aggregation collapses duplicate keys
// @param full_block output: fully materialized block in tablet schema order
Status VerticalSegmentWriter::_append_block_with_flexible_partial_content(RowsInBlock& data,
                                                                          Block& full_block) {
    RETURN_IF_ERROR(_partial_update_preconditions_check(data.row_pos, true));

    // data.block has the same schema with full_block
    DCHECK(data.block->columns() == _tablet_schema->num_columns());

    // create full block and fill with sort key columns
    full_block = _tablet_schema->create_block();

    // Use _num_rows_written instead of creating column writer 0, since all column writers
    // should have the same row count, which equals _num_rows_written.
    uint32_t segment_start_pos = cast_set<uint32_t>(_num_rows_written);

    DCHECK(_tablet_schema->has_skip_bitmap_col());
    auto skip_bitmap_col_idx = _tablet_schema->skip_bitmap_col_idx();

    bool has_default_or_nullable = false;
    std::vector<bool> use_default_or_null_flag;
    use_default_or_null_flag.reserve(data.num_rows);

    int32_t seq_map_col_unique_id = _opts.rowset_ctx->partial_update_info->sequence_map_col_uid();
    bool schema_has_sequence_col = _tablet_schema->has_sequence_col();

    DBUG_EXECUTE_IF("VerticalSegmentWriter._append_block_with_flexible_partial_content.sleep",
                    { sleep(60); })
    const std::vector<RowsetSharedPtr>& specified_rowsets = _mow_context->rowset_ptrs;
    std::vector<std::unique_ptr<SegmentCacheHandle>> segment_caches(specified_rowsets.size());

    // Ensure all primary key column writers and sequence column writer are created before
    // aggregate_for_flexible_partial_update, because it internally calls convert_pk_columns
    // and convert_seq_column which need the convertors in _olap_data_convertor
    for (uint32_t cid = 0; cid < _tablet_schema->num_key_columns(); ++cid) {
        RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema));
    }
    if (schema_has_sequence_col) {
        uint32_t cid = _tablet_schema->sequence_col_idx();
        RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema));
    }

    // 1. aggregate duplicate rows in block
    RETURN_IF_ERROR(_block_aggregator.aggregate_for_flexible_partial_update(
            const_cast<Block*>(data.block), data.num_rows, specified_rowsets, segment_caches));
    if (data.block->rows() != data.num_rows) {
        // Aggregation collapsed duplicate keys: shrink the row count and drop
        // any converted source content that is now stale.
        data.num_rows = data.block->rows();
        _olap_data_convertor->clear_source_content();
    }

    // 2. encode primary key columns
    // we can only encode primary key columns currently because all non-primary columns in
    // flexible partial update can have missing cells
    std::vector<IOlapColumnDataAccessor*> key_columns {};
    RETURN_IF_ERROR(_block_aggregator.convert_pk_columns(const_cast<Block*>(data.block),
                                                         data.row_pos, data.num_rows, key_columns));
    // 3. encode sequence column
    // We encode the sequence column even though it may have invalid values in some rows because
    // we need to encode the value of sequence column in key for rows that have a valid value in
    // sequence column during lookup_raw_key. We will encode the sequence column again at the end
    // of this method. At that time, we have a valid sequence column to encode the key with seq col.
    IOlapColumnDataAccessor* seq_column {nullptr};
    RETURN_IF_ERROR(_block_aggregator.convert_seq_column(const_cast<Block*>(data.block),
                                                         data.row_pos, data.num_rows, seq_column));

    std::vector<BitmapValue>* skip_bitmaps = &(
            assert_cast<ColumnBitmap*>(
                    data.block->get_by_position(skip_bitmap_col_idx).column->assume_mutable().get())
                    ->get_data());
    const auto* delete_signs =
            BaseTablet::get_delete_sign_column_data(*data.block, data.row_pos + data.num_rows);
    DCHECK(delete_signs != nullptr);

    // Key columns are shared with the input block; no copy needed.
    for (std::size_t cid {0}; cid < _tablet_schema->num_key_columns(); cid++) {
        full_block.replace_by_position(cid, data.block->get_by_position(cid).column);
    }

    // 4. write primary key columns data
    for (std::size_t cid {0}; cid < _tablet_schema->num_key_columns(); cid++) {
        const auto& column = key_columns[cid];
        DCHECK(_column_writers[cid]->get_next_rowid() == _num_rows_written);
        RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(),
                                                     data.num_rows));
        DCHECK(_column_writers[cid]->get_next_rowid() == _num_rows_written + data.num_rows);
        RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid));
    }

    // 5. generate read plan
    FlexibleReadPlan read_plan {_tablet_schema->has_row_store_for_all_columns()};
    PartialUpdateStats stats;
    RETURN_IF_ERROR(_generate_flexible_read_plan(
            read_plan, data, segment_start_pos, schema_has_sequence_col, seq_map_col_unique_id,
            skip_bitmaps, key_columns, seq_column, delete_signs, specified_rowsets, segment_caches,
            has_default_or_nullable, use_default_or_null_flag, stats));
    CHECK_EQ(use_default_or_null_flag.size(), data.num_rows);

    if (config::enable_merge_on_write_correctness_check) {
        _tablet->add_sentinel_mark_to_delete_bitmap(_mow_context->delete_bitmap.get(),
                                                    *_mow_context->rowset_ids);
    }

    // 6. read according plan to fill full_block
    RETURN_IF_ERROR(read_plan.fill_non_primary_key_columns(
            _opts.rowset_ctx, _rsid_to_rowset, *_tablet_schema, full_block,
            use_default_or_null_flag, has_default_or_nullable, segment_start_pos,
            cast_set<uint32_t>(data.row_pos), data.block, skip_bitmaps));

    // TODO(bobhan1): should we replace the skip bitmap column with empty bitmaps to reduce storage occupation?
    // this column is not needed in read path for merge-on-write table

    // 7. fill row store column
    _serialize_block_to_row_column(full_block);

    std::vector<uint32_t> column_ids;
    for (uint32_t i = 0; i < _tablet_schema->num_columns(); ++i) {
        column_ids.emplace_back(i);
    }
    if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION &&
        _tablet_schema->num_variant_columns() > 0) {
        RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns(
                full_block, *_tablet_schema, column_ids));
    }

    // 8. encode and write all non-primary key columns(including sequence column if exists)
    for (auto cid = _tablet_schema->num_key_columns(); cid < _tablet_schema->num_columns(); cid++) {
        if (cid != _tablet_schema->sequence_col_idx()) {
            // The sequence column writer was already created at the top of this method.
            RETURN_IF_ERROR(_create_column_writer(cast_set<uint32_t>(cid),
                                                  _tablet_schema->column(cid), _tablet_schema));
        }
        RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_column(
                full_block.get_by_position(cid), data.row_pos, data.num_rows,
                cast_set<uint32_t>(cid)));
        auto [status, column] = _olap_data_convertor->convert_column_data(cid);
        if (!status.ok()) {
            return status;
        }
        if (cid == _tablet_schema->sequence_col_idx()) {
            // should use the latest encoded sequence column to build the primary index
            seq_column = column;
        }
        DCHECK(_column_writers[cid]->get_next_rowid() == _num_rows_written);
        RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(),
                                                     data.num_rows));
        DCHECK(_column_writers[cid]->get_next_rowid() == _num_rows_written + data.num_rows);
        RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid));
    }

    _num_rows_updated += stats.num_rows_updated;
    _num_rows_deleted += stats.num_rows_deleted;
    _num_rows_new_added += stats.num_rows_new_added;
    _num_rows_filtered += stats.num_rows_filtered;

    // Sanity check: this batch must start exactly where the previous one
    // ended, and the PK index must be in sync with rows already written.
    if (_num_rows_written != data.row_pos ||
        _primary_key_index_builder->num_rows() != _num_rows_written) {
        return Status::InternalError(
                "Correctness check failed, _num_rows_written: {}, row_pos: {}, primary key "
                "index builder num rows: {}",
                _num_rows_written, data.row_pos, _primary_key_index_builder->num_rows());
    }

    // 9. build primary key index
    RETURN_IF_ERROR(_generate_primary_key_index(_key_coders, key_columns, seq_column, data.num_rows,
                                                false));

    _num_rows_written += data.num_rows;
    DCHECK_EQ(_primary_key_index_builder->num_rows(), _num_rows_written)
            << "primary key index builder num rows(" << _primary_key_index_builder->num_rows()
            << ") not equal to segment writer's num rows written(" << _num_rows_written << ")";
    _olap_data_convertor->clear_source_content();
    return Status::OK();
}
867 | | |
// Builds the key-encoded form of the sequence column's default value: the
// column's declared default when it has one, otherwise the type's default.
// The result (marker byte + ascending encoding, produced by
// _encode_seq_column) is appended to *encoded_value.
// @param tablet_schema schema that owns the sequence column
// @param info          partial update info holding the textual default values
// @param encoded_value output; receives the encoded default sequence value
Status VerticalSegmentWriter::_generate_encoded_default_seq_value(const TabletSchema& tablet_schema,
                                                                  const PartialUpdateInfo& info,
                                                                  std::string* encoded_value) {
    const auto& seq_column = tablet_schema.column(tablet_schema.sequence_col_idx());
    // Single-column scratch block holding just the one default cell.
    auto block = tablet_schema.create_block_by_cids(
            {cast_set<uint32_t>(tablet_schema.sequence_col_idx())});
    if (seq_column.has_default_value()) {
        // info.default_values is indexed by value-column position, i.e. with
        // the key columns excluded.
        auto idx = tablet_schema.sequence_col_idx() - tablet_schema.num_key_columns();
        const auto& default_value = info.default_values[idx];
        StringRef str {default_value};
        // Parse the textual default through the column's serde.
        RETURN_IF_ERROR(block.get_by_position(0).type->get_serde()->default_from_string(
                str, *block.get_by_position(0).column->assume_mutable().get()));

    } else {
        block.get_by_position(0).column->assume_mutable()->insert_default();
    }
    DCHECK_EQ(block.rows(), 1);
    // Convert the single cell from engine format to storage format, then
    // encode it exactly as it would appear appended to a primary key.
    auto olap_data_convertor = std::make_unique<OlapBlockDataConvertor>();
    olap_data_convertor->add_column_data_convertor(seq_column);
    olap_data_convertor->set_source_content(&block, 0, 1);
    auto [status, column] = olap_data_convertor->convert_column_data(0);
    if (!status.ok()) {
        return status;
    }
    // include marker
    _encode_seq_column(column, 0, encoded_value);
    return Status::OK();
}
896 | | |
// For every row in `data`, probes the encoded primary key against the
// specified older rowsets (_probe_key_for_mow) and records in `read_plan`
// which old row location must be read back to fill the cells this row skipped
// according to its skip bitmap. Rows whose delete sign is set (and not
// skipped) are treated as deletes; per-row default/NULL decisions land in
// use_default_or_null_flag and counters in `stats`.
// NOTE(review): seq_map_col_unique_id is currently unused in this body —
// confirm whether it is still needed by callers.
Status VerticalSegmentWriter::_generate_flexible_read_plan(
        FlexibleReadPlan& read_plan, RowsInBlock& data, size_t segment_start_pos,
        bool schema_has_sequence_col, int32_t seq_map_col_unique_id,
        std::vector<BitmapValue>* skip_bitmaps,
        const std::vector<IOlapColumnDataAccessor*>& key_columns,
        IOlapColumnDataAccessor* seq_column, const signed char* delete_signs,
        const std::vector<RowsetSharedPtr>& specified_rowsets,
        std::vector<std::unique_ptr<SegmentCacheHandle>>& segment_caches,
        bool& has_default_or_nullable, std::vector<bool>& use_default_or_null_flag,
        PartialUpdateStats& stats) {
    int32_t delete_sign_col_unique_id =
            _tablet_schema->column(_tablet_schema->delete_sign_idx()).unique_id();
    int32_t seq_col_unique_id =
            (_tablet_schema->has_sequence_col()
                     ? _tablet_schema->column(_tablet_schema->sequence_col_idx()).unique_id()
                     : -1);
    for (size_t block_pos = data.row_pos; block_pos < data.row_pos + data.num_rows; block_pos++) {
        // block_pos indexes into data.block; segment_pos is the row's final
        // position inside this segment.
        size_t delta_pos = block_pos - data.row_pos;
        size_t segment_pos = segment_start_pos + delta_pos;
        auto& skip_bitmap = skip_bitmaps->at(block_pos);

        std::string key = _full_encode_keys(key_columns, delta_pos);
        _maybe_invalid_row_cache(key);
        // A row carries a sequence value only if its skip bitmap does not
        // mark the sequence column as missing.
        bool row_has_sequence_col =
                (schema_has_sequence_col && !skip_bitmap.contains(seq_col_unique_id));
        if (row_has_sequence_col) {
            _encode_seq_column(seq_column, delta_pos, &key);
        }

        // mark key with delete sign as deleted.
        bool have_delete_sign =
                (!skip_bitmap.contains(delete_sign_col_unique_id) && delete_signs[block_pos] != 0);

        // Called when the key does not exist in any older rowset; lets the
        // partial update policy decide how to handle a brand-new key.
        auto not_found_cb = [&]() {
            return _opts.rowset_ctx->partial_update_info->handle_new_key(
                    *_tablet_schema,
                    [&]() -> std::string {
                        return data.block->dump_one_line(block_pos,
                                                         cast_set<int>(_num_sort_key_columns));
                    },
                    &skip_bitmap);
        };
        // Called when an old version of the row is found; schedules reading
        // only the columns this row skipped.
        auto update_read_plan = [&](const RowLocation& loc) {
            read_plan.prepare_to_read(loc, segment_pos, skip_bitmap);
        };

        RETURN_IF_ERROR(_probe_key_for_mow(std::move(key), segment_pos, row_has_sequence_col,
                                           have_delete_sign, specified_rowsets, segment_caches,
                                           has_default_or_nullable, use_default_or_null_flag,
                                           update_read_plan, not_found_cb, stats));
    }
    return Status::OK();
}
950 | | |
951 | 12 | Status VerticalSegmentWriter::batch_block(const Block* block, size_t row_pos, size_t num_rows) { |
952 | 12 | if (_opts.rowset_ctx->partial_update_info && |
953 | 12 | _opts.rowset_ctx->partial_update_info->is_partial_update() && |
954 | 12 | _opts.write_type == DataWriteType::TYPE_DIRECT && |
955 | 12 | !_opts.rowset_ctx->is_transient_rowset_writer) { |
956 | 0 | if (_opts.rowset_ctx->partial_update_info->is_flexible_partial_update()) { |
957 | 0 | if (block->columns() != _tablet_schema->num_columns()) { |
958 | 0 | return Status::InvalidArgument( |
959 | 0 | "illegal flexible partial update block columns, block columns = {}, " |
960 | 0 | "tablet_schema columns = {}", |
961 | 0 | block->dump_structure(), _tablet_schema->dump_structure()); |
962 | 0 | } |
963 | 0 | } else { |
964 | 0 | if (block->columns() < _tablet_schema->num_key_columns() || |
965 | 0 | block->columns() >= _tablet_schema->num_columns()) { |
966 | 0 | return Status::InvalidArgument(fmt::format( |
967 | 0 | "illegal partial update block columns: {}, num key columns: {}, total " |
968 | 0 | "schema columns: {}", |
969 | 0 | block->columns(), _tablet_schema->num_key_columns(), |
970 | 0 | _tablet_schema->num_columns())); |
971 | 0 | } |
972 | 0 | } |
973 | 12 | } else if (block->columns() != _tablet_schema->num_columns()) { |
974 | 0 | return Status::InvalidArgument( |
975 | 0 | "illegal block columns, block columns = {}, tablet_schema columns = {}", |
976 | 0 | block->dump_structure(), _tablet_schema->dump_structure()); |
977 | 0 | } |
978 | 12 | _batched_blocks.emplace_back(block, row_pos, num_rows); |
979 | 12 | return Status::OK(); |
980 | 12 | } |
981 | | |
// Flushes every block queued by batch_block(). Partial-update merge-on-write
// writes are routed to the dedicated append paths; otherwise each column of
// each batched block is converted to storage format, appended to its column
// writer (column-by-column, vertical order), and finally the key index
// entries are generated per block.
Status VerticalSegmentWriter::write_batch() {
    if (_opts.rowset_ctx->partial_update_info &&
        _opts.rowset_ctx->partial_update_info->is_partial_update() &&
        _opts.write_type == DataWriteType::TYPE_DIRECT &&
        !_opts.rowset_ctx->is_transient_rowset_writer) {
        bool is_flexible_partial_update =
                _opts.rowset_ctx->partial_update_info->is_flexible_partial_update();
        Block full_block;
        for (auto& data : _batched_blocks) {
            if (is_flexible_partial_update) {
                RETURN_IF_ERROR(_append_block_with_flexible_partial_content(data, full_block));
            } else {
                RETURN_IF_ERROR(_append_block_with_partial_content(data, full_block));
            }
        }
        return Status::OK();
    }
    // Row column should be filled here when it's a directly write from memtable
    // or it's schema change write(since column data type maybe changed, so we should rebuild)
    if (_opts.write_type == DataWriteType::TYPE_DIRECT ||
        _opts.write_type == DataWriteType::TYPE_SCHEMA_CHANGE) {
        for (auto& data : _batched_blocks) {
            // TODO: maybe we should pass range to this method
            _serialize_block_to_row_column(*data.block);
        }
    }

    std::vector<uint32_t> column_ids;
    for (uint32_t i = 0; i < _tablet_schema->num_columns(); ++i) {
        column_ids.emplace_back(i);
    }
    if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION &&
        _tablet_schema->num_variant_columns() > 0) {
        for (auto& data : _batched_blocks) {
            RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns(
                    const_cast<Block&>(*data.block), *_tablet_schema, column_ids));
        }
    }

    std::vector<IOlapColumnDataAccessor*> key_columns;
    IOlapColumnDataAccessor* seq_column = nullptr;
    // the key is cluster key column unique id
    std::map<uint32_t, IOlapColumnDataAccessor*> cid_to_column;
    // Outer loop over columns, inner loop over blocks: each column is fully
    // written and finalized across all batched blocks before the next one.
    for (uint32_t cid = 0; cid < _tablet_schema->num_columns(); ++cid) {
        RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema));
        for (auto& data : _batched_blocks) {
            RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns(
                    data.block, data.row_pos, data.num_rows, std::vector<uint32_t> {cid}));

            // convert column data from engine format to storage layer format
            auto [status, column] = _olap_data_convertor->convert_column_data(cid);
            if (!status.ok()) {
                return status;
            }
            // Keep accessors needed later for index generation.
            if (cid < _tablet_schema->num_key_columns()) {
                key_columns.push_back(column);
            }
            if (_tablet_schema->has_sequence_col() && cid == _tablet_schema->sequence_col_idx()) {
                seq_column = column;
            }
            auto column_unique_id = _tablet_schema->column(cid).unique_id();
            // Remember cluster key columns; they build the short key index later.
            if (_is_mow_with_cluster_key() &&
                std::find(_tablet_schema->cluster_key_uids().begin(),
                          _tablet_schema->cluster_key_uids().end(),
                          column_unique_id) != _tablet_schema->cluster_key_uids().end()) {
                cid_to_column[column_unique_id] = column;
            }
            RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(),
                                                         data.num_rows));
            _olap_data_convertor->clear_source_content();
        }
        if (_data_dir != nullptr &&
            _data_dir->reach_capacity_limit(_column_writers[cid]->estimate_buffer_size())) {
            return Status::Error<DISK_REACH_CAPACITY_LIMIT>("disk {} exceed capacity limit.",
                                                            _data_dir->path_hash());
        }
        RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid));
    }

    // Build key/primary-key indexes per block and account written rows.
    for (auto& data : _batched_blocks) {
        _olap_data_convertor->set_source_content(data.block, data.row_pos, data.num_rows);
        RETURN_IF_ERROR(_generate_key_index(data, key_columns, seq_column, cid_to_column));
        _olap_data_convertor->clear_source_content();
        _num_rows_written += data.num_rows;
    }

    _batched_blocks.clear();
    return Status::OK();
}
1071 | | |
// Builds the key index entries for one batch: for merge-on-write tables a
// primary key index (plus a cluster-key-based short key index when cluster
// keys exist), otherwise a short key index sampled every
// _opts.num_rows_per_block rows.
Status VerticalSegmentWriter::_generate_key_index(
        RowsInBlock& data, std::vector<IOlapColumnDataAccessor*>& key_columns,
        IOlapColumnDataAccessor* seq_column,
        std::map<uint32_t, IOlapColumnDataAccessor*>& cid_to_column) {
    // find all row pos for short key indexes
    std::vector<size_t> short_key_pos;
    // We build a short key index every `_opts.num_rows_per_block` rows. Specifically, we
    // build a short key index using 1st rows for first block and `_short_key_row_pos - _row_count`
    // for next blocks.
    if (_short_key_row_pos == 0 && _num_rows_written == 0) {
        short_key_pos.push_back(0);
    }
    // _short_key_row_pos persists across batches; positions pushed here are
    // relative to the start of this batch.
    while (_short_key_row_pos + _opts.num_rows_per_block < _num_rows_written + data.num_rows) {
        _short_key_row_pos += _opts.num_rows_per_block;
        short_key_pos.push_back(_short_key_row_pos - _num_rows_written);
    }
    if (_is_mow_with_cluster_key()) {
        // 1. generate primary key index
        RETURN_IF_ERROR(_generate_primary_key_index(_primary_key_coders, key_columns, seq_column,
                                                    data.num_rows, true));
        // 2. generate short key index (use cluster key)
        std::vector<IOlapColumnDataAccessor*> short_key_columns;
        for (const auto& cid : _tablet_schema->cluster_key_uids()) {
            short_key_columns.push_back(cid_to_column[cid]);
        }
        RETURN_IF_ERROR(_generate_short_key_index(short_key_columns, data.num_rows, short_key_pos));
    } else if (_is_mow()) {
        RETURN_IF_ERROR(_generate_primary_key_index(_key_coders, key_columns, seq_column,
                                                    data.num_rows, false));
    } else { // other tables
        RETURN_IF_ERROR(_generate_short_key_index(key_columns, data.num_rows, short_key_pos));
    }
    return Status::OK();
}
1106 | | |
// Adds `num_rows` primary key entries to _primary_key_index_builder.
// @param primary_key_coders  coders used to encode the key columns
// @param primary_key_columns converted key column accessors
// @param seq_column  converted sequence column (used only when the schema has one)
// @param num_rows    number of rows in this batch
// @param need_sort   true for the cluster-key path (see _generate_key_index):
//                    keys are not stored in primary key order, so they are
//                    sorted in memory before insertion, with the row id
//                    appended to keep each entry unique.
Status VerticalSegmentWriter::_generate_primary_key_index(
        const std::vector<const KeyCoder*>& primary_key_coders,
        const std::vector<IOlapColumnDataAccessor*>& primary_key_columns,
        IOlapColumnDataAccessor* seq_column, size_t num_rows, bool need_sort) {
    if (!need_sort) { // mow table without cluster key
        std::string last_key;
        for (size_t pos = 0; pos < num_rows; pos++) {
            // use _key_coders
            std::string key = _full_encode_keys(primary_key_columns, pos);
            _maybe_invalid_row_cache(key);
            if (_tablet_schema->has_sequence_col()) {
                _encode_seq_column(seq_column, pos, &key);
            }
            // Incoming rows must already be strictly sorted by encoded key.
            DCHECK(key.compare(last_key) > 0)
                    << "found duplicate key or key is not sorted! current key: " << key
                    << ", last key: " << last_key;
            RETURN_IF_ERROR(_primary_key_index_builder->add_item(key));
            last_key = std::move(key);
        }
    } else { // mow table with cluster key
        // 1. generate primary keys in memory
        std::vector<std::string> primary_keys;
        for (uint32_t pos = 0; pos < num_rows; pos++) {
            std::string key = _full_encode_keys(primary_key_coders, primary_key_columns, pos);
            _maybe_invalid_row_cache(key);
            if (_tablet_schema->has_sequence_col()) {
                _encode_seq_column(seq_column, pos, &key);
            }
            // Appending the row id makes every entry unique even when two
            // rows share the same key + sequence encoding.
            _encode_rowid(pos, &key);
            primary_keys.emplace_back(std::move(key));
        }
        // 2. sort primary keys
        std::sort(primary_keys.begin(), primary_keys.end());
        // 3. write primary keys index
        std::string last_key;
        for (const auto& key : primary_keys) {
            DCHECK(key.compare(last_key) > 0)
                    << "found duplicate key or key is not sorted! current key: " << key
                    << ", last key: " << last_key;
            RETURN_IF_ERROR(_primary_key_index_builder->add_item(key));
            last_key = key;
        }
    }
    return Status::OK();
}
1152 | | |
1153 | | Status VerticalSegmentWriter::_generate_short_key_index( |
1154 | | std::vector<IOlapColumnDataAccessor*>& key_columns, size_t num_rows, |
1155 | 10 | const std::vector<size_t>& short_key_pos) { |
1156 | | // use _key_coders |
1157 | 10 | _set_min_key(_full_encode_keys(key_columns, 0)); |
1158 | 10 | _set_max_key(_full_encode_keys(key_columns, num_rows - 1)); |
1159 | 10 | DCHECK(Slice(_max_key.data(), _max_key.size()) |
1160 | 0 | .compare(Slice(_min_key.data(), _min_key.size())) >= 0) |
1161 | 0 | << "key is not sorted! min key: " << _min_key << ", max key: " << _max_key; |
1162 | | |
1163 | 10 | key_columns.resize(_num_short_key_columns); |
1164 | 10 | std::string last_key; |
1165 | 10 | for (const auto pos : short_key_pos) { |
1166 | 10 | std::string key = _encode_keys(key_columns, pos); |
1167 | 10 | DCHECK(key.compare(last_key) >= 0) |
1168 | 0 | << "key is not sorted! current key: " << key << ", last key: " << last_key; |
1169 | 10 | RETURN_IF_ERROR(_short_key_index_builder->add_item(key)); |
1170 | 10 | last_key = std::move(key); |
1171 | 10 | } |
1172 | 10 | return Status::OK(); |
1173 | 10 | } |
1174 | | |
// Appends the ascending encoding of `rowid`, preceded by a normal-value
// marker byte, to `encoded_keys`. Called from _generate_primary_key_index
// (cluster-key path) so each key entry stays unique before sorting.
void VerticalSegmentWriter::_encode_rowid(const uint32_t rowid, std::string* encoded_keys) {
    encoded_keys->push_back(KEY_NORMAL_MARKER);
    _rowid_coder->full_encode_ascending(&rowid, encoded_keys);
}
1179 | | |
1180 | | std::string VerticalSegmentWriter::_full_encode_keys( |
1181 | 24 | const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos) { |
1182 | 24 | assert(_key_index_size.size() == _num_sort_key_columns); |
1183 | 24 | if (!(key_columns.size() == _num_sort_key_columns && |
1184 | 24 | _key_coders.size() == _num_sort_key_columns)) { |
1185 | 0 | LOG_INFO("key_columns.size()={}, _key_coders.size()={}, _num_sort_key_columns={}, ", |
1186 | 0 | key_columns.size(), _key_coders.size(), _num_sort_key_columns); |
1187 | 0 | } |
1188 | 24 | assert(key_columns.size() == _num_sort_key_columns && |
1189 | 24 | _key_coders.size() == _num_sort_key_columns); |
1190 | 24 | return _full_encode_keys(_key_coders, key_columns, pos); |
1191 | 24 | } |
1192 | | |
1193 | | std::string VerticalSegmentWriter::_full_encode_keys( |
1194 | | const std::vector<const KeyCoder*>& key_coders, |
1195 | 28 | const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos) { |
1196 | 28 | assert(key_columns.size() == key_coders.size()); |
1197 | | |
1198 | 28 | std::string encoded_keys; |
1199 | 28 | size_t cid = 0; |
1200 | 70 | for (const auto& column : key_columns) { |
1201 | 70 | auto field = column->get_data_at(pos); |
1202 | 70 | if (UNLIKELY(!field)) { |
1203 | 0 | encoded_keys.push_back(KEY_NULL_FIRST_MARKER); |
1204 | 0 | ++cid; |
1205 | 0 | continue; |
1206 | 0 | } |
1207 | 70 | encoded_keys.push_back(KEY_NORMAL_MARKER); |
1208 | 70 | DCHECK(key_coders[cid] != nullptr); |
1209 | 70 | key_coders[cid]->full_encode_ascending(field, &encoded_keys); |
1210 | 70 | ++cid; |
1211 | 70 | } |
1212 | 28 | return encoded_keys; |
1213 | 28 | } |
1214 | | |
// Appends the encoded sequence value at row `pos` to `encoded_keys`. A NULL
// sequence value is padded to the column's declared length with minimal
// markers so every encoded key keeps a fixed-width sequence suffix.
void VerticalSegmentWriter::_encode_seq_column(const IOlapColumnDataAccessor* seq_column,
                                               size_t pos, std::string* encoded_keys) {
    const auto* field = seq_column->get_data_at(pos);
    // To facilitate the use of the primary key index, encode the seq column
    // to the minimum value of the corresponding length when the seq column
    // is null
    if (UNLIKELY(!field)) {
        encoded_keys->push_back(KEY_NULL_FIRST_MARKER);
        size_t seq_col_length = _tablet_schema->column(_tablet_schema->sequence_col_idx()).length();
        encoded_keys->append(seq_col_length, KEY_MINIMAL_MARKER);
        return;
    }
    encoded_keys->push_back(KEY_NORMAL_MARKER);
    _seq_coder->full_encode_ascending(field, encoded_keys);
}
1230 | | |
1231 | | std::string VerticalSegmentWriter::_encode_keys( |
1232 | 10 | const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos) { |
1233 | 10 | assert(key_columns.size() == _num_short_key_columns); |
1234 | | |
1235 | 10 | std::string encoded_keys; |
1236 | 10 | size_t cid = 0; |
1237 | 22 | for (const auto& column : key_columns) { |
1238 | 22 | auto field = column->get_data_at(pos); |
1239 | 22 | if (UNLIKELY(!field)) { |
1240 | 0 | encoded_keys.push_back(KEY_NULL_FIRST_MARKER); |
1241 | 0 | ++cid; |
1242 | 0 | continue; |
1243 | 0 | } |
1244 | 22 | encoded_keys.push_back(KEY_NORMAL_MARKER); |
1245 | 22 | _key_coders[cid]->encode_ascending(field, _key_index_size[cid], &encoded_keys); |
1246 | 22 | ++cid; |
1247 | 22 | } |
1248 | 10 | return encoded_keys; |
1249 | 10 | } |
1250 | | |
1251 | | // TODO(lingbin): Currently this function does not include the size of various indexes, |
1252 | | // We should make this more precise. |
1253 | 12 | uint64_t VerticalSegmentWriter::_estimated_remaining_size() { |
1254 | | // footer_size(4) + checksum(4) + segment_magic(4) |
1255 | 12 | uint64_t size = 12; |
1256 | 12 | if (_is_mow_with_cluster_key()) { |
1257 | 1 | size += _primary_key_index_builder->size() + _short_key_index_builder->size(); |
1258 | 11 | } else if (_is_mow()) { |
1259 | 2 | size += _primary_key_index_builder->size(); |
1260 | 9 | } else { |
1261 | 9 | size += _short_key_index_builder->size(); |
1262 | 9 | } |
1263 | | |
1264 | | // update the mem_tracker of segment size |
1265 | 12 | _mem_tracker->consume(size - _mem_tracker->consumption()); |
1266 | 12 | return size; |
1267 | 12 | } |
1268 | | |
// Write all per-column and per-segment indexes and report the total index
// footprint through `index_size`. Also resets the column writers (see clear()),
// so this must be called exactly once, before finalize_footer().
Status VerticalSegmentWriter::finalize_columns_index(uint64_t* index_size) {
    uint64_t index_start = _file_writer->bytes_appended();
    RETURN_IF_ERROR(_write_ordinal_index());
    RETURN_IF_ERROR(_write_zone_map());
    RETURN_IF_ERROR(_write_inverted_index());
    RETURN_IF_ERROR(_write_ann_index());
    RETURN_IF_ERROR(_write_bloom_filter_index());

    // Bytes appended so far by the per-column indexes above.
    *index_size = _file_writer->bytes_appended() - index_start;
    if (_is_mow_with_cluster_key()) {
        // Cluster-key merge-on-write writes both index kinds: the short key
        // index bytes are counted via file offsets, the primary key index via
        // the builder's own stat (see comment in the _is_mow() branch).
        RETURN_IF_ERROR(_write_short_key_index());
        *index_size = _file_writer->bytes_appended() - index_start;
        RETURN_IF_ERROR(_write_primary_key_index());
        *index_size += _primary_key_index_builder->disk_size();
    } else if (_is_mow()) {
        RETURN_IF_ERROR(_write_primary_key_index());
        // IndexedColumnWriter write data pages mixed with segment data, we should use
        // the stat from primary key index builder.
        *index_size += _primary_key_index_builder->disk_size();
    } else {
        RETURN_IF_ERROR(_write_short_key_index());
        *index_size = _file_writer->bytes_appended() - index_start;
    }

    // reset all column writers and data_conveter
    clear();

    return Status::OK();
}
1298 | | |
1299 | 12 | Status VerticalSegmentWriter::finalize_footer(uint64_t* segment_file_size) { |
1300 | 12 | RETURN_IF_ERROR(_write_footer()); |
1301 | | // finish |
1302 | 12 | RETURN_IF_ERROR(_file_writer->close(true)); |
1303 | 12 | *segment_file_size = _file_writer->bytes_appended(); |
1304 | 12 | if (*segment_file_size == 0) { |
1305 | 0 | return Status::Corruption("Bad segment, file size = 0"); |
1306 | 0 | } |
1307 | 12 | return Status::OK(); |
1308 | 12 | } |
1309 | | |
1310 | 12 | Status VerticalSegmentWriter::finalize(uint64_t* segment_file_size, uint64_t* index_size) { |
1311 | 12 | MonotonicStopWatch timer; |
1312 | 12 | timer.start(); |
1313 | | // check disk capacity |
1314 | 12 | if (_data_dir != nullptr && |
1315 | 12 | _data_dir->reach_capacity_limit((int64_t)_estimated_remaining_size())) { |
1316 | 0 | return Status::Error<DISK_REACH_CAPACITY_LIMIT>("disk {} exceed capacity limit.", |
1317 | 0 | _data_dir->path_hash()); |
1318 | 0 | } |
1319 | 12 | _row_count = _num_rows_written; |
1320 | 12 | _num_rows_written = 0; |
1321 | | // write index |
1322 | 12 | RETURN_IF_ERROR(finalize_columns_index(index_size)); |
1323 | | // write footer |
1324 | 12 | RETURN_IF_ERROR(finalize_footer(segment_file_size)); |
1325 | | |
1326 | 12 | if (timer.elapsed_time() > 5000000000L) { |
1327 | 0 | LOG(INFO) << "segment flush consumes a lot time_ns " << timer.elapsed_time() |
1328 | 0 | << ", segmemt_size " << *segment_file_size; |
1329 | 0 | } |
1330 | 12 | return Status::OK(); |
1331 | 12 | } |
1332 | | |
1333 | 12 | void VerticalSegmentWriter::clear() { |
1334 | 61 | for (auto& column_writer : _column_writers) { |
1335 | 61 | column_writer.reset(); |
1336 | 61 | } |
1337 | 12 | _column_writers.clear(); |
1338 | 12 | _olap_data_convertor.reset(); |
1339 | 12 | } |
1340 | | |
1341 | | // write ordinal index after data has been written |
1342 | 12 | Status VerticalSegmentWriter::_write_ordinal_index() { |
1343 | 61 | for (auto& column_writer : _column_writers) { |
1344 | 61 | RETURN_IF_ERROR(column_writer->write_ordinal_index()); |
1345 | 61 | } |
1346 | 12 | return Status::OK(); |
1347 | 12 | } |
1348 | | |
1349 | 12 | Status VerticalSegmentWriter::_write_zone_map() { |
1350 | 61 | for (auto& column_writer : _column_writers) { |
1351 | 61 | RETURN_IF_ERROR(column_writer->write_zone_map()); |
1352 | 61 | } |
1353 | 12 | return Status::OK(); |
1354 | 12 | } |
1355 | | |
1356 | 12 | Status VerticalSegmentWriter::_write_inverted_index() { |
1357 | 61 | for (auto& column_writer : _column_writers) { |
1358 | 61 | RETURN_IF_ERROR(column_writer->write_inverted_index()); |
1359 | 61 | } |
1360 | 12 | return Status::OK(); |
1361 | 12 | } |
1362 | | |
1363 | 12 | Status VerticalSegmentWriter::_write_ann_index() { |
1364 | 61 | for (auto& column_writer : _column_writers) { |
1365 | 61 | RETURN_IF_ERROR(column_writer->write_ann_index()); |
1366 | 61 | } |
1367 | 12 | return Status::OK(); |
1368 | 12 | } |
1369 | | |
1370 | 12 | Status VerticalSegmentWriter::_write_bloom_filter_index() { |
1371 | 61 | for (auto& column_writer : _column_writers) { |
1372 | 61 | RETURN_IF_ERROR(column_writer->write_bloom_filter_index()); |
1373 | 61 | } |
1374 | 12 | return Status::OK(); |
1375 | 12 | } |
1376 | | |
1377 | 10 | Status VerticalSegmentWriter::_write_short_key_index() { |
1378 | 10 | std::vector<Slice> body; |
1379 | 10 | PageFooterPB footer; |
1380 | 10 | RETURN_IF_ERROR(_short_key_index_builder->finalize(_row_count, &body, &footer)); |
1381 | 10 | PagePointer pp; |
1382 | | // short key index page is not compressed right now |
1383 | 10 | RETURN_IF_ERROR(PageIO::write_page(_file_writer, body, footer, &pp)); |
1384 | 10 | pp.to_proto(_footer.mutable_short_key_index_page()); |
1385 | 10 | return Status::OK(); |
1386 | 10 | } |
1387 | | |
// Finalize the primary key index and store its metadata into the footer.
Status VerticalSegmentWriter::_write_primary_key_index() {
    // Every written row must have contributed exactly one primary key entry.
    CHECK_EQ(_primary_key_index_builder->num_rows(), _row_count);
    return _primary_key_index_builder->finalize(_footer.mutable_primary_key_index_meta());
}
1392 | | |
1393 | 12 | Status VerticalSegmentWriter::_write_footer() { |
1394 | 12 | _footer.set_num_rows(_row_count); |
1395 | | |
1396 | | // Decide whether to externalize ColumnMetaPB by tablet default, and stamp footer version |
1397 | | |
1398 | 12 | if (_tablet_schema->is_external_segment_column_meta_used()) { |
1399 | 0 | _footer.set_version(SEGMENT_FOOTER_VERSION_V3_EXT_COL_META); |
1400 | 0 | VLOG_DEBUG << "use external column meta"; |
1401 | | // External ColumnMetaPB writing (optional) |
1402 | 0 | RETURN_IF_ERROR(ExternalColMetaUtil::write_external_column_meta( |
1403 | 0 | _file_writer, &_footer, _opts.compression_type, |
1404 | 0 | [this](const std::vector<Slice>& slices) { return _write_raw_data(slices); })); |
1405 | 0 | } |
1406 | | |
1407 | | // Footer := SegmentFooterPB, FooterPBSize(4), FooterPBChecksum(4), MagicNumber(4) |
1408 | 12 | VLOG_DEBUG << "footer " << _footer.DebugString(); |
1409 | 12 | std::string footer_buf; |
1410 | 12 | if (!_footer.SerializeToString(&footer_buf)) { |
1411 | 0 | return Status::InternalError("failed to serialize segment footer"); |
1412 | 0 | } |
1413 | | |
1414 | 12 | faststring fixed_buf; |
1415 | | // footer's size |
1416 | 12 | put_fixed32_le(&fixed_buf, cast_set<uint32_t>(footer_buf.size())); |
1417 | | // footer's checksum |
1418 | 12 | uint32_t checksum = crc32c::Crc32c(footer_buf.data(), footer_buf.size()); |
1419 | 12 | put_fixed32_le(&fixed_buf, checksum); |
1420 | | // Append magic number. we don't write magic number in the header because |
1421 | | // that will need an extra seek when reading |
1422 | 12 | fixed_buf.append(k_segment_magic, k_segment_magic_length); |
1423 | | |
1424 | 12 | std::vector<Slice> slices {footer_buf, fixed_buf}; |
1425 | 12 | return _write_raw_data(slices); |
1426 | 12 | } |
1427 | | |
1428 | 12 | Status VerticalSegmentWriter::_write_raw_data(const std::vector<Slice>& slices) { |
1429 | 12 | RETURN_IF_ERROR(_file_writer->appendv(&slices[0], slices.size())); |
1430 | 12 | return Status::OK(); |
1431 | 12 | } |
1432 | | |
1433 | 12 | Slice VerticalSegmentWriter::min_encoded_key() { |
1434 | 12 | return (_primary_key_index_builder == nullptr) ? Slice(_min_key.data(), _min_key.size()) |
1435 | 12 | : _primary_key_index_builder->min_key(); |
1436 | 12 | } |
1437 | 12 | Slice VerticalSegmentWriter::max_encoded_key() { |
1438 | 12 | return (_primary_key_index_builder == nullptr) ? Slice(_max_key.data(), _max_key.size()) |
1439 | 12 | : _primary_key_index_builder->max_key(); |
1440 | 12 | } |
1441 | | |
1442 | 0 | void VerticalSegmentWriter::_set_min_max_key(const Slice& key) { |
1443 | 0 | if (UNLIKELY(_is_first_row)) { |
1444 | 0 | _min_key.append(key.get_data(), key.get_size()); |
1445 | 0 | _is_first_row = false; |
1446 | 0 | } |
1447 | 0 | if (key.compare(_max_key) > 0) { |
1448 | 0 | _max_key.clear(); |
1449 | 0 | _max_key.append(key.get_data(), key.get_size()); |
1450 | 0 | } |
1451 | 0 | } |
1452 | | |
1453 | 10 | void VerticalSegmentWriter::_set_min_key(const Slice& key) { |
1454 | 10 | if (UNLIKELY(_is_first_row)) { |
1455 | 10 | _min_key.append(key.get_data(), key.get_size()); |
1456 | 10 | _is_first_row = false; |
1457 | 10 | } |
1458 | 10 | } |
1459 | | |
1460 | 10 | void VerticalSegmentWriter::_set_max_key(const Slice& key) { |
1461 | 10 | _max_key.clear(); |
1462 | 10 | _max_key.append(key.get_data(), key.get_size()); |
1463 | 10 | } |
1464 | | |
1465 | 172 | inline bool VerticalSegmentWriter::_is_mow() { |
1466 | 172 | return _tablet_schema->keys_type() == UNIQUE_KEYS && _opts.enable_unique_key_merge_on_write; |
1467 | 172 | } |
1468 | | |
1469 | 115 | inline bool VerticalSegmentWriter::_is_mow_with_cluster_key() { |
1470 | 115 | return _is_mow() && !_tablet_schema->cluster_key_uids().empty(); |
1471 | 115 | } |
1472 | | |
1473 | | } // namespace doris::segment_v2 |