be/src/storage/segment/vertical_segment_writer.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "storage/segment/vertical_segment_writer.h" |
19 | | |
20 | | #include <crc32c/crc32c.h> |
21 | | #include <gen_cpp/olap_file.pb.h> |
22 | | #include <gen_cpp/segment_v2.pb.h> |
23 | | #include <parallel_hashmap/phmap.h> |
24 | | |
25 | | #include <cassert> |
26 | | #include <memory> |
27 | | #include <ostream> |
28 | | #include <string> |
29 | | #include <unordered_map> |
30 | | #include <unordered_set> |
31 | | #include <utility> |
32 | | |
33 | | #include "cloud/config.h" |
34 | | #include "common/cast_set.h" |
35 | | #include "common/compiler_util.h" // IWYU pragma: keep |
36 | | #include "common/config.h" |
37 | | #include "common/logging.h" // LOG |
38 | | #include "common/status.h" |
39 | | #include "core/assert_cast.h" |
40 | | #include "core/block/block.h" |
41 | | #include "core/block/column_with_type_and_name.h" |
42 | | #include "core/column/column_nullable.h" |
43 | | #include "core/column/column_vector.h" |
44 | | #include "core/data_type/data_type.h" |
45 | | #include "core/data_type/data_type_factory.hpp" |
46 | | #include "core/data_type/data_type_number.h" // IWYU pragma: keep |
47 | | #include "core/types.h" |
48 | | #include "exec/common/variant_util.h" |
49 | | #include "io/fs/file_writer.h" |
50 | | #include "io/fs/local_file_system.h" |
51 | | #include "runtime/exec_env.h" |
52 | | #include "runtime/memory/mem_tracker.h" |
53 | | #include "service/point_query_executor.h" |
54 | | #include "storage/data_dir.h" |
55 | | #include "storage/index/index_file_writer.h" |
56 | | #include "storage/index/inverted/inverted_index_desc.h" |
57 | | #include "storage/index/inverted/inverted_index_fs_directory.h" |
58 | | #include "storage/index/primary_key_index.h" |
59 | | #include "storage/index/short_key_index.h" |
60 | | #include "storage/iterator/olap_data_convertor.h" |
61 | | #include "storage/key_coder.h" |
62 | | #include "storage/olap_common.h" |
63 | | #include "storage/partial_update_info.h" |
64 | | #include "storage/row_cursor.h" // RowCursor // IWYU pragma: keep |
65 | | #include "storage/rowset/rowset_fwd.h" |
66 | | #include "storage/rowset/rowset_writer_context.h" // RowsetWriterContext |
67 | | #include "storage/rowset/segment_creator.h" |
68 | | #include "storage/segment/column_writer.h" // ColumnWriter |
69 | | #include "storage/segment/external_col_meta_util.h" |
70 | | #include "storage/segment/page_io.h" |
71 | | #include "storage/segment/page_pointer.h" |
72 | | #include "storage/segment/segment_loader.h" |
73 | | #include "storage/segment/variant/variant_ext_meta_writer.h" |
74 | | #include "storage/tablet/base_tablet.h" |
75 | | #include "storage/tablet/tablet_schema.h" |
76 | | #include "storage/utils.h" |
77 | | #include "util/coding.h" |
78 | | #include "util/debug_points.h" |
79 | | #include "util/faststring.h" |
80 | | #include "util/json/path_in_data.h" |
81 | | #include "util/jsonb/serialize.h" |
82 | | namespace doris::segment_v2 { |
83 | | |
84 | | #include "common/compile_check_begin.h" |
85 | | |
using namespace ErrorCode;
using namespace KeyConsts;

// Magic bytes identifying a V2 segment file, plus their length.
// NOTE(review): not referenced in the visible portion of this file — presumably
// written alongside the segment footer by code further down; confirm usage.
static const char* k_segment_magic = "D0R1";
static const uint32_t k_segment_magic_length = 4;
91 | | |
// Builds the label under which one segment writer's memory consumption is
// tracked, e.g. "VerticalSegmentWriter:Segment-3".
inline std::string vertical_segment_writer_mem_tracker_name(uint32_t segment_id) {
    std::string name = "VerticalSegmentWriter:Segment-";
    name += std::to_string(segment_id);
    return name;
}
95 | | |
// Constructs a writer that flushes one segment's data column-by-column
// ("vertically") through `file_writer`. Caches the key coders / key index
// sizes for the sort key so row keys can be encoded later without repeated
// schema lookups. For merge-on-write (MOW) tables with cluster keys, the
// *cluster* keys become the sort key while the original primary-key coders
// are preserved separately in _primary_key_coders.
VerticalSegmentWriter::VerticalSegmentWriter(io::FileWriter* file_writer, uint32_t segment_id,
                                             TabletSchemaSPtr tablet_schema, BaseTabletSPtr tablet,
                                             DataDir* data_dir,
                                             const VerticalSegmentWriterOptions& opts,
                                             IndexFileWriter* index_file_writer)
        : _segment_id(segment_id),
          _tablet_schema(std::move(tablet_schema)),
          _tablet(std::move(tablet)),
          _data_dir(data_dir),
          _opts(opts),
          _file_writer(file_writer),
          _index_file_writer(index_file_writer),
          _mem_tracker(std::make_unique<MemTracker>(
                  vertical_segment_writer_mem_tracker_name(segment_id))),
          // NOTE(review): `opts` is a const reference, so std::move here still
          // copies mow_ctx (cannot move from const) — harmless but misleading.
          _mow_context(std::move(opts.mow_ctx)),
          _block_aggregator(*this) {
    CHECK_NOTNULL(file_writer);
    _num_sort_key_columns = _tablet_schema->num_key_columns();
    _num_short_key_columns = _tablet_schema->num_short_key_columns();
    // With cluster keys the sort key is replaced below, so this invariant only
    // holds for the non-cluster-key case.
    if (!_is_mow_with_cluster_key()) {
        DCHECK(_num_sort_key_columns >= _num_short_key_columns)
                << ", table_id=" << _tablet_schema->table_id()
                << ", num_key_columns=" << _num_sort_key_columns
                << ", num_short_key_columns=" << _num_short_key_columns
                << ", cluster_key_columns=" << _tablet_schema->cluster_key_uids().size();
    }
    // One key coder + index length per sort-key column, in key order.
    for (size_t cid = 0; cid < _num_sort_key_columns; ++cid) {
        const auto& column = _tablet_schema->column(cid);
        _key_coders.push_back(get_key_coder(column.type()));
        _key_index_size.push_back(cast_set<uint16_t>(column.index_length()));
    }
    // encode the sequence id into the primary key index
    if (_is_mow()) {
        if (_tablet_schema->has_sequence_col()) {
            const auto& column = _tablet_schema->column(_tablet_schema->sequence_col_idx());
            _seq_coder = get_key_coder(column.type());
        }
        // encode the rowid into the primary key index
        if (_is_mow_with_cluster_key()) {
            const auto* type_info = get_scalar_type_info<FieldType::OLAP_FIELD_TYPE_UNSIGNED_INT>();
            _rowid_coder = get_key_coder(type_info->type());
            // primary keys: keep the coders gathered above for the PK index
            _primary_key_coders.swap(_key_coders);
            // cluster keys: rebuild the sort-key coders from the cluster key uids
            _key_coders.clear();
            _key_index_size.clear();
            _num_sort_key_columns = _tablet_schema->cluster_key_uids().size();
            for (auto cid : _tablet_schema->cluster_key_uids()) {
                const auto& column = _tablet_schema->column_by_uid(cid);
                _key_coders.push_back(get_key_coder(column.type()));
                _key_index_size.push_back(cast_set<uint16_t>(column.index_length()));
            }
        }
    }
}
151 | | |
VerticalSegmentWriter::~VerticalSegmentWriter() {
    // Return everything this writer charged to its tracker so the tracker
    // reads zero on destruction.
    _mem_tracker->release(_mem_tracker->consumption());
}
155 | | |
156 | | void VerticalSegmentWriter::_init_column_meta(ColumnMetaPB* meta, uint32_t column_id, |
157 | 61 | const TabletColumn& column) { |
158 | 61 | meta->set_column_id(column_id); |
159 | 61 | meta->set_type(int(column.type())); |
160 | 61 | meta->set_length(cast_set<int32_t>(column.length())); |
161 | 61 | meta->set_encoding(DEFAULT_ENCODING); |
162 | 61 | meta->set_compression(_opts.compression_type); |
163 | 61 | meta->set_is_nullable(column.is_nullable()); |
164 | 61 | meta->set_default_value(column.default_value()); |
165 | 61 | meta->set_precision(column.precision()); |
166 | 61 | meta->set_frac(column.frac()); |
167 | 61 | if (column.has_path_info()) { |
168 | 0 | column.path_info_ptr()->to_protobuf(meta->mutable_column_path_info(), |
169 | 0 | column.parent_unique_id()); |
170 | 0 | } |
171 | 61 | meta->set_unique_id(column.unique_id()); |
172 | 61 | for (uint32_t i = 0; i < column.get_subtype_count(); ++i) { |
173 | 0 | _init_column_meta(meta->add_children_columns(), column_id, column.get_sub_column(i)); |
174 | 0 | } |
175 | 61 | if (column.is_variant_type()) { |
176 | 0 | meta->set_variant_max_subcolumns_count(column.variant_max_subcolumns_count()); |
177 | 0 | } |
178 | 61 | meta->set_result_is_nullable(column.get_result_is_nullable()); |
179 | 61 | meta->set_function_name(column.get_aggregation_name()); |
180 | 61 | meta->set_be_exec_version(column.get_be_exec_version()); |
181 | 61 | } |
182 | | |
// Creates and initializes the ColumnWriter for column `cid`, registers its
// metadata in the segment footer, and adds the matching converter to
// _olap_data_convertor. Index-related options (zone map, bloom filter, ngram
// bf, inverted index, ANN index) are derived from the tablet schema and the
// write context.
Status VerticalSegmentWriter::_create_column_writer(uint32_t cid, const TabletColumn& column,
                                                    const TabletSchemaSPtr& tablet_schema) {
    ColumnWriterOptions opts;
    opts.meta = _footer.add_columns();

    _init_column_meta(opts.meta, cid, column);

    // now we create zone map for key columns in AGG_KEYS or all column in UNIQUE_KEYS or DUP_KEYS
    // except for columns whose type don't support zone map.
    opts.need_zone_map = column.is_key() || tablet_schema->keys_type() != KeysType::AGG_KEYS;
    opts.need_bloom_filter = column.is_bf_column();
    if (opts.need_bloom_filter) {
        // Fall back to a 5% false-positive rate when the schema does not specify one.
        opts.bf_options.fpp =
                tablet_schema->has_bf_fpp() ? tablet_schema->bloom_filter_fpp() : 0.05;
    }
    auto* tablet_index = tablet_schema->get_ngram_bf_index(column.unique_id());
    if (tablet_index) {
        opts.need_bloom_filter = true;
        opts.is_ngram_bf_index = true;
        //narrow convert from int32_t to uint8_t and uint16_t which is dangerous
        auto gram_size = tablet_index->get_gram_size();
        auto gram_bf_size = tablet_index->get_gram_bf_size();
        // NOTE(review): this check permits gram_size == 256, which does not fit
        // in the uint8_t cast below — looks like an off-by-one; confirm the
        // intended upper bound (likely 255).
        if (gram_size > 256 || gram_size < 1) {
            return Status::NotSupported("Do not support ngram bloom filter for ngram_size: ",
                                        gram_size);
        }
        if (gram_bf_size > 65535 || gram_bf_size < 64) {
            return Status::NotSupported("Do not support ngram bloom filter for bf_size: ",
                                        gram_bf_size);
        }
        opts.gram_size = cast_set<uint8_t>(gram_size);
        opts.gram_bf_size = cast_set<uint16_t>(gram_bf_size);
    }

    bool skip_inverted_index = false;
    if (_opts.rowset_ctx != nullptr) {
        // skip write inverted index for index compaction column
        skip_inverted_index =
                _opts.rowset_ctx->columns_to_do_index_compaction.contains(column.unique_id());
    }
    // skip write inverted index on load if skip_write_index_on_load is true
    if (_opts.write_type == DataWriteType::TYPE_DIRECT &&
        tablet_schema->skip_write_index_on_load()) {
        skip_inverted_index = true;
    }
    if (!skip_inverted_index) {
        auto inverted_indexs = tablet_schema->inverted_indexs(column);
        if (!inverted_indexs.empty()) {
            opts.inverted_indexes = inverted_indexs;
            opts.need_inverted_index = true;
            DCHECK(_index_file_writer != nullptr);
        }
    }
    opts.index_file_writer = _index_file_writer;

    if (const auto& index = tablet_schema->ann_index(column); index != nullptr) {
        opts.ann_index = index;
        opts.need_ann_index = true;
        DCHECK(_index_file_writer != nullptr);
        opts.index_file_writer = _index_file_writer;
    }

// Zone maps and bloom filters are not meaningful for complex / non-comparable
// field types, so switch them off for those types.
#define DISABLE_INDEX_IF_FIELD_TYPE(TYPE)                     \
    if (column.type() == FieldType::OLAP_FIELD_TYPE_##TYPE) { \
        opts.need_zone_map = false;                           \
        opts.need_bloom_filter = false;                       \
    }

    DISABLE_INDEX_IF_FIELD_TYPE(STRUCT)
    DISABLE_INDEX_IF_FIELD_TYPE(ARRAY)
    DISABLE_INDEX_IF_FIELD_TYPE(JSONB)
    DISABLE_INDEX_IF_FIELD_TYPE(AGG_STATE)
    DISABLE_INDEX_IF_FIELD_TYPE(MAP)
    DISABLE_INDEX_IF_FIELD_TYPE(BITMAP)
    DISABLE_INDEX_IF_FIELD_TYPE(HLL)
    DISABLE_INDEX_IF_FIELD_TYPE(QUANTILE_STATE)
    DISABLE_INDEX_IF_FIELD_TYPE(VARIANT)

#undef DISABLE_INDEX_IF_FIELD_TYPE

// NOTE(review): CHECK_FIELD_TYPE is never defined in this file; this #undef
// appears to be dead leftover.
#undef CHECK_FIELD_TYPE

    int64_t storage_page_size = _tablet_schema->storage_page_size();
    // storage_page_size must be between 4KB and 10MB.
    if (storage_page_size >= 4096 && storage_page_size <= 10485760) {
        opts.data_page_size = storage_page_size;
    }
    opts.dict_page_size = _tablet_schema->storage_dict_page_size();
    // Debug hook: lets tests assert that the configured page size actually
    // reached the writer for a given table.
    DBUG_EXECUTE_IF("VerticalSegmentWriter._create_column_writer.storage_page_size", {
        auto table_id = DebugPoints::instance()->get_debug_param_or_default<int64_t>(
                "VerticalSegmentWriter._create_column_writer.storage_page_size", "table_id",
                INT_MIN);
        auto target_data_page_size = DebugPoints::instance()->get_debug_param_or_default<int64_t>(
                "VerticalSegmentWriter._create_column_writer.storage_page_size",
                "storage_page_size", INT_MIN);
        if (table_id == INT_MIN || target_data_page_size == INT_MIN) {
            return Status::Error<ErrorCode::INTERNAL_ERROR>(
                    "Debug point parameters missing: either 'table_id' or 'storage_page_size' not "
                    "set.");
        }
        if (table_id == _tablet_schema->table_id() &&
            opts.data_page_size != target_data_page_size) {
            return Status::Error<ErrorCode::INTERNAL_ERROR>(
                    "Mismatch in 'storage_page_size': expected size does not match the current "
                    "data page size. "
                    "Expected: " +
                    std::to_string(target_data_page_size) +
                    ", Actual: " + std::to_string(opts.data_page_size) + ".");
        }
    })
    if (column.is_row_store_column()) {
        // smaller page size for row store column
        auto page_size = _tablet_schema->row_store_page_size();
        opts.data_page_size =
                (page_size > 0) ? page_size : segment_v2::ROW_STORE_PAGE_SIZE_DEFAULT_VALUE;
    }

    opts.rowset_ctx = _opts.rowset_ctx;
    opts.file_writer = _file_writer;
    opts.compression_type = _opts.compression_type;
    opts.footer = &_footer;
    opts.input_rs_readers = _opts.rowset_ctx->input_rs_readers;

    opts.encoding_preference = {.integer_type_default_use_plain_encoding =
                                        _tablet_schema->integer_type_default_use_plain_encoding(),
                                .binary_plain_encoding_default_impl =
                                        _tablet_schema->binary_plain_encoding_default_impl()};
    std::unique_ptr<ColumnWriter> writer;
    RETURN_IF_ERROR(ColumnWriter::create(opts, &column, _file_writer, &writer));
    RETURN_IF_ERROR(writer->init());
    _column_writers[cid] = std::move(writer);
    _olap_data_convertor->add_column_data_convertor_at(column, cid);
    return Status::OK();
}; // NOTE(review): stray ';' after the function body (harmless empty declaration).
317 | | |
318 | 12 | Status VerticalSegmentWriter::init() { |
319 | 12 | DCHECK(_column_writers.empty()); |
320 | 12 | if (_opts.compression_type == UNKNOWN_COMPRESSION) { |
321 | 0 | _opts.compression_type = _tablet_schema->compression_type(); |
322 | 0 | } |
323 | 12 | _olap_data_convertor = std::make_unique<OlapBlockDataConvertor>(); |
324 | 12 | _olap_data_convertor->resize(_tablet_schema->num_columns()); |
325 | 12 | _column_writers.resize(_tablet_schema->num_columns()); |
326 | | // we don't need the short key index for unique key merge on write table. |
327 | 12 | if (_is_mow()) { |
328 | 3 | size_t seq_col_length = 0; |
329 | 3 | if (_tablet_schema->has_sequence_col()) { |
330 | 3 | seq_col_length = |
331 | 3 | _tablet_schema->column(_tablet_schema->sequence_col_idx()).length() + 1; |
332 | 3 | } |
333 | 3 | size_t rowid_length = 0; |
334 | 3 | if (_is_mow_with_cluster_key()) { |
335 | 1 | rowid_length = PrimaryKeyIndexReader::ROW_ID_LENGTH; |
336 | 1 | _short_key_index_builder.reset( |
337 | 1 | new ShortKeyIndexBuilder(_segment_id, _opts.num_rows_per_block)); |
338 | 1 | } |
339 | 3 | _primary_key_index_builder.reset( |
340 | 3 | new PrimaryKeyIndexBuilder(_file_writer, seq_col_length, rowid_length)); |
341 | 3 | RETURN_IF_ERROR(_primary_key_index_builder->init()); |
342 | 9 | } else { |
343 | 9 | _short_key_index_builder.reset( |
344 | 9 | new ShortKeyIndexBuilder(_segment_id, _opts.num_rows_per_block)); |
345 | 9 | } |
346 | 12 | return Status::OK(); |
347 | 12 | } |
348 | | |
349 | 8 | void VerticalSegmentWriter::_maybe_invalid_row_cache(const std::string& key) const { |
350 | | // Just invalid row cache for simplicity, since the rowset is not visible at present. |
351 | | // If we update/insert cache, if load failed rowset will not be visible but cached data |
352 | | // will be visible, and lead to inconsistency. |
353 | 8 | if (!config::disable_storage_row_cache && _tablet_schema->has_row_store_for_all_columns() && |
354 | 8 | _opts.write_type == DataWriteType::TYPE_DIRECT) { |
355 | | // invalidate cache |
356 | 0 | RowCache::instance()->erase({_opts.rowset_ctx->tablet_id, key}); |
357 | 0 | } |
358 | 8 | } |
359 | | |
// Serializes every row of `block` into JSONB and stores the result in the
// block's row-store column (the first column flagged is_row_store_column),
// replacing that column's previous contents. No-op when the block is empty or
// the schema has no row-store column.
void VerticalSegmentWriter::_serialize_block_to_row_column(const Block& block) {
    if (block.rows() == 0) {
        return;
    }
    MonotonicStopWatch watch;
    watch.start();
    // NOTE(review): row_column_id is never assigned inside the loop, so the
    // VLOG below always reports 0 — presumably it was meant to be set to `i`
    // when the row-store column is found; confirm intent.
    int row_column_id = 0;
    for (int i = 0; i < _tablet_schema->num_columns(); ++i) {
        if (_tablet_schema->column(i).is_row_store_column()) {
            // Mutate the row-store column in place: clear old contents, then
            // re-serialize all rows of the block into it.
            auto* row_store_column = static_cast<ColumnString*>(
                    block.get_by_position(i).column->assume_mutable_ref().assume_mutable().get());
            row_store_column->clear();
            DataTypeSerDeSPtrs serdes = create_data_type_serdes(block.get_data_types());
            // Only the column uids listed in row_columns_uids are included in
            // the serialized row.
            std::unordered_set<int> row_store_cids_set(_tablet_schema->row_columns_uids().begin(),
                                                       _tablet_schema->row_columns_uids().end());
            JsonbSerializeUtil::block_to_jsonb(*_tablet_schema, block, *row_store_column,
                                               cast_set<int>(_tablet_schema->num_columns()), serdes,
                                               row_store_cids_set);
            break;
        }
    }

    VLOG_DEBUG << "serialize , num_rows:" << block.rows() << ", row_column_id:" << row_column_id
               << ", total_byte_size:" << block.allocated_bytes() << ", serialize_cost(us)"
               << watch.elapsed_time() / 1000;
}
386 | | |
// Probes `key` against the historical rowsets of a merge-on-write table for
// the row being written at `segment_pos`, and decides how its missing columns
// will be filled:
//   - key not found: row is new; defaults/null are used (not_found_cb may
//     reject the new key, e.g. strict-mode partial update).
//   - key found and readable: found_cb(loc) records a read plan entry and the
//     old row location is marked deleted in the delete bitmap.
//   - key found but row is logically deleted / duplicated in this segment:
//     defaults/null are used and the appropriate row is marked deleted.
// Side effects: appends one flag to use_default_or_null_flag, may set
// has_default_or_nullable, records rowsets in _rsid_to_rowset, updates the
// delete bitmap, and bumps counters in `stats`.
Status VerticalSegmentWriter::_probe_key_for_mow(
        std::string key, std::size_t segment_pos, bool have_input_seq_column, bool have_delete_sign,
        const std::vector<RowsetSharedPtr>& specified_rowsets,
        std::vector<std::unique_ptr<SegmentCacheHandle>>& segment_caches,
        bool& has_default_or_nullable, std::vector<bool>& use_default_or_null_flag,
        const std::function<void(const RowLocation& loc)>& found_cb,
        const std::function<Status()>& not_found_cb, PartialUpdateStats& stats) {
    RowLocation loc;
    // save rowset shared ptr so this rowset wouldn't delete
    RowsetSharedPtr rowset;
    auto st = _tablet->lookup_row_key(key, _tablet_schema.get(), have_input_seq_column,
                                      specified_rowsets, &loc, _mow_context->max_version,
                                      segment_caches, &rowset);
    if (st.is<KEY_NOT_FOUND>()) {
        // Brand-new key. A row that only deletes a non-existent key is allowed
        // through without consulting not_found_cb.
        if (!have_delete_sign) {
            RETURN_IF_ERROR(not_found_cb());
        }
        ++stats.num_rows_new_added;
        has_default_or_nullable = true;
        use_default_or_null_flag.emplace_back(true);
        return Status::OK();
    }
    // KEY_ALREADY_EXISTS is handled below; anything else is a real error.
    if (!st.ok() && !st.is<KEY_ALREADY_EXISTS>()) {
        LOG(WARNING) << "failed to lookup row key, error: " << st;
        return st;
    }

    // 1. if the delete sign is marked, it means that the value columns of the row will not
    // be read. So we don't need to read the missing values from the previous rows.
    // 2. the one exception is when there are sequence columns in the table, we need to read
    // the sequence columns, otherwise it may cause the merge-on-read based compaction
    // policy to produce incorrect results

    // 3. In flexible partial update, we may delete the existing rows before if there exists
    // insert after delete in one load. In this case, the insert should also be treated
    // as newly inserted rows, note that the sequence column value is filled in
    // BlockAggregator::aggregate_for_insert_after_delete() if this row doesn't specify the sequence column
    if (st.is<KEY_ALREADY_EXISTS>() || (have_delete_sign && !_tablet_schema->has_sequence_col()) ||
        (_opts.rowset_ctx->partial_update_info->is_flexible_partial_update() &&
         _mow_context->delete_bitmap->contains(
                 {loc.rowset_id, loc.segment_id, DeleteBitmap::TEMP_VERSION_COMMON}, loc.row_id))) {
        has_default_or_nullable = true;
        use_default_or_null_flag.emplace_back(true);
    } else {
        // partial update should not contain invisible columns
        use_default_or_null_flag.emplace_back(false);
        _rsid_to_rowset.emplace(rowset->rowset_id(), rowset);
        found_cb(loc);
    }

    if (st.is<KEY_ALREADY_EXISTS>()) {
        // although we need to mark delete current row, we still need to read missing columns
        // for this row, we need to ensure that each column is aligned
        _mow_context->delete_bitmap->add(
                {_opts.rowset_ctx->rowset_id, _segment_id, DeleteBitmap::TEMP_VERSION_COMMON},
                cast_set<uint32_t>(segment_pos));
        ++stats.num_rows_deleted;
    } else {
        // Supersede the old version of this key.
        _mow_context->delete_bitmap->add(
                {loc.rowset_id, loc.segment_id, DeleteBitmap::TEMP_VERSION_COMMON}, loc.row_id);
        ++stats.num_rows_updated;
    }
    return Status::OK();
}
451 | | |
452 | 61 | Status VerticalSegmentWriter::_finalize_column_writer_and_update_meta(size_t cid) { |
453 | 61 | RETURN_IF_ERROR(_column_writers[cid]->finish()); |
454 | 61 | RETURN_IF_ERROR(_column_writers[cid]->write_data()); |
455 | | |
456 | 61 | auto* column_meta = _column_writers[cid]->get_column_meta(); |
457 | 61 | column_meta->set_compressed_data_bytes( |
458 | 61 | _column_writers[cid]->get_total_compressed_data_pages_bytes()); |
459 | 61 | column_meta->set_uncompressed_data_bytes( |
460 | 61 | _column_writers[cid]->get_total_uncompressed_data_pages_bytes()); |
461 | 61 | column_meta->set_raw_data_bytes(_column_writers[cid]->get_raw_data_bytes()); |
462 | 61 | return Status::OK(); |
463 | 61 | } |
464 | | |
465 | | Status VerticalSegmentWriter::_partial_update_preconditions_check(size_t row_pos, |
466 | 0 | bool is_flexible_update) { |
467 | 0 | if (!_is_mow()) { |
468 | 0 | auto msg = fmt::format( |
469 | 0 | "Can only do partial update on merge-on-write unique table, but found: " |
470 | 0 | "keys_type={}, _opts.enable_unique_key_merge_on_write={}, tablet_id={}", |
471 | 0 | _tablet_schema->keys_type(), _opts.enable_unique_key_merge_on_write, |
472 | 0 | _tablet->tablet_id()); |
473 | 0 | DCHECK(false) << msg; |
474 | 0 | return Status::InternalError<false>(msg); |
475 | 0 | } |
476 | 0 | if (_opts.rowset_ctx->partial_update_info == nullptr) { |
477 | 0 | auto msg = |
478 | 0 | fmt::format("partial_update_info should not be nullptr, please check, tablet_id={}", |
479 | 0 | _tablet->tablet_id()); |
480 | 0 | DCHECK(false) << msg; |
481 | 0 | return Status::InternalError<false>(msg); |
482 | 0 | } |
483 | 0 | if (!is_flexible_update) { |
484 | 0 | if (!_opts.rowset_ctx->partial_update_info->is_fixed_partial_update()) { |
485 | 0 | auto msg = fmt::format( |
486 | 0 | "in fixed partial update code, but update_mode={}, please check, tablet_id={}", |
487 | 0 | _opts.rowset_ctx->partial_update_info->update_mode(), _tablet->tablet_id()); |
488 | 0 | DCHECK(false) << msg; |
489 | 0 | return Status::InternalError<false>(msg); |
490 | 0 | } |
491 | 0 | } else { |
492 | 0 | if (!_opts.rowset_ctx->partial_update_info->is_flexible_partial_update()) { |
493 | 0 | auto msg = fmt::format( |
494 | 0 | "in flexible partial update code, but update_mode={}, please check, " |
495 | 0 | "tablet_id={}", |
496 | 0 | _opts.rowset_ctx->partial_update_info->update_mode(), _tablet->tablet_id()); |
497 | 0 | DCHECK(false) << msg; |
498 | 0 | return Status::InternalError<false>(msg); |
499 | 0 | } |
500 | 0 | } |
501 | 0 | if (row_pos != 0) { |
502 | 0 | auto msg = fmt::format("row_pos should be 0, but found {}, tablet_id={}", row_pos, |
503 | 0 | _tablet->tablet_id()); |
504 | 0 | DCHECK(false) << msg; |
505 | 0 | return Status::InternalError<false>(msg); |
506 | 0 | } |
507 | 0 | return Status::OK(); |
508 | 0 | } |
509 | | |
510 | | // for partial update, we should do following steps to fill content of block: |
511 | | // 1. set block data to data convertor, and get all key_column's converted slice |
512 | | // 2. get pk of input block, and read missing columns |
513 | | // 2.1 first find key location{rowset_id, segment_id, row_id} |
514 | | // 2.2 build read plan to read by batch |
515 | | // 2.3 fill block |
516 | | // 3. set columns to data convertor and then write all columns |
517 | | Status VerticalSegmentWriter::_append_block_with_partial_content(RowsInBlock& data, |
518 | 0 | Block& full_block) { |
519 | 0 | DBUG_EXECUTE_IF("_append_block_with_partial_content.block", DBUG_BLOCK); |
520 | |
|
521 | 0 | RETURN_IF_ERROR(_partial_update_preconditions_check(data.row_pos, false)); |
522 | | // create full block and fill with input columns |
523 | 0 | full_block = _tablet_schema->create_block(); |
524 | 0 | const auto& including_cids = _opts.rowset_ctx->partial_update_info->update_cids; |
525 | 0 | size_t input_id = 0; |
526 | 0 | for (auto i : including_cids) { |
527 | 0 | full_block.replace_by_position(i, data.block->get_by_position(input_id++).column); |
528 | 0 | } |
529 | |
|
530 | 0 | if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION && |
531 | 0 | _tablet_schema->num_variant_columns() > 0) { |
532 | 0 | RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns( |
533 | 0 | full_block, *_tablet_schema, including_cids)); |
534 | 0 | } |
535 | 0 | bool have_input_seq_column = false; |
536 | | // write including columns |
537 | 0 | std::vector<IOlapColumnDataAccessor*> key_columns; |
538 | 0 | IOlapColumnDataAccessor* seq_column = nullptr; |
539 | 0 | uint32_t segment_start_pos = 0; |
540 | 0 | for (auto cid : including_cids) { |
541 | 0 | RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema)); |
542 | 0 | RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns( |
543 | 0 | &full_block, data.row_pos, data.num_rows, std::vector<uint32_t> {cid})); |
544 | | // here we get segment column row num before append data. |
545 | 0 | segment_start_pos = cast_set<uint32_t>(_column_writers[cid]->get_next_rowid()); |
546 | | // olap data convertor alway start from id = 0 |
547 | 0 | auto [status, column] = _olap_data_convertor->convert_column_data(cid); |
548 | 0 | if (!status.ok()) { |
549 | 0 | return status; |
550 | 0 | } |
551 | 0 | if (cid < _num_sort_key_columns) { |
552 | 0 | key_columns.push_back(column); |
553 | 0 | } else if (_tablet_schema->has_sequence_col() && |
554 | 0 | cid == _tablet_schema->sequence_col_idx()) { |
555 | 0 | seq_column = column; |
556 | 0 | have_input_seq_column = true; |
557 | 0 | } |
558 | 0 | RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(), |
559 | 0 | data.num_rows)); |
560 | 0 | RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid)); |
561 | | // Don't clear source content for key columns and sequence column here, |
562 | | // as they will be used later in _full_encode_keys() and _generate_primary_key_index(). |
563 | | // They will be cleared at the end of this method. |
564 | 0 | bool is_key_column = (cid < _num_sort_key_columns); |
565 | 0 | bool is_seq_column = (_tablet_schema->has_sequence_col() && |
566 | 0 | cid == _tablet_schema->sequence_col_idx() && have_input_seq_column); |
567 | 0 | if (!is_key_column && !is_seq_column) { |
568 | 0 | _olap_data_convertor->clear_source_content(cid); |
569 | 0 | } |
570 | 0 | } |
571 | | |
572 | 0 | bool has_default_or_nullable = false; |
573 | 0 | std::vector<bool> use_default_or_null_flag; |
574 | 0 | use_default_or_null_flag.reserve(data.num_rows); |
575 | 0 | const auto* delete_signs = |
576 | 0 | BaseTablet::get_delete_sign_column_data(full_block, data.row_pos + data.num_rows); |
577 | |
|
578 | 0 | DBUG_EXECUTE_IF("VerticalSegmentWriter._append_block_with_partial_content.sleep", |
579 | 0 | { sleep(60); }) |
580 | 0 | const std::vector<RowsetSharedPtr>& specified_rowsets = _mow_context->rowset_ptrs; |
581 | 0 | std::vector<std::unique_ptr<SegmentCacheHandle>> segment_caches(specified_rowsets.size()); |
582 | |
|
583 | 0 | FixedReadPlan read_plan; |
584 | | |
585 | | // locate rows in base data |
586 | 0 | PartialUpdateStats stats; |
587 | |
|
588 | 0 | for (size_t block_pos = data.row_pos; block_pos < data.row_pos + data.num_rows; block_pos++) { |
589 | | // block segment |
590 | | // 2 -> 0 |
591 | | // 3 -> 1 |
592 | | // 4 -> 2 |
593 | | // 5 -> 3 |
594 | | // here row_pos = 2, num_rows = 4. |
595 | 0 | size_t delta_pos = block_pos - data.row_pos; |
596 | 0 | size_t segment_pos = segment_start_pos + delta_pos; |
597 | 0 | std::string key = _full_encode_keys(key_columns, delta_pos); |
598 | 0 | _maybe_invalid_row_cache(key); |
599 | 0 | if (have_input_seq_column) { |
600 | 0 | _encode_seq_column(seq_column, delta_pos, &key); |
601 | 0 | } |
602 | | // If the table have sequence column, and the include-cids don't contain the sequence |
603 | | // column, we need to update the primary key index builder at the end of this method. |
604 | | // At that time, we have a valid sequence column to encode the key with seq col. |
605 | 0 | if (!_tablet_schema->has_sequence_col() || have_input_seq_column) { |
606 | 0 | RETURN_IF_ERROR(_primary_key_index_builder->add_item(key)); |
607 | 0 | } |
608 | | |
609 | | // mark key with delete sign as deleted. |
610 | 0 | bool have_delete_sign = (delete_signs != nullptr && delete_signs[block_pos] != 0); |
611 | |
|
612 | 0 | auto not_found_cb = [&]() { |
613 | 0 | return _opts.rowset_ctx->partial_update_info->handle_new_key( |
614 | 0 | *_tablet_schema, [&]() -> std::string { |
615 | 0 | return data.block->dump_one_line(block_pos, |
616 | 0 | cast_set<int>(_num_sort_key_columns)); |
617 | 0 | }); |
618 | 0 | }; |
619 | 0 | auto update_read_plan = [&](const RowLocation& loc) { |
620 | 0 | read_plan.prepare_to_read(loc, segment_pos); |
621 | 0 | }; |
622 | 0 | RETURN_IF_ERROR(_probe_key_for_mow(std::move(key), segment_pos, have_input_seq_column, |
623 | 0 | have_delete_sign, specified_rowsets, segment_caches, |
624 | 0 | has_default_or_nullable, use_default_or_null_flag, |
625 | 0 | update_read_plan, not_found_cb, stats)); |
626 | 0 | } |
627 | 0 | CHECK_EQ(use_default_or_null_flag.size(), data.num_rows); |
628 | |
|
629 | 0 | if (config::enable_merge_on_write_correctness_check) { |
630 | 0 | _tablet->add_sentinel_mark_to_delete_bitmap(_mow_context->delete_bitmap.get(), |
631 | 0 | *_mow_context->rowset_ids); |
632 | 0 | } |
633 | | |
634 | | // read to fill full_block |
635 | 0 | RETURN_IF_ERROR(read_plan.fill_missing_columns( |
636 | 0 | _opts.rowset_ctx, _rsid_to_rowset, *_tablet_schema, full_block, |
637 | 0 | use_default_or_null_flag, has_default_or_nullable, segment_start_pos, data.block)); |
638 | | |
639 | | // row column should be filled here |
640 | | // convert block to row store format |
641 | 0 | _serialize_block_to_row_column(full_block); |
642 | | |
643 | | // convert missing columns and send to column writer |
644 | 0 | const auto& missing_cids = _opts.rowset_ctx->partial_update_info->missing_cids; |
645 | 0 | for (auto cid : missing_cids) { |
646 | 0 | RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema)); |
647 | 0 | RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns( |
648 | 0 | &full_block, data.row_pos, data.num_rows, std::vector<uint32_t> {cid})); |
649 | 0 | auto [status, column] = _olap_data_convertor->convert_column_data(cid); |
650 | 0 | if (!status.ok()) { |
651 | 0 | return status; |
652 | 0 | } |
653 | 0 | if (_tablet_schema->has_sequence_col() && !have_input_seq_column && |
654 | 0 | cid == _tablet_schema->sequence_col_idx()) { |
655 | 0 | DCHECK_EQ(seq_column, nullptr); |
656 | 0 | seq_column = column; |
657 | 0 | } |
658 | 0 | RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(), |
659 | 0 | data.num_rows)); |
660 | 0 | RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid)); |
661 | | // Don't clear source content for sequence column here if it will be used later |
662 | | // in _generate_primary_key_index(). It will be cleared at the end of this method. |
663 | 0 | bool is_seq_column = (_tablet_schema->has_sequence_col() && !have_input_seq_column && |
664 | 0 | cid == _tablet_schema->sequence_col_idx()); |
665 | 0 | if (!is_seq_column) { |
666 | 0 | _olap_data_convertor->clear_source_content(cid); |
667 | 0 | } |
668 | 0 | } |
669 | | |
670 | 0 | _num_rows_updated += stats.num_rows_updated; |
671 | 0 | _num_rows_deleted += stats.num_rows_deleted; |
672 | 0 | _num_rows_new_added += stats.num_rows_new_added; |
673 | 0 | _num_rows_filtered += stats.num_rows_filtered; |
674 | 0 | if (_tablet_schema->has_sequence_col() && !have_input_seq_column) { |
675 | 0 | DCHECK_NE(seq_column, nullptr); |
676 | 0 | if (_num_rows_written != data.row_pos || |
677 | 0 | _primary_key_index_builder->num_rows() != _num_rows_written) { |
678 | 0 | return Status::InternalError( |
679 | 0 | "Correctness check failed, _num_rows_written: {}, row_pos: {}, primary key " |
680 | 0 | "index builder num rows: {}", |
681 | 0 | _num_rows_written, data.row_pos, _primary_key_index_builder->num_rows()); |
682 | 0 | } |
683 | 0 | RETURN_IF_ERROR(_generate_primary_key_index(_key_coders, key_columns, seq_column, |
684 | 0 | data.num_rows, false)); |
685 | 0 | } |
686 | | |
687 | 0 | _num_rows_written += data.num_rows; |
688 | 0 | DCHECK_EQ(_primary_key_index_builder->num_rows(), _num_rows_written) |
689 | 0 | << "primary key index builder num rows(" << _primary_key_index_builder->num_rows() |
690 | 0 | << ") not equal to segment writer's num rows written(" << _num_rows_written << ")"; |
691 | 0 | _olap_data_convertor->clear_source_content(); |
692 | 0 | return Status::OK(); |
693 | 0 | } |
694 | | |
// Appends one batch of rows for a *flexible* partial update (each row carries a skip
// bitmap describing which cells are missing). Aggregates duplicate rows, writes the
// key/sequence columns, reads old values for missing cells via a FlexibleReadPlan to
// build `full_block`, writes all remaining columns, and finally builds the primary
// key index. `data.num_rows` may shrink in place after aggregation.
Status VerticalSegmentWriter::_append_block_with_flexible_partial_content(RowsInBlock& data,
                                                                          Block& full_block) {
    RETURN_IF_ERROR(_partial_update_preconditions_check(data.row_pos, true));

    // data.block has the same schema with full_block
    DCHECK(data.block->columns() == _tablet_schema->num_columns());

    // create full block and fill with sort key columns
    full_block = _tablet_schema->create_block();

    // Use _num_rows_written instead of creating column writer 0, since all column writers
    // should have the same row count, which equals _num_rows_written.
    uint32_t segment_start_pos = cast_set<uint32_t>(_num_rows_written);

    DCHECK(_tablet_schema->has_skip_bitmap_col());
    auto skip_bitmap_col_idx = _tablet_schema->skip_bitmap_col_idx();

    bool has_default_or_nullable = false;
    std::vector<bool> use_default_or_null_flag;
    use_default_or_null_flag.reserve(data.num_rows);

    int32_t seq_map_col_unique_id = _opts.rowset_ctx->partial_update_info->sequence_map_col_uid();
    bool schema_has_sequence_col = _tablet_schema->has_sequence_col();

    DBUG_EXECUTE_IF("VerticalSegmentWriter._append_block_with_flexible_partial_content.sleep",
                    { sleep(60); })
    const std::vector<RowsetSharedPtr>& specified_rowsets = _mow_context->rowset_ptrs;
    std::vector<std::unique_ptr<SegmentCacheHandle>> segment_caches(specified_rowsets.size());

    // Ensure all primary key column writers and sequence column writer are created before
    // aggregate_for_flexible_partial_update, because it internally calls convert_pk_columns
    // and convert_seq_column which need the convertors in _olap_data_convertor
    for (uint32_t cid = 0; cid < _tablet_schema->num_key_columns(); ++cid) {
        RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema));
    }
    if (schema_has_sequence_col) {
        uint32_t cid = _tablet_schema->sequence_col_idx();
        RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema));
    }

    // 1. aggregate duplicate rows in block
    RETURN_IF_ERROR(_block_aggregator.aggregate_for_flexible_partial_update(
            const_cast<Block*>(data.block), data.num_rows, specified_rowsets, segment_caches));
    if (data.block->rows() != data.num_rows) {
        // Aggregation merged duplicate keys; shrink the batch and drop any source
        // content the convertor captured for the pre-aggregation row count.
        data.num_rows = data.block->rows();
        _olap_data_convertor->clear_source_content();
    }

    // 2. encode primary key columns
    // we can only encode primary key columns currently because all non-primary columns in
    // flexible partial update can have missing cells
    std::vector<IOlapColumnDataAccessor*> key_columns {};
    RETURN_IF_ERROR(_block_aggregator.convert_pk_columns(const_cast<Block*>(data.block),
                                                         data.row_pos, data.num_rows, key_columns));
    // 3. encode sequence column
    // We encode the sequence column even though it may have invalid values in some rows because
    // we need to encode the value of sequence column in key for rows that have a valid value in
    // sequence column during lookup_raw_key. We will encode the sequence column again at the end
    // of this method. At that time, we have a valid sequence column to encode the key with seq col.
    IOlapColumnDataAccessor* seq_column {nullptr};
    RETURN_IF_ERROR(_block_aggregator.convert_seq_column(const_cast<Block*>(data.block),
                                                         data.row_pos, data.num_rows, seq_column));

    // Per-row bitmaps of column unique ids whose cells are missing in this load.
    std::vector<BitmapValue>* skip_bitmaps = &(
            assert_cast<ColumnBitmap*>(
                    data.block->get_by_position(skip_bitmap_col_idx).column->assume_mutable().get())
                    ->get_data());
    const auto* delete_signs =
            BaseTablet::get_delete_sign_column_data(*data.block, data.row_pos + data.num_rows);
    DCHECK(delete_signs != nullptr);

    // Key columns are always fully present; share them directly into full_block.
    for (std::size_t cid {0}; cid < _tablet_schema->num_key_columns(); cid++) {
        full_block.replace_by_position(cid, data.block->get_by_position(cid).column);
    }

    // 4. write primary key columns data
    for (std::size_t cid {0}; cid < _tablet_schema->num_key_columns(); cid++) {
        const auto& column = key_columns[cid];
        DCHECK(_column_writers[cid]->get_next_rowid() == _num_rows_written);
        RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(),
                                                     data.num_rows));
        DCHECK(_column_writers[cid]->get_next_rowid() == _num_rows_written + data.num_rows);
        RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid));
    }

    // 5. generate read plan
    FlexibleReadPlan read_plan {_tablet_schema->has_row_store_for_all_columns()};
    PartialUpdateStats stats;
    RETURN_IF_ERROR(_generate_flexible_read_plan(
            read_plan, data, segment_start_pos, schema_has_sequence_col, seq_map_col_unique_id,
            skip_bitmaps, key_columns, seq_column, delete_signs, specified_rowsets, segment_caches,
            has_default_or_nullable, use_default_or_null_flag, stats));
    CHECK_EQ(use_default_or_null_flag.size(), data.num_rows);

    if (config::enable_merge_on_write_correctness_check) {
        _tablet->add_sentinel_mark_to_delete_bitmap(_mow_context->delete_bitmap.get(),
                                                    *_mow_context->rowset_ids);
    }

    // 6. read according plan to fill full_block
    RETURN_IF_ERROR(read_plan.fill_non_primary_key_columns(
            _opts.rowset_ctx, _rsid_to_rowset, *_tablet_schema, full_block,
            use_default_or_null_flag, has_default_or_nullable, segment_start_pos,
            cast_set<uint32_t>(data.row_pos), data.block, skip_bitmaps));

    // TODO(bobhan1): should we replace the skip bitmap column with empty bitmaps to reduce
    // storage occupation? this column is not needed in read path for merge-on-write table

    // 7. fill row store column
    _serialize_block_to_row_column(full_block);

    std::vector<uint32_t> column_ids;
    for (uint32_t i = 0; i < _tablet_schema->num_columns(); ++i) {
        column_ids.emplace_back(i);
    }
    if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION &&
        _tablet_schema->num_variant_columns() > 0) {
        RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns(
                full_block, *_tablet_schema, column_ids));
    }

    // 8. encode and write all non-primary key columns(including sequence column if exists)
    for (auto cid = _tablet_schema->num_key_columns(); cid < _tablet_schema->num_columns(); cid++) {
        // The sequence column writer was already created before aggregation above.
        if (cid != _tablet_schema->sequence_col_idx()) {
            RETURN_IF_ERROR(_create_column_writer(cast_set<uint32_t>(cid),
                                                  _tablet_schema->column(cid), _tablet_schema));
        }
        RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_column(
                full_block.get_by_position(cid), data.row_pos, data.num_rows,
                cast_set<uint32_t>(cid)));
        auto [status, column] = _olap_data_convertor->convert_column_data(cid);
        if (!status.ok()) {
            return status;
        }
        if (cid == _tablet_schema->sequence_col_idx()) {
            // should use the latest encoded sequence column to build the primary index
            seq_column = column;
        }
        DCHECK(_column_writers[cid]->get_next_rowid() == _num_rows_written);
        RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(),
                                                     data.num_rows));
        DCHECK(_column_writers[cid]->get_next_rowid() == _num_rows_written + data.num_rows);
        RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid));
    }

    _num_rows_updated += stats.num_rows_updated;
    _num_rows_deleted += stats.num_rows_deleted;
    _num_rows_new_added += stats.num_rows_new_added;
    _num_rows_filtered += stats.num_rows_filtered;

    // Batches must be appended contiguously and in sync with the PK index builder.
    if (_num_rows_written != data.row_pos ||
        _primary_key_index_builder->num_rows() != _num_rows_written) {
        return Status::InternalError(
                "Correctness check failed, _num_rows_written: {}, row_pos: {}, primary key "
                "index builder num rows: {}",
                _num_rows_written, data.row_pos, _primary_key_index_builder->num_rows());
    }

    // 9. build primary key index
    RETURN_IF_ERROR(_generate_primary_key_index(_key_coders, key_columns, seq_column, data.num_rows,
                                                false));

    _num_rows_written += data.num_rows;
    DCHECK_EQ(_primary_key_index_builder->num_rows(), _num_rows_written)
            << "primary key index builder num rows(" << _primary_key_index_builder->num_rows()
            << ") not equal to segment writer's num rows written(" << _num_rows_written << ")";
    _olap_data_convertor->clear_source_content();
    return Status::OK();
}
864 | | |
// Produces the storage-encoded value (including the leading marker byte) of the
// sequence column's default: the declared default value if the column has one,
// otherwise the type's default-inserted value. Builds a one-row block holding
// that value, runs it through a throwaway OlapBlockDataConvertor, and encodes
// the converted cell into `encoded_value` via _encode_seq_column.
Status VerticalSegmentWriter::_generate_encoded_default_seq_value(const TabletSchema& tablet_schema,
                                                                  const PartialUpdateInfo& info,
                                                                  std::string* encoded_value) {
    const auto& seq_column = tablet_schema.column(tablet_schema.sequence_col_idx());
    // One-column block containing only the sequence column.
    auto block = tablet_schema.create_block_by_cids(
            {cast_set<uint32_t>(tablet_schema.sequence_col_idx())});
    if (seq_column.has_default_value()) {
        // info.default_values is indexed by value-column position (keys excluded).
        auto idx = tablet_schema.sequence_col_idx() - tablet_schema.num_key_columns();
        const auto& default_value = info.default_values[idx];
        StringRef str {default_value};
        RETURN_IF_ERROR(block.get_by_position(0).type->get_serde()->default_from_string(
                str, *block.get_by_position(0).column->assume_mutable().get()));

    } else {
        block.get_by_position(0).column->assume_mutable()->insert_default();
    }
    DCHECK_EQ(block.rows(), 1);
    // Local convertor: the member convertor's state must not be disturbed here.
    auto olap_data_convertor = std::make_unique<OlapBlockDataConvertor>();
    olap_data_convertor->add_column_data_convertor(seq_column);
    olap_data_convertor->set_source_content(&block, 0, 1);
    auto [status, column] = olap_data_convertor->convert_column_data(0);
    if (!status.ok()) {
        return status;
    }
    // include marker
    _encode_seq_column(column, 0, encoded_value);
    return Status::OK();
}
893 | | |
// Builds the read plan for a flexible partial update batch: for each row, encodes its
// key (with the sequence value appended only when this row actually provides one, per
// its skip bitmap), then probes the key against the specified rowsets. Found keys are
// recorded in `read_plan` together with the row's skip bitmap; misses go through the
// partial-update new-key handler. Updates `use_default_or_null_flag`/`stats` via
// _probe_key_for_mow.
// NOTE(review): `seq_map_col_unique_id` is not referenced in this body — presumably
// kept for interface symmetry with the caller; confirm before removing.
Status VerticalSegmentWriter::_generate_flexible_read_plan(
        FlexibleReadPlan& read_plan, RowsInBlock& data, size_t segment_start_pos,
        bool schema_has_sequence_col, int32_t seq_map_col_unique_id,
        std::vector<BitmapValue>* skip_bitmaps,
        const std::vector<IOlapColumnDataAccessor*>& key_columns,
        IOlapColumnDataAccessor* seq_column, const signed char* delete_signs,
        const std::vector<RowsetSharedPtr>& specified_rowsets,
        std::vector<std::unique_ptr<SegmentCacheHandle>>& segment_caches,
        bool& has_default_or_nullable, std::vector<bool>& use_default_or_null_flag,
        PartialUpdateStats& stats) {
    int32_t delete_sign_col_unique_id =
            _tablet_schema->column(_tablet_schema->delete_sign_idx()).unique_id();
    int32_t seq_col_unique_id =
            (_tablet_schema->has_sequence_col()
                     ? _tablet_schema->column(_tablet_schema->sequence_col_idx()).unique_id()
                     : -1);
    for (size_t block_pos = data.row_pos; block_pos < data.row_pos + data.num_rows; block_pos++) {
        // delta_pos: offset inside this batch; segment_pos: absolute row id in the segment.
        size_t delta_pos = block_pos - data.row_pos;
        size_t segment_pos = segment_start_pos + delta_pos;
        auto& skip_bitmap = skip_bitmaps->at(block_pos);

        std::string key = _full_encode_keys(key_columns, delta_pos);
        _maybe_invalid_row_cache(key);
        // The row supplies a sequence value only if the seq column is not skipped.
        bool row_has_sequence_col =
                (schema_has_sequence_col && !skip_bitmap.contains(seq_col_unique_id));
        if (row_has_sequence_col) {
            _encode_seq_column(seq_column, delta_pos, &key);
        }

        // mark key with delete sign as deleted.
        bool have_delete_sign =
                (!skip_bitmap.contains(delete_sign_col_unique_id) && delete_signs[block_pos] != 0);

        // Invoked when the key does not exist in any specified rowset.
        auto not_found_cb = [&]() {
            return _opts.rowset_ctx->partial_update_info->handle_new_key(
                    *_tablet_schema,
                    [&]() -> std::string {
                        return data.block->dump_one_line(block_pos,
                                                         cast_set<int>(_num_sort_key_columns));
                    },
                    &skip_bitmap);
        };
        // Invoked when the key is found: remember where to read the old row from.
        auto update_read_plan = [&](const RowLocation& loc) {
            read_plan.prepare_to_read(loc, segment_pos, skip_bitmap);
        };

        RETURN_IF_ERROR(_probe_key_for_mow(std::move(key), segment_pos, row_has_sequence_col,
                                           have_delete_sign, specified_rowsets, segment_caches,
                                           has_default_or_nullable, use_default_or_null_flag,
                                           update_read_plan, not_found_cb, stats));
    }
    return Status::OK();
}
947 | | |
948 | 12 | Status VerticalSegmentWriter::batch_block(const Block* block, size_t row_pos, size_t num_rows) { |
949 | 12 | if (_opts.rowset_ctx->partial_update_info && |
950 | 12 | _opts.rowset_ctx->partial_update_info->is_partial_update() && |
951 | 12 | _opts.write_type == DataWriteType::TYPE_DIRECT && |
952 | 12 | !_opts.rowset_ctx->is_transient_rowset_writer) { |
953 | 0 | if (_opts.rowset_ctx->partial_update_info->is_flexible_partial_update()) { |
954 | 0 | if (block->columns() != _tablet_schema->num_columns()) { |
955 | 0 | return Status::InvalidArgument( |
956 | 0 | "illegal flexible partial update block columns, block columns = {}, " |
957 | 0 | "tablet_schema columns = {}", |
958 | 0 | block->dump_structure(), _tablet_schema->dump_structure()); |
959 | 0 | } |
960 | 0 | } else { |
961 | 0 | if (block->columns() < _tablet_schema->num_key_columns() || |
962 | 0 | block->columns() >= _tablet_schema->num_columns()) { |
963 | 0 | return Status::InvalidArgument(fmt::format( |
964 | 0 | "illegal partial update block columns: {}, num key columns: {}, total " |
965 | 0 | "schema columns: {}", |
966 | 0 | block->columns(), _tablet_schema->num_key_columns(), |
967 | 0 | _tablet_schema->num_columns())); |
968 | 0 | } |
969 | 0 | } |
970 | 12 | } else if (block->columns() != _tablet_schema->num_columns()) { |
971 | 0 | return Status::InvalidArgument( |
972 | 0 | "illegal block columns, block columns = {}, tablet_schema columns = {}", |
973 | 0 | block->dump_structure(), _tablet_schema->dump_structure()); |
974 | 0 | } |
975 | 12 | _batched_blocks.emplace_back(block, row_pos, num_rows); |
976 | 12 | return Status::OK(); |
977 | 12 | } |
978 | | |
// Flushes all blocks queued by batch_block(). Partial-update writes are delegated
// per block to the (flexible) partial-content appenders. The normal path writes
// column-by-column across all batched blocks (vertical layout), then builds the
// key indexes per block. Clears _batched_blocks on success.
Status VerticalSegmentWriter::write_batch() {
    if (_opts.rowset_ctx->partial_update_info &&
        _opts.rowset_ctx->partial_update_info->is_partial_update() &&
        _opts.write_type == DataWriteType::TYPE_DIRECT &&
        !_opts.rowset_ctx->is_transient_rowset_writer) {
        bool is_flexible_partial_update =
                _opts.rowset_ctx->partial_update_info->is_flexible_partial_update();
        Block full_block;
        for (auto& data : _batched_blocks) {
            if (is_flexible_partial_update) {
                RETURN_IF_ERROR(_append_block_with_flexible_partial_content(data, full_block));
            } else {
                RETURN_IF_ERROR(_append_block_with_partial_content(data, full_block));
            }
        }
        return Status::OK();
    }
    // Row column should be filled here when it's a direct write from memtable
    // or it's a schema change write (since column data type may be changed, so we should rebuild)
    if (_opts.write_type == DataWriteType::TYPE_DIRECT ||
        _opts.write_type == DataWriteType::TYPE_SCHEMA_CHANGE) {
        for (auto& data : _batched_blocks) {
            // TODO: maybe we should pass range to this method
            _serialize_block_to_row_column(*data.block);
        }
    }

    std::vector<uint32_t> column_ids;
    for (uint32_t i = 0; i < _tablet_schema->num_columns(); ++i) {
        column_ids.emplace_back(i);
    }
    if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION &&
        _tablet_schema->num_variant_columns() > 0) {
        for (auto& data : _batched_blocks) {
            RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns(
                    const_cast<Block&>(*data.block), *_tablet_schema, column_ids));
        }
    }

    std::vector<IOlapColumnDataAccessor*> key_columns;
    IOlapColumnDataAccessor* seq_column = nullptr;
    // the key is cluster key column unique id
    std::map<uint32_t, IOlapColumnDataAccessor*> cid_to_column;
    // Vertical write: finish one column across all batched blocks before the next.
    for (uint32_t cid = 0; cid < _tablet_schema->num_columns(); ++cid) {
        RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema));
        for (auto& data : _batched_blocks) {
            RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns(
                    data.block, data.row_pos, data.num_rows, std::vector<uint32_t> {cid}));

            // convert column data from engine format to storage layer format
            auto [status, column] = _olap_data_convertor->convert_column_data(cid);
            if (!status.ok()) {
                return status;
            }
            // Keep accessors needed later for index building: key columns, the
            // sequence column, and (for MoW with cluster keys) cluster key columns.
            if (cid < _tablet_schema->num_key_columns()) {
                key_columns.push_back(column);
            }
            if (_tablet_schema->has_sequence_col() && cid == _tablet_schema->sequence_col_idx()) {
                seq_column = column;
            }
            auto column_unique_id = _tablet_schema->column(cid).unique_id();
            if (_is_mow_with_cluster_key() &&
                std::find(_tablet_schema->cluster_key_uids().begin(),
                          _tablet_schema->cluster_key_uids().end(),
                          column_unique_id) != _tablet_schema->cluster_key_uids().end()) {
                cid_to_column[column_unique_id] = column;
            }
            RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(),
                                                         data.num_rows));
            _olap_data_convertor->clear_source_content();
        }
        if (_data_dir != nullptr &&
            _data_dir->reach_capacity_limit(_column_writers[cid]->estimate_buffer_size())) {
            return Status::Error<DISK_REACH_CAPACITY_LIMIT>("disk {} exceed capacity limit.",
                                                            _data_dir->path_hash());
        }
        RETURN_IF_ERROR(_finalize_column_writer_and_update_meta(cid));
    }

    // Build short key / primary key indexes per block, in batch order.
    for (auto& data : _batched_blocks) {
        _olap_data_convertor->set_source_content(data.block, data.row_pos, data.num_rows);
        RETURN_IF_ERROR(_generate_key_index(data, key_columns, seq_column, cid_to_column));
        _olap_data_convertor->clear_source_content();
        _num_rows_written += data.num_rows;
    }

    _batched_blocks.clear();
    return Status::OK();
}
1068 | | |
1069 | | Status VerticalSegmentWriter::_generate_key_index( |
1070 | | RowsInBlock& data, std::vector<IOlapColumnDataAccessor*>& key_columns, |
1071 | | IOlapColumnDataAccessor* seq_column, |
1072 | 12 | std::map<uint32_t, IOlapColumnDataAccessor*>& cid_to_column) { |
1073 | | // find all row pos for short key indexes |
1074 | 12 | std::vector<size_t> short_key_pos; |
1075 | | // We build a short key index every `_opts.num_rows_per_block` rows. Specifically, we |
1076 | | // build a short key index using 1st rows for first block and `_short_key_row_pos - _row_count` |
1077 | | // for next blocks. |
1078 | 12 | if (_short_key_row_pos == 0 && _num_rows_written == 0) { |
1079 | 12 | short_key_pos.push_back(0); |
1080 | 12 | } |
1081 | 12 | while (_short_key_row_pos + _opts.num_rows_per_block < _num_rows_written + data.num_rows) { |
1082 | 0 | _short_key_row_pos += _opts.num_rows_per_block; |
1083 | 0 | short_key_pos.push_back(_short_key_row_pos - _num_rows_written); |
1084 | 0 | } |
1085 | 12 | if (_is_mow_with_cluster_key()) { |
1086 | | // 1. generate primary key index |
1087 | 1 | RETURN_IF_ERROR(_generate_primary_key_index(_primary_key_coders, key_columns, seq_column, |
1088 | 1 | data.num_rows, true)); |
1089 | | // 2. generate short key index (use cluster key) |
1090 | 1 | std::vector<IOlapColumnDataAccessor*> short_key_columns; |
1091 | 2 | for (const auto& cid : _tablet_schema->cluster_key_uids()) { |
1092 | 2 | short_key_columns.push_back(cid_to_column[cid]); |
1093 | 2 | } |
1094 | 1 | RETURN_IF_ERROR(_generate_short_key_index(short_key_columns, data.num_rows, short_key_pos)); |
1095 | 11 | } else if (_is_mow()) { |
1096 | 2 | RETURN_IF_ERROR(_generate_primary_key_index(_key_coders, key_columns, seq_column, |
1097 | 2 | data.num_rows, false)); |
1098 | 9 | } else { // other tables |
1099 | 9 | RETURN_IF_ERROR(_generate_short_key_index(key_columns, data.num_rows, short_key_pos)); |
1100 | 9 | } |
1101 | 12 | return Status::OK(); |
1102 | 12 | } |
1103 | | |
1104 | | Status VerticalSegmentWriter::_generate_primary_key_index( |
1105 | | const std::vector<const KeyCoder*>& primary_key_coders, |
1106 | | const std::vector<IOlapColumnDataAccessor*>& primary_key_columns, |
1107 | 3 | IOlapColumnDataAccessor* seq_column, size_t num_rows, bool need_sort) { |
1108 | 3 | if (!need_sort) { // mow table without cluster key |
1109 | 2 | std::string last_key; |
1110 | 6 | for (size_t pos = 0; pos < num_rows; pos++) { |
1111 | | // use _key_coders |
1112 | 4 | std::string key = _full_encode_keys(primary_key_columns, pos); |
1113 | 4 | _maybe_invalid_row_cache(key); |
1114 | 4 | if (_tablet_schema->has_sequence_col()) { |
1115 | 4 | _encode_seq_column(seq_column, pos, &key); |
1116 | 4 | } |
1117 | 4 | DCHECK(key.compare(last_key) > 0) |
1118 | 0 | << "found duplicate key or key is not sorted! current key: " << key |
1119 | 0 | << ", last key: " << last_key; |
1120 | 4 | RETURN_IF_ERROR(_primary_key_index_builder->add_item(key)); |
1121 | 4 | last_key = std::move(key); |
1122 | 4 | } |
1123 | 2 | } else { // mow table with cluster key |
1124 | | // 1. generate primary keys in memory |
1125 | 1 | std::vector<std::string> primary_keys; |
1126 | 5 | for (uint32_t pos = 0; pos < num_rows; pos++) { |
1127 | 4 | std::string key = _full_encode_keys(primary_key_coders, primary_key_columns, pos); |
1128 | 4 | _maybe_invalid_row_cache(key); |
1129 | 4 | if (_tablet_schema->has_sequence_col()) { |
1130 | 4 | _encode_seq_column(seq_column, pos, &key); |
1131 | 4 | } |
1132 | 4 | _encode_rowid(pos, &key); |
1133 | 4 | primary_keys.emplace_back(std::move(key)); |
1134 | 4 | } |
1135 | | // 2. sort primary keys |
1136 | 1 | std::sort(primary_keys.begin(), primary_keys.end()); |
1137 | | // 3. write primary keys index |
1138 | 1 | std::string last_key; |
1139 | 4 | for (const auto& key : primary_keys) { |
1140 | 4 | DCHECK(key.compare(last_key) > 0) |
1141 | 0 | << "found duplicate key or key is not sorted! current key: " << key |
1142 | 0 | << ", last key: " << last_key; |
1143 | 4 | RETURN_IF_ERROR(_primary_key_index_builder->add_item(key)); |
1144 | 4 | last_key = key; |
1145 | 4 | } |
1146 | 1 | } |
1147 | 3 | return Status::OK(); |
1148 | 3 | } |
1149 | | |
1150 | | Status VerticalSegmentWriter::_generate_short_key_index( |
1151 | | std::vector<IOlapColumnDataAccessor*>& key_columns, size_t num_rows, |
1152 | 10 | const std::vector<size_t>& short_key_pos) { |
1153 | | // use _key_coders |
1154 | 10 | _set_min_key(_full_encode_keys(key_columns, 0)); |
1155 | 10 | _set_max_key(_full_encode_keys(key_columns, num_rows - 1)); |
1156 | 10 | DCHECK(Slice(_max_key.data(), _max_key.size()) |
1157 | 0 | .compare(Slice(_min_key.data(), _min_key.size())) >= 0) |
1158 | 0 | << "key is not sorted! min key: " << _min_key << ", max key: " << _max_key; |
1159 | | |
1160 | 10 | key_columns.resize(_num_short_key_columns); |
1161 | 10 | std::string last_key; |
1162 | 10 | for (const auto pos : short_key_pos) { |
1163 | 10 | std::string key = _encode_keys(key_columns, pos); |
1164 | 10 | DCHECK(key.compare(last_key) >= 0) |
1165 | 0 | << "key is not sorted! current key: " << key << ", last key: " << last_key; |
1166 | 10 | RETURN_IF_ERROR(_short_key_index_builder->add_item(key)); |
1167 | 10 | last_key = std::move(key); |
1168 | 10 | } |
1169 | 10 | return Status::OK(); |
1170 | 10 | } |
1171 | | |
// Appends the marker byte plus the ascending-encoded rowid to `encoded_keys`;
// used to suffix primary keys with their row position for cluster-key tables.
void VerticalSegmentWriter::_encode_rowid(const uint32_t rowid, std::string* encoded_keys) {
    encoded_keys->push_back(KEY_NORMAL_MARKER);
    _rowid_coder->full_encode_ascending(&rowid, encoded_keys);
}
1176 | | |
1177 | | std::string VerticalSegmentWriter::_full_encode_keys( |
1178 | 24 | const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos) { |
1179 | 24 | assert(_key_index_size.size() == _num_sort_key_columns); |
1180 | 24 | if (!(key_columns.size() == _num_sort_key_columns && |
1181 | 24 | _key_coders.size() == _num_sort_key_columns)) { |
1182 | 0 | LOG_INFO("key_columns.size()={}, _key_coders.size()={}, _num_sort_key_columns={}, ", |
1183 | 0 | key_columns.size(), _key_coders.size(), _num_sort_key_columns); |
1184 | 0 | } |
1185 | 24 | assert(key_columns.size() == _num_sort_key_columns && |
1186 | 24 | _key_coders.size() == _num_sort_key_columns); |
1187 | 24 | return _full_encode_keys(_key_coders, key_columns, pos); |
1188 | 24 | } |
1189 | | |
1190 | | std::string VerticalSegmentWriter::_full_encode_keys( |
1191 | | const std::vector<const KeyCoder*>& key_coders, |
1192 | 28 | const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos) { |
1193 | 28 | assert(key_columns.size() == key_coders.size()); |
1194 | | |
1195 | 28 | std::string encoded_keys; |
1196 | 28 | size_t cid = 0; |
1197 | 70 | for (const auto& column : key_columns) { |
1198 | 70 | auto field = column->get_data_at(pos); |
1199 | 70 | if (UNLIKELY(!field)) { |
1200 | 0 | encoded_keys.push_back(KEY_NULL_FIRST_MARKER); |
1201 | 0 | ++cid; |
1202 | 0 | continue; |
1203 | 0 | } |
1204 | 70 | encoded_keys.push_back(KEY_NORMAL_MARKER); |
1205 | 70 | DCHECK(key_coders[cid] != nullptr); |
1206 | 70 | key_coders[cid]->full_encode_ascending(field, &encoded_keys); |
1207 | 70 | ++cid; |
1208 | 70 | } |
1209 | 28 | return encoded_keys; |
1210 | 28 | } |
1211 | | |
1212 | | void VerticalSegmentWriter::_encode_seq_column(const IOlapColumnDataAccessor* seq_column, |
1213 | 8 | size_t pos, std::string* encoded_keys) { |
1214 | 8 | const auto* field = seq_column->get_data_at(pos); |
1215 | | // To facilitate the use of the primary key index, encode the seq column |
1216 | | // to the minimum value of the corresponding length when the seq column |
1217 | | // is null |
1218 | 8 | if (UNLIKELY(!field)) { |
1219 | 0 | encoded_keys->push_back(KEY_NULL_FIRST_MARKER); |
1220 | 0 | size_t seq_col_length = _tablet_schema->column(_tablet_schema->sequence_col_idx()).length(); |
1221 | 0 | encoded_keys->append(seq_col_length, KEY_MINIMAL_MARKER); |
1222 | 0 | return; |
1223 | 0 | } |
1224 | 8 | encoded_keys->push_back(KEY_NORMAL_MARKER); |
1225 | 8 | _seq_coder->full_encode_ascending(field, encoded_keys); |
1226 | 8 | } |
1227 | | |
1228 | | std::string VerticalSegmentWriter::_encode_keys( |
1229 | 10 | const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos) { |
1230 | 10 | assert(key_columns.size() == _num_short_key_columns); |
1231 | | |
1232 | 10 | std::string encoded_keys; |
1233 | 10 | size_t cid = 0; |
1234 | 22 | for (const auto& column : key_columns) { |
1235 | 22 | auto field = column->get_data_at(pos); |
1236 | 22 | if (UNLIKELY(!field)) { |
1237 | 0 | encoded_keys.push_back(KEY_NULL_FIRST_MARKER); |
1238 | 0 | ++cid; |
1239 | 0 | continue; |
1240 | 0 | } |
1241 | 22 | encoded_keys.push_back(KEY_NORMAL_MARKER); |
1242 | 22 | _key_coders[cid]->encode_ascending(field, _key_index_size[cid], &encoded_keys); |
1243 | 22 | ++cid; |
1244 | 22 | } |
1245 | 10 | return encoded_keys; |
1246 | 10 | } |
1247 | | |
1248 | | // TODO(lingbin): Currently this function does not include the size of various indexes, |
1249 | | // We should make this more precise. |
1250 | 12 | uint64_t VerticalSegmentWriter::_estimated_remaining_size() { |
1251 | | // footer_size(4) + checksum(4) + segment_magic(4) |
1252 | 12 | uint64_t size = 12; |
1253 | 12 | if (_is_mow_with_cluster_key()) { |
1254 | 1 | size += _primary_key_index_builder->size() + _short_key_index_builder->size(); |
1255 | 11 | } else if (_is_mow()) { |
1256 | 2 | size += _primary_key_index_builder->size(); |
1257 | 9 | } else { |
1258 | 9 | size += _short_key_index_builder->size(); |
1259 | 9 | } |
1260 | | |
1261 | | // update the mem_tracker of segment size |
1262 | 12 | _mem_tracker->consume(size - _mem_tracker->consumption()); |
1263 | 12 | return size; |
1264 | 12 | } |
1265 | | |
// Write all per-column and per-segment index structures and report their total
// on-disk footprint via *index_size. The write order below (ordinal, zone map,
// inverted, ANN, bloom filter, then key indexes) determines the file layout
// and must not be changed. Resets the column writers when done.
Status VerticalSegmentWriter::finalize_columns_index(uint64_t* index_size) {
    uint64_t index_start = _file_writer->bytes_appended();
    RETURN_IF_ERROR(_write_ordinal_index());
    RETURN_IF_ERROR(_write_zone_map());
    RETURN_IF_ERROR(_write_inverted_index());
    RETURN_IF_ERROR(_write_ann_index());
    RETURN_IF_ERROR(_write_bloom_filter_index());

    // Bytes appended since index_start cover the column-level indexes above.
    *index_size = _file_writer->bytes_appended() - index_start;
    if (_is_mow_with_cluster_key()) {
        // Cluster-key MoW writes both a short key index and a primary key index.
        RETURN_IF_ERROR(_write_short_key_index());
        *index_size = _file_writer->bytes_appended() - index_start;
        RETURN_IF_ERROR(_write_primary_key_index());
        *index_size += _primary_key_index_builder->disk_size();
    } else if (_is_mow()) {
        RETURN_IF_ERROR(_write_primary_key_index());
        // IndexedColumnWriter write data pages mixed with segment data, we should use
        // the stat from primary key index builder.
        *index_size += _primary_key_index_builder->disk_size();
    } else {
        // Non-MoW tablets only carry the short key index.
        RETURN_IF_ERROR(_write_short_key_index());
        *index_size = _file_writer->bytes_appended() - index_start;
    }

    // Reset all column writers and the data convertor.
    clear();

    return Status::OK();
}
1295 | | |
1296 | 12 | Status VerticalSegmentWriter::finalize_footer(uint64_t* segment_file_size) { |
1297 | 12 | RETURN_IF_ERROR(_write_footer()); |
1298 | | // finish |
1299 | 12 | RETURN_IF_ERROR(_file_writer->close(true)); |
1300 | 12 | *segment_file_size = _file_writer->bytes_appended(); |
1301 | 12 | if (*segment_file_size == 0) { |
1302 | 0 | return Status::Corruption("Bad segment, file size = 0"); |
1303 | 0 | } |
1304 | 12 | return Status::OK(); |
1305 | 12 | } |
1306 | | |
1307 | 12 | Status VerticalSegmentWriter::finalize(uint64_t* segment_file_size, uint64_t* index_size) { |
1308 | 12 | MonotonicStopWatch timer; |
1309 | 12 | timer.start(); |
1310 | | // check disk capacity |
1311 | 12 | if (_data_dir != nullptr && |
1312 | 12 | _data_dir->reach_capacity_limit((int64_t)_estimated_remaining_size())) { |
1313 | 0 | return Status::Error<DISK_REACH_CAPACITY_LIMIT>("disk {} exceed capacity limit.", |
1314 | 0 | _data_dir->path_hash()); |
1315 | 0 | } |
1316 | 12 | _row_count = _num_rows_written; |
1317 | 12 | _num_rows_written = 0; |
1318 | | // write index |
1319 | 12 | RETURN_IF_ERROR(finalize_columns_index(index_size)); |
1320 | | // write footer |
1321 | 12 | RETURN_IF_ERROR(finalize_footer(segment_file_size)); |
1322 | | |
1323 | 12 | if (timer.elapsed_time() > 5000000000L) { |
1324 | 0 | LOG(INFO) << "segment flush consumes a lot time_ns " << timer.elapsed_time() |
1325 | 0 | << ", segmemt_size " << *segment_file_size; |
1326 | 0 | } |
1327 | 12 | return Status::OK(); |
1328 | 12 | } |
1329 | | |
1330 | 12 | void VerticalSegmentWriter::clear() { |
1331 | 61 | for (auto& column_writer : _column_writers) { |
1332 | 61 | column_writer.reset(); |
1333 | 61 | } |
1334 | 12 | _column_writers.clear(); |
1335 | 12 | _olap_data_convertor.reset(); |
1336 | 12 | } |
1337 | | |
1338 | | // write ordinal index after data has been written |
1339 | 12 | Status VerticalSegmentWriter::_write_ordinal_index() { |
1340 | 61 | for (auto& column_writer : _column_writers) { |
1341 | 61 | RETURN_IF_ERROR(column_writer->write_ordinal_index()); |
1342 | 61 | } |
1343 | 12 | return Status::OK(); |
1344 | 12 | } |
1345 | | |
1346 | 12 | Status VerticalSegmentWriter::_write_zone_map() { |
1347 | 61 | for (auto& column_writer : _column_writers) { |
1348 | 61 | RETURN_IF_ERROR(column_writer->write_zone_map()); |
1349 | 61 | } |
1350 | 12 | return Status::OK(); |
1351 | 12 | } |
1352 | | |
1353 | 12 | Status VerticalSegmentWriter::_write_inverted_index() { |
1354 | 61 | for (auto& column_writer : _column_writers) { |
1355 | 61 | RETURN_IF_ERROR(column_writer->write_inverted_index()); |
1356 | 61 | } |
1357 | 12 | return Status::OK(); |
1358 | 12 | } |
1359 | | |
1360 | 12 | Status VerticalSegmentWriter::_write_ann_index() { |
1361 | 61 | for (auto& column_writer : _column_writers) { |
1362 | 61 | RETURN_IF_ERROR(column_writer->write_ann_index()); |
1363 | 61 | } |
1364 | 12 | return Status::OK(); |
1365 | 12 | } |
1366 | | |
1367 | 12 | Status VerticalSegmentWriter::_write_bloom_filter_index() { |
1368 | 61 | for (auto& column_writer : _column_writers) { |
1369 | 61 | RETURN_IF_ERROR(column_writer->write_bloom_filter_index()); |
1370 | 61 | } |
1371 | 12 | return Status::OK(); |
1372 | 12 | } |
1373 | | |
1374 | 10 | Status VerticalSegmentWriter::_write_short_key_index() { |
1375 | 10 | std::vector<Slice> body; |
1376 | 10 | PageFooterPB footer; |
1377 | 10 | RETURN_IF_ERROR(_short_key_index_builder->finalize(_row_count, &body, &footer)); |
1378 | 10 | PagePointer pp; |
1379 | | // short key index page is not compressed right now |
1380 | 10 | RETURN_IF_ERROR(PageIO::write_page(_file_writer, body, footer, &pp)); |
1381 | 10 | pp.to_proto(_footer.mutable_short_key_index_page()); |
1382 | 10 | return Status::OK(); |
1383 | 10 | } |
1384 | | |
// Finalize the primary key index and record its meta in the segment footer.
Status VerticalSegmentWriter::_write_primary_key_index() {
    // Every written row must have produced exactly one primary key entry.
    CHECK_EQ(_primary_key_index_builder->num_rows(), _row_count);
    return _primary_key_index_builder->finalize(_footer.mutable_primary_key_index_meta());
}
1389 | | |
// Serialize and append the segment footer. On-disk tail layout:
//   SegmentFooterPB | footer_size(4, LE) | crc32c(footer)(4, LE) | magic(4)
Status VerticalSegmentWriter::_write_footer() {
    _footer.set_num_rows(_row_count);

    // Decide whether to externalize ColumnMetaPB by tablet default, and stamp footer version

    if (_tablet_schema->is_external_segment_column_meta_used()) {
        _footer.set_version(SEGMENT_FOOTER_VERSION_V3_EXT_COL_META);
        VLOG_DEBUG << "use external column meta";
        // External ColumnMetaPB writing (optional)
        RETURN_IF_ERROR(ExternalColMetaUtil::write_external_column_meta(
                _file_writer, &_footer, _opts.compression_type,
                [this](const std::vector<Slice>& slices) { return _write_raw_data(slices); }));
    }

    // Footer := SegmentFooterPB, FooterPBSize(4), FooterPBChecksum(4), MagicNumber(4)
    VLOG_DEBUG << "footer " << _footer.DebugString();
    std::string footer_buf;
    if (!_footer.SerializeToString(&footer_buf)) {
        return Status::InternalError("failed to serialize segment footer");
    }

    faststring fixed_buf;
    // footer's size
    put_fixed32_le(&fixed_buf, cast_set<uint32_t>(footer_buf.size()));
    // footer's checksum (crc32c over the serialized footer bytes)
    uint32_t checksum = crc32c::Crc32c(footer_buf.data(), footer_buf.size());
    put_fixed32_le(&fixed_buf, checksum);
    // Append magic number. we don't write magic number in the header because
    // that will need an extra seek when reading
    fixed_buf.append(k_segment_magic, k_segment_magic_length);

    // Write the protobuf body followed by the fixed-size trailer in one call.
    std::vector<Slice> slices {footer_buf, fixed_buf};
    return _write_raw_data(slices);
}
1424 | | |
1425 | 12 | Status VerticalSegmentWriter::_write_raw_data(const std::vector<Slice>& slices) { |
1426 | 12 | RETURN_IF_ERROR(_file_writer->appendv(&slices[0], slices.size())); |
1427 | 12 | return Status::OK(); |
1428 | 12 | } |
1429 | | |
1430 | 12 | Slice VerticalSegmentWriter::min_encoded_key() { |
1431 | 12 | return (_primary_key_index_builder == nullptr) ? Slice(_min_key.data(), _min_key.size()) |
1432 | 12 | : _primary_key_index_builder->min_key(); |
1433 | 12 | } |
1434 | 12 | Slice VerticalSegmentWriter::max_encoded_key() { |
1435 | 12 | return (_primary_key_index_builder == nullptr) ? Slice(_max_key.data(), _max_key.size()) |
1436 | 12 | : _primary_key_index_builder->max_key(); |
1437 | 12 | } |
1438 | | |
1439 | 0 | void VerticalSegmentWriter::_set_min_max_key(const Slice& key) { |
1440 | 0 | if (UNLIKELY(_is_first_row)) { |
1441 | 0 | _min_key.append(key.get_data(), key.get_size()); |
1442 | 0 | _is_first_row = false; |
1443 | 0 | } |
1444 | 0 | if (key.compare(_max_key) > 0) { |
1445 | 0 | _max_key.clear(); |
1446 | 0 | _max_key.append(key.get_data(), key.get_size()); |
1447 | 0 | } |
1448 | 0 | } |
1449 | | |
1450 | 10 | void VerticalSegmentWriter::_set_min_key(const Slice& key) { |
1451 | 10 | if (UNLIKELY(_is_first_row)) { |
1452 | 10 | _min_key.append(key.get_data(), key.get_size()); |
1453 | 10 | _is_first_row = false; |
1454 | 10 | } |
1455 | 10 | } |
1456 | | |
1457 | 10 | void VerticalSegmentWriter::_set_max_key(const Slice& key) { |
1458 | 10 | _max_key.clear(); |
1459 | 10 | _max_key.append(key.get_data(), key.get_size()); |
1460 | 10 | } |
1461 | | |
1462 | 172 | inline bool VerticalSegmentWriter::_is_mow() { |
1463 | 172 | return _tablet_schema->keys_type() == UNIQUE_KEYS && _opts.enable_unique_key_merge_on_write; |
1464 | 172 | } |
1465 | | |
1466 | 115 | inline bool VerticalSegmentWriter::_is_mow_with_cluster_key() { |
1467 | 115 | return _is_mow() && !_tablet_schema->cluster_key_uids().empty(); |
1468 | 115 | } |
1469 | | |
1470 | | #include "common/compile_check_end.h" |
1471 | | |
1472 | | } // namespace doris::segment_v2 |