/root/doris/be/src/storage/merger.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "storage/merger.h" |
19 | | |
20 | | #include <gen_cpp/olap_file.pb.h> |
21 | | #include <gen_cpp/types.pb.h> |
22 | | #include <stddef.h> |
23 | | #include <unistd.h> |
24 | | |
25 | | #include <algorithm> |
26 | | #include <iterator> |
27 | | #include <memory> |
28 | | #include <mutex> |
29 | | #include <numeric> |
30 | | #include <ostream> |
31 | | #include <shared_mutex> |
32 | | #include <string> |
33 | | #include <unordered_map> |
34 | | #include <utility> |
35 | | #include <vector> |
36 | | |
37 | | #include "cloud/config.h" |
38 | | #include "common/config.h" |
39 | | #include "common/logging.h" |
40 | | #include "common/status.h" |
41 | | #include "core/block/block.h" |
42 | | #include "storage/iterator/block_reader.h" |
43 | | #include "storage/iterator/vertical_block_reader.h" |
44 | | #include "storage/iterator/vertical_merge_iterator.h" |
45 | | #include "storage/iterators.h" |
46 | | #include "storage/olap_common.h" |
47 | | #include "storage/olap_define.h" |
48 | | #include "storage/rowid_conversion.h" |
49 | | #include "storage/rowset/beta_rowset.h" |
50 | | #include "storage/rowset/rowset.h" |
51 | | #include "storage/rowset/rowset_meta.h" |
52 | | #include "storage/rowset/rowset_writer.h" |
53 | | #include "storage/segment/segment.h" |
54 | | #include "storage/segment/segment_writer.h" |
55 | | #include "storage/storage_engine.h" |
56 | | #include "storage/tablet/base_tablet.h" |
57 | | #include "storage/tablet/tablet.h" |
58 | | #include "storage/tablet/tablet_fwd.h" |
59 | | #include "storage/tablet/tablet_meta.h" |
60 | | #include "storage/tablet/tablet_reader.h" |
61 | | #include "storage/types.h" |
62 | | #include "storage/utils.h" |
63 | | #include "util/slice.h" |
64 | | |
65 | | namespace doris { |
66 | | #include "common/compile_check_begin.h" |
// Horizontal (row-wise) merge of the source rowsets into `dst_rowset_writer`.
// A BlockReader is built over all inputs; aggregated blocks are read one at a
// time and appended to the destination writer until EOF or engine shutdown.
//
// @param tablet             tablet being compacted; its state is re-checked
//                           every iteration so a dropped tablet aborts the merge
// @param reader_type        compaction type driving this merge
// @param cur_tablet_schema  schema used to build the read/write blocks
// @param src_rowset_readers readers over the input rowsets
// @param dst_rowset_writer  writer producing the output rowset (flushed here)
// @param stats_output       optional; receives row counts and file-cache stats,
//                           and supplies the rowid_conversion to populate
// @return OK on success; INTERNAL_ERROR if the tablet is gone or the engine
//         stopped mid-merge; any read/write error is propagated
Status Merger::vmerge_rowsets(BaseTabletSPtr tablet, ReaderType reader_type,
                              const TabletSchema& cur_tablet_schema,
                              const std::vector<RowsetReaderSharedPtr>& src_rowset_readers,
                              RowsetWriter* dst_rowset_writer, Statistics* stats_output) {
    // Merge-on-write tables with cluster keys must use vertical compaction.
    if (!cur_tablet_schema.cluster_key_uids().empty()) {
        return Status::InternalError(
                "mow table with cluster keys does not support non vertical compaction");
    }
    BlockReader reader;
    TabletReader::ReaderParams reader_params;
    reader_params.tablet = tablet;
    reader_params.reader_type = reader_type;

    // Wrap every source rowset reader into one read source and collect the
    // delete predicates attached to those rowsets.
    TabletReadSource read_source;
    read_source.rs_splits.reserve(src_rowset_readers.size());
    for (const RowsetReaderSharedPtr& rs_reader : src_rowset_readers) {
        read_source.rs_splits.emplace_back(rs_reader);
    }
    read_source.fill_delete_predicates();
    reader_params.set_read_source(std::move(read_source));

    reader_params.version = dst_rowset_writer->version();

    TabletSchemaSPtr merge_tablet_schema = std::make_shared<TabletSchema>();
    merge_tablet_schema->copy_from(cur_tablet_schema);

    // Merge the columns in delete predicate that not in latest schema in to current tablet schema
    for (auto& del_pred_rs : reader_params.delete_predicates) {
        merge_tablet_schema->merge_dropped_columns(*del_pred_rs->tablet_schema());
    }
    reader_params.tablet_schema = merge_tablet_schema;
    if (!tablet->tablet_schema()->cluster_key_uids().empty()) {
        reader_params.delete_bitmap = tablet->tablet_meta()->delete_bitmap_ptr();
    }

    // Record the old-row -> new-row id mapping only when the caller provided a
    // conversion object to fill.
    if (stats_output && stats_output->rowid_conversion) {
        reader_params.record_rowids = true;
        reader_params.rowid_conversion = stats_output->rowid_conversion;
        stats_output->rowid_conversion->set_dst_rowset_id(dst_rowset_writer->rowset_id());
    }

    // Horizontal merge reads every column: ordinals [0, num_columns).
    reader_params.return_columns.resize(cur_tablet_schema.num_columns());
    std::iota(reader_params.return_columns.begin(), reader_params.return_columns.end(), 0);
    reader_params.origin_return_columns = &reader_params.return_columns;
    RETURN_IF_ERROR(reader.init(reader_params));

    Block block = cur_tablet_schema.create_block(reader_params.return_columns);
    size_t output_rows = 0;
    bool eof = false;
    while (!eof && !ExecEnv::GetInstance()->storage_engine().stopped()) {
        // Abort promptly if the tablet was dropped/replaced while compacting.
        auto tablet_state = tablet->tablet_state();
        if (tablet_state != TABLET_RUNNING && tablet_state != TABLET_NOTREADY) {
            tablet->clear_cache();
            return Status::Error<INTERNAL_ERROR>("tablet {} is not used any more",
                                                 tablet->tablet_id());
        }

        // Read one block from block reader
        RETURN_NOT_OK_STATUS_WITH_WARN(reader.next_block_with_aggregation(&block, &eof),
                                       "failed to read next block when merging rowsets of tablet " +
                                               std::to_string(tablet->tablet_id()));
        RETURN_NOT_OK_STATUS_WITH_WARN(dst_rowset_writer->add_block(&block),
                                       "failed to write block when merging rowsets of tablet " +
                                               std::to_string(tablet->tablet_id()));

        // Feed the rowid conversion with the source locations of this block and
        // the current per-segment row counts of the destination.
        if (reader_params.record_rowids && block.rows() > 0) {
            std::vector<uint32_t> segment_num_rows;
            RETURN_IF_ERROR(dst_rowset_writer->get_segment_num_rows(&segment_num_rows));
            stats_output->rowid_conversion->add(reader.current_block_row_locations(),
                                                segment_num_rows);
        }

        output_rows += block.rows();
        block.clear_column_data();
    }
    if (ExecEnv::GetInstance()->storage_engine().stopped()) {
        return Status::Error<INTERNAL_ERROR>("tablet {} failed to do compaction, engine stopped",
                                             tablet->tablet_id());
    }

    if (stats_output != nullptr) {
        stats_output->output_rows = output_rows;
        stats_output->merged_rows = reader.merged_rows();
        stats_output->filtered_rows = reader.filtered_rows();
        stats_output->bytes_read_from_local = reader.stats().file_cache_stats.bytes_read_from_local;
        stats_output->bytes_read_from_remote =
                reader.stats().file_cache_stats.bytes_read_from_remote;
        stats_output->cached_bytes_total = reader.stats().file_cache_stats.bytes_write_into_cache;
        if (config::is_cloud_mode()) {
            // NOTE(review): timers divided by 1000 — presumably us -> ms; confirm
            // against the io timer units before relying on these fields.
            stats_output->cloud_local_read_time =
                    reader.stats().file_cache_stats.local_io_timer / 1000;
            stats_output->cloud_remote_read_time =
                    reader.stats().file_cache_stats.remote_io_timer / 1000;
        }
    }

    RETURN_NOT_OK_STATUS_WITH_WARN(dst_rowset_writer->flush(),
                                   "failed to flush rowset when merging rowsets of tablet " +
                                           std::to_string(tablet->tablet_id()));

    return Status::OK();
}
169 | | |
170 | | // split columns into several groups, make sure all keys in one group |
171 | | // unique_key should consider sequence&delete column |
172 | | void Merger::vertical_split_columns(const TabletSchema& tablet_schema, |
173 | | std::vector<std::vector<uint32_t>>* column_groups, |
174 | | std::vector<uint32_t>* key_group_cluster_key_idxes, |
175 | 98 | int32_t num_columns_per_group) { |
176 | 98 | size_t num_key_cols = tablet_schema.num_key_columns(); |
177 | 98 | size_t total_cols = tablet_schema.num_columns(); |
178 | 98 | std::vector<uint32_t> key_columns; |
179 | 188 | for (auto i = 0; i < num_key_cols; ++i) { |
180 | 90 | key_columns.emplace_back(i); |
181 | 90 | } |
182 | | // in unique key, sequence & delete sign column should merge with key columns |
183 | 98 | int32_t sequence_col_idx = -1; |
184 | 98 | int32_t delete_sign_idx = -1; |
185 | | // in key column compaction, seq_col real index is _num_key_columns |
186 | | // and delete_sign column is _block->columns() - 1 |
187 | 98 | if (tablet_schema.keys_type() == KeysType::UNIQUE_KEYS) { |
188 | 50 | if (tablet_schema.has_sequence_col()) { |
189 | 4 | sequence_col_idx = tablet_schema.sequence_col_idx(); |
190 | 4 | key_columns.emplace_back(sequence_col_idx); |
191 | 4 | } |
192 | 50 | delete_sign_idx = tablet_schema.field_index(DELETE_SIGN); |
193 | 50 | if (delete_sign_idx != -1) { |
194 | 44 | key_columns.emplace_back(delete_sign_idx); |
195 | 44 | } |
196 | 50 | if (!tablet_schema.cluster_key_uids().empty()) { |
197 | 0 | for (const auto& cid : tablet_schema.cluster_key_uids()) { |
198 | 0 | auto idx = tablet_schema.field_index(cid); |
199 | 0 | DCHECK(idx >= 0) << "could not find cluster key column with unique_id=" << cid |
200 | 0 | << " in tablet schema, table_id=" << tablet_schema.table_id(); |
201 | 0 | if (idx >= num_key_cols) { |
202 | 0 | key_columns.emplace_back(idx); |
203 | 0 | } |
204 | 0 | } |
205 | | // tablet schema unique ids: [1, 2, 5, 3, 6, 4], [1 2] is key columns |
206 | | // cluster key unique ids: [3, 1, 4] |
207 | | // the key_columns should be [0, 1, 3, 5] |
208 | | // the key_group_cluster_key_idxes should be [2, 1, 3] |
209 | 0 | for (const auto& cid : tablet_schema.cluster_key_uids()) { |
210 | 0 | auto idx = tablet_schema.field_index(cid); |
211 | 0 | for (auto i = 0; i < key_columns.size(); ++i) { |
212 | 0 | if (idx == key_columns[i]) { |
213 | 0 | key_group_cluster_key_idxes->emplace_back(i); |
214 | 0 | break; |
215 | 0 | } |
216 | 0 | } |
217 | 0 | } |
218 | 0 | } |
219 | 50 | } |
220 | 98 | VLOG_NOTICE << "sequence_col_idx=" << sequence_col_idx |
221 | 43 | << ", delete_sign_idx=" << delete_sign_idx; |
222 | | // for duplicate no keys |
223 | 98 | if (!key_columns.empty()) { |
224 | 79 | column_groups->emplace_back(key_columns); |
225 | 79 | } |
226 | | |
227 | 98 | std::vector<uint32_t> value_columns; |
228 | | |
229 | 1.00k | for (size_t i = num_key_cols; i < total_cols; ++i) { |
230 | 905 | if (i == sequence_col_idx || i == delete_sign_idx || |
231 | 905 | key_columns.end() != std::find(key_columns.begin(), key_columns.end(), i)) { |
232 | 48 | continue; |
233 | 48 | } |
234 | | |
235 | 857 | if (!value_columns.empty() && value_columns.size() % num_columns_per_group == 0) { |
236 | 140 | column_groups->push_back(value_columns); |
237 | 140 | value_columns.clear(); |
238 | 140 | } |
239 | 857 | value_columns.push_back(cast_set<uint32_t>(i)); |
240 | 857 | } |
241 | | |
242 | 98 | if (!value_columns.empty()) { |
243 | 98 | column_groups->push_back(value_columns); |
244 | 98 | } |
245 | 98 | } |
246 | | |
// Vertically compact ONE column group of the source rowsets into
// `dst_rowset_writer`. The key group is compacted first by callers and fills
// `row_source_buf`; value groups then replay that buffer so their rows line up
// with the merged key order.
//
// @param tablet                      tablet being compacted; state re-checked per block
// @param reader_type                 compaction type driving this merge
// @param tablet_schema               schema used to build read/write blocks
// @param is_key                      true when this is the key column group
// @param column_group                ordinals of the columns in this group
// @param row_source_buf              shared row-source buffer (written by the key
//                                    pass, read by value passes)
// @param src_rowset_readers          readers over the input rowsets
// @param dst_rowset_writer           destination writer; columns flushed at the end
// @param max_rows_per_segment        segment split threshold for the writer
// @param stats_output                optional stats sink; row counts only filled
//                                    on the key pass, cache stats always
// @param key_group_cluster_key_idxes cluster-key positions inside the key group
// @param batch_size                  reader batch size (rows per block)
// @param sample_info                 per-group sampling info updated by the reader
// @param enable_sparse_optimization  sparse wide-table read optimization toggle
Status Merger::vertical_compact_one_group(
        BaseTabletSPtr tablet, ReaderType reader_type, const TabletSchema& tablet_schema,
        bool is_key, const std::vector<uint32_t>& column_group, RowSourcesBuffer* row_source_buf,
        const std::vector<RowsetReaderSharedPtr>& src_rowset_readers,
        RowsetWriter* dst_rowset_writer, uint32_t max_rows_per_segment, Statistics* stats_output,
        std::vector<uint32_t> key_group_cluster_key_idxes, int64_t batch_size,
        CompactionSampleInfo* sample_info, bool enable_sparse_optimization) {
    // build tablet reader
    VLOG_NOTICE << "vertical compact one group, max_rows_per_segment=" << max_rows_per_segment;
    VerticalBlockReader reader(row_source_buf);
    TabletReader::ReaderParams reader_params;
    reader_params.is_key_column_group = is_key;
    reader_params.key_group_cluster_key_idxes = key_group_cluster_key_idxes;
    reader_params.tablet = tablet;
    reader_params.reader_type = reader_type;
    reader_params.enable_sparse_optimization = enable_sparse_optimization;

    // Wrap every source rowset reader into one read source and collect the
    // delete predicates attached to those rowsets.
    TabletReadSource read_source;
    read_source.rs_splits.reserve(src_rowset_readers.size());
    for (const RowsetReaderSharedPtr& rs_reader : src_rowset_readers) {
        read_source.rs_splits.emplace_back(rs_reader);
    }
    read_source.fill_delete_predicates();
    reader_params.set_read_source(std::move(read_source));

    reader_params.version = dst_rowset_writer->version();

    TabletSchemaSPtr merge_tablet_schema = std::make_shared<TabletSchema>();
    merge_tablet_schema->copy_from(tablet_schema);

    // Merge columns referenced by delete predicates but dropped from the
    // latest schema, so those predicates can still be evaluated.
    for (auto& del_pred_rs : reader_params.delete_predicates) {
        merge_tablet_schema->merge_dropped_columns(*del_pred_rs->tablet_schema());
    }

    reader_params.tablet_schema = merge_tablet_schema;
    bool has_cluster_key = false;
    if (!tablet->tablet_schema()->cluster_key_uids().empty()) {
        reader_params.delete_bitmap = tablet->tablet_meta()->delete_bitmap_ptr();
        has_cluster_key = true;
    }

    // Rowid conversion is driven by the key group only.
    if (is_key && stats_output && stats_output->rowid_conversion) {
        reader_params.record_rowids = true;
        reader_params.rowid_conversion = stats_output->rowid_conversion;
        stats_output->rowid_conversion->set_dst_rowset_id(dst_rowset_writer->rowset_id());
    }

    reader_params.return_columns = column_group;
    reader_params.origin_return_columns = &reader_params.return_columns;
    reader_params.batch_size = batch_size;
    RETURN_IF_ERROR(reader.init(reader_params, sample_info));

    Block block = tablet_schema.create_block(reader_params.return_columns);
    size_t output_rows = 0;
    bool eof = false;
    while (!eof && !ExecEnv::GetInstance()->storage_engine().stopped()) {
        // Abort promptly if the tablet was dropped/replaced while compacting.
        auto tablet_state = tablet->tablet_state();
        if (tablet_state != TABLET_RUNNING && tablet_state != TABLET_NOTREADY) {
            tablet->clear_cache();
            return Status::Error<INTERNAL_ERROR>("tablet {} is not used any more",
                                                 tablet->tablet_id());
        }
        // Read one block from block reader
        RETURN_NOT_OK_STATUS_WITH_WARN(reader.next_block_with_aggregation(&block, &eof),
                                       "failed to read next block when merging rowsets of tablet " +
                                               std::to_string(tablet->tablet_id()));
        RETURN_NOT_OK_STATUS_WITH_WARN(
                dst_rowset_writer->add_columns(&block, column_group, is_key, max_rows_per_segment,
                                               has_cluster_key),
                "failed to write block when merging rowsets of tablet " +
                        std::to_string(tablet->tablet_id()));

        // Feed the rowid conversion (key pass only) with this block's source
        // locations and the destination's current per-segment row counts.
        if (is_key && reader_params.record_rowids && block.rows() > 0) {
            std::vector<uint32_t> segment_num_rows;
            RETURN_IF_ERROR(dst_rowset_writer->get_segment_num_rows(&segment_num_rows));
            stats_output->rowid_conversion->add(reader.current_block_row_locations(),
                                                segment_num_rows);
        }
        output_rows += block.rows();
        block.clear_column_data();
    }
    if (ExecEnv::GetInstance()->storage_engine().stopped()) {
        return Status::Error<INTERNAL_ERROR>("tablet {} failed to do compaction, engine stopped",
                                             tablet->tablet_id());
    }

    if (stats_output != nullptr) {
        // Row-level stats only make sense once, on the key pass; cache stats
        // are recorded for every group.
        if (is_key) {
            stats_output->output_rows = output_rows;
            stats_output->merged_rows = reader.merged_rows();
            stats_output->filtered_rows = reader.filtered_rows();
        }
        stats_output->bytes_read_from_local = reader.stats().file_cache_stats.bytes_read_from_local;
        stats_output->bytes_read_from_remote =
                reader.stats().file_cache_stats.bytes_read_from_remote;
        stats_output->cached_bytes_total = reader.stats().file_cache_stats.bytes_write_into_cache;
        if (config::is_cloud_mode()) {
            // NOTE(review): timers divided by 1000 — presumably us -> ms; confirm
            // against the io timer units before relying on these fields.
            stats_output->cloud_local_read_time =
                    reader.stats().file_cache_stats.local_io_timer / 1000;
            stats_output->cloud_remote_read_time =
                    reader.stats().file_cache_stats.remote_io_timer / 1000;
        }
    }
    RETURN_IF_ERROR(dst_rowset_writer->flush_columns(is_key));

    return Status::OK();
}
354 | | |
355 | | // for segcompaction |
356 | | Status Merger::vertical_compact_one_group( |
357 | | int64_t tablet_id, ReaderType reader_type, const TabletSchema& tablet_schema, bool is_key, |
358 | | const std::vector<uint32_t>& column_group, RowSourcesBuffer* row_source_buf, |
359 | | VerticalBlockReader& src_block_reader, segment_v2::SegmentWriter& dst_segment_writer, |
360 | | Statistics* stats_output, uint64_t* index_size, KeyBoundsPB& key_bounds, |
361 | 22 | SimpleRowIdConversion* rowid_conversion) { |
362 | | // TODO: record_rowids |
363 | 22 | Block block = tablet_schema.create_block(column_group); |
364 | 22 | size_t output_rows = 0; |
365 | 22 | bool eof = false; |
366 | 138 | while (!eof && !ExecEnv::GetInstance()->storage_engine().stopped()) { |
367 | | // Read one block from block reader |
368 | 116 | RETURN_NOT_OK_STATUS_WITH_WARN(src_block_reader.next_block_with_aggregation(&block, &eof), |
369 | 116 | "failed to read next block when merging rowsets of tablet " + |
370 | 116 | std::to_string(tablet_id)); |
371 | 116 | if (!block.rows()) { |
372 | 0 | break; |
373 | 0 | } |
374 | 116 | RETURN_NOT_OK_STATUS_WITH_WARN(dst_segment_writer.append_block(&block, 0, block.rows()), |
375 | 116 | "failed to write block when merging rowsets of tablet " + |
376 | 116 | std::to_string(tablet_id)); |
377 | | |
378 | 116 | if (is_key && rowid_conversion != nullptr) { |
379 | 30 | rowid_conversion->add(src_block_reader.current_block_row_locations()); |
380 | 30 | } |
381 | 116 | output_rows += block.rows(); |
382 | 116 | block.clear_column_data(); |
383 | 116 | } |
384 | 22 | if (ExecEnv::GetInstance()->storage_engine().stopped()) { |
385 | 0 | return Status::Error<INTERNAL_ERROR>("tablet {} failed to do compaction, engine stopped", |
386 | 0 | tablet_id); |
387 | 0 | } |
388 | | |
389 | 22 | if (stats_output != nullptr) { |
390 | 22 | if (is_key) { |
391 | 11 | stats_output->output_rows = output_rows; |
392 | 11 | stats_output->merged_rows = src_block_reader.merged_rows(); |
393 | 11 | stats_output->filtered_rows = src_block_reader.filtered_rows(); |
394 | 11 | } |
395 | 22 | stats_output->bytes_read_from_local = |
396 | 22 | src_block_reader.stats().file_cache_stats.bytes_read_from_local; |
397 | 22 | stats_output->bytes_read_from_remote = |
398 | 22 | src_block_reader.stats().file_cache_stats.bytes_read_from_remote; |
399 | 22 | stats_output->cached_bytes_total = |
400 | 22 | src_block_reader.stats().file_cache_stats.bytes_write_into_cache; |
401 | 22 | } |
402 | | |
403 | | // segcompaction produce only one segment at once |
404 | 22 | RETURN_IF_ERROR(dst_segment_writer.finalize_columns_data()); |
405 | 22 | RETURN_IF_ERROR(dst_segment_writer.finalize_columns_index(index_size)); |
406 | | |
407 | 22 | if (is_key) { |
408 | 11 | Slice min_key = dst_segment_writer.min_encoded_key(); |
409 | 11 | Slice max_key = dst_segment_writer.max_encoded_key(); |
410 | 11 | DCHECK_LE(min_key.compare(max_key), 0); |
411 | 11 | key_bounds.set_min_key(min_key.to_string()); |
412 | 11 | key_bounds.set_max_key(max_key.to_string()); |
413 | 11 | } |
414 | | |
415 | 22 | return Status::OK(); |
416 | 22 | } |
417 | | |
// Estimate the reader batch size (rows per block) for one vertical compaction
// column group, bounding per-way memory use.
//
// Sources of the per-row size estimate, in priority order:
//   1. historical sampling info (group_data_size) blended with the most recent
//      observation (bytes/rows) via exponential smoothing,
//   2. either one alone when only one is available,
//   3. a per-row estimate derived from segment-footer raw_data_bytes
//      (`group_per_row_from_footer`) unless `footer_fallback` is set,
//   4. a conservative default when nothing is known.
//
// @param group_index              index into the tablet's per-group sample infos
// @param tablet                   tablet being compacted (owns the sample infos)
// @param way_cnt                  number of merge ways; memory budget is split by it
// @param reader_type              selects which sample-info set/lock to use
// @param group_per_row_from_footer per-row bytes precomputed from segment footers
//                                 (<= 0 when unavailable)
// @param footer_fallback          when true, skip the footer-based estimate
// @return batch size in rows, clamped to [32, 4096 - 32]
//         (4096 - 32 / 1024 - 32: presumably sized to stay just under a
//         power-of-two block allocation — TODO confirm)
int64_t estimate_batch_size(int group_index, BaseTabletSPtr tablet, int64_t way_cnt,
                            ReaderType reader_type, int64_t group_per_row_from_footer,
                            bool footer_fallback) {
    // Sample infos are shared across compaction threads; take the per-type lock
    // for the whole read-modify-write below.
    auto& sample_info_lock = tablet->get_sample_info_lock(reader_type);
    auto& sample_infos = tablet->get_sample_infos(reader_type);
    std::unique_lock<std::mutex> lock(sample_info_lock);
    CompactionSampleInfo info = sample_infos[group_index];
    if (way_cnt <= 0) {
        LOG(INFO) << "estimate batch size for vertical compaction, tablet id: "
                  << tablet->tablet_id() << " way cnt: " << way_cnt;
        return 4096 - 32;
    }
    // Split the global compaction memory budget evenly across merge ways.
    int64_t block_mem_limit = config::compaction_memory_bytes_limit / way_cnt;
    if (tablet->last_compaction_status.is<ErrorCode::MEM_LIMIT_EXCEEDED>()) {
        // Last compaction blew the memory limit: be 4x more conservative.
        block_mem_limit /= 4;
    }

    int64_t group_data_size = 0;
    if (info.group_data_size > 0 && info.bytes > 0 && info.rows > 0) {
        // Both a historical estimate and a fresh observation exist: blend them
        // with exponential smoothing and persist the result.
        double smoothing_factor = 0.5;
        group_data_size =
                int64_t((cast_set<double>(info.group_data_size) * (1 - smoothing_factor)) +
                        (cast_set<double>(info.bytes / info.rows) * smoothing_factor));
        sample_infos[group_index].group_data_size = group_data_size;
    } else if (info.group_data_size > 0 && (info.bytes <= 0 || info.rows <= 0)) {
        // Only the historical estimate exists.
        group_data_size = info.group_data_size;
    } else if (info.group_data_size <= 0 && info.bytes > 0 && info.rows > 0) {
        // Only a fresh observation exists: adopt it as the new estimate.
        group_data_size = info.bytes / info.rows;
        sample_infos[group_index].group_data_size = group_data_size;
    } else {
        // No historical sampling data available.
        // Try to use raw_data_bytes from segment footer for a better estimate.
        if (!footer_fallback && group_per_row_from_footer > 0) {
            int64_t batch_size = block_mem_limit / group_per_row_from_footer;
            int64_t res = std::max(std::min(batch_size, int64_t(4096 - 32)), int64_t(32L));
            LOG(INFO) << "estimate batch size from footer for vertical compaction, tablet id: "
                      << tablet->tablet_id()
                      << " group_per_row_from_footer: " << group_per_row_from_footer
                      << " way cnt: " << way_cnt << " batch size: " << res;
            return res;
        }
        // Nothing to go on: small conservative default.
        LOG(INFO) << "estimate batch size for vertical compaction, tablet id: "
                  << tablet->tablet_id() << " group data size: " << info.group_data_size
                  << " row num: " << info.rows << " consume bytes: " << info.bytes
                  << " footer_fallback: " << footer_fallback;
        return 1024 - 32;
    }

    if (group_data_size <= 0) {
        LOG(WARNING) << "estimate batch size for vertical compaction, tablet id: "
                     << tablet->tablet_id() << " unexpected group data size: " << group_data_size;
        return 4096 - 32;
    }

    // Reset the consumed observation so the next compaction samples afresh.
    sample_infos[group_index].bytes = 0;
    sample_infos[group_index].rows = 0;

    int64_t batch_size = block_mem_limit / group_data_size;
    int64_t res = std::max(std::min(batch_size, int64_t(4096 - 32)), int64_t(32L));
    LOG(INFO) << "estimate batch size for vertical compaction, tablet id: " << tablet->tablet_id()
              << " group data size: " << info.group_data_size << " row num: " << info.rows
              << " consume bytes: " << info.bytes << " way cnt: " << way_cnt
              << " batch size: " << res;
    return res;
}
483 | | |
484 | | // steps to do vertical merge: |
485 | | // 1. split columns into column groups |
486 | | // 2. compact groups one by one, generate a row_source_buf when compact key group |
487 | | // and use this row_source_buf to compact value column groups |
488 | | // 3. build output rowset |
489 | | Status Merger::vertical_merge_rowsets(BaseTabletSPtr tablet, ReaderType reader_type, |
490 | | const TabletSchema& tablet_schema, |
491 | | const std::vector<RowsetReaderSharedPtr>& src_rowset_readers, |
492 | | RowsetWriter* dst_rowset_writer, |
493 | | uint32_t max_rows_per_segment, int64_t merge_way_num, |
494 | | Statistics* stats_output, |
495 | 87 | VerticalCompactionProgressCallback progress_cb) { |
496 | 87 | LOG(INFO) << "Start to do vertical compaction, tablet_id: " << tablet->tablet_id(); |
497 | 87 | std::vector<std::vector<uint32_t>> column_groups; |
498 | 87 | std::vector<uint32_t> key_group_cluster_key_idxes; |
499 | | // If BE config vertical_compaction_num_columns_per_group has been modified from |
500 | | // its default value (5), use the BE config; otherwise use the tablet meta value. |
501 | 87 | constexpr int32_t default_num_columns_per_group = 5; |
502 | 87 | int32_t num_columns_per_group = |
503 | 87 | config::vertical_compaction_num_columns_per_group != default_num_columns_per_group |
504 | 87 | ? config::vertical_compaction_num_columns_per_group |
505 | 87 | : tablet->tablet_meta()->vertical_compaction_num_columns_per_group(); |
506 | | |
507 | 87 | DBUG_EXECUTE_IF("Merger.vertical_merge_rowsets.check_num_columns_per_group", { |
508 | 87 | auto expected_value = DebugPoints::instance()->get_debug_param_or_default<int32_t>( |
509 | 87 | "Merger.vertical_merge_rowsets.check_num_columns_per_group", "expected_value", -1); |
510 | 87 | auto expected_tablet_id = DebugPoints::instance()->get_debug_param_or_default<int64_t>( |
511 | 87 | "Merger.vertical_merge_rowsets.check_num_columns_per_group", "tablet_id", -1); |
512 | 87 | if (expected_tablet_id != -1 && expected_tablet_id == tablet->tablet_id()) { |
513 | 87 | if (expected_value != -1 && expected_value != num_columns_per_group) { |
514 | 87 | LOG(FATAL) << "DEBUG_POINT CHECK FAILED: expected num_columns_per_group=" |
515 | 87 | << expected_value << " but got " << num_columns_per_group |
516 | 87 | << " for tablet_id=" << tablet->tablet_id(); |
517 | 87 | } else { |
518 | 87 | LOG(INFO) << "DEBUG_POINT CHECK PASSED: num_columns_per_group=" |
519 | 87 | << num_columns_per_group << ", tablet_id=" << tablet->tablet_id(); |
520 | 87 | } |
521 | 87 | } |
522 | 87 | }); |
523 | | |
524 | 87 | vertical_split_columns(tablet_schema, &column_groups, &key_group_cluster_key_idxes, |
525 | 87 | num_columns_per_group); |
526 | | |
527 | 87 | if (progress_cb) { |
528 | 0 | progress_cb(column_groups.size(), 0); |
529 | 0 | } |
530 | | |
531 | | // Calculate total rows for density calculation after compaction |
532 | 87 | int64_t total_rows = 0; |
533 | 256 | for (const auto& rs_reader : src_rowset_readers) { |
534 | 256 | total_rows += rs_reader->rowset()->rowset_meta()->num_rows(); |
535 | 256 | } |
536 | | |
537 | | // Use historical density for sparse wide table optimization |
538 | | // density = (total_cells - null_cells) / total_cells, smaller means more sparse |
539 | | // When density <= threshold, enable sparse optimization |
540 | | // threshold = 0 means disable, 1 means always enable (default) |
541 | 87 | bool enable_sparse_optimization = false; |
542 | 87 | if (config::sparse_column_compaction_threshold_percent > 0 && |
543 | 87 | tablet->keys_type() == KeysType::UNIQUE_KEYS) { |
544 | 44 | double density = tablet->compaction_density.load(); |
545 | 44 | enable_sparse_optimization = density <= config::sparse_column_compaction_threshold_percent; |
546 | | |
547 | 44 | LOG(INFO) << "Vertical compaction sparse optimization check: tablet_id=" |
548 | 44 | << tablet->tablet_id() << ", density=" << density |
549 | 44 | << ", threshold=" << config::sparse_column_compaction_threshold_percent |
550 | 44 | << ", total_rows=" << total_rows |
551 | 44 | << ", num_columns=" << tablet_schema.num_columns() |
552 | 44 | << ", total_cells=" << total_rows * tablet_schema.num_columns() |
553 | 44 | << ", enable_sparse_optimization=" << enable_sparse_optimization; |
554 | 44 | } |
555 | | |
556 | 87 | RowSourcesBuffer row_sources_buf(tablet->tablet_id(), dst_rowset_writer->context().tablet_path, |
557 | 87 | reader_type); |
558 | 87 | Merger::Statistics total_stats; |
559 | 87 | if (stats_output != nullptr) { |
560 | 87 | total_stats.rowid_conversion = stats_output->rowid_conversion; |
561 | 87 | } |
562 | 87 | auto& sample_info_lock = tablet->get_sample_info_lock(reader_type); |
563 | 87 | auto& sample_infos = tablet->get_sample_infos(reader_type); |
564 | 87 | { |
565 | 87 | std::unique_lock<std::mutex> lock(sample_info_lock); |
566 | 87 | sample_infos.resize(column_groups.size()); |
567 | 87 | } |
568 | | // Collect per-column raw_data_bytes from segment footer for first-time batch size estimation. |
569 | | // raw_data_bytes is the original data size before encoding, close to runtime Block::bytes(). |
570 | | // Only collect when needed: skip if manual batch_size override is set, or if ALL groups |
571 | | // already have historical sampling data. Use per-group granularity so that schema evolution |
572 | | // (new groups without history) still gets footer-based estimation. |
573 | 87 | struct ColumnRawSizeInfo { |
574 | 87 | int64_t total_raw_bytes = 0; |
575 | 87 | int64_t rows_with_data = 0; |
576 | 87 | }; |
577 | 87 | std::unordered_map<int32_t, ColumnRawSizeInfo> column_raw_sizes; |
578 | 87 | bool need_footer_collection = false; |
579 | 87 | if (config::compaction_batch_size == -1) { |
580 | 51 | std::unique_lock<std::mutex> lock(sample_info_lock); |
581 | 51 | for (const auto& info : sample_infos) { |
582 | 51 | if (info.group_data_size <= 0 && info.bytes <= 0 && info.rows <= 0) { |
583 | 51 | need_footer_collection = true; |
584 | 51 | break; |
585 | 51 | } |
586 | 51 | } |
587 | 51 | } |
588 | 87 | if (need_footer_collection) { |
589 | 154 | for (const auto& rs_reader : src_rowset_readers) { |
590 | 154 | auto beta_rowset = std::dynamic_pointer_cast<BetaRowset>(rs_reader->rowset()); |
591 | 154 | if (!beta_rowset) { |
592 | 0 | continue; |
593 | 0 | } |
594 | 154 | std::vector<segment_v2::SegmentSharedPtr> segments; |
595 | 154 | auto st = beta_rowset->load_segments(&segments); |
596 | 154 | if (!st.ok()) { |
597 | 0 | LOG(WARNING) << "Failed to load segments for footer raw_data_bytes collection" |
598 | 0 | << ", tablet_id: " << tablet->tablet_id() |
599 | 0 | << ", rowset_id: " << beta_rowset->rowset_id() << ", status: " << st; |
600 | 0 | continue; |
601 | 0 | } |
602 | 255 | for (const auto& segment : segments) { |
603 | 255 | int64_t row_count = segment->num_rows(); |
604 | 255 | auto collect_st = segment->traverse_column_meta_pbs( |
605 | 1.21k | [&](const segment_v2::ColumnMetaPB& meta) { |
606 | 1.21k | int32_t uid = meta.unique_id(); |
607 | 1.21k | if (uid >= 0 && meta.has_raw_data_bytes()) { |
608 | 804 | auto& info = column_raw_sizes[uid]; |
609 | 804 | info.total_raw_bytes += meta.raw_data_bytes(); |
610 | 804 | info.rows_with_data += row_count; |
611 | 804 | } |
612 | 1.21k | }); |
613 | 255 | if (!collect_st.ok()) { |
614 | 0 | LOG(WARNING) << "Failed to traverse column meta for footer collection" |
615 | 0 | << ", tablet_id: " << tablet->tablet_id() |
616 | 0 | << ", status: " << collect_st; |
617 | 0 | } |
618 | 255 | } |
619 | 154 | } |
620 | 51 | } |
621 | | |
622 | | // Pre-compute per-row estimate for each column group from footer data. |
623 | 87 | std::vector<int64_t> group_per_row_from_footer(column_groups.size(), 0); |
624 | 87 | std::vector<bool> group_footer_fallback(column_groups.size(), false); |
625 | 382 | for (size_t i = 0; i < column_groups.size(); ++i) { |
626 | 295 | int64_t group_per_row = 0; |
627 | 295 | bool need_fallback = false; |
628 | 331 | for (uint32_t col_ordinal : column_groups[i]) { |
629 | 331 | const auto& col = tablet_schema.column(col_ordinal); |
630 | 331 | int32_t uid = col.unique_id(); |
631 | | |
632 | | // Variant columns (root or subcolumn): raw_data_bytes is 0 (TODO in writer), |
633 | | // cannot estimate from footer, fallback to default for the entire group. |
634 | 331 | if (uid < 0 || col.is_variant_type()) { |
635 | 3 | need_fallback = true; |
636 | 3 | break; |
637 | 3 | } |
638 | | |
639 | | // Any column without footer data (e.g. legacy segments written before |
640 | | // raw_data_bytes existed) makes the group sample partial and unreliable. |
641 | | // Fall back to the default for the whole group instead of summing only |
642 | | // the columns we measured. |
643 | 328 | auto it = column_raw_sizes.find(uid); |
644 | 328 | if (it == column_raw_sizes.end() || it->second.rows_with_data <= 0) { |
645 | 196 | need_fallback = true; |
646 | 196 | break; |
647 | 196 | } |
648 | | |
649 | 132 | int64_t raw_per_row = it->second.total_raw_bytes / it->second.rows_with_data; |
650 | 132 | int64_t col_per_row = 0; |
651 | | |
652 | 132 | if (col.type() == FieldType::OLAP_FIELD_TYPE_ARRAY || |
653 | 132 | col.type() == FieldType::OLAP_FIELD_TYPE_MAP || |
654 | 132 | col.type() == FieldType::OLAP_FIELD_TYPE_STRUCT) { |
655 | | // Complex types: raw_data_bytes recursively aggregates sub-writers. |
656 | 0 | col_per_row = raw_per_row; |
657 | 132 | } else if (col.is_length_variable_type()) { |
658 | | // Variable-length scalar (VARCHAR/STRING/HLL/BITMAP/...): raw_per_row |
659 | | // is the average char payload across all rows; reader still pays an |
660 | | // 8-byte offset entry per row regardless of null-ness. |
661 | 0 | col_per_row = raw_per_row + 8; |
662 | 0 | if (col.is_nullable()) { |
663 | 0 | col_per_row += 1; // null map |
664 | 0 | } |
665 | 132 | } else { |
666 | | // Fixed-width scalar (INT/BIGINT/DOUBLE/DATE/...). |
667 | | // raw_data_bytes only counts non-null payload (append_nulls() does |
668 | | // not advance the page builder), but FileColumnIterator::next_batch |
669 | | // still calls ColumnNullable::insert_many_defaults() for null runs, |
670 | | // which grows the nested PODArray by N * type_size. So the runtime |
671 | | // per-row footprint is at least type_size, no matter how sparse. |
672 | 132 | int64_t type_size = get_type_info(&col)->size(); |
673 | 132 | col_per_row = std::max(raw_per_row, type_size); |
674 | 132 | if (col.is_nullable()) { |
675 | 1 | col_per_row += 1; // null map |
676 | 1 | } |
677 | 132 | } |
678 | | |
679 | 132 | group_per_row += col_per_row; |
680 | 132 | } |
681 | 295 | group_per_row_from_footer[i] = group_per_row; |
682 | 295 | group_footer_fallback[i] = need_fallback; |
683 | 295 | } |
684 | | |
685 | | // compact group one by one |
686 | 381 | for (auto i = 0; i < column_groups.size(); ++i) { |
687 | 295 | VLOG_NOTICE << "row source size: " << row_sources_buf.total_size(); |
688 | 295 | bool is_key = (i == 0); |
689 | 295 | int64_t batch_size = config::compaction_batch_size != -1 |
690 | 295 | ? config::compaction_batch_size |
691 | 295 | : estimate_batch_size(i, tablet, merge_way_num, reader_type, |
692 | 99 | group_per_row_from_footer[i], |
693 | 99 | group_footer_fallback[i]); |
694 | 295 | CompactionSampleInfo sample_info; |
695 | 295 | Merger::Statistics group_stats; |
696 | 295 | group_stats.rowid_conversion = total_stats.rowid_conversion; |
697 | 295 | Merger::Statistics* group_stats_ptr = stats_output != nullptr ? &group_stats : nullptr; |
698 | 295 | Status st = vertical_compact_one_group( |
699 | 295 | tablet, reader_type, tablet_schema, is_key, column_groups[i], &row_sources_buf, |
700 | 295 | src_rowset_readers, dst_rowset_writer, max_rows_per_segment, group_stats_ptr, |
701 | 295 | key_group_cluster_key_idxes, batch_size, &sample_info, enable_sparse_optimization); |
702 | 295 | { |
703 | 295 | std::unique_lock<std::mutex> lock(sample_info_lock); |
704 | 295 | sample_infos[i] = sample_info; |
705 | 295 | } |
706 | 295 | RETURN_IF_ERROR(st); |
707 | 294 | if (stats_output != nullptr) { |
708 | 294 | total_stats.bytes_read_from_local += group_stats.bytes_read_from_local; |
709 | 294 | total_stats.bytes_read_from_remote += group_stats.bytes_read_from_remote; |
710 | 294 | total_stats.cached_bytes_total += group_stats.cached_bytes_total; |
711 | 294 | total_stats.cloud_local_read_time += group_stats.cloud_local_read_time; |
712 | 294 | total_stats.cloud_remote_read_time += group_stats.cloud_remote_read_time; |
713 | 294 | if (is_key) { |
714 | 86 | total_stats.output_rows = group_stats.output_rows; |
715 | 86 | total_stats.merged_rows = group_stats.merged_rows; |
716 | 86 | total_stats.filtered_rows = group_stats.filtered_rows; |
717 | 86 | total_stats.rowid_conversion = group_stats.rowid_conversion; |
718 | 86 | } |
719 | 294 | } |
720 | 294 | if (progress_cb) { |
721 | 0 | progress_cb(column_groups.size(), i + 1); |
722 | 0 | } |
723 | 294 | if (is_key) { |
724 | 86 | RETURN_IF_ERROR(row_sources_buf.flush()); |
725 | 86 | } |
726 | 294 | RETURN_IF_ERROR(row_sources_buf.seek_to_begin()); |
727 | 294 | } |
728 | | |
729 | | // Calculate and store density for next compaction's sparse optimization threshold |
730 | | // density = (total_cells - total_null_count) / total_cells |
731 | | // Smaller density means more sparse |
732 | 86 | { |
733 | 86 | std::unique_lock<std::mutex> lock(sample_info_lock); |
734 | 86 | int64_t total_null_count = 0; |
735 | 294 | for (const auto& info : sample_infos) { |
736 | 294 | total_null_count += info.null_count; |
737 | 294 | } |
738 | 86 | int64_t total_cells = total_rows * tablet_schema.num_columns(); |
739 | 86 | if (total_cells > 0) { |
740 | 85 | double density = static_cast<double>(total_cells - total_null_count) / |
741 | 85 | static_cast<double>(total_cells); |
742 | 85 | tablet->compaction_density.store(density); |
743 | 85 | LOG(INFO) << "Vertical compaction density update: tablet_id=" << tablet->tablet_id() |
744 | 85 | << ", total_cells=" << total_cells |
745 | 85 | << ", total_null_count=" << total_null_count << ", density=" << density; |
746 | 85 | } |
747 | 86 | } |
748 | | |
749 | | // finish compact, build output rowset |
750 | 86 | VLOG_NOTICE << "finish compact groups"; |
751 | 86 | RETURN_IF_ERROR(dst_rowset_writer->final_flush()); |
752 | | |
753 | 86 | if (stats_output != nullptr) { |
754 | 86 | *stats_output = total_stats; |
755 | 86 | } |
756 | | |
757 | 86 | return Status::OK(); |
758 | 86 | } |
759 | | #include "common/compile_check_end.h" |
760 | | } // namespace doris |