Coverage Report

Created: 2026-04-28 15:58

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/storage/merger.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "storage/merger.h"
19
20
#include <gen_cpp/olap_file.pb.h>
21
#include <gen_cpp/types.pb.h>
22
#include <stddef.h>
23
#include <unistd.h>
24
25
#include <algorithm>
26
#include <iterator>
27
#include <memory>
28
#include <mutex>
29
#include <numeric>
30
#include <ostream>
31
#include <shared_mutex>
32
#include <string>
33
#include <unordered_map>
34
#include <utility>
35
#include <vector>
36
37
#include "cloud/config.h"
38
#include "common/config.h"
39
#include "common/logging.h"
40
#include "common/status.h"
41
#include "core/block/block.h"
42
#include "storage/iterator/block_reader.h"
43
#include "storage/iterator/vertical_block_reader.h"
44
#include "storage/iterator/vertical_merge_iterator.h"
45
#include "storage/iterators.h"
46
#include "storage/olap_common.h"
47
#include "storage/olap_define.h"
48
#include "storage/rowid_conversion.h"
49
#include "storage/rowset/beta_rowset.h"
50
#include "storage/rowset/rowset.h"
51
#include "storage/rowset/rowset_meta.h"
52
#include "storage/rowset/rowset_writer.h"
53
#include "storage/segment/segment.h"
54
#include "storage/segment/segment_writer.h"
55
#include "storage/storage_engine.h"
56
#include "storage/tablet/base_tablet.h"
57
#include "storage/tablet/tablet.h"
58
#include "storage/tablet/tablet_fwd.h"
59
#include "storage/tablet/tablet_meta.h"
60
#include "storage/tablet/tablet_reader.h"
61
#include "storage/types.h"
62
#include "storage/utils.h"
63
#include "util/slice.h"
64
65
namespace doris {
66
Status Merger::vmerge_rowsets(BaseTabletSPtr tablet, ReaderType reader_type,
67
                              const TabletSchema& cur_tablet_schema,
68
                              const std::vector<RowsetReaderSharedPtr>& src_rowset_readers,
69
1.44k
                              RowsetWriter* dst_rowset_writer, Statistics* stats_output) {
70
1.44k
    if (!cur_tablet_schema.cluster_key_uids().empty()) {
71
0
        return Status::InternalError(
72
0
                "mow table with cluster keys does not support non vertical compaction");
73
0
    }
74
1.44k
    BlockReader reader;
75
1.44k
    TabletReader::ReaderParams reader_params;
76
1.44k
    reader_params.tablet = tablet;
77
1.44k
    reader_params.reader_type = reader_type;
78
79
1.44k
    TabletReadSource read_source;
80
1.44k
    read_source.rs_splits.reserve(src_rowset_readers.size());
81
1.55k
    for (const RowsetReaderSharedPtr& rs_reader : src_rowset_readers) {
82
1.55k
        read_source.rs_splits.emplace_back(rs_reader);
83
1.55k
    }
84
1.44k
    read_source.fill_delete_predicates();
85
1.44k
    reader_params.set_read_source(std::move(read_source));
86
87
1.44k
    reader_params.version = dst_rowset_writer->version();
88
89
1.44k
    TabletSchemaSPtr merge_tablet_schema = std::make_shared<TabletSchema>();
90
1.44k
    merge_tablet_schema->copy_from(cur_tablet_schema);
91
92
    // Merge the columns in delete predicate that not in latest schema in to current tablet schema
93
1.44k
    for (auto& del_pred_rs : reader_params.delete_predicates) {
94
24
        merge_tablet_schema->merge_dropped_columns(*del_pred_rs->tablet_schema());
95
24
    }
96
1.44k
    reader_params.tablet_schema = merge_tablet_schema;
97
1.44k
    if (!tablet->tablet_schema()->cluster_key_uids().empty()) {
98
0
        reader_params.delete_bitmap = tablet->tablet_meta()->delete_bitmap_ptr();
99
0
    }
100
101
1.44k
    if (stats_output && stats_output->rowid_conversion) {
102
48
        reader_params.record_rowids = true;
103
48
        reader_params.rowid_conversion = stats_output->rowid_conversion;
104
48
        stats_output->rowid_conversion->set_dst_rowset_id(dst_rowset_writer->rowset_id());
105
48
    }
106
107
1.44k
    reader_params.return_columns.resize(cur_tablet_schema.num_columns());
108
1.44k
    std::iota(reader_params.return_columns.begin(), reader_params.return_columns.end(), 0);
109
1.44k
    reader_params.origin_return_columns = &reader_params.return_columns;
110
1.44k
    RETURN_IF_ERROR(reader.init(reader_params));
111
112
1.44k
    Block block = cur_tablet_schema.create_block(reader_params.return_columns);
113
1.44k
    size_t output_rows = 0;
114
1.44k
    bool eof = false;
115
4.22k
    while (!eof && !ExecEnv::GetInstance()->storage_engine().stopped()) {
116
2.77k
        auto tablet_state = tablet->tablet_state();
117
2.77k
        if (tablet_state != TABLET_RUNNING && tablet_state != TABLET_NOTREADY) {
118
0
            tablet->clear_cache();
119
0
            return Status::Error<INTERNAL_ERROR>("tablet {} is not used any more",
120
0
                                                 tablet->tablet_id());
121
0
        }
122
123
        // Read one block from block reader
124
2.77k
        RETURN_NOT_OK_STATUS_WITH_WARN(reader.next_block_with_aggregation(&block, &eof),
125
2.77k
                                       "failed to read next block when merging rowsets of tablet " +
126
2.77k
                                               std::to_string(tablet->tablet_id()));
127
2.77k
        RETURN_NOT_OK_STATUS_WITH_WARN(dst_rowset_writer->add_block(&block),
128
2.77k
                                       "failed to write block when merging rowsets of tablet " +
129
2.77k
                                               std::to_string(tablet->tablet_id()));
130
131
2.77k
        if (reader_params.record_rowids && block.rows() > 0) {
132
578
            std::vector<uint32_t> segment_num_rows;
133
578
            RETURN_IF_ERROR(dst_rowset_writer->get_segment_num_rows(&segment_num_rows));
134
578
            stats_output->rowid_conversion->add(reader.current_block_row_locations(),
135
578
                                                segment_num_rows);
136
578
        }
137
138
2.77k
        output_rows += block.rows();
139
2.77k
        block.clear_column_data();
140
2.77k
    }
141
1.44k
    if (ExecEnv::GetInstance()->storage_engine().stopped()) {
142
0
        return Status::Error<INTERNAL_ERROR>("tablet {} failed to do compaction, engine stopped",
143
0
                                             tablet->tablet_id());
144
0
    }
145
146
1.44k
    if (stats_output != nullptr) {
147
1.44k
        stats_output->output_rows = output_rows;
148
1.44k
        stats_output->merged_rows = reader.merged_rows();
149
1.44k
        stats_output->filtered_rows = reader.filtered_rows();
150
1.44k
        stats_output->bytes_read_from_local = reader.stats().file_cache_stats.bytes_read_from_local;
151
1.44k
        stats_output->bytes_read_from_remote =
152
1.44k
                reader.stats().file_cache_stats.bytes_read_from_remote;
153
1.44k
        stats_output->cached_bytes_total = reader.stats().file_cache_stats.bytes_write_into_cache;
154
1.44k
        if (config::is_cloud_mode()) {
155
1.39k
            stats_output->cloud_local_read_time =
156
1.39k
                    reader.stats().file_cache_stats.local_io_timer / 1000;
157
1.39k
            stats_output->cloud_remote_read_time =
158
1.39k
                    reader.stats().file_cache_stats.remote_io_timer / 1000;
159
1.39k
        }
160
1.44k
    }
161
162
1.44k
    RETURN_NOT_OK_STATUS_WITH_WARN(dst_rowset_writer->flush(),
163
1.44k
                                   "failed to flush rowset when merging rowsets of tablet " +
164
1.44k
                                           std::to_string(tablet->tablet_id()));
165
166
1.44k
    return Status::OK();
167
1.44k
}
168
169
// split columns into several groups, make sure all keys in one group
170
// unique_key should consider sequence&delete column
171
void Merger::vertical_split_columns(const TabletSchema& tablet_schema,
172
                                    std::vector<std::vector<uint32_t>>* column_groups,
173
                                    std::vector<uint32_t>* key_group_cluster_key_idxes,
174
8.37k
                                    int32_t num_columns_per_group) {
175
8.37k
    size_t num_key_cols = tablet_schema.num_key_columns();
176
8.37k
    size_t total_cols = tablet_schema.num_columns();
177
8.37k
    std::vector<uint32_t> key_columns;
178
43.7k
    for (auto i = 0; i < num_key_cols; ++i) {
179
35.4k
        key_columns.emplace_back(i);
180
35.4k
    }
181
    // in unique key, sequence & delete sign column should merge with key columns
182
8.37k
    int32_t sequence_col_idx = -1;
183
8.37k
    int32_t delete_sign_idx = -1;
184
    // in key column compaction, seq_col real index is _num_key_columns
185
    // and delete_sign column is _block->columns() - 1
186
8.37k
    if (tablet_schema.keys_type() == KeysType::UNIQUE_KEYS) {
187
4.16k
        if (tablet_schema.has_sequence_col()) {
188
97
            sequence_col_idx = tablet_schema.sequence_col_idx();
189
97
            key_columns.emplace_back(sequence_col_idx);
190
97
        }
191
4.16k
        delete_sign_idx = tablet_schema.field_index(DELETE_SIGN);
192
4.16k
        if (delete_sign_idx != -1) {
193
4.15k
            key_columns.emplace_back(delete_sign_idx);
194
4.15k
        }
195
4.16k
        if (!tablet_schema.cluster_key_uids().empty()) {
196
463
            for (const auto& cid : tablet_schema.cluster_key_uids()) {
197
463
                auto idx = tablet_schema.field_index(cid);
198
18.4E
                DCHECK(idx >= 0) << "could not find cluster key column with unique_id=" << cid
199
18.4E
                                 << " in tablet schema, table_id=" << tablet_schema.table_id();
200
463
                if (idx >= num_key_cols) {
201
222
                    key_columns.emplace_back(idx);
202
222
                }
203
463
            }
204
            // tablet schema unique ids: [1, 2, 5, 3, 6, 4], [1 2] is key columns
205
            // cluster key unique ids: [3, 1, 4]
206
            // the key_columns should be [0, 1, 3, 5]
207
            // the key_group_cluster_key_idxes should be [2, 1, 3]
208
463
            for (const auto& cid : tablet_schema.cluster_key_uids()) {
209
463
                auto idx = tablet_schema.field_index(cid);
210
2.69k
                for (auto i = 0; i < key_columns.size(); ++i) {
211
2.69k
                    if (idx == key_columns[i]) {
212
464
                        key_group_cluster_key_idxes->emplace_back(i);
213
464
                        break;
214
464
                    }
215
2.69k
                }
216
463
            }
217
156
        }
218
4.16k
    }
219
8.37k
    VLOG_NOTICE << "sequence_col_idx=" << sequence_col_idx
220
47
                << ", delete_sign_idx=" << delete_sign_idx;
221
    // for duplicate no keys
222
8.37k
    if (!key_columns.empty()) {
223
8.35k
        column_groups->emplace_back(key_columns);
224
8.35k
    }
225
226
8.37k
    std::vector<uint32_t> value_columns;
227
228
71.5k
    for (size_t i = num_key_cols; i < total_cols; ++i) {
229
63.2k
        if (i == sequence_col_idx || i == delete_sign_idx ||
230
63.2k
            key_columns.end() != std::find(key_columns.begin(), key_columns.end(), i)) {
231
4.47k
            continue;
232
4.47k
        }
233
234
58.7k
        if (!value_columns.empty() && value_columns.size() % num_columns_per_group == 0) {
235
6.88k
            column_groups->push_back(value_columns);
236
6.88k
            value_columns.clear();
237
6.88k
        }
238
58.7k
        value_columns.push_back(cast_set<uint32_t>(i));
239
58.7k
    }
240
241
8.37k
    if (!value_columns.empty()) {
242
7.81k
        column_groups->push_back(value_columns);
243
7.81k
    }
244
8.37k
}
245
246
Status Merger::vertical_compact_one_group(
247
        BaseTabletSPtr tablet, ReaderType reader_type, const TabletSchema& tablet_schema,
248
        bool is_key, const std::vector<uint32_t>& column_group, RowSourcesBuffer* row_source_buf,
249
        const std::vector<RowsetReaderSharedPtr>& src_rowset_readers,
250
        RowsetWriter* dst_rowset_writer, uint32_t max_rows_per_segment, Statistics* stats_output,
251
        std::vector<uint32_t> key_group_cluster_key_idxes, int64_t batch_size,
252
23.0k
        CompactionSampleInfo* sample_info, bool enable_sparse_optimization) {
253
    // build tablet reader
254
23.0k
    VLOG_NOTICE << "vertical compact one group, max_rows_per_segment=" << max_rows_per_segment;
255
23.0k
    VerticalBlockReader reader(row_source_buf);
256
23.0k
    TabletReader::ReaderParams reader_params;
257
23.0k
    reader_params.is_key_column_group = is_key;
258
23.0k
    reader_params.key_group_cluster_key_idxes = key_group_cluster_key_idxes;
259
23.0k
    reader_params.tablet = tablet;
260
23.0k
    reader_params.reader_type = reader_type;
261
23.0k
    reader_params.enable_sparse_optimization = enable_sparse_optimization;
262
263
23.0k
    TabletReadSource read_source;
264
23.0k
    read_source.rs_splits.reserve(src_rowset_readers.size());
265
175k
    for (const RowsetReaderSharedPtr& rs_reader : src_rowset_readers) {
266
175k
        read_source.rs_splits.emplace_back(rs_reader);
267
175k
    }
268
23.0k
    read_source.fill_delete_predicates();
269
23.0k
    reader_params.set_read_source(std::move(read_source));
270
271
23.0k
    reader_params.version = dst_rowset_writer->version();
272
273
23.0k
    TabletSchemaSPtr merge_tablet_schema = std::make_shared<TabletSchema>();
274
23.0k
    merge_tablet_schema->copy_from(tablet_schema);
275
276
23.0k
    for (auto& del_pred_rs : reader_params.delete_predicates) {
277
708
        merge_tablet_schema->merge_dropped_columns(*del_pred_rs->tablet_schema());
278
708
    }
279
280
23.0k
    reader_params.tablet_schema = merge_tablet_schema;
281
23.0k
    bool has_cluster_key = false;
282
23.0k
    if (!tablet->tablet_schema()->cluster_key_uids().empty()) {
283
441
        reader_params.delete_bitmap = tablet->tablet_meta()->delete_bitmap_ptr();
284
441
        has_cluster_key = true;
285
441
    }
286
287
23.0k
    if (is_key && stats_output && stats_output->rowid_conversion) {
288
4.41k
        reader_params.record_rowids = true;
289
4.41k
        reader_params.rowid_conversion = stats_output->rowid_conversion;
290
4.41k
        stats_output->rowid_conversion->set_dst_rowset_id(dst_rowset_writer->rowset_id());
291
4.41k
    }
292
293
23.0k
    reader_params.return_columns = column_group;
294
23.0k
    reader_params.origin_return_columns = &reader_params.return_columns;
295
23.0k
    reader_params.batch_size = batch_size;
296
23.0k
    RETURN_IF_ERROR(reader.init(reader_params, sample_info));
297
298
23.0k
    Block block = tablet_schema.create_block(reader_params.return_columns);
299
23.0k
    size_t output_rows = 0;
300
23.0k
    bool eof = false;
301
56.9k
    while (!eof && !ExecEnv::GetInstance()->storage_engine().stopped()) {
302
33.8k
        auto tablet_state = tablet->tablet_state();
303
33.8k
        if (tablet_state != TABLET_RUNNING && tablet_state != TABLET_NOTREADY) {
304
0
            tablet->clear_cache();
305
0
            return Status::Error<INTERNAL_ERROR>("tablet {} is not used any more",
306
0
                                                 tablet->tablet_id());
307
0
        }
308
        // Read one block from block reader
309
33.8k
        RETURN_NOT_OK_STATUS_WITH_WARN(reader.next_block_with_aggregation(&block, &eof),
310
33.8k
                                       "failed to read next block when merging rowsets of tablet " +
311
33.8k
                                               std::to_string(tablet->tablet_id()));
312
33.8k
        RETURN_NOT_OK_STATUS_WITH_WARN(
313
33.8k
                dst_rowset_writer->add_columns(&block, column_group, is_key, max_rows_per_segment,
314
33.8k
                                               has_cluster_key),
315
33.8k
                "failed to write block when merging rowsets of tablet " +
316
33.8k
                        std::to_string(tablet->tablet_id()));
317
318
33.8k
        if (is_key && reader_params.record_rowids && block.rows() > 0) {
319
4.69k
            std::vector<uint32_t> segment_num_rows;
320
4.69k
            RETURN_IF_ERROR(dst_rowset_writer->get_segment_num_rows(&segment_num_rows));
321
4.69k
            stats_output->rowid_conversion->add(reader.current_block_row_locations(),
322
4.69k
                                                segment_num_rows);
323
4.69k
        }
324
33.8k
        output_rows += block.rows();
325
33.8k
        block.clear_column_data();
326
33.8k
    }
327
23.0k
    if (ExecEnv::GetInstance()->storage_engine().stopped()) {
328
0
        return Status::Error<INTERNAL_ERROR>("tablet {} failed to do compaction, engine stopped",
329
0
                                             tablet->tablet_id());
330
0
    }
331
332
23.0k
    if (stats_output != nullptr) {
333
23.0k
        if (is_key) {
334
8.36k
            stats_output->output_rows = output_rows;
335
8.36k
            stats_output->merged_rows = reader.merged_rows();
336
8.36k
            stats_output->filtered_rows = reader.filtered_rows();
337
8.36k
        }
338
23.0k
        stats_output->bytes_read_from_local = reader.stats().file_cache_stats.bytes_read_from_local;
339
23.0k
        stats_output->bytes_read_from_remote =
340
23.0k
                reader.stats().file_cache_stats.bytes_read_from_remote;
341
23.0k
        stats_output->cached_bytes_total = reader.stats().file_cache_stats.bytes_write_into_cache;
342
23.0k
        if (config::is_cloud_mode()) {
343
20.0k
            stats_output->cloud_local_read_time =
344
20.0k
                    reader.stats().file_cache_stats.local_io_timer / 1000;
345
20.0k
            stats_output->cloud_remote_read_time =
346
20.0k
                    reader.stats().file_cache_stats.remote_io_timer / 1000;
347
20.0k
        }
348
23.0k
    }
349
23.0k
    RETURN_IF_ERROR(dst_rowset_writer->flush_columns(is_key));
350
351
23.0k
    return Status::OK();
352
23.0k
}
353
354
// for segcompaction
355
Status Merger::vertical_compact_one_group(
356
        int64_t tablet_id, ReaderType reader_type, const TabletSchema& tablet_schema, bool is_key,
357
        const std::vector<uint32_t>& column_group, RowSourcesBuffer* row_source_buf,
358
        VerticalBlockReader& src_block_reader, segment_v2::SegmentWriter& dst_segment_writer,
359
        Statistics* stats_output, uint64_t* index_size, KeyBoundsPB& key_bounds,
360
22
        SimpleRowIdConversion* rowid_conversion) {
361
    // TODO: record_rowids
362
22
    Block block = tablet_schema.create_block(column_group);
363
22
    size_t output_rows = 0;
364
22
    bool eof = false;
365
138
    while (!eof && !ExecEnv::GetInstance()->storage_engine().stopped()) {
366
        // Read one block from block reader
367
116
        RETURN_NOT_OK_STATUS_WITH_WARN(src_block_reader.next_block_with_aggregation(&block, &eof),
368
116
                                       "failed to read next block when merging rowsets of tablet " +
369
116
                                               std::to_string(tablet_id));
370
116
        if (!block.rows()) {
371
0
            break;
372
0
        }
373
116
        RETURN_NOT_OK_STATUS_WITH_WARN(dst_segment_writer.append_block(&block, 0, block.rows()),
374
116
                                       "failed to write block when merging rowsets of tablet " +
375
116
                                               std::to_string(tablet_id));
376
377
116
        if (is_key && rowid_conversion != nullptr) {
378
30
            rowid_conversion->add(src_block_reader.current_block_row_locations());
379
30
        }
380
116
        output_rows += block.rows();
381
116
        block.clear_column_data();
382
116
    }
383
22
    if (ExecEnv::GetInstance()->storage_engine().stopped()) {
384
0
        return Status::Error<INTERNAL_ERROR>("tablet {} failed to do compaction, engine stopped",
385
0
                                             tablet_id);
386
0
    }
387
388
22
    if (stats_output != nullptr) {
389
22
        if (is_key) {
390
11
            stats_output->output_rows = output_rows;
391
11
            stats_output->merged_rows = src_block_reader.merged_rows();
392
11
            stats_output->filtered_rows = src_block_reader.filtered_rows();
393
11
        }
394
22
        stats_output->bytes_read_from_local =
395
22
                src_block_reader.stats().file_cache_stats.bytes_read_from_local;
396
22
        stats_output->bytes_read_from_remote =
397
22
                src_block_reader.stats().file_cache_stats.bytes_read_from_remote;
398
22
        stats_output->cached_bytes_total =
399
22
                src_block_reader.stats().file_cache_stats.bytes_write_into_cache;
400
22
    }
401
402
    // segcompaction produce only one segment at once
403
22
    RETURN_IF_ERROR(dst_segment_writer.finalize_columns_data());
404
22
    RETURN_IF_ERROR(dst_segment_writer.finalize_columns_index(index_size));
405
406
22
    if (is_key) {
407
11
        Slice min_key = dst_segment_writer.min_encoded_key();
408
11
        Slice max_key = dst_segment_writer.max_encoded_key();
409
11
        DCHECK_LE(min_key.compare(max_key), 0);
410
11
        key_bounds.set_min_key(min_key.to_string());
411
11
        key_bounds.set_max_key(max_key.to_string());
412
11
    }
413
414
22
    return Status::OK();
415
22
}
416
417
int64_t estimate_batch_size(int group_index, BaseTabletSPtr tablet, int64_t way_cnt,
418
                            ReaderType reader_type, int64_t group_per_row_from_footer,
419
22.8k
                            bool footer_fallback) {
420
22.8k
    auto& sample_info_lock = tablet->get_sample_info_lock(reader_type);
421
22.8k
    auto& sample_infos = tablet->get_sample_infos(reader_type);
422
22.8k
    std::unique_lock<std::mutex> lock(sample_info_lock);
423
22.8k
    CompactionSampleInfo info = sample_infos[group_index];
424
22.8k
    if (way_cnt <= 0) {
425
6.98k
        LOG(INFO) << "estimate batch size for vertical compaction, tablet id: "
426
6.98k
                  << tablet->tablet_id() << " way cnt: " << way_cnt;
427
6.98k
        return 4096 - 32;
428
6.98k
    }
429
15.8k
    int64_t block_mem_limit = config::compaction_memory_bytes_limit / way_cnt;
430
15.8k
    if (tablet->last_compaction_status.is<ErrorCode::MEM_LIMIT_EXCEEDED>()) {
431
0
        block_mem_limit /= 4;
432
0
    }
433
434
15.8k
    int64_t group_data_size = 0;
435
15.8k
    if (info.group_data_size > 0 && info.bytes > 0 && info.rows > 0) {
436
0
        double smoothing_factor = 0.5;
437
0
        group_data_size =
438
0
                int64_t((cast_set<double>(info.group_data_size) * (1 - smoothing_factor)) +
439
0
                        (cast_set<double>(info.bytes / info.rows) * smoothing_factor));
440
0
        sample_infos[group_index].group_data_size = group_data_size;
441
15.8k
    } else if (info.group_data_size > 0 && (info.bytes <= 0 || info.rows <= 0)) {
442
0
        group_data_size = info.group_data_size;
443
15.8k
    } else if (info.group_data_size <= 0 && info.bytes > 0 && info.rows > 0) {
444
8.81k
        group_data_size = info.bytes / info.rows;
445
8.81k
        sample_infos[group_index].group_data_size = group_data_size;
446
8.81k
    } else {
447
        // No historical sampling data available.
448
        // Try to use raw_data_bytes from segment footer for a better estimate.
449
7.03k
        if (!footer_fallback && group_per_row_from_footer > 0) {
450
6.54k
            int64_t batch_size = block_mem_limit / group_per_row_from_footer;
451
6.54k
            int64_t res = std::max(std::min(batch_size, int64_t(4096 - 32)), int64_t(32L));
452
6.54k
            LOG(INFO) << "estimate batch size from footer for vertical compaction, tablet id: "
453
6.54k
                      << tablet->tablet_id()
454
6.54k
                      << " group_per_row_from_footer: " << group_per_row_from_footer
455
6.54k
                      << " way cnt: " << way_cnt << " batch size: " << res;
456
6.54k
            return res;
457
6.54k
        }
458
7.03k
        LOG(INFO) << "estimate batch size for vertical compaction, tablet id: "
459
488
                  << tablet->tablet_id() << " group data size: " << info.group_data_size
460
488
                  << " row num: " << info.rows << " consume bytes: " << info.bytes
461
488
                  << " footer_fallback: " << footer_fallback;
462
488
        return 1024 - 32;
463
7.03k
    }
464
465
8.81k
    if (group_data_size <= 0) {
466
0
        LOG(WARNING) << "estimate batch size for vertical compaction, tablet id: "
467
0
                     << tablet->tablet_id() << " unexpected group data size: " << group_data_size;
468
0
        return 4096 - 32;
469
0
    }
470
471
8.81k
    sample_infos[group_index].bytes = 0;
472
8.81k
    sample_infos[group_index].rows = 0;
473
474
8.81k
    int64_t batch_size = block_mem_limit / group_data_size;
475
8.81k
    int64_t res = std::max(std::min(batch_size, int64_t(4096 - 32)), int64_t(32L));
476
8.81k
    LOG(INFO) << "estimate batch size for vertical compaction, tablet id: " << tablet->tablet_id()
477
8.81k
              << " group data size: " << info.group_data_size << " row num: " << info.rows
478
8.81k
              << " consume bytes: " << info.bytes << " way cnt: " << way_cnt
479
8.81k
              << " batch size: " << res;
480
8.81k
    return res;
481
8.81k
}
482
483
// steps to do vertical merge:
484
// 1. split columns into column groups
485
// 2. compact groups one by one, generate a row_source_buf when compact key group
486
// and use this row_source_buf to compact value column groups
487
// 3. build output rowset
488
/// Performs vertical compaction over `src_rowset_readers` into `dst_rowset_writer`.
///
/// Steps:
///   1. Split columns into column groups (key group first, then value groups).
///   2. Compact groups one by one; compacting the key group fills `row_sources_buf`,
///      which value-group passes then replay to merge rows in the same order.
///   3. Final-flush the destination writer to build the output rowset.
///
/// @param tablet               tablet being compacted (also holds sampling state and density)
/// @param reader_type          compaction reader type, selects per-type sample info on the tablet
/// @param tablet_schema        schema used to split columns and size batches
/// @param src_rowset_readers   input rowsets to merge
/// @param dst_rowset_writer    output rowset writer; final_flush() is called at the end
/// @param max_rows_per_segment segment-size cap forwarded to per-group compaction
/// @param merge_way_num        merge-way count forwarded to batch-size estimation
/// @param stats_output         optional; receives aggregated per-group statistics
/// @param progress_cb          optional; invoked with (total_groups, finished_groups)
/// @return Status::OK() on success; first per-group / flush error otherwise
Status Merger::vertical_merge_rowsets(BaseTabletSPtr tablet, ReaderType reader_type,
                                      const TabletSchema& tablet_schema,
                                      const std::vector<RowsetReaderSharedPtr>& src_rowset_readers,
                                      RowsetWriter* dst_rowset_writer,
                                      uint32_t max_rows_per_segment, int64_t merge_way_num,
                                      Statistics* stats_output,
                                      VerticalCompactionProgressCallback progress_cb) {
    LOG(INFO) << "Start to do vertical compaction, tablet_id: " << tablet->tablet_id();
    std::vector<std::vector<uint32_t>> column_groups;
    std::vector<uint32_t> key_group_cluster_key_idxes;
    // If BE config vertical_compaction_num_columns_per_group has been modified from
    // its default value (5), use the BE config; otherwise use the tablet meta value.
    constexpr int32_t default_num_columns_per_group = 5;
    int32_t num_columns_per_group =
            config::vertical_compaction_num_columns_per_group != default_num_columns_per_group
                    ? config::vertical_compaction_num_columns_per_group
                    : tablet->tablet_meta()->vertical_compaction_num_columns_per_group();

    // Debug-only hook: lets tests assert which num_columns_per_group a given tablet resolved to.
    DBUG_EXECUTE_IF("Merger.vertical_merge_rowsets.check_num_columns_per_group", {
        auto expected_value = DebugPoints::instance()->get_debug_param_or_default<int32_t>(
                "Merger.vertical_merge_rowsets.check_num_columns_per_group", "expected_value", -1);
        auto expected_tablet_id = DebugPoints::instance()->get_debug_param_or_default<int64_t>(
                "Merger.vertical_merge_rowsets.check_num_columns_per_group", "tablet_id", -1);
        if (expected_tablet_id != -1 && expected_tablet_id == tablet->tablet_id()) {
            if (expected_value != -1 && expected_value != num_columns_per_group) {
                LOG(FATAL) << "DEBUG_POINT CHECK FAILED: expected num_columns_per_group="
                           << expected_value << " but got " << num_columns_per_group
                           << " for tablet_id=" << tablet->tablet_id();
            } else {
                LOG(INFO) << "DEBUG_POINT CHECK PASSED: num_columns_per_group="
                          << num_columns_per_group << ", tablet_id=" << tablet->tablet_id();
            }
        }
    });

    vertical_split_columns(tablet_schema, &column_groups, &key_group_cluster_key_idxes,
                           num_columns_per_group);

    if (progress_cb) {
        progress_cb(column_groups.size(), 0);
    }

    // Calculate total rows for density calculation after compaction
    int64_t total_rows = 0;
    for (const auto& rs_reader : src_rowset_readers) {
        total_rows += rs_reader->rowset()->rowset_meta()->num_rows();
    }

    // Use historical density for sparse wide table optimization
    // density = (total_cells - null_cells) / total_cells, smaller means more sparse
    // When density <= threshold, enable sparse optimization
    // threshold = 0 means disable, 1 means always enable (default)
    bool enable_sparse_optimization = false;
    if (config::sparse_column_compaction_threshold_percent > 0 &&
        tablet->keys_type() == KeysType::UNIQUE_KEYS) {
        double density = tablet->compaction_density.load();
        enable_sparse_optimization = density <= config::sparse_column_compaction_threshold_percent;

        LOG(INFO) << "Vertical compaction sparse optimization check: tablet_id="
                  << tablet->tablet_id() << ", density=" << density
                  << ", threshold=" << config::sparse_column_compaction_threshold_percent
                  << ", total_rows=" << total_rows
                  << ", num_columns=" << tablet_schema.num_columns()
                  << ", total_cells=" << total_rows * tablet_schema.num_columns()
                  << ", enable_sparse_optimization=" << enable_sparse_optimization;
    }

    RowSourcesBuffer row_sources_buf(tablet->tablet_id(), dst_rowset_writer->context().tablet_path,
                                     reader_type);
    Merger::Statistics total_stats;
    if (stats_output != nullptr) {
        total_stats.rowid_conversion = stats_output->rowid_conversion;
    }
    // Per-(tablet, reader_type) sampling state shared across compaction runs;
    // every access below is guarded by sample_info_lock.
    auto& sample_info_lock = tablet->get_sample_info_lock(reader_type);
    auto& sample_infos = tablet->get_sample_infos(reader_type);
    {
        std::unique_lock<std::mutex> lock(sample_info_lock);
        sample_infos.resize(column_groups.size());
    }
    // Collect per-column raw_data_bytes from segment footer for first-time batch size estimation.
    // raw_data_bytes is the original data size before encoding, close to runtime Block::bytes().
    // Only collect when needed: skip if manual batch_size override is set, or if ALL groups
    // already have historical sampling data. Use per-group granularity so that schema evolution
    // (new groups without history) still gets footer-based estimation.
    struct ColumnRawSizeInfo {
        int64_t total_raw_bytes = 0;
        int64_t rows_with_data = 0;
    };
    std::unordered_map<int32_t, ColumnRawSizeInfo> column_raw_sizes;
    bool need_footer_collection = false;
    if (config::compaction_batch_size == -1) {
        std::unique_lock<std::mutex> lock(sample_info_lock);
        for (const auto& info : sample_infos) {
            // A group with no historical size/byte/row sample has never been compacted
            // with sampling; one such group is enough to trigger footer collection.
            if (info.group_data_size <= 0 && info.bytes <= 0 && info.rows <= 0) {
                need_footer_collection = true;
                break;
            }
        }
    }
    if (need_footer_collection) {
        for (const auto& rs_reader : src_rowset_readers) {
            auto beta_rowset = std::dynamic_pointer_cast<BetaRowset>(rs_reader->rowset());
            if (!beta_rowset) {
                continue;
            }
            std::vector<segment_v2::SegmentSharedPtr> segments;
            auto st = beta_rowset->load_segments(&segments);
            if (!st.ok()) {
                // Footer stats are best-effort: a rowset we cannot read just
                // falls back to default estimation for the columns it covers.
                LOG(WARNING) << "Failed to load segments for footer raw_data_bytes collection"
                             << ", tablet_id: " << tablet->tablet_id()
                             << ", rowset_id: " << beta_rowset->rowset_id() << ", status: " << st;
                continue;
            }
            for (const auto& segment : segments) {
                int64_t row_count = segment->num_rows();
                auto collect_st = segment->traverse_column_meta_pbs(
                        [&](const segment_v2::ColumnMetaPB& meta) {
                            int32_t uid = meta.unique_id();
                            if (uid >= 0 && meta.has_raw_data_bytes()) {
                                auto& info = column_raw_sizes[uid];
                                info.total_raw_bytes += meta.raw_data_bytes();
                                info.rows_with_data += row_count;
                            }
                        });
                if (!collect_st.ok()) {
                    LOG(WARNING) << "Failed to traverse column meta for footer collection"
                                 << ", tablet_id: " << tablet->tablet_id()
                                 << ", status: " << collect_st;
                }
            }
        }
    }

    // Pre-compute per-row estimate for each column group from footer data.
    std::vector<int64_t> group_per_row_from_footer(column_groups.size(), 0);
    std::vector<bool> group_footer_fallback(column_groups.size(), false);
    for (size_t i = 0; i < column_groups.size(); ++i) {
        int64_t group_per_row = 0;
        bool need_fallback = false;
        for (uint32_t col_ordinal : column_groups[i]) {
            const auto& col = tablet_schema.column(col_ordinal);
            int32_t uid = col.unique_id();

            // Variant columns (root or subcolumn): raw_data_bytes is 0 (TODO in writer),
            // cannot estimate from footer, fallback to default for the entire group.
            if (uid < 0 || col.is_variant_type()) {
                need_fallback = true;
                break;
            }

            // Any column without footer data (e.g. legacy segments written before
            // raw_data_bytes existed) makes the group sample partial and unreliable.
            // Fall back to the default for the whole group instead of summing only
            // the columns we measured.
            auto it = column_raw_sizes.find(uid);
            if (it == column_raw_sizes.end() || it->second.rows_with_data <= 0) {
                need_fallback = true;
                break;
            }

            int64_t raw_per_row = it->second.total_raw_bytes / it->second.rows_with_data;
            int64_t col_per_row = 0;

            if (col.type() == FieldType::OLAP_FIELD_TYPE_ARRAY ||
                col.type() == FieldType::OLAP_FIELD_TYPE_MAP ||
                col.type() == FieldType::OLAP_FIELD_TYPE_STRUCT) {
                // Complex types: raw_data_bytes recursively aggregates sub-writers.
                col_per_row = raw_per_row;
            } else if (col.is_length_variable_type()) {
                // Variable-length scalar (VARCHAR/STRING/HLL/BITMAP/...): raw_per_row
                // is the average char payload across all rows; reader still pays an
                // 8-byte offset entry per row regardless of null-ness.
                col_per_row = raw_per_row + 8;
                if (col.is_nullable()) {
                    col_per_row += 1; // null map
                }
            } else {
                // Fixed-width scalar (INT/BIGINT/DOUBLE/DATE/...).
                // raw_data_bytes only counts non-null payload (append_nulls() does
                // not advance the page builder), but FileColumnIterator::next_batch
                // still calls ColumnNullable::insert_many_defaults() for null runs,
                // which grows the nested PODArray by N * type_size. So the runtime
                // per-row footprint is at least type_size, no matter how sparse.
                int64_t type_size = get_type_info(&col)->size();
                col_per_row = std::max(raw_per_row, type_size);
                if (col.is_nullable()) {
                    col_per_row += 1; // null map
                }
            }

            group_per_row += col_per_row;
        }
        group_per_row_from_footer[i] = group_per_row;
        group_footer_fallback[i] = need_fallback;
    }

    // compact group one by one
    // NOTE: size_t index to match column_groups.size() (the previous `auto i = 0`
    // deduced int, mixing signed/unsigned in the comparison).
    for (size_t i = 0; i < column_groups.size(); ++i) {
        VLOG_NOTICE << "row source size: " << row_sources_buf.total_size();
        // Group 0 is the key group; it drives row merging and fills row_sources_buf.
        bool is_key = (i == 0);
        int64_t batch_size = config::compaction_batch_size != -1
                                     ? config::compaction_batch_size
                                     : estimate_batch_size(i, tablet, merge_way_num, reader_type,
                                                           group_per_row_from_footer[i],
                                                           group_footer_fallback[i]);
        CompactionSampleInfo sample_info;
        Merger::Statistics group_stats;
        group_stats.rowid_conversion = total_stats.rowid_conversion;
        Merger::Statistics* group_stats_ptr = stats_output != nullptr ? &group_stats : nullptr;
        Status st = vertical_compact_one_group(
                tablet, reader_type, tablet_schema, is_key, column_groups[i], &row_sources_buf,
                src_rowset_readers, dst_rowset_writer, max_rows_per_segment, group_stats_ptr,
                key_group_cluster_key_idxes, batch_size, &sample_info, enable_sparse_optimization);
        {
            // Persist this group's sample even on failure so the next run can use it.
            std::unique_lock<std::mutex> lock(sample_info_lock);
            sample_infos[i] = sample_info;
        }
        RETURN_IF_ERROR(st);
        if (stats_output != nullptr) {
            // Byte/time counters accumulate across all groups; row counters are
            // authoritative only from the key group, which decides row merging.
            total_stats.bytes_read_from_local += group_stats.bytes_read_from_local;
            total_stats.bytes_read_from_remote += group_stats.bytes_read_from_remote;
            total_stats.cached_bytes_total += group_stats.cached_bytes_total;
            total_stats.cloud_local_read_time += group_stats.cloud_local_read_time;
            total_stats.cloud_remote_read_time += group_stats.cloud_remote_read_time;
            if (is_key) {
                total_stats.output_rows = group_stats.output_rows;
                total_stats.merged_rows = group_stats.merged_rows;
                total_stats.filtered_rows = group_stats.filtered_rows;
                total_stats.rowid_conversion = group_stats.rowid_conversion;
            }
        }
        if (progress_cb) {
            progress_cb(column_groups.size(), i + 1);
        }
        if (is_key) {
            // Key group just produced the row sources; flush before value groups read them.
            RETURN_IF_ERROR(row_sources_buf.flush());
        }
        RETURN_IF_ERROR(row_sources_buf.seek_to_begin());
    }

    // Calculate and store density for next compaction's sparse optimization threshold
    // density = (total_cells - total_null_count) / total_cells
    // Smaller density means more sparse
    {
        std::unique_lock<std::mutex> lock(sample_info_lock);
        int64_t total_null_count = 0;
        for (const auto& info : sample_infos) {
            total_null_count += info.null_count;
        }
        int64_t total_cells = total_rows * tablet_schema.num_columns();
        if (total_cells > 0) {
            double density = static_cast<double>(total_cells - total_null_count) /
                             static_cast<double>(total_cells);
            tablet->compaction_density.store(density);
            LOG(INFO) << "Vertical compaction density update: tablet_id=" << tablet->tablet_id()
                      << ", total_cells=" << total_cells
                      << ", total_null_count=" << total_null_count << ", density=" << density;
        }
    }

    // finish compact, build output rowset
    VLOG_NOTICE << "finish compact groups";
    RETURN_IF_ERROR(dst_rowset_writer->final_flush());

    if (stats_output != nullptr) {
        *stats_output = total_stats;
    }

    return Status::OK();
}
758
} // namespace doris