Coverage Report

Created: 2026-05-09 09:50

be/src/storage/compaction/compaction.cpp
Line | Count | Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "storage/compaction/compaction.h"
19
20
#include <fmt/format.h>
21
#include <gen_cpp/olap_file.pb.h>
22
#include <glog/logging.h>
23
24
#include <algorithm>
25
#include <atomic>
26
#include <cstdint>
27
#include <cstdlib>
28
#include <list>
29
#include <map>
30
#include <memory>
31
#include <mutex>
32
#include <nlohmann/json.hpp>
33
#include <numeric>
34
#include <ostream>
35
#include <set>
36
#include <shared_mutex>
37
#include <utility>
38
39
#include "cloud/cloud_meta_mgr.h"
40
#include "cloud/cloud_storage_engine.h"
41
#include "cloud/cloud_tablet.h"
42
#include "cloud/pb_convert.h"
43
#include "common/config.h"
44
#include "common/metrics/doris_metrics.h"
45
#include "common/status.h"
46
#include "cpp/sync_point.h"
47
#include "exec/common/variant_util.h"
48
#include "io/cache/block_file_cache_factory.h"
49
#include "io/fs/file_system.h"
50
#include "io/fs/file_writer.h"
51
#include "io/fs/remote_file_system.h"
52
#include "io/io_common.h"
53
#include "runtime/memory/mem_tracker_limiter.h"
54
#include "runtime/thread_context.h"
55
#include "storage/compaction/collection_statistics.h"
56
#include "storage/compaction/cumulative_compaction.h"
57
#include "storage/compaction/cumulative_compaction_policy.h"
58
#include "storage/compaction/cumulative_compaction_time_series_policy.h"
59
#include "storage/compaction_task_tracker.h"
60
#include "storage/data_dir.h"
61
#include "storage/index/index_file_reader.h"
62
#include "storage/index/index_file_writer.h"
63
#include "storage/index/inverted/inverted_index_compaction.h"
64
#include "storage/index/inverted/inverted_index_desc.h"
65
#include "storage/index/inverted/inverted_index_fs_directory.h"
66
#include "storage/olap_common.h"
67
#include "storage/olap_define.h"
68
#include "storage/rowset/beta_rowset.h"
69
#include "storage/rowset/beta_rowset_reader.h"
70
#include "storage/rowset/beta_rowset_writer.h"
71
#include "storage/rowset/rowset.h"
72
#include "storage/rowset/rowset_fwd.h"
73
#include "storage/rowset/rowset_meta.h"
74
#include "storage/rowset/rowset_writer.h"
75
#include "storage/rowset/rowset_writer_context.h"
76
#include "storage/storage_engine.h"
77
#include "storage/storage_policy.h"
78
#include "storage/tablet/tablet.h"
79
#include "storage/tablet/tablet_meta.h"
80
#include "storage/tablet/tablet_meta_manager.h"
81
#include "storage/task/engine_checksum_task.h"
82
#include "storage/txn/txn_manager.h"
83
#include "storage/utils.h"
84
#include "util/pretty_printer.h"
85
#include "util/time.h"
86
#include "util/trace.h"
87
88
using std::vector;
89
90
namespace doris {
91
using namespace ErrorCode;
92
93
// Determine whether to enable index-only file cache mode for compaction output.
94
// This function decides if only index files should be written to cache, based on:
95
// - write_file_cache: whether file cache is enabled
96
// - compaction_type: type of compaction (base or cumulative)
97
// - enable_base_index_only: config flag for base compaction
98
// - enable_cumu_index_only: config flag for cumulative compaction
99
// Returns true if index-only mode should be enabled, false otherwise.
100
bool should_enable_compaction_cache_index_only(bool write_file_cache, ReaderType compaction_type,
101
                                               bool enable_base_index_only,
102
6.98k
                                               bool enable_cumu_index_only) {
103
6.98k
    if (!write_file_cache) {
104
120
        return false;
105
120
    }
106
107
6.86k
    if (compaction_type == ReaderType::READER_BASE_COMPACTION && enable_base_index_only) {
108
2
        return true;
109
2
    }
110
111
6.86k
    if (compaction_type == ReaderType::READER_CUMULATIVE_COMPACTION && enable_cumu_index_only) {
112
2
        return true;
113
2
    }
114
115
6.86k
    return false;
116
6.86k
}
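
A minimal usage sketch of the helper above (hypothetical call site; the argument values are illustrative, not actual config defaults):

// Decide whether only the index files of the compaction output should go into the file cache.
bool index_only = should_enable_compaction_cache_index_only(
        /*write_file_cache=*/true, ReaderType::READER_CUMULATIVE_COMPACTION,
        /*enable_base_index_only=*/false, /*enable_cumu_index_only=*/true);
// index_only == true here: cumulative compaction with the cumu index-only flag set.
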
117
118
namespace {
119
120
bool is_rowset_tidy(std::string& pre_max_key, bool& pre_rs_key_bounds_truncated,
121
3.81k
                    const RowsetSharedPtr& rhs) {
122
3.81k
    size_t min_tidy_size = config::ordered_data_compaction_min_segment_size;
123
3.81k
    if (rhs->num_segments() == 0) {
124
3.44k
        return true;
125
3.44k
    }
126
372
    if (rhs->is_segments_overlapping()) {
127
0
        return false;
128
0
    }
129
    // check segment size
130
372
    auto* beta_rowset = reinterpret_cast<BetaRowset*>(rhs.get());
131
372
    std::vector<size_t> segments_size;
132
372
    RETURN_FALSE_IF_ERROR(beta_rowset->get_segments_size(&segments_size));
133
377
    for (auto segment_size : segments_size) {
134
        // if a segment is too small, it needs to be compacted
135
377
        if (segment_size < min_tidy_size) {
136
333
            return false;
137
333
        }
138
377
    }
139
38
    std::string min_key;
140
38
    auto ret = rhs->first_key(&min_key);
141
38
    if (!ret) {
142
0
        return false;
143
0
    }
144
38
    bool cur_rs_key_bounds_truncated {rhs->is_segments_key_bounds_truncated()};
145
38
    if (!Slice::lhs_is_strictly_less_than_rhs(Slice {pre_max_key}, pre_rs_key_bounds_truncated,
146
38
                                              Slice {min_key}, cur_rs_key_bounds_truncated)) {
147
5
        return false;
148
5
    }
149
38
    CHECK(rhs->last_key(&pre_max_key));
150
33
    pre_rs_key_bounds_truncated = cur_rs_key_bounds_truncated;
151
33
    return true;
152
38
}
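
The checks above effectively require the surviving rowsets to form one globally sorted, non-overlapping key sequence (plus a minimum segment size). A small illustration of the strictly-less-than requirement (keys are made up):

// rowset A keys [a .. f], rowset B keys [g .. m]  -> tidy, linking files is safe
// rowset A keys [a .. k], rowset B keys [g .. m]  -> not tidy (k >= g), must merge
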
153
154
} // namespace
155
156
Compaction::Compaction(BaseTabletSPtr tablet, const std::string& label)
157
314k
        : _compaction_id(CompactionTaskTracker::instance()->next_compaction_id()),
158
          _mem_tracker(
159
314k
                  MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::COMPACTION, label)),
160
314k
          _tablet(std::move(tablet)),
161
314k
          _is_vertical(config::enable_vertical_compaction),
162
314k
          _allow_delete_in_cumu_compaction(config::enable_delete_when_cumu_compaction),
163
          _enable_vertical_compact_variant_subcolumns(
164
314k
                  config::enable_vertical_compact_variant_subcolumns),
165
314k
          _enable_inverted_index_compaction(config::inverted_index_compaction_enable) {
166
314k
    init_profile(label);
167
314k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
168
314k
    _rowid_conversion = std::make_unique<RowIdConversion>();
169
314k
}
170
171
314k
Compaction::~Compaction() {
172
314k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
173
314k
    _output_rs_writer.reset();
174
314k
    _tablet.reset();
175
314k
    _input_rowsets.clear();
176
314k
    _output_rowset.reset();
177
314k
    _cur_tablet_schema.reset();
178
314k
    _rowid_conversion.reset();
179
314k
}
180
181
15.4k
std::string Compaction::input_version_range_str() const {
182
15.4k
    if (_input_rowsets.empty()) return "";
183
15.4k
    return fmt::format("[{}-{}]", _input_rowsets.front()->start_version(),
184
15.4k
                       _input_rowsets.back()->end_version());
185
15.4k
}
186
187
void Compaction::submit_profile_record(bool success, int64_t start_time_ms,
188
8.39k
                                       const std::string& status_msg) {
189
8.39k
    if (!profile_type().has_value()) {
190
846
        return;
191
846
    }
192
7.54k
    auto* tracker = CompactionTaskTracker::instance();
193
7.54k
    CompletionStats stats;
194
    // Input stats for backfill: local compaction fills these in build_basic_info()
195
    // which runs inside execute_compact_impl(), so they are available now.
196
7.54k
    stats.input_version_range = input_version_range_str();
197
7.54k
    stats.input_rowsets_count = static_cast<int64_t>(_input_rowsets.size());
198
7.54k
    stats.input_row_num = _input_row_num;
199
7.54k
    stats.input_data_size = _input_rowsets_data_size;
200
7.54k
    stats.input_index_size = _input_rowsets_index_size;
201
7.54k
    stats.input_total_size = _input_rowsets_total_size;
202
7.54k
    stats.input_segments_num = input_segments_num_value();
203
7.54k
    stats.end_time_ms = UnixMillis();
204
7.54k
    stats.merged_rows = _stats.merged_rows;
205
7.54k
    stats.filtered_rows = _stats.filtered_rows;
206
7.54k
    stats.output_rows = _stats.output_rows;
207
7.54k
    if (_output_rowset) {
208
7.54k
        stats.output_row_num = _output_rowset->num_rows();
209
7.54k
        stats.output_data_size = _output_rowset->data_disk_size();
210
7.54k
        stats.output_index_size = _output_rowset->index_disk_size();
211
7.54k
        stats.output_total_size = _output_rowset->total_disk_size();
212
7.54k
        stats.output_segments_num = _output_rowset->num_segments();
213
7.54k
    }
214
7.54k
    stats.output_version = _output_version.to_string();
215
7.55k
    if (_merge_rowsets_latency_timer) {
216
7.55k
        stats.merge_latency_ms = _merge_rowsets_latency_timer->value() / 1000000;
217
7.55k
    }
218
7.54k
    stats.bytes_read_from_local = _stats.bytes_read_from_local;
219
7.54k
    stats.bytes_read_from_remote = _stats.bytes_read_from_remote;
220
7.55k
    if (_mem_tracker) {
221
7.55k
        stats.peak_memory_bytes = _mem_tracker->peak_consumption();
222
7.55k
    }
223
7.54k
    if (success) {
224
7.50k
        tracker->complete(_compaction_id, stats);
225
7.50k
    } else {
226
38
        tracker->fail(_compaction_id, stats, status_msg);
227
38
    }
228
7.54k
}
229
230
314k
void Compaction::init_profile(const std::string& label) {
231
314k
    _profile = std::make_unique<RuntimeProfile>(label);
232
233
314k
    _input_rowsets_data_size_counter =
234
314k
            ADD_COUNTER(_profile, "input_rowsets_data_size", TUnit::BYTES);
235
314k
    _input_rowsets_counter = ADD_COUNTER(_profile, "input_rowsets_count", TUnit::UNIT);
236
314k
    _input_row_num_counter = ADD_COUNTER(_profile, "input_row_num", TUnit::UNIT);
237
314k
    _input_segments_num_counter = ADD_COUNTER(_profile, "input_segments_num", TUnit::UNIT);
238
314k
    _merged_rows_counter = ADD_COUNTER(_profile, "merged_rows", TUnit::UNIT);
239
314k
    _filtered_rows_counter = ADD_COUNTER(_profile, "filtered_rows", TUnit::UNIT);
240
314k
    _output_rowset_data_size_counter =
241
314k
            ADD_COUNTER(_profile, "output_rowset_data_size", TUnit::BYTES);
242
314k
    _output_row_num_counter = ADD_COUNTER(_profile, "output_row_num", TUnit::UNIT);
243
314k
    _output_segments_num_counter = ADD_COUNTER(_profile, "output_segments_num", TUnit::UNIT);
244
314k
    _merge_rowsets_latency_timer = ADD_TIMER(_profile, "merge_rowsets_latency");
245
314k
}
246
247
7.86k
int64_t Compaction::merge_way_num() {
248
7.86k
    int64_t way_num = 0;
249
61.6k
    for (auto&& rowset : _input_rowsets) {
250
61.6k
        way_num += rowset->rowset_meta()->get_merge_way_num();
251
61.6k
    }
252
253
7.86k
    return way_num;
254
7.86k
}
255
256
7.90k
Status Compaction::merge_input_rowsets() {
257
7.90k
    std::vector<RowsetReaderSharedPtr> input_rs_readers;
258
7.90k
    input_rs_readers.reserve(_input_rowsets.size());
259
61.8k
    for (auto& rowset : _input_rowsets) {
260
61.8k
        RowsetReaderSharedPtr rs_reader;
261
61.8k
        RETURN_IF_ERROR(rowset->create_reader(&rs_reader));
262
61.8k
        input_rs_readers.push_back(std::move(rs_reader));
263
61.8k
    }
264
265
7.90k
    RowsetWriterContext ctx;
266
    // Propagate input rowset readers into the rowset writer context before the writer is created.
267
    // Variant nested-group compaction uses this metadata to enable the streaming writer path.
268
7.90k
    ctx.input_rs_readers = input_rs_readers;
269
7.90k
    RETURN_IF_ERROR(construct_output_rowset_writer(ctx));
270
271
    // write merged rows to output rowset
272
    // The test results show that the merger has a low memory footprint, so there is no need to track its mem pool
273
    // if ctx.columns_to_do_index_compaction.size() > 0, it means we need to do inverted index compaction.
274
    // the row ID conversion matrix needs to be used for inverted index compaction.
275
7.90k
    if (!ctx.columns_to_do_index_compaction.empty() ||
276
7.90k
        (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
277
7.50k
         _tablet->enable_unique_key_merge_on_write())) {
278
4.20k
        _stats.rowid_conversion = _rowid_conversion.get();
279
4.20k
    }
280
281
7.90k
    int64_t way_num = merge_way_num();
282
283
7.90k
    Status res;
284
7.90k
    {
285
7.90k
        SCOPED_TIMER(_merge_rowsets_latency_timer);
286
        // 1. Merge segment files and write bkd inverted index
287
        // TODO implement vertical compaction for seq map
288
7.90k
        if (_is_vertical && !_tablet->tablet_schema()->has_seq_map()) {
289
7.90k
            if (!_tablet->tablet_schema()->cluster_key_uids().empty()) {
290
157
                RETURN_IF_ERROR(update_delete_bitmap());
291
157
            }
292
7.90k
            auto progress_cb = [compaction_id = this->_compaction_id](int64_t total,
293
29.5k
                                                                      int64_t completed) {
294
29.5k
                CompactionTaskTracker::instance()->update_progress(compaction_id, total, completed);
295
29.5k
            };
296
7.90k
            res = Merger::vertical_merge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema,
297
7.90k
                                                 input_rs_readers, _output_rs_writer.get(),
298
7.90k
                                                 cast_set<uint32_t>(get_avg_segment_rows()),
299
7.90k
                                                 way_num, &_stats, progress_cb);
300
7.90k
        } else {
301
5
            if (!_tablet->tablet_schema()->cluster_key_uids().empty()) {
302
0
                return Status::InternalError(
303
0
                        "mow table with cluster keys does not support non vertical compaction");
304
0
            }
305
5
            res = Merger::vmerge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema,
306
5
                                         input_rs_readers, _output_rs_writer.get(), &_stats);
307
5
        }
308
309
7.90k
        _tablet->last_compaction_status = res;
310
7.90k
        if (!res.ok()) {
311
0
            return res;
312
0
        }
313
        // 2. Merge the remaining inverted index files of the string type
314
7.90k
        RETURN_IF_ERROR(do_inverted_index_compaction());
315
7.90k
    }
316
317
7.90k
    COUNTER_UPDATE(_merged_rows_counter, _stats.merged_rows);
318
7.90k
    COUNTER_UPDATE(_filtered_rows_counter, _stats.filtered_rows);
319
320
    // 3. In the `build`, `_close_file_writers` is called to close the inverted index file writer and write the final compound index file.
321
7.90k
    RETURN_NOT_OK_STATUS_WITH_WARN(_output_rs_writer->build(_output_rowset),
322
7.90k
                                   fmt::format("rowset writer build failed. output_version: {}",
323
7.90k
                                               _output_version.to_string()));
324
325
    // When true, writers should remove variant extracted subcolumns from the
326
    // schema stored in RowsetMeta. This is used when compaction temporarily
327
    // extends schema to split variant subcolumns for vertical compaction but
328
    // the final rowset meta must not persist those extracted subcolumns.
329
7.90k
    if (_enable_vertical_compact_variant_subcolumns &&
330
7.90k
        (_cur_tablet_schema->num_variant_columns() > 0)) {
331
524
        _output_rowset->rowset_meta()->set_tablet_schema(
332
524
                _cur_tablet_schema->copy_without_variant_extracted_columns());
333
524
    }
334
335
    //RETURN_IF_ERROR(_engine.meta_mgr().commit_rowset(*_output_rowset->rowset_meta().get()));
336
7.90k
    set_delete_predicate_for_output_rowset();
337
338
7.90k
    _local_read_bytes_total = _stats.bytes_read_from_local;
339
7.90k
    _remote_read_bytes_total = _stats.bytes_read_from_remote;
340
7.90k
    DorisMetrics::instance()->local_compaction_read_bytes_total->increment(_local_read_bytes_total);
341
7.90k
    DorisMetrics::instance()->remote_compaction_read_bytes_total->increment(
342
7.90k
            _remote_read_bytes_total);
343
7.90k
    DorisMetrics::instance()->local_compaction_write_bytes_total->increment(
344
7.90k
            _stats.cached_bytes_total);
345
346
7.90k
    COUNTER_UPDATE(_output_rowset_data_size_counter, _output_rowset->data_disk_size());
347
7.90k
    COUNTER_UPDATE(_output_row_num_counter, _output_rowset->num_rows());
348
7.90k
    COUNTER_UPDATE(_output_segments_num_counter, _output_rowset->num_segments());
349
350
7.90k
    return check_correctness();
351
7.90k
}
352
353
7.89k
void Compaction::set_delete_predicate_for_output_rowset() {
354
    // Now we support delete in cumu compaction, to make all data in rowsets whose version
355
    // is below output_version to be deleted in a future base compaction, we should carry
356
    // all delete predicates into the output rowset.
357
    // Output start version > 2 means we must set the delete predicate in the output rowset
358
7.89k
    if (_output_rowset->version().first > 2 &&
359
7.89k
        (_allow_delete_in_cumu_compaction || is_index_change_compaction())) {
360
505
        DeletePredicatePB delete_predicate;
361
505
        std::accumulate(_input_rowsets.begin(), _input_rowsets.end(), &delete_predicate,
362
505
                        [](DeletePredicatePB* delete_predicate, const RowsetSharedPtr& rs) {
363
503
                            if (rs->rowset_meta()->has_delete_predicate()) {
364
3
                                delete_predicate->MergeFrom(rs->rowset_meta()->delete_predicate());
365
3
                            }
366
503
                            return delete_predicate;
367
503
                        });
368
        // now version in delete_predicate is deprecated
369
505
        if (!delete_predicate.in_predicates().empty() ||
370
505
            !delete_predicate.sub_predicates_v2().empty() ||
371
505
            !delete_predicate.sub_predicates().empty()) {
372
3
            _output_rowset->rowset_meta()->set_delete_predicate(std::move(delete_predicate));
373
3
        }
374
505
    }
375
7.89k
}
376
377
7.83k
int64_t Compaction::get_avg_segment_rows() {
378
    // take care of empty rowset
379
    // input_rowsets_data_size is the total disk size of the input rowsets; this size is the
380
    // final size after encoding and compression, so the expected destination segment file
381
    // size on disk is config::vertical_compaction_max_segment_size
382
7.83k
    const auto& meta = _tablet->tablet_meta();
383
7.83k
    if (meta->compaction_policy() == CUMULATIVE_TIME_SERIES_POLICY) {
384
5
        int64_t compaction_goal_size_mbytes = meta->time_series_compaction_goal_size_mbytes();
385
        // The output segment rows should be less than total input rows
386
5
        return std::min((compaction_goal_size_mbytes * 1024 * 1024 * 2) /
387
5
                                (_input_rowsets_data_size / (_input_row_num + 1) + 1),
388
5
                        _input_row_num + 1);
389
5
    }
390
7.83k
    return std::min(config::vertical_compaction_max_segment_size /
391
7.83k
                            (_input_rowsets_data_size / (_input_row_num + 1) + 1),
392
7.83k
                    _input_row_num + 1);
393
7.83k
}
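
A rough worked example of the formula above (all numbers are illustrative assumptions, not actual defaults):

// vertical_compaction_max_segment_size = 256 MiB, input data = 1 GiB, input rows = 10M
// avg row size   ~= 1 GiB / 10M          ~= 107 bytes
// rows / segment ~= 256 MiB / (107 + 1)  ~= 2.5M, capped at _input_row_num + 1
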
394
395
CompactionMixin::CompactionMixin(StorageEngine& engine, TabletSharedPtr tablet,
396
                                 const std::string& label)
397
205k
        : Compaction(tablet, label), _engine(engine) {}
398
399
205k
CompactionMixin::~CompactionMixin() {
400
205k
    if (_state != CompactionState::SUCCESS && _output_rowset != nullptr) {
401
6
        if (!_output_rowset->is_local()) {
402
0
            tablet()->record_unused_remote_rowset(_output_rowset->rowset_id(),
403
0
                                                  _output_rowset->rowset_meta()->resource_id(),
404
0
                                                  _output_rowset->num_segments());
405
0
            return;
406
0
        }
407
6
        _engine.add_unused_rowset(_output_rowset);
408
6
    }
409
205k
}
410
411
1.84M
Tablet* CompactionMixin::tablet() {
412
1.84M
    return static_cast<Tablet*>(_tablet.get());
413
1.84M
}
414
415
508
Status CompactionMixin::do_compact_ordered_rowsets() {
416
508
    RETURN_IF_ERROR(build_basic_info(true));
417
508
    RowsetWriterContext ctx;
418
508
    RETURN_IF_ERROR(construct_output_rowset_writer(ctx));
419
420
508
    LOG(INFO) << "start to do ordered data compaction, tablet=" << _tablet->tablet_id()
421
508
              << ", output_version=" << _output_version;
422
    // link data to new rowset
423
508
    auto seg_id = 0;
424
508
    bool segments_key_bounds_truncated {false};
425
508
    bool any_input_aggregated {false};
426
508
    std::vector<KeyBoundsPB> segment_key_bounds;
427
508
    std::vector<uint32_t> num_segment_rows;
428
3.14k
    for (auto rowset : _input_rowsets) {
429
3.14k
        RETURN_IF_ERROR(rowset->link_files_to(tablet()->tablet_path(),
430
3.14k
                                              _output_rs_writer->rowset_id(), seg_id));
431
3.14k
        seg_id += rowset->num_segments();
432
3.14k
        segments_key_bounds_truncated |= rowset->is_segments_key_bounds_truncated();
433
3.14k
        any_input_aggregated |= rowset->rowset_meta()->is_segments_key_bounds_aggregated();
434
3.14k
        std::vector<KeyBoundsPB> key_bounds;
435
3.14k
        RETURN_IF_ERROR(rowset->get_segments_key_bounds(&key_bounds));
436
3.14k
        segment_key_bounds.insert(segment_key_bounds.end(), key_bounds.begin(), key_bounds.end());
437
3.14k
        std::vector<uint32_t> input_segment_rows;
438
3.14k
        rowset->get_num_segment_rows(&input_segment_rows);
439
3.14k
        num_segment_rows.insert(num_segment_rows.end(), input_segment_rows.begin(),
440
3.14k
                                input_segment_rows.end());
441
3.14k
    }
442
    // build output rowset
443
508
    RowsetMetaSharedPtr rowset_meta = std::make_shared<RowsetMeta>();
444
508
    rowset_meta->set_num_rows(_input_row_num);
445
508
    rowset_meta->set_total_disk_size(_input_rowsets_data_size + _input_rowsets_index_size);
446
508
    rowset_meta->set_data_disk_size(_input_rowsets_data_size);
447
508
    rowset_meta->set_index_disk_size(_input_rowsets_index_size);
448
508
    rowset_meta->set_empty(_input_row_num == 0);
449
508
    rowset_meta->set_num_segments(_input_num_segments);
450
508
    rowset_meta->set_segments_overlap(NONOVERLAPPING);
451
508
    rowset_meta->set_rowset_state(VISIBLE);
452
508
    rowset_meta->set_segments_key_bounds_truncated(segments_key_bounds_truncated);
453
    // If any input was already aggregated we have no way to recover per-segment
454
    // bounds, so force aggregation on the output to keep the layout consistent
455
    // with `num_segments` / the aggregated flag, even if the config is off now.
456
508
    bool aggregate_key_bounds =
457
508
            any_input_aggregated || (config::enable_aggregate_non_mow_key_bounds &&
458
506
                                     !_tablet->enable_unique_key_merge_on_write());
459
508
    rowset_meta->set_segments_key_bounds(segment_key_bounds, aggregate_key_bounds);
460
508
    rowset_meta->set_num_segment_rows(num_segment_rows);
461
462
508
    _output_rowset = _output_rs_writer->manual_build(rowset_meta);
463
464
    // 2. check variant column path stats
465
508
    RETURN_IF_ERROR(variant_util::VariantCompactionUtil::check_path_stats(_input_rowsets,
466
508
                                                                          _output_rowset, _tablet));
467
508
    return Status::OK();
468
508
}
469
470
1.43k
Status CompactionMixin::build_basic_info(bool is_ordered_compaction) {
471
10.0k
    for (auto& rowset : _input_rowsets) {
472
10.0k
        const auto& rowset_meta = rowset->rowset_meta();
473
10.0k
        auto index_size = rowset_meta->index_disk_size();
474
10.0k
        auto total_size = rowset_meta->total_disk_size();
475
10.0k
        auto data_size = rowset_meta->data_disk_size();
476
        // corrupted index size caused by a bug before version 2.1.5 or 3.0.0
477
        // try to get real index size from disk.
478
10.0k
        if (index_size < 0 || index_size > total_size * 2) {
479
0
            LOG(ERROR) << "invalid index size:" << index_size << " total size:" << total_size
480
0
                       << " data size:" << data_size << " tablet:" << rowset_meta->tablet_id()
481
0
                       << " rowset:" << rowset_meta->rowset_id();
482
0
            index_size = 0;
483
0
            auto st = rowset->get_inverted_index_size(&index_size);
484
0
            if (!st.ok()) {
485
0
                LOG(ERROR) << "failed to get inverted index size. res=" << st;
486
0
            }
487
0
        }
488
10.0k
        _input_rowsets_data_size += data_size;
489
10.0k
        _input_rowsets_index_size += index_size;
490
10.0k
        _input_rowsets_total_size += total_size;
491
10.0k
        _input_row_num += rowset->num_rows();
492
10.0k
        _input_num_segments += rowset->num_segments();
493
10.0k
    }
494
1.43k
    COUNTER_UPDATE(_input_rowsets_data_size_counter, _input_rowsets_data_size);
495
1.43k
    COUNTER_UPDATE(_input_row_num_counter, _input_row_num);
496
1.43k
    COUNTER_UPDATE(_input_segments_num_counter, _input_num_segments);
497
498
1.43k
    TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::build_basic_info",
499
1.43k
                                      Status::OK());
500
501
1.43k
    _output_version =
502
1.43k
            Version(_input_rowsets.front()->start_version(), _input_rowsets.back()->end_version());
503
504
1.43k
    _newest_write_timestamp = _input_rowsets.back()->newest_write_timestamp();
505
506
1.43k
    std::vector<RowsetMetaSharedPtr> rowset_metas(_input_rowsets.size());
507
1.43k
    std::transform(_input_rowsets.begin(), _input_rowsets.end(), rowset_metas.begin(),
508
10.2k
                   [](const RowsetSharedPtr& rowset) { return rowset->rowset_meta(); });
509
1.43k
    _cur_tablet_schema = _tablet->tablet_schema_with_merged_max_schema_version(rowset_metas);
510
511
    // if enable_vertical_compact_variant_subcolumns is true, we need to compact the variant subcolumns in separate column groups
512
    // so get_extended_compaction_schema will extend the schema for variant columns
513
    // for ordered compaction, we don't need to extend the schema for variant columns
514
1.43k
    if (_enable_vertical_compact_variant_subcolumns && !is_ordered_compaction) {
515
934
        RETURN_IF_ERROR(variant_util::VariantCompactionUtil::get_extended_compaction_schema(
516
934
                _input_rowsets, _cur_tablet_schema));
517
934
    }
518
1.43k
    return Status::OK();
519
1.43k
}
520
521
1.44k
bool CompactionMixin::handle_ordered_data_compaction() {
522
1.44k
    if (!config::enable_ordered_data_compaction) {
523
0
        return false;
524
0
    }
525
526
    // If some rowsets have idx files and others do not, we cannot do link-file compaction,
527
    // since the output rowset would be broken.
528
529
    // Use schema version instead of schema hash to check if they are the same,
530
    // because light schema change will not change the schema hash on BE, but will increase the schema version
531
    // See fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java::2979
532
1.44k
    std::vector<int32_t> schema_versions_of_rowsets;
533
534
10.2k
    for (auto input_rowset : _input_rowsets) {
535
10.2k
        schema_versions_of_rowsets.push_back(input_rowset->rowset_meta()->schema_version());
536
10.2k
    }
537
538
    // If all rowsets have the same schema version, then we can do link file compaction directly.
539
1.44k
    bool all_same_schema_version =
540
1.44k
            std::all_of(schema_versions_of_rowsets.begin(), schema_versions_of_rowsets.end(),
541
10.2k
                        [&](int32_t v) { return v == schema_versions_of_rowsets.front(); });
542
543
1.44k
    if (!all_same_schema_version) {
544
0
        return false;
545
0
    }
546
547
1.44k
    if (compaction_type() == ReaderType::READER_COLD_DATA_COMPACTION ||
548
1.44k
        compaction_type() == ReaderType::READER_FULL_COMPACTION) {
549
        // Cold data compaction (remote file system) and full compaction do not support linking files.
550
0
        return false;
551
0
    }
552
1.44k
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
553
1.44k
        _tablet->enable_unique_key_merge_on_write()) {
554
643
        return false;
555
643
    }
556
557
805
    if (_tablet->tablet_meta()->tablet_schema()->skip_write_index_on_load()) {
558
        // Expected to create index through normal compaction
559
0
        return false;
560
0
    }
561
562
    // check delete version: if compaction type is base compaction and
563
    // has a delete version, use original compaction
564
805
    if (compaction_type() == ReaderType::READER_BASE_COMPACTION ||
565
805
        (_allow_delete_in_cumu_compaction &&
566
793
         compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION)) {
567
36
        for (auto& rowset : _input_rowsets) {
568
36
            if (rowset->rowset_meta()->has_delete_predicate()) {
569
12
                return false;
570
12
            }
571
36
        }
572
12
    }
573
574
    // check if rowsets are tidy so we can just modify meta and do link
575
    // files to handle compaction
576
793
    auto input_size = _input_rowsets.size();
577
793
    std::string pre_max_key;
578
793
    bool pre_rs_key_bounds_truncated {false};
579
4.27k
    for (auto i = 0; i < input_size; ++i) {
580
3.81k
        if (!is_rowset_tidy(pre_max_key, pre_rs_key_bounds_truncated, _input_rowsets[i])) {
581
339
            if (i <= input_size / 2) {
582
285
                return false;
583
285
            } else {
584
54
                _input_rowsets.resize(i);
585
54
                break;
586
54
            }
587
339
        }
588
3.81k
    }
589
    // most rowsets of the current compaction are non-overlapping,
590
    // so just handle the non-overlapping rowsets
591
508
    auto st = do_compact_ordered_rowsets();
592
508
    if (!st.ok()) {
593
0
        LOG(WARNING) << "failed to compact ordered rowsets: " << st;
594
0
        _pending_rs_guard.drop();
595
0
    }
596
597
508
    return st.ok();
598
793
}
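
A small illustration of the tidy-prefix heuristic above (counts are made up):

// 8 input rowsets, the rowset at index 6 is the first to fail is_rowset_tidy():
//   i = 6 > 8 / 2, so _input_rowsets is truncated to the first 6 and they are linked;
// if instead index 2 failed (2 <= 8 / 2), ordered compaction is abandoned entirely.
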
599
600
1.43k
Status CompactionMixin::execute_compact() {
601
1.43k
    int64_t profile_start_time_ms = UnixMillis();
602
1.43k
    uint32_t checksum_before;
603
1.43k
    uint32_t checksum_after;
604
1.43k
    bool enable_compaction_checksum = config::enable_compaction_checksum;
605
1.43k
    if (enable_compaction_checksum) {
606
0
        EngineChecksumTask checksum_task(_engine, _tablet->tablet_id(), _tablet->schema_hash(),
607
0
                                         _input_rowsets.back()->end_version(), &checksum_before);
608
0
        auto st = checksum_task.execute();
609
0
        if (!st.ok()) {
610
0
            submit_profile_record(false, profile_start_time_ms, st.to_string());
611
0
            return st;
612
0
        }
613
0
    }
614
615
1.43k
    auto* data_dir = tablet()->data_dir();
616
1.43k
    int64_t permits = get_compaction_permits();
617
1.43k
    data_dir->disks_compaction_score_increment(permits);
618
1.43k
    data_dir->disks_compaction_num_increment(1);
619
620
1.43k
    auto record_compaction_stats = [&](const doris::Exception& ex) {
621
1.43k
        _tablet->compaction_count.fetch_add(1, std::memory_order_relaxed);
622
1.43k
        data_dir->disks_compaction_score_increment(-permits);
623
1.43k
        data_dir->disks_compaction_num_increment(-1);
624
1.43k
    };
625
    // Handler for execute_compact_impl failure (both Status error and C++ exception).
626
    // The macro calls this then returns, so submit_profile_record(false) must be here.
627
1.43k
    auto on_compact_impl_failure = [&](const doris::Exception& ex) {
628
0
        record_compaction_stats(ex);
629
0
        submit_profile_record(false, profile_start_time_ms,
630
0
                              ex.what() ? std::string(ex.what()) : "");
631
0
    };
632
633
1.43k
    HANDLE_EXCEPTION_IF_CATCH_EXCEPTION(execute_compact_impl(permits), on_compact_impl_failure);
634
    // Only reached on success (macro returns on failure).
635
1.43k
    record_compaction_stats(doris::Exception());
636
637
1.43k
    if (enable_compaction_checksum) {
638
0
        EngineChecksumTask checksum_task(_engine, _tablet->tablet_id(), _tablet->schema_hash(),
639
0
                                         _input_rowsets.back()->end_version(), &checksum_after);
640
0
        auto st = checksum_task.execute();
641
0
        if (!st.ok()) {
642
0
            submit_profile_record(false, profile_start_time_ms, st.to_string());
643
0
            return st;
644
0
        }
645
0
        if (checksum_before != checksum_after) {
646
0
            auto mismatch_st = Status::InternalError(
647
0
                    "compaction tablet checksum not consistent, before={}, after={}, tablet_id={}",
648
0
                    checksum_before, checksum_after, _tablet->tablet_id());
649
0
            submit_profile_record(false, profile_start_time_ms, mismatch_st.to_string());
650
0
            return mismatch_st;
651
0
        }
652
0
    }
653
654
1.43k
    DorisMetrics::instance()->local_compaction_read_rows_total->increment(_input_row_num);
655
1.43k
    DorisMetrics::instance()->local_compaction_read_bytes_total->increment(
656
1.43k
            _input_rowsets_total_size);
657
658
1.43k
    TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::execute_compact", Status::OK());
659
660
1.43k
    DorisMetrics::instance()->local_compaction_write_rows_total->increment(
661
1.43k
            _output_rowset->num_rows());
662
1.43k
    DorisMetrics::instance()->local_compaction_write_bytes_total->increment(
663
1.43k
            _output_rowset->total_disk_size());
664
665
1.43k
    _load_segment_to_cache();
666
1.43k
    submit_profile_record(true, profile_start_time_ms);
667
1.43k
    return Status::OK();
668
1.43k
}
669
670
1.43k
Status CompactionMixin::execute_compact_impl(int64_t permits) {
671
1.43k
    OlapStopWatch watch;
672
673
1.43k
    if (handle_ordered_data_compaction()) {
674
502
        RETURN_IF_ERROR(modify_rowsets());
675
502
        LOG(INFO) << "succeed to do ordered data " << compaction_name()
676
502
                  << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
677
502
                  << ", disk=" << tablet()->data_dir()->path()
678
502
                  << ", segments=" << _input_num_segments << ", input_row_num=" << _input_row_num
679
502
                  << ", output_row_num=" << _output_rowset->num_rows()
680
502
                  << ", input_rowsets_data_size=" << _input_rowsets_data_size
681
502
                  << ", input_rowsets_index_size=" << _input_rowsets_index_size
682
502
                  << ", input_rowsets_total_size=" << _input_rowsets_total_size
683
502
                  << ", output_rowset_data_size=" << _output_rowset->data_disk_size()
684
502
                  << ", output_rowset_index_size=" << _output_rowset->index_disk_size()
685
502
                  << ", output_rowset_total_size=" << _output_rowset->total_disk_size()
686
502
                  << ". elapsed time=" << watch.get_elapse_second() << "s.";
687
502
        _state = CompactionState::SUCCESS;
688
502
        return Status::OK();
689
502
    }
690
934
    RETURN_IF_ERROR(build_basic_info());
691
692
934
    TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::execute_compact_impl",
693
934
                                      Status::OK());
694
695
934
    VLOG_DEBUG << "dump tablet schema: " << _cur_tablet_schema->dump_structure();
696
697
934
    LOG(INFO) << "start " << compaction_name() << ". tablet=" << _tablet->tablet_id()
698
934
              << ", output_version=" << _output_version << ", permits: " << permits;
699
700
934
    RETURN_IF_ERROR(merge_input_rowsets());
701
702
    // Currently, updates are only made in the time_series.
703
934
    update_compaction_level();
704
705
934
    RETURN_IF_ERROR(modify_rowsets());
706
707
934
    auto* cumu_policy = tablet()->cumulative_compaction_policy();
708
934
    DCHECK(cumu_policy);
709
934
    LOG(INFO) << "succeed to do " << compaction_name() << " is_vertical=" << _is_vertical
710
934
              << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
711
934
              << ", current_max_version=" << tablet()->max_version().second
712
934
              << ", disk=" << tablet()->data_dir()->path()
713
934
              << ", input_segments=" << _input_num_segments << ", input_rowsets_data_size="
714
934
              << PrettyPrinter::print_bytes(_input_rowsets_data_size)
715
934
              << ", input_rowsets_index_size="
716
934
              << PrettyPrinter::print_bytes(_input_rowsets_index_size)
717
934
              << ", input_rowsets_total_size="
718
934
              << PrettyPrinter::print_bytes(_input_rowsets_total_size)
719
934
              << ", output_rowset_data_size="
720
934
              << PrettyPrinter::print_bytes(_output_rowset->data_disk_size())
721
934
              << ", output_rowset_index_size="
722
934
              << PrettyPrinter::print_bytes(_output_rowset->index_disk_size())
723
934
              << ", output_rowset_total_size="
724
934
              << PrettyPrinter::print_bytes(_output_rowset->total_disk_size())
725
934
              << ", input_row_num=" << _input_row_num
726
934
              << ", output_row_num=" << _output_rowset->num_rows()
727
934
              << ", filtered_row_num=" << _stats.filtered_rows
728
934
              << ", merged_row_num=" << _stats.merged_rows
729
934
              << ". elapsed time=" << watch.get_elapse_second()
730
934
              << "s. cumulative_compaction_policy=" << cumu_policy->name()
731
934
              << ", compact_row_per_second="
732
934
              << cast_set<double>(_input_row_num) / watch.get_elapse_second();
733
734
934
    _state = CompactionState::SUCCESS;
735
736
934
    return Status::OK();
737
934
}
738
739
7.94k
Status Compaction::do_inverted_index_compaction() {
740
7.94k
    const auto& ctx = _output_rs_writer->context();
741
7.94k
    if (!_enable_inverted_index_compaction || _input_row_num <= 0 ||
742
7.94k
        ctx.columns_to_do_index_compaction.empty()) {
743
7.71k
        return Status::OK();
744
7.71k
    }
745
746
230
    auto error_handler = [this](int64_t index_id, int64_t column_uniq_id) {
747
2
        LOG(WARNING) << "failed to do index compaction"
748
2
                     << ". tablet=" << _tablet->tablet_id() << ". column uniq id=" << column_uniq_id
749
2
                     << ". index_id=" << index_id;
750
4
        for (auto& rowset : _input_rowsets) {
751
4
            rowset->set_skip_index_compaction(cast_set<int32_t>(column_uniq_id));
752
4
            LOG(INFO) << "mark skipping inverted index compaction next time"
753
4
                      << ". tablet=" << _tablet->tablet_id() << ", rowset=" << rowset->rowset_id()
754
4
                      << ", column uniq id=" << column_uniq_id << ", index_id=" << index_id;
755
4
        }
756
2
    };
757
758
230
    DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_rowid_conversion_null",
759
230
                    { _stats.rowid_conversion = nullptr; })
760
230
    if (!_stats.rowid_conversion) {
761
0
        LOG(WARNING) << "failed to do index compaction, rowid conversion is null"
762
0
                     << ". tablet=" << _tablet->tablet_id()
763
0
                     << ", input row number=" << _input_row_num;
764
0
        mark_skip_index_compaction(ctx, error_handler);
765
766
0
        return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
767
0
                "failed to do index compaction, rowid conversion is null. tablet={}",
768
0
                _tablet->tablet_id());
769
0
    }
770
771
230
    OlapStopWatch inverted_watch;
772
773
    // translation vec
774
    // <<dest_idx_num, dest_docId>>
775
    // the first level vector: index indicates src segment.
776
    // the second level vector: index indicates row id of source segment,
777
    // value indicates row id of destination segment.
778
    // <UINT32_MAX, UINT32_MAX> indicates the current row no longer exists.
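    // Shape illustration (restating the description above, for reading the code below):
    //   trans_vec[src_seg_id][src_row_id] == {dest_seg_id, dest_row_id}
    //   {UINT32_MAX, UINT32_MAX} means the source row was merged away or deleted.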
779
230
    const auto& trans_vec = _stats.rowid_conversion->get_rowid_conversion_map();
780
781
    // source rowset,segment -> index_id
782
230
    const auto& src_seg_to_id_map = _stats.rowid_conversion->get_src_segment_to_id_map();
783
784
    // dest rowset id
785
230
    RowsetId dest_rowset_id = _stats.rowid_conversion->get_dst_rowset_id();
786
    // dest segment id -> num rows
787
230
    std::vector<uint32_t> dest_segment_num_rows;
788
230
    RETURN_IF_ERROR(_output_rs_writer->get_segment_num_rows(&dest_segment_num_rows));
789
790
230
    auto src_segment_num = src_seg_to_id_map.size();
791
230
    auto dest_segment_num = dest_segment_num_rows.size();
792
793
    // when all the input rowsets are deleted, the output rowset will be empty and dest_segment_num will be 0.
794
230
    if (dest_segment_num <= 0) {
795
2
        LOG(INFO) << "skip doing index compaction due to no output segments"
796
2
                  << ". tablet=" << _tablet->tablet_id() << ", input row number=" << _input_row_num
797
2
                  << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
798
2
        return Status::OK();
799
2
    }
800
801
    // Only write info files when debug index compaction is enabled.
802
    // The files are used to debug index compaction and work with index_tool.
803
228
    if (config::debug_inverted_index_compaction) {
804
        // src index files
805
        // format: rowsetId_segmentId
806
0
        std::vector<std::string> src_index_files(src_segment_num);
807
0
        for (const auto& m : src_seg_to_id_map) {
808
0
            std::pair<RowsetId, uint32_t> p = m.first;
809
0
            src_index_files[m.second] = p.first.to_string() + "_" + std::to_string(p.second);
810
0
        }
811
812
        // dest index files
813
        // format: rowsetId_segmentId
814
0
        std::vector<std::string> dest_index_files(dest_segment_num);
815
0
        for (int i = 0; i < dest_segment_num; ++i) {
816
0
            auto prefix = dest_rowset_id.to_string() + "_" + std::to_string(i);
817
0
            dest_index_files[i] = prefix;
818
0
        }
819
820
0
        auto write_json_to_file = [&](const nlohmann::json& json_obj,
821
0
                                      const std::string& file_name) {
822
0
            io::FileWriterPtr file_writer;
823
0
            std::string file_path =
824
0
                    fmt::format("{}/{}.json", std::string(getenv("LOG_DIR")), file_name);
825
0
            RETURN_IF_ERROR(io::global_local_filesystem()->create_file(file_path, &file_writer));
826
0
            RETURN_IF_ERROR(file_writer->append(json_obj.dump()));
827
0
            RETURN_IF_ERROR(file_writer->append("\n"));
828
0
            return file_writer->close();
829
0
        };
830
831
        // Convert trans_vec to JSON and print it
832
0
        nlohmann::json trans_vec_json = trans_vec;
833
0
        auto output_version =
834
0
                _output_version.to_string().substr(1, _output_version.to_string().size() - 2);
835
0
        RETURN_IF_ERROR(write_json_to_file(
836
0
                trans_vec_json,
837
0
                fmt::format("trans_vec_{}_{}", _tablet->tablet_id(), output_version)));
838
839
0
        nlohmann::json src_index_files_json = src_index_files;
840
0
        RETURN_IF_ERROR(write_json_to_file(
841
0
                src_index_files_json,
842
0
                fmt::format("src_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
843
844
0
        nlohmann::json dest_index_files_json = dest_index_files;
845
0
        RETURN_IF_ERROR(write_json_to_file(
846
0
                dest_index_files_json,
847
0
                fmt::format("dest_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
848
849
0
        nlohmann::json dest_segment_num_rows_json = dest_segment_num_rows;
850
0
        RETURN_IF_ERROR(write_json_to_file(
851
0
                dest_segment_num_rows_json,
852
0
                fmt::format("dest_seg_num_rows_{}_{}", _tablet->tablet_id(), output_version)));
853
0
    }
854
855
    // create index writers to compact the indexes
856
228
    std::unordered_map<RowsetId, Rowset*> rs_id_to_rowset_map;
857
1.44k
    for (auto&& rs : _input_rowsets) {
858
1.44k
        rs_id_to_rowset_map.emplace(rs->rowset_id(), rs.get());
859
1.44k
    }
860
861
    // src index dirs
862
228
    std::vector<std::unique_ptr<IndexFileReader>> index_file_readers(src_segment_num);
863
975
    for (const auto& m : src_seg_to_id_map) {
864
975
        const auto& [rowset_id, seg_id] = m.first;
865
866
975
        auto find_it = rs_id_to_rowset_map.find(rowset_id);
867
975
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_find_rowset_error",
868
975
                        { find_it = rs_id_to_rowset_map.end(); })
869
975
        if (find_it == rs_id_to_rowset_map.end()) [[unlikely]] {
870
0
            LOG(WARNING) << "failed to do index compaction, cannot find rowset. tablet_id="
871
0
                         << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string();
872
0
            mark_skip_index_compaction(ctx, error_handler);
873
0
            return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
874
0
                    "failed to do index compaction, cannot find rowset. tablet_id={} rowset_id={}",
875
0
                    _tablet->tablet_id(), rowset_id.to_string());
876
0
        }
877
878
975
        auto* rowset = find_it->second;
879
975
        auto fs = rowset->rowset_meta()->fs();
880
975
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_get_fs_error", { fs = nullptr; })
881
975
        if (!fs) {
882
0
            LOG(WARNING) << "failed to do index compaction, get fs failed. resource_id="
883
0
                         << rowset->rowset_meta()->resource_id();
884
0
            mark_skip_index_compaction(ctx, error_handler);
885
0
            return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
886
0
                    "get fs failed, resource_id={}", rowset->rowset_meta()->resource_id());
887
0
        }
888
889
975
        auto seg_path = rowset->segment_path(seg_id);
890
975
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_seg_path_nullptr", {
891
975
            seg_path = ResultError(Status::Error<ErrorCode::INTERNAL_ERROR>(
892
975
                    "do_inverted_index_compaction_seg_path_nullptr"));
893
975
        })
894
975
        if (!seg_path.has_value()) {
895
0
            LOG(WARNING) << "failed to do index compaction, get segment path failed. tablet_id="
896
0
                         << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string()
897
0
                         << " seg_id=" << seg_id;
898
0
            mark_skip_index_compaction(ctx, error_handler);
899
0
            return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
900
0
                    "get segment path failed. tablet_id={} rowset_id={} seg_id={}",
901
0
                    _tablet->tablet_id(), rowset_id.to_string(), seg_id);
902
0
        }
903
975
        auto index_file_reader = std::make_unique<IndexFileReader>(
904
975
                fs,
905
975
                std::string {InvertedIndexDescriptor::get_index_file_path_prefix(seg_path.value())},
906
975
                _cur_tablet_schema->get_inverted_index_storage_format(),
907
975
                rowset->rowset_meta()->inverted_index_file_info(seg_id), _tablet->tablet_id());
908
975
        auto st = index_file_reader->init(config::inverted_index_read_buffer_size);
909
975
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_init_inverted_index_file_reader",
910
975
                        {
911
975
                            st = Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
912
975
                                    "debug point: "
913
975
                                    "Compaction::do_inverted_index_compaction_init_inverted_index_"
914
975
                                    "file_reader error");
915
975
                        })
916
975
        if (!st.ok()) {
917
0
            LOG(WARNING) << "failed to do index compaction, init inverted index file reader "
918
0
                            "failed. tablet_id="
919
0
                         << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string()
920
0
                         << " seg_id=" << seg_id;
921
0
            mark_skip_index_compaction(ctx, error_handler);
922
0
            return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
923
0
                    "init inverted index file reader failed. tablet_id={} rowset_id={} seg_id={}",
924
0
                    _tablet->tablet_id(), rowset_id.to_string(), seg_id);
925
0
        }
926
975
        index_file_readers[m.second] = std::move(index_file_reader);
927
975
    }
928
929
    // dest index files
930
    // format: rowsetId_segmentId
931
228
    auto& inverted_index_file_writers =
932
228
            dynamic_cast<BaseBetaRowsetWriter*>(_output_rs_writer.get())->index_file_writers();
933
228
    DBUG_EXECUTE_IF(
934
228
            "Compaction::do_inverted_index_compaction_inverted_index_file_writers_size_error",
935
228
            { inverted_index_file_writers.clear(); })
936
228
    if (inverted_index_file_writers.size() != dest_segment_num) {
937
0
        LOG(WARNING) << "failed to do index compaction, dest segment num not match. tablet_id="
938
0
                     << _tablet->tablet_id() << " dest_segment_num=" << dest_segment_num
939
0
                     << " inverted_index_file_writers.size()="
940
0
                     << inverted_index_file_writers.size();
941
0
        mark_skip_index_compaction(ctx, error_handler);
942
0
        return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
943
0
                "dest segment num not match. tablet_id={} dest_segment_num={} "
944
0
                "inverted_index_file_writers.size()={}",
945
0
                _tablet->tablet_id(), dest_segment_num, inverted_index_file_writers.size());
946
0
    }
947
948
    // use tmp file dir to store index files
949
228
    auto tmp_file_dir = ExecEnv::GetInstance()->get_tmp_file_dirs()->get_tmp_file_dir();
950
228
    auto index_tmp_path = tmp_file_dir / dest_rowset_id.to_string();
951
228
    LOG(INFO) << "start index compaction"
952
228
              << ". tablet=" << _tablet->tablet_id() << ", source index size=" << src_segment_num
953
228
              << ", destination index size=" << dest_segment_num << ".";
954
955
228
    Status status = Status::OK();
956
822
    for (auto&& column_uniq_id : ctx.columns_to_do_index_compaction) {
957
822
        auto col = _cur_tablet_schema->column_by_uid(column_uniq_id);
958
822
        auto index_metas = _cur_tablet_schema->inverted_indexs(col);
959
822
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_can_not_find_index_meta",
960
822
                        { index_metas.clear(); })
961
822
        if (index_metas.empty()) {
962
0
            status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
963
0
                    fmt::format("Can not find index_meta for col {}", col.name()));
964
0
            LOG(WARNING) << "failed to do index compaction, can not find index_meta for column"
965
0
                         << ". tablet=" << _tablet->tablet_id()
966
0
                         << ", column uniq id=" << column_uniq_id;
967
0
            error_handler(-1, column_uniq_id);
968
0
            break;
969
0
        }
970
828
        for (const auto& index_meta : index_metas) {
971
828
            std::vector<lucene::store::Directory*> dest_index_dirs(dest_segment_num);
972
828
            try {
973
828
                std::vector<std::unique_ptr<DorisCompoundReader, DirectoryDeleter>> src_idx_dirs(
974
828
                        src_segment_num);
975
3.82k
                for (int src_segment_id = 0; src_segment_id < src_segment_num; src_segment_id++) {
976
2.99k
                    auto res = index_file_readers[src_segment_id]->open(index_meta);
977
2.99k
                    DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_reader", {
978
2.99k
                        res = ResultError(Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
979
2.99k
                                "debug point: Compaction::open_index_file_reader error"));
980
2.99k
                    })
981
2.99k
                    if (!res.has_value()) {
982
0
                        LOG(WARNING) << "failed to do index compaction, open inverted index file "
983
0
                                        "reader failed"
984
0
                                     << ". tablet=" << _tablet->tablet_id()
985
0
                                     << ", column uniq id=" << column_uniq_id
986
0
                                     << ", src_segment_id=" << src_segment_id;
987
0
                        throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
988
0
                                        res.error().msg());
989
0
                    }
990
2.99k
                    src_idx_dirs[src_segment_id] = std::move(res.value());
991
2.99k
                }
992
1.76k
                for (int dest_segment_id = 0; dest_segment_id < dest_segment_num;
993
939
                     dest_segment_id++) {
994
939
                    auto res = inverted_index_file_writers[dest_segment_id]->open(index_meta);
995
939
                    DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_writer", {
996
939
                        res = ResultError(Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
997
939
                                "debug point: Compaction::open_inverted_index_file_writer error"));
998
939
                    })
999
939
                    if (!res.has_value()) {
1000
0
                        LOG(WARNING) << "failed to do index compaction, open inverted index file "
1001
0
                                        "writer failed"
1002
0
                                     << ". tablet=" << _tablet->tablet_id()
1003
0
                                     << ", column uniq id=" << column_uniq_id
1004
0
                                     << ", dest_segment_id=" << dest_segment_id;
1005
0
                        throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
1006
0
                                        res.error().msg());
1007
0
                    }
1008
                    // Destination directories in dest_index_dirs do not need to be destroyed here,
1009
                    // but their lifecycle must be managed by inverted_index_file_writers.
1010
939
                    dest_index_dirs[dest_segment_id] = res.value().get();
1011
939
                }
1012
828
                auto st = compact_column(index_meta->index_id(), src_idx_dirs, dest_index_dirs,
1013
828
                                         index_tmp_path.native(), trans_vec, dest_segment_num_rows);
1014
828
                if (!st.ok()) {
1015
2
                    error_handler(index_meta->index_id(), column_uniq_id);
1016
2
                    status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(st.msg());
1017
2
                }
1018
828
            } catch (CLuceneError& e) {
1019
0
                error_handler(index_meta->index_id(), column_uniq_id);
1020
0
                status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
1021
0
            } catch (const Exception& e) {
1022
0
                error_handler(index_meta->index_id(), column_uniq_id);
1023
0
                status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
1024
0
            }
1025
828
        }
1026
822
    }
1027
1028
    // check index compaction status. If status is not ok, we should return error and end this compaction round.
1029
228
    if (!status.ok()) {
1030
1
        return status;
1031
1
    }
1032
228
    LOG(INFO) << "succeed to do index compaction"
1033
227
              << ". tablet=" << _tablet->tablet_id()
1034
227
              << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
1035
1036
227
    return Status::OK();
1037
228
}
1038
1039
void Compaction::mark_skip_index_compaction(
1040
        const RowsetWriterContext& context,
1041
0
        const std::function<void(int64_t, int64_t)>& error_handler) {
1042
0
    for (auto&& column_uniq_id : context.columns_to_do_index_compaction) {
1043
0
        auto col = _cur_tablet_schema->column_by_uid(column_uniq_id);
1044
0
        auto index_metas = _cur_tablet_schema->inverted_indexs(col);
1045
0
        DBUG_EXECUTE_IF("Compaction::mark_skip_index_compaction_can_not_find_index_meta",
1046
0
                        { index_metas.clear(); })
1047
0
        if (index_metas.empty()) {
1048
0
            LOG(WARNING) << "mark skip index compaction, can not find index_meta for column"
1049
0
                         << ". tablet=" << _tablet->tablet_id()
1050
0
                         << ", column uniq id=" << column_uniq_id;
1051
0
            error_handler(-1, column_uniq_id);
1052
0
            continue;
1053
0
        }
1054
0
        for (const auto& index_meta : index_metas) {
1055
0
            error_handler(index_meta->index_id(), column_uniq_id);
1056
0
        }
1057
0
    }
1058
0
}
1059
1060
static bool check_rowset_has_inverted_index(const RowsetSharedPtr& src_rs, int32_t col_unique_id,
1061
                                            const BaseTabletSPtr& tablet,
1062
10.4k
                                            const TabletSchemaSPtr& cur_tablet_schema) {
1063
10.4k
    auto* rowset = static_cast<BetaRowset*>(src_rs.get());
1064
10.4k
    DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_is_skip_index_compaction",
1065
10.4k
                    { rowset->set_skip_index_compaction(col_unique_id); })
1066
10.4k
    if (rowset->is_skip_index_compaction(col_unique_id)) {
1067
1
        LOG(WARNING) << "tablet[" << tablet->tablet_id() << "] rowset[" << rowset->rowset_id()
1068
1
                     << "] column_unique_id[" << col_unique_id
1069
1
                     << "] skip inverted index compaction due to last failure";
1070
1
        return false;
1071
1
    }
1072
1073
10.4k
    auto fs = rowset->rowset_meta()->fs();
1074
10.4k
    DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_get_fs_error", { fs = nullptr; })
1075
10.4k
    if (!fs) {
1076
2
        LOG(WARNING) << "get fs failed, resource_id=" << rowset->rowset_meta()->resource_id();
1077
2
        return false;
1078
2
    }
1079
1080
10.4k
    auto index_metas = rowset->tablet_schema()->inverted_indexs(col_unique_id);
1081
10.4k
    DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_index_meta_nullptr",
1082
10.4k
                    { index_metas.clear(); })
1083
10.4k
    if (index_metas.empty()) {
1084
0
        LOG(WARNING) << "tablet[" << tablet->tablet_id() << "] column_unique_id[" << col_unique_id
1085
0
                     << "] index meta is null, will skip index compaction";
1086
0
        return false;
1087
0
    }
1088
10.6k
    for (const auto& index_meta : index_metas) {
1089
14.1k
        for (auto i = 0; i < rowset->num_segments(); i++) {
1090
            // TODO: inverted_index_path
1091
3.46k
            auto seg_path = rowset->segment_path(i);
1092
3.46k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_seg_path_nullptr", {
1093
3.46k
                seg_path = ResultError(Status::Error<ErrorCode::INTERNAL_ERROR>(
1094
3.46k
                        "construct_skip_inverted_index_seg_path_nullptr"));
1095
3.46k
            })
1096
3.46k
            if (!seg_path) {
1097
0
                LOG(WARNING) << seg_path.error();
1098
0
                return false;
1099
0
            }
1100
1101
3.46k
            std::string index_file_path;
1102
3.46k
            try {
1103
3.46k
                auto index_file_reader = std::make_unique<IndexFileReader>(
1104
3.46k
                        fs,
1105
3.46k
                        std::string {InvertedIndexDescriptor::get_index_file_path_prefix(
1106
3.46k
                                seg_path.value())},
1107
3.46k
                        cur_tablet_schema->get_inverted_index_storage_format(),
1108
3.46k
                        rowset->rowset_meta()->inverted_index_file_info(i), tablet->tablet_id());
1109
3.46k
                auto st = index_file_reader->init(config::inverted_index_read_buffer_size);
1110
3.46k
                index_file_path = index_file_reader->get_index_file_path(index_meta);
1111
3.46k
                DBUG_EXECUTE_IF(
1112
3.46k
                        "Compaction::construct_skip_inverted_index_index_file_reader_init_"
1113
3.46k
                        "status_not_ok",
1114
3.46k
                        {
1115
3.46k
                            st = Status::Error<ErrorCode::INTERNAL_ERROR>(
1116
3.46k
                                    "debug point: "
1117
3.46k
                                    "construct_skip_inverted_index_index_file_reader_init_"
1118
3.46k
                                    "status_"
1119
3.46k
                                    "not_ok");
1120
3.46k
                        })
1121
3.46k
                if (!st.ok()) {
1122
0
                    LOG(WARNING) << "init index " << index_file_path << " error:" << st;
1123
0
                    return false;
1124
0
                }
1125
1126
                // check index meta
1127
3.46k
                auto result = index_file_reader->open(index_meta);
1128
3.46k
                DBUG_EXECUTE_IF(
1129
3.46k
                        "Compaction::construct_skip_inverted_index_index_file_reader_open_"
1130
3.46k
                        "error",
1131
3.46k
                        {
1132
3.46k
                            result = ResultError(
1133
3.46k
                                    Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
1134
3.46k
                                            "CLuceneError occur when open idx file"));
1135
3.46k
                        })
1136
3.46k
                if (!result.has_value()) {
1137
0
                    LOG(WARNING) << "open index " << index_file_path << " error:" << result.error();
1138
0
                    return false;
1139
0
                }
1140
3.46k
                auto reader = std::move(result.value());
1141
3.46k
                std::vector<std::string> files;
1142
3.46k
                reader->list(&files);
1143
3.46k
                reader->close();
1144
3.46k
                DBUG_EXECUTE_IF(
1145
3.46k
                        "Compaction::construct_skip_inverted_index_index_reader_close_"
1146
3.46k
                        "error",
1147
3.46k
                        { _CLTHROWA(CL_ERR_IO, "debug point: reader close error"); })
1148
1149
3.46k
                DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_index_files_count",
1150
3.46k
                                { files.clear(); })
1151
1152
                // why at least 3 files?
1153
                // a slice type index file has at least 3 files: null_bitmap, segments_N, segments.gen
1154
3.46k
                if (files.size() < 3) {
1155
0
                    LOG(WARNING) << "tablet[" << tablet->tablet_id() << "] column_unique_id["
1156
0
                                 << col_unique_id << "]," << index_file_path
1157
0
                                 << " is corrupted, will skip index compaction";
1158
0
                    return false;
1159
0
                }
1160
3.46k
            } catch (CLuceneError& err) {
1161
0
                LOG(WARNING) << "tablet[" << tablet->tablet_id() << "] column_unique_id["
1162
0
                             << col_unique_id << "] open index[" << index_file_path
1163
0
                             << "], will skip index compaction, error:" << err.what();
1164
0
                return false;
1165
0
            }
1166
3.46k
        }
1167
10.6k
    }
1168
10.4k
    return true;
1169
10.4k
}
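The guard above that requires files.size() >= 3 encodes the minimum layout of a slice-type inverted index: null_bitmap, segments_N and segments.gen must all be present, otherwise the index is treated as corrupted and the column is excluded from index compaction. A minimal standalone sketch of that check follows; the helper name and the way the file list is obtained are illustrative assumptions, not Doris APIs.

#include <cstddef>
#include <string>
#include <vector>

// A slice-type inverted index contains at least three files:
// null_bitmap, segments_N and segments.gen. Fewer files means the index is
// corrupted, so the rowset/column pair is excluded from index compaction.
bool looks_like_valid_slice_index(const std::vector<std::string>& files) {
    constexpr std::size_t kMinSliceIndexFiles = 3;
    return files.size() >= kMinSliceIndexFiles;
}

When this kind of check fails for any segment of any input rowset, check_rowset_has_inverted_index returns false and the column stays out of columns_to_do_index_compaction.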
1170
1171
6.60k
void Compaction::construct_index_compaction_columns(RowsetWriterContext& ctx) {
1172
6.60k
    for (const auto& index : _cur_tablet_schema->inverted_indexes()) {
1173
2.30k
        auto col_unique_ids = index->col_unique_ids();
1174
        // check if the column unique id list is empty to avoid a crash
1175
2.30k
        if (col_unique_ids.empty()) {
1176
1
            LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] index[" << index->index_id()
1177
1
                         << "] has no column unique id, will skip index compaction."
1178
1
                         << " tablet_schema=" << _cur_tablet_schema->dump_full_schema();
1179
1
            continue;
1180
1
        }
1181
2.30k
        auto col_unique_id = col_unique_ids[0];
1182
2.30k
        if (!_cur_tablet_schema->has_column_unique_id(col_unique_id)) {
1183
0
            LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
1184
0
                         << col_unique_id << "] not found, will skip index compaction";
1185
0
            continue;
1186
0
        }
1187
        // Avoid doing inverted index compaction on non-slice type columns
1188
2.30k
        if (!field_is_slice_type(_cur_tablet_schema->column_by_uid(col_unique_id).type())) {
1189
814
            continue;
1190
814
        }
1191
1192
        // if index properties are different, index compaction may need to be skipped.
1193
1.48k
        bool is_continue = false;
1194
1.48k
        std::optional<std::map<std::string, std::string>> first_properties;
1195
10.4k
        for (const auto& rowset : _input_rowsets) {
1196
10.4k
            auto tablet_indexs = rowset->tablet_schema()->inverted_indexs(col_unique_id);
1197
            // no inverted index or index id is different from current index id
1198
10.4k
            auto it = std::find_if(tablet_indexs.begin(), tablet_indexs.end(),
1199
10.5k
                                   [&index](const auto& tablet_index) {
1200
10.5k
                                       return tablet_index->index_id() == index->index_id();
1201
10.5k
                                   });
1202
10.4k
            if (it != tablet_indexs.end()) {
1203
10.4k
                const auto* tablet_index = *it;
1204
10.4k
                auto properties = tablet_index->properties();
1205
10.4k
                if (!first_properties.has_value()) {
1206
1.47k
                    first_properties = properties;
1207
9.00k
                } else {
1208
9.00k
                    DBUG_EXECUTE_IF(
1209
9.00k
                            "Compaction::do_inverted_index_compaction_index_properties_different",
1210
9.00k
                            { properties.emplace("dummy_key", "dummy_value"); })
1211
9.00k
                    if (properties != first_properties.value()) {
1212
3
                        is_continue = true;
1213
3
                        break;
1214
3
                    }
1215
9.00k
                }
1216
10.4k
            } else {
1217
3
                is_continue = true;
1218
3
                break;
1219
3
            }
1220
10.4k
        }
1221
1.48k
        if (is_continue) {
1222
5
            continue;
1223
5
        }
1224
1.48k
        bool all_have_inverted_index =
1225
1.48k
                std::all_of(_input_rowsets.begin(), _input_rowsets.end(),
1226
10.4k
                            [this, col_unique_id](const RowsetSharedPtr& src_rs) {
1227
10.4k
                                return check_rowset_has_inverted_index(src_rs, col_unique_id,
1228
10.4k
                                                                       _tablet, _cur_tablet_schema);
1229
10.4k
                            });
1230
1231
1.48k
        if (all_have_inverted_index) {
1232
1.47k
            ctx.columns_to_do_index_compaction.insert(col_unique_id);
1233
1.47k
        }
1234
1.48k
    }
1235
6.60k
}
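construct_index_compaction_columns only selects a column when every input rowset carries the same index with identical properties and all of the index files are physically readable. A self-contained sketch of the property-consistency part of that rule, using plain maps in place of TabletIndex objects; the type alias and function name here are illustrative assumptions.

#include <map>
#include <optional>
#include <string>
#include <vector>

using IndexProperties = std::map<std::string, std::string>;

// Mirrors the first_properties pattern above: the first rowset's properties become
// the reference, every later rowset must match it, and a rowset that lacks the index
// entirely disqualifies the column.
bool index_properties_consistent(
        const std::vector<std::optional<IndexProperties>>& per_rowset_properties) {
    std::optional<IndexProperties> reference;
    for (const auto& props : per_rowset_properties) {
        if (!props.has_value()) {
            return false; // this rowset does not carry the index
        }
        if (!reference.has_value()) {
            reference = props; // remember the first rowset's properties
        } else if (*props != *reference) {
            return false; // properties diverge, skip index compaction for this column
        }
    }
    return true;
}

Only after this holds does the code run the std::all_of pass over check_rowset_has_inverted_index to verify the index files of every input rowset.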
1236
1237
0
Status CompactionMixin::update_delete_bitmap() {
1238
    // for mow with cluster keys, compaction reads data with the delete bitmap
1239
    // if the tablet is not ready (such as during schema change), we need to update the delete bitmap
1240
0
    {
1241
0
        std::shared_lock meta_rlock(_tablet->get_header_lock());
1242
0
        if (_tablet->tablet_state() != TABLET_NOTREADY) {
1243
0
            return Status::OK();
1244
0
        }
1245
0
    }
1246
0
    OlapStopWatch watch;
1247
0
    std::vector<RowsetSharedPtr> rowsets;
1248
0
    for (const auto& rowset : _input_rowsets) {
1249
0
        std::lock_guard rwlock(tablet()->get_rowset_update_lock());
1250
0
        std::shared_lock rlock(_tablet->get_header_lock());
1251
0
        Status st = _tablet->update_delete_bitmap_without_lock(_tablet, rowset, &rowsets);
1252
0
        if (!st.ok()) {
1253
0
            LOG(INFO) << "failed update_delete_bitmap_without_lock for tablet_id="
1254
0
                      << _tablet->tablet_id() << ", st=" << st.to_string();
1255
0
            return st;
1256
0
        }
1257
0
        rowsets.push_back(rowset);
1258
0
    }
1259
0
    LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id()
1260
0
              << ", rowsets: " << _input_rowsets.size() << ", cost: " << watch.get_elapse_time_us()
1261
0
              << "(us)";
1262
0
    return Status::OK();
1263
0
}
1264
1265
157
Status CloudCompactionMixin::update_delete_bitmap() {
1266
    // for mow with cluster keys, compaction reads data with the delete bitmap
1267
    // if the tablet is not ready (such as during schema change), we need to update the delete bitmap
1268
157
    {
1269
157
        std::shared_lock meta_rlock(_tablet->get_header_lock());
1270
157
        if (_tablet->tablet_state() != TABLET_NOTREADY) {
1271
157
            return Status::OK();
1272
157
        }
1273
157
    }
1274
0
    OlapStopWatch watch;
1275
0
    std::vector<RowsetSharedPtr> rowsets;
1276
0
    for (const auto& rowset : _input_rowsets) {
1277
0
        Status st = _tablet->update_delete_bitmap_without_lock(_tablet, rowset, &rowsets);
1278
0
        if (!st.ok()) {
1279
0
            LOG(INFO) << "failed update_delete_bitmap_without_lock for tablet_id="
1280
0
                      << _tablet->tablet_id() << ", st=" << st.to_string();
1281
0
            return st;
1282
0
        }
1283
0
        rowsets.push_back(rowset);
1284
0
    }
1285
0
    LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id()
1286
0
              << ", rowsets: " << _input_rowsets.size() << ", cost: " << watch.get_elapse_time_us()
1287
0
              << "(us)";
1288
0
    return Status::OK();
1289
0
}
1290
1291
1.47k
Status CompactionMixin::construct_output_rowset_writer(RowsetWriterContext& ctx) {
1292
    // only do index compaction for dup_keys and unique_keys with mow enabled
1293
1.47k
    if (_enable_inverted_index_compaction && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1294
1.45k
                                                _tablet->enable_unique_key_merge_on_write()) ||
1295
1.45k
                                               _tablet->keys_type() == KeysType::DUP_KEYS))) {
1296
1.42k
        construct_index_compaction_columns(ctx);
1297
1.42k
    }
1298
1.47k
    ctx.version = _output_version;
1299
1.47k
    ctx.rowset_state = VISIBLE;
1300
1.47k
    ctx.segments_overlap = NONOVERLAPPING;
1301
1.47k
    ctx.tablet_schema = _cur_tablet_schema;
1302
1.47k
    ctx.newest_write_timestamp = _newest_write_timestamp;
1303
1.47k
    ctx.write_type = DataWriteType::TYPE_COMPACTION;
1304
1.47k
    ctx.compaction_type = compaction_type();
1305
1.47k
    ctx.allow_packed_file = false;
1306
1.47k
    _output_rs_writer = DORIS_TRY(_tablet->create_rowset_writer(ctx, _is_vertical));
1307
1.47k
    _pending_rs_guard = _engine.add_pending_rowset(ctx);
1308
1.47k
    return Status::OK();
1309
1.47k
}
1310
1311
1.43k
Status CompactionMixin::modify_rowsets() {
1312
1.43k
    std::vector<RowsetSharedPtr> output_rowsets;
1313
1.43k
    output_rowsets.push_back(_output_rowset);
1314
1315
1.43k
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1316
1.43k
        _tablet->enable_unique_key_merge_on_write()) {
1317
641
        Version version = tablet()->max_version();
1318
641
        DeleteBitmap output_rowset_delete_bitmap(_tablet->tablet_id());
1319
641
        std::unique_ptr<RowLocationSet> missed_rows;
1320
641
        if ((config::enable_missing_rows_correctness_check ||
1321
641
             config::enable_mow_compaction_correctness_check_core ||
1322
641
             config::enable_mow_compaction_correctness_check_fail) &&
1323
643
            !_allow_delete_in_cumu_compaction &&
1324
643
            compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1325
639
            missed_rows = std::make_unique<RowLocationSet>();
1326
639
            LOG(INFO) << "RowLocation Set inited succ for tablet:" << _tablet->tablet_id();
1327
639
        }
1328
641
        std::unique_ptr<std::map<RowsetSharedPtr, RowLocationPairList>> location_map;
1329
641
        if (config::enable_rowid_conversion_correctness_check &&
1330
641
            tablet()->tablet_schema()->cluster_key_uids().empty()) {
1331
0
            location_map = std::make_unique<std::map<RowsetSharedPtr, RowLocationPairList>>();
1332
0
            LOG(INFO) << "Location Map inited succ for tablet:" << _tablet->tablet_id();
1333
0
        }
1334
        // Convert the delete bitmap of the input rowsets to output rowset.
1335
        // New loads are not blocked, so some keys of input rowsets might
1336
        // be deleted in the meantime. We need to deal with the delete bitmap
1337
        // of incremental data later.
1338
        // TODO(LiaoXin): check if there are duplicate keys
1339
641
        std::size_t missed_rows_size = 0;
1340
641
        tablet()->calc_compaction_output_rowset_delete_bitmap(
1341
641
                _input_rowsets, *_rowid_conversion, 0, version.second + 1, missed_rows.get(),
1342
641
                location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1343
641
                &output_rowset_delete_bitmap);
1344
643
        if (missed_rows) {
1345
643
            missed_rows_size = missed_rows->size();
1346
643
            std::size_t merged_missed_rows_size = _stats.merged_rows;
1347
643
            if (!_tablet->tablet_meta()->tablet_schema()->cluster_key_uids().empty()) {
1348
0
                merged_missed_rows_size += _stats.filtered_rows;
1349
0
            }
1350
1351
            // Suppose a heavy schema change process on BE converting tablet A to tablet B.
1352
            // 1. during schema change double write, new loads write [X-Y] on tablet B.
1353
            // 2. rowsets with version [a],[a+1],...,[b-1],[b] on tablet B are picked for cumu compaction(X<=a<b<=Y).(cumu compaction
1354
            //    on new tablet during schema change double write is allowed after https://github.com/apache/doris/pull/16470)
1355
            // 3. schema change removes all rowsets on tablet B before version Z(b<=Z<=Y) before it begins to convert historical rowsets.
1356
            // 4. schema change finishes.
1357
            // 5. cumu compaction begins on new tablet with version [a],...,[b]. If there are duplicate keys between these rowsets,
1358
            //    the compaction check will fail because these rowsets skipped delete bitmap calculation in the commit phase and
1359
            //    publish phase because tablet B is in NOT_READY state when writing.
1360
1361
            // Considering that the cumu compaction will eventually fail in this situation because `Tablet::modify_rowsets` will check if rowsets in
1362
            // `to_delete`(_input_rowsets) still exist in the tablet's `_rs_version_map`, we can just skip checking missed rows here.
1363
643
            bool need_to_check_missed_rows = true;
1364
643
            {
1365
643
                std::shared_lock rlock(_tablet->get_header_lock());
1366
643
                need_to_check_missed_rows =
1367
643
                        std::all_of(_input_rowsets.begin(), _input_rowsets.end(),
1368
5.11k
                                    [&](const RowsetSharedPtr& rowset) {
1369
5.11k
                                        return tablet()->rowset_exists_unlocked(rowset);
1370
5.11k
                                    });
1371
643
            }
1372
1373
643
            if (_tablet->tablet_state() == TABLET_RUNNING &&
1374
643
                merged_missed_rows_size != missed_rows_size && need_to_check_missed_rows) {
1375
0
                std::stringstream ss;
1376
0
                ss << "cumulative compaction: the merged rows(" << _stats.merged_rows
1377
0
                   << "), filtered rows(" << _stats.filtered_rows
1378
0
                   << ") is not equal to missed rows(" << missed_rows_size
1379
0
                   << ") in rowid conversion, tablet_id: " << _tablet->tablet_id()
1380
0
                   << ", table_id:" << _tablet->table_id();
1381
0
                if (missed_rows_size == 0) {
1382
0
                    ss << ", debug info: ";
1383
0
                    DeleteBitmap subset_map(_tablet->tablet_id());
1384
0
                    for (auto rs : _input_rowsets) {
1385
0
                        _tablet->tablet_meta()->delete_bitmap().subset(
1386
0
                                {rs->rowset_id(), 0, 0},
1387
0
                                {rs->rowset_id(), rs->num_segments(), version.second + 1},
1388
0
                                &subset_map);
1389
0
                        ss << "(rowset id: " << rs->rowset_id()
1390
0
                           << ", delete bitmap cardinality: " << subset_map.cardinality() << ")";
1391
0
                    }
1392
0
                    ss << ", version[0-" << version.second + 1 << "]";
1393
0
                }
1394
0
                std::string err_msg = fmt::format(
1395
0
                        "cumulative compaction: the merged rows({}), filtered rows({})"
1396
0
                        " is not equal to missed rows({}) in rowid conversion,"
1397
0
                        " tablet_id: {}, table_id:{}",
1398
0
                        _stats.merged_rows, _stats.filtered_rows, missed_rows_size,
1399
0
                        _tablet->tablet_id(), _tablet->table_id());
1400
0
                LOG(WARNING) << err_msg;
1401
0
                if (config::enable_mow_compaction_correctness_check_core) {
1402
0
                    CHECK(false) << err_msg;
1403
0
                } else if (config::enable_mow_compaction_correctness_check_fail) {
1404
0
                    return Status::InternalError<false>(err_msg);
1405
0
                } else {
1406
0
                    DCHECK(false) << err_msg;
1407
0
                }
1408
0
            }
1409
643
        }
1410
1411
641
        if (location_map) {
1412
0
            RETURN_IF_ERROR(tablet()->check_rowid_conversion(_output_rowset, *location_map));
1413
0
            location_map->clear();
1414
0
        }
1415
1416
641
        {
1417
641
            std::lock_guard<std::mutex> wrlock_(tablet()->get_rowset_update_lock());
1418
641
            std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1419
641
            SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1420
1421
            // Here we calculate the delete bitmaps of all rowsets which are committed but not published, to reduce the calculation pressure
1422
            // of publish phase.
1423
            // All rowsets which need recalculation have been published, so we don't need to acquire the lock.
1424
            // Step1: collect the delete bitmaps of all this tablet's committed rowsets
1425
641
            CommitTabletTxnInfoVec commit_tablet_txn_info_vec {};
1426
641
            _engine.txn_manager()->get_all_commit_tablet_txn_info_by_tablet(
1427
641
                    *tablet(), &commit_tablet_txn_info_vec);
1428
1429
            // Step2: calculate all rowsets' delete bitmaps which are published during compaction.
1430
641
            for (auto& it : commit_tablet_txn_info_vec) {
1431
6
                if (!_check_if_includes_input_rowsets(it.rowset_ids)) {
1432
                    // When calculating the delete bitmap of all committed rowsets relative to the compaction,
1433
                    // there may be cases where the compacted rowsets are newer than the committed rowsets.
1434
                    // At this time, row number conversion cannot be performed, otherwise data will be missing.
1435
                    // Therefore, we need to check if every committed rowset has calculated delete bitmap for
1436
                    // all compaction input rowsets.
1437
0
                    continue;
1438
0
                }
1439
6
                DeleteBitmap txn_output_delete_bitmap(_tablet->tablet_id());
1440
6
                tablet()->calc_compaction_output_rowset_delete_bitmap(
1441
6
                        _input_rowsets, *_rowid_conversion, 0, UINT64_MAX, missed_rows.get(),
1442
6
                        location_map.get(), *it.delete_bitmap.get(), &txn_output_delete_bitmap);
1443
6
                if (config::enable_merge_on_write_correctness_check) {
1444
6
                    RowsetIdUnorderedSet rowsetids;
1445
6
                    rowsetids.insert(_output_rowset->rowset_id());
1446
6
                    _tablet->add_sentinel_mark_to_delete_bitmap(&txn_output_delete_bitmap,
1447
6
                                                                rowsetids);
1448
6
                }
1449
6
                it.delete_bitmap->merge(txn_output_delete_bitmap);
1450
                // Step3: write back updated delete bitmap and tablet info.
1451
6
                it.rowset_ids.insert(_output_rowset->rowset_id());
1452
6
                _engine.txn_manager()->set_txn_related_delete_bitmap(
1453
6
                        it.partition_id, it.transaction_id, _tablet->tablet_id(),
1454
6
                        tablet()->tablet_uid(), true, it.delete_bitmap, it.rowset_ids,
1455
6
                        it.partial_update_info);
1456
6
            }
1457
1458
            // Convert the delete bitmap of the input rowsets to output rowset for
1459
            // incremental data.
1460
641
            tablet()->calc_compaction_output_rowset_delete_bitmap(
1461
641
                    _input_rowsets, *_rowid_conversion, version.second, UINT64_MAX,
1462
641
                    missed_rows.get(), location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1463
641
                    &output_rowset_delete_bitmap);
1464
1465
641
            if (location_map) {
1466
0
                RETURN_IF_ERROR(tablet()->check_rowid_conversion(_output_rowset, *location_map));
1467
0
            }
1468
1469
641
            tablet()->merge_delete_bitmap(output_rowset_delete_bitmap);
1470
641
            RETURN_IF_ERROR(tablet()->modify_rowsets(output_rowsets, _input_rowsets, true));
1471
641
        }
1472
795
    } else {
1473
795
        std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1474
795
        SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1475
795
        RETURN_IF_ERROR(tablet()->modify_rowsets(output_rowsets, _input_rowsets, true));
1476
795
    }
1477
1478
1.43k
    if (config::tablet_rowset_stale_sweep_by_size &&
1479
1.43k
        _tablet->tablet_meta()->all_stale_rs_metas().size() >=
1480
0
                config::tablet_rowset_stale_sweep_threshold_size) {
1481
0
        tablet()->delete_expired_stale_rowset();
1482
0
    }
1483
1484
1.43k
    int64_t cur_max_version = 0;
1485
1.43k
    {
1486
1.43k
        std::shared_lock rlock(_tablet->get_header_lock());
1487
1.43k
        cur_max_version = _tablet->max_version_unlocked();
1488
1.43k
        tablet()->save_meta();
1489
1.43k
    }
1490
1.43k
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1491
1.43k
        _tablet->enable_unique_key_merge_on_write()) {
1492
643
        auto st = TabletMetaManager::remove_old_version_delete_bitmap(
1493
643
                tablet()->data_dir(), _tablet->tablet_id(), cur_max_version);
1494
643
        if (!st.ok()) {
1495
0
            LOG(WARNING) << "failed to remove old version delete bitmap, st: " << st;
1496
0
        }
1497
643
    }
1498
1.43k
    DBUG_EXECUTE_IF("CumulativeCompaction.modify_rowsets.delete_expired_stale_rowset",
1499
1.43k
                    { tablet()->delete_expired_stale_rowset(); });
1500
1.43k
    _tablet->prefill_dbm_agg_cache_after_compaction(_output_rowset);
1501
1.43k
    return Status::OK();
1502
1.43k
}
1503
1504
bool CompactionMixin::_check_if_includes_input_rowsets(
1505
6
        const RowsetIdUnorderedSet& commit_rowset_ids_set) const {
1506
6
    std::vector<RowsetId> commit_rowset_ids {};
1507
6
    commit_rowset_ids.insert(commit_rowset_ids.end(), commit_rowset_ids_set.begin(),
1508
6
                             commit_rowset_ids_set.end());
1509
6
    std::sort(commit_rowset_ids.begin(), commit_rowset_ids.end());
1510
6
    std::vector<RowsetId> input_rowset_ids {};
1511
72
    for (const auto& rowset : _input_rowsets) {
1512
72
        input_rowset_ids.emplace_back(rowset->rowset_meta()->rowset_id());
1513
72
    }
1514
6
    std::sort(input_rowset_ids.begin(), input_rowset_ids.end());
1515
6
    return std::includes(commit_rowset_ids.begin(), commit_rowset_ids.end(),
1516
6
                         input_rowset_ids.begin(), input_rowset_ids.end());
1517
6
}
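_check_if_includes_input_rowsets is a plain subset test: both id lists are sorted and std::includes verifies that every compaction input rowset id appears among the committed transaction's rowset ids. A standalone sketch of the same pattern with integer ids, illustrative only.

#include <algorithm>
#include <cstdint>
#include <vector>

// Returns true when every id in input_ids also appears in commit_ids.
// std::includes requires both ranges to be sorted with the same ordering,
// hence the two sorts, exactly as in the member function above.
bool commit_covers_input(std::vector<int64_t> commit_ids, std::vector<int64_t> input_ids) {
    std::sort(commit_ids.begin(), commit_ids.end());
    std::sort(input_ids.begin(), input_ids.end());
    return std::includes(commit_ids.begin(), commit_ids.end(),
                         input_ids.begin(), input_ids.end());
}

// commit_covers_input({1, 2, 3, 4}, {2, 4}) -> true
// commit_covers_input({1, 2, 3, 4}, {2, 5}) -> false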
1518
1519
934
void CompactionMixin::update_compaction_level() {
1520
934
    auto* cumu_policy = tablet()->cumulative_compaction_policy();
1521
934
    if (cumu_policy && cumu_policy->name() == CUMULATIVE_TIME_SERIES_POLICY) {
1522
1
        int64_t compaction_level =
1523
1
                cumu_policy->get_compaction_level(tablet(), _input_rowsets, _output_rowset);
1524
1
        _output_rowset->rowset_meta()->set_compaction_level(compaction_level);
1525
1
    }
1526
934
}
1527
1528
7.90k
Status Compaction::check_correctness() {
1529
    // 1. check row number
1530
7.90k
    if (_input_row_num != _output_rowset->num_rows() + _stats.merged_rows + _stats.filtered_rows) {
1531
0
        return Status::Error<CHECK_LINES_ERROR>(
1532
0
                "row_num does not match between cumulative input and output! tablet={}, "
1533
0
                "input_row_num={}, merged_row_num={}, filtered_row_num={}, output_row_num={}",
1534
0
                _tablet->tablet_id(), _input_row_num, _stats.merged_rows, _stats.filtered_rows,
1535
0
                _output_rowset->num_rows());
1536
0
    }
1537
    // 2. check variant column path stats
1538
7.90k
    RETURN_IF_ERROR(variant_util::VariantCompactionUtil::check_path_stats(_input_rowsets,
1539
7.90k
                                                                          _output_rowset, _tablet));
1540
7.90k
    return Status::OK();
1541
7.90k
}
1542
1543
2.89k
int64_t CompactionMixin::get_compaction_permits() {
1544
2.89k
    int64_t permits = 0;
1545
20.8k
    for (auto&& rowset : _input_rowsets) {
1546
20.8k
        permits += rowset->rowset_meta()->get_compaction_score();
1547
20.8k
    }
1548
2.89k
    return permits;
1549
2.89k
}
1550
1551
0
int64_t CompactionMixin::calc_input_rowsets_total_size() const {
1552
0
    int64_t input_rowsets_total_size = 0;
1553
0
    for (const auto& rowset : _input_rowsets) {
1554
0
        const auto& rowset_meta = rowset->rowset_meta();
1555
0
        auto total_size = rowset_meta->total_disk_size();
1556
0
        input_rowsets_total_size += total_size;
1557
0
    }
1558
0
    return input_rowsets_total_size;
1559
0
}
1560
1561
0
int64_t CompactionMixin::calc_input_rowsets_row_num() const {
1562
0
    int64_t input_rowsets_row_num = 0;
1563
0
    for (const auto& rowset : _input_rowsets) {
1564
0
        const auto& rowset_meta = rowset->rowset_meta();
1565
0
        auto row_num = rowset_meta->num_rows();
1566
0
        input_rowsets_row_num += row_num;
1567
0
    }
1568
0
    return input_rowsets_row_num;
1569
0
}
1570
1571
8.34k
void Compaction::_load_segment_to_cache() {
1572
    // Load new rowset's segments to cache.
1573
8.34k
    SegmentCacheHandle handle;
1574
8.34k
    auto st = SegmentLoader::instance()->load_segments(
1575
8.34k
            std::static_pointer_cast<BetaRowset>(_output_rowset), &handle, true);
1576
8.34k
    if (!st.ok()) {
1577
0
        LOG(WARNING) << "failed to load segment to cache! output rowset version="
1578
0
                     << _output_rowset->start_version() << "-" << _output_rowset->end_version()
1579
0
                     << ".";
1580
0
    }
1581
8.34k
}
1582
1583
6.92k
Status CloudCompactionMixin::build_basic_info() {
1584
6.92k
    _output_version =
1585
6.92k
            Version(_input_rowsets.front()->start_version(), _input_rowsets.back()->end_version());
1586
1587
6.92k
    _newest_write_timestamp = _input_rowsets.back()->newest_write_timestamp();
1588
1589
6.92k
    std::vector<RowsetMetaSharedPtr> rowset_metas(_input_rowsets.size());
1590
6.92k
    std::transform(_input_rowsets.begin(), _input_rowsets.end(), rowset_metas.begin(),
1591
54.4k
                   [](const RowsetSharedPtr& rowset) { return rowset->rowset_meta(); });
1592
6.92k
    if (is_index_change_compaction()) {
1593
839
        RETURN_IF_ERROR(rebuild_tablet_schema());
1594
6.09k
    } else {
1595
6.09k
        _cur_tablet_schema = _tablet->tablet_schema_with_merged_max_schema_version(rowset_metas);
1596
6.09k
    }
1597
1598
    // if enable_vertical_compact_variant_subcolumns is true, we need to compact the variant subcolumns in separate column groups
1599
    // so get_extended_compaction_schema will extend the schema for variant columns
1600
6.94k
    if (_enable_vertical_compact_variant_subcolumns) {
1601
6.94k
        RETURN_IF_ERROR(variant_util::VariantCompactionUtil::get_extended_compaction_schema(
1602
6.94k
                _input_rowsets, _cur_tablet_schema));
1603
6.94k
    }
1604
6.92k
    return Status::OK();
1605
6.92k
}
1606
1607
6.93k
int64_t CloudCompactionMixin::get_compaction_permits() {
1608
6.93k
    int64_t permits = 0;
1609
54.4k
    for (auto&& rowset : _input_rowsets) {
1610
54.4k
        permits += rowset->rowset_meta()->get_compaction_score();
1611
54.4k
    }
1612
6.93k
    return permits;
1613
6.93k
}
1614
1615
CloudCompactionMixin::CloudCompactionMixin(CloudStorageEngine& engine, CloudTabletSPtr tablet,
1616
                                           const std::string& label)
1617
109k
        : Compaction(tablet, label), _engine(engine) {
1618
109k
    auto uuid = UUIDGenerator::instance()->next_uuid();
1619
109k
    std::stringstream ss;
1620
109k
    ss << uuid;
1621
109k
    _uuid = ss.str();
1622
109k
}
1623
1624
6.95k
Status CloudCompactionMixin::execute_compact_impl(int64_t permits) {
1625
6.95k
    OlapStopWatch watch;
1626
1627
6.95k
    RETURN_IF_ERROR(build_basic_info());
1628
1629
6.95k
    LOG(INFO) << "start " << compaction_name() << ". tablet=" << _tablet->tablet_id()
1630
6.95k
              << ", output_version=" << _output_version << ", permits: " << permits;
1631
1632
6.95k
    RETURN_IF_ERROR(merge_input_rowsets());
1633
1634
6.95k
    DBUG_EXECUTE_IF("CloudFullCompaction::modify_rowsets.wrong_rowset_id", {
1635
6.95k
        DCHECK(compaction_type() == ReaderType::READER_FULL_COMPACTION);
1636
6.95k
        RowsetId id;
1637
6.95k
        id.version = 2;
1638
6.95k
        id.hi = _output_rowset->rowset_meta()->rowset_id().hi + ((int64_t)(1) << 56);
1639
6.95k
        id.mi = _output_rowset->rowset_meta()->rowset_id().mi;
1640
6.95k
        id.lo = _output_rowset->rowset_meta()->rowset_id().lo;
1641
6.95k
        _output_rowset->rowset_meta()->set_rowset_id(id);
1642
6.95k
        LOG(INFO) << "[Debug wrong rowset id]:"
1643
6.95k
                  << _output_rowset->rowset_meta()->rowset_id().to_string();
1644
6.95k
    })
1645
1646
    // Currently, the compaction level is only updated under the time_series policy.
1647
6.95k
    update_compaction_level();
1648
1649
6.95k
    RETURN_IF_ERROR(_engine.meta_mgr().commit_rowset(*_output_rowset->rowset_meta().get(), _uuid));
1650
1651
    // 4. modify rowsets in memory
1652
6.95k
    RETURN_IF_ERROR(modify_rowsets());
1653
1654
    // update compaction status data
1655
6.91k
    auto tablet = std::static_pointer_cast<CloudTablet>(_tablet);
1656
6.91k
    tablet->local_read_time_us.fetch_add(_stats.cloud_local_read_time);
1657
6.91k
    tablet->remote_read_time_us.fetch_add(_stats.cloud_remote_read_time);
1658
6.91k
    tablet->exec_compaction_time_us.fetch_add(watch.get_elapse_time_us());
1659
1660
6.91k
    return Status::OK();
1661
6.95k
}
1662
1663
6.82k
int64_t CloudCompactionMixin::initiator() const {
1664
6.82k
    return HashUtil::hash64(_uuid.data(), _uuid.size(), 0) & std::numeric_limits<int64_t>::max();
1665
6.82k
}
1666
1667
namespace cloud {
1668
size_t truncate_rowsets_by_txn_size(std::vector<RowsetSharedPtr>& rowsets, int64_t& kept_size_bytes,
1669
7.22k
                                    int64_t& truncated_size_bytes) {
1670
7.22k
    if (rowsets.empty()) {
1671
1
        kept_size_bytes = 0;
1672
1
        truncated_size_bytes = 0;
1673
1
        return 0;
1674
1
    }
1675
1676
7.22k
    int64_t max_size = config::compaction_txn_max_size_bytes;
1677
7.22k
    int64_t cumulative_meta_size = 0;
1678
7.22k
    size_t keep_count = 0;
1679
1680
64.2k
    for (size_t i = 0; i < rowsets.size(); ++i) {
1681
57.0k
        const auto& rs = rowsets[i];
1682
1683
        // Estimate rowset meta size using doris_rowset_meta_to_cloud
1684
57.0k
        auto cloud_meta = cloud::doris_rowset_meta_to_cloud(rs->rowset_meta()->get_rowset_pb(true));
1685
57.0k
        int64_t rowset_meta_size = cloud_meta.ByteSizeLong();
1686
1687
57.0k
        cumulative_meta_size += rowset_meta_size;
1688
1689
57.0k
        if (keep_count > 0 && cumulative_meta_size > max_size) {
1690
            // Rollback and stop
1691
4
            cumulative_meta_size -= rowset_meta_size;
1692
4
            break;
1693
4
        }
1694
1695
57.0k
        keep_count++;
1696
57.0k
    }
1697
1698
    // Ensure at least 1 rowset is kept
1699
7.22k
    if (keep_count == 0) {
1700
0
        keep_count = 1;
1701
        // Recalculate size for the first rowset
1702
0
        const auto& rs = rowsets[0];
1703
0
        auto cloud_meta = cloud::doris_rowset_meta_to_cloud(rs->rowset_meta()->get_rowset_pb());
1704
0
        cumulative_meta_size = cloud_meta.ByteSizeLong();
1705
0
    }
1706
1707
    // Calculate truncated size
1708
7.22k
    int64_t truncated_total_size = 0;
1709
7.22k
    size_t truncated_count = rowsets.size() - keep_count;
1710
7.22k
    if (truncated_count > 0) {
1711
35
        for (size_t i = keep_count; i < rowsets.size(); ++i) {
1712
31
            auto cloud_meta =
1713
31
                    cloud::doris_rowset_meta_to_cloud(rowsets[i]->rowset_meta()->get_rowset_pb());
1714
31
            truncated_total_size += cloud_meta.ByteSizeLong();
1715
31
        }
1716
4
        rowsets.resize(keep_count);
1717
4
    }
1718
1719
7.22k
    kept_size_bytes = cumulative_meta_size;
1720
7.22k
    truncated_size_bytes = truncated_total_size;
1721
7.22k
    return truncated_count;
1722
7.22k
}
1723
} // namespace cloud
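truncate_rowsets_by_txn_size keeps the longest prefix of input rowsets whose estimated serialized meta size fits within config::compaction_txn_max_size_bytes, always retains at least one rowset, and reports the kept and truncated byte totals. A simplified sketch of the same heuristic over plain byte sizes; the function name and parameters are illustrative, not the Doris API.

#include <cstddef>
#include <cstdint>
#include <vector>

// Keeps the longest prefix of meta_sizes whose running sum stays within max_bytes,
// but never drops everything: the first element is always kept even if it alone
// exceeds the budget. Returns the number of truncated (dropped) entries.
std::size_t truncate_by_size(std::vector<int64_t>& meta_sizes, int64_t max_bytes,
                             int64_t& kept_bytes, int64_t& truncated_bytes) {
    kept_bytes = 0;
    truncated_bytes = 0;
    std::size_t keep_count = 0;
    for (int64_t sz : meta_sizes) {
        if (keep_count > 0 && kept_bytes + sz > max_bytes) {
            break; // stop before the element that would exceed the budget
        }
        kept_bytes += sz;
        ++keep_count;
    }
    for (std::size_t i = keep_count; i < meta_sizes.size(); ++i) {
        truncated_bytes += meta_sizes[i];
    }
    std::size_t truncated_count = meta_sizes.size() - keep_count;
    meta_sizes.resize(keep_count);
    return truncated_count;
}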
1724
1725
6.36k
size_t CloudCompactionMixin::apply_txn_size_truncation_and_log(const std::string& compaction_name) {
1726
6.36k
    if (_input_rowsets.empty()) {
1727
1
        return 0;
1728
1
    }
1729
1730
6.35k
    int64_t original_count = _input_rowsets.size();
1731
6.35k
    int64_t original_start_version = _input_rowsets.front()->start_version();
1732
6.35k
    int64_t original_end_version = _input_rowsets.back()->end_version();
1733
1734
6.35k
    int64_t final_size = 0;
1735
6.35k
    int64_t truncated_size = 0;
1736
6.35k
    size_t truncated_count =
1737
6.35k
            cloud::truncate_rowsets_by_txn_size(_input_rowsets, final_size, truncated_size);
1738
1739
6.35k
    if (truncated_count > 0) {
1740
2
        int64_t original_size = final_size + truncated_size;
1741
2
        LOG(INFO) << compaction_name << " txn size estimation truncate"
1742
2
                  << ", tablet_id=" << _tablet->tablet_id() << ", original_version_range=["
1743
2
                  << original_start_version << "-" << original_end_version
1744
2
                  << "], final_version_range=[" << _input_rowsets.front()->start_version() << "-"
1745
2
                  << _input_rowsets.back()->end_version()
1746
2
                  << "], original_rowset_count=" << original_count
1747
2
                  << ", final_rowset_count=" << _input_rowsets.size()
1748
2
                  << ", truncated_rowset_count=" << truncated_count
1749
2
                  << ", original_size_bytes=" << original_size
1750
2
                  << ", final_size_bytes=" << final_size
1751
2
                  << ", truncated_size_bytes=" << truncated_size
1752
2
                  << ", threshold_bytes=" << config::compaction_txn_max_size_bytes;
1753
2
    }
1754
1755
6.35k
    return truncated_count;
1756
6.36k
}
1757
1758
6.86k
Status CloudCompactionMixin::execute_compact() {
1759
6.86k
    int64_t profile_start_time_ms = UnixMillis();
1760
6.86k
    TEST_INJECTION_POINT("Compaction::do_compaction");
1761
6.86k
    int64_t permits = get_compaction_permits();
1762
6.86k
    HANDLE_EXCEPTION_IF_CATCH_EXCEPTION(
1763
6.86k
            execute_compact_impl(permits), [&](const doris::Exception& ex) {
1764
6.86k
                auto st = garbage_collection();
1765
6.86k
                if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1766
6.86k
                    _tablet->enable_unique_key_merge_on_write() && !st.ok()) {
1767
                    // if compaction fails, be will try to abort the compaction, and the delete bitmap
1768
                    // lock is released if the abort succeeds; but if the abort fails, the delete bitmap
1769
                    // lock is not released. In this situation, be needs to send this rpc to ms
1770
                    // to try to release the delete bitmap lock.
1771
6.86k
                    _engine.meta_mgr().remove_delete_bitmap_update_lock(
1772
6.86k
                            _tablet->table_id(), COMPACTION_DELETE_BITMAP_LOCK_ID, initiator(),
1773
6.86k
                            _tablet->tablet_id());
1774
6.86k
                }
1775
6.86k
                submit_profile_record(false, profile_start_time_ms, ex.what());
1776
6.92k
            });
1777
1778
6.92k
    DorisMetrics::instance()->remote_compaction_read_rows_total->increment(_input_row_num);
1779
6.92k
    DorisMetrics::instance()->remote_compaction_write_rows_total->increment(
1780
6.92k
            _output_rowset->num_rows());
1781
6.92k
    DorisMetrics::instance()->remote_compaction_write_bytes_total->increment(
1782
6.92k
            _output_rowset->total_disk_size());
1783
1784
6.92k
    _load_segment_to_cache();
1785
6.92k
    submit_profile_record(true, profile_start_time_ms);
1786
6.92k
    return Status::OK();
1787
6.86k
}
1788
1789
0
Status CloudCompactionMixin::modify_rowsets() {
1790
0
    return Status::OK();
1791
0
}
1792
1793
6.96k
Status CloudCompactionMixin::set_storage_resource_from_input_rowsets(RowsetWriterContext& ctx) {
1794
    // Set storage resource from input rowsets by iterating backwards to find the first rowset
1795
    // with non-empty resource_id. This handles two scenarios:
1796
    // 1. Hole rowsets compaction: Multiple hole rowsets may lack storage resource.
1797
    //    Example: [0-1, 2-2, 3-3, 4-4, 5-5] where 2-5 are hole rowsets.
1798
    //    If 0-1 lacks resource_id, then 2-5 also lack resource_id.
1799
    // 2. Schema change: New tablet may have later version empty rowsets without resource_id,
1800
    //    but middle rowsets get resource_id after historical rowsets are converted.
1801
    //    We iterate backwards to find the most recent rowset with valid resource_id.
1802
1803
6.96k
    for (const auto& rowset : std::ranges::reverse_view(_input_rowsets)) {
1804
6.96k
        const auto& resource_id = rowset->rowset_meta()->resource_id();
1805
1806
6.97k
        if (!resource_id.empty()) {
1807
6.97k
            ctx.storage_resource = *DORIS_TRY(rowset->rowset_meta()->remote_storage_resource());
1808
6.97k
            return Status::OK();
1809
6.97k
        }
1810
1811
        // Validate that non-empty rowsets (num_segments > 0) must have valid resource_id
1812
        // Only hole rowsets or empty rowsets are allowed to have empty resource_id
1813
18.4E
        if (rowset->num_segments() > 0) {
1814
0
            auto error_msg = fmt::format(
1815
0
                    "Non-empty rowset must have valid resource_id. "
1816
0
                    "rowset_id={}, version=[{}-{}], is_hole_rowset={}, num_segments={}, "
1817
0
                    "tablet_id={}, table_id={}",
1818
0
                    rowset->rowset_id().to_string(), rowset->start_version(), rowset->end_version(),
1819
0
                    rowset->is_hole_rowset(), rowset->num_segments(), _tablet->tablet_id(),
1820
0
                    _tablet->table_id());
1821
1822
0
#ifndef BE_TEST
1823
0
            DCHECK(false) << error_msg;
1824
0
#endif
1825
1826
0
            return Status::InternalError<false>(error_msg);
1827
0
        }
1828
18.4E
    }
1829
1830
18.4E
    return Status::OK();
1831
6.96k
}
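The backward scan above amounts to: reuse the storage resource of the most recent input rowset that has one, tolerate hole or empty rowsets without a resource_id, and treat any rowset that has data but no resource_id as an error. A distilled sketch of that selection; the struct, field names and helper are illustrative assumptions.

#include <optional>
#include <ranges>
#include <string>
#include <vector>

struct RowsetInfo {
    std::string resource_id; // empty for hole rowsets and empty rowsets
    int num_segments = 0;
};

// Walks rowsets from newest to oldest and returns the first non-empty resource_id.
// A rowset with data (num_segments > 0) but no resource_id is flagged as invalid.
std::optional<std::string> pick_storage_resource(const std::vector<RowsetInfo>& rowsets,
                                                 bool& invalid_rowset_found) {
    invalid_rowset_found = false;
    for (const auto& rs : std::ranges::reverse_view(rowsets)) {
        if (!rs.resource_id.empty()) {
            return rs.resource_id;
        }
        if (rs.num_segments > 0) {
            invalid_rowset_found = true; // non-empty rowset must carry a resource_id
            return std::nullopt;
        }
    }
    return std::nullopt; // all rowsets were holes or empty; caller keeps the default
}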
1832
1833
6.96k
Status CloudCompactionMixin::construct_output_rowset_writer(RowsetWriterContext& ctx) {
1834
    // only do index compaction for dup_keys and unique_keys with mow enabled
1835
6.96k
    if (_enable_inverted_index_compaction && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1836
6.11k
                                                _tablet->enable_unique_key_merge_on_write()) ||
1837
6.11k
                                               _tablet->keys_type() == KeysType::DUP_KEYS))) {
1838
5.18k
        construct_index_compaction_columns(ctx);
1839
5.18k
    }
1840
1841
    // Use the storage resource of the most recent input rowset that has one.
1842
6.96k
    RETURN_IF_ERROR(set_storage_resource_from_input_rowsets(ctx));
1843
1844
6.96k
    ctx.txn_id = boost::uuids::hash_value(UUIDGenerator::instance()->next_uuid()) &
1845
6.96k
                 std::numeric_limits<int64_t>::max(); // MUST be positive
1846
6.96k
    ctx.txn_expiration = _expiration;
1847
1848
6.96k
    ctx.version = _output_version;
1849
6.96k
    ctx.rowset_state = VISIBLE;
1850
6.96k
    ctx.segments_overlap = NONOVERLAPPING;
1851
6.96k
    ctx.tablet_schema = _cur_tablet_schema;
1852
6.96k
    ctx.newest_write_timestamp = _newest_write_timestamp;
1853
6.96k
    ctx.write_type = DataWriteType::TYPE_COMPACTION;
1854
6.96k
    ctx.compaction_type = compaction_type();
1855
6.96k
    ctx.allow_packed_file = false;
1856
1857
    // We presume that the data involved in cumulative compaction is sufficiently 'hot'
1858
    // and should always be retained in the cache.
1859
    // TODO(gavin): Ensure that the retention of hot data is implemented with precision.
1860
1861
6.96k
    ctx.write_file_cache = should_cache_compaction_output();
1862
6.96k
    ctx.file_cache_ttl_sec = _tablet->ttl_seconds();
1863
6.96k
    ctx.approximate_bytes_to_write = _input_rowsets_total_size;
1864
1865
    // Set fine-grained control: only write index files to cache if configured
1866
6.96k
    ctx.compaction_output_write_index_only = should_enable_compaction_cache_index_only(
1867
6.96k
            ctx.write_file_cache, compaction_type(),
1868
6.96k
            config::enable_file_cache_write_base_compaction_index_only,
1869
6.96k
            config::enable_file_cache_write_cumu_compaction_index_only);
1870
1871
6.96k
    ctx.tablet = _tablet;
1872
6.96k
    ctx.job_id = _uuid;
1873
1874
6.96k
    _output_rs_writer = DORIS_TRY(_tablet->create_rowset_writer(ctx, _is_vertical));
1875
6.96k
    RETURN_IF_ERROR(
1876
6.96k
            _engine.meta_mgr().prepare_rowset(*_output_rs_writer->rowset_meta().get(), _uuid));
1877
6.96k
    return Status::OK();
1878
6.96k
}
1879
1880
42
Status CloudCompactionMixin::garbage_collection() {
1881
42
    if (!config::enable_file_cache) {
1882
0
        return Status::OK();
1883
0
    }
1884
42
    if (_output_rs_writer) {
1885
42
        auto* beta_rowset_writer = dynamic_cast<BaseBetaRowsetWriter*>(_output_rs_writer.get());
1886
42
        DCHECK(beta_rowset_writer);
1887
42
        for (const auto& [_, file_writer] : beta_rowset_writer->get_file_writers()) {
1888
42
            auto file_key = io::BlockFileCache::hash(file_writer->path().filename().native());
1889
42
            auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key);
1890
42
            file_cache->remove_if_cached_async(file_key);
1891
42
        }
1892
42
        for (const auto& [_, index_writer] : beta_rowset_writer->index_file_writers()) {
1893
2
            for (const auto& file_name : index_writer->get_index_file_names()) {
1894
2
                auto file_key = io::BlockFileCache::hash(file_name);
1895
2
                auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key);
1896
2
                file_cache->remove_if_cached_async(file_key);
1897
2
            }
1898
2
        }
1899
42
    }
1900
42
    return Status::OK();
1901
42
}
1902
1903
6.95k
void CloudCompactionMixin::update_compaction_level() {
1904
    // for index change compaction, the compaction level should not change,
1905
    // because input rowset num is 1.
1906
6.95k
    if (is_index_change_compaction()) {
1907
850
        DCHECK(_input_rowsets.size() == 1);
1908
850
        _output_rowset->rowset_meta()->set_compaction_level(
1909
850
                _input_rowsets.back()->rowset_meta()->compaction_level());
1910
6.10k
    } else {
1911
6.10k
        auto compaction_policy = _tablet->tablet_meta()->compaction_policy();
1912
6.10k
        auto cumu_policy = _engine.cumu_compaction_policy(compaction_policy);
1913
6.11k
        if (cumu_policy && cumu_policy->name() == CUMULATIVE_TIME_SERIES_POLICY) {
1914
4
            int64_t compaction_level = cumu_policy->get_compaction_level(
1915
4
                    cloud_tablet(), _input_rowsets, _output_rowset);
1916
4
            _output_rowset->rowset_meta()->set_compaction_level(compaction_level);
1917
4
        }
1918
6.10k
    }
1919
6.95k
}
1920
1921
// should skip hole rowsets, otherwise the count will be wrong in ms
1922
6.94k
int64_t CloudCompactionMixin::num_input_rowsets() const {
1923
6.94k
    int64_t count = 0;
1924
54.8k
    for (const auto& r : _input_rowsets) {
1925
54.8k
        if (!r->is_hole_rowset()) {
1926
54.8k
            count++;
1927
54.8k
        }
1928
54.8k
    }
1929
6.94k
    return count;
1930
6.94k
}
1931
1932
6.98k
bool CloudCompactionMixin::should_cache_compaction_output() {
1933
6.98k
    if (compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1934
6.82k
        return true;
1935
6.82k
    }
1936
1937
160
    if (compaction_type() == ReaderType::READER_BASE_COMPACTION) {
1938
67
        double input_rowsets_hit_cache_ratio = 0.0;
1939
1940
67
        int64_t _input_rowsets_cached_size =
1941
67
                _input_rowsets_cached_data_size + _input_rowsets_cached_index_size;
1942
67
        if (_input_rowsets_total_size > 0) {
1943
60
            input_rowsets_hit_cache_ratio =
1944
60
                    double(_input_rowsets_cached_size) / double(_input_rowsets_total_size);
1945
60
        }
1946
1947
67
        LOG(INFO) << "CloudBaseCompaction should_cache_compaction_output"
1948
67
                  << ", tablet_id=" << _tablet->tablet_id()
1949
67
                  << ", input_rowsets_hit_cache_ratio=" << input_rowsets_hit_cache_ratio
1950
67
                  << ", _input_rowsets_cached_size=" << _input_rowsets_cached_size
1951
67
                  << ", _input_rowsets_total_size=" << _input_rowsets_total_size
1952
67
                  << ", enable_file_cache_keep_base_compaction_output="
1953
67
                  << config::enable_file_cache_keep_base_compaction_output
1954
67
                  << ", file_cache_keep_base_compaction_output_min_hit_ratio="
1955
67
                  << config::file_cache_keep_base_compaction_output_min_hit_ratio;
1956
1957
67
        if (config::enable_file_cache_keep_base_compaction_output) {
1958
0
            return true;
1959
0
        }
1960
1961
67
        if (input_rowsets_hit_cache_ratio >
1962
67
            config::file_cache_keep_base_compaction_output_min_hit_ratio) {
1963
41
            return true;
1964
41
        }
1965
67
    }
1966
119
    return false;
1967
160
}
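For base compaction the decision above reduces to: cache the output when enable_file_cache_keep_base_compaction_output forces it, or when a large enough fraction of the input bytes was already resident in the file cache. A compact sketch of that ratio test; the parameter names stand in for the config values and are illustrative only.

#include <cstdint>

// Mirrors the base-compaction branch above: cache the output when caching is forced,
// or when the inputs' cache hit ratio exceeds the configured minimum.
bool should_cache_base_compaction_output(int64_t cached_bytes, int64_t total_bytes,
                                         bool force_keep_output, double min_hit_ratio) {
    if (force_keep_output) {
        return true;
    }
    double hit_ratio = 0.0;
    if (total_bytes > 0) {
        hit_ratio = static_cast<double>(cached_bytes) / static_cast<double>(total_bytes);
    }
    return hit_ratio > min_hit_ratio;
}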
1968
1969
} // namespace doris