Coverage Report

Created: 2026-01-08 17:19

/root/doris/be/src/olap/compaction.cpp
Line | Count | Source
   1 |       | // Licensed to the Apache Software Foundation (ASF) under one
   2 |       | // or more contributor license agreements.  See the NOTICE file
   3 |       | // distributed with this work for additional information
   4 |       | // regarding copyright ownership.  The ASF licenses this file
   5 |       | // to you under the Apache License, Version 2.0 (the
   6 |       | // "License"); you may not use this file except in compliance
   7 |       | // with the License.  You may obtain a copy of the License at
   8 |       | //
   9 |       | //   http://www.apache.org/licenses/LICENSE-2.0
  10 |       | //
  11 |       | // Unless required by applicable law or agreed to in writing,
  12 |       | // software distributed under the License is distributed on an
  13 |       | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  14 |       | // KIND, either express or implied.  See the License for the
  15 |       | // specific language governing permissions and limitations
  16 |       | // under the License.
  17 |       |
  18 |       | #include "olap/compaction.h"
  19 |       |
  20 |       | #include <fmt/format.h>
  21 |       | #include <gen_cpp/olap_file.pb.h>
  22 |       | #include <glog/logging.h>
  23 |       |
  24 |       | #include <algorithm>
  25 |       | #include <atomic>
  26 |       | #include <cstdint>
  27 |       | #include <cstdlib>
  28 |       | #include <list>
  29 |       | #include <map>
  30 |       | #include <memory>
  31 |       | #include <mutex>
  32 |       | #include <nlohmann/json.hpp>
  33 |       | #include <numeric>
  34 |       | #include <ostream>
  35 |       | #include <set>
  36 |       | #include <shared_mutex>
  37 |       | #include <utility>
  38 |       |
  39 |       | #include "cloud/cloud_meta_mgr.h"
  40 |       | #include "cloud/cloud_storage_engine.h"
  41 |       | #include "cloud/cloud_tablet.h"
  42 |       | #include "cloud/pb_convert.h"
  43 |       | #include "common/config.h"
  44 |       | #include "common/status.h"
  45 |       | #include "cpp/sync_point.h"
  46 |       | #include "io/cache/block_file_cache_factory.h"
  47 |       | #include "io/fs/file_system.h"
  48 |       | #include "io/fs/file_writer.h"
  49 |       | #include "io/fs/remote_file_system.h"
  50 |       | #include "io/io_common.h"
  51 |       | #include "olap/collection_statistics.h"
  52 |       | #include "olap/cumulative_compaction.h"
  53 |       | #include "olap/cumulative_compaction_policy.h"
  54 |       | #include "olap/cumulative_compaction_time_series_policy.h"
  55 |       | #include "olap/data_dir.h"
  56 |       | #include "olap/olap_common.h"
  57 |       | #include "olap/olap_define.h"
  58 |       | #include "olap/rowset/beta_rowset.h"
  59 |       | #include "olap/rowset/beta_rowset_reader.h"
  60 |       | #include "olap/rowset/beta_rowset_writer.h"
  61 |       | #include "olap/rowset/rowset.h"
  62 |       | #include "olap/rowset/rowset_fwd.h"
  63 |       | #include "olap/rowset/rowset_meta.h"
  64 |       | #include "olap/rowset/rowset_writer.h"
  65 |       | #include "olap/rowset/rowset_writer_context.h"
  66 |       | #include "olap/rowset/segment_v2/index_file_reader.h"
  67 |       | #include "olap/rowset/segment_v2/index_file_writer.h"
  68 |       | #include "olap/rowset/segment_v2/inverted_index_compaction.h"
  69 |       | #include "olap/rowset/segment_v2/inverted_index_desc.h"
  70 |       | #include "olap/rowset/segment_v2/inverted_index_fs_directory.h"
  71 |       | #include "olap/storage_engine.h"
  72 |       | #include "olap/storage_policy.h"
  73 |       | #include "olap/tablet.h"
  74 |       | #include "olap/tablet_meta.h"
  75 |       | #include "olap/tablet_meta_manager.h"
  76 |       | #include "olap/task/engine_checksum_task.h"
  77 |       | #include "olap/txn_manager.h"
  78 |       | #include "olap/utils.h"
  79 |       | #include "runtime/memory/mem_tracker_limiter.h"
  80 |       | #include "runtime/thread_context.h"
  81 |       | #include "util/doris_metrics.h"
  82 |       | #include "util/pretty_printer.h"
  83 |       | #include "util/time.h"
  84 |       | #include "util/trace.h"
  85 |       | #include "vec/common/schema_util.h"
  86 |       |
  87 |       | using std::vector;
  88 |       |
  89 |       | namespace doris {
  90 |       | using namespace ErrorCode;
  91 |       | namespace {
  92 |       | #include "common/compile_check_begin.h"
  93 |       |
  94 |       | bool is_rowset_tidy(std::string& pre_max_key, bool& pre_rs_key_bounds_truncated,
  95 |    39 |                     const RowsetSharedPtr& rhs) {
  96 |    39 |     size_t min_tidy_size = config::ordered_data_compaction_min_segment_size;
  97 |    39 |     if (rhs->num_segments() == 0) {
  98 |     0 |         return true;
  99 |     0 |     }
 100 |    39 |     if (rhs->is_segments_overlapping()) {
 101 |     0 |         return false;
 102 |     0 |     }
 103 |       |     // check segment size
 104 |    39 |     auto* beta_rowset = reinterpret_cast<BetaRowset*>(rhs.get());
 105 |    39 |     std::vector<size_t> segments_size;
 106 |    39 |     RETURN_FALSE_IF_ERROR(beta_rowset->get_segments_size(&segments_size));
 107 |    46 |     for (auto segment_size : segments_size) {
 108 |       |         // if a segment is too small, we need to do compaction
 109 |    46 |         if (segment_size < min_tidy_size) {
 110 |     0 |             return false;
 111 |     0 |         }
 112 |    46 |     }
 113 |    38 |     std::string min_key;
 114 |    38 |     auto ret = rhs->first_key(&min_key);
 115 |    38 |     if (!ret) {
 116 |     0 |         return false;
 117 |     0 |     }
 118 |    38 |     bool cur_rs_key_bounds_truncated {rhs->is_segments_key_bounds_truncated()};
 119 |    38 |     if (!Slice::lhs_is_strictly_less_than_rhs(Slice {pre_max_key}, pre_rs_key_bounds_truncated,
 120 |    38 |                                               Slice {min_key}, cur_rs_key_bounds_truncated)) {
 121 |     5 |         return false;
 122 |     5 |     }
 123 |    38 |     CHECK(rhs->last_key(&pre_max_key));
 124 |    33 |     pre_rs_key_bounds_truncated = cur_rs_key_bounds_truncated;
 125 |    33 |     return true;
 126 |    38 | }
 127 |       |
 128 |       | } // namespace
 129 |       |
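The tidy check above is what gates ordered (link-file) compaction: a rowset qualifies only if its segments are large enough, non-overlapping, and its first key lies strictly after the previous rowset's last key. A minimal sketch of that running invariant, assuming plain lexicographic std::string comparison in place of the truncation-aware Slice::lhs_is_strictly_less_than_rhs, with KeyRange as a hypothetical stand-in for a rowset's key bounds:

    #include <string>
    #include <vector>

    // Hypothetical stand-in for a rowset's segment key bounds.
    struct KeyRange {
        std::string first_key;
        std::string last_key;
    };

    // True when every range starts strictly after the previous one ends,
    // i.e. the rowsets can be concatenated without a real merge.
    bool ranges_are_tidy(const std::vector<KeyRange>& ranges) {
        std::string pre_max_key; // empty compares less than any non-empty key
        for (const auto& r : ranges) {
            if (!(pre_max_key < r.first_key)) {
                return false; // overlapping or touching bounds: must merge
            }
            pre_max_key = r.last_key; // carry the running maximum forward
        }
        return true;
    }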
 130 |       | Compaction::Compaction(BaseTabletSPtr tablet, const std::string& label)
 131 |       |         : _mem_tracker(
 132 |    97 |                   MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::COMPACTION, label)),
 133 |    97 |           _tablet(std::move(tablet)),
 134 |    97 |           _is_vertical(config::enable_vertical_compaction),
 135 |    97 |           _allow_delete_in_cumu_compaction(config::enable_delete_when_cumu_compaction),
 136 |       |           _enable_vertical_compact_variant_subcolumns(
 137 |    97 |                   config::enable_vertical_compact_variant_subcolumns),
 138 |    97 |           _enable_inverted_index_compaction(config::inverted_index_compaction_enable) {
 139 |    97 |     init_profile(label);
 140 |    97 |     SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
 141 |    97 |     _rowid_conversion = std::make_unique<RowIdConversion>();
 142 |    97 | }
 143 |       |
 144 |    97 | Compaction::~Compaction() {
 145 |    97 |     SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
 146 |    97 |     _output_rs_writer.reset();
 147 |    97 |     _tablet.reset();
 148 |    97 |     _input_rowsets.clear();
 149 |    97 |     _output_rowset.reset();
 150 |    97 |     _cur_tablet_schema.reset();
 151 |    97 |     _rowid_conversion.reset();
 152 |    97 | }
 153 |       |
 154 |    97 | void Compaction::init_profile(const std::string& label) {
 155 |    97 |     _profile = std::make_unique<RuntimeProfile>(label);
 156 |       |
 157 |    97 |     _input_rowsets_data_size_counter =
 158 |    97 |             ADD_COUNTER(_profile, "input_rowsets_data_size", TUnit::BYTES);
 159 |    97 |     _input_rowsets_counter = ADD_COUNTER(_profile, "input_rowsets_count", TUnit::UNIT);
 160 |    97 |     _input_row_num_counter = ADD_COUNTER(_profile, "input_row_num", TUnit::UNIT);
 161 |    97 |     _input_segments_num_counter = ADD_COUNTER(_profile, "input_segments_num", TUnit::UNIT);
 162 |    97 |     _merged_rows_counter = ADD_COUNTER(_profile, "merged_rows", TUnit::UNIT);
 163 |    97 |     _filtered_rows_counter = ADD_COUNTER(_profile, "filtered_rows", TUnit::UNIT);
 164 |    97 |     _output_rowset_data_size_counter =
 165 |    97 |             ADD_COUNTER(_profile, "output_rowset_data_size", TUnit::BYTES);
 166 |    97 |     _output_row_num_counter = ADD_COUNTER(_profile, "output_row_num", TUnit::UNIT);
 167 |    97 |     _output_segments_num_counter = ADD_COUNTER(_profile, "output_segments_num", TUnit::UNIT);
 168 |    97 |     _merge_rowsets_latency_timer = ADD_TIMER(_profile, "merge_rowsets_latency");
 169 |    97 | }
 170 |       |
 171 |     0 | int64_t Compaction::merge_way_num() {
 172 |     0 |     int64_t way_num = 0;
 173 |     0 |     for (auto&& rowset : _input_rowsets) {
 174 |     0 |         way_num += rowset->rowset_meta()->get_merge_way_num();
 175 |     0 |     }
 176 |       |
 177 |     0 |     return way_num;
 178 |     0 | }
 179 |       |
 180 |     0 | Status Compaction::merge_input_rowsets() {
 181 |     0 |     std::vector<RowsetReaderSharedPtr> input_rs_readers;
 182 |     0 |     input_rs_readers.reserve(_input_rowsets.size());
 183 |     0 |     for (auto& rowset : _input_rowsets) {
 184 |     0 |         RowsetReaderSharedPtr rs_reader;
 185 |     0 |         RETURN_IF_ERROR(rowset->create_reader(&rs_reader));
 186 |     0 |         input_rs_readers.push_back(std::move(rs_reader));
 187 |     0 |     }
 188 |       |
 189 |     0 |     RowsetWriterContext ctx;
 190 |     0 |     RETURN_IF_ERROR(construct_output_rowset_writer(ctx));
 191 |       |
 192 |       |     // write merged rows to output rowset
 193 |       |     // The test results show that the merger has a low memory footprint; there is no need to track its mem pool
 194 |       |     // if ctx.columns_to_do_index_compaction.size() > 0, it means we need to do inverted index compaction.
 195 |       |     // the row ID conversion matrix needs to be used for inverted index compaction.
 196 |     0 |     if (!ctx.columns_to_do_index_compaction.empty() ||
 197 |     0 |         (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
 198 |     0 |          _tablet->enable_unique_key_merge_on_write())) {
 199 |     0 |         _stats.rowid_conversion = _rowid_conversion.get();
 200 |     0 |     }
 201 |       |
 202 |     0 |     int64_t way_num = merge_way_num();
 203 |       |
 204 |     0 |     Status res;
 205 |     0 |     {
 206 |     0 |         SCOPED_TIMER(_merge_rowsets_latency_timer);
 207 |       |         // 1. Merge segment files and write bkd inverted index
 208 |       |         // TODO implement vertical compaction for seq map
 209 |     0 |         if (_is_vertical && !_tablet->tablet_schema()->has_seq_map()) {
 210 |     0 |             if (!_tablet->tablet_schema()->cluster_key_uids().empty()) {
 211 |     0 |                 RETURN_IF_ERROR(update_delete_bitmap());
 212 |     0 |             }
 213 |     0 |             res = Merger::vertical_merge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema,
 214 |     0 |                                                  input_rs_readers, _output_rs_writer.get(),
 215 |     0 |                                                  cast_set<uint32_t>(get_avg_segment_rows()),
 216 |     0 |                                                  way_num, &_stats);
 217 |     0 |         } else {
 218 |     0 |             if (!_tablet->tablet_schema()->cluster_key_uids().empty()) {
 219 |     0 |                 return Status::InternalError(
 220 |     0 |                         "mow table with cluster keys does not support non vertical compaction");
 221 |     0 |             }
 222 |     0 |             res = Merger::vmerge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema,
 223 |     0 |                                          input_rs_readers, _output_rs_writer.get(), &_stats);
 224 |     0 |         }
 225 |       |
 226 |     0 |         _tablet->last_compaction_status = res;
 227 |     0 |         if (!res.ok()) {
 228 |     0 |             return res;
 229 |     0 |         }
 230 |       |         // 2. Merge the remaining inverted index files of the string type
 231 |     0 |         RETURN_IF_ERROR(do_inverted_index_compaction());
 232 |     0 |     }
 233 |       |
 234 |     0 |     COUNTER_UPDATE(_merged_rows_counter, _stats.merged_rows);
 235 |     0 |     COUNTER_UPDATE(_filtered_rows_counter, _stats.filtered_rows);
 236 |       |
 237 |       |     // 3. In the `build`, `_close_file_writers` is called to close the inverted index file writer and write the final compound index file.
 238 |     0 |     RETURN_NOT_OK_STATUS_WITH_WARN(_output_rs_writer->build(_output_rowset),
 239 |     0 |                                    fmt::format("rowset writer build failed. output_version: {}",
 240 |     0 |                                                _output_version.to_string()));
 241 |       |
 242 |       |     // When true, writers should remove variant extracted subcolumns from the
 243 |       |     // schema stored in RowsetMeta. This is used when compaction temporarily
 244 |       |     // extends schema to split variant subcolumns for vertical compaction but
 245 |       |     // the final rowset meta must not persist those extracted subcolumns.
 246 |     0 |     if (_enable_vertical_compact_variant_subcolumns &&
 247 |     0 |         (_cur_tablet_schema->num_variant_columns() > 0)) {
 248 |     0 |         _output_rowset->rowset_meta()->set_tablet_schema(
 249 |     0 |                 _cur_tablet_schema->copy_without_variant_extracted_columns());
 250 |     0 |     }
 251 |       |
 252 |       |     //RETURN_IF_ERROR(_engine.meta_mgr().commit_rowset(*_output_rowset->rowset_meta().get()));
 253 |     0 |     set_delete_predicate_for_output_rowset();
 254 |       |
 255 |     0 |     _local_read_bytes_total = _stats.bytes_read_from_local;
 256 |     0 |     _remote_read_bytes_total = _stats.bytes_read_from_remote;
 257 |     0 |     DorisMetrics::instance()->local_compaction_read_bytes_total->increment(_local_read_bytes_total);
 258 |     0 |     DorisMetrics::instance()->remote_compaction_read_bytes_total->increment(
 259 |     0 |             _remote_read_bytes_total);
 260 |     0 |     DorisMetrics::instance()->local_compaction_write_bytes_total->increment(
 261 |     0 |             _stats.cached_bytes_total);
 262 |       |
 263 |     0 |     COUNTER_UPDATE(_output_rowset_data_size_counter, _output_rowset->data_disk_size());
 264 |     0 |     COUNTER_UPDATE(_output_row_num_counter, _output_rowset->num_rows());
 265 |     0 |     COUNTER_UPDATE(_output_segments_num_counter, _output_rowset->num_segments());
 266 |       |
 267 |     0 |     return check_correctness();
 268 |     0 | }
 269 |       |
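Two details in merge_input_rowsets are easy to miss. First, row-id conversion is recorded only when a consumer exists: inverted index compaction, or the delete-bitmap update of a merge-on-write unique-key table. Second, the schema persisted in the output rowset meta must not keep the variant subcolumns that were extracted only so vertical compaction could split them into column groups. A minimal sketch of that final filtering step, with ColumnStub as a hypothetical column descriptor (the real call is copy_without_variant_extracted_columns):

    #include <algorithm>
    #include <string>
    #include <vector>

    // Hypothetical column descriptor; extracted variant subcolumns are the
    // temporary per-path columns a variant is split into during compaction.
    struct ColumnStub {
        std::string name;
        bool is_extracted_variant_subcolumn = false;
    };

    // Keep only real, user-visible columns in the schema written to meta.
    std::vector<ColumnStub> without_extracted(std::vector<ColumnStub> cols) {
        cols.erase(std::remove_if(cols.begin(), cols.end(),
                                  [](const ColumnStub& c) {
                                      return c.is_extracted_variant_subcolumn;
                                  }),
                   cols.end());
        return cols;
    }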
 270 |     1 | void Compaction::set_delete_predicate_for_output_rowset() {
 271 |       |     // Now we support delete in cumu compaction; to make all data in rowsets whose version
 272 |       |     // is below output_version be deleted in a future base compaction, we should carry
 273 |       |     // all delete predicates in the output rowset.
 274 |       |     // Output start version > 2 means we must set the delete predicate in the output rowset
 275 |     1 |     if (_output_rowset->version().first > 2 &&
 276 |     1 |         (_allow_delete_in_cumu_compaction || is_index_change_compaction())) {
 277 |     1 |         DeletePredicatePB delete_predicate;
 278 |     1 |         std::accumulate(_input_rowsets.begin(), _input_rowsets.end(), &delete_predicate,
 279 |     1 |                         [](DeletePredicatePB* delete_predicate, const RowsetSharedPtr& rs) {
 280 |     1 |                             if (rs->rowset_meta()->has_delete_predicate()) {
 281 |     1 |                                 delete_predicate->MergeFrom(rs->rowset_meta()->delete_predicate());
 282 |     1 |                             }
 283 |     1 |                             return delete_predicate;
 284 |     1 |                         });
 285 |       |         // now version in delete_predicate is deprecated
 286 |     1 |         if (!delete_predicate.in_predicates().empty() ||
 287 |     1 |             !delete_predicate.sub_predicates_v2().empty() ||
 288 |     1 |             !delete_predicate.sub_predicates().empty()) {
 289 |     1 |             _output_rowset->rowset_meta()->set_delete_predicate(std::move(delete_predicate));
 290 |     1 |         }
 291 |     1 |     }
 292 |     1 | }
 293 |       |
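The std::accumulate call above is a fold with a pointer accumulator: each step merges one rowset's delete predicate into the shared protobuf and returns the same pointer. A minimal sketch of the same idiom with a plain struct standing in for DeletePredicatePB:

    #include <numeric>
    #include <string>
    #include <vector>

    // Illustrative stand-ins for the rowset meta's optional delete predicate.
    struct DeletePredicate {
        std::vector<std::string> sub_predicates;
    };
    struct RowsetStub {
        bool has_delete_predicate = false;
        DeletePredicate delete_predicate;
    };

    DeletePredicate merge_delete_predicates(const std::vector<RowsetStub>& rowsets) {
        DeletePredicate merged;
        // The accumulator is a pointer; each step mutates the target and
        // returns it, mirroring the MergeFrom-based fold above.
        std::accumulate(rowsets.begin(), rowsets.end(), &merged,
                        [](DeletePredicate* acc, const RowsetStub& rs) {
                            if (rs.has_delete_predicate) {
                                const auto& sp = rs.delete_predicate.sub_predicates;
                                acc->sub_predicates.insert(acc->sub_predicates.end(),
                                                           sp.begin(), sp.end());
                            }
                            return acc;
                        });
        return merged;
    }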
 294 |     1 | int64_t Compaction::get_avg_segment_rows() {
 295 |       |     // take care of empty rowsets
 296 |       |     // _input_rowsets_data_size is the total disk_size of the input rowsets; this is the
 297 |       |     // final size after encoding and compression, so the expected dest segment file size
 298 |       |     // on disk is config::vertical_compaction_max_segment_size
 299 |     1 |     const auto& meta = _tablet->tablet_meta();
 300 |     1 |     if (meta->compaction_policy() == CUMULATIVE_TIME_SERIES_POLICY) {
 301 |     0 |         int64_t compaction_goal_size_mbytes = meta->time_series_compaction_goal_size_mbytes();
 302 |       |         // The output segment rows should be less than total input rows
 303 |     0 |         return std::min((compaction_goal_size_mbytes * 1024 * 1024 * 2) /
 304 |     0 |                                 (_input_rowsets_data_size / (_input_row_num + 1) + 1),
 305 |     0 |                         _input_row_num + 1);
 306 |     0 |     }
 307 |     1 |     return std::min(config::vertical_compaction_max_segment_size /
 308 |     1 |                             (_input_rowsets_data_size / (_input_row_num + 1) + 1),
 309 |     1 |                     _input_row_num + 1);
 310 |     1 | }
 311 |       |
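The arithmetic above estimates rows per output segment from the average encoded row size, with the +1 terms guarding against division by zero on empty inputs. A small sketch of the non-time-series branch with illustrative numbers:

    #include <algorithm>
    #include <cstdint>

    // rows/segment = min(target_bytes / avg_row_bytes, input_rows + 1)
    int64_t avg_segment_rows(int64_t input_bytes, int64_t input_rows,
                             int64_t max_segment_bytes) {
        int64_t avg_row_bytes = input_bytes / (input_rows + 1) + 1;
        return std::min(max_segment_bytes / avg_row_bytes, input_rows + 1);
    }

    // e.g. 1 GiB of input spread over 10M rows gives ~108 bytes/row after
    // the guards; with a 256 MiB segment target that is roughly 2.48M rows
    // per output segment (sample numbers only, not Doris defaults).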
 312 |       | CompactionMixin::CompactionMixin(StorageEngine& engine, TabletSharedPtr tablet,
 313 |       |                                  const std::string& label)
 314 |    73 |         : Compaction(tablet, label), _engine(engine) {}
 315 |       |
 316 |    73 | CompactionMixin::~CompactionMixin() {
 317 |    73 |     if (_state != CompactionState::SUCCESS && _output_rowset != nullptr) {
 318 |     6 |         if (!_output_rowset->is_local()) {
 319 |     0 |             tablet()->record_unused_remote_rowset(_output_rowset->rowset_id(),
 320 |     0 |                                                   _output_rowset->rowset_meta()->resource_id(),
 321 |     0 |                                                   _output_rowset->num_segments());
 322 |     0 |             return;
 323 |     0 |         }
 324 |     6 |         _engine.add_unused_rowset(_output_rowset);
 325 |     6 |     }
 326 |    73 | }
 327 |       |
 328 |   187 | Tablet* CompactionMixin::tablet() {
 329 |   187 |     return static_cast<Tablet*>(_tablet.get());
 330 |   187 | }
 331 |       |
 332 |     6 | Status CompactionMixin::do_compact_ordered_rowsets() {
 333 |     6 |     RETURN_IF_ERROR(build_basic_info(true));
 334 |     6 |     RowsetWriterContext ctx;
 335 |     6 |     RETURN_IF_ERROR(construct_output_rowset_writer(ctx));
 336 |       |
 337 |     6 |     LOG(INFO) << "start to do ordered data compaction, tablet=" << _tablet->tablet_id()
 338 |     6 |               << ", output_version=" << _output_version;
 339 |       |     // link data to new rowset
 340 |     6 |     auto seg_id = 0;
 341 |     6 |     bool segments_key_bounds_truncated {false};
 342 |     6 |     std::vector<KeyBoundsPB> segment_key_bounds;
 343 |    28 |     for (auto rowset : _input_rowsets) {
 344 |    28 |         RETURN_IF_ERROR(rowset->link_files_to(tablet()->tablet_path(),
 345 |    28 |                                               _output_rs_writer->rowset_id(), seg_id));
 346 |    28 |         seg_id += rowset->num_segments();
 347 |    28 |         segments_key_bounds_truncated |= rowset->is_segments_key_bounds_truncated();
 348 |    28 |         std::vector<KeyBoundsPB> key_bounds;
 349 |    28 |         RETURN_IF_ERROR(rowset->get_segments_key_bounds(&key_bounds));
 350 |    28 |         segment_key_bounds.insert(segment_key_bounds.end(), key_bounds.begin(), key_bounds.end());
 351 |    28 |     }
 352 |       |     // build output rowset
 353 |     6 |     RowsetMetaSharedPtr rowset_meta = std::make_shared<RowsetMeta>();
 354 |     6 |     rowset_meta->set_num_rows(_input_row_num);
 355 |     6 |     rowset_meta->set_total_disk_size(_input_rowsets_data_size + _input_rowsets_index_size);
 356 |     6 |     rowset_meta->set_data_disk_size(_input_rowsets_data_size);
 357 |     6 |     rowset_meta->set_index_disk_size(_input_rowsets_index_size);
 358 |     6 |     rowset_meta->set_empty(_input_row_num == 0);
 359 |     6 |     rowset_meta->set_num_segments(_input_num_segments);
 360 |     6 |     rowset_meta->set_segments_overlap(NONOVERLAPPING);
 361 |     6 |     rowset_meta->set_rowset_state(VISIBLE);
 362 |     6 |     rowset_meta->set_segments_key_bounds_truncated(segments_key_bounds_truncated);
 363 |     6 |     rowset_meta->set_segments_key_bounds(segment_key_bounds);
 364 |       |
 365 |     6 |     _output_rowset = _output_rs_writer->manual_build(rowset_meta);
 366 |       |
 367 |       |     // 2. check variant column path stats
 368 |     6 |     RETURN_IF_ERROR(vectorized::schema_util::VariantCompactionUtil::check_path_stats(
 369 |     6 |             _input_rowsets, _output_rowset, _tablet));
 370 |     6 |     return Status::OK();
 371 |     6 | }
 372 |       |
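Ordered compaction never rewrites data: every input segment file is hard-linked into the output rowset under a new, contiguous segment id, and only the rowset metadata is rebuilt. A minimal sketch of the renumber-and-link step using std::filesystem directly, where Doris uses Rowset::link_files_to; the layout and naming here are hypothetical:

    #include <filesystem>
    #include <string>
    #include <vector>

    namespace fs = std::filesystem;

    // Each input rowset is modeled as a list of segment files. Links them
    // into `dir` as <output_rowset_id>_<seg_id>.dat, keeping one contiguous
    // segment id space across all inputs so key order is preserved.
    void link_segments(const std::vector<std::vector<fs::path>>& input_rowsets,
                       const fs::path& dir, const std::string& output_rowset_id) {
        int seg_id = 0;
        for (const auto& rowset : input_rowsets) {
            for (const auto& seg : rowset) {
                fs::create_hard_link(
                        seg, dir / (output_rowset_id + "_" + std::to_string(seg_id) + ".dat"));
                ++seg_id;
            }
        }
    }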
 373 |    43 | Status CompactionMixin::build_basic_info(bool is_ordered_compaction) {
 374 |   158 |     for (auto& rowset : _input_rowsets) {
 375 |   158 |         const auto& rowset_meta = rowset->rowset_meta();
 376 |   158 |         auto index_size = rowset_meta->index_disk_size();
 377 |   158 |         auto total_size = rowset_meta->total_disk_size();
 378 |   158 |         auto data_size = rowset_meta->data_disk_size();
 379 |       |         // corrupted index size caused by a bug before version 2.1.5 or 3.0.0;
 380 |       |         // try to get the real index size from disk.
 381 |   158 |         if (index_size < 0 || index_size > total_size * 2) {
 382 |     3 |             LOG(ERROR) << "invalid index size:" << index_size << " total size:" << total_size
 383 |     3 |                        << " data size:" << data_size << " tablet:" << rowset_meta->tablet_id()
 384 |     3 |                        << " rowset:" << rowset_meta->rowset_id();
 385 |     3 |             index_size = 0;
 386 |     3 |             auto st = rowset->get_inverted_index_size(&index_size);
 387 |     3 |             if (!st.ok()) {
 388 |     0 |                 LOG(ERROR) << "failed to get inverted index size. res=" << st;
 389 |     0 |             }
 390 |     3 |         }
 391 |   158 |         _input_rowsets_data_size += data_size;
 392 |   158 |         _input_rowsets_index_size += index_size;
 393 |   158 |         _input_rowsets_total_size += total_size;
 394 |   158 |         _input_row_num += rowset->num_rows();
 395 |   158 |         _input_num_segments += rowset->num_segments();
 396 |   158 |     }
 397 |    43 |     COUNTER_UPDATE(_input_rowsets_data_size_counter, _input_rowsets_data_size);
 398 |    43 |     COUNTER_UPDATE(_input_row_num_counter, _input_row_num);
 399 |    43 |     COUNTER_UPDATE(_input_segments_num_counter, _input_num_segments);
 400 |       |
 401 |    43 |     TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::build_basic_info",
 402 |    42 |                                       Status::OK());
 403 |       |
 404 |    42 |     _output_version =
 405 |    42 |             Version(_input_rowsets.front()->start_version(), _input_rowsets.back()->end_version());
 406 |       |
 407 |    42 |     _newest_write_timestamp = _input_rowsets.back()->newest_write_timestamp();
 408 |       |
 409 |    42 |     std::vector<RowsetMetaSharedPtr> rowset_metas(_input_rowsets.size());
 410 |    42 |     std::transform(_input_rowsets.begin(), _input_rowsets.end(), rowset_metas.begin(),
 411 |   130 |                    [](const RowsetSharedPtr& rowset) { return rowset->rowset_meta(); });
 412 |    42 |     _cur_tablet_schema = _tablet->tablet_schema_with_merged_max_schema_version(rowset_metas);
 413 |       |
 414 |       |     // if enable_vertical_compact_variant_subcolumns is true, we need to compact the variant subcolumns in separate column groups,
 415 |       |     // so get_extended_compaction_schema will extend the schema for variant columns;
 416 |       |     // for ordered compaction, we don't need to extend the schema for variant columns
 417 |    42 |     if (_enable_vertical_compact_variant_subcolumns && !is_ordered_compaction) {
 418 |    36 |         RETURN_IF_ERROR(
 419 |    36 |                 vectorized::schema_util::VariantCompactionUtil::get_extended_compaction_schema(
 420 |    36 |                         _input_rowsets, _cur_tablet_schema));
 421 |    36 |     }
 422 |    41 |     return Status::OK();
 423 |    42 | }
 424 |       |
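build_basic_info distrusts the recorded index size because of the pre-2.1.5 / pre-3.0.0 corruption noted above: a negative value, or one larger than twice the rowset's total size, is discarded and recomputed from the files on disk. A minimal sketch of the same guard, with recompute_from_disk as a hypothetical fallback:

    #include <cstdint>

    // `recorded` comes from the rowset meta; `total` is the rowset's total
    // disk size. The bound mirrors "index_size < 0 || index_size > total_size * 2".
    int64_t sanitize_index_size(int64_t recorded, int64_t total,
                                int64_t (*recompute_from_disk)()) {
        if (recorded < 0 || recorded > total * 2) {
            return recompute_from_disk(); // hypothetical re-scan of index files
        }
        return recorded;
    }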
 425 |    12 | bool CompactionMixin::handle_ordered_data_compaction() {
 426 |    12 |     if (!config::enable_ordered_data_compaction) {
 427 |     0 |         return false;
 428 |     0 |     }
 429 |       |
 430 |       |     // If some rowsets have idx files and some do not, we cannot do link file compaction,
 431 |       |     // since the output rowset would be broken.
 432 |       |
 433 |       |     // Use the schema version instead of the schema hash to check if they are the same,
 434 |       |     // because a light schema change will not change the schema hash on the BE, but will increase the schema version.
 435 |       |     // See fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java::2979
 436 |    12 |     std::vector<int32_t> schema_versions_of_rowsets;
 437 |       |
 438 |    81 |     for (auto input_rowset : _input_rowsets) {
 439 |    81 |         schema_versions_of_rowsets.push_back(input_rowset->rowset_meta()->schema_version());
 440 |    81 |     }
 441 |       |
 442 |       |     // If all rowsets have the same schema version, then we can do link file compaction directly.
 443 |    12 |     bool all_same_schema_version =
 444 |    12 |             std::all_of(schema_versions_of_rowsets.begin(), schema_versions_of_rowsets.end(),
 445 |    81 |                         [&](int32_t v) { return v == schema_versions_of_rowsets.front(); });
 446 |       |
 447 |    12 |     if (!all_same_schema_version) {
 448 |     0 |         return false;
 449 |     0 |     }
 450 |       |
 451 |    12 |     if (compaction_type() == ReaderType::READER_COLD_DATA_COMPACTION ||
 452 |    12 |         compaction_type() == ReaderType::READER_FULL_COMPACTION) {
 453 |       |         // Cold data compaction on a remote file system and full compaction do not support linking files.
 454 |     0 |         return false;
 455 |     0 |     }
 456 |    12 |     if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
 457 |    12 |         _tablet->enable_unique_key_merge_on_write()) {
 458 |     0 |         return false;
 459 |     0 |     }
 460 |       |
 461 |    12 |     if (_tablet->tablet_meta()->tablet_schema()->skip_write_index_on_load()) {
 462 |       |         // Expected to create the index through normal compaction
 463 |     0 |         return false;
 464 |     0 |     }
 465 |       |
 466 |       |     // check delete version: if the compaction type is base compaction and
 467 |       |     // there is a delete version, use original compaction
 468 |    12 |     if (compaction_type() == ReaderType::READER_BASE_COMPACTION ||
 469 |    12 |         (_allow_delete_in_cumu_compaction &&
 470 |    12 |          compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION)) {
 471 |     0 |         for (auto& rowset : _input_rowsets) {
 472 |     0 |             if (rowset->rowset_meta()->has_delete_predicate()) {
 473 |     0 |                 return false;
 474 |     0 |             }
 475 |     0 |         }
 476 |     0 |     }
 477 |       |
 478 |       |     // check if rowsets are tidy so we can just modify meta and do link
 479 |       |     // files to handle compaction
 480 |    12 |     auto input_size = _input_rowsets.size();
 481 |    12 |     std::string pre_max_key;
 482 |    12 |     bool pre_rs_key_bounds_truncated {false};
 483 |    45 |     for (auto i = 0; i < input_size; ++i) {
 484 |    39 |         if (!is_rowset_tidy(pre_max_key, pre_rs_key_bounds_truncated, _input_rowsets[i])) {
 485 |     6 |             if (i <= input_size / 2) {
 486 |     6 |                 return false;
 487 |     6 |             } else {
 488 |     0 |                 _input_rowsets.resize(i);
 489 |     0 |                 break;
 490 |     0 |             }
 491 |     6 |         }
 492 |    39 |     }
 493 |       |     // most rowsets of the current compaction are nonoverlapping;
 494 |       |     // just handle nonoverlapping rowsets
 495 |     6 |     auto st = do_compact_ordered_rowsets();
 496 |     6 |     if (!st.ok()) {
 497 |     0 |         LOG(WARNING) << "failed to compact ordered rowsets: " << st;
 498 |     0 |         _pending_rs_guard.drop();
 499 |     0 |     }
 500 |       |
 501 |     6 |     return st.ok();
 502 |    12 | }
 503 |       |
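The loop above applies a halfway heuristic: a non-tidy rowset in the first half of the inputs abandons the ordered path entirely, while one past the midpoint merely truncates _input_rowsets to the tidy prefix and link-compacts that prefix. A minimal sketch of the decision, with `tidy` as a hypothetical predicate:

    #include <cstddef>
    #include <functional>
    #include <vector>

    // Returns the number of leading elements kept, or 0 to abandon the
    // ordered path: a "dirty" element early on is not worth a partial
    // link-compaction, but one late in the list still is.
    template <typename T>
    std::size_t tidy_prefix_len(std::vector<T>& inputs,
                                const std::function<bool(const T&)>& tidy) {
        for (std::size_t i = 0; i < inputs.size(); ++i) {
            if (!tidy(inputs[i])) {
                if (i <= inputs.size() / 2) return 0; // fall back to a real merge
                inputs.resize(i);                     // keep the ordered prefix
                return i;
            }
        }
        return inputs.size();
    }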
 504 |     1 | Status CompactionMixin::execute_compact() {
 505 |     1 |     uint32_t checksum_before;
 506 |     1 |     uint32_t checksum_after;
 507 |     1 |     bool enable_compaction_checksum = config::enable_compaction_checksum;
 508 |     1 |     if (enable_compaction_checksum) {
 509 |     0 |         EngineChecksumTask checksum_task(_engine, _tablet->tablet_id(), _tablet->schema_hash(),
 510 |     0 |                                          _input_rowsets.back()->end_version(), &checksum_before);
 511 |     0 |         RETURN_IF_ERROR(checksum_task.execute());
 512 |     0 |     }
 513 |       |
 514 |     1 |     auto* data_dir = tablet()->data_dir();
 515 |     1 |     int64_t permits = get_compaction_permits();
 516 |     1 |     data_dir->disks_compaction_score_increment(permits);
 517 |     1 |     data_dir->disks_compaction_num_increment(1);
 518 |       |
 519 |     1 |     auto record_compaction_stats = [&](const doris::Exception& ex) {
 520 |     1 |         _tablet->compaction_count.fetch_add(1, std::memory_order_relaxed);
 521 |     1 |         data_dir->disks_compaction_score_increment(-permits);
 522 |     1 |         data_dir->disks_compaction_num_increment(-1);
 523 |     1 |     };
 524 |       |
 525 |     1 |     HANDLE_EXCEPTION_IF_CATCH_EXCEPTION(execute_compact_impl(permits), record_compaction_stats);
 526 |     1 |     record_compaction_stats(doris::Exception());
 527 |       |
 528 |     1 |     if (enable_compaction_checksum) {
 529 |     0 |         EngineChecksumTask checksum_task(_engine, _tablet->tablet_id(), _tablet->schema_hash(),
 530 |     0 |                                          _input_rowsets.back()->end_version(), &checksum_after);
 531 |     0 |         RETURN_IF_ERROR(checksum_task.execute());
 532 |     0 |         if (checksum_before != checksum_after) {
 533 |     0 |             return Status::InternalError(
 534 |     0 |                     "compaction tablet checksum not consistent, before={}, after={}, tablet_id={}",
 535 |     0 |                     checksum_before, checksum_after, _tablet->tablet_id());
 536 |     0 |         }
 537 |     0 |     }
 538 |       |
 539 |     1 |     DorisMetrics::instance()->local_compaction_read_rows_total->increment(_input_row_num);
 540 |     1 |     DorisMetrics::instance()->local_compaction_read_bytes_total->increment(
 541 |     1 |             _input_rowsets_total_size);
 542 |       |
 543 |     1 |     TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::execute_compact", Status::OK());
 544 |       |
 545 |     0 |     DorisMetrics::instance()->local_compaction_write_rows_total->increment(
 546 |     0 |             _output_rowset->num_rows());
 547 |     0 |     DorisMetrics::instance()->local_compaction_write_bytes_total->increment(
 548 |     0 |             _output_rowset->total_disk_size());
 549 |       |
 550 |     0 |     _load_segment_to_cache();
 551 |     0 |     return Status::OK();
 552 |     1 | }
 553 |       |
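execute_compact raises the disk's compaction score and task count before running and relies on record_compaction_stats to lower them again; routing that callback through HANDLE_EXCEPTION_IF_CATCH_EXCEPTION makes the decrement happen even when the implementation throws. The same pairing can also be expressed as a scope guard; a sketch, assuming a simple counter struct in place of DataDir:

    #include <atomic>
    #include <cstdint>

    // Illustrative stand-in for DataDir's compaction bookkeeping.
    struct DiskStats {
        std::atomic<int64_t> compaction_score {0};
        std::atomic<int64_t> compaction_num {0};
    };

    // RAII guard: the destructor runs on every exit path, including
    // exceptions, matching the record_compaction_stats callback above.
    class CompactionPermitGuard {
    public:
        CompactionPermitGuard(DiskStats& stats, int64_t permits)
                : _stats(stats), _permits(permits) {
            _stats.compaction_score += _permits;
            _stats.compaction_num += 1;
        }
        ~CompactionPermitGuard() {
            _stats.compaction_score -= _permits;
            _stats.compaction_num -= 1;
        }

    private:
        DiskStats& _stats;
        int64_t _permits;
    };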
 554 |     1 | Status CompactionMixin::execute_compact_impl(int64_t permits) {
 555 |     1 |     OlapStopWatch watch;
 556 |       |
 557 |     1 |     if (handle_ordered_data_compaction()) {
 558 |     0 |         RETURN_IF_ERROR(modify_rowsets());
 559 |     0 |         LOG(INFO) << "succeed to do ordered data " << compaction_name()
 560 |     0 |                   << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
 561 |     0 |                   << ", disk=" << tablet()->data_dir()->path()
 562 |     0 |                   << ", segments=" << _input_num_segments << ", input_row_num=" << _input_row_num
 563 |     0 |                   << ", output_row_num=" << _output_rowset->num_rows()
 564 |     0 |                   << ", input_rowsets_data_size=" << _input_rowsets_data_size
 565 |     0 |                   << ", input_rowsets_index_size=" << _input_rowsets_index_size
 566 |     0 |                   << ", input_rowsets_total_size=" << _input_rowsets_total_size
 567 |     0 |                   << ", output_rowset_data_size=" << _output_rowset->data_disk_size()
 568 |     0 |                   << ", output_rowset_index_size=" << _output_rowset->index_disk_size()
 569 |     0 |                   << ", output_rowset_total_size=" << _output_rowset->total_disk_size()
 570 |     0 |                   << ". elapsed time=" << watch.get_elapse_second() << "s.";
 571 |     0 |         _state = CompactionState::SUCCESS;
 572 |     0 |         return Status::OK();
 573 |     0 |     }
 574 |     1 |     RETURN_IF_ERROR(build_basic_info());
 575 |       |
 576 |     1 |     TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::execute_compact_impl",
 577 |     0 |                                       Status::OK());
 578 |       |
 579 |     0 |     VLOG_DEBUG << "dump tablet schema: " << _cur_tablet_schema->dump_structure();
 580 |       |
 581 |     0 |     LOG(INFO) << "start " << compaction_name() << ". tablet=" << _tablet->tablet_id()
 582 |     0 |               << ", output_version=" << _output_version << ", permits: " << permits;
 583 |       |
 584 |     0 |     RETURN_IF_ERROR(merge_input_rowsets());
 585 |       |
 586 |       |     // Currently, the compaction level is only updated by the time series policy.
 587 |     0 |     update_compaction_level();
 588 |       |
 589 |     0 |     RETURN_IF_ERROR(modify_rowsets());
 590 |       |
 591 |     0 |     auto* cumu_policy = tablet()->cumulative_compaction_policy();
 592 |     0 |     DCHECK(cumu_policy);
 593 |     0 |     LOG(INFO) << "succeed to do " << compaction_name() << " is_vertical=" << _is_vertical
 594 |     0 |               << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
 595 |     0 |               << ", current_max_version=" << tablet()->max_version().second
 596 |     0 |               << ", disk=" << tablet()->data_dir()->path()
 597 |     0 |               << ", input_segments=" << _input_num_segments << ", input_rowsets_data_size="
 598 |     0 |               << PrettyPrinter::print_bytes(_input_rowsets_data_size)
 599 |     0 |               << ", input_rowsets_index_size="
 600 |     0 |               << PrettyPrinter::print_bytes(_input_rowsets_index_size)
 601 |     0 |               << ", input_rowsets_total_size="
 602 |     0 |               << PrettyPrinter::print_bytes(_input_rowsets_total_size)
 603 |     0 |               << ", output_rowset_data_size="
 604 |     0 |               << PrettyPrinter::print_bytes(_output_rowset->data_disk_size())
 605 |     0 |               << ", output_rowset_index_size="
 606 |     0 |               << PrettyPrinter::print_bytes(_output_rowset->index_disk_size())
 607 |     0 |               << ", output_rowset_total_size="
 608 |     0 |               << PrettyPrinter::print_bytes(_output_rowset->total_disk_size())
 609 |     0 |               << ", input_row_num=" << _input_row_num
 610 |     0 |               << ", output_row_num=" << _output_rowset->num_rows()
 611 |     0 |               << ", filtered_row_num=" << _stats.filtered_rows
 612 |     0 |               << ", merged_row_num=" << _stats.merged_rows
 613 |     0 |               << ". elapsed time=" << watch.get_elapse_second()
 614 |     0 |               << "s. cumulative_compaction_policy=" << cumu_policy->name()
 615 |     0 |               << ", compact_row_per_second="
 616 |     0 |               << cast_set<double>(_input_row_num) / watch.get_elapse_second();
 617 |       |
 618 |     0 |     _state = CompactionState::SUCCESS;
 619 |       |
 620 |     0 |     return Status::OK();
 621 |     0 | }
 622 |       |
 623 |    35 | Status Compaction::do_inverted_index_compaction() {
 624 |    35 |     const auto& ctx = _output_rs_writer->context();
 625 |    35 |     if (!_enable_inverted_index_compaction || _input_row_num <= 0 ||
 626 |    35 |         ctx.columns_to_do_index_compaction.empty()) {
 627 |    15 |         return Status::OK();
 628 |    15 |     }
 629 |       |
 630 |    20 |     auto error_handler = [this](int64_t index_id, int64_t column_uniq_id) {
 631 |     2 |         LOG(WARNING) << "failed to do index compaction"
 632 |     2 |                      << ". tablet=" << _tablet->tablet_id() << ". column uniq id=" << column_uniq_id
 633 |     2 |                      << ". index_id=" << index_id;
 634 |     4 |         for (auto& rowset : _input_rowsets) {
 635 |     4 |             rowset->set_skip_index_compaction(cast_set<int32_t>(column_uniq_id));
 636 |     4 |             LOG(INFO) << "mark skipping inverted index compaction next time"
 637 |     4 |                       << ". tablet=" << _tablet->tablet_id() << ", rowset=" << rowset->rowset_id()
 638 |     4 |                       << ", column uniq id=" << column_uniq_id << ", index_id=" << index_id;
 639 |     4 |         }
 640 |     2 |     };
 641 |       |
 642 |    20 |     DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_rowid_conversion_null",
 643 |    20 |                     { _stats.rowid_conversion = nullptr; })
 644 |    20 |     if (!_stats.rowid_conversion) {
 645 |     0 |         LOG(WARNING) << "failed to do index compaction, rowid conversion is null"
 646 |     0 |                      << ". tablet=" << _tablet->tablet_id()
 647 |     0 |                      << ", input row number=" << _input_row_num;
 648 |     0 |         mark_skip_index_compaction(ctx, error_handler);
 649 |       |
 650 |     0 |         return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 651 |     0 |                 "failed to do index compaction, rowid conversion is null. tablet={}",
 652 |     0 |                 _tablet->tablet_id());
 653 |     0 |     }
 654 |       |
 655 |    20 |     OlapStopWatch inverted_watch;
 656 |       |
 657 |       |     // translation vec
 658 |       |     // <<dest_idx_num, dest_docId>>
 659 |       |     // the first level vector: index indicates the src segment.
 660 |       |     // the second level vector: index indicates the row id of the source segment,
 661 |       |     // value indicates the row id of the destination segment.
 662 |       |     // <UINT32_MAX, UINT32_MAX> indicates that the current row does not exist.
 663 |    20 |     const auto& trans_vec = _stats.rowid_conversion->get_rowid_conversion_map();
 664 |       |
 665 |       |     // source rowset,segment -> index_id
 666 |    20 |     const auto& src_seg_to_id_map = _stats.rowid_conversion->get_src_segment_to_id_map();
 667 |       |
 668 |       |     // dest rowset id
 669 |    20 |     RowsetId dest_rowset_id = _stats.rowid_conversion->get_dst_rowset_id();
 670 |       |     // dest segment id -> num rows
 671 |    20 |     std::vector<uint32_t> dest_segment_num_rows;
 672 |    20 |     RETURN_IF_ERROR(_output_rs_writer->get_segment_num_rows(&dest_segment_num_rows));
 673 |       |
 674 |    20 |     auto src_segment_num = src_seg_to_id_map.size();
 675 |    20 |     auto dest_segment_num = dest_segment_num_rows.size();
 676 |       |
 677 |       |     // when all the input rowsets are deleted, the output rowset will be empty and dest_segment_num will be 0.
 678 |    20 |     if (dest_segment_num <= 0) {
 679 |     2 |         LOG(INFO) << "skip doing index compaction due to no output segments"
 680 |     2 |                   << ". tablet=" << _tablet->tablet_id() << ", input row number=" << _input_row_num
 681 |     2 |                   << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
 682 |     2 |         return Status::OK();
 683 |     2 |     }
 684 |       |
 685 |       |     // Only write info files when debug index compaction is enabled.
 686 |       |     // The files are used to debug index compaction and work with index_tool.
 687 |    18 |     if (config::debug_inverted_index_compaction) {
 688 |       |         // src index files
 689 |       |         // format: rowsetId_segmentId
 690 |     0 |         std::vector<std::string> src_index_files(src_segment_num);
 691 |     0 |         for (const auto& m : src_seg_to_id_map) {
 692 |     0 |             std::pair<RowsetId, uint32_t> p = m.first;
 693 |     0 |             src_index_files[m.second] = p.first.to_string() + "_" + std::to_string(p.second);
 694 |     0 |         }
 695 |       |
 696 |       |         // dest index files
 697 |       |         // format: rowsetId_segmentId
 698 |     0 |         std::vector<std::string> dest_index_files(dest_segment_num);
 699 |     0 |         for (int i = 0; i < dest_segment_num; ++i) {
 700 |     0 |             auto prefix = dest_rowset_id.to_string() + "_" + std::to_string(i);
 701 |     0 |             dest_index_files[i] = prefix;
 702 |     0 |         }
 703 |       |
 704 |     0 |         auto write_json_to_file = [&](const nlohmann::json& json_obj,
 705 |     0 |                                       const std::string& file_name) {
 706 |     0 |             io::FileWriterPtr file_writer;
 707 |     0 |             std::string file_path =
 708 |     0 |                     fmt::format("{}/{}.json", std::string(getenv("LOG_DIR")), file_name);
 709 |     0 |             RETURN_IF_ERROR(io::global_local_filesystem()->create_file(file_path, &file_writer));
 710 |     0 |             RETURN_IF_ERROR(file_writer->append(json_obj.dump()));
 711 |     0 |             RETURN_IF_ERROR(file_writer->append("\n"));
 712 |     0 |             return file_writer->close();
 713 |     0 |         };
 714 |       |
 715 |       |         // Convert trans_vec to JSON and print it
 716 |     0 |         nlohmann::json trans_vec_json = trans_vec;
 717 |     0 |         auto output_version =
 718 |     0 |                 _output_version.to_string().substr(1, _output_version.to_string().size() - 2);
 719 |     0 |         RETURN_IF_ERROR(write_json_to_file(
 720 |     0 |                 trans_vec_json,
 721 |     0 |                 fmt::format("trans_vec_{}_{}", _tablet->tablet_id(), output_version)));
 722 |       |
 723 |     0 |         nlohmann::json src_index_files_json = src_index_files;
 724 |     0 |         RETURN_IF_ERROR(write_json_to_file(
 725 |     0 |                 src_index_files_json,
 726 |     0 |                 fmt::format("src_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
 727 |       |
 728 |     0 |         nlohmann::json dest_index_files_json = dest_index_files;
 729 |     0 |         RETURN_IF_ERROR(write_json_to_file(
 730 |     0 |                 dest_index_files_json,
 731 |     0 |                 fmt::format("dest_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
 732 |       |
 733 |     0 |         nlohmann::json dest_segment_num_rows_json = dest_segment_num_rows;
 734 |     0 |         RETURN_IF_ERROR(write_json_to_file(
 735 |     0 |                 dest_segment_num_rows_json,
 736 |     0 |                 fmt::format("dest_seg_num_rows_{}_{}", _tablet->tablet_id(), output_version)));
 737 |     0 |     }
 738 |       |
 739 |       |     // create index writers to compact indexes
 740 |    18 |     std::unordered_map<RowsetId, Rowset*> rs_id_to_rowset_map;
 741 |    47 |     for (auto&& rs : _input_rowsets) {
 742 |    47 |         rs_id_to_rowset_map.emplace(rs->rowset_id(), rs.get());
 743 |    47 |     }
 744 |       |
 745 |       |     // src index dirs
 746 |    18 |     std::vector<std::unique_ptr<IndexFileReader>> index_file_readers(src_segment_num);
 747 |   124 |     for (const auto& m : src_seg_to_id_map) {
 748 |   124 |         const auto& [rowset_id, seg_id] = m.first;
 749 |       |
 750 |   124 |         auto find_it = rs_id_to_rowset_map.find(rowset_id);
 751 |   124 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_find_rowset_error",
 752 |   124 |                         { find_it = rs_id_to_rowset_map.end(); })
 753 |   124 |         if (find_it == rs_id_to_rowset_map.end()) [[unlikely]] {
 754 |     0 |             LOG(WARNING) << "failed to do index compaction, cannot find rowset. tablet_id="
 755 |     0 |                          << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string();
 756 |     0 |             mark_skip_index_compaction(ctx, error_handler);
 757 |     0 |             return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 758 |     0 |                     "failed to do index compaction, cannot find rowset. tablet_id={} rowset_id={}",
 759 |     0 |                     _tablet->tablet_id(), rowset_id.to_string());
 760 |     0 |         }
 761 |       |
 762 |   124 |         auto* rowset = find_it->second;
 763 |   124 |         auto fs = rowset->rowset_meta()->fs();
 764 |   124 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_get_fs_error", { fs = nullptr; })
 765 |   124 |         if (!fs) {
 766 |     0 |             LOG(WARNING) << "failed to do index compaction, get fs failed. resource_id="
 767 |     0 |                          << rowset->rowset_meta()->resource_id();
 768 |     0 |             mark_skip_index_compaction(ctx, error_handler);
 769 |     0 |             return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 770 |     0 |                     "get fs failed, resource_id={}", rowset->rowset_meta()->resource_id());
 771 |     0 |         }
 772 |       |
 773 |   124 |         auto seg_path = rowset->segment_path(seg_id);
 774 |   124 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_seg_path_nullptr", {
 775 |   124 |             seg_path = ResultError(Status::Error<ErrorCode::INTERNAL_ERROR>(
 776 |   124 |                     "do_inverted_index_compaction_seg_path_nullptr"));
 777 |   124 |         })
 778 |   124 |         if (!seg_path.has_value()) {
 779 |     0 |             LOG(WARNING) << "failed to do index compaction, get segment path failed. tablet_id="
 780 |     0 |                          << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string()
 781 |     0 |                          << " seg_id=" << seg_id;
 782 |     0 |             mark_skip_index_compaction(ctx, error_handler);
 783 |     0 |             return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 784 |     0 |                     "get segment path failed. tablet_id={} rowset_id={} seg_id={}",
 785 |     0 |                     _tablet->tablet_id(), rowset_id.to_string(), seg_id);
 786 |     0 |         }
 787 |   124 |         auto index_file_reader = std::make_unique<IndexFileReader>(
 788 |   124 |                 fs,
 789 |   124 |                 std::string {InvertedIndexDescriptor::get_index_file_path_prefix(seg_path.value())},
 790 |   124 |                 _cur_tablet_schema->get_inverted_index_storage_format(),
 791 |   124 |                 rowset->rowset_meta()->inverted_index_file_info(seg_id));
 792 |   124 |         auto st = index_file_reader->init(config::inverted_index_read_buffer_size);
 793 |   124 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_init_inverted_index_file_reader",
 794 |   124 |                         {
 795 |   124 |                             st = Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
 796 |   124 |                                     "debug point: "
 797 |   124 |                                     "Compaction::do_inverted_index_compaction_init_inverted_index_"
 798 |   124 |                                     "file_reader error");
 799 |   124 |                         })
 800 |   124 |         if (!st.ok()) {
 801 |     0 |             LOG(WARNING) << "failed to do index compaction, init inverted index file reader "
 802 |     0 |                             "failed. tablet_id="
 803 |     0 |                          << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string()
 804 |     0 |                          << " seg_id=" << seg_id;
 805 |     0 |             mark_skip_index_compaction(ctx, error_handler);
 806 |     0 |             return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 807 |     0 |                     "init inverted index file reader failed. tablet_id={} rowset_id={} seg_id={}",
 808 |     0 |                     _tablet->tablet_id(), rowset_id.to_string(), seg_id);
 809 |     0 |         }
 810 |   124 |         index_file_readers[m.second] = std::move(index_file_reader);
 811 |   124 |     }
 812 |       |
 813 |       |     // dest index files
 814 |       |     // format: rowsetId_segmentId
 815 |    18 |     auto& inverted_index_file_writers =
 816 |    18 |             dynamic_cast<BaseBetaRowsetWriter*>(_output_rs_writer.get())->index_file_writers();
 817 |    18 |     DBUG_EXECUTE_IF(
 818 |    18 |             "Compaction::do_inverted_index_compaction_inverted_index_file_writers_size_error",
 819 |    18 |             { inverted_index_file_writers.clear(); })
 820 |    18 |     if (inverted_index_file_writers.size() != dest_segment_num) {
 821 |     0 |         LOG(WARNING) << "failed to do index compaction, dest segment num not match. tablet_id="
 822 |     0 |                      << _tablet->tablet_id() << " dest_segment_num=" << dest_segment_num
 823 |     0 |                      << " inverted_index_file_writers.size()="
 824 |     0 |                      << inverted_index_file_writers.size();
 825 |     0 |         mark_skip_index_compaction(ctx, error_handler);
 826 |     0 |         return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 827 |     0 |                 "dest segment num not match. tablet_id={} dest_segment_num={} "
 828 |     0 |                 "inverted_index_file_writers.size()={}",
 829 |     0 |                 _tablet->tablet_id(), dest_segment_num, inverted_index_file_writers.size());
 830 |     0 |     }
 831 |       |
 832 |       |     // use tmp file dir to store index files
 833 |    18 |     auto tmp_file_dir = ExecEnv::GetInstance()->get_tmp_file_dirs()->get_tmp_file_dir();
 834 |    18 |     auto index_tmp_path = tmp_file_dir / dest_rowset_id.to_string();
 835 |    18 |     LOG(INFO) << "start index compaction"
 836 |    18 |               << ". tablet=" << _tablet->tablet_id() << ", source index size=" << src_segment_num
 837 |    18 |               << ", destination index size=" << dest_segment_num << ".";
 838 |       |
 839 |    18 |     Status status = Status::OK();
 840 |   310 |     for (auto&& column_uniq_id : ctx.columns_to_do_index_compaction) {
 841 |   310 |         auto col = _cur_tablet_schema->column_by_uid(column_uniq_id);
 842 |   310 |         auto index_metas = _cur_tablet_schema->inverted_indexs(col);
 843 |   310 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_can_not_find_index_meta",
 844 |   310 |                         { index_metas.clear(); })
 845 |   310 |         if (index_metas.empty()) {
 846 |     0 |             status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 847 |     0 |                     fmt::format("Can not find index_meta for col {}", col.name()));
 848 |     0 |             LOG(WARNING) << "failed to do index compaction, can not find index_meta for column"
 849 |     0 |                          << ". tablet=" << _tablet->tablet_id()
 850 |     0 |                          << ", column uniq id=" << column_uniq_id;
 851 |     0 |             error_handler(-1, column_uniq_id);
 852 |     0 |             break;
 853 |     0 |         }
 854 |   311 |         for (const auto& index_meta : index_metas) {
 855 |   311 |             std::vector<lucene::store::Directory*> dest_index_dirs(dest_segment_num);
 856 |   311 |             try {
 857 |   311 |                 std::vector<std::unique_ptr<DorisCompoundReader, DirectoryDeleter>> src_idx_dirs(
 858 |   311 |                         src_segment_num);
 859 | 1.34k |                 for (int src_segment_id = 0; src_segment_id < src_segment_num; src_segment_id++) {
 860 | 1.03k |                     auto res = index_file_readers[src_segment_id]->open(index_meta);
 861 | 1.03k |                     DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_reader", {
 862 | 1.03k |                         res = ResultError(Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
 863 | 1.03k |                                 "debug point: Compaction::open_index_file_reader error"));
 864 | 1.03k |                     })
 865 | 1.03k |                     if (!res.has_value()) {
 866 |     0 |                         LOG(WARNING) << "failed to do index compaction, open inverted index file "
 867 |     0 |                                         "reader failed"
 868 |     0 |                                      << ". tablet=" << _tablet->tablet_id()
 869 |     0 |                                      << ", column uniq id=" << column_uniq_id
 870 |     0 |                                      << ", src_segment_id=" << src_segment_id;
 871 |     0 |                         throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
 872 |     0 |                                         res.error().msg());
 873 |     0 |                     }
 874 | 1.03k |                     src_idx_dirs[src_segment_id] = std::move(res.value());
 875 | 1.03k |                 }
 876 |   733 |                 for (int dest_segment_id = 0; dest_segment_id < dest_segment_num;
 877 |   422 |                      dest_segment_id++) {
 878 |   422 |                     auto res = inverted_index_file_writers[dest_segment_id]->open(index_meta);
 879 |   422 |                     DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_writer", {
 880 |   422 |                         res = ResultError(Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
 881 |   422 |                                 "debug point: Compaction::open_inverted_index_file_writer error"));
 882 |   422 |                     })
 883 |   422 |                     if (!res.has_value()) {
 884 |     0 |                         LOG(WARNING) << "failed to do index compaction, open inverted index file "
 885 |     0 |                                         "writer failed"
 886 |     0 |                                      << ". tablet=" << _tablet->tablet_id()
 887 |     0 |                                      << ", column uniq id=" << column_uniq_id
 888 |     0 |                                      << ", dest_segment_id=" << dest_segment_id;
 889 |     0 |                         throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
 890 |     0 |                                         res.error().msg());
 891 |     0 |                     }
 892 |       |                     // Destination directories in dest_index_dirs do not need to be destructed,
 893 |       |                     // but their lifecycle must be managed by inverted_index_file_writers.
 894 |   422 |                     dest_index_dirs[dest_segment_id] = res.value().get();
 895 |   422 |                 }
 896 |   311 |                 auto st = compact_column(index_meta->index_id(), src_idx_dirs, dest_index_dirs,
 897 |   311 |                                          index_tmp_path.native(), trans_vec, dest_segment_num_rows);
 898 |   311 |                 if (!st.ok()) {
 899 |     2 |                     error_handler(index_meta->index_id(), column_uniq_id);
 900 |     2 |                     status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(st.msg());
 901 |     2 |                 }
 902 |   311 |             } catch (CLuceneError& e) {
 903 |     0 |                 error_handler(index_meta->index_id(), column_uniq_id);
 904 |     0 |                 status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
 905 |     0 |             } catch (const Exception& e) {
 906 |     0 |                 error_handler(index_meta->index_id(), column_uniq_id);
 907 |     0 |                 status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
 908 |     0 |             }
 909 |   311 |         }
 910 |   310 |     }
 911 |       |
 912 |       |     // check index compaction status. If the status is not ok, we should return an error and end this compaction round.
 913 |    18 |     if (!status.ok()) {
 914 |     1 |         return status;
 915 |     1 |     }
 916 |    18 |     LOG(INFO) << "succeed to do index compaction"
 917 |    17 |               << ". tablet=" << _tablet->tablet_id()
 918 |    17 |               << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
 919 |       |
 920 |    17 |     return Status::OK();
 921 |    18 | }
 922 |       |
 923 |       | void Compaction::mark_skip_index_compaction(
 924 |       |         const RowsetWriterContext& context,
 925 |     0 |         const std::function<void(int64_t, int64_t)>& error_handler) {
 926 |     0 |     for (auto&& column_uniq_id : context.columns_to_do_index_compaction) {
 927 |     0 |         auto col = _cur_tablet_schema->column_by_uid(column_uniq_id);
 928 |     0 |         auto index_metas = _cur_tablet_schema->inverted_indexs(col);
 929 |     0 |         DBUG_EXECUTE_IF("Compaction::mark_skip_index_compaction_can_not_find_index_meta",
 930 |     0 |                         { index_metas.clear(); })
 931 |     0 |         if (index_metas.empty()) {
 932 |     0 |             LOG(WARNING) << "mark skip index compaction, can not find index_meta for column"
 933 |     0 |                          << ". tablet=" << _tablet->tablet_id()
 934 |     0 |                          << ", column uniq id=" << column_uniq_id;
 935 |     0 |             error_handler(-1, column_uniq_id);
 936 |     0 |             continue;
 937 |     0 |         }
 938 |     0 |         for (const auto& index_meta : index_metas) {
 939 |     0 |             error_handler(index_meta->index_id(), column_uniq_id);
 940 |     0 |         }
 941 |     0 |     }
 942 |     0 | }
 943 |       |
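On any per-column failure, error_handler (and mark_skip_index_compaction for early failures) stamps the failing column id onto every input rowset, so later rounds leave that column's index to normal rewriting instead of retrying the file-level merge. A sketch of such a per-rowset skip set; std::set here is only an assumed stand-in for whatever BetaRowset keeps internally:

    #include <cstdint>
    #include <mutex>
    #include <set>

    // Illustrative per-rowset record of columns whose inverted index must
    // not be compacted by file merging again.
    class SkipIndexCompaction {
    public:
        void mark(int32_t col_unique_id) {
            std::lock_guard<std::mutex> lock(_mutex);
            _skipped.insert(col_unique_id);
        }
        bool is_marked(int32_t col_unique_id) const {
            std::lock_guard<std::mutex> lock(_mutex);
            return _skipped.count(col_unique_id) > 0;
        }

    private:
        mutable std::mutex _mutex;
        std::set<int32_t> _skipped;
    };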
944
24
void Compaction::construct_index_compaction_columns(RowsetWriterContext& ctx) {
945
419
    for (const auto& index : _cur_tablet_schema->inverted_indexes()) {
946
419
        auto col_unique_ids = index->col_unique_ids();
947
        // check if column unique ids is empty to avoid crash
948
419
        if (col_unique_ids.empty()) {
949
1
            LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] index[" << index->index_id()
950
1
                         << "] has no column unique id, will skip index compaction."
951
1
                         << " tablet_schema=" << _cur_tablet_schema->dump_full_schema();
952
1
            continue;
953
1
        }
954
418
        auto col_unique_id = col_unique_ids[0];
955
418
        if (!_cur_tablet_schema->has_column_unique_id(col_unique_id)) {
956
0
            LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
957
0
                         << col_unique_id << "] not found, will skip index compaction";
958
0
            continue;
959
0
        }
960
        // Avoid doing inverted index compaction on non-slice type columns
961
418
        if (!field_is_slice_type(_cur_tablet_schema->column_by_uid(col_unique_id).type())) {
962
25
            continue;
963
25
        }
964
965
        // if index properties are different, index compaction maybe needs to be skipped.
966
393
        bool is_continue = false;
967
393
        std::optional<std::map<std::string, std::string>> first_properties;
968
1.30k
        for (const auto& rowset : _input_rowsets) {
969
1.30k
            auto tablet_indexs = rowset->tablet_schema()->inverted_indexs(col_unique_id);
970
            // no inverted index or index id is different from current index id
971
1.30k
            auto it = std::find_if(tablet_indexs.begin(), tablet_indexs.end(),
972
1.30k
                                   [&index](const auto& tablet_index) {
973
1.30k
                                       return tablet_index->index_id() == index->index_id();
974
1.30k
                                   });
975
1.30k
            if (it != tablet_indexs.end()) {
976
1.30k
                const auto* tablet_index = *it;
977
1.30k
                auto properties = tablet_index->properties();
978
1.30k
                if (!first_properties.has_value()) {
979
391
                    first_properties = properties;
980
911
                } else {
981
911
                    DBUG_EXECUTE_IF(
982
911
                            "Compaction::do_inverted_index_compaction_index_properties_different",
983
911
                            { properties.emplace("dummy_key", "dummy_value"); })
984
911
                    if (properties != first_properties.value()) {
985
3
                        is_continue = true;
986
3
                        break;
987
3
                    }
988
911
                }
989
1.30k
            } else {
990
2
                is_continue = true;
991
2
                break;
992
2
            }
993
1.30k
        }
994
393
        if (is_continue) {
995
5
            continue;
996
5
        }
997
1.29k
        auto has_inverted_index = [&](const RowsetSharedPtr& src_rs) {
998
1.29k
            auto* rowset = static_cast<BetaRowset*>(src_rs.get());
999
1.29k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_is_skip_index_compaction",
1000
1.29k
                            { rowset->set_skip_index_compaction(col_unique_id); })
1001
1.29k
            if (rowset->is_skip_index_compaction(col_unique_id)) {
1002
1
                LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] rowset["
1003
1
                             << rowset->rowset_id() << "] column_unique_id[" << col_unique_id
1004
1
                             << "] skip inverted index compaction due to last failure";
1005
1
                return false;
1006
1
            }
1007
1008
1.29k
            auto fs = rowset->rowset_meta()->fs();
1009
1.29k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_get_fs_error",
1010
1.29k
                            { fs = nullptr; })
1011
1.29k
            if (!fs) {
1012
0
                LOG(WARNING) << "get fs failed, resource_id="
1013
0
                             << rowset->rowset_meta()->resource_id();
1014
0
                return false;
1015
0
            }
1016
1017
1.29k
            auto index_metas = rowset->tablet_schema()->inverted_indexs(col_unique_id);
1018
1.29k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_index_meta_nullptr",
1019
1.29k
                            { index_metas.clear(); })
1020
1.29k
            if (index_metas.empty()) {
1021
0
                LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
1022
0
                             << col_unique_id << "] index meta is null, will skip index compaction";
1023
0
                return false;
1024
0
            }
1025
1.29k
            for (const auto& index_meta : index_metas) {
1026
2.59k
                for (auto i = 0; i < rowset->num_segments(); i++) {
1027
                    // TODO: inverted_index_path
1028
1.30k
                    auto seg_path = rowset->segment_path(i);
1029
1.30k
                    DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_seg_path_nullptr", {
1030
1.30k
                        seg_path = ResultError(Status::Error<ErrorCode::INTERNAL_ERROR>(
1031
1.30k
                                "construct_skip_inverted_index_seg_path_nullptr"));
1032
1.30k
                    })
1033
1.30k
                    if (!seg_path) {
1034
0
                        LOG(WARNING) << seg_path.error();
1035
0
                        return false;
1036
0
                    }
1037
1038
1.30k
                    std::string index_file_path;
1039
1.30k
                    try {
1040
1.30k
                        auto index_file_reader = std::make_unique<IndexFileReader>(
1041
1.30k
                                fs,
1042
1.30k
                                std::string {InvertedIndexDescriptor::get_index_file_path_prefix(
1043
1.30k
                                        seg_path.value())},
1044
1.30k
                                _cur_tablet_schema->get_inverted_index_storage_format(),
1045
1.30k
                                rowset->rowset_meta()->inverted_index_file_info(i));
1046
1.30k
                        auto st = index_file_reader->init(config::inverted_index_read_buffer_size);
1047
1.30k
                        index_file_path = index_file_reader->get_index_file_path(index_meta);
1048
1.30k
                        DBUG_EXECUTE_IF(
1049
1.30k
                                "Compaction::construct_skip_inverted_index_index_file_reader_init_"
1050
1.30k
                                "status_not_ok",
1051
1.30k
                                {
1052
1.30k
                                    st = Status::Error<ErrorCode::INTERNAL_ERROR>(
1053
1.30k
                                            "debug point: "
1054
1.30k
                                            "construct_skip_inverted_index_index_file_reader_init_"
1055
1.30k
                                            "status_"
1056
1.30k
                                            "not_ok");
1057
1.30k
                                })
1058
1.30k
                        if (!st.ok()) {
1059
0
                            LOG(WARNING) << "init index " << index_file_path << " error:" << st;
1060
0
                            return false;
1061
0
                        }
1062
1063
                        // check index meta
1064
1.30k
                        auto result = index_file_reader->open(index_meta);
1065
1.30k
                        DBUG_EXECUTE_IF(
1066
1.30k
                                "Compaction::construct_skip_inverted_index_index_file_reader_open_"
1067
1.30k
                                "error",
1068
1.30k
                                {
1069
1.30k
                                    result = ResultError(
1070
1.30k
                                            Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
1071
1.30k
                                                    "CLuceneError occur when open idx file"));
1072
1.30k
                                })
1073
1.30k
                        if (!result.has_value()) {
1074
0
                            LOG(WARNING) << "open index " << index_file_path
1075
0
                                         << " error:" << result.error();
1076
0
                            return false;
1077
0
                        }
1078
1.30k
                        auto reader = std::move(result.value());
1079
1.30k
                        std::vector<std::string> files;
1080
1.30k
                        reader->list(&files);
1081
1.30k
                        reader->close();
1082
1.30k
                        DBUG_EXECUTE_IF(
1083
1.30k
                                "Compaction::construct_skip_inverted_index_index_reader_close_"
1084
1.30k
                                "error",
1085
1.30k
                                { _CLTHROWA(CL_ERR_IO, "debug point: reader close error"); })
1086
1087
1.30k
                        DBUG_EXECUTE_IF(
1088
1.30k
                                "Compaction::construct_skip_inverted_index_index_files_count",
1089
1.30k
                                { files.clear(); })
1090
1091
                        // Why 3?
1092
                        // A slice-type index file has at least 3 files: null_bitmap, segments_N, segments.gen.
1093
1.30k
                        if (files.size() < 3) {
1094
0
                            LOG(WARNING)
1095
0
                                    << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
1096
0
                                    << col_unique_id << "]," << index_file_path
1097
0
                                    << " is corrupted, will skip index compaction";
1098
0
                            return false;
1099
0
                        }
1100
1.30k
                    } catch (CLuceneError& err) {
1101
0
                        LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
1102
0
                                     << col_unique_id << "] open index[" << index_file_path
1103
0
                                     << "], will skip index compaction, error:" << err.what();
1104
0
                        return false;
1105
0
                    }
1106
1.30k
                }
1107
1.29k
            }
1108
1.29k
            return true;
1109
1.29k
        };
1110
1111
388
        bool all_have_inverted_index = std::all_of(_input_rowsets.begin(), _input_rowsets.end(),
1112
388
                                                   std::move(has_inverted_index));
1113
1114
388
        if (all_have_inverted_index) {
1115
387
            ctx.columns_to_do_index_compaction.insert(col_unique_id);
1116
387
        }
1117
388
    }
1118
24
}
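The skip decision above hinges on `std::all_of` short-circuiting: index compaction for a column is enabled only if every input rowset passes the `has_inverted_index` probe, and the scan stops at the first rowset that fails. A minimal, self-contained sketch of the same pattern, using a hypothetical `FakeRowset` type rather than the real `BetaRowset`:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct FakeRowset {
        bool skip_index_compaction; // stands in for is_skip_index_compaction(col_unique_id)
        int index_file_count;       // stands in for files.size() from the index reader
    };

    int main() {
        std::vector<FakeRowset> input_rowsets = {{false, 5}, {false, 3}, {false, 4}};

        // A rowset qualifies only if it was not flagged by a previous failure and
        // its index file is structurally complete (at least 3 files for slice types).
        auto has_inverted_index = [](const FakeRowset& rs) {
            return !rs.skip_index_compaction && rs.index_file_count >= 3;
        };

        // Index compaction is enabled for the column only when every rowset qualifies.
        bool all_have_inverted_index =
                std::all_of(input_rowsets.begin(), input_rowsets.end(), has_inverted_index);
        std::cout << std::boolalpha << all_have_inverted_index << '\n'; // true
    }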
1119
1120
0
Status CompactionMixin::update_delete_bitmap() {
1121
    // for mow with cluster keys, compaction read data with delete bitmap
1122
    // if tablet is not ready(such as schema change), we need to update delete bitmap
1123
0
    {
1124
0
        std::shared_lock meta_rlock(_tablet->get_header_lock());
1125
0
        if (_tablet->tablet_state() != TABLET_NOTREADY) {
1126
0
            return Status::OK();
1127
0
        }
1128
0
    }
1129
0
    OlapStopWatch watch;
1130
0
    std::vector<RowsetSharedPtr> rowsets;
1131
0
    for (const auto& rowset : _input_rowsets) {
1132
0
        std::lock_guard rwlock(tablet()->get_rowset_update_lock());
1133
0
        std::shared_lock rlock(_tablet->get_header_lock());
1134
0
        Status st = _tablet->update_delete_bitmap_without_lock(_tablet, rowset, &rowsets);
1135
0
        if (!st.ok()) {
1136
0
            LOG(INFO) << "failed update_delete_bitmap_without_lock for tablet_id="
1137
0
                      << _tablet->tablet_id() << ", st=" << st.to_string();
1138
0
            return st;
1139
0
        }
1140
0
        rowsets.push_back(rowset);
1141
0
    }
1142
0
    LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id()
1143
0
              << ", rowsets: " << _input_rowsets.size() << ", cost: " << watch.get_elapse_time_us()
1144
0
              << "(us)";
1145
0
    return Status::OK();
1146
0
}
1147
1148
0
Status CloudCompactionMixin::update_delete_bitmap() {
1149
    // for mow with cluster keys, compaction read data with delete bitmap
1150
    // if tablet is not ready(such as schema change), we need to update delete bitmap
1151
0
    {
1152
0
        std::shared_lock meta_rlock(_tablet->get_header_lock());
1153
0
        if (_tablet->tablet_state() != TABLET_NOTREADY) {
1154
0
            return Status::OK();
1155
0
        }
1156
0
    }
1157
0
    OlapStopWatch watch;
1158
0
    std::vector<RowsetSharedPtr> rowsets;
1159
0
    for (const auto& rowset : _input_rowsets) {
1160
0
        Status st = _tablet->update_delete_bitmap_without_lock(_tablet, rowset, &rowsets);
1161
0
        if (!st.ok()) {
1162
0
            LOG(INFO) << "failed update_delete_bitmap_without_lock for tablet_id="
1163
0
                      << _tablet->tablet_id() << ", st=" << st.to_string();
1164
0
            return st;
1165
0
        }
1166
0
        rowsets.push_back(rowset);
1167
0
    }
1168
0
    LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id()
1169
0
              << ", rowsets: " << _input_rowsets.size() << ", cost: " << watch.get_elapse_time_us()
1170
0
              << "(us)";
1171
0
    return Status::OK();
1172
0
}
1173
1174
41
Status CompactionMixin::construct_output_rowset_writer(RowsetWriterContext& ctx) {
1175
    // only do index compaction for dup_keys and unique_keys with mow enabled
1176
41
    if (_enable_inverted_index_compaction && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1177
24
                                                _tablet->enable_unique_key_merge_on_write()) ||
1178
24
                                               _tablet->keys_type() == KeysType::DUP_KEYS))) {
1179
24
        construct_index_compaction_columns(ctx);
1180
24
    }
1181
41
    ctx.version = _output_version;
1182
41
    ctx.rowset_state = VISIBLE;
1183
41
    ctx.segments_overlap = NONOVERLAPPING;
1184
41
    ctx.tablet_schema = _cur_tablet_schema;
1185
41
    ctx.newest_write_timestamp = _newest_write_timestamp;
1186
41
    ctx.write_type = DataWriteType::TYPE_COMPACTION;
1187
41
    ctx.compaction_type = compaction_type();
1188
41
    ctx.allow_packed_file = false;
1189
41
    _output_rs_writer = DORIS_TRY(_tablet->create_rowset_writer(ctx, _is_vertical));
1190
41
    _pending_rs_guard = _engine.add_pending_rowset(ctx);
1191
41
    return Status::OK();
1192
41
}
1193
1194
0
Status CompactionMixin::modify_rowsets() {
1195
0
    std::vector<RowsetSharedPtr> output_rowsets;
1196
0
    output_rowsets.push_back(_output_rowset);
1197
1198
0
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1199
0
        _tablet->enable_unique_key_merge_on_write()) {
1200
0
        Version version = tablet()->max_version();
1201
0
        DeleteBitmap output_rowset_delete_bitmap(_tablet->tablet_id());
1202
0
        std::unique_ptr<RowLocationSet> missed_rows;
1203
0
        if ((config::enable_missing_rows_correctness_check ||
1204
0
             config::enable_mow_compaction_correctness_check_core ||
1205
0
             config::enable_mow_compaction_correctness_check_fail) &&
1206
0
            !_allow_delete_in_cumu_compaction &&
1207
0
            compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1208
0
            missed_rows = std::make_unique<RowLocationSet>();
1209
0
            LOG(INFO) << "RowLocation Set inited succ for tablet:" << _tablet->tablet_id();
1210
0
        }
1211
0
        std::unique_ptr<std::map<RowsetSharedPtr, RowLocationPairList>> location_map;
1212
0
        if (config::enable_rowid_conversion_correctness_check &&
1213
0
            tablet()->tablet_schema()->cluster_key_uids().empty()) {
1214
0
            location_map = std::make_unique<std::map<RowsetSharedPtr, RowLocationPairList>>();
1215
0
            LOG(INFO) << "Location Map inited succ for tablet:" << _tablet->tablet_id();
1216
0
        }
1217
        // Convert the delete bitmap of the input rowsets to the output rowset.
1218
        // New loads are not blocked, so some keys of the input rowsets might
1219
        // be deleted in the meantime. We need to deal with the delete bitmap
1220
        // of the incremental data later.
1221
        // TODO(LiaoXin): check if there are duplicate keys
1222
0
        std::size_t missed_rows_size = 0;
1223
0
        tablet()->calc_compaction_output_rowset_delete_bitmap(
1224
0
                _input_rowsets, *_rowid_conversion, 0, version.second + 1, missed_rows.get(),
1225
0
                location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1226
0
                &output_rowset_delete_bitmap);
1227
0
        if (missed_rows) {
1228
0
            missed_rows_size = missed_rows->size();
1229
0
            std::size_t merged_missed_rows_size = _stats.merged_rows;
1230
0
            if (!_tablet->tablet_meta()->tablet_schema()->cluster_key_uids().empty()) {
1231
0
                merged_missed_rows_size += _stats.filtered_rows;
1232
0
            }
1233
1234
            // Suppose a heavy schema change process on BE converting tablet A to tablet B.
1235
            // 1. during schema change double write, new loads write [X-Y] on tablet B.
1236
            // 2. rowsets with versions [a],[a+1],...,[b-1],[b] on tablet B are picked for cumu compaction (X<=a<b<=Y). (cumu compaction
1237
            //    on the new tablet during schema change double write is allowed after https://github.com/apache/doris/pull/16470)
1238
            // 3. schema change removes all rowsets on tablet B before version Z (b<=Z<=Y) before it begins to convert historical rowsets.
1239
            // 4. schema change finishes.
1240
            // 5. cumu compaction begins on the new tablet with versions [a],...,[b]. If there are duplicate keys between these rowsets,
1241
            //    the compaction check will fail, because these rowsets skipped delete bitmap calculation in the commit and
1242
            //    publish phases while tablet B was in NOT_READY state.
1243
1244
            // Considering that the cumu compaction will eventually fail in this situation because `Tablet::modify_rowsets` checks whether rowsets in
1245
            // `to_delete` (_input_rowsets) still exist in the tablet's `_rs_version_map`, we can simply skip the missed-rows check here.
1246
0
            bool need_to_check_missed_rows = true;
1247
0
            {
1248
0
                std::shared_lock rlock(_tablet->get_header_lock());
1249
0
                need_to_check_missed_rows =
1250
0
                        std::all_of(_input_rowsets.begin(), _input_rowsets.end(),
1251
0
                                    [&](const RowsetSharedPtr& rowset) {
1252
0
                                        return tablet()->rowset_exists_unlocked(rowset);
1253
0
                                    });
1254
0
            }
1255
1256
0
            if (_tablet->tablet_state() == TABLET_RUNNING &&
1257
0
                merged_missed_rows_size != missed_rows_size && need_to_check_missed_rows) {
1258
0
                std::stringstream ss;
1259
0
                ss << "cumulative compaction: the merged rows(" << _stats.merged_rows
1260
0
                   << "), filtered rows(" << _stats.filtered_rows
1261
0
                   << ") is not equal to missed rows(" << missed_rows_size
1262
0
                   << ") in rowid conversion, tablet_id: " << _tablet->tablet_id()
1263
0
                   << ", table_id:" << _tablet->table_id();
1264
0
                if (missed_rows_size == 0) {
1265
0
                    ss << ", debug info: ";
1266
0
                    DeleteBitmap subset_map(_tablet->tablet_id());
1267
0
                    for (auto rs : _input_rowsets) {
1268
0
                        _tablet->tablet_meta()->delete_bitmap().subset(
1269
0
                                {rs->rowset_id(), 0, 0},
1270
0
                                {rs->rowset_id(), rs->num_segments(), version.second + 1},
1271
0
                                &subset_map);
1272
0
                        ss << "(rowset id: " << rs->rowset_id()
1273
0
                           << ", delete bitmap cardinality: " << subset_map.cardinality() << ")";
1274
0
                    }
1275
0
                    ss << ", version[0-" << version.second + 1 << "]";
1276
0
                }
1277
0
                std::string err_msg = fmt::format(
1278
0
                        "cumulative compaction: the merged rows({}), filtered rows({})"
1279
0
                        " is not equal to missed rows({}) in rowid conversion,"
1280
0
                        " tablet_id: {}, table_id:{}",
1281
0
                        _stats.merged_rows, _stats.filtered_rows, missed_rows_size,
1282
0
                        _tablet->tablet_id(), _tablet->table_id());
1283
0
                LOG(WARNING) << err_msg;
1284
0
                if (config::enable_mow_compaction_correctness_check_core) {
1285
0
                    CHECK(false) << err_msg;
1286
0
                } else if (config::enable_mow_compaction_correctness_check_fail) {
1287
0
                    return Status::InternalError<false>(err_msg);
1288
0
                } else {
1289
0
                    DCHECK(false) << err_msg;
1290
0
                }
1291
0
            }
1292
0
        }
1293
1294
0
        if (location_map) {
1295
0
            RETURN_IF_ERROR(tablet()->check_rowid_conversion(_output_rowset, *location_map));
1296
0
            location_map->clear();
1297
0
        }
1298
1299
0
        {
1300
0
            std::lock_guard<std::mutex> wrlock_(tablet()->get_rowset_update_lock());
1301
0
            std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1302
0
            SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1303
1304
            // Here we calculate the delete bitmaps of all rowsets that are committed but not yet published, to reduce the
1305
            // calculation pressure of the publish phase.
1306
            // All rowsets that need to be recalculated have been published, so we don't need to acquire the lock.
1307
            // Step 1: collect the delete bitmaps of all of this tablet's committed rowsets
1308
0
            CommitTabletTxnInfoVec commit_tablet_txn_info_vec {};
1309
0
            _engine.txn_manager()->get_all_commit_tablet_txn_info_by_tablet(
1310
0
                    *tablet(), &commit_tablet_txn_info_vec);
1311
1312
            // Step 2: calculate the delete bitmaps of all rowsets published during compaction.
1313
0
            for (auto& it : commit_tablet_txn_info_vec) {
1314
0
                if (!_check_if_includes_input_rowsets(it.rowset_ids)) {
1315
                    // When calculating the delete bitmap of all committed rowsets relative to the compaction,
1316
                    // there may be cases where the compacted rowsets are newer than the committed rowsets.
1317
                    // In that case, row number conversion cannot be performed, otherwise data would be missing.
1318
                    // Therefore, we need to check that every committed rowset has calculated the delete bitmap for
1319
                    // all compaction input rowsets.
1320
0
                    continue;
1321
0
                }
1322
0
                DeleteBitmap txn_output_delete_bitmap(_tablet->tablet_id());
1323
0
                tablet()->calc_compaction_output_rowset_delete_bitmap(
1324
0
                        _input_rowsets, *_rowid_conversion, 0, UINT64_MAX, missed_rows.get(),
1325
0
                        location_map.get(), *it.delete_bitmap.get(), &txn_output_delete_bitmap);
1326
0
                if (config::enable_merge_on_write_correctness_check) {
1327
0
                    RowsetIdUnorderedSet rowsetids;
1328
0
                    rowsetids.insert(_output_rowset->rowset_id());
1329
0
                    _tablet->add_sentinel_mark_to_delete_bitmap(&txn_output_delete_bitmap,
1330
0
                                                                rowsetids);
1331
0
                }
1332
0
                it.delete_bitmap->merge(txn_output_delete_bitmap);
1333
                // Step 3: write back the updated delete bitmap and tablet info.
1334
0
                it.rowset_ids.insert(_output_rowset->rowset_id());
1335
0
                _engine.txn_manager()->set_txn_related_delete_bitmap(
1336
0
                        it.partition_id, it.transaction_id, _tablet->tablet_id(),
1337
0
                        tablet()->tablet_uid(), true, it.delete_bitmap, it.rowset_ids,
1338
0
                        it.partial_update_info);
1339
0
            }
1340
1341
            // Convert the delete bitmap of the input rowsets to the output rowset for
1342
            // incremental data.
1343
0
            tablet()->calc_compaction_output_rowset_delete_bitmap(
1344
0
                    _input_rowsets, *_rowid_conversion, version.second, UINT64_MAX,
1345
0
                    missed_rows.get(), location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1346
0
                    &output_rowset_delete_bitmap);
1347
1348
0
            if (location_map) {
1349
0
                RETURN_IF_ERROR(tablet()->check_rowid_conversion(_output_rowset, *location_map));
1350
0
            }
1351
1352
0
            tablet()->merge_delete_bitmap(output_rowset_delete_bitmap);
1353
0
            RETURN_IF_ERROR(tablet()->modify_rowsets(output_rowsets, _input_rowsets, true));
1354
0
        }
1355
0
    } else {
1356
0
        std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1357
0
        SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1358
0
        RETURN_IF_ERROR(tablet()->modify_rowsets(output_rowsets, _input_rowsets, true));
1359
0
    }
1360
1361
0
    if (config::tablet_rowset_stale_sweep_by_size &&
1362
0
        _tablet->tablet_meta()->all_stale_rs_metas().size() >=
1363
0
                config::tablet_rowset_stale_sweep_threshold_size) {
1364
0
        tablet()->delete_expired_stale_rowset();
1365
0
    }
1366
1367
0
    int64_t cur_max_version = 0;
1368
0
    {
1369
0
        std::shared_lock rlock(_tablet->get_header_lock());
1370
0
        cur_max_version = _tablet->max_version_unlocked();
1371
0
        tablet()->save_meta();
1372
0
    }
1373
0
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1374
0
        _tablet->enable_unique_key_merge_on_write()) {
1375
0
        auto st = TabletMetaManager::remove_old_version_delete_bitmap(
1376
0
                tablet()->data_dir(), _tablet->tablet_id(), cur_max_version);
1377
0
        if (!st.ok()) {
1378
0
            LOG(WARNING) << "failed to remove old version delete bitmap, st: " << st;
1379
0
        }
1380
0
    }
1381
0
    DBUG_EXECUTE_IF("CumulativeCompaction.modify_rowsets.delete_expired_stale_rowset",
1382
0
                    { tablet()->delete_expired_stale_rowset(); });
1383
0
    _tablet->prefill_dbm_agg_cache_after_compaction(_output_rowset);
1384
0
    return Status::OK();
1385
0
}
1386
1387
bool CompactionMixin::_check_if_includes_input_rowsets(
1388
0
        const RowsetIdUnorderedSet& commit_rowset_ids_set) const {
1389
0
    std::vector<RowsetId> commit_rowset_ids {};
1390
0
    commit_rowset_ids.insert(commit_rowset_ids.end(), commit_rowset_ids_set.begin(),
1391
0
                             commit_rowset_ids_set.end());
1392
0
    std::sort(commit_rowset_ids.begin(), commit_rowset_ids.end());
1393
0
    std::vector<RowsetId> input_rowset_ids {};
1394
0
    for (const auto& rowset : _input_rowsets) {
1395
0
        input_rowset_ids.emplace_back(rowset->rowset_meta()->rowset_id());
1396
0
    }
1397
0
    std::sort(input_rowset_ids.begin(), input_rowset_ids.end());
1398
0
    return std::includes(commit_rowset_ids.begin(), commit_rowset_ids.end(),
1399
0
                         input_rowset_ids.begin(), input_rowset_ids.end());
1400
0
}
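`std::includes` only yields a meaningful subset test when both ranges are sorted with the same ordering, which is why both id vectors are sorted first: it returns true exactly when every element of the second sorted range appears in the first. A small sketch with plain integers standing in for `RowsetId` (illustrative only):

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main() {
        // commit_rowset_ids must contain every input rowset id for the
        // delete-bitmap conversion above to be safe.
        std::vector<int> commit_rowset_ids = {1, 2, 3, 5, 8};
        std::vector<int> input_rowset_ids = {2, 5, 8};

        // Both ranges must be sorted before calling std::includes.
        std::sort(commit_rowset_ids.begin(), commit_rowset_ids.end());
        std::sort(input_rowset_ids.begin(), input_rowset_ids.end());

        bool includes = std::includes(commit_rowset_ids.begin(), commit_rowset_ids.end(),
                                      input_rowset_ids.begin(), input_rowset_ids.end());
        std::cout << std::boolalpha << includes << '\n'; // true
    }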
1401
1402
0
void CompactionMixin::update_compaction_level() {
1403
0
    auto* cumu_policy = tablet()->cumulative_compaction_policy();
1404
0
    if (cumu_policy && cumu_policy->name() == CUMULATIVE_TIME_SERIES_POLICY) {
1405
0
        int64_t compaction_level =
1406
0
                cumu_policy->get_compaction_level(tablet(), _input_rowsets, _output_rowset);
1407
0
        _output_rowset->rowset_meta()->set_compaction_level(compaction_level);
1408
0
    }
1409
0
}
1410
1411
0
Status Compaction::check_correctness() {
1412
    // 1. check row number
1413
0
    if (_input_row_num != _output_rowset->num_rows() + _stats.merged_rows + _stats.filtered_rows) {
1414
0
        return Status::Error<CHECK_LINES_ERROR>(
1415
0
                "row_num does not match between cumulative input and output! tablet={}, "
1416
0
                "input_row_num={}, merged_row_num={}, filtered_row_num={}, output_row_num={}",
1417
0
                _tablet->tablet_id(), _input_row_num, _stats.merged_rows, _stats.filtered_rows,
1418
0
                _output_rowset->num_rows());
1419
0
    }
1420
    // 2. check variant column path stats
1421
0
    RETURN_IF_ERROR(vectorized::schema_util::VariantCompactionUtil::check_path_stats(
1422
0
            _input_rowsets, _output_rowset, _tablet));
1423
0
    return Status::OK();
1424
0
}
1425
1426
22
int64_t CompactionMixin::get_compaction_permits() {
1427
22
    int64_t permits = 0;
1428
616
    for (auto&& rowset : _input_rowsets) {
1429
616
        permits += rowset->rowset_meta()->get_compaction_score();
1430
616
    }
1431
22
    return permits;
1432
22
}
1433
1434
0
int64_t CompactionMixin::calc_input_rowsets_total_size() const {
1435
0
    int64_t input_rowsets_total_size = 0;
1436
0
    for (const auto& rowset : _input_rowsets) {
1437
0
        const auto& rowset_meta = rowset->rowset_meta();
1438
0
        auto total_size = rowset_meta->total_disk_size();
1439
0
        input_rowsets_total_size += total_size;
1440
0
    }
1441
0
    return input_rowsets_total_size;
1442
0
}
1443
1444
0
int64_t CompactionMixin::calc_input_rowsets_row_num() const {
1445
0
    int64_t input_rowsets_row_num = 0;
1446
0
    for (const auto& rowset : _input_rowsets) {
1447
0
        const auto& rowset_meta = rowset->rowset_meta();
1448
0
        auto num_rows = rowset_meta->num_rows();
1449
0
        input_rowsets_row_num += num_rows;
1450
0
    }
1451
0
    return input_rowsets_row_num;
1452
0
}
1453
1454
0
void Compaction::_load_segment_to_cache() {
1455
    // Load new rowset's segments to cache.
1456
0
    SegmentCacheHandle handle;
1457
0
    auto st = SegmentLoader::instance()->load_segments(
1458
0
            std::static_pointer_cast<BetaRowset>(_output_rowset), &handle, true);
1459
0
    if (!st.ok()) {
1460
0
        LOG(WARNING) << "failed to load segment to cache! output rowset version="
1461
0
                     << _output_rowset->start_version() << "-" << _output_rowset->end_version()
1462
0
                     << ".";
1463
0
    }
1464
0
}
1465
1466
0
Status CloudCompactionMixin::build_basic_info() {
1467
0
    _output_version =
1468
0
            Version(_input_rowsets.front()->start_version(), _input_rowsets.back()->end_version());
1469
1470
0
    _newest_write_timestamp = _input_rowsets.back()->newest_write_timestamp();
1471
1472
0
    std::vector<RowsetMetaSharedPtr> rowset_metas(_input_rowsets.size());
1473
0
    std::transform(_input_rowsets.begin(), _input_rowsets.end(), rowset_metas.begin(),
1474
0
                   [](const RowsetSharedPtr& rowset) { return rowset->rowset_meta(); });
1475
0
    if (is_index_change_compaction()) {
1476
0
        RETURN_IF_ERROR(rebuild_tablet_schema());
1477
0
    } else {
1478
0
        _cur_tablet_schema = _tablet->tablet_schema_with_merged_max_schema_version(rowset_metas);
1479
0
    }
1480
1481
    // if enable_vertical_compact_variant_subcolumns is true, we need to compact the variant subcolumns in separate column groups,
1482
    // so get_extended_compaction_schema will extend the schema for variant columns
1483
0
    if (_enable_vertical_compact_variant_subcolumns) {
1484
0
        RETURN_IF_ERROR(
1485
0
                vectorized::schema_util::VariantCompactionUtil::get_extended_compaction_schema(
1486
0
                        _input_rowsets, _cur_tablet_schema));
1487
0
    }
1488
0
    return Status::OK();
1489
0
}
1490
1491
0
int64_t CloudCompactionMixin::get_compaction_permits() {
1492
0
    int64_t permits = 0;
1493
0
    for (auto&& rowset : _input_rowsets) {
1494
0
        permits += rowset->rowset_meta()->get_compaction_score();
1495
0
    }
1496
0
    return permits;
1497
0
}
1498
1499
CloudCompactionMixin::CloudCompactionMixin(CloudStorageEngine& engine, CloudTabletSPtr tablet,
1500
                                           const std::string& label)
1501
24
        : Compaction(tablet, label), _engine(engine) {
1502
24
    auto uuid = UUIDGenerator::instance()->next_uuid();
1503
24
    std::stringstream ss;
1504
24
    ss << uuid;
1505
24
    _uuid = ss.str();
1506
24
}
1507
1508
0
Status CloudCompactionMixin::execute_compact_impl(int64_t permits) {
1509
0
    OlapStopWatch watch;
1510
1511
0
    RETURN_IF_ERROR(build_basic_info());
1512
1513
0
    LOG(INFO) << "start " << compaction_name() << ". tablet=" << _tablet->tablet_id()
1514
0
              << ", output_version=" << _output_version << ", permits: " << permits;
1515
1516
0
    RETURN_IF_ERROR(merge_input_rowsets());
1517
1518
0
    DBUG_EXECUTE_IF("CloudFullCompaction::modify_rowsets.wrong_rowset_id", {
1519
0
        DCHECK(compaction_type() == ReaderType::READER_FULL_COMPACTION);
1520
0
        RowsetId id;
1521
0
        id.version = 2;
1522
0
        id.hi = _output_rowset->rowset_meta()->rowset_id().hi + ((int64_t)(1) << 56);
1523
0
        id.mi = _output_rowset->rowset_meta()->rowset_id().mi;
1524
0
        id.lo = _output_rowset->rowset_meta()->rowset_id().lo;
1525
0
        _output_rowset->rowset_meta()->set_rowset_id(id);
1526
0
        LOG(INFO) << "[Debug wrong rowset id]:"
1527
0
                  << _output_rowset->rowset_meta()->rowset_id().to_string();
1528
0
    })
1529
1530
    // Currently, the compaction level is only updated under the time_series policy.
1531
0
    update_compaction_level();
1532
1533
0
    RETURN_IF_ERROR(_engine.meta_mgr().commit_rowset(*_output_rowset->rowset_meta().get(), _uuid));
1534
1535
    // 4. modify rowsets in memory
1536
0
    RETURN_IF_ERROR(modify_rowsets());
1537
1538
    // update compaction status data
1539
0
    auto tablet = std::static_pointer_cast<CloudTablet>(_tablet);
1540
0
    tablet->local_read_time_us.fetch_add(_stats.cloud_local_read_time);
1541
0
    tablet->remote_read_time_us.fetch_add(_stats.cloud_remote_read_time);
1542
0
    tablet->exec_compaction_time_us.fetch_add(watch.get_elapse_time_us());
1543
1544
0
    return Status::OK();
1545
0
}
1546
1547
2
int64_t CloudCompactionMixin::initiator() const {
1548
2
    return HashUtil::hash64(_uuid.data(), _uuid.size(), 0) & std::numeric_limits<int64_t>::max();
1549
2
}
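`initiator()` masks the 64-bit hash with `std::numeric_limits<int64_t>::max()` to clear the sign bit, guaranteeing a non-negative id. The same trick in isolation, with `std::hash` standing in for `HashUtil::hash64` (the mask is the point, not the hash):

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <limits>
    #include <string>

    int main() {
        std::string uuid = "example-uuid"; // stand-in for the compaction uuid
        uint64_t h = std::hash<std::string> {}(uuid);
        // AND-ing with INT64_MAX zeroes the top bit, so the cast is always non-negative.
        int64_t initiator = int64_t(h & uint64_t(std::numeric_limits<int64_t>::max()));
        std::cout << initiator << '\n'; // always >= 0
    }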
1550
1551
namespace cloud {
1552
size_t truncate_rowsets_by_txn_size(std::vector<RowsetSharedPtr>& rowsets, int64_t& kept_size_bytes,
1553
14
                                    int64_t& truncated_size_bytes) {
1554
14
    if (rowsets.empty()) {
1555
1
        kept_size_bytes = 0;
1556
1
        truncated_size_bytes = 0;
1557
1
        return 0;
1558
1
    }
1559
1560
13
    int64_t max_size = config::compaction_txn_max_size_bytes;
1561
13
    int64_t cumulative_meta_size = 0;
1562
13
    size_t keep_count = 0;
1563
1564
34
    for (size_t i = 0; i < rowsets.size(); ++i) {
1565
25
        const auto& rs = rowsets[i];
1566
1567
        // Estimate rowset meta size using doris_rowset_meta_to_cloud
1568
25
        auto cloud_meta = cloud::doris_rowset_meta_to_cloud(rs->rowset_meta()->get_rowset_pb(true));
1569
25
        int64_t rowset_meta_size = cloud_meta.ByteSizeLong();
1570
1571
25
        cumulative_meta_size += rowset_meta_size;
1572
1573
25
        if (keep_count > 0 && cumulative_meta_size > max_size) {
1574
            // Rollback and stop
1575
4
            cumulative_meta_size -= rowset_meta_size;
1576
4
            break;
1577
4
        }
1578
1579
21
        keep_count++;
1580
21
    }
1581
1582
    // Ensure at least 1 rowset is kept
1583
13
    if (keep_count == 0) {
1584
0
        keep_count = 1;
1585
        // Recalculate size for the first rowset
1586
0
        const auto& rs = rowsets[0];
1587
0
        auto cloud_meta = cloud::doris_rowset_meta_to_cloud(rs->rowset_meta()->get_rowset_pb());
1588
0
        cumulative_meta_size = cloud_meta.ByteSizeLong();
1589
0
    }
1590
1591
    // Calculate truncated size
1592
13
    int64_t truncated_total_size = 0;
1593
13
    size_t truncated_count = rowsets.size() - keep_count;
1594
13
    if (truncated_count > 0) {
1595
35
        for (size_t i = keep_count; i < rowsets.size(); ++i) {
1596
31
            auto cloud_meta =
1597
31
                    cloud::doris_rowset_meta_to_cloud(rowsets[i]->rowset_meta()->get_rowset_pb());
1598
31
            truncated_total_size += cloud_meta.ByteSizeLong();
1599
31
        }
1600
4
        rowsets.resize(keep_count);
1601
4
    }
1602
1603
13
    kept_size_bytes = cumulative_meta_size;
1604
13
    truncated_size_bytes = truncated_total_size;
1605
13
    return truncated_count;
1606
14
}
1607
} // namespace cloud
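The truncation loop above keeps a prefix of rowsets whose estimated meta size stays under `compaction_txn_max_size_bytes`, rolls back the rowset that overflowed, and always keeps at least one. The same accounting over plain sizes, as a minimal sketch (the hypothetical `truncate_by_budget` is illustrative, not the production function):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Keep the longest prefix whose cumulative size fits the budget; always keep
    // at least the first element. Returns the number of elements dropped.
    size_t truncate_by_budget(std::vector<int64_t>& sizes, int64_t budget, int64_t& kept_bytes) {
        kept_bytes = 0;
        size_t keep_count = 0;
        for (int64_t s : sizes) {
            kept_bytes += s;
            if (keep_count > 0 && kept_bytes > budget) {
                kept_bytes -= s; // roll back the element that overflowed and stop
                break;
            }
            keep_count++;
        }
        if (keep_count == 0 && !sizes.empty()) { // mirror the "at least 1 rowset" rule
            keep_count = 1;
            kept_bytes = sizes[0];
        }
        size_t dropped = sizes.size() - keep_count;
        sizes.resize(keep_count);
        return dropped;
    }

    int main() {
        std::vector<int64_t> sizes = {100, 200, 300, 400};
        int64_t kept = 0;
        size_t dropped = truncate_by_budget(sizes, /*budget=*/450, kept);
        std::cout << "dropped=" << dropped << " kept_bytes=" << kept << '\n'; // dropped=2 kept_bytes=300
    }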
1608
1609
5
size_t CloudCompactionMixin::apply_txn_size_truncation_and_log(const std::string& compaction_name) {
1610
5
    if (_input_rowsets.empty()) {
1611
1
        return 0;
1612
1
    }
1613
1614
4
    int64_t original_count = _input_rowsets.size();
1615
4
    int64_t original_start_version = _input_rowsets.front()->start_version();
1616
4
    int64_t original_end_version = _input_rowsets.back()->end_version();
1617
1618
4
    int64_t final_size = 0;
1619
4
    int64_t truncated_size = 0;
1620
4
    size_t truncated_count =
1621
4
            cloud::truncate_rowsets_by_txn_size(_input_rowsets, final_size, truncated_size);
1622
1623
4
    if (truncated_count > 0) {
1624
2
        int64_t original_size = final_size + truncated_size;
1625
2
        LOG(INFO) << compaction_name << " txn size estimation truncate"
1626
2
                  << ", tablet_id=" << _tablet->tablet_id() << ", original_version_range=["
1627
2
                  << original_start_version << "-" << original_end_version
1628
2
                  << "], final_version_range=[" << _input_rowsets.front()->start_version() << "-"
1629
2
                  << _input_rowsets.back()->end_version()
1630
2
                  << "], original_rowset_count=" << original_count
1631
2
                  << ", final_rowset_count=" << _input_rowsets.size()
1632
2
                  << ", truncated_rowset_count=" << truncated_count
1633
2
                  << ", original_size_bytes=" << original_size
1634
2
                  << ", final_size_bytes=" << final_size
1635
2
                  << ", truncated_size_bytes=" << truncated_size
1636
2
                  << ", threshold_bytes=" << config::compaction_txn_max_size_bytes;
1637
2
    }
1638
1639
4
    return truncated_count;
1640
5
}
1641
1642
0
Status CloudCompactionMixin::execute_compact() {
1643
0
    TEST_INJECTION_POINT("Compaction::do_compaction");
1644
0
    int64_t permits = get_compaction_permits();
1645
0
    HANDLE_EXCEPTION_IF_CATCH_EXCEPTION(
1646
0
            execute_compact_impl(permits), [&](const doris::Exception& ex) {
1647
0
                auto st = garbage_collection();
1648
0
                if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1649
0
                    _tablet->enable_unique_key_merge_on_write() && !st.ok()) {
1650
                    // if compaction fails, BE will try to abort the compaction, and the delete bitmap
1651
                    // lock is released if the abort succeeds; but if the abort fails, the delete bitmap
1652
                    // lock is not released. In this situation, BE needs to send this RPC to MS
1653
                    // to try to release the delete bitmap lock.
1654
0
                    _engine.meta_mgr().remove_delete_bitmap_update_lock(
1655
0
                            _tablet->table_id(), COMPACTION_DELETE_BITMAP_LOCK_ID, initiator(),
1656
0
                            _tablet->tablet_id());
1657
0
                }
1658
0
            });
1659
1660
0
    DorisMetrics::instance()->remote_compaction_read_rows_total->increment(_input_row_num);
1661
0
    DorisMetrics::instance()->remote_compaction_write_rows_total->increment(
1662
0
            _output_rowset->num_rows());
1663
0
    DorisMetrics::instance()->remote_compaction_write_bytes_total->increment(
1664
0
            _output_rowset->total_disk_size());
1665
1666
0
    _load_segment_to_cache();
1667
0
    return Status::OK();
1668
0
}
1669
1670
0
Status CloudCompactionMixin::modify_rowsets() {
1671
0
    return Status::OK();
1672
0
}
1673
1674
5
Status CloudCompactionMixin::set_storage_resource_from_input_rowsets(RowsetWriterContext& ctx) {
1675
    // Set storage resource from input rowsets by iterating backwards to find the first rowset
1676
    // with non-empty resource_id. This handles two scenarios:
1677
    // 1. Hole rowsets compaction: Multiple hole rowsets may lack storage resource.
1678
    //    Example: [0-1, 2-2, 3-3, 4-4, 5-5] where 2-5 are hole rowsets.
1679
    //    If 0-1 lacks resource_id, then 2-5 also lack resource_id.
1680
    // 2. Schema change: New tablet may have later version empty rowsets without resource_id,
1681
    //    but middle rowsets get resource_id after historical rowsets are converted.
1682
    //    We iterate backwards to find the most recent rowset with valid resource_id.
1683
1684
6
    for (const auto& rowset : std::ranges::reverse_view(_input_rowsets)) {
1685
6
        const auto& resource_id = rowset->rowset_meta()->resource_id();
1686
1687
6
        if (!resource_id.empty()) {
1688
2
            ctx.storage_resource = *DORIS_TRY(rowset->rowset_meta()->remote_storage_resource());
1689
2
            return Status::OK();
1690
2
        }
1691
1692
        // Validate that non-empty rowsets (num_segments > 0) must have valid resource_id
1693
        // Only hole rowsets or empty rowsets are allowed to have empty resource_id
1694
4
        if (rowset->num_segments() > 0) {
1695
1
            auto error_msg = fmt::format(
1696
1
                    "Non-empty rowset must have valid resource_id. "
1697
1
                    "rowset_id={}, version=[{}-{}], is_hole_rowset={}, num_segments={}, "
1698
1
                    "tablet_id={}, table_id={}",
1699
1
                    rowset->rowset_id().to_string(), rowset->start_version(), rowset->end_version(),
1700
1
                    rowset->is_hole_rowset(), rowset->num_segments(), _tablet->tablet_id(),
1701
1
                    _tablet->table_id());
1702
1703
#ifndef BE_TEST
1704
            DCHECK(false) << error_msg;
1705
#endif
1706
1707
1
            return Status::InternalError<false>(error_msg);
1708
1
        }
1709
4
    }
1710
1711
2
    return Status::OK();
1712
5
}
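The backward scan above relies on `std::ranges::reverse_view` to find the most recent rowset carrying a usable `resource_id`, since hole rowsets and empty rowsets may legitimately lack one. A self-contained sketch of the same lookup over plain strings (names are illustrative only):

    #include <iostream>
    #include <ranges>
    #include <string>
    #include <vector>

    int main() {
        // Oldest-to-newest resource ids; empty entries mimic hole/empty rowsets.
        std::vector<std::string> resource_ids = {"s3_old", "s3_new", "", ""};

        // Iterate newest-to-oldest and stop at the first non-empty id.
        for (const auto& id : std::ranges::reverse_view(resource_ids)) {
            if (!id.empty()) {
                std::cout << "use resource: " << id << '\n'; // s3_new
                break;
            }
        }
    }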
1713
1714
0
Status CloudCompactionMixin::construct_output_rowset_writer(RowsetWriterContext& ctx) {
1715
    // only do index compaction for dup_keys and unique_keys with mow enabled
1716
0
    if (_enable_inverted_index_compaction && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1717
0
                                                _tablet->enable_unique_key_merge_on_write()) ||
1718
0
                                               _tablet->keys_type() == KeysType::DUP_KEYS))) {
1719
0
        construct_index_compaction_columns(ctx);
1720
0
    }
1721
1722
    // Use the storage resource of the previous rowset.
1723
0
    RETURN_IF_ERROR(set_storage_resource_from_input_rowsets(ctx));
1724
1725
0
    ctx.txn_id = boost::uuids::hash_value(UUIDGenerator::instance()->next_uuid()) &
1726
0
                 std::numeric_limits<int64_t>::max(); // MUST be positive
1727
0
    ctx.txn_expiration = _expiration;
1728
1729
0
    ctx.version = _output_version;
1730
0
    ctx.rowset_state = VISIBLE;
1731
0
    ctx.segments_overlap = NONOVERLAPPING;
1732
0
    ctx.tablet_schema = _cur_tablet_schema;
1733
0
    ctx.newest_write_timestamp = _newest_write_timestamp;
1734
0
    ctx.write_type = DataWriteType::TYPE_COMPACTION;
1735
0
    ctx.compaction_type = compaction_type();
1736
0
    ctx.allow_packed_file = false;
1737
1738
    // We presume that the data involved in cumulative compaction is sufficiently 'hot'
1739
    // and should always be retained in the cache.
1740
    // TODO(gavin): Ensure that the retention of hot data is implemented with precision.
1741
1742
0
    ctx.write_file_cache = should_cache_compaction_output();
1743
0
    ctx.file_cache_ttl_sec = _tablet->ttl_seconds();
1744
0
    ctx.approximate_bytes_to_write = _input_rowsets_total_size;
1745
0
    ctx.tablet = _tablet;
1746
1747
0
    _output_rs_writer = DORIS_TRY(_tablet->create_rowset_writer(ctx, _is_vertical));
1748
0
    RETURN_IF_ERROR(
1749
0
            _engine.meta_mgr().prepare_rowset(*_output_rs_writer->rowset_meta().get(), _uuid));
1750
0
    return Status::OK();
1751
0
}
1752
1753
0
Status CloudCompactionMixin::garbage_collection() {
1754
0
    if (!config::enable_file_cache) {
1755
0
        return Status::OK();
1756
0
    }
1757
0
    if (_output_rs_writer) {
1758
0
        auto* beta_rowset_writer = dynamic_cast<BaseBetaRowsetWriter*>(_output_rs_writer.get());
1759
0
        DCHECK(beta_rowset_writer);
1760
0
        for (const auto& [_, file_writer] : beta_rowset_writer->get_file_writers()) {
1761
0
            auto file_key = io::BlockFileCache::hash(file_writer->path().filename().native());
1762
0
            auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key);
1763
0
            file_cache->remove_if_cached_async(file_key);
1764
0
        }
1765
0
        for (const auto& [_, index_writer] : beta_rowset_writer->index_file_writers()) {
1766
0
            for (const auto& file_name : index_writer->get_index_file_names()) {
1767
0
                auto file_key = io::BlockFileCache::hash(file_name);
1768
0
                auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key);
1769
0
                file_cache->remove_if_cached_async(file_key);
1770
0
            }
1771
0
        }
1772
0
    }
1773
0
    return Status::OK();
1774
0
}
1775
1776
0
void CloudCompactionMixin::update_compaction_level() {
1777
    // for index change compaction, the compaction level should not be changed,
1778
    // because the input rowset num is 1.
1779
0
    if (is_index_change_compaction()) {
1780
0
        DCHECK(_input_rowsets.size() == 1);
1781
0
        _output_rowset->rowset_meta()->set_compaction_level(
1782
0
                _input_rowsets.back()->rowset_meta()->compaction_level());
1783
0
    } else {
1784
0
        auto compaction_policy = _tablet->tablet_meta()->compaction_policy();
1785
0
        auto cumu_policy = _engine.cumu_compaction_policy(compaction_policy);
1786
0
        if (cumu_policy && cumu_policy->name() == CUMULATIVE_TIME_SERIES_POLICY) {
1787
0
            int64_t compaction_level = cumu_policy->get_compaction_level(
1788
0
                    cloud_tablet(), _input_rowsets, _output_rowset);
1789
0
            _output_rowset->rowset_meta()->set_compaction_level(compaction_level);
1790
0
        }
1791
0
    }
1792
0
}
1793
1794
// should skip hole rowsets, otherwise the count will be wrong in MS
1795
2
int64_t CloudCompactionMixin::num_input_rowsets() const {
1796
2
    int64_t count = 0;
1797
2
    for (const auto& r : _input_rowsets) {
1798
2
        if (!r->is_hole_rowset()) {
1799
2
            count++;
1800
2
        }
1801
2
    }
1802
2
    return count;
1803
2
}
1804
1805
8
bool CloudCompactionMixin::should_cache_compaction_output() {
1806
8
    if (compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1807
0
        return true;
1808
0
    }
1809
1810
8
    if (compaction_type() == ReaderType::READER_BASE_COMPACTION) {
1811
8
        double input_rowsets_hit_cache_ratio = 0.0;
1812
1813
8
        int64_t _input_rowsets_cached_size =
1814
8
                _input_rowsets_cached_data_size + _input_rowsets_cached_index_size;
1815
8
        if (_input_rowsets_total_size > 0) {
1816
7
            input_rowsets_hit_cache_ratio =
1817
7
                    double(_input_rowsets_cached_size) / double(_input_rowsets_total_size);
1818
7
        }
1819
1820
8
        LOG(INFO) << "CloudBaseCompaction should_cache_compaction_output"
1821
8
                  << ", tablet_id=" << _tablet->tablet_id()
1822
8
                  << ", input_rowsets_hit_cache_ratio=" << input_rowsets_hit_cache_ratio
1823
8
                  << ", _input_rowsets_cached_size=" << _input_rowsets_cached_size
1824
8
                  << ", _input_rowsets_total_size=" << _input_rowsets_total_size
1825
8
                  << ", enable_file_cache_keep_base_compaction_output="
1826
8
                  << config::enable_file_cache_keep_base_compaction_output
1827
8
                  << ", file_cache_keep_base_compaction_output_min_hit_ratio="
1828
8
                  << config::file_cache_keep_base_compaction_output_min_hit_ratio;
1829
1830
8
        if (config::enable_file_cache_keep_base_compaction_output) {
1831
0
            return true;
1832
0
        }
1833
1834
8
        if (input_rowsets_hit_cache_ratio >
1835
8
            config::file_cache_keep_base_compaction_output_min_hit_ratio) {
1836
3
            return true;
1837
3
        }
1838
8
    }
1839
5
    return false;
1840
8
}
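For base compaction, the cache-retention decision reduces to a guarded ratio: cached input bytes over total input bytes, compared against a configured threshold, with a zero-size guard to avoid division by zero. A compact sketch of that decision (the threshold value here is assumed for illustration):

    #include <cstdint>
    #include <iostream>

    // Decide whether base compaction output should stay in the file cache,
    // based on how much of the input was already cached.
    bool keep_output_in_cache(int64_t cached_bytes, int64_t total_bytes, double min_hit_ratio) {
        double hit_ratio = 0.0;
        if (total_bytes > 0) { // guard against empty inputs
            hit_ratio = double(cached_bytes) / double(total_bytes);
        }
        return hit_ratio > min_hit_ratio;
    }

    int main() {
        // 0.5 is an assumed threshold; the real value comes from
        // config::file_cache_keep_base_compaction_output_min_hit_ratio.
        std::cout << std::boolalpha
                  << keep_output_in_cache(800, 1000, 0.5) << '\n'  // true
                  << keep_output_in_cache(100, 1000, 0.5) << '\n'; // false
    }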
1841
1842
#include "common/compile_check_end.h"
1843
} // namespace doris