Coverage Report

Created: 2025-12-31 20:16

/root/doris/be/src/olap/compaction.cpp

Line | Count | Source
   1 |       | // Licensed to the Apache Software Foundation (ASF) under one
   2 |       | // or more contributor license agreements.  See the NOTICE file
   3 |       | // distributed with this work for additional information
   4 |       | // regarding copyright ownership.  The ASF licenses this file
   5 |       | // to you under the Apache License, Version 2.0 (the
   6 |       | // "License"); you may not use this file except in compliance
   7 |       | // with the License.  You may obtain a copy of the License at
   8 |       | //
   9 |       | //   http://www.apache.org/licenses/LICENSE-2.0
  10 |       | //
  11 |       | // Unless required by applicable law or agreed to in writing,
  12 |       | // software distributed under the License is distributed on an
  13 |       | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  14 |       | // KIND, either express or implied.  See the License for the
  15 |       | // specific language governing permissions and limitations
  16 |       | // under the License.
  17 |       |
  18 |       | #include "olap/compaction.h"
  19 |       |
  20 |       | #include <fmt/format.h>
  21 |       | #include <gen_cpp/olap_file.pb.h>
  22 |       | #include <glog/logging.h>
  23 |       |
  24 |       | #include <algorithm>
  25 |       | #include <atomic>
  26 |       | #include <cstdint>
  27 |       | #include <cstdlib>
  28 |       | #include <list>
  29 |       | #include <map>
  30 |       | #include <memory>
  31 |       | #include <mutex>
  32 |       | #include <nlohmann/json.hpp>
  33 |       | #include <numeric>
  34 |       | #include <ostream>
  35 |       | #include <set>
  36 |       | #include <shared_mutex>
  37 |       | #include <utility>
  38 |       |
  39 |       | #include "cloud/cloud_meta_mgr.h"
  40 |       | #include "cloud/cloud_storage_engine.h"
  41 |       | #include "cloud/cloud_tablet.h"
  42 |       | #include "cloud/pb_convert.h"
  43 |       | #include "common/config.h"
  44 |       | #include "common/status.h"
  45 |       | #include "cpp/sync_point.h"
  46 |       | #include "io/cache/block_file_cache_factory.h"
  47 |       | #include "io/fs/file_system.h"
  48 |       | #include "io/fs/file_writer.h"
  49 |       | #include "io/fs/remote_file_system.h"
  50 |       | #include "io/io_common.h"
  51 |       | #include "olap/cumulative_compaction.h"
  52 |       | #include "olap/cumulative_compaction_policy.h"
  53 |       | #include "olap/cumulative_compaction_time_series_policy.h"
  54 |       | #include "olap/data_dir.h"
  55 |       | #include "olap/olap_common.h"
  56 |       | #include "olap/olap_define.h"
  57 |       | #include "olap/rowset/beta_rowset.h"
  58 |       | #include "olap/rowset/beta_rowset_reader.h"
  59 |       | #include "olap/rowset/beta_rowset_writer.h"
  60 |       | #include "olap/rowset/rowset.h"
  61 |       | #include "olap/rowset/rowset_fwd.h"
  62 |       | #include "olap/rowset/rowset_meta.h"
  63 |       | #include "olap/rowset/rowset_writer.h"
  64 |       | #include "olap/rowset/rowset_writer_context.h"
  65 |       | #include "olap/rowset/segment_v2/index_file_reader.h"
  66 |       | #include "olap/rowset/segment_v2/index_file_writer.h"
  67 |       | #include "olap/rowset/segment_v2/inverted_index_compaction.h"
  68 |       | #include "olap/rowset/segment_v2/inverted_index_desc.h"
  69 |       | #include "olap/rowset/segment_v2/inverted_index_fs_directory.h"
  70 |       | #include "olap/storage_engine.h"
  71 |       | #include "olap/storage_policy.h"
  72 |       | #include "olap/tablet.h"
  73 |       | #include "olap/tablet_meta.h"
  74 |       | #include "olap/tablet_meta_manager.h"
  75 |       | #include "olap/task/engine_checksum_task.h"
  76 |       | #include "olap/txn_manager.h"
  77 |       | #include "olap/utils.h"
  78 |       | #include "runtime/memory/mem_tracker_limiter.h"
  79 |       | #include "runtime/thread_context.h"
  80 |       | #include "util/doris_metrics.h"
  81 |       | #include "util/pretty_printer.h"
  82 |       | #include "util/time.h"
  83 |       | #include "util/trace.h"
  84 |       | #include "vec/common/schema_util.h"
  85 |       |
  86 |       | using std::vector;
  87 |       |
  88 |       | namespace doris {
  89 |       | using namespace ErrorCode;
  90 |       | namespace {
  91 |       | #include "common/compile_check_begin.h"
  92 |       |
  93 |       | bool is_rowset_tidy(std::string& pre_max_key, bool& pre_rs_key_bounds_truncated,
  94 | 2.37k |                     const RowsetSharedPtr& rhs) {
  95 | 2.37k |     size_t min_tidy_size = config::ordered_data_compaction_min_segment_size;
  96 | 2.37k |     if (rhs->num_segments() == 0) {
  97 | 2.14k |         return true;
  98 | 2.14k |     }
  99 |   228 |     if (rhs->is_segments_overlapping()) {
 100 |     1 |         return false;
 101 |     1 |     }
 102 |       |     // check segment size
 103 |   227 |     auto* beta_rowset = reinterpret_cast<BetaRowset*>(rhs.get());
 104 |   227 |     std::vector<size_t> segments_size;
 105 |   227 |     RETURN_FALSE_IF_ERROR(beta_rowset->get_segments_size(&segments_size));
 106 |   231 |     for (auto segment_size : segments_size) {
 107 |       |         // if a segment is too small, we need to do compaction
 108 |   231 |         if (segment_size < min_tidy_size) {
 109 |   185 |             return false;
 110 |   185 |         }
 111 |   231 |     }
 112 |    41 |     std::string min_key;
 113 |    41 |     auto ret = rhs->first_key(&min_key);
 114 |    41 |     if (!ret) {
 115 |     0 |         return false;
 116 |     0 |     }
 117 |    41 |     bool cur_rs_key_bounds_truncated {rhs->is_segments_key_bounds_truncated()};
 118 |    41 |     if (!Slice::lhs_is_strictly_less_than_rhs(Slice {pre_max_key}, pre_rs_key_bounds_truncated,
 119 |    41 |                                               Slice {min_key}, cur_rs_key_bounds_truncated)) {
 120 |     5 |         return false;
 121 |     5 |     }
 122 |    41 |     CHECK(rhs->last_key(&pre_max_key));
 123 |    36 |     pre_rs_key_bounds_truncated = cur_rs_key_bounds_truncated;
 124 |    36 |     return true;
 125 |    41 | }
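
Note: is_rowset_tidy() (lines 93-125) is what lets ordered data compaction concatenate rowsets by hard-linking files: it requires every rowset's minimum key to be strictly greater than the previous rowset's maximum key. A minimal standalone sketch of that invariant, using plain std::string keys and ignoring the truncated-key-bounds handling that Slice::lhs_is_strictly_less_than_rhs performs in the real code (illustrative only, not the Doris API):

    #include <string>
    #include <vector>

    // Each rowset contributes one [min_key, max_key] range, in version order.
    struct KeyRange {
        std::string min_key;
        std::string max_key;
    };

    // True iff the ranges are strictly increasing and never interleave, so the
    // segments could be linked into one output rowset without re-sorting.
    bool ranges_are_tidy(const std::vector<KeyRange>& ranges) {
        std::string pre_max;  // empty string sorts before any non-empty key
        for (const auto& r : ranges) {
            if (!(pre_max < r.min_key)) {
                return false;  // ranges touch or overlap: a real merge is required
            }
            pre_max = r.max_key;
        }
        return true;
    }
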
 126 |       |
 127 |       | } // namespace
 128 |       |
 129 |       | Compaction::Compaction(BaseTabletSPtr tablet, const std::string& label)
 130 |       |         : _mem_tracker(
 131 |  100k |                   MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::COMPACTION, label)),
 132 |  100k |           _tablet(std::move(tablet)),
 133 |  100k |           _is_vertical(config::enable_vertical_compaction),
 134 |  100k |           _allow_delete_in_cumu_compaction(config::enable_delete_when_cumu_compaction),
 135 |       |           _enable_vertical_compact_variant_subcolumns(
 136 |  100k |                   config::enable_vertical_compact_variant_subcolumns),
 137 |  100k |           _enable_inverted_index_compaction(config::inverted_index_compaction_enable) {
 138 |  100k |     init_profile(label);
 139 |  100k |     SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
 140 |  100k |     _rowid_conversion = std::make_unique<RowIdConversion>();
 141 |  100k | }
 142 |       |
 143 |  100k | Compaction::~Compaction() {
 144 |  100k |     SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
 145 |  100k |     _output_rs_writer.reset();
 146 |  100k |     _tablet.reset();
 147 |  100k |     _input_rowsets.clear();
 148 |  100k |     _output_rowset.reset();
 149 |  100k |     _cur_tablet_schema.reset();
 150 |  100k |     _rowid_conversion.reset();
 151 |  100k | }
 152 |       |
 153 |  100k | void Compaction::init_profile(const std::string& label) {
 154 |  100k |     _profile = std::make_unique<RuntimeProfile>(label);
 155 |       |
 156 |  100k |     _input_rowsets_data_size_counter =
 157 |  100k |             ADD_COUNTER(_profile, "input_rowsets_data_size", TUnit::BYTES);
 158 |  100k |     _input_rowsets_counter = ADD_COUNTER(_profile, "input_rowsets_count", TUnit::UNIT);
 159 |  100k |     _input_row_num_counter = ADD_COUNTER(_profile, "input_row_num", TUnit::UNIT);
 160 |  100k |     _input_segments_num_counter = ADD_COUNTER(_profile, "input_segments_num", TUnit::UNIT);
 161 |  100k |     _merged_rows_counter = ADD_COUNTER(_profile, "merged_rows", TUnit::UNIT);
 162 |  100k |     _filtered_rows_counter = ADD_COUNTER(_profile, "filtered_rows", TUnit::UNIT);
 163 |  100k |     _output_rowset_data_size_counter =
 164 |  100k |             ADD_COUNTER(_profile, "output_rowset_data_size", TUnit::BYTES);
 165 |  100k |     _output_row_num_counter = ADD_COUNTER(_profile, "output_row_num", TUnit::UNIT);
 166 |  100k |     _output_segments_num_counter = ADD_COUNTER(_profile, "output_segments_num", TUnit::UNIT);
 167 |  100k |     _merge_rowsets_latency_timer = ADD_TIMER(_profile, "merge_rowsets_latency");
 168 |  100k | }
 169 |       |
 170 |   626 | int64_t Compaction::merge_way_num() {
 171 |   626 |     int64_t way_num = 0;
 172 | 5.10k |     for (auto&& rowset : _input_rowsets) {
 173 | 5.10k |         way_num += rowset->rowset_meta()->get_merge_way_num();
 174 | 5.10k |     }
 175 |       |
 176 |   626 |     return way_num;
 177 |   626 | }
 178 |       |
 179 |   626 | Status Compaction::merge_input_rowsets() {
 180 |   626 |     std::vector<RowsetReaderSharedPtr> input_rs_readers;
 181 |   626 |     input_rs_readers.reserve(_input_rowsets.size());
 182 | 5.10k |     for (auto& rowset : _input_rowsets) {
 183 | 5.10k |         RowsetReaderSharedPtr rs_reader;
 184 | 5.10k |         RETURN_IF_ERROR(rowset->create_reader(&rs_reader));
 185 | 5.10k |         input_rs_readers.push_back(std::move(rs_reader));
 186 | 5.10k |     }
 187 |       |
 188 |   626 |     RowsetWriterContext ctx;
 189 |   626 |     RETURN_IF_ERROR(construct_output_rowset_writer(ctx));
 190 |       |
 191 |       |     // write merged rows to output rowset
 192 |       |     // The test results show that the merger has a low memory footprint; there is no need to track its mem pool
 193 |       |     // if ctx.columns_to_do_index_compaction.size() > 0, it means we need to do inverted index compaction.
 194 |       |     // the row ID conversion matrix needs to be used for inverted index compaction.
 195 |   626 |     if (!ctx.columns_to_do_index_compaction.empty() ||
 196 |   626 |         (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
 197 |   551 |          _tablet->enable_unique_key_merge_on_write())) {
 198 |   328 |         _stats.rowid_conversion = _rowid_conversion.get();
 199 |   328 |     }
 200 |       |
 201 |   626 |     int64_t way_num = merge_way_num();
 202 |       |
 203 |   626 |     Status res;
 204 |   626 |     {
 205 |   626 |         SCOPED_TIMER(_merge_rowsets_latency_timer);
 206 |       |         // 1. Merge segment files and write bkd inverted index
 207 |       |         // TODO implement vertical compaction for seq map
 208 |   626 |         if (_is_vertical && !_tablet->tablet_schema()->has_seq_map()) {
 209 |   626 |             if (!_tablet->tablet_schema()->cluster_key_uids().empty()) {
 210 |    17 |                 RETURN_IF_ERROR(update_delete_bitmap());
 211 |    17 |             }
 212 |   626 |             res = Merger::vertical_merge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema,
 213 |   626 |                                                  input_rs_readers, _output_rs_writer.get(),
 214 |   626 |                                                  cast_set<uint32_t>(get_avg_segment_rows()),
 215 |   626 |                                                  way_num, &_stats);
 216 |   626 |         } else {
 217 |     0 |             if (!_tablet->tablet_schema()->cluster_key_uids().empty()) {
 218 |     0 |                 return Status::InternalError(
 219 |     0 |                         "mow table with cluster keys does not support non vertical compaction");
 220 |     0 |             }
 221 |     0 |             res = Merger::vmerge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema,
 222 |     0 |                                          input_rs_readers, _output_rs_writer.get(), &_stats);
 223 |     0 |         }
 224 |       |
 225 |   626 |         _tablet->last_compaction_status = res;
 226 |   626 |         if (!res.ok()) {
 227 |     6 |             return res;
 228 |     6 |         }
 229 |       |         // 2. Merge the remaining inverted index files of the string type
 230 |   620 |         RETURN_IF_ERROR(do_inverted_index_compaction());
 231 |   620 |     }
 232 |       |
 233 |   590 |     COUNTER_UPDATE(_merged_rows_counter, _stats.merged_rows);
 234 |   590 |     COUNTER_UPDATE(_filtered_rows_counter, _stats.filtered_rows);
 235 |       |
 236 |       |     // 3. In the `build`, `_close_file_writers` is called to close the inverted index file writer and write the final compound index file.
 237 |   590 |     RETURN_NOT_OK_STATUS_WITH_WARN(_output_rs_writer->build(_output_rowset),
 238 |   590 |                                    fmt::format("rowset writer build failed. output_version: {}",
 239 |   590 |                                                _output_version.to_string()));
 240 |       |
 241 |       |     // When true, writers should remove variant extracted subcolumns from the
 242 |       |     // schema stored in RowsetMeta. This is used when compaction temporarily
 243 |       |     // extends schema to split variant subcolumns for vertical compaction but
 244 |       |     // the final rowset meta must not persist those extracted subcolumns.
 245 |   590 |     if (_enable_vertical_compact_variant_subcolumns &&
 246 |   590 |         (_cur_tablet_schema->num_variant_columns() > 0)) {
 247 |    58 |         _output_rowset->rowset_meta()->set_tablet_schema(
 248 |    58 |                 _cur_tablet_schema->copy_without_variant_extracted_columns());
 249 |    58 |     }
 250 |       |
 251 |       |     //RETURN_IF_ERROR(_engine.meta_mgr().commit_rowset(*_output_rowset->rowset_meta().get()));
 252 |   590 |     set_delete_predicate_for_output_rowset();
 253 |       |
 254 |   590 |     _local_read_bytes_total = _stats.bytes_read_from_local;
 255 |   590 |     _remote_read_bytes_total = _stats.bytes_read_from_remote;
 256 |   590 |     DorisMetrics::instance()->local_compaction_read_bytes_total->increment(_local_read_bytes_total);
 257 |   590 |     DorisMetrics::instance()->remote_compaction_read_bytes_total->increment(
 258 |   590 |             _remote_read_bytes_total);
 259 |   590 |     DorisMetrics::instance()->local_compaction_write_bytes_total->increment(
 260 |   590 |             _stats.cached_bytes_total);
 261 |       |
 262 |   590 |     COUNTER_UPDATE(_output_rowset_data_size_counter, _output_rowset->data_disk_size());
 263 |   590 |     COUNTER_UPDATE(_output_row_num_counter, _output_rowset->num_rows());
 264 |   590 |     COUNTER_UPDATE(_output_segments_num_counter, _output_rowset->num_segments());
 265 |       |
 266 |   590 |     return check_correctness();
 267 |   590 | }
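
Note: merge_way_num() (lines 170-177) sums the number of sorted runs contributed by each input rowset, and merge_input_rowsets() passes it to the vertical merger to bound the fan-in of the k-way merge. A self-contained sketch of the k-way merge shape this implies, over plain int runs rather than rowset readers (illustrative, not Doris's Merger API):

    #include <cstddef>
    #include <queue>
    #include <utility>
    #include <vector>

    // Merge `runs` (each individually sorted ascending) into one sorted vector.
    // The heap never holds more than one cursor per run, so memory grows with
    // the way number, not with the total row count.
    std::vector<int> kway_merge(const std::vector<std::vector<int>>& runs) {
        using Cursor = std::pair<int, std::size_t>;  // {current value, run index}
        auto cmp = [](const Cursor& a, const Cursor& b) { return a.first > b.first; };
        std::priority_queue<Cursor, std::vector<Cursor>, decltype(cmp)> heap(cmp);
        std::vector<std::size_t> pos(runs.size(), 0);
        for (std::size_t i = 0; i < runs.size(); ++i) {
            if (!runs[i].empty()) heap.push({runs[i][0], i});
        }
        std::vector<int> out;
        while (!heap.empty()) {
            auto [value, run] = heap.top();
            heap.pop();
            out.push_back(value);
            if (++pos[run] < runs[run].size()) heap.push({runs[run][pos[run]], run});
        }
        return out;
    }
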
 268 |       |
 269 |   589 | void Compaction::set_delete_predicate_for_output_rowset() {
 270 |       |     // Now we support delete in cumu compaction. To make all data in rowsets whose versions
 271 |       |     // are below output_version deletable by a future base compaction, we should carry
 272 |       |     // all delete predicates in the output rowset.
 273 |       |     // Output start version > 2 means we must set the delete predicate in the output rowset
 274 |   589 |     if (_output_rowset->version().first > 2 &&
 275 |   589 |         (_allow_delete_in_cumu_compaction || is_index_change_compaction())) {
 276 |     1 |         DeletePredicatePB delete_predicate;
 277 |     1 |         std::accumulate(_input_rowsets.begin(), _input_rowsets.end(), &delete_predicate,
 278 |     1 |                         [](DeletePredicatePB* delete_predicate, const RowsetSharedPtr& rs) {
 279 |     1 |                             if (rs->rowset_meta()->has_delete_predicate()) {
 280 |     1 |                                 delete_predicate->MergeFrom(rs->rowset_meta()->delete_predicate());
 281 |     1 |                             }
 282 |     1 |                             return delete_predicate;
 283 |     1 |                         });
 284 |       |         // now version in delete_predicate is deprecated
 285 |     1 |         if (!delete_predicate.in_predicates().empty() ||
 286 |     1 |             !delete_predicate.sub_predicates_v2().empty() ||
 287 |     1 |             !delete_predicate.sub_predicates().empty()) {
 288 |     1 |             _output_rowset->rowset_meta()->set_delete_predicate(std::move(delete_predicate));
 289 |     1 |         }
 290 |     1 |     }
 291 |   589 | }
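
Note: set_delete_predicate_for_output_rowset() folds every input rowset's delete predicate into a single DeletePredicatePB via std::accumulate, threading a pointer through as the accumulator so the growing message is mutated in place rather than copied. The same fold idiom on a plain struct (hypothetical Predicates type, for illustration only):

    #include <numeric>
    #include <string>
    #include <vector>

    struct Predicates {
        std::vector<std::string> conditions;
    };

    // Merge all inputs into one Predicates object. The accumulator is a raw
    // pointer that every step mutates and returns, mirroring the pattern above.
    Predicates merge_all(const std::vector<Predicates>& inputs) {
        Predicates merged;
        std::accumulate(inputs.begin(), inputs.end(), &merged,
                        [](Predicates* acc, const Predicates& in) {
                            acc->conditions.insert(acc->conditions.end(),
                                                   in.conditions.begin(),
                                                   in.conditions.end());
                            return acc;
                        });
        return merged;
    }
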
 292 |       |
 293 |   625 | int64_t Compaction::get_avg_segment_rows() {
 294 |       |     // take care of empty rowsets
 295 |       |     // _input_rowsets_data_size is the total disk size of the input rowsets; this size is the
 296 |       |     // final size after encoding and compression, so the expected destination segment file
 297 |       |     // size on disk is config::vertical_compaction_max_segment_size
 298 |   625 |     const auto& meta = _tablet->tablet_meta();
 299 |   625 |     if (meta->compaction_policy() == CUMULATIVE_TIME_SERIES_POLICY) {
 300 |    12 |         int64_t compaction_goal_size_mbytes = meta->time_series_compaction_goal_size_mbytes();
 301 |       |         // The output segment rows should be less than total input rows
 302 |    12 |         return std::min((compaction_goal_size_mbytes * 1024 * 1024 * 2) /
 303 |    12 |                                 (_input_rowsets_data_size / (_input_row_num + 1) + 1),
 304 |    12 |                         _input_row_num + 1);
 305 |    12 |     }
 306 |   613 |     return std::min(config::vertical_compaction_max_segment_size /
 307 |   613 |                             (_input_rowsets_data_size / (_input_row_num + 1) + 1),
 308 |   613 |                     _input_row_num + 1);
 309 |   625 | }
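
Note: get_avg_segment_rows() estimates the bytes per input row, divides the target segment size by that estimate, and caps the result at the input row count; the +1 terms guard against division by zero on empty inputs. A sketch of the same arithmetic with made-up numbers: 1 GiB of input data over 10 million rows is roughly 107 bytes per row, so a 256 MiB segment target yields about 2.5 million rows per output segment.

    #include <algorithm>
    #include <cstdint>

    // Same shape as the non-time-series branch above. Example (illustrative
    // values, not Doris defaults): avg_segment_rows(268435456, 1073741824,
    // 10000000) == 2485513, i.e. ~2.5M rows per output segment.
    int64_t avg_segment_rows(int64_t max_segment_bytes, int64_t input_bytes,
                             int64_t input_rows) {
        return std::min(max_segment_bytes / (input_bytes / (input_rows + 1) + 1),
                        input_rows + 1);
    }
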
 310 |       |
 311 |       | CompactionMixin::CompactionMixin(StorageEngine& engine, TabletSharedPtr tablet,
 312 |       |                                  const std::string& label)
 313 |  100k |         : Compaction(tablet, label), _engine(engine) {}
 314 |       |
 315 |  100k | CompactionMixin::~CompactionMixin() {
 316 |  100k |     if (_state != CompactionState::SUCCESS && _output_rowset != nullptr) {
 317 |     6 |         if (!_output_rowset->is_local()) {
 318 |     0 |             tablet()->record_unused_remote_rowset(_output_rowset->rowset_id(),
 319 |     0 |                                                   _output_rowset->rowset_meta()->resource_id(),
 320 |     0 |                                                   _output_rowset->num_segments());
 321 |     0 |             return;
 322 |     0 |         }
 323 |     6 |         _engine.add_unused_rowset(_output_rowset);
 324 |     6 |     }
 325 |  100k | }
 326 |       |
 327 |  867k | Tablet* CompactionMixin::tablet() {
 328 |  867k |     return static_cast<Tablet*>(_tablet.get());
 329 |  867k | }
 330 |       |
 331 |   294 | Status CompactionMixin::do_compact_ordered_rowsets() {
 332 |   294 |     RETURN_IF_ERROR(build_basic_info(true));
 333 |   294 |     RowsetWriterContext ctx;
 334 |   294 |     RETURN_IF_ERROR(construct_output_rowset_writer(ctx));
 335 |       |
 336 |   294 |     LOG(INFO) << "start to do ordered data compaction, tablet=" << _tablet->tablet_id()
 337 |   294 |               << ", output_version=" << _output_version;
 338 |       |     // link data to new rowset
 339 |   294 |     auto seg_id = 0;
 340 |   294 |     bool segments_key_bounds_truncated {false};
 341 |   294 |     std::vector<KeyBoundsPB> segment_key_bounds;
 342 | 2.16k |     for (auto rowset : _input_rowsets) {
 343 | 2.16k |         RETURN_IF_ERROR(rowset->link_files_to(tablet()->tablet_path(),
 344 | 2.16k |                                               _output_rs_writer->rowset_id(), seg_id));
 345 | 2.16k |         seg_id += rowset->num_segments();
 346 | 2.16k |         segments_key_bounds_truncated |= rowset->is_segments_key_bounds_truncated();
 347 | 2.16k |         std::vector<KeyBoundsPB> key_bounds;
 348 | 2.16k |         RETURN_IF_ERROR(rowset->get_segments_key_bounds(&key_bounds));
 349 | 2.16k |         segment_key_bounds.insert(segment_key_bounds.end(), key_bounds.begin(), key_bounds.end());
 350 | 2.16k |     }
 351 |       |     // build output rowset
 352 |   294 |     RowsetMetaSharedPtr rowset_meta = std::make_shared<RowsetMeta>();
 353 |   294 |     rowset_meta->set_num_rows(_input_row_num);
 354 |   294 |     rowset_meta->set_total_disk_size(_input_rowsets_data_size + _input_rowsets_index_size);
 355 |   294 |     rowset_meta->set_data_disk_size(_input_rowsets_data_size);
 356 |   294 |     rowset_meta->set_index_disk_size(_input_rowsets_index_size);
 357 |   294 |     rowset_meta->set_empty(_input_row_num == 0);
 358 |   294 |     rowset_meta->set_num_segments(_input_num_segments);
 359 |   294 |     rowset_meta->set_segments_overlap(NONOVERLAPPING);
 360 |   294 |     rowset_meta->set_rowset_state(VISIBLE);
 361 |   294 |     rowset_meta->set_segments_key_bounds_truncated(segments_key_bounds_truncated);
 362 |   294 |     rowset_meta->set_segments_key_bounds(segment_key_bounds);
 363 |       |
 364 |   294 |     _output_rowset = _output_rs_writer->manual_build(rowset_meta);
 365 |       |
 366 |       |     // 2. check variant column path stats
 367 |   294 |     RETURN_IF_ERROR(vectorized::schema_util::VariantCompactionUtil::check_path_stats(
 368 |   294 |             _input_rowsets, _output_rowset, _tablet));
 369 |   294 |     return Status::OK();
 370 |   294 | }
 371 |       |
 372 |   912 | Status CompactionMixin::build_basic_info(bool is_ordered_compaction) {
 373 | 7.13k |     for (auto& rowset : _input_rowsets) {
 374 | 7.13k |         const auto& rowset_meta = rowset->rowset_meta();
 375 | 7.13k |         auto index_size = rowset_meta->index_disk_size();
 376 | 7.13k |         auto total_size = rowset_meta->total_disk_size();
 377 | 7.13k |         auto data_size = rowset_meta->data_disk_size();
 378 |       |         // index size may be corrupted by a bug present before versions 2.1.5 / 3.0.0;
 379 |       |         // try to get the real index size from disk.
 380 | 7.13k |         if (index_size < 0 || index_size > total_size * 2) {
 381 |     0 |             LOG(ERROR) << "invalid index size:" << index_size << " total size:" << total_size
 382 |     0 |                        << " data size:" << data_size << " tablet:" << rowset_meta->tablet_id()
 383 |     0 |                        << " rowset:" << rowset_meta->rowset_id();
 384 |     0 |             index_size = 0;
 385 |     0 |             auto st = rowset->get_inverted_index_size(&index_size);
 386 |     0 |             if (!st.ok()) {
 387 |     0 |                 LOG(ERROR) << "failed to get inverted index size. res=" << st;
 388 |     0 |             }
 389 |     0 |         }
 390 | 7.13k |         _input_rowsets_data_size += data_size;
 391 | 7.13k |         _input_rowsets_index_size += index_size;
 392 | 7.13k |         _input_rowsets_total_size += total_size;
 393 | 7.13k |         _input_row_num += rowset->num_rows();
 394 | 7.13k |         _input_num_segments += rowset->num_segments();
 395 | 7.13k |     }
 396 |   912 |     COUNTER_UPDATE(_input_rowsets_data_size_counter, _input_rowsets_data_size);
 397 |   912 |     COUNTER_UPDATE(_input_row_num_counter, _input_row_num);
 398 |   912 |     COUNTER_UPDATE(_input_segments_num_counter, _input_num_segments);
 399 |       |
 400 |   912 |     TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::build_basic_info",
 401 |   912 |                                       Status::OK());
 402 |       |
 403 |   912 |     _output_version =
 404 |   912 |             Version(_input_rowsets.front()->start_version(), _input_rowsets.back()->end_version());
 405 |       |
 406 |   912 |     _newest_write_timestamp = _input_rowsets.back()->newest_write_timestamp();
 407 |       |
 408 |   912 |     std::vector<RowsetMetaSharedPtr> rowset_metas(_input_rowsets.size());
 409 |   912 |     std::transform(_input_rowsets.begin(), _input_rowsets.end(), rowset_metas.begin(),
 410 | 7.34k |                    [](const RowsetSharedPtr& rowset) { return rowset->rowset_meta(); });
 411 |   912 |     _cur_tablet_schema = _tablet->tablet_schema_with_merged_max_schema_version(rowset_metas);
 412 |       |
 413 |       |     // if enable_vertical_compact_variant_subcolumns is true, we need to compact the variant subcolumns in separate column groups,
 414 |       |     // so get_extended_compaction_schema will extend the schema for variant columns;
 415 |       |     // for ordered compaction, we don't need to extend the schema for variant columns
 416 |   915 |     if (_enable_vertical_compact_variant_subcolumns && !is_ordered_compaction) {
 417 |   625 |         RETURN_IF_ERROR(
 418 |   625 |                 vectorized::schema_util::VariantCompactionUtil::get_extended_compaction_schema(
 419 |   625 |                         _input_rowsets, _cur_tablet_schema));
 420 |   625 |     }
 421 |   912 |     return Status::OK();
 422 |   912 | }
 423 |       |
 424 |   925 | bool CompactionMixin::handle_ordered_data_compaction() {
 425 |   925 |     if (!config::enable_ordered_data_compaction) {
 426 |     0 |         return false;
 427 |     0 |     }
 428 |   925 |     if (compaction_type() == ReaderType::READER_COLD_DATA_COMPACTION ||
 429 |   925 |         compaction_type() == ReaderType::READER_FULL_COMPACTION) {
 430 |       |         // The remote file system and full compaction do not support linking files.
 431 |   156 |         return false;
 432 |   156 |     }
 433 |   769 |     if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
 434 |   769 |         _tablet->enable_unique_key_merge_on_write()) {
 435 |   217 |         return false;
 436 |   217 |     }
 437 |       |
 438 |   552 |     if (_tablet->tablet_meta()->tablet_schema()->skip_write_index_on_load()) {
 439 |       |         // Expected to create index through normal compaction
 440 |     0 |         return false;
 441 |     0 |     }
 442 |       |
 443 |       |     // check delete version: if compaction type is base compaction and
 444 |       |     // has a delete version, use original compaction
 445 |   552 |     if (compaction_type() == ReaderType::READER_BASE_COMPACTION ||
 446 |   552 |         (_allow_delete_in_cumu_compaction &&
 447 |   482 |          compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION)) {
 448 |   210 |         for (auto& rowset : _input_rowsets) {
 449 |   210 |             if (rowset->rowset_meta()->has_delete_predicate()) {
 450 |    70 |                 return false;
 451 |    70 |             }
 452 |   210 |         }
 453 |    70 |     }
 454 |       |
 455 |       |     // check if rowsets are tidy so we can just modify meta and do link
 456 |       |     // files to handle compaction
 457 |   482 |     auto input_size = _input_rowsets.size();
 458 |   482 |     std::string pre_max_key;
 459 |   482 |     bool pre_rs_key_bounds_truncated {false};
 460 | 2.65k |     for (auto i = 0; i < input_size; ++i) {
 461 | 2.36k |         if (!is_rowset_tidy(pre_max_key, pre_rs_key_bounds_truncated, _input_rowsets[i])) {
 462 |   192 |             if (i <= input_size / 2) {
 463 |   188 |                 return false;
 464 |   188 |             } else {
 465 |     4 |                 _input_rowsets.resize(i);
 466 |     4 |                 break;
 467 |     4 |             }
 468 |   192 |         }
 469 | 2.36k |     }
 470 |       |     // most rowsets of the current compaction are nonoverlapping;
 471 |       |     // just handle nonoverlapping rowsets
 472 |   294 |     auto st = do_compact_ordered_rowsets();
 473 |   294 |     if (!st.ok()) {
 474 |     0 |         LOG(WARNING) << "failed to compact ordered rowsets: " << st;
 475 |     0 |         _pending_rs_guard.drop();
 476 |     0 |     }
 477 |       |
 478 |   294 |     return st.ok();
 479 |   482 | }
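
Note: handle_ordered_data_compaction() only link-compacts when the tidy prefix found above covers more than half of the inputs; a short prefix means too little benefit, so it falls back to a normal merge. The trimming heuristic in isolation (generic sketch; is_tidy stands in for the stateful is_rowset_tidy() call):

    #include <cstddef>
    #include <functional>
    #include <vector>

    // Returns true when `inputs` was reduced to a tidy prefix worth linking;
    // false means the caller should run a normal merging compaction instead.
    template <typename T>
    bool trim_to_tidy_prefix(std::vector<T>& inputs,
                             const std::function<bool(const T&)>& is_tidy) {
        const std::size_t n = inputs.size();
        for (std::size_t i = 0; i < n; ++i) {
            if (!is_tidy(inputs[i])) {
                if (i <= n / 2) {
                    return false;  // less than half is tidy: not worth it
                }
                inputs.resize(i);  // keep only the tidy prefix
                break;
            }
        }
        return true;
    }
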
 480 |       |
 481 |   912 | Status CompactionMixin::execute_compact() {
 482 |   912 |     uint32_t checksum_before;
 483 |   912 |     uint32_t checksum_after;
 484 |   912 |     bool enable_compaction_checksum = config::enable_compaction_checksum;
 485 |   912 |     if (enable_compaction_checksum) {
 486 |     0 |         EngineChecksumTask checksum_task(_engine, _tablet->tablet_id(), _tablet->schema_hash(),
 487 |     0 |                                          _input_rowsets.back()->end_version(), &checksum_before);
 488 |     0 |         RETURN_IF_ERROR(checksum_task.execute());
 489 |     0 |     }
 490 |       |
 491 |   912 |     auto* data_dir = tablet()->data_dir();
 492 |   912 |     int64_t permits = get_compaction_permits();
 493 |   912 |     data_dir->disks_compaction_score_increment(permits);
 494 |   912 |     data_dir->disks_compaction_num_increment(1);
 495 |       |
 496 |   917 |     auto record_compaction_stats = [&](const doris::Exception& ex) {
 497 |   917 |         _tablet->compaction_count.fetch_add(1, std::memory_order_relaxed);
 498 |   917 |         data_dir->disks_compaction_score_increment(-permits);
 499 |   917 |         data_dir->disks_compaction_num_increment(-1);
 500 |   917 |     };
 501 |       |
 502 |   912 |     HANDLE_EXCEPTION_IF_CATCH_EXCEPTION(execute_compact_impl(permits), record_compaction_stats);
 503 |   880 |     record_compaction_stats(doris::Exception());
 504 |       |
 505 |   880 |     if (enable_compaction_checksum) {
 506 |     0 |         EngineChecksumTask checksum_task(_engine, _tablet->tablet_id(), _tablet->schema_hash(),
 507 |     0 |                                          _input_rowsets.back()->end_version(), &checksum_after);
 508 |     0 |         RETURN_IF_ERROR(checksum_task.execute());
 509 |     0 |         if (checksum_before != checksum_after) {
 510 |     0 |             return Status::InternalError(
 511 |     0 |                     "compaction tablet checksum not consistent, before={}, after={}, tablet_id={}",
 512 |     0 |                     checksum_before, checksum_after, _tablet->tablet_id());
 513 |     0 |         }
 514 |     0 |     }
 515 |       |
 516 |   880 |     DorisMetrics::instance()->local_compaction_read_rows_total->increment(_input_row_num);
 517 |   880 |     DorisMetrics::instance()->local_compaction_read_bytes_total->increment(
 518 |   880 |             _input_rowsets_total_size);
 519 |       |
 520 |   880 |     TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::execute_compact", Status::OK());
 521 |       |
 522 |   880 |     DorisMetrics::instance()->local_compaction_write_rows_total->increment(
 523 |   880 |             _output_rowset->num_rows());
 524 |   880 |     DorisMetrics::instance()->local_compaction_write_bytes_total->increment(
 525 |   880 |             _output_rowset->total_disk_size());
 526 |       |
 527 |   880 |     _load_segment_to_cache();
 528 |   880 |     return Status::OK();
 529 |   880 | }
 530 |       |
 531 |   915 | Status CompactionMixin::execute_compact_impl(int64_t permits) {
 532 |   915 |     OlapStopWatch watch;
 533 |       |
 534 |   915 |     if (handle_ordered_data_compaction()) {
 535 |   290 |         RETURN_IF_ERROR(modify_rowsets());
 536 |   290 |         LOG(INFO) << "succeed to do ordered data " << compaction_name()
 537 |   290 |                   << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
 538 |   290 |                   << ", disk=" << tablet()->data_dir()->path()
 539 |   290 |                   << ", segments=" << _input_num_segments << ", input_row_num=" << _input_row_num
 540 |   290 |                   << ", output_row_num=" << _output_rowset->num_rows()
 541 |   290 |                   << ", input_rowsets_data_size=" << _input_rowsets_data_size
 542 |   290 |                   << ", input_rowsets_index_size=" << _input_rowsets_index_size
 543 |   290 |                   << ", input_rowsets_total_size=" << _input_rowsets_total_size
 544 |   290 |                   << ", output_rowset_data_size=" << _output_rowset->data_disk_size()
 545 |   290 |                   << ", output_rowset_index_size=" << _output_rowset->index_disk_size()
 546 |   290 |                   << ", output_rowset_total_size=" << _output_rowset->total_disk_size()
 547 |   290 |                   << ". elapsed time=" << watch.get_elapse_second() << "s.";
 548 |   290 |         _state = CompactionState::SUCCESS;
 549 |   290 |         return Status::OK();
 550 |   290 |     }
 551 |   625 |     RETURN_IF_ERROR(build_basic_info());
 552 |       |
 553 |   625 |     TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::execute_compact_impl",
 554 |   625 |                                       Status::OK());
 555 |       |
 556 | 18.4E |     VLOG_DEBUG << "dump tablet schema: " << _cur_tablet_schema->dump_structure();
 557 |       |
 558 |   625 |     LOG(INFO) << "start " << compaction_name() << ". tablet=" << _tablet->tablet_id()
 559 |   625 |               << ", output_version=" << _output_version << ", permits: " << permits;
 560 |       |
 561 |   625 |     RETURN_IF_ERROR(merge_input_rowsets());
 562 |       |
 563 |       |     // Currently, the compaction level is only updated by the time series policy.
 564 |   589 |     update_compaction_level();
 565 |       |
 566 |   589 |     RETURN_IF_ERROR(modify_rowsets());
 567 |       |
 568 |   589 |     auto* cumu_policy = tablet()->cumulative_compaction_policy();
 569 |   589 |     DCHECK(cumu_policy);
 570 |   589 |     LOG(INFO) << "succeed to do " << compaction_name() << " is_vertical=" << _is_vertical
 571 |   589 |               << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
 572 |   589 |               << ", current_max_version=" << tablet()->max_version().second
 573 |   589 |               << ", disk=" << tablet()->data_dir()->path()
 574 |   589 |               << ", input_segments=" << _input_num_segments << ", input_rowsets_data_size="
 575 |   589 |               << PrettyPrinter::print_bytes(_input_rowsets_data_size)
 576 |   589 |               << ", input_rowsets_index_size="
 577 |   589 |               << PrettyPrinter::print_bytes(_input_rowsets_index_size)
 578 |   589 |               << ", input_rowsets_total_size="
 579 |   589 |               << PrettyPrinter::print_bytes(_input_rowsets_total_size)
 580 |   589 |               << ", output_rowset_data_size="
 581 |   589 |               << PrettyPrinter::print_bytes(_output_rowset->data_disk_size())
 582 |   589 |               << ", output_rowset_index_size="
 583 |   589 |               << PrettyPrinter::print_bytes(_output_rowset->index_disk_size())
 584 |   589 |               << ", output_rowset_total_size="
 585 |   589 |               << PrettyPrinter::print_bytes(_output_rowset->total_disk_size())
 586 |   589 |               << ", input_row_num=" << _input_row_num
 587 |   589 |               << ", output_row_num=" << _output_rowset->num_rows()
 588 |   589 |               << ", filtered_row_num=" << _stats.filtered_rows
 589 |   589 |               << ", merged_row_num=" << _stats.merged_rows
 590 |   589 |               << ". elapsed time=" << watch.get_elapse_second()
 591 |   589 |               << "s. cumulative_compaction_policy=" << cumu_policy->name()
 592 |   589 |               << ", compact_row_per_second="
 593 |   589 |               << cast_set<double>(_input_row_num) / watch.get_elapse_second();
 594 |       |
 595 |   589 |     _state = CompactionState::SUCCESS;
 596 |       |
 597 |   589 |     return Status::OK();
 598 |   589 | }
 599 |       |
 600 |   654 | Status Compaction::do_inverted_index_compaction() {
 601 |   654 |     const auto& ctx = _output_rs_writer->context();
 602 |   654 |     if (!_enable_inverted_index_compaction || _input_row_num <= 0 ||
 603 |   654 |         ctx.columns_to_do_index_compaction.empty()) {
 604 |   558 |         return Status::OK();
 605 |   558 |     }
 606 |       |
 607 |    96 |     auto error_handler = [this](int64_t index_id, int64_t column_uniq_id) {
 608 |    56 |         LOG(WARNING) << "failed to do index compaction"
 609 |    56 |                      << ". tablet=" << _tablet->tablet_id() << ". column uniq id=" << column_uniq_id
 610 |    56 |                      << ". index_id=" << index_id;
 611 |   398 |         for (auto& rowset : _input_rowsets) {
 612 |   398 |             rowset->set_skip_index_compaction(cast_set<int32_t>(column_uniq_id));
 613 |   398 |             LOG(INFO) << "mark skipping inverted index compaction next time"
 614 |   398 |                       << ". tablet=" << _tablet->tablet_id() << ", rowset=" << rowset->rowset_id()
 615 |   398 |                       << ", column uniq id=" << column_uniq_id << ", index_id=" << index_id;
 616 |   398 |         }
 617 |    56 |     };
 618 |       |
 619 |    96 |     DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_rowid_conversion_null",
 620 |    96 |                     { _stats.rowid_conversion = nullptr; })
 621 |    96 |     if (!_stats.rowid_conversion) {
 622 |     2 |         LOG(WARNING) << "failed to do index compaction, rowid conversion is null"
 623 |     2 |                      << ". tablet=" << _tablet->tablet_id()
 624 |     2 |                      << ", input row number=" << _input_row_num;
 625 |     2 |         mark_skip_index_compaction(ctx, error_handler);
 626 |       |
 627 |     2 |         return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 628 |     2 |                 "failed to do index compaction, rowid conversion is null. tablet={}",
 629 |     2 |                 _tablet->tablet_id());
 630 |     2 |     }
 631 |       |
 632 |    94 |     OlapStopWatch inverted_watch;
 633 |       |
 634 |       |     // translation vec
 635 |       |     // <<dest_idx_num, dest_docId>>
 636 |       |     // the first level vector: index indicates src segment.
 637 |       |     // the second level vector: index indicates row id of source segment,
 638 |       |     // value indicates row id of destination segment.
 639 |       |     // <UINT32_MAX, UINT32_MAX> indicates current row not exist.
 640 |    94 |     const auto& trans_vec = _stats.rowid_conversion->get_rowid_conversion_map();
 641 |       |
 642 |       |     // source rowset,segment -> index_id
 643 |    94 |     const auto& src_seg_to_id_map = _stats.rowid_conversion->get_src_segment_to_id_map();
 644 |       |
 645 |       |     // dest rowset id
 646 |    94 |     RowsetId dest_rowset_id = _stats.rowid_conversion->get_dst_rowset_id();
 647 |       |     // dest segment id -> num rows
 648 |    94 |     std::vector<uint32_t> dest_segment_num_rows;
 649 |    94 |     RETURN_IF_ERROR(_output_rs_writer->get_segment_num_rows(&dest_segment_num_rows));
 650 |       |
 651 |    94 |     auto src_segment_num = src_seg_to_id_map.size();
 652 |    94 |     auto dest_segment_num = dest_segment_num_rows.size();
 653 |       |
 654 |       |     // when all the input rowsets are deleted, the output rowset will be empty and dest_segment_num will be 0.
 655 |    94 |     if (dest_segment_num <= 0) {
 656 |     3 |         LOG(INFO) << "skip doing index compaction due to no output segments"
 657 |     3 |                   << ". tablet=" << _tablet->tablet_id() << ", input row number=" << _input_row_num
 658 |     3 |                   << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
 659 |     3 |         return Status::OK();
 660 |     3 |     }
 661 |       |
 662 |       |     // Only write info files when debug index compaction is enabled.
 663 |       |     // The files are used to debug index compaction and work with index_tool.
 664 |    91 |     if (config::debug_inverted_index_compaction) {
 665 |       |         // src index files
 666 |       |         // format: rowsetId_segmentId
 667 |    10 |         std::vector<std::string> src_index_files(src_segment_num);
 668 |    78 |         for (const auto& m : src_seg_to_id_map) {
 669 |    78 |             std::pair<RowsetId, uint32_t> p = m.first;
 670 |    78 |             src_index_files[m.second] = p.first.to_string() + "_" + std::to_string(p.second);
 671 |    78 |         }
 672 |       |
 673 |       |         // dest index files
 674 |       |         // format: rowsetId_segmentId
 675 |    10 |         std::vector<std::string> dest_index_files(dest_segment_num);
 676 |    20 |         for (int i = 0; i < dest_segment_num; ++i) {
 677 |    10 |             auto prefix = dest_rowset_id.to_string() + "_" + std::to_string(i);
 678 |    10 |             dest_index_files[i] = prefix;
 679 |    10 |         }
 680 |       |
 681 |    10 |         auto write_json_to_file = [&](const nlohmann::json& json_obj,
 682 |    40 |                                       const std::string& file_name) {
 683 |    40 |             io::FileWriterPtr file_writer;
 684 |    40 |             std::string file_path =
 685 |    40 |                     fmt::format("{}/{}.json", std::string(getenv("LOG_DIR")), file_name);
 686 |    40 |             RETURN_IF_ERROR(io::global_local_filesystem()->create_file(file_path, &file_writer));
 687 |    40 |             RETURN_IF_ERROR(file_writer->append(json_obj.dump()));
 688 |    40 |             RETURN_IF_ERROR(file_writer->append("\n"));
 689 |    40 |             return file_writer->close();
 690 |    40 |         };
 691 |       |
 692 |       |         // Convert trans_vec to JSON and print it
 693 |    10 |         nlohmann::json trans_vec_json = trans_vec;
 694 |    10 |         auto output_version =
 695 |    10 |                 _output_version.to_string().substr(1, _output_version.to_string().size() - 2);
 696 |    10 |         RETURN_IF_ERROR(write_json_to_file(
 697 |    10 |                 trans_vec_json,
 698 |    10 |                 fmt::format("trans_vec_{}_{}", _tablet->tablet_id(), output_version)));
 699 |       |
 700 |    10 |         nlohmann::json src_index_files_json = src_index_files;
 701 |    10 |         RETURN_IF_ERROR(write_json_to_file(
 702 |    10 |                 src_index_files_json,
 703 |    10 |                 fmt::format("src_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
 704 |       |
 705 |    10 |         nlohmann::json dest_index_files_json = dest_index_files;
 706 |    10 |         RETURN_IF_ERROR(write_json_to_file(
 707 |    10 |                 dest_index_files_json,
 708 |    10 |                 fmt::format("dest_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
 709 |       |
 710 |    10 |         nlohmann::json dest_segment_num_rows_json = dest_segment_num_rows;
 711 |    10 |         RETURN_IF_ERROR(write_json_to_file(
 712 |    10 |                 dest_segment_num_rows_json,
 713 |    10 |                 fmt::format("dest_seg_num_rows_{}_{}", _tablet->tablet_id(), output_version)));
 714 |    10 |     }
 715 |       |
 716 |       |     // create an index writer to compact the indexes
 717 |    91 |     std::unordered_map<RowsetId, Rowset*> rs_id_to_rowset_map;
 718 |   568 |     for (auto&& rs : _input_rowsets) {
 719 |   568 |         rs_id_to_rowset_map.emplace(rs->rowset_id(), rs.get());
 720 |   568 |     }
 721 |       |
 722 |       |     // src index dirs
 723 |    91 |     std::vector<std::unique_ptr<IndexFileReader>> index_file_readers(src_segment_num);
 724 |   546 |     for (const auto& m : src_seg_to_id_map) {
 725 |   546 |         const auto& [rowset_id, seg_id] = m.first;
 726 |       |
 727 |   546 |         auto find_it = rs_id_to_rowset_map.find(rowset_id);
 728 |   546 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_find_rowset_error",
 729 |   546 |                         { find_it = rs_id_to_rowset_map.end(); })
 730 |   546 |         if (find_it == rs_id_to_rowset_map.end()) [[unlikely]] {
 731 |     2 |             LOG(WARNING) << "failed to do index compaction, cannot find rowset. tablet_id="
 732 |     2 |                          << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string();
 733 |     2 |             mark_skip_index_compaction(ctx, error_handler);
 734 |     2 |             return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 735 |     2 |                     "failed to do index compaction, cannot find rowset. tablet_id={} rowset_id={}",
 736 |     2 |                     _tablet->tablet_id(), rowset_id.to_string());
 737 |     2 |         }
 738 |       |
 739 |   544 |         auto* rowset = find_it->second;
 740 |   544 |         auto fs = rowset->rowset_meta()->fs();
 741 |   544 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_get_fs_error", { fs = nullptr; })
 742 |   544 |         if (!fs) {
 743 |     2 |             LOG(WARNING) << "failed to do index compaction, get fs failed. resource_id="
 744 |     2 |                          << rowset->rowset_meta()->resource_id();
 745 |     2 |             mark_skip_index_compaction(ctx, error_handler);
 746 |     2 |             return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 747 |     2 |                     "get fs failed, resource_id={}", rowset->rowset_meta()->resource_id());
 748 |     2 |         }
 749 |       |
 750 |   542 |         auto seg_path = rowset->segment_path(seg_id);
 751 |   542 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_seg_path_nullptr", {
 752 |   542 |             seg_path = ResultError(Status::Error<ErrorCode::INTERNAL_ERROR>(
 753 |   542 |                     "do_inverted_index_compaction_seg_path_nullptr"));
 754 |   542 |         })
 755 |   542 |         if (!seg_path.has_value()) {
 756 |     2 |             LOG(WARNING) << "failed to do index compaction, get segment path failed. tablet_id="
 757 |     2 |                          << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string()
 758 |     2 |                          << " seg_id=" << seg_id;
 759 |     2 |             mark_skip_index_compaction(ctx, error_handler);
 760 |     2 |             return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 761 |     2 |                     "get segment path failed. tablet_id={} rowset_id={} seg_id={}",
 762 |     2 |                     _tablet->tablet_id(), rowset_id.to_string(), seg_id);
 763 |     2 |         }
 764 |   540 |         auto index_file_reader = std::make_unique<IndexFileReader>(
 765 |   540 |                 fs,
 766 |   540 |                 std::string {InvertedIndexDescriptor::get_index_file_path_prefix(seg_path.value())},
 767 |   540 |                 _cur_tablet_schema->get_inverted_index_storage_format(),
 768 |   540 |                 rowset->rowset_meta()->inverted_index_file_info(seg_id));
 769 |   540 |         auto st = index_file_reader->init(config::inverted_index_read_buffer_size);
 770 |   540 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_init_inverted_index_file_reader",
 771 |   540 |                         {
 772 |   540 |                             st = Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
 773 |   540 |                                     "debug point: "
 774 |   540 |                                     "Compaction::do_inverted_index_compaction_init_inverted_index_"
 775 |   540 |                                     "file_reader error");
 776 |   540 |                         })
 777 |   540 |         if (!st.ok()) {
 778 |     2 |             LOG(WARNING) << "failed to do index compaction, init inverted index file reader "
 779 |     2 |                             "failed. tablet_id="
 780 |     2 |                          << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string()
 781 |     2 |                          << " seg_id=" << seg_id;
 782 |     2 |             mark_skip_index_compaction(ctx, error_handler);
 783 |     2 |             return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 784 |     2 |                     "init inverted index file reader failed. tablet_id={} rowset_id={} seg_id={}",
 785 |     2 |                     _tablet->tablet_id(), rowset_id.to_string(), seg_id);
 786 |     2 |         }
 787 |   538 |         index_file_readers[m.second] = std::move(index_file_reader);
 788 |   538 |     }
 789 |       |
 790 |       |     // dest index files
 791 |       |     // format: rowsetId_segmentId
 792 |    83 |     auto& inverted_index_file_writers =
 793 |    83 |             dynamic_cast<BaseBetaRowsetWriter*>(_output_rs_writer.get())->index_file_writers();
 794 |    83 |     DBUG_EXECUTE_IF(
 795 |    83 |             "Compaction::do_inverted_index_compaction_inverted_index_file_writers_size_error",
 796 |    83 |             { inverted_index_file_writers.clear(); })
 797 |    83 |     if (inverted_index_file_writers.size() != dest_segment_num) {
 798 |     2 |         LOG(WARNING) << "failed to do index compaction, dest segment num not match. tablet_id="
 799 |     2 |                      << _tablet->tablet_id() << " dest_segment_num=" << dest_segment_num
 800 |     2 |                      << " inverted_index_file_writers.size()="
 801 |     2 |                      << inverted_index_file_writers.size();
 802 |     2 |         mark_skip_index_compaction(ctx, error_handler);
 803 |     2 |         return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 804 |     2 |                 "dest segment num not match. tablet_id={} dest_segment_num={} "
 805 |     2 |                 "inverted_index_file_writers.size()={}",
 806 |     2 |                 _tablet->tablet_id(), dest_segment_num, inverted_index_file_writers.size());
 807 |     2 |     }
 808 |       |
 809 |       |     // use tmp file dir to store index files
 810 |    81 |     auto tmp_file_dir = ExecEnv::GetInstance()->get_tmp_file_dirs()->get_tmp_file_dir();
 811 |    81 |     auto index_tmp_path = tmp_file_dir / dest_rowset_id.to_string();
 812 |    81 |     LOG(INFO) << "start index compaction"
 813 |    81 |               << ". tablet=" << _tablet->tablet_id() << ", source index size=" << src_segment_num
 814 |    81 |               << ", destination index size=" << dest_segment_num << ".";
 815 |       |
 816 |    81 |     Status status = Status::OK();
 817 |   411 |     for (auto&& column_uniq_id : ctx.columns_to_do_index_compaction) {
 818 |   411 |         auto col = _cur_tablet_schema->column_by_uid(column_uniq_id);
 819 |   411 |         auto index_metas = _cur_tablet_schema->inverted_indexs(col);
 820 |   411 |         DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_can_not_find_index_meta",
 821 |   411 |                         { index_metas.clear(); })
 822 |   411 |         if (index_metas.empty()) {
 823 |     2 |             status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
 824 |     2 |                     fmt::format("Can not find index_meta for col {}", col.name()));
 825 |     2 |             LOG(WARNING) << "failed to do index compaction, can not find index_meta for column"
 826 |     2 |                          << ". tablet=" << _tablet->tablet_id()
 827 |     2 |                          << ", column uniq id=" << column_uniq_id;
 828 |     2 |             error_handler(-1, column_uniq_id);
 829 |     2 |             break;
 830 |     2 |         }
 831 |   411 |         for (const auto& index_meta : index_metas) {
 832 |   411 |             std::vector<lucene::store::Directory*> dest_index_dirs(dest_segment_num);
 833 |   411 |             try {
 834 |   411 |                 std::vector<std::unique_ptr<DorisCompoundReader, DirectoryDeleter>> src_idx_dirs(
 835 |   411 |                         src_segment_num);
 836 | 2.08k |                 for (int src_segment_id = 0; src_segment_id < src_segment_num; src_segment_id++) {
 837 | 1.67k |                     auto res = index_file_readers[src_segment_id]->open(index_meta);
 838 | 1.67k |                     DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_reader", {
 839 | 1.67k |                         res = ResultError(Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
 840 | 1.67k |                                 "debug point: Compaction::open_index_file_reader error"));
 841 | 1.67k |                     })
 842 | 1.67k |                     if (!res.has_value()) {
 843 |     2 |                         LOG(WARNING) << "failed to do index compaction, open inverted index file "
 844 |     2 |                                         "reader failed"
 845 |     2 |                                      << ". tablet=" << _tablet->tablet_id()
 846 |     2 |                                      << ", column uniq id=" << column_uniq_id
 847 |     2 |                                      << ", src_segment_id=" << src_segment_id;
 848 |     2 |                         throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
 849 |     2 |                                         res.error().msg());
 850 |     2 |                     }
 851 | 1.67k |                     src_idx_dirs[src_segment_id] = std::move(res.value());
 852 | 1.67k |                 }
 853 |   927 |                 for (int dest_segment_id = 0; dest_segment_id < dest_segment_num;
 854 |   520 |                      dest_segment_id++) {
 855 |   520 |                     auto res = inverted_index_file_writers[dest_segment_id]->open(index_meta);
 856 |   520 |                     DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_writer", {
 857 |   520 |                         res = ResultError(Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
 858 |   520 |                                 "debug point: Compaction::open_inverted_index_file_writer error"));
 859 |   520 |                     })
 860 |   520 |                     if (!res.has_value()) {
 861 |     2 |                         LOG(WARNING) << "failed to do index compaction, open inverted index file "
 862 |     2 |                                         "writer failed"
 863 |     2 |                                      << ". tablet=" << _tablet->tablet_id()
 864 |     2 |                                      << ", column uniq id=" << column_uniq_id
 865 |     2 |                                      << ", dest_segment_id=" << dest_segment_id;
 866 |     2 |                         throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
 867 |     2 |                                         res.error().msg());
 868 |     2 |                     }
 869 |       |                     // Destination directories in dest_index_dirs do not need to be deconstructed,
 870 |       |                     // but their lifecycle must be managed by inverted_index_file_writers.
 871 |   518 |                     dest_index_dirs[dest_segment_id] = res.value().get();
 872 |   518 |                 }
 873 |   407 |                 auto st = compact_column(index_meta->index_id(), src_idx_dirs, dest_index_dirs,
 874 |   407 |                                          index_tmp_path.native(), trans_vec, dest_segment_num_rows);
 875 |   407 |                 if (!st.ok()) {
 876 |     8 |                     error_handler(index_meta->index_id(), column_uniq_id);
 877 |     8 |                     status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(st.msg());
 878 |     8 |                 }
 879 |   407 |             } catch (CLuceneError& e) {
 880 |    18 |                 error_handler(index_meta->index_id(), column_uniq_id);
 881 |    18 |                 status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
 882 |    18 |             } catch (const Exception& e) {
 883 |     4 |                 error_handler(index_meta->index_id(), column_uniq_id);
 884 |     4 |                 status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
 885 |     4 |             }
 886 |   411 |         }
 887 |   409 |     }
 888 |       |
 889 |       |     // check index compaction status. If status is not ok, we should return error and end this compaction round.
 890 |    81 |     if (!status.ok()) {
 891 |    19 |         return status;
 892 |    19 |     }
 893 |    81 |     LOG(INFO) << "succeed to do index compaction"
 894 |    62 |               << ". tablet=" << _tablet->tablet_id()
 895 |    62 |               << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
 896 |       |
 897 |    62 |     return Status::OK();
 898 |    81 | }
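
Note: the row ID conversion map (trans_vec, documented at lines 634-639) records, for every (source segment, source row), the (destination segment, destination row) it landed on after the merge, with {UINT32_MAX, UINT32_MAX} marking rows that were merged away or deleted; compact_column() replays index postings through this map. A minimal lookup sketch (illustrative types, not the RowIdConversion API):

    #include <cstdint>
    #include <utility>
    #include <vector>

    using RowLoc = std::pair<uint32_t, uint32_t>;  // {segment id, row id}

    // Translate a source row to its destination location. Returns false when
    // the row no longer exists in the output rowset, so its index entries
    // must be dropped rather than rewritten.
    bool remap_row(const std::vector<std::vector<RowLoc>>& trans_vec,
                   uint32_t src_seg, uint32_t src_row, RowLoc* dest) {
        const RowLoc& loc = trans_vec[src_seg][src_row];
        if (loc.first == UINT32_MAX && loc.second == UINT32_MAX) {
            return false;
        }
        *dest = loc;
        return true;
    }
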
 899 |       |
 900 |       | void Compaction::mark_skip_index_compaction(
 901 |       |         const RowsetWriterContext& context,
 902 |    12 |         const std::function<void(int64_t, int64_t)>& error_handler) {
 903 |    24 |     for (auto&& column_uniq_id : context.columns_to_do_index_compaction) {
 904 |    24 |         auto col = _cur_tablet_schema->column_by_uid(column_uniq_id);
 905 |    24 |         auto index_metas = _cur_tablet_schema->inverted_indexs(col);
 906 |    24 |         DBUG_EXECUTE_IF("Compaction::mark_skip_index_compaction_can_not_find_index_meta",
 907 |    24 |                         { index_metas.clear(); })
 908 |    24 |         if (index_metas.empty()) {
 909 |     0 |             LOG(WARNING) << "mark skip index compaction, can not find index_meta for column"
 910 |     0 |                          << ". tablet=" << _tablet->tablet_id()
 911 |     0 |                          << ", column uniq id=" << column_uniq_id;
 912 |     0 |             error_handler(-1, column_uniq_id);
 913 |     0 |             continue;
 914 |     0 |         }
 915 |    24 |         for (const auto& index_meta : index_metas) {
 916 |    24 |             error_handler(index_meta->index_id(), column_uniq_id);
 917 |    24 |         }
 918 |    24 |     }
 919 |    12 | }
 920 |       |
 921 |   899 | void Compaction::construct_index_compaction_columns(RowsetWriterContext& ctx) {
 922 | 2.87k |     for (const auto& index : _cur_tablet_schema->inverted_indexes()) {
 923 | 2.87k |         auto col_unique_ids = index->col_unique_ids();
 924 |       |         // check if column unique ids is empty to avoid crash
 925 | 2.87k |         if (col_unique_ids.empty()) {
 926 |     1 |             LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] index[" << index->index_id()
 927 |     1 |                          << "] has no column unique id, will skip index compaction."
 928 |     1 |                          << " tablet_schema=" << _cur_tablet_schema->dump_full_schema();
 929 |     1 |             continue;
 930 |     1 |         }
 931 | 2.87k |         auto col_unique_id = col_unique_ids[0];
 932 | 2.87k |         if (!_cur_tablet_schema->has_column_unique_id(col_unique_id)) {
 933 |     0 |             LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
 934 |     0 |                          << col_unique_id << "] not found, will skip index compaction";
 935 |     0 |             continue;
 936 |     0 |         }
 937 |       |         // Avoid doing inverted index compaction on non-slice type columns
 938 | 2.87k |         if (!field_is_slice_type(_cur_tablet_schema->column_by_uid(col_unique_id).type())) {
 939 | 2.00k |             continue;
 940 | 2.00k |         }
 941 |       |
942
        // if index properties are different, index compaction maybe needs to be skipped.
943
870
        bool is_continue = false;
944
870
        std::optional<std::map<std::string, std::string>> first_properties;
945
4.76k
        for (const auto& rowset : _input_rowsets) {
946
4.76k
            auto tablet_indexs = rowset->tablet_schema()->inverted_indexs(col_unique_id);
947
            // no inverted index or index id is different from current index id
948
4.76k
            auto it = std::find_if(tablet_indexs.begin(), tablet_indexs.end(),
949
4.77k
                                   [&index](const auto& tablet_index) {
950
4.77k
                                       return tablet_index->index_id() == index->index_id();
951
4.77k
                                   });
952
4.76k
            if (it != tablet_indexs.end()) {
953
4.75k
                const auto* tablet_index = *it;
954
4.75k
                auto properties = tablet_index->properties();
955
4.75k
                if (!first_properties.has_value()) {
956
864
                    first_properties = properties;
957
3.89k
                } else {
958
3.89k
                    DBUG_EXECUTE_IF(
959
3.89k
                            "Compaction::do_inverted_index_compaction_index_properties_different",
960
3.89k
                            { properties.emplace("dummy_key", "dummy_value"); })
961
3.89k
                    if (properties != first_properties.value()) {
962
8
                        is_continue = true;
963
8
                        break;
964
8
                    }
965
3.89k
                }
966
4.75k
            } else {
967
3
                is_continue = true;
968
3
                break;
969
3
            }
970
4.76k
        }
971
870
        if (is_continue) {
972
10
            continue;
973
10
        }
974
4.21k
        auto has_inverted_index = [&](const RowsetSharedPtr& src_rs) {
975
4.21k
            auto* rowset = static_cast<BetaRowset*>(src_rs.get());
976
4.21k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_is_skip_index_compaction",
977
4.21k
                            { rowset->set_skip_index_compaction(col_unique_id); })
978
4.21k
            if (rowset->is_skip_index_compaction(col_unique_id)) {
979
59
                LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] rowset["
980
59
                             << rowset->rowset_id() << "] column_unique_id[" << col_unique_id
981
59
                             << "] skip inverted index compaction due to last failure";
982
59
                return false;
983
59
            }
984
985
4.15k
            auto fs = rowset->rowset_meta()->fs();
986
4.15k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_get_fs_error",
987
4.15k
                            { fs = nullptr; })
988
4.15k
            if (!fs) {
989
4
                LOG(WARNING) << "get fs failed, resource_id="
990
4
                             << rowset->rowset_meta()->resource_id();
991
4
                return false;
992
4
            }
993
994
4.14k
            auto index_metas = rowset->tablet_schema()->inverted_indexs(col_unique_id);
995
4.14k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_index_meta_nullptr",
996
4.14k
                            { index_metas.clear(); })
997
4.14k
            if (index_metas.empty()) {
998
4
                LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
999
4
                             << col_unique_id << "] index meta is null, will skip index compaction";
1000
4
                return false;
1001
4
            }
1002
4.17k
            for (const auto& index_meta : index_metas) {
1003
6.32k
                for (auto i = 0; i < rowset->num_segments(); i++) {
1004
                    // TODO: inverted_index_path
1005
2.17k
                    auto seg_path = rowset->segment_path(i);
1006
2.17k
                    DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_seg_path_nullptr", {
1007
2.17k
                        seg_path = ResultError(Status::Error<ErrorCode::INTERNAL_ERROR>(
1008
2.17k
                                "construct_skip_inverted_index_seg_path_nullptr"));
1009
2.17k
                    })
1010
2.17k
                    if (!seg_path) {
1011
4
                        LOG(WARNING) << seg_path.error();
1012
4
                        return false;
1013
4
                    }
1014
1015
2.16k
                    std::string index_file_path;
1016
2.16k
                    try {
1017
2.16k
                        auto index_file_reader = std::make_unique<IndexFileReader>(
1018
2.16k
                                fs,
1019
2.16k
                                std::string {InvertedIndexDescriptor::get_index_file_path_prefix(
1020
2.16k
                                        seg_path.value())},
1021
2.16k
                                _cur_tablet_schema->get_inverted_index_storage_format(),
1022
2.16k
                                rowset->rowset_meta()->inverted_index_file_info(i));
1023
2.16k
                        auto st = index_file_reader->init(config::inverted_index_read_buffer_size);
1024
2.16k
                        index_file_path = index_file_reader->get_index_file_path(index_meta);
1025
2.16k
                        DBUG_EXECUTE_IF(
1026
2.16k
                                "Compaction::construct_skip_inverted_index_index_file_reader_init_"
1027
2.16k
                                "status_not_ok",
1028
2.16k
                                {
1029
2.16k
                                    st = Status::Error<ErrorCode::INTERNAL_ERROR>(
1030
2.16k
                                            "debug point: "
1031
2.16k
                                            "construct_skip_inverted_index_index_file_reader_init_"
1032
2.16k
                                            "status_"
1033
2.16k
                                            "not_ok");
1034
2.16k
                                })
1035
2.16k
                        if (!st.ok()) {
1036
4
                            LOG(WARNING) << "init index " << index_file_path << " error:" << st;
1037
4
                            return false;
1038
4
                        }
1039
1040
                        // check index meta
1041
2.16k
                        auto result = index_file_reader->open(index_meta);
1042
2.16k
                        DBUG_EXECUTE_IF(
1043
2.16k
                                "Compaction::construct_skip_inverted_index_index_file_reader_open_"
1044
2.16k
                                "error",
1045
2.16k
                                {
1046
2.16k
                                    result = ResultError(
1047
2.16k
                                            Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
1048
2.16k
                                                    "CLuceneError occur when open idx file"));
1049
2.16k
                                })
1050
2.16k
                        if (!result.has_value()) {
1051
4
                            LOG(WARNING) << "open index " << index_file_path
1052
4
                                         << " error:" << result.error();
1053
4
                            return false;
1054
4
                        }
1055
2.15k
                        auto reader = std::move(result.value());
1056
2.15k
                        std::vector<std::string> files;
1057
2.15k
                        reader->list(&files);
1058
2.15k
                        reader->close();
1059
2.15k
                        DBUG_EXECUTE_IF(
1060
2.15k
                                "Compaction::construct_skip_inverted_index_index_reader_close_"
1061
2.15k
                                "error",
1062
2.15k
                                { _CLTHROWA(CL_ERR_IO, "debug point: reader close error"); })
1063
1064
2.15k
                        DBUG_EXECUTE_IF(
1065
2.15k
                                "Compaction::construct_skip_inverted_index_index_files_count",
1066
2.15k
                                { files.clear(); })
1067
1068
                        // Why at least 3 files?
1069
                        // A slice-type index file contains at least 3 files: null_bitmap, segments_N, segments.gen.
1070
2.15k
                        if (files.size() < 3) {
1071
4
                            LOG(WARNING)
1072
4
                                    << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
1073
4
                                    << col_unique_id << "]," << index_file_path
1074
4
                                    << " is corrupted, will skip index compaction";
1075
4
                            return false;
1076
4
                        }
1077
2.15k
                    } catch (CLuceneError& err) {
1078
4
                        LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
1079
4
                                     << col_unique_id << "] open index[" << index_file_path
1080
4
                                     << "], will skip index compaction, error:" << err.what();
1081
4
                        return false;
1082
4
                    }
1083
2.16k
                }
1084
4.17k
            }
1085
4.12k
            return true;
1086
4.14k
        };
1087
1088
860
        bool all_have_inverted_index = std::all_of(_input_rowsets.begin(), _input_rowsets.end(),
1089
860
                                                   std::move(has_inverted_index));
1090
1091
860
        if (all_have_inverted_index) {
1092
771
            ctx.columns_to_do_index_compaction.insert(col_unique_id);
1093
771
        }
1094
860
    }
1095
899
}
1096
1097
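// A minimal sketch of the eligibility rule implemented above, assuming a
// simplified stand-in predicate for the real `has_inverted_index` lambda;
// `example_rowset_ok` and `example_pick_column` are hypothetical names used
// only for illustration. A column participates in index compaction only if
// *every* input rowset passes the per-rowset checks.
static bool example_rowset_ok(const RowsetSharedPtr& rs) {
    // Stand-in for the real checks: skip marker, fs, index meta, index files.
    return rs != nullptr;
}

static void example_pick_column(RowsetWriterContext& ctx, int32_t col_unique_id,
                                const std::vector<RowsetSharedPtr>& input_rowsets) {
    if (std::all_of(input_rowsets.begin(), input_rowsets.end(), example_rowset_ok)) {
        ctx.columns_to_do_index_compaction.insert(col_unique_id);
    }
}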
17
Status CompactionMixin::update_delete_bitmap() {
1098
    // for mow with cluster keys, compaction read data with delete bitmap
1099
    // if tablet is not ready(such as schema change), we need to update delete bitmap
1100
17
    {
1101
17
        std::shared_lock meta_rlock(_tablet->get_header_lock());
1102
17
        if (_tablet->tablet_state() != TABLET_NOTREADY) {
1103
16
            return Status::OK();
1104
16
        }
1105
17
    }
1106
1
    OlapStopWatch watch;
1107
1
    std::vector<RowsetSharedPtr> rowsets;
1108
5
    for (const auto& rowset : _input_rowsets) {
1109
5
        std::lock_guard rwlock(tablet()->get_rowset_update_lock());
1110
5
        std::shared_lock rlock(_tablet->get_header_lock());
1111
5
        Status st = _tablet->update_delete_bitmap_without_lock(_tablet, rowset, &rowsets);
1112
5
        if (!st.ok()) {
1113
0
            LOG(INFO) << "failed update_delete_bitmap_without_lock for tablet_id="
1114
0
                      << _tablet->tablet_id() << ", st=" << st.to_string();
1115
0
            return st;
1116
0
        }
1117
5
        rowsets.push_back(rowset);
1118
5
    }
1119
1
    LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id()
1120
1
              << ", rowsets: " << _input_rowsets.size() << ", cost: " << watch.get_elapse_time_us()
1121
1
              << "(us)";
1122
1
    return Status::OK();
1123
1
}
1124
1125
0
Status CloudCompactionMixin::update_delete_bitmap() {
1126
    // for mow with cluster keys, compaction read data with delete bitmap
1127
    // if tablet is not ready(such as schema change), we need to update delete bitmap
1128
0
    {
1129
0
        std::shared_lock meta_rlock(_tablet->get_header_lock());
1130
0
        if (_tablet->tablet_state() != TABLET_NOTREADY) {
1131
0
            return Status::OK();
1132
0
        }
1133
0
    }
1134
0
    OlapStopWatch watch;
1135
0
    std::vector<RowsetSharedPtr> rowsets;
1136
0
    for (const auto& rowset : _input_rowsets) {
1137
0
        Status st = _tablet->update_delete_bitmap_without_lock(_tablet, rowset, &rowsets);
1138
0
        if (!st.ok()) {
1139
0
            LOG(INFO) << "failed update_delete_bitmap_without_lock for tablet_id="
1140
0
                      << _tablet->tablet_id() << ", st=" << st.to_string();
1141
0
            return st;
1142
0
        }
1143
0
        rowsets.push_back(rowset);
1144
0
    }
1145
0
    LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id()
1146
0
              << ", rowsets: " << _input_rowsets.size() << ", cost: " << watch.get_elapse_time_us()
1147
0
              << "(us)";
1148
0
    return Status::OK();
1149
0
}
1150
1151
956
Status CompactionMixin::construct_output_rowset_writer(RowsetWriterContext& ctx) {
1152
    // only do index compaction for dup_keys and unique_keys with mow enabled
1153
956
    if (_enable_inverted_index_compaction && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1154
936
                                                _tablet->enable_unique_key_merge_on_write()) ||
1155
936
                                               _tablet->keys_type() == KeysType::DUP_KEYS))) {
1156
900
        construct_index_compaction_columns(ctx);
1157
900
    }
1158
956
    ctx.version = _output_version;
1159
956
    ctx.rowset_state = VISIBLE;
1160
956
    ctx.segments_overlap = NONOVERLAPPING;
1161
956
    ctx.tablet_schema = _cur_tablet_schema;
1162
956
    ctx.newest_write_timestamp = _newest_write_timestamp;
1163
956
    ctx.write_type = DataWriteType::TYPE_COMPACTION;
1164
956
    ctx.compaction_type = compaction_type();
1165
956
    ctx.allow_packed_file = false;
1166
956
    _output_rs_writer = DORIS_TRY(_tablet->create_rowset_writer(ctx, _is_vertical));
1167
956
    _pending_rs_guard = _engine.add_pending_rowset(ctx);
1168
956
    return Status::OK();
1169
956
}
1170
1171
752
Status CompactionMixin::modify_rowsets() {
1172
752
    std::vector<RowsetSharedPtr> output_rowsets;
1173
752
    output_rowsets.push_back(_output_rowset);
1174
1175
752
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1176
752
        _tablet->enable_unique_key_merge_on_write()) {
1177
217
        Version version = tablet()->max_version();
1178
217
        DeleteBitmap output_rowset_delete_bitmap(_tablet->tablet_id());
1179
217
        std::unique_ptr<RowLocationSet> missed_rows;
1180
217
        if ((config::enable_missing_rows_correctness_check ||
1181
217
             config::enable_mow_compaction_correctness_check_core ||
1182
217
             config::enable_mow_compaction_correctness_check_fail) &&
1183
217
            !_allow_delete_in_cumu_compaction &&
1184
217
            compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1185
213
            missed_rows = std::make_unique<RowLocationSet>();
1186
213
            LOG(INFO) << "RowLocation set initialized successfully for tablet:" << _tablet->tablet_id();
1187
213
        }
1188
217
        std::unique_ptr<std::map<RowsetSharedPtr, RowLocationPairList>> location_map;
1189
217
        if (config::enable_rowid_conversion_correctness_check &&
1190
217
            tablet()->tablet_schema()->cluster_key_uids().empty()) {
1191
0
            location_map = std::make_unique<std::map<RowsetSharedPtr, RowLocationPairList>>();
1192
0
            LOG(INFO) << "Location map initialized successfully for tablet:" << _tablet->tablet_id();
1193
0
        }
1194
        // Convert the delete bitmap of the input rowsets to the output rowset.
1195
        // New loads are not blocked, so some keys of the input rowsets might
1196
        // be deleted in the meantime. We deal with the delete bitmap of
1197
        // that incremental data later.
1198
        // TODO(LiaoXin): check if there are duplicate keys
1199
217
        std::size_t missed_rows_size = 0;
1200
217
        tablet()->calc_compaction_output_rowset_delete_bitmap(
1201
217
                _input_rowsets, *_rowid_conversion, 0, version.second + 1, missed_rows.get(),
1202
217
                location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1203
217
                &output_rowset_delete_bitmap);
1204
217
        if (missed_rows) {
1205
213
            missed_rows_size = missed_rows->size();
1206
213
            std::size_t merged_missed_rows_size = _stats.merged_rows;
1207
213
            if (!_tablet->tablet_meta()->tablet_schema()->cluster_key_uids().empty()) {
1208
15
                merged_missed_rows_size += _stats.filtered_rows;
1209
15
            }
1210
1211
            // Suppose a heavy schema change process on BE converting tablet A to tablet B.
1212
            // 1. during schema change double write, new loads write [X-Y] on tablet B.
1213
            // 2. rowsets with version [a],[a+1],...,[b-1],[b] on tablet B are picked for cumu compaction(X<=a<b<=Y).(cumu compaction
1214
            //    on new tablet during schema change double write is allowed after https://github.com/apache/doris/pull/16470)
1215
            // 3. schema change removes all rowsets on tablet B before version Z(b<=Z<=Y) before it begins to convert historical rowsets.
1216
            // 4. schema change finishes.
1217
            // 5. cumu compaction begins on the new tablet with versions [a],...,[b]. If there are duplicate keys between these rowsets,
1218
            //    the compaction check will fail, because these rowsets skipped delete bitmap calculation in both the commit phase and
1219
            //    the publish phase, since tablet B was in NOT_READY state when they were written.
1220
1221
            // Considering that the cumu compaction will eventually fail in this situation, because `Tablet::modify_rowsets` will check whether rowsets in
1222
            // `to_delete`(_input_rowsets) still exist in the tablet's `_rs_version_map`, we can simply skip checking missed rows here.
1223
213
            bool need_to_check_missed_rows = true;
1224
213
            {
1225
213
                std::shared_lock rlock(_tablet->get_header_lock());
1226
213
                need_to_check_missed_rows =
1227
213
                        std::all_of(_input_rowsets.begin(), _input_rowsets.end(),
1228
2.12k
                                    [&](const RowsetSharedPtr& rowset) {
1229
2.12k
                                        return tablet()->rowset_exists_unlocked(rowset);
1230
2.12k
                                    });
1231
213
            }
1232
1233
213
            if (_tablet->tablet_state() == TABLET_RUNNING &&
1234
213
                merged_missed_rows_size != missed_rows_size && need_to_check_missed_rows) {
1235
0
                std::stringstream ss;
1236
0
                ss << "cumulative compaction: the merged rows(" << _stats.merged_rows
1237
0
                   << "), filtered rows(" << _stats.filtered_rows
1238
0
                   << ") is not equal to missed rows(" << missed_rows_size
1239
0
                   << ") in rowid conversion, tablet_id: " << _tablet->tablet_id()
1240
0
                   << ", table_id:" << _tablet->table_id();
1241
0
                if (missed_rows_size == 0) {
1242
0
                    ss << ", debug info: ";
1243
0
                    DeleteBitmap subset_map(_tablet->tablet_id());
1244
0
                    for (auto rs : _input_rowsets) {
1245
0
                        _tablet->tablet_meta()->delete_bitmap().subset(
1246
0
                                {rs->rowset_id(), 0, 0},
1247
0
                                {rs->rowset_id(), rs->num_segments(), version.second + 1},
1248
0
                                &subset_map);
1249
0
                        ss << "(rowset id: " << rs->rowset_id()
1250
0
                           << ", delete bitmap cardinality: " << subset_map.cardinality() << ")";
1251
0
                    }
1252
0
                    ss << ", version[0-" << version.second + 1 << "]";
1253
0
                }
1254
0
                std::string err_msg = fmt::format(
1255
0
                        "cumulative compaction: the merged rows({}), filtered rows({})"
1256
0
                        " is not equal to missed rows({}) in rowid conversion,"
1257
0
                        " tablet_id: {}, table_id:{}",
1258
0
                        _stats.merged_rows, _stats.filtered_rows, missed_rows_size,
1259
0
                        _tablet->tablet_id(), _tablet->table_id());
1260
0
                LOG(WARNING) << err_msg;
1261
0
                if (config::enable_mow_compaction_correctness_check_core) {
1262
0
                    CHECK(false) << err_msg;
1263
0
                } else if (config::enable_mow_compaction_correctness_check_fail) {
1264
0
                    return Status::InternalError<false>(err_msg);
1265
0
                } else {
1266
0
                    DCHECK(false) << err_msg;
1267
0
                }
1268
0
            }
1269
213
        }
1270
1271
217
        if (location_map) {
1272
0
            RETURN_IF_ERROR(tablet()->check_rowid_conversion(_output_rowset, *location_map));
1273
0
            location_map->clear();
1274
0
        }
1275
1276
217
        {
1277
217
            std::lock_guard<std::mutex> wrlock_(tablet()->get_rowset_update_lock());
1278
217
            std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1279
217
            SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1280
1281
            // Here we calculate the delete bitmaps of all rowsets which are committed but not yet published, to reduce the calculation pressure
1282
            // of the publish phase.
1283
            // All rowsets which need recalculation have been published, so we don't need to acquire the lock.
1284
            // Step1: collect all of this tablet's committed rowsets' delete bitmaps
1285
217
            CommitTabletTxnInfoVec commit_tablet_txn_info_vec {};
1286
217
            _engine.txn_manager()->get_all_commit_tablet_txn_info_by_tablet(
1287
217
                    *tablet(), &commit_tablet_txn_info_vec);
1288
1289
            // Step2: calculate all rowsets' delete bitmaps which are published during compaction.
1290
217
            for (auto& it : commit_tablet_txn_info_vec) {
1291
34
                if (!_check_if_includes_input_rowsets(it.rowset_ids)) {
1292
                    // When calculating the delete bitmap of all committed rowsets relative to the compaction,
1293
                    // there may be cases where the compacted rowsets are newer than the committed rowsets.
1294
                    // In that case, rowid conversion cannot be performed, otherwise data would be missing.
1295
                    // Therefore, we need to check that every committed rowset has calculated its delete bitmap for
1296
                    // all compaction input rowsets.
1297
30
                    continue;
1298
30
                }
1299
4
                DeleteBitmap txn_output_delete_bitmap(_tablet->tablet_id());
1300
4
                tablet()->calc_compaction_output_rowset_delete_bitmap(
1301
4
                        _input_rowsets, *_rowid_conversion, 0, UINT64_MAX, missed_rows.get(),
1302
4
                        location_map.get(), *it.delete_bitmap.get(), &txn_output_delete_bitmap);
1303
4
                if (config::enable_merge_on_write_correctness_check) {
1304
4
                    RowsetIdUnorderedSet rowsetids;
1305
4
                    rowsetids.insert(_output_rowset->rowset_id());
1306
4
                    _tablet->add_sentinel_mark_to_delete_bitmap(&txn_output_delete_bitmap,
1307
4
                                                                rowsetids);
1308
4
                }
1309
4
                it.delete_bitmap->merge(txn_output_delete_bitmap);
1310
                // Step3: write back updated delete bitmap and tablet info.
1311
4
                it.rowset_ids.insert(_output_rowset->rowset_id());
1312
4
                _engine.txn_manager()->set_txn_related_delete_bitmap(
1313
4
                        it.partition_id, it.transaction_id, _tablet->tablet_id(),
1314
4
                        tablet()->tablet_uid(), true, it.delete_bitmap, it.rowset_ids,
1315
4
                        it.partial_update_info);
1316
4
            }
1317
1318
            // Convert the delete bitmap of the input rowsets to output rowset for
1319
            // incremental data.
1320
217
            tablet()->calc_compaction_output_rowset_delete_bitmap(
1321
217
                    _input_rowsets, *_rowid_conversion, version.second, UINT64_MAX,
1322
217
                    missed_rows.get(), location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1323
217
                    &output_rowset_delete_bitmap);
1324
1325
217
            if (location_map) {
1326
0
                RETURN_IF_ERROR(tablet()->check_rowid_conversion(_output_rowset, *location_map));
1327
0
            }
1328
1329
217
            tablet()->merge_delete_bitmap(output_rowset_delete_bitmap);
1330
217
            RETURN_IF_ERROR(tablet()->modify_rowsets(output_rowsets, _input_rowsets, true));
1331
217
        }
1332
535
    } else {
1333
535
        std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1334
535
        SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1335
535
        RETURN_IF_ERROR(tablet()->modify_rowsets(output_rowsets, _input_rowsets, true));
1336
535
    }
1337
1338
752
    if (config::tablet_rowset_stale_sweep_by_size &&
1339
752
        _tablet->tablet_meta()->all_stale_rs_metas().size() >=
1340
0
                config::tablet_rowset_stale_sweep_threshold_size) {
1341
0
        tablet()->delete_expired_stale_rowset();
1342
0
    }
1343
1344
752
    int64_t cur_max_version = 0;
1345
752
    {
1346
752
        std::shared_lock rlock(_tablet->get_header_lock());
1347
752
        cur_max_version = _tablet->max_version_unlocked();
1348
752
        tablet()->save_meta();
1349
752
    }
1350
752
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1351
752
        _tablet->enable_unique_key_merge_on_write()) {
1352
217
        auto st = TabletMetaManager::remove_old_version_delete_bitmap(
1353
217
                tablet()->data_dir(), _tablet->tablet_id(), cur_max_version);
1354
217
        if (!st.ok()) {
1355
0
            LOG(WARNING) << "failed to remove old version delete bitmap, st: " << st;
1356
0
        }
1357
217
    }
1358
752
    DBUG_EXECUTE_IF("CumulativeCompaction.modify_rowsets.delete_expired_stale_rowset",
1359
752
                    { tablet()->delete_expired_stale_rowset(); });
1360
752
    _tablet->prefill_dbm_agg_cache_after_compaction(_output_rowset);
1361
752
    return Status::OK();
1362
752
}
1363
1364
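// Note on the two-pass delete bitmap conversion in modify_rowsets() above,
// using the bounds passed to calc_compaction_output_rowset_delete_bitmap:
// the first pass converts bitmap entries for versions 0 .. version.second + 1
// without blocking new loads; the second pass, under the rowset update lock
// and the header lock, converts entries for versions version.second .. UINT64_MAX
// produced by loads that landed while the compaction was running, so no
// deletes are lost.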
bool CompactionMixin::_check_if_includes_input_rowsets(
1365
34
        const RowsetIdUnorderedSet& commit_rowset_ids_set) const {
1366
34
    std::vector<RowsetId> commit_rowset_ids {};
1367
34
    commit_rowset_ids.insert(commit_rowset_ids.end(), commit_rowset_ids_set.begin(),
1368
34
                             commit_rowset_ids_set.end());
1369
34
    std::sort(commit_rowset_ids.begin(), commit_rowset_ids.end());
1370
34
    std::vector<RowsetId> input_rowset_ids {};
1371
1.81k
    for (const auto& rowset : _input_rowsets) {
1372
1.81k
        input_rowset_ids.emplace_back(rowset->rowset_meta()->rowset_id());
1373
1.81k
    }
1374
34
    std::sort(input_rowset_ids.begin(), input_rowset_ids.end());
1375
34
    return std::includes(commit_rowset_ids.begin(), commit_rowset_ids.end(),
1376
34
                         input_rowset_ids.begin(), input_rowset_ids.end());
1377
34
}
1378
1379
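// `_check_if_includes_input_rowsets` relies on std::includes, which requires
// both ranges to be sorted with the same ordering. A self-contained sketch of
// the same subset test on plain ints (hypothetical example values):
static bool example_subset_check() {
    std::vector<int> commit_ids {1, 2, 3, 4}; // must be sorted
    std::vector<int> input_ids {2, 4};        // must be sorted
    // Returns true: every input id appears in the commit set.
    return std::includes(commit_ids.begin(), commit_ids.end(),
                         input_ids.begin(), input_ids.end());
}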
585
void CompactionMixin::update_compaction_level() {
1380
585
    auto* cumu_policy = tablet()->cumulative_compaction_policy();
1381
589
    if (cumu_policy && cumu_policy->name() == CUMULATIVE_TIME_SERIES_POLICY) {
1382
12
        int64_t compaction_level =
1383
12
                cumu_policy->get_compaction_level(tablet(), _input_rowsets, _output_rowset);
1384
12
        _output_rowset->rowset_meta()->set_compaction_level(compaction_level);
1385
12
    }
1386
585
}
1387
1388
590
Status Compaction::check_correctness() {
1389
    // 1. check row number
1390
590
    if (_input_row_num != _output_rowset->num_rows() + _stats.merged_rows + _stats.filtered_rows) {
1391
0
        return Status::Error<CHECK_LINES_ERROR>(
1392
0
                "row_num does not match between cumulative input and output! tablet={}, "
1393
0
                "input_row_num={}, merged_row_num={}, filtered_row_num={}, output_row_num={}",
1394
0
                _tablet->tablet_id(), _input_row_num, _stats.merged_rows, _stats.filtered_rows,
1395
0
                _output_rowset->num_rows());
1396
0
    }
1397
    // 2. check variant column path stats
1398
590
    RETURN_IF_ERROR(vectorized::schema_util::VariantCompactionUtil::check_path_stats(
1399
590
            _input_rowsets, _output_rowset, _tablet));
1400
590
    return Status::OK();
1401
590
}
1402
1403
1.63k
int64_t CompactionMixin::get_compaction_permits() {
1404
1.63k
    int64_t permits = 0;
1405
13.3k
    for (auto&& rowset : _input_rowsets) {
1406
13.3k
        permits += rowset->rowset_meta()->get_compaction_score();
1407
13.3k
    }
1408
1.63k
    return permits;
1409
1.63k
}
1410
1411
614
int64_t CompactionMixin::calc_input_rowsets_total_size() const {
1412
614
    int64_t input_rowsets_total_size = 0;
1413
5.12k
    for (const auto& rowset : _input_rowsets) {
1414
5.12k
        const auto& rowset_meta = rowset->rowset_meta();
1415
5.12k
        auto total_size = rowset_meta->total_disk_size();
1416
5.12k
        input_rowsets_total_size += total_size;
1417
5.12k
    }
1418
614
    return input_rowsets_total_size;
1419
614
}
1420
1421
614
int64_t CompactionMixin::calc_input_rowsets_row_num() const {
1422
614
    int64_t input_rowsets_row_num = 0;
1423
5.12k
    for (const auto& rowset : _input_rowsets) {
1424
5.12k
        const auto& rowset_meta = rowset->rowset_meta();
1425
5.12k
        auto num_rows = rowset_meta->num_rows();
1426
5.12k
        input_rowsets_row_num += num_rows;
1427
5.12k
    }
1428
614
    return input_rowsets_row_num;
1429
614
}
1430
1431
880
void Compaction::_load_segment_to_cache() {
1432
    // Load new rowset's segments to cache.
1433
880
    SegmentCacheHandle handle;
1434
880
    auto st = SegmentLoader::instance()->load_segments(
1435
880
            std::static_pointer_cast<BetaRowset>(_output_rowset), &handle, true);
1436
880
    if (!st.ok()) {
1437
0
        LOG(WARNING) << "failed to load segment to cache! output rowset version="
1438
0
                     << _output_rowset->start_version() << "-" << _output_rowset->end_version()
1439
0
                     << ".";
1440
0
    }
1441
880
}
1442
1443
0
Status CloudCompactionMixin::build_basic_info() {
1444
0
    _output_version =
1445
0
            Version(_input_rowsets.front()->start_version(), _input_rowsets.back()->end_version());
1446
1447
0
    _newest_write_timestamp = _input_rowsets.back()->newest_write_timestamp();
1448
1449
0
    std::vector<RowsetMetaSharedPtr> rowset_metas(_input_rowsets.size());
1450
0
    std::transform(_input_rowsets.begin(), _input_rowsets.end(), rowset_metas.begin(),
1451
0
                   [](const RowsetSharedPtr& rowset) { return rowset->rowset_meta(); });
1452
0
    if (is_index_change_compaction()) {
1453
0
        RETURN_IF_ERROR(rebuild_tablet_schema());
1454
0
    } else {
1455
0
        _cur_tablet_schema = _tablet->tablet_schema_with_merged_max_schema_version(rowset_metas);
1456
0
    }
1457
1458
    // If enable_vertical_compact_variant_subcolumns is true, we need to compact the variant subcolumns in separate column groups,
1460
    // so get_extended_compaction_schema will extend the schema for variant columns.
1460
0
    if (_enable_vertical_compact_variant_subcolumns) {
1461
0
        RETURN_IF_ERROR(
1462
0
                vectorized::schema_util::VariantCompactionUtil::get_extended_compaction_schema(
1463
0
                        _input_rowsets, _cur_tablet_schema));
1464
0
    }
1465
0
    return Status::OK();
1466
0
}
1467
1468
0
int64_t CloudCompactionMixin::get_compaction_permits() {
1469
0
    int64_t permits = 0;
1470
0
    for (auto&& rowset : _input_rowsets) {
1471
0
        permits += rowset->rowset_meta()->get_compaction_score();
1472
0
    }
1473
0
    return permits;
1474
0
}
1475
1476
CloudCompactionMixin::CloudCompactionMixin(CloudStorageEngine& engine, CloudTabletSPtr tablet,
1477
                                           const std::string& label)
1478
24
        : Compaction(tablet, label), _engine(engine) {
1479
24
    auto uuid = UUIDGenerator::instance()->next_uuid();
1480
24
    std::stringstream ss;
1481
24
    ss << uuid;
1482
24
    _uuid = ss.str();
1483
24
}
1484
1485
0
Status CloudCompactionMixin::execute_compact_impl(int64_t permits) {
1486
0
    OlapStopWatch watch;
1487
1488
0
    RETURN_IF_ERROR(build_basic_info());
1489
1490
0
    LOG(INFO) << "start " << compaction_name() << ". tablet=" << _tablet->tablet_id()
1491
0
              << ", output_version=" << _output_version << ", permits: " << permits;
1492
1493
0
    RETURN_IF_ERROR(merge_input_rowsets());
1494
1495
0
    DBUG_EXECUTE_IF("CloudFullCompaction::modify_rowsets.wrong_rowset_id", {
1496
0
        DCHECK(compaction_type() == ReaderType::READER_FULL_COMPACTION);
1497
0
        RowsetId id;
1498
0
        id.version = 2;
1499
0
        id.hi = _output_rowset->rowset_meta()->rowset_id().hi + ((int64_t)(1) << 56);
1500
0
        id.mi = _output_rowset->rowset_meta()->rowset_id().mi;
1501
0
        id.lo = _output_rowset->rowset_meta()->rowset_id().lo;
1502
0
        _output_rowset->rowset_meta()->set_rowset_id(id);
1503
0
        LOG(INFO) << "[Debug wrong rowset id]:"
1504
0
                  << _output_rowset->rowset_meta()->rowset_id().to_string();
1505
0
    })
1506
1507
    // Currently, the compaction level is only updated under the time_series policy.
1508
0
    update_compaction_level();
1509
1510
0
    RETURN_IF_ERROR(_engine.meta_mgr().commit_rowset(*_output_rowset->rowset_meta().get(), _uuid));
1511
1512
    // 4. modify rowsets in memory
1513
0
    RETURN_IF_ERROR(modify_rowsets());
1514
1515
    // update compaction status data
1516
0
    auto tablet = std::static_pointer_cast<CloudTablet>(_tablet);
1517
0
    tablet->local_read_time_us.fetch_add(_stats.cloud_local_read_time);
1518
0
    tablet->remote_read_time_us.fetch_add(_stats.cloud_remote_read_time);
1519
0
    tablet->exec_compaction_time_us.fetch_add(watch.get_elapse_time_us());
1520
1521
0
    return Status::OK();
1522
0
}
1523
1524
2
int64_t CloudCompactionMixin::initiator() const {
1525
2
    return HashUtil::hash64(_uuid.data(), _uuid.size(), 0) & std::numeric_limits<int64_t>::max();
1526
2
}
1527
1528
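// initiator() above clears the sign bit of a 64-bit hash by masking with
// std::numeric_limits<int64_t>::max(), guaranteeing a non-negative id.
// A minimal equivalent sketch (`example_non_negative_id` is a hypothetical name):
static int64_t example_non_negative_id(uint64_t h) {
    // Keeps the value within [0, INT64_MAX].
    return static_cast<int64_t>(h & std::numeric_limits<int64_t>::max());
}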
namespace cloud {
1529
size_t truncate_rowsets_by_txn_size(std::vector<RowsetSharedPtr>& rowsets, int64_t& kept_size_bytes,
1530
14
                                    int64_t& truncated_size_bytes) {
1531
14
    if (rowsets.empty()) {
1532
1
        kept_size_bytes = 0;
1533
1
        truncated_size_bytes = 0;
1534
1
        return 0;
1535
1
    }
1536
1537
13
    int64_t max_size = config::compaction_txn_max_size_bytes;
1538
13
    int64_t cumulative_meta_size = 0;
1539
13
    size_t keep_count = 0;
1540
1541
34
    for (size_t i = 0; i < rowsets.size(); ++i) {
1542
25
        const auto& rs = rowsets[i];
1543
1544
        // Estimate rowset meta size using doris_rowset_meta_to_cloud
1545
25
        auto cloud_meta = cloud::doris_rowset_meta_to_cloud(rs->rowset_meta()->get_rowset_pb(true));
1546
25
        int64_t rowset_meta_size = cloud_meta.ByteSizeLong();
1547
1548
25
        cumulative_meta_size += rowset_meta_size;
1549
1550
25
        if (keep_count > 0 && cumulative_meta_size > max_size) {
1551
            // Rollback and stop
1552
4
            cumulative_meta_size -= rowset_meta_size;
1553
4
            break;
1554
4
        }
1555
1556
21
        keep_count++;
1557
21
    }
1558
1559
    // Ensure at least 1 rowset is kept
1560
13
    if (keep_count == 0) {
1561
0
        keep_count = 1;
1562
        // Recalculate size for the first rowset
1563
0
        const auto& rs = rowsets[0];
1564
0
        auto cloud_meta = cloud::doris_rowset_meta_to_cloud(rs->rowset_meta()->get_rowset_pb());
1565
0
        cumulative_meta_size = cloud_meta.ByteSizeLong();
1566
0
    }
1567
1568
    // Calculate truncated size
1569
13
    int64_t truncated_total_size = 0;
1570
13
    size_t truncated_count = rowsets.size() - keep_count;
1571
13
    if (truncated_count > 0) {
1572
35
        for (size_t i = keep_count; i < rowsets.size(); ++i) {
1573
31
            auto cloud_meta =
1574
31
                    cloud::doris_rowset_meta_to_cloud(rowsets[i]->rowset_meta()->get_rowset_pb());
1575
31
            truncated_total_size += cloud_meta.ByteSizeLong();
1576
31
        }
1577
4
        rowsets.resize(keep_count);
1578
4
    }
1579
1580
13
    kept_size_bytes = cumulative_meta_size;
1581
13
    truncated_size_bytes = truncated_total_size;
1582
13
    return truncated_count;
1583
14
}
1584
} // namespace cloud
1585
1586
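// A worked example of cloud::truncate_rowsets_by_txn_size above (hypothetical
// meta sizes, with config::compaction_txn_max_size_bytes assumed to be 25):
//   rowset meta sizes: {10, 20, 30}
//   i=0: cumulative=10; keep_count=1 (the first rowset is always kept)
//   i=1: cumulative=30 > 25 with keep_count > 0 -> roll back to 10 and stop
//   result: keep_count=1, kept_size_bytes=10,
//           truncated_count=2, truncated_size_bytes=20+30=50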
5
size_t CloudCompactionMixin::apply_txn_size_truncation_and_log(const std::string& compaction_name) {
1587
5
    if (_input_rowsets.empty()) {
1588
1
        return 0;
1589
1
    }
1590
1591
4
    int64_t original_count = _input_rowsets.size();
1592
4
    int64_t original_start_version = _input_rowsets.front()->start_version();
1593
4
    int64_t original_end_version = _input_rowsets.back()->end_version();
1594
1595
4
    int64_t final_size = 0;
1596
4
    int64_t truncated_size = 0;
1597
4
    size_t truncated_count =
1598
4
            cloud::truncate_rowsets_by_txn_size(_input_rowsets, final_size, truncated_size);
1599
1600
4
    if (truncated_count > 0) {
1601
2
        int64_t original_size = final_size + truncated_size;
1602
2
        LOG(INFO) << compaction_name << " txn size estimation truncate"
1603
2
                  << ", tablet_id=" << _tablet->tablet_id() << ", original_version_range=["
1604
2
                  << original_start_version << "-" << original_end_version
1605
2
                  << "], final_version_range=[" << _input_rowsets.front()->start_version() << "-"
1606
2
                  << _input_rowsets.back()->end_version()
1607
2
                  << "], original_rowset_count=" << original_count
1608
2
                  << ", final_rowset_count=" << _input_rowsets.size()
1609
2
                  << ", truncated_rowset_count=" << truncated_count
1610
2
                  << ", original_size_bytes=" << original_size
1611
2
                  << ", final_size_bytes=" << final_size
1612
2
                  << ", truncated_size_bytes=" << truncated_size
1613
2
                  << ", threshold_bytes=" << config::compaction_txn_max_size_bytes;
1614
2
    }
1615
1616
4
    return truncated_count;
1617
5
}
1618
1619
0
Status CloudCompactionMixin::execute_compact() {
1620
0
    TEST_INJECTION_POINT("Compaction::do_compaction");
1621
0
    int64_t permits = get_compaction_permits();
1622
0
    HANDLE_EXCEPTION_IF_CATCH_EXCEPTION(
1623
0
            execute_compact_impl(permits), [&](const doris::Exception& ex) {
1624
0
                auto st = garbage_collection();
1625
0
                if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1626
0
                    _tablet->enable_unique_key_merge_on_write() && !st.ok()) {
1627
                    // If compaction fails, BE will try to abort the compaction job; the delete
1628
                    // bitmap lock is released if the abort succeeds. If the abort fails, the
1629
                    // delete bitmap lock is not released, so BE needs to send this RPC to MS
1630
                    // to try to release the delete bitmap lock.
1631
0
                    _engine.meta_mgr().remove_delete_bitmap_update_lock(
1632
0
                            _tablet->table_id(), COMPACTION_DELETE_BITMAP_LOCK_ID, initiator(),
1633
0
                            _tablet->tablet_id());
1634
0
                }
1635
0
            });
1636
1637
0
    DorisMetrics::instance()->remote_compaction_read_rows_total->increment(_input_row_num);
1638
0
    DorisMetrics::instance()->remote_compaction_write_rows_total->increment(
1639
0
            _output_rowset->num_rows());
1640
0
    DorisMetrics::instance()->remote_compaction_write_bytes_total->increment(
1641
0
            _output_rowset->total_disk_size());
1642
1643
0
    _load_segment_to_cache();
1644
0
    return Status::OK();
1645
0
}
1646
1647
0
Status CloudCompactionMixin::modify_rowsets() {
1648
0
    return Status::OK();
1649
0
}
1650
1651
0
Status CloudCompactionMixin::set_storage_resource_from_input_rowsets(RowsetWriterContext& ctx) {
1652
    // Set storage resource from input rowsets by iterating backwards to find the first rowset
1653
    // with non-empty resource_id. This handles two scenarios:
1654
    // 1. Hole rowsets compaction: Multiple hole rowsets may lack storage resource.
1655
    //    Example: [0-1, 2-2, 3-3, 4-4, 5-5] where 2-5 are hole rowsets.
1656
    //    If 0-1 lacks resource_id, then 2-5 also lack resource_id.
1657
    // 2. Schema change: New tablet may have later version empty rowsets without resource_id,
1658
    //    but middle rowsets get resource_id after historical rowsets are converted.
1659
    //    We iterate backwards to find the most recent rowset with valid resource_id.
1660
1661
0
    for (const auto& rowset : std::ranges::reverse_view(_input_rowsets)) {
1662
0
        const auto& resource_id = rowset->rowset_meta()->resource_id();
1663
1664
0
        if (!resource_id.empty()) {
1665
0
            ctx.storage_resource = *DORIS_TRY(rowset->rowset_meta()->remote_storage_resource());
1666
0
            return Status::OK();
1667
0
        }
1668
1669
        // Validate that non-empty rowsets (num_segments > 0) must have valid resource_id
1670
        // Only hole rowsets or empty rowsets are allowed to have empty resource_id
1671
0
        if (rowset->num_segments() > 0) {
1672
0
            auto error_msg = fmt::format(
1673
0
                    "Non-empty rowset must have valid resource_id. "
1674
0
                    "rowset_id={}, version=[{}-{}], is_hole_rowset={}, num_segments={}, "
1675
0
                    "tablet_id={}, table_id={}",
1676
0
                    rowset->rowset_id().to_string(), rowset->start_version(), rowset->end_version(),
1677
0
                    rowset->is_hole_rowset(), rowset->num_segments(), _tablet->tablet_id(),
1678
0
                    _tablet->table_id());
1679
1680
0
#ifndef BE_TEST
1681
0
            DCHECK(false) << error_msg;
1682
0
#endif
1683
1684
0
            return Status::InternalError<false>(error_msg);
1685
0
        }
1686
0
    }
1687
1688
0
    return Status::OK();
1689
0
}
1690
1691
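// The function above scans the input rowsets newest-to-oldest for the first
// one carrying a resource_id. A minimal sketch of that reverse-scan idiom
// (`example_latest_with_resource` is a hypothetical name):
static const RowsetSharedPtr* example_latest_with_resource(
        const std::vector<RowsetSharedPtr>& rowsets) {
    for (const auto& rs : std::ranges::reverse_view(rowsets)) {
        if (!rs->rowset_meta()->resource_id().empty()) {
            return &rs;
        }
    }
    return nullptr;
}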
0
Status CloudCompactionMixin::construct_output_rowset_writer(RowsetWriterContext& ctx) {
1692
    // only do index compaction for dup_keys and unique_keys with mow enabled
1693
0
    if (_enable_inverted_index_compaction && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1694
0
                                                _tablet->enable_unique_key_merge_on_write()) ||
1695
0
                                               _tablet->keys_type() == KeysType::DUP_KEYS))) {
1696
0
        construct_index_compaction_columns(ctx);
1697
0
    }
1698
1699
    // Use the storage resource of the most recent input rowset that has one.
1700
0
    RETURN_IF_ERROR(set_storage_resource_from_input_rowsets(ctx));
1701
1702
0
    ctx.txn_id = boost::uuids::hash_value(UUIDGenerator::instance()->next_uuid()) &
1703
0
                 std::numeric_limits<int64_t>::max(); // MUST be positive
1704
0
    ctx.txn_expiration = _expiration;
1705
1706
0
    ctx.version = _output_version;
1707
0
    ctx.rowset_state = VISIBLE;
1708
0
    ctx.segments_overlap = NONOVERLAPPING;
1709
0
    ctx.tablet_schema = _cur_tablet_schema;
1710
0
    ctx.newest_write_timestamp = _newest_write_timestamp;
1711
0
    ctx.write_type = DataWriteType::TYPE_COMPACTION;
1712
0
    ctx.compaction_type = compaction_type();
1713
0
    ctx.allow_packed_file = false;
1714
1715
    // We presume that the data involved in cumulative compaction is sufficiently 'hot'
1716
    // and should always be retained in the cache.
1717
    // TODO(gavin): Ensure that the retention of hot data is implemented with precision.
1718
1719
0
    ctx.write_file_cache = should_cache_compaction_output();
1720
0
    ctx.file_cache_ttl_sec = _tablet->ttl_seconds();
1721
0
    ctx.approximate_bytes_to_write = _input_rowsets_total_size;
1722
0
    ctx.tablet = _tablet;
1723
1724
0
    _output_rs_writer = DORIS_TRY(_tablet->create_rowset_writer(ctx, _is_vertical));
1725
0
    RETURN_IF_ERROR(
1726
0
            _engine.meta_mgr().prepare_rowset(*_output_rs_writer->rowset_meta().get(), _uuid));
1727
0
    return Status::OK();
1728
0
}
1729
1730
0
Status CloudCompactionMixin::garbage_collection() {
1731
0
    if (!config::enable_file_cache) {
1732
0
        return Status::OK();
1733
0
    }
1734
0
    if (_output_rs_writer) {
1735
0
        auto* beta_rowset_writer = dynamic_cast<BaseBetaRowsetWriter*>(_output_rs_writer.get());
1736
0
        DCHECK(beta_rowset_writer);
1737
0
        for (const auto& [_, file_writer] : beta_rowset_writer->get_file_writers()) {
1738
0
            auto file_key = io::BlockFileCache::hash(file_writer->path().filename().native());
1739
0
            auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key);
1740
0
            file_cache->remove_if_cached_async(file_key);
1741
0
        }
1742
0
        for (const auto& [_, index_writer] : beta_rowset_writer->index_file_writers()) {
1743
0
            for (const auto& file_name : index_writer->get_index_file_names()) {
1744
0
                auto file_key = io::BlockFileCache::hash(file_name);
1745
0
                auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key);
1746
0
                file_cache->remove_if_cached_async(file_key);
1747
0
            }
1748
0
        }
1749
0
    }
1750
0
    return Status::OK();
1751
0
}
1752
1753
0
void CloudCompactionMixin::update_compaction_level() {
1754
    // For index change compaction, the compaction level should not change,
1755
    // because the input rowset num is 1.
1756
0
    if (is_index_change_compaction()) {
1757
0
        DCHECK(_input_rowsets.size() == 1);
1758
0
        _output_rowset->rowset_meta()->set_compaction_level(
1759
0
                _input_rowsets.back()->rowset_meta()->compaction_level());
1760
0
    } else {
1761
0
        auto compaction_policy = _tablet->tablet_meta()->compaction_policy();
1762
0
        auto cumu_policy = _engine.cumu_compaction_policy(compaction_policy);
1763
0
        if (cumu_policy && cumu_policy->name() == CUMULATIVE_TIME_SERIES_POLICY) {
1764
0
            int64_t compaction_level = cumu_policy->get_compaction_level(
1765
0
                    cloud_tablet(), _input_rowsets, _output_rowset);
1766
0
            _output_rowset->rowset_meta()->set_compaction_level(compaction_level);
1767
0
        }
1768
0
    }
1769
0
}
1770
1771
// Should skip hole rowsets, otherwise the count will be wrong in MS.
1772
2
int64_t CloudCompactionMixin::num_input_rowsets() const {
1773
2
    int64_t count = 0;
1774
2
    for (const auto& r : _input_rowsets) {
1775
2
        if (!r->is_hole_rowset()) {
1776
2
            count++;
1777
2
        }
1778
2
    }
1779
2
    return count;
1780
2
}
1781
1782
8
bool CloudCompactionMixin::should_cache_compaction_output() {
1783
8
    if (compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1784
0
        return true;
1785
0
    }
1786
1787
8
    if (compaction_type() == ReaderType::READER_BASE_COMPACTION) {
1788
8
        double input_rowsets_hit_cache_ratio = 0.0;
1789
1790
8
        int64_t _input_rowsets_cached_size =
1791
8
                _input_rowsets_cached_data_size + _input_rowsets_cached_index_size;
1792
8
        if (_input_rowsets_total_size > 0) {
1793
7
            input_rowsets_hit_cache_ratio =
1794
7
                    double(_input_rowsets_cached_size) / double(_input_rowsets_total_size);
1795
7
        }
1796
1797
8
        LOG(INFO) << "CloudBaseCompaction should_cache_compaction_output"
1798
8
                  << ", tablet_id=" << _tablet->tablet_id()
1799
8
                  << ", input_rowsets_hit_cache_ratio=" << input_rowsets_hit_cache_ratio
1800
8
                  << ", _input_rowsets_cached_size=" << _input_rowsets_cached_size
1801
8
                  << ", _input_rowsets_total_size=" << _input_rowsets_total_size
1802
8
                  << ", enable_file_cache_keep_base_compaction_output="
1803
8
                  << config::enable_file_cache_keep_base_compaction_output
1804
8
                  << ", file_cache_keep_base_compaction_output_min_hit_ratio="
1805
8
                  << config::file_cache_keep_base_compaction_output_min_hit_ratio;
1806
1807
8
        if (config::enable_file_cache_keep_base_compaction_output) {
1808
0
            return true;
1809
0
        }
1810
1811
8
        if (input_rowsets_hit_cache_ratio >
1812
8
            config::file_cache_keep_base_compaction_output_min_hit_ratio) {
1813
3
            return true;
1814
3
        }
1815
8
    }
1816
5
    return false;
1817
8
}
1818
1819
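// A worked example of the cache-hit ratio rule above (hypothetical sizes):
// with 6 GB of cached data plus 2 GB of cached index out of a 10 GB input,
// input_rowsets_hit_cache_ratio = (6 + 2) / 10 = 0.8; if
// config::file_cache_keep_base_compaction_output_min_hit_ratio is 0.5, the
// base compaction output is written to the file cache.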
#include "common/compile_check_end.h"
1820
} // namespace doris