Coverage Report

Created: 2026-03-12 18:22

/root/doris/be/src/olap/compaction.cpp
Line | Count | Source  (lines without a count are non-executable; counts are abbreviated, e.g. 9.70k ≈ 9,700)
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "olap/compaction.h"
19
20
#include <fmt/format.h>
21
#include <gen_cpp/olap_file.pb.h>
22
#include <glog/logging.h>
23
24
#include <algorithm>
25
#include <atomic>
26
#include <cstdint>
27
#include <cstdlib>
28
#include <list>
29
#include <map>
30
#include <memory>
31
#include <mutex>
32
#include <nlohmann/json.hpp>
33
#include <numeric>
34
#include <ostream>
35
#include <set>
36
#include <shared_mutex>
37
#include <utility>
38
39
#include "cloud/cloud_meta_mgr.h"
40
#include "cloud/cloud_storage_engine.h"
41
#include "cloud/cloud_tablet.h"
42
#include "cloud/pb_convert.h"
43
#include "common/config.h"
44
#include "common/status.h"
45
#include "cpp/sync_point.h"
46
#include "io/cache/block_file_cache_factory.h"
47
#include "io/fs/file_system.h"
48
#include "io/fs/file_writer.h"
49
#include "io/fs/remote_file_system.h"
50
#include "io/io_common.h"
51
#include "olap/cumulative_compaction.h"
52
#include "olap/cumulative_compaction_policy.h"
53
#include "olap/cumulative_compaction_time_series_policy.h"
54
#include "olap/data_dir.h"
55
#include "olap/olap_common.h"
56
#include "olap/olap_define.h"
57
#include "olap/rowset/beta_rowset.h"
58
#include "olap/rowset/beta_rowset_reader.h"
59
#include "olap/rowset/beta_rowset_writer.h"
60
#include "olap/rowset/rowset.h"
61
#include "olap/rowset/rowset_fwd.h"
62
#include "olap/rowset/rowset_meta.h"
63
#include "olap/rowset/rowset_writer.h"
64
#include "olap/rowset/rowset_writer_context.h"
65
#include "olap/rowset/segment_v2/index_file_reader.h"
66
#include "olap/rowset/segment_v2/index_file_writer.h"
67
#include "olap/rowset/segment_v2/inverted_index_compaction.h"
68
#include "olap/rowset/segment_v2/inverted_index_desc.h"
69
#include "olap/rowset/segment_v2/inverted_index_fs_directory.h"
70
#include "olap/storage_engine.h"
71
#include "olap/storage_policy.h"
72
#include "olap/tablet.h"
73
#include "olap/tablet_meta.h"
74
#include "olap/tablet_meta_manager.h"
75
#include "olap/task/engine_checksum_task.h"
76
#include "olap/txn_manager.h"
77
#include "olap/utils.h"
78
#include "runtime/memory/mem_tracker_limiter.h"
79
#include "runtime/thread_context.h"
80
#include "util/doris_metrics.h"
81
#include "util/pretty_printer.h"
82
#include "util/time.h"
83
#include "util/trace.h"
84
#include "vec/common/schema_util.h"
85
86
using std::vector;
87
88
namespace doris {
89
using namespace ErrorCode;
90
namespace {
91
#include "common/compile_check_begin.h"
92
93
bool is_rowset_tidy(std::string& pre_max_key, bool& pre_rs_key_bounds_truncated,
94
81
                    const RowsetSharedPtr& rhs) {
95
81
    size_t min_tidy_size = config::ordered_data_compaction_min_segment_size;
96
81
    if (rhs->num_segments() == 0) {
97
24
        return true;
98
24
    }
99
57
    if (rhs->is_segments_overlapping()) {
100
0
        return false;
101
0
    }
102
    // check segment size
103
57
    auto* beta_rowset = reinterpret_cast<BetaRowset*>(rhs.get());
104
57
    std::vector<size_t> segments_size;
105
57
    RETURN_FALSE_IF_ERROR(beta_rowset->get_segments_size(&segments_size));
106
64
    for (auto segment_size : segments_size) {
107
        // if a segment is too small, we need to do compaction
108
64
        if (segment_size < min_tidy_size) {
109
18
            return false;
110
18
        }
111
64
    }
112
38
    std::string min_key;
113
38
    auto ret = rhs->first_key(&min_key);
114
38
    if (!ret) {
115
0
        return false;
116
0
    }
117
38
    bool cur_rs_key_bounds_truncated {rhs->is_segments_key_bounds_truncated()};
118
38
    if (!Slice::lhs_is_strictly_less_than_rhs(Slice {pre_max_key}, pre_rs_key_bounds_truncated,
119
38
                                              Slice {min_key}, cur_rs_key_bounds_truncated)) {
120
5
        return false;
121
5
    }
122
38
    CHECK(rhs->last_key(&pre_max_key));
123
33
    pre_rs_key_bounds_truncated = cur_rs_key_bounds_truncated;
124
33
    return true;
125
38
}
126
127
} // namespace
128
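is_rowset_tidy above is the per-rowset gate for ordered data compaction: a rowset qualifies only if its segments are non-overlapping, large enough, and start strictly after the previous rowset's last key. A minimal sketch of how the check is chained over version-ordered input rowsets (mirroring the loop in CompactionMixin::handle_ordered_data_compaction further down; variable names here are illustrative):

    std::string pre_max_key;
    bool pre_key_bounds_truncated = false;
    bool all_tidy = true;
    for (const auto& rs : input_rowsets) {   // rowsets ordered by version
        if (!is_rowset_tidy(pre_max_key, pre_key_bounds_truncated, rs)) {
            all_tidy = false;                // fall back to a real merge
            break;
        }
        // pre_max_key now holds this rowset's last key for the next iteration
    }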
129
Compaction::Compaction(BaseTabletSPtr tablet, const std::string& label)
130
        : _mem_tracker(
131
156k
                  MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::COMPACTION, label)),
132
156k
          _tablet(std::move(tablet)),
133
156k
          _is_vertical(config::enable_vertical_compaction),
134
156k
          _allow_delete_in_cumu_compaction(config::enable_delete_when_cumu_compaction),
135
          _enable_vertical_compact_variant_subcolumns(
136
156k
                  config::enable_vertical_compact_variant_subcolumns),
137
156k
          _enable_inverted_index_compaction(config::inverted_index_compaction_enable) {
138
156k
    init_profile(label);
139
156k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
140
156k
    _rowid_conversion = std::make_unique<RowIdConversion>();
141
156k
}
142
143
156k
Compaction::~Compaction() {
144
156k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
145
156k
    _output_rs_writer.reset();
146
156k
    _tablet.reset();
147
156k
    _input_rowsets.clear();
148
156k
    _output_rowset.reset();
149
156k
    _cur_tablet_schema.reset();
150
156k
    _rowid_conversion.reset();
151
156k
}
152
153
156k
void Compaction::init_profile(const std::string& label) {
154
156k
    _profile = std::make_unique<RuntimeProfile>(label);
155
156
156k
    _input_rowsets_data_size_counter =
157
156k
            ADD_COUNTER(_profile, "input_rowsets_data_size", TUnit::BYTES);
158
156k
    _input_rowsets_counter = ADD_COUNTER(_profile, "input_rowsets_count", TUnit::UNIT);
159
156k
    _input_row_num_counter = ADD_COUNTER(_profile, "input_row_num", TUnit::UNIT);
160
156k
    _input_segments_num_counter = ADD_COUNTER(_profile, "input_segments_num", TUnit::UNIT);
161
156k
    _merged_rows_counter = ADD_COUNTER(_profile, "merged_rows", TUnit::UNIT);
162
156k
    _filtered_rows_counter = ADD_COUNTER(_profile, "filtered_rows", TUnit::UNIT);
163
156k
    _output_rowset_data_size_counter =
164
156k
            ADD_COUNTER(_profile, "output_rowset_data_size", TUnit::BYTES);
165
156k
    _output_row_num_counter = ADD_COUNTER(_profile, "output_row_num", TUnit::UNIT);
166
156k
    _output_segments_num_counter = ADD_COUNTER(_profile, "output_segments_num", TUnit::UNIT);
167
156k
    _merge_rowsets_latency_timer = ADD_TIMER(_profile, "merge_rowsets_latency");
168
156k
}
169
170
9.65k
int64_t Compaction::merge_way_num() {
171
9.65k
    int64_t way_num = 0;
172
74.7k
    for (auto&& rowset : _input_rowsets) {
173
74.7k
        way_num += rowset->rowset_meta()->get_merge_way_num();
174
74.7k
    }
175
176
9.65k
    return way_num;
177
9.65k
}
178
179
9.70k
Status Compaction::merge_input_rowsets() {
180
9.70k
    std::vector<RowsetReaderSharedPtr> input_rs_readers;
181
9.70k
    input_rs_readers.reserve(_input_rowsets.size());
182
75.0k
    for (auto& rowset : _input_rowsets) {
183
75.0k
        RowsetReaderSharedPtr rs_reader;
184
75.0k
        RETURN_IF_ERROR(rowset->create_reader(&rs_reader));
185
75.0k
        input_rs_readers.push_back(std::move(rs_reader));
186
75.0k
    }
187
188
9.70k
    RowsetWriterContext ctx;
189
9.70k
    RETURN_IF_ERROR(construct_output_rowset_writer(ctx));
190
191
    // write merged rows to output rowset
192
    // The test results show that the merger has a low memory footprint, so there is no need to track its mem pool
193
    // if ctx.columns_to_do_index_compaction.size() > 0, it means we need to do inverted index compaction.
194
    // the row ID conversion matrix needs to be used for inverted index compaction.
195
9.70k
    if (!ctx.columns_to_do_index_compaction.empty() ||
196
9.70k
        (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
197
9.02k
         _tablet->enable_unique_key_merge_on_write())) {
198
4.15k
        _stats.rowid_conversion = _rowid_conversion.get();
199
4.15k
    }
200
201
9.70k
    int64_t way_num = merge_way_num();
202
203
9.70k
    Status res;
204
9.70k
    {
205
9.70k
        SCOPED_TIMER(_merge_rowsets_latency_timer);
206
        // 1. Merge segment files and write bkd inverted index
207
9.70k
        if (_is_vertical) {
208
9.69k
            if (!_tablet->tablet_schema()->cluster_key_uids().empty()) {
209
162
                RETURN_IF_ERROR(update_delete_bitmap());
210
162
            }
211
9.69k
            res = Merger::vertical_merge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema,
212
9.69k
                                                 input_rs_readers, _output_rs_writer.get(),
213
9.69k
                                                 cast_set<uint32_t>(get_avg_segment_rows()),
214
9.69k
                                                 way_num, &_stats);
215
9.69k
        } else {
216
10
            if (!_tablet->tablet_schema()->cluster_key_uids().empty()) {
217
0
                return Status::InternalError(
218
0
                        "mow table with cluster keys does not support non vertical compaction");
219
0
            }
220
10
            res = Merger::vmerge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema,
221
10
                                         input_rs_readers, _output_rs_writer.get(), &_stats);
222
10
        }
223
224
9.70k
        _tablet->last_compaction_status = res;
225
9.70k
        if (!res.ok()) {
226
0
            return res;
227
0
        }
228
        // 2. Merge the remaining inverted index files of the string type
229
9.70k
        RETURN_IF_ERROR(do_inverted_index_compaction());
230
9.70k
    }
231
232
9.70k
    COUNTER_UPDATE(_merged_rows_counter, _stats.merged_rows);
233
9.70k
    COUNTER_UPDATE(_filtered_rows_counter, _stats.filtered_rows);
234
235
    // 3. In the `build`, `_close_file_writers` is called to close the inverted index file writer and write the final compound index file.
236
9.70k
    RETURN_NOT_OK_STATUS_WITH_WARN(_output_rs_writer->build(_output_rowset),
237
9.70k
                                   fmt::format("rowset writer build failed. output_version: {}",
238
9.70k
                                               _output_version.to_string()));
239
240
    // When true, writers should remove variant extracted subcolumns from the
241
    // schema stored in RowsetMeta. This is used when compaction temporarily
242
    // extends schema to split variant subcolumns for vertical compaction but
243
    // the final rowset meta must not persist those extracted subcolumns.
244
9.70k
    if (_enable_vertical_compact_variant_subcolumns &&
245
9.70k
        (_cur_tablet_schema->num_variant_columns() > 0)) {
246
488
        _output_rowset->rowset_meta()->set_tablet_schema(
247
488
                _cur_tablet_schema->copy_without_variant_extracted_columns());
248
488
    }
249
250
    //RETURN_IF_ERROR(_engine.meta_mgr().commit_rowset(*_output_rowset->rowset_meta().get()));
251
9.70k
    set_delete_predicate_for_output_rowset();
252
253
9.70k
    _local_read_bytes_total = _stats.bytes_read_from_local;
254
9.70k
    _remote_read_bytes_total = _stats.bytes_read_from_remote;
255
9.70k
    DorisMetrics::instance()->local_compaction_read_bytes_total->increment(_local_read_bytes_total);
256
9.70k
    DorisMetrics::instance()->remote_compaction_read_bytes_total->increment(
257
9.70k
            _remote_read_bytes_total);
258
9.70k
    DorisMetrics::instance()->local_compaction_write_bytes_total->increment(
259
9.70k
            _stats.cached_bytes_total);
260
261
9.70k
    COUNTER_UPDATE(_output_rowset_data_size_counter, _output_rowset->data_disk_size());
262
9.70k
    COUNTER_UPDATE(_output_row_num_counter, _output_rowset->num_rows());
263
9.70k
    COUNTER_UPDATE(_output_segments_num_counter, _output_rowset->num_segments());
264
265
9.70k
    return check_correctness();
266
9.70k
}
267
268
9.66k
void Compaction::set_delete_predicate_for_output_rowset() {
269
    // Now that we support deletes in cumu compaction, to make all data in rowsets whose version
270
    // is below output_version deletable in a future base compaction, we should carry
271
    // all delete predicates in the output rowset.
272
    // Output start version > 2 means we must set the delete predicate in the output rowset
273
9.66k
    if (_output_rowset->version().first > 2 &&
274
9.66k
        (_allow_delete_in_cumu_compaction || is_index_change_compaction())) {
275
149
        DeletePredicatePB delete_predicate;
276
149
        std::accumulate(_input_rowsets.begin(), _input_rowsets.end(), &delete_predicate,
277
149
                        [](DeletePredicatePB* delete_predicate, const RowsetSharedPtr& rs) {
278
149
                            if (rs->rowset_meta()->has_delete_predicate()) {
279
3
                                delete_predicate->MergeFrom(rs->rowset_meta()->delete_predicate());
280
3
                            }
281
149
                            return delete_predicate;
282
149
                        });
283
        // now version in delete_predicate is deprecated
284
149
        if (!delete_predicate.in_predicates().empty() ||
285
149
            !delete_predicate.sub_predicates_v2().empty() ||
286
149
            !delete_predicate.sub_predicates().empty()) {
287
3
            _output_rowset->rowset_meta()->set_delete_predicate(std::move(delete_predicate));
288
3
        }
289
149
    }
290
9.66k
}
291
292
9.65k
int64_t Compaction::get_avg_segment_rows() {
293
    // take care of empty rowset
294
    // _input_rowsets_data_size is the total disk_size of the input rowsets; this size is the
295
    // final size after encoding and compression, so the expected dest segment file size
296
    // on disk is config::vertical_compaction_max_segment_size
297
9.65k
    const auto& meta = _tablet->tablet_meta();
298
9.65k
    if (meta->compaction_policy() == CUMULATIVE_TIME_SERIES_POLICY) {
299
3
        int64_t compaction_goal_size_mbytes = meta->time_series_compaction_goal_size_mbytes();
300
        // The output segment rows should be less than total input rows
301
3
        return std::min((compaction_goal_size_mbytes * 1024 * 1024 * 2) /
302
3
                                (_input_rowsets_data_size / (_input_row_num + 1) + 1),
303
3
                        _input_row_num + 1);
304
3
    }
305
9.65k
    return std::min(config::vertical_compaction_max_segment_size /
306
9.65k
                            (_input_rowsets_data_size / (_input_row_num + 1) + 1),
307
9.65k
                    _input_row_num + 1);
308
9.65k
}
309
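To make the formula in get_avg_segment_rows concrete, a worked example with purely hypothetical inputs: if _input_rowsets_data_size is 1 GiB, _input_row_num is 10,000,000, and config::vertical_compaction_max_segment_size is 256 MiB, the estimated bytes per row is 1,073,741,824 / 10,000,001 + 1 = 108, so the target is 268,435,456 / 108 ≈ 2.49 million rows per output segment, capped at _input_row_num + 1. Under the time series policy the numerator is time_series_compaction_goal_size_mbytes * 1024 * 1024 * 2 instead of the fixed config value.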
310
CompactionMixin::CompactionMixin(StorageEngine& engine, TabletSharedPtr tablet,
311
                                 const std::string& label)
312
69.7k
        : Compaction(tablet, label), _engine(engine) {}
313
314
69.7k
CompactionMixin::~CompactionMixin() {
315
69.7k
    if (_state != CompactionState::SUCCESS && _output_rowset != nullptr) {
316
6
        if (!_output_rowset->is_local()) {
317
0
            tablet()->record_unused_remote_rowset(_output_rowset->rowset_id(),
318
0
                                                  _output_rowset->rowset_meta()->resource_id(),
319
0
                                                  _output_rowset->num_segments());
320
0
            return;
321
0
        }
322
6
        _engine.add_unused_rowset(_output_rowset);
323
6
    }
324
69.7k
}
325
326
621k
Tablet* CompactionMixin::tablet() {
327
621k
    return static_cast<Tablet*>(_tablet.get());
328
621k
}
329
330
14
Status CompactionMixin::do_compact_ordered_rowsets() {
331
14
    RETURN_IF_ERROR(build_basic_info(true));
332
14
    RowsetWriterContext ctx;
333
14
    RETURN_IF_ERROR(construct_output_rowset_writer(ctx));
334
335
14
    LOG(INFO) << "start to do ordered data compaction, tablet=" << _tablet->tablet_id()
336
14
              << ", output_version=" << _output_version;
337
    // link data to new rowset
338
14
    auto seg_id = 0;
339
14
    bool segments_key_bounds_truncated {false};
340
14
    std::vector<KeyBoundsPB> segment_key_bounds;
341
14
    std::vector<uint32_t> num_segment_rows;
342
52
    for (auto rowset : _input_rowsets) {
343
52
        RETURN_IF_ERROR(rowset->link_files_to(tablet()->tablet_path(),
344
52
                                              _output_rs_writer->rowset_id(), seg_id));
345
52
        seg_id += rowset->num_segments();
346
52
        segments_key_bounds_truncated |= rowset->is_segments_key_bounds_truncated();
347
52
        std::vector<KeyBoundsPB> key_bounds;
348
52
        RETURN_IF_ERROR(rowset->get_segments_key_bounds(&key_bounds));
349
52
        segment_key_bounds.insert(segment_key_bounds.end(), key_bounds.begin(), key_bounds.end());
350
52
        std::vector<uint32_t> input_segment_rows;
351
52
        rowset->get_num_segment_rows(&input_segment_rows);
352
52
        num_segment_rows.insert(num_segment_rows.end(), input_segment_rows.begin(),
353
52
                                input_segment_rows.end());
354
52
    }
355
    // build output rowset
356
14
    RowsetMetaSharedPtr rowset_meta = std::make_shared<RowsetMeta>();
357
14
    rowset_meta->set_num_rows(_input_row_num);
358
14
    rowset_meta->set_total_disk_size(_input_rowsets_data_size + _input_rowsets_index_size);
359
14
    rowset_meta->set_data_disk_size(_input_rowsets_data_size);
360
14
    rowset_meta->set_index_disk_size(_input_rowsets_index_size);
361
14
    rowset_meta->set_empty(_input_row_num == 0);
362
14
    rowset_meta->set_num_segments(_input_num_segments);
363
14
    rowset_meta->set_segments_overlap(NONOVERLAPPING);
364
14
    rowset_meta->set_rowset_state(VISIBLE);
365
14
    rowset_meta->set_segments_key_bounds_truncated(segments_key_bounds_truncated);
366
14
    rowset_meta->set_segments_key_bounds(segment_key_bounds);
367
14
    rowset_meta->set_num_segment_rows(num_segment_rows);
368
369
14
    _output_rowset = _output_rs_writer->manual_build(rowset_meta);
370
371
    // 2. check variant column path stats
372
14
    RETURN_IF_ERROR(vectorized::schema_util::VariantCompactionUtil::check_path_stats(
373
14
            _input_rowsets, _output_rowset, _tablet));
374
14
    return Status::OK();
375
14
}
376
377
38
Status CompactionMixin::build_basic_info(bool is_ordered_compaction) {
378
145
    for (auto& rowset : _input_rowsets) {
379
145
        const auto& rowset_meta = rowset->rowset_meta();
380
145
        auto index_size = rowset_meta->index_disk_size();
381
145
        auto total_size = rowset_meta->total_disk_size();
382
145
        auto data_size = rowset_meta->data_disk_size();
383
        // corrupted index size caused by a bug before version 2.1.5 or 3.0.0
384
        // try to get real index size from disk.
385
146
        if (index_size < 0 || index_size > total_size * 2) {
386
0
            LOG(ERROR) << "invalid index size:" << index_size << " total size:" << total_size
387
0
                       << " data size:" << data_size << " tablet:" << rowset_meta->tablet_id()
388
0
                       << " rowset:" << rowset_meta->rowset_id();
389
0
            index_size = 0;
390
0
            auto st = rowset->get_inverted_index_size(&index_size);
391
0
            if (!st.ok()) {
392
0
                LOG(ERROR) << "failed to get inverted index size. res=" << st;
393
0
            }
394
0
        }
395
145
        _input_rowsets_data_size += data_size;
396
145
        _input_rowsets_index_size += index_size;
397
145
        _input_rowsets_total_size += total_size;
398
145
        _input_row_num += rowset->num_rows();
399
145
        _input_num_segments += rowset->num_segments();
400
145
    }
401
38
    COUNTER_UPDATE(_input_rowsets_data_size_counter, _input_rowsets_data_size);
402
38
    COUNTER_UPDATE(_input_row_num_counter, _input_row_num);
403
38
    COUNTER_UPDATE(_input_segments_num_counter, _input_num_segments);
404
405
38
    TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::build_basic_info",
406
38
                                      Status::OK());
407
408
38
    _output_version =
409
38
            Version(_input_rowsets.front()->start_version(), _input_rowsets.back()->end_version());
410
411
38
    _newest_write_timestamp = _input_rowsets.back()->newest_write_timestamp();
412
413
38
    std::vector<RowsetMetaSharedPtr> rowset_metas(_input_rowsets.size());
414
38
    std::transform(_input_rowsets.begin(), _input_rowsets.end(), rowset_metas.begin(),
415
276
                   [](const RowsetSharedPtr& rowset) { return rowset->rowset_meta(); });
416
38
    _cur_tablet_schema = _tablet->tablet_schema_with_merged_max_schema_version(rowset_metas);
417
418
    // if enable_vertical_compact_variant_subcolumns is true, we need to compact the variant subcolumns in separate column groups
419
    // so get_extended_compaction_schema will extend the schema for variant columns
420
    // for ordered compaction, we don't need to extend the schema for variant columns
421
38
    if (_enable_vertical_compact_variant_subcolumns && !is_ordered_compaction) {
422
30
        RETURN_IF_ERROR(
423
30
                vectorized::schema_util::VariantCompactionUtil::get_extended_compaction_schema(
424
30
                        _input_rowsets, _cur_tablet_schema));
425
30
    }
426
38
    return Status::OK();
427
38
}
428
429
50
bool CompactionMixin::handle_ordered_data_compaction() {
430
50
    if (!config::enable_ordered_data_compaction) {
431
0
        return false;
432
0
    }
433
50
    if (compaction_type() == ReaderType::READER_COLD_DATA_COMPACTION ||
434
50
        compaction_type() == ReaderType::READER_FULL_COMPACTION) {
435
        // The remote file system and full compaction do not support linking files.
436
0
        return false;
437
0
    }
438
50
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
439
50
        _tablet->enable_unique_key_merge_on_write()) {
440
12
        return false;
441
12
    }
442
443
38
    if (_tablet->tablet_meta()->tablet_schema()->skip_write_index_on_load()) {
444
        // Expected to create index through normal compaction
445
0
        return false;
446
0
    }
447
448
    // check delete version: if the compaction type is base compaction and
449
    // an input rowset has a delete version, use the original compaction path
450
38
    if (compaction_type() == ReaderType::READER_BASE_COMPACTION ||
451
38
        (_allow_delete_in_cumu_compaction &&
452
38
         compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION)) {
453
0
        for (auto& rowset : _input_rowsets) {
454
0
            if (rowset->rowset_meta()->has_delete_predicate()) {
455
0
                return false;
456
0
            }
457
0
        }
458
0
    }
459
460
    // check if rowsets are tidy so we can just modify meta and link
461
    // files to handle compaction
462
38
    auto input_size = _input_rowsets.size();
463
38
    std::string pre_max_key;
464
38
    bool pre_rs_key_bounds_truncated {false};
465
95
    for (auto i = 0; i < input_size; ++i) {
466
81
        if (!is_rowset_tidy(pre_max_key, pre_rs_key_bounds_truncated, _input_rowsets[i])) {
467
24
            if (i <= input_size / 2) {
468
24
                return false;
469
24
            } else {
470
0
                _input_rowsets.resize(i);
471
0
                break;
472
0
            }
473
24
        }
474
81
    }
475
    // most rowsets of the current compaction are nonoverlapping
476
    // just handle nonoverlapping rowsets
477
14
    auto st = do_compact_ordered_rowsets();
478
14
    if (!st.ok()) {
479
0
        LOG(WARNING) << "failed to compact ordered rowsets: " << st;
480
0
        _pending_rs_guard.drop();
481
0
    }
482
483
14
    return st.ok();
484
38
}
485
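A worked example of the early-exit heuristic in handle_ordered_data_compaction above, with hypothetical inputs: given 10 input rowsets where the first seven pass is_rowset_tidy and the eighth does not, the loop fails at i = 7; since 7 > 10 / 2, _input_rowsets is resized to the seven-rowset tidy prefix and do_compact_ordered_rowsets links their files. If instead the third rowset failed the check, i = 2 <= 10 / 2 and the function returns false, so the caller falls back to a normal merge-based compaction.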
486
38
Status CompactionMixin::execute_compact() {
487
38
    uint32_t checksum_before;
488
38
    uint32_t checksum_after;
489
38
    bool enable_compaction_checksum = config::enable_compaction_checksum;
490
38
    if (enable_compaction_checksum) {
491
0
        EngineChecksumTask checksum_task(_engine, _tablet->tablet_id(), _tablet->schema_hash(),
492
0
                                         _input_rowsets.back()->end_version(), &checksum_before);
493
0
        RETURN_IF_ERROR(checksum_task.execute());
494
0
    }
495
496
38
    auto* data_dir = tablet()->data_dir();
497
38
    int64_t permits = get_compaction_permits();
498
38
    data_dir->disks_compaction_score_increment(permits);
499
38
    data_dir->disks_compaction_num_increment(1);
500
501
39
    auto record_compaction_stats = [&](const doris::Exception& ex) {
502
39
        _tablet->compaction_count.fetch_add(1, std::memory_order_relaxed);
503
39
        data_dir->disks_compaction_score_increment(-permits);
504
39
        data_dir->disks_compaction_num_increment(-1);
505
39
    };
506
507
38
    HANDLE_EXCEPTION_IF_CATCH_EXCEPTION(execute_compact_impl(permits), record_compaction_stats);
508
38
    record_compaction_stats(doris::Exception());
509
510
38
    if (enable_compaction_checksum) {
511
0
        EngineChecksumTask checksum_task(_engine, _tablet->tablet_id(), _tablet->schema_hash(),
512
0
                                         _input_rowsets.back()->end_version(), &checksum_after);
513
0
        RETURN_IF_ERROR(checksum_task.execute());
514
0
        if (checksum_before != checksum_after) {
515
0
            return Status::InternalError(
516
0
                    "compaction tablet checksum not consistent, before={}, after={}, tablet_id={}",
517
0
                    checksum_before, checksum_after, _tablet->tablet_id());
518
0
        }
519
0
    }
520
521
38
    DorisMetrics::instance()->local_compaction_read_rows_total->increment(_input_row_num);
522
38
    DorisMetrics::instance()->local_compaction_read_bytes_total->increment(
523
38
            _input_rowsets_total_size);
524
525
38
    TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::execute_compact", Status::OK());
526
527
38
    DorisMetrics::instance()->local_compaction_write_rows_total->increment(
528
38
            _output_rowset->num_rows());
529
38
    DorisMetrics::instance()->local_compaction_write_bytes_total->increment(
530
38
            _output_rowset->total_disk_size());
531
532
38
    _load_segment_to_cache();
533
38
    return Status::OK();
534
38
}
535
536
38
Status CompactionMixin::execute_compact_impl(int64_t permits) {
537
38
    OlapStopWatch watch;
538
539
38
    if (handle_ordered_data_compaction()) {
540
8
        RETURN_IF_ERROR(modify_rowsets());
541
8
        LOG(INFO) << "succeed to do ordered data " << compaction_name()
542
8
                  << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
543
8
                  << ", disk=" << tablet()->data_dir()->path()
544
8
                  << ", segments=" << _input_num_segments << ", input_row_num=" << _input_row_num
545
8
                  << ", output_row_num=" << _output_rowset->num_rows()
546
8
                  << ", input_rowsets_data_size=" << _input_rowsets_data_size
547
8
                  << ", input_rowsets_index_size=" << _input_rowsets_index_size
548
8
                  << ", input_rowsets_total_size=" << _input_rowsets_total_size
549
8
                  << ", output_rowset_data_size=" << _output_rowset->data_disk_size()
550
8
                  << ", output_rowset_index_size=" << _output_rowset->index_disk_size()
551
8
                  << ", output_rowset_total_size=" << _output_rowset->total_disk_size()
552
8
                  << ". elapsed time=" << watch.get_elapse_second() << "s.";
553
8
        _state = CompactionState::SUCCESS;
554
8
        return Status::OK();
555
8
    }
556
30
    RETURN_IF_ERROR(build_basic_info());
557
558
30
    TEST_SYNC_POINT_RETURN_WITH_VALUE("compaction::CompactionMixin::execute_compact_impl",
559
30
                                      Status::OK());
560
561
30
    VLOG_DEBUG << "dump tablet schema: " << _cur_tablet_schema->dump_structure();
562
563
30
    LOG(INFO) << "start " << compaction_name() << ". tablet=" << _tablet->tablet_id()
564
30
              << ", output_version=" << _output_version << ", permits: " << permits;
565
566
30
    RETURN_IF_ERROR(merge_input_rowsets());
567
568
    // Currently, the compaction level is only updated under the time series policy.
569
30
    update_compaction_level();
570
571
30
    RETURN_IF_ERROR(modify_rowsets());
572
573
30
    auto* cumu_policy = tablet()->cumulative_compaction_policy();
574
30
    DCHECK(cumu_policy);
575
30
    LOG(INFO) << "succeed to do " << compaction_name() << " is_vertical=" << _is_vertical
576
30
              << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
577
30
              << ", current_max_version=" << tablet()->max_version().second
578
30
              << ", disk=" << tablet()->data_dir()->path()
579
30
              << ", input_segments=" << _input_num_segments << ", input_rowsets_data_size="
580
30
              << PrettyPrinter::print_bytes(_input_rowsets_data_size)
581
30
              << ", input_rowsets_index_size="
582
30
              << PrettyPrinter::print_bytes(_input_rowsets_index_size)
583
30
              << ", input_rowsets_total_size="
584
30
              << PrettyPrinter::print_bytes(_input_rowsets_total_size)
585
30
              << ", output_rowset_data_size="
586
30
              << PrettyPrinter::print_bytes(_output_rowset->data_disk_size())
587
30
              << ", output_rowset_index_size="
588
30
              << PrettyPrinter::print_bytes(_output_rowset->index_disk_size())
589
30
              << ", output_rowset_total_size="
590
30
              << PrettyPrinter::print_bytes(_output_rowset->total_disk_size())
591
30
              << ", input_row_num=" << _input_row_num
592
30
              << ", output_row_num=" << _output_rowset->num_rows()
593
30
              << ", filtered_row_num=" << _stats.filtered_rows
594
30
              << ", merged_row_num=" << _stats.merged_rows
595
30
              << ". elapsed time=" << watch.get_elapse_second()
596
30
              << "s. cumulative_compaction_policy=" << cumu_policy->name()
597
30
              << ", compact_row_per_second="
598
30
              << cast_set<double>(_input_row_num) / watch.get_elapse_second();
599
600
30
    _state = CompactionState::SUCCESS;
601
602
30
    return Status::OK();
603
30
}
604
605
9.72k
Status Compaction::do_inverted_index_compaction() {
606
9.72k
    const auto& ctx = _output_rs_writer->context();
607
9.72k
    if (!_enable_inverted_index_compaction || _input_row_num <= 0 ||
608
9.72k
        ctx.columns_to_do_index_compaction.empty()) {
609
9.46k
        return Status::OK();
610
9.46k
    }
611
612
261
    auto error_handler = [this](int64_t index_id, int64_t column_uniq_id) {
613
2
        LOG(WARNING) << "failed to do index compaction"
614
2
                     << ". tablet=" << _tablet->tablet_id() << ". column uniq id=" << column_uniq_id
615
2
                     << ". index_id=" << index_id;
616
4
        for (auto& rowset : _input_rowsets) {
617
4
            rowset->set_skip_index_compaction(cast_set<int32_t>(column_uniq_id));
618
4
            LOG(INFO) << "mark skipping inverted index compaction next time"
619
4
                      << ". tablet=" << _tablet->tablet_id() << ", rowset=" << rowset->rowset_id()
620
4
                      << ", column uniq id=" << column_uniq_id << ", index_id=" << index_id;
621
4
        }
622
2
    };
623
624
261
    DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_rowid_conversion_null",
625
261
                    { _stats.rowid_conversion = nullptr; })
626
261
    if (!_stats.rowid_conversion) {
627
0
        LOG(WARNING) << "failed to do index compaction, rowid conversion is null"
628
0
                     << ". tablet=" << _tablet->tablet_id()
629
0
                     << ", input row number=" << _input_row_num;
630
0
        mark_skip_index_compaction(ctx, error_handler);
631
632
0
        return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
633
0
                "failed to do index compaction, rowid conversion is null. tablet={}",
634
0
                _tablet->tablet_id());
635
0
    }
636
637
261
    OlapStopWatch inverted_watch;
638
639
    // translation vec
640
    // <<dest_idx_num, dest_docId>>
641
    // the first level vector: index indicates src segment.
642
    // the second level vector: index indicates row id of source segment,
643
    // value indicates row id of destination segment.
644
    // <UINT32_MAX, UINT32_MAX> indicates current row not exist.
645
261
    const auto& trans_vec = _stats.rowid_conversion->get_rowid_conversion_map();
646
647
    // source rowset,segment -> index_id
648
261
    const auto& src_seg_to_id_map = _stats.rowid_conversion->get_src_segment_to_id_map();
649
650
    // dest rowset id
651
261
    RowsetId dest_rowset_id = _stats.rowid_conversion->get_dst_rowset_id();
652
    // dest segment id -> num rows
653
261
    std::vector<uint32_t> dest_segment_num_rows;
654
261
    RETURN_IF_ERROR(_output_rs_writer->get_segment_num_rows(&dest_segment_num_rows));
655
656
261
    auto src_segment_num = src_seg_to_id_map.size();
657
261
    auto dest_segment_num = dest_segment_num_rows.size();
658
659
    // when all the input rowsets are deleted, the output rowset will be empty and dest_segment_num will be 0.
660
261
    if (dest_segment_num <= 0) {
661
2
        LOG(INFO) << "skip doing index compaction due to no output segments"
662
2
                  << ". tablet=" << _tablet->tablet_id() << ", input row number=" << _input_row_num
663
2
                  << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
664
2
        return Status::OK();
665
2
    }
666
667
    // Only write info files when debug index compaction is enabled.
668
    // The files are used to debug index compaction and work with index_tool.
669
259
    if (config::debug_inverted_index_compaction) {
670
        // src index files
671
        // format: rowsetId_segmentId
672
0
        std::vector<std::string> src_index_files(src_segment_num);
673
0
        for (const auto& m : src_seg_to_id_map) {
674
0
            std::pair<RowsetId, uint32_t> p = m.first;
675
0
            src_index_files[m.second] = p.first.to_string() + "_" + std::to_string(p.second);
676
0
        }
677
678
        // dest index files
679
        // format: rowsetId_segmentId
680
0
        std::vector<std::string> dest_index_files(dest_segment_num);
681
0
        for (int i = 0; i < dest_segment_num; ++i) {
682
0
            auto prefix = dest_rowset_id.to_string() + "_" + std::to_string(i);
683
0
            dest_index_files[i] = prefix;
684
0
        }
685
686
0
        auto write_json_to_file = [&](const nlohmann::json& json_obj,
687
0
                                      const std::string& file_name) {
688
0
            io::FileWriterPtr file_writer;
689
0
            std::string file_path =
690
0
                    fmt::format("{}/{}.json", std::string(getenv("LOG_DIR")), file_name);
691
0
            RETURN_IF_ERROR(io::global_local_filesystem()->create_file(file_path, &file_writer));
692
0
            RETURN_IF_ERROR(file_writer->append(json_obj.dump()));
693
0
            RETURN_IF_ERROR(file_writer->append("\n"));
694
0
            return file_writer->close();
695
0
        };
696
697
        // Convert trans_vec to JSON and print it
698
0
        nlohmann::json trans_vec_json = trans_vec;
699
0
        auto output_version =
700
0
                _output_version.to_string().substr(1, _output_version.to_string().size() - 2);
701
0
        RETURN_IF_ERROR(write_json_to_file(
702
0
                trans_vec_json,
703
0
                fmt::format("trans_vec_{}_{}", _tablet->tablet_id(), output_version)));
704
705
0
        nlohmann::json src_index_files_json = src_index_files;
706
0
        RETURN_IF_ERROR(write_json_to_file(
707
0
                src_index_files_json,
708
0
                fmt::format("src_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
709
710
0
        nlohmann::json dest_index_files_json = dest_index_files;
711
0
        RETURN_IF_ERROR(write_json_to_file(
712
0
                dest_index_files_json,
713
0
                fmt::format("dest_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
714
715
0
        nlohmann::json dest_segment_num_rows_json = dest_segment_num_rows;
716
0
        RETURN_IF_ERROR(write_json_to_file(
717
0
                dest_segment_num_rows_json,
718
0
                fmt::format("dest_seg_num_rows_{}_{}", _tablet->tablet_id(), output_version)));
719
0
    }
720
721
    // create index_writer to compact indexes
722
259
    std::unordered_map<RowsetId, Rowset*> rs_id_to_rowset_map;
723
1.47k
    for (auto&& rs : _input_rowsets) {
724
1.47k
        rs_id_to_rowset_map.emplace(rs->rowset_id(), rs.get());
725
1.47k
    }
726
727
    // src index dirs
728
259
    std::vector<std::unique_ptr<IndexFileReader>> index_file_readers(src_segment_num);
729
983
    for (const auto& m : src_seg_to_id_map) {
730
983
        const auto& [rowset_id, seg_id] = m.first;
731
732
983
        auto find_it = rs_id_to_rowset_map.find(rowset_id);
733
983
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_find_rowset_error",
734
983
                        { find_it = rs_id_to_rowset_map.end(); })
735
983
        if (find_it == rs_id_to_rowset_map.end()) [[unlikely]] {
736
0
            LOG(WARNING) << "failed to do index compaction, cannot find rowset. tablet_id="
737
0
                         << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string();
738
0
            mark_skip_index_compaction(ctx, error_handler);
739
0
            return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
740
0
                    "failed to do index compaction, cannot find rowset. tablet_id={} rowset_id={}",
741
0
                    _tablet->tablet_id(), rowset_id.to_string());
742
0
        }
743
744
983
        auto* rowset = find_it->second;
745
983
        auto fs = rowset->rowset_meta()->fs();
746
983
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_get_fs_error", { fs = nullptr; })
747
983
        if (!fs) {
748
0
            LOG(WARNING) << "failed to do index compaction, get fs failed. resource_id="
749
0
                         << rowset->rowset_meta()->resource_id();
750
0
            mark_skip_index_compaction(ctx, error_handler);
751
0
            return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
752
0
                    "get fs failed, resource_id={}", rowset->rowset_meta()->resource_id());
753
0
        }
754
755
983
        auto seg_path = rowset->segment_path(seg_id);
756
983
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_seg_path_nullptr", {
757
983
            seg_path = ResultError(Status::Error<ErrorCode::INTERNAL_ERROR>(
758
983
                    "do_inverted_index_compaction_seg_path_nullptr"));
759
983
        })
760
983
        if (!seg_path.has_value()) {
761
0
            LOG(WARNING) << "failed to do index compaction, get segment path failed. tablet_id="
762
0
                         << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string()
763
0
                         << " seg_id=" << seg_id;
764
0
            mark_skip_index_compaction(ctx, error_handler);
765
0
            return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
766
0
                    "get segment path failed. tablet_id={} rowset_id={} seg_id={}",
767
0
                    _tablet->tablet_id(), rowset_id.to_string(), seg_id);
768
0
        }
769
983
        auto index_file_reader = std::make_unique<IndexFileReader>(
770
983
                fs,
771
983
                std::string {InvertedIndexDescriptor::get_index_file_path_prefix(seg_path.value())},
772
983
                _cur_tablet_schema->get_inverted_index_storage_format(),
773
983
                rowset->rowset_meta()->inverted_index_file_info(seg_id));
774
983
        auto st = index_file_reader->init(config::inverted_index_read_buffer_size);
775
983
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_init_inverted_index_file_reader",
776
983
                        {
777
983
                            st = Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
778
983
                                    "debug point: "
779
983
                                    "Compaction::do_inverted_index_compaction_init_inverted_index_"
780
983
                                    "file_reader error");
781
983
                        })
782
983
        if (!st.ok()) {
783
0
            LOG(WARNING) << "failed to do index compaction, init inverted index file reader "
784
0
                            "failed. tablet_id="
785
0
                         << _tablet->tablet_id() << " rowset_id=" << rowset_id.to_string()
786
0
                         << " seg_id=" << seg_id;
787
0
            mark_skip_index_compaction(ctx, error_handler);
788
0
            return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
789
0
                    "init inverted index file reader failed. tablet_id={} rowset_id={} seg_id={}",
790
0
                    _tablet->tablet_id(), rowset_id.to_string(), seg_id);
791
0
        }
792
983
        index_file_readers[m.second] = std::move(index_file_reader);
793
983
    }
794
795
    // dest index files
796
    // format: rowsetId_segmentId
797
259
    auto& inverted_index_file_writers =
798
259
            dynamic_cast<BaseBetaRowsetWriter*>(_output_rs_writer.get())->index_file_writers();
799
259
    DBUG_EXECUTE_IF(
800
259
            "Compaction::do_inverted_index_compaction_inverted_index_file_writers_size_error",
801
259
            { inverted_index_file_writers.clear(); })
802
259
    if (inverted_index_file_writers.size() != dest_segment_num) {
803
0
        LOG(WARNING) << "failed to do index compaction, dest segment num not match. tablet_id="
804
0
                     << _tablet->tablet_id() << " dest_segment_num=" << dest_segment_num
805
0
                     << " inverted_index_file_writers.size()="
806
0
                     << inverted_index_file_writers.size();
807
0
        mark_skip_index_compaction(ctx, error_handler);
808
0
        return Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
809
0
                "dest segment num not match. tablet_id={} dest_segment_num={} "
810
0
                "inverted_index_file_writers.size()={}",
811
0
                _tablet->tablet_id(), dest_segment_num, inverted_index_file_writers.size());
812
0
    }
813
814
    // use tmp file dir to store index files
815
259
    auto tmp_file_dir = ExecEnv::GetInstance()->get_tmp_file_dirs()->get_tmp_file_dir();
816
259
    auto index_tmp_path = tmp_file_dir / dest_rowset_id.to_string();
817
259
    LOG(INFO) << "start index compaction"
818
259
              << ". tablet=" << _tablet->tablet_id() << ", source index size=" << src_segment_num
819
259
              << ", destination index size=" << dest_segment_num << ".";
820
821
259
    Status status = Status::OK();
822
874
    for (auto&& column_uniq_id : ctx.columns_to_do_index_compaction) {
823
874
        auto col = _cur_tablet_schema->column_by_uid(column_uniq_id);
824
874
        auto index_metas = _cur_tablet_schema->inverted_indexs(col);
825
874
        DBUG_EXECUTE_IF("Compaction::do_inverted_index_compaction_can_not_find_index_meta",
826
874
                        { index_metas.clear(); })
827
874
        if (index_metas.empty()) {
828
0
            status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
829
0
                    fmt::format("Can not find index_meta for col {}", col.name()));
830
0
            LOG(WARNING) << "failed to do index compaction, can not find index_meta for column"
831
0
                         << ". tablet=" << _tablet->tablet_id()
832
0
                         << ", column uniq id=" << column_uniq_id;
833
0
            error_handler(-1, column_uniq_id);
834
0
            break;
835
0
        }
836
897
        for (const auto& index_meta : index_metas) {
837
897
            std::vector<lucene::store::Directory*> dest_index_dirs(dest_segment_num);
838
897
            try {
839
897
                std::vector<std::unique_ptr<DorisCompoundReader, DirectoryDeleter>> src_idx_dirs(
840
897
                        src_segment_num);
841
3.92k
                for (int src_segment_id = 0; src_segment_id < src_segment_num; src_segment_id++) {
842
3.02k
                    auto res = index_file_readers[src_segment_id]->open(index_meta);
843
3.02k
                    DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_reader", {
844
3.02k
                        res = ResultError(Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
845
3.02k
                                "debug point: Compaction::open_index_file_reader error"));
846
3.02k
                    })
847
3.02k
                    if (!res.has_value()) {
848
0
                        LOG(WARNING) << "failed to do index compaction, open inverted index file "
849
0
                                        "reader failed"
850
0
                                     << ". tablet=" << _tablet->tablet_id()
851
0
                                     << ", column uniq id=" << column_uniq_id
852
0
                                     << ", src_segment_id=" << src_segment_id;
853
0
                        throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
854
0
                                        res.error().msg());
855
0
                    }
856
3.02k
                    src_idx_dirs[src_segment_id] = std::move(res.value());
857
3.02k
                }
858
1.90k
                for (int dest_segment_id = 0; dest_segment_id < dest_segment_num;
859
1.00k
                     dest_segment_id++) {
860
1.00k
                    auto res = inverted_index_file_writers[dest_segment_id]->open(index_meta);
861
1.00k
                    DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_writer", {
862
1.00k
                        res = ResultError(Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
863
1.00k
                                "debug point: Compaction::open_inverted_index_file_writer error"));
864
1.00k
                    })
865
1.00k
                    if (!res.has_value()) {
866
0
                        LOG(WARNING) << "failed to do index compaction, open inverted index file "
867
0
                                        "writer failed"
868
0
                                     << ". tablet=" << _tablet->tablet_id()
869
0
                                     << ", column uniq id=" << column_uniq_id
870
0
                                     << ", dest_segment_id=" << dest_segment_id;
871
0
                        throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
872
0
                                        res.error().msg());
873
0
                    }
874
                    // Destination directories in dest_index_dirs do not need to be destroyed here,
875
                    // but their lifecycle must be managed by inverted_index_file_writers.
876
1.00k
                    dest_index_dirs[dest_segment_id] = res.value().get();
877
1.00k
                }
878
897
                auto st = compact_column(index_meta->index_id(), src_idx_dirs, dest_index_dirs,
879
897
                                         index_tmp_path.native(), trans_vec, dest_segment_num_rows);
880
897
                if (!st.ok()) {
881
2
                    error_handler(index_meta->index_id(), column_uniq_id);
882
2
                    status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(st.msg());
883
2
                }
884
897
            } catch (CLuceneError& e) {
885
0
                error_handler(index_meta->index_id(), column_uniq_id);
886
0
                status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
887
0
            } catch (const Exception& e) {
888
0
                error_handler(index_meta->index_id(), column_uniq_id);
889
0
                status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
890
0
            }
891
897
        }
892
874
    }
893
894
    // check index compaction status. If status is not ok, we should return error and end this compaction round.
895
259
    if (!status.ok()) {
896
1
        return status;
897
1
    }
898
259
    LOG(INFO) << "succeed to do index compaction"
899
258
              << ". tablet=" << _tablet->tablet_id()
900
258
              << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
901
902
258
    return Status::OK();
903
259
}
904
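The translation vector documented near the top of do_inverted_index_compaction is the structure that lets index compaction remap document ids between source and destination segments. A minimal sketch of a lookup, assuming the pair layout described in that comment (indices here are illustrative):

    // trans_vec[src_segment][src_row] = {dest_segment, dest_doc_id}
    // {UINT32_MAX, UINT32_MAX} means the source row was merged away or filtered.
    const auto& dst = trans_vec[src_seg][src_row];
    if (dst.first == UINT32_MAX) {
        // the row has no counterpart in the output rowset; drop its postings
    } else {
        // the row now lives in destination segment dst.first at doc id dst.second
    }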
905
void Compaction::mark_skip_index_compaction(
906
        const RowsetWriterContext& context,
907
0
        const std::function<void(int64_t, int64_t)>& error_handler) {
908
0
    for (auto&& column_uniq_id : context.columns_to_do_index_compaction) {
909
0
        auto col = _cur_tablet_schema->column_by_uid(column_uniq_id);
910
0
        auto index_metas = _cur_tablet_schema->inverted_indexs(col);
911
0
        DBUG_EXECUTE_IF("Compaction::mark_skip_index_compaction_can_not_find_index_meta",
912
0
                        { index_metas.clear(); })
913
0
        if (index_metas.empty()) {
914
0
            LOG(WARNING) << "mark skip index compaction, can not find index_meta for column"
915
0
                         << ". tablet=" << _tablet->tablet_id()
916
0
                         << ", column uniq id=" << column_uniq_id;
917
0
            error_handler(-1, column_uniq_id);
918
0
            continue;
919
0
        }
920
0
        for (const auto& index_meta : index_metas) {
921
0
            error_handler(index_meta->index_id(), column_uniq_id);
922
0
        }
923
0
    }
924
0
}
925
926
7.91k
void Compaction::construct_index_compaction_columns(RowsetWriterContext& ctx) {
927
7.91k
    for (const auto& index : _cur_tablet_schema->inverted_indexes()) {
928
4.34k
        auto col_unique_ids = index->col_unique_ids();
929
        // check if the column unique ids are empty to avoid a crash
930
4.34k
        if (col_unique_ids.empty()) {
931
1
            LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] index[" << index->index_id()
932
1
                         << "] has no column unique id, will skip index compaction."
933
1
                         << " tablet_schema=" << _cur_tablet_schema->dump_full_schema();
934
1
            continue;
935
1
        }
936
4.34k
        auto col_unique_id = col_unique_ids[0];
937
4.34k
        if (!_cur_tablet_schema->has_column_unique_id(col_unique_id)) {
938
0
            LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
939
0
                         << col_unique_id << "] not found, will skip index compaction";
940
0
            continue;
941
0
        }
942
        // Avoid doing inverted index compaction on non-slice type columns
943
4.34k
        if (!field_is_slice_type(_cur_tablet_schema->column_by_uid(col_unique_id).type())) {
944
2.51k
            continue;
945
2.51k
        }
946
947
        // if index properties are different, index compaction may need to be skipped.
948
1.83k
        bool is_continue = false;
949
1.83k
        std::optional<std::map<std::string, std::string>> first_properties;
950
12.3k
        for (const auto& rowset : _input_rowsets) {
951
12.3k
            auto tablet_indexs = rowset->tablet_schema()->inverted_indexs(col_unique_id);
952
            // no inverted index or index id is different from current index id
953
12.3k
            auto it = std::find_if(tablet_indexs.begin(), tablet_indexs.end(),
954
12.4k
                                   [&index](const auto& tablet_index) {
955
12.4k
                                       return tablet_index->index_id() == index->index_id();
956
12.4k
                                   });
957
12.3k
            if (it != tablet_indexs.end()) {
958
12.3k
                const auto* tablet_index = *it;
959
12.3k
                auto properties = tablet_index->properties();
960
12.3k
                if (!first_properties.has_value()) {
961
1.83k
                    first_properties = properties;
962
10.4k
                } else {
963
10.4k
                    DBUG_EXECUTE_IF(
964
10.4k
                            "Compaction::do_inverted_index_compaction_index_properties_different",
965
10.4k
                            { properties.emplace("dummy_key", "dummy_value"); })
966
10.4k
                    if (properties != first_properties.value()) {
967
3
                        is_continue = true;
968
3
                        break;
969
3
                    }
970
10.4k
                }
971
18.4E
            } else {
972
18.4E
                is_continue = true;
973
18.4E
                break;
974
18.4E
            }
975
12.3k
        }
976
1.83k
        if (is_continue) {
977
5
            continue;
978
5
        }
979
12.3k
        auto has_inverted_index = [&](const RowsetSharedPtr& src_rs) {
980
12.3k
            auto* rowset = static_cast<BetaRowset*>(src_rs.get());
981
12.3k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_is_skip_index_compaction",
982
12.3k
                            { rowset->set_skip_index_compaction(col_unique_id); })
983
12.3k
            if (rowset->is_skip_index_compaction(col_unique_id)) {
984
1
                LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] rowset["
985
1
                             << rowset->rowset_id() << "] column_unique_id[" << col_unique_id
986
1
                             << "] skip inverted index compaction due to last failure";
987
1
                return false;
988
1
            }
989
990
12.3k
            auto fs = rowset->rowset_meta()->fs();
991
12.3k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_get_fs_error",
992
12.3k
                            { fs = nullptr; })
993
12.3k
            if (!fs) {
994
0
                LOG(WARNING) << "get fs failed, resource_id="
995
0
                             << rowset->rowset_meta()->resource_id();
996
0
                return false;
997
0
            }
998
999
12.3k
            auto index_metas = rowset->tablet_schema()->inverted_indexs(col_unique_id);
1000
12.3k
            DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_index_meta_nullptr",
1001
12.3k
                            { index_metas.clear(); })
1002
12.3k
            if (index_metas.empty()) {
1003
0
                LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
1004
0
                             << col_unique_id << "] index meta is null, will skip index compaction";
1005
0
                return false;
1006
0
            }
1007
12.5k
            for (const auto& index_meta : index_metas) {
1008
16.1k
                for (auto i = 0; i < rowset->num_segments(); i++) {
1009
                    // TODO: inverted_index_path
1010
3.53k
                    auto seg_path = rowset->segment_path(i);
1011
3.53k
                    DBUG_EXECUTE_IF("Compaction::construct_skip_inverted_index_seg_path_nullptr", {
1012
3.53k
                        seg_path = ResultError(Status::Error<ErrorCode::INTERNAL_ERROR>(
1013
3.53k
                                "construct_skip_inverted_index_seg_path_nullptr"));
1014
3.53k
                    })
1015
3.53k
                    if (!seg_path) {
1016
0
                        LOG(WARNING) << seg_path.error();
1017
0
                        return false;
1018
0
                    }
1019
1020
3.53k
                    std::string index_file_path;
1021
3.53k
                    try {
1022
3.53k
                        auto index_file_reader = std::make_unique<IndexFileReader>(
1023
3.53k
                                fs,
1024
3.53k
                                std::string {InvertedIndexDescriptor::get_index_file_path_prefix(
1025
3.53k
                                        seg_path.value())},
1026
3.53k
                                _cur_tablet_schema->get_inverted_index_storage_format(),
1027
3.53k
                                rowset->rowset_meta()->inverted_index_file_info(i));
1028
3.53k
                        auto st = index_file_reader->init(config::inverted_index_read_buffer_size);
1029
3.53k
                        index_file_path = index_file_reader->get_index_file_path(index_meta);
1030
3.53k
                        DBUG_EXECUTE_IF(
1031
3.53k
                                "Compaction::construct_skip_inverted_index_index_file_reader_init_"
1032
3.53k
                                "status_not_ok",
1033
3.53k
                                {
1034
3.53k
                                    st = Status::Error<ErrorCode::INTERNAL_ERROR>(
1035
3.53k
                                            "debug point: "
1036
3.53k
                                            "construct_skip_inverted_index_index_file_reader_init_"
1037
3.53k
                                            "status_"
1038
3.53k
                                            "not_ok");
1039
3.53k
                                })
1040
3.53k
                        if (!st.ok()) {
1041
0
                            LOG(WARNING) << "init index " << index_file_path << " error:" << st;
1042
0
                            return false;
1043
0
                        }
1044
1045
                        // check index meta
1046
3.53k
                        auto result = index_file_reader->open(index_meta);
1047
3.53k
                        DBUG_EXECUTE_IF(
1048
3.53k
                                "Compaction::construct_skip_inverted_index_index_file_reader_open_"
1049
3.53k
                                "error",
1050
3.53k
                                {
1051
3.53k
                                    result = ResultError(
1052
3.53k
                                            Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
1053
3.53k
                                                    "CLuceneError occur when open idx file"));
1054
3.53k
                                })
1055
3.53k
                        if (!result.has_value()) {
1056
0
                            LOG(WARNING) << "open index " << index_file_path
1057
0
                                         << " error:" << result.error();
1058
0
                            return false;
1059
0
                        }
1060
3.53k
                        auto reader = std::move(result.value());
1061
3.53k
                        std::vector<std::string> files;
1062
3.53k
                        reader->list(&files);
1063
3.53k
                        reader->close();
1064
3.53k
                        DBUG_EXECUTE_IF(
1065
3.53k
                                "Compaction::construct_skip_inverted_index_index_reader_close_"
1066
3.53k
                                "error",
1067
3.53k
                                { _CLTHROWA(CL_ERR_IO, "debug point: reader close error"); })
1068
1069
3.53k
                        DBUG_EXECUTE_IF(
1070
3.53k
                                "Compaction::construct_skip_inverted_index_index_files_count",
1071
3.53k
                                { files.clear(); })
1072
1073
                        // why is 3?
1074
                        // a slice-type index file has at least 3 files: null_bitmap, segments_N, segments.gen
1075
3.53k
                        if (files.size() < 3) {
1076
0
                            LOG(WARNING)
1077
0
                                    << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
1078
0
                                    << col_unique_id << "]," << index_file_path
1079
0
                                    << " is corrupted, will skip index compaction";
1080
0
                            return false;
1081
0
                        }
1082
3.53k
                    } catch (CLuceneError& err) {
1083
0
                        LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
1084
0
                                     << col_unique_id << "] open index[" << index_file_path
1085
0
                                     << "], will skip index compaction, error:" << err.what();
1086
0
                        return false;
1087
0
                    }
1088
3.53k
                }
1089
12.5k
            }
1090
12.3k
            return true;
1091
12.3k
        };
1092
1093
1.83k
        bool all_have_inverted_index = std::all_of(_input_rowsets.begin(), _input_rowsets.end(),
1094
1.83k
                                                   std::move(has_inverted_index));
1095
1096
1.83k
        if (all_have_inverted_index) {
1097
1.83k
            ctx.columns_to_do_index_compaction.insert(col_unique_id);
1098
1.83k
        }
1099
1.83k
    }
1100
7.91k
}
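The per-column decision above reduces to a std::all_of over the input rowsets: a column is registered in ctx.columns_to_do_index_compaction only if every input rowset's index files pass the sanity checks. A minimal, self-contained sketch of that final step, with a hypothetical predicate standing in for the has_inverted_index lambda:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Hypothetical stand-in for a rowset whose inverted index was verified.
    struct FakeRowset {
        bool index_usable;
    };

    int main() {
        std::vector<FakeRowset> input_rowsets {{true}, {true}, {false}};
        bool all_have_inverted_index =
                std::all_of(input_rowsets.begin(), input_rowsets.end(),
                            [](const FakeRowset& rs) { return rs.index_usable; });
        // One unusable index is enough to skip index compaction for this column.
        assert(!all_have_inverted_index);
        return 0;
    }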
1101
1102
0
Status CompactionMixin::update_delete_bitmap() {
1103
    // For MOW tables with cluster keys, compaction reads data with the delete bitmap.
1104
    // If the tablet is not ready (e.g. during schema change), we need to update the delete bitmap.
1105
0
    {
1106
0
        std::shared_lock meta_rlock(_tablet->get_header_lock());
1107
0
        if (_tablet->tablet_state() != TABLET_NOTREADY) {
1108
0
            return Status::OK();
1109
0
        }
1110
0
    }
1111
0
    OlapStopWatch watch;
1112
0
    std::vector<RowsetSharedPtr> rowsets;
1113
0
    for (const auto& rowset : _input_rowsets) {
1114
0
        std::lock_guard rwlock(tablet()->get_rowset_update_lock());
1115
0
        std::shared_lock rlock(_tablet->get_header_lock());
1116
0
        Status st = _tablet->update_delete_bitmap_without_lock(_tablet, rowset, &rowsets);
1117
0
        if (!st.ok()) {
1118
0
            LOG(INFO) << "failed update_delete_bitmap_without_lock for tablet_id="
1119
0
                      << _tablet->tablet_id() << ", st=" << st.to_string();
1120
0
            return st;
1121
0
        }
1122
0
        rowsets.push_back(rowset);
1123
0
    }
1124
0
    LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id()
1125
0
              << ", rowsets: " << _input_rowsets.size() << ", cost: " << watch.get_elapse_time_us()
1126
0
              << "(us)";
1127
0
    return Status::OK();
1128
0
}
1129
1130
162
Status CloudCompactionMixin::update_delete_bitmap() {
1131
    // For MOW tables with cluster keys, compaction reads data with the delete bitmap.
1132
    // If the tablet is not ready (e.g. during schema change), we need to update the delete bitmap.
1133
162
    {
1134
162
        std::shared_lock meta_rlock(_tablet->get_header_lock());
1135
162
        if (_tablet->tablet_state() != TABLET_NOTREADY) {
1136
162
            return Status::OK();
1137
162
        }
1138
162
    }
1139
0
    OlapStopWatch watch;
1140
0
    std::vector<RowsetSharedPtr> rowsets;
1141
0
    for (const auto& rowset : _input_rowsets) {
1142
0
        Status st = _tablet->update_delete_bitmap_without_lock(_tablet, rowset, &rowsets);
1143
0
        if (!st.ok()) {
1144
0
            LOG(INFO) << "failed update_delete_bitmap_without_lock for tablet_id="
1145
0
                      << _tablet->tablet_id() << ", st=" << st.to_string();
1146
0
            return st;
1147
0
        }
1148
0
        rowsets.push_back(rowset);
1149
0
    }
1150
0
    LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id()
1151
0
              << ", rowsets: " << _input_rowsets.size() << ", cost: " << watch.get_elapse_time_us()
1152
0
              << "(us)";
1153
0
    return Status::OK();
1154
0
}
1155
1156
79
Status CompactionMixin::construct_output_rowset_writer(RowsetWriterContext& ctx) {
1157
    // only do index compaction for dup_keys and unique_keys with mow enabled
1158
79
    if (_enable_inverted_index_compaction && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1159
62
                                                _tablet->enable_unique_key_merge_on_write()) ||
1160
62
                                               _tablet->keys_type() == KeysType::DUP_KEYS))) {
1161
62
        construct_index_compaction_columns(ctx);
1162
62
    }
1163
79
    ctx.version = _output_version;
1164
79
    ctx.rowset_state = VISIBLE;
1165
79
    ctx.segments_overlap = NONOVERLAPPING;
1166
79
    ctx.tablet_schema = _cur_tablet_schema;
1167
79
    ctx.newest_write_timestamp = _newest_write_timestamp;
1168
79
    ctx.write_type = DataWriteType::TYPE_COMPACTION;
1169
79
    ctx.compaction_type = compaction_type();
1170
79
    ctx.allow_packed_file = false;
1171
79
    _output_rs_writer = DORIS_TRY(_tablet->create_rowset_writer(ctx, _is_vertical));
1172
79
    _pending_rs_guard = _engine.add_pending_rowset(ctx);
1173
79
    return Status::OK();
1174
79
}
1175
1176
38
Status CompactionMixin::modify_rowsets() {
1177
38
    std::vector<RowsetSharedPtr> output_rowsets;
1178
38
    output_rowsets.push_back(_output_rowset);
1179
1180
38
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1181
38
        _tablet->enable_unique_key_merge_on_write()) {
1182
12
        Version version = tablet()->max_version();
1183
12
        DeleteBitmap output_rowset_delete_bitmap(_tablet->tablet_id());
1184
12
        std::unique_ptr<RowLocationSet> missed_rows;
1185
12
        if ((config::enable_missing_rows_correctness_check ||
1186
12
             config::enable_mow_compaction_correctness_check_core ||
1187
12
             config::enable_mow_compaction_correctness_check_fail) &&
1188
12
            !_allow_delete_in_cumu_compaction &&
1189
12
            compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1190
12
            missed_rows = std::make_unique<RowLocationSet>();
1191
12
            LOG(INFO) << "RowLocation Set inited succ for tablet:" << _tablet->tablet_id();
1192
12
        }
1193
12
        std::unique_ptr<std::map<RowsetSharedPtr, RowLocationPairList>> location_map;
1194
12
        if (config::enable_rowid_conversion_correctness_check &&
1195
12
            tablet()->tablet_schema()->cluster_key_uids().empty()) {
1196
0
            location_map = std::make_unique<std::map<RowsetSharedPtr, RowLocationPairList>>();
1197
0
            LOG(INFO) << "Location Map inited succ for tablet:" << _tablet->tablet_id();
1198
0
        }
1199
        // Convert the delete bitmap of the input rowsets to output rowset.
1200
        // New loads are not blocked, so some keys of input rowsets might
1201
        // be deleted in the meantime. We need to deal with the delete bitmap
1202
        // of incremental data later.
1203
        // TODO(LiaoXin): check if there are duplicate keys
1204
12
        std::size_t missed_rows_size = 0;
1205
12
        tablet()->calc_compaction_output_rowset_delete_bitmap(
1206
12
                _input_rowsets, *_rowid_conversion, 0, version.second + 1, missed_rows.get(),
1207
12
                location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1208
12
                &output_rowset_delete_bitmap);
1209
12
        if (missed_rows) {
1210
12
            missed_rows_size = missed_rows->size();
1211
12
            std::size_t merged_missed_rows_size = _stats.merged_rows;
1212
12
            if (!_tablet->tablet_meta()->tablet_schema()->cluster_key_uids().empty()) {
1213
0
                merged_missed_rows_size += _stats.filtered_rows;
1214
0
            }
1215
1216
            // Suppose a heavy schema change process on BE converting tablet A to tablet B.
1217
            // 1. during schema change double write, new loads write [X-Y] on tablet B.
1218
            // 2. rowsets with version [a],[a+1],...,[b-1],[b] on tablet B are picked for cumu compaction(X<=a<b<=Y).(cumu compaction
1219
            //    on new tablet during schema change double write is allowed after https://github.com/apache/doris/pull/16470)
1220
            // 3. schema change removes all rowsets on tablet B before version Z (b<=Z<=Y) before it begins to convert historical rowsets.
1221
            // 4. schema change finishes.
1222
            // 5. cumu compaction begins on the new tablet with versions [a],...,[b]. If there are duplicate keys between these rowsets,
1223
            //    the compaction check will fail because these rowsets skipped delete bitmap calculation in the commit phase and
1224
            //    the publish phase, since tablet B was in NOT_READY state when they were written.
1225
1226
            // Considering that the cumu compaction will eventually fail in this situation, because `Tablet::modify_rowsets` will check if rowsets in
1227
            //    `to_delete`(_input_rowsets) still exist in the tablet's `_rs_version_map`, we can simply skip the missed-rows check here.
1228
12
            bool need_to_check_missed_rows = true;
1229
12
            {
1230
12
                std::shared_lock rlock(_tablet->get_header_lock());
1231
12
                need_to_check_missed_rows =
1232
12
                        std::all_of(_input_rowsets.begin(), _input_rowsets.end(),
1233
62
                                    [&](const RowsetSharedPtr& rowset) {
1234
62
                                        return tablet()->rowset_exists_unlocked(rowset);
1235
62
                                    });
1236
12
            }
1237
1238
12
            if (_tablet->tablet_state() == TABLET_RUNNING &&
1239
12
                merged_missed_rows_size != missed_rows_size && need_to_check_missed_rows) {
1240
0
                std::stringstream ss;
1241
0
                ss << "cumulative compaction: the merged rows(" << _stats.merged_rows
1242
0
                   << "), filtered rows(" << _stats.filtered_rows
1243
0
                   << ") is not equal to missed rows(" << missed_rows_size
1244
0
                   << ") in rowid conversion, tablet_id: " << _tablet->tablet_id()
1245
0
                   << ", table_id:" << _tablet->table_id();
1246
0
                if (missed_rows_size == 0) {
1247
0
                    ss << ", debug info: ";
1248
0
                    DeleteBitmap subset_map(_tablet->tablet_id());
1249
0
                    for (auto rs : _input_rowsets) {
1250
0
                        _tablet->tablet_meta()->delete_bitmap().subset(
1251
0
                                {rs->rowset_id(), 0, 0},
1252
0
                                {rs->rowset_id(), rs->num_segments(), version.second + 1},
1253
0
                                &subset_map);
1254
0
                        ss << "(rowset id: " << rs->rowset_id()
1255
0
                           << ", delete bitmap cardinality: " << subset_map.cardinality() << ")";
1256
0
                    }
1257
0
                    ss << ", version[0-" << version.second + 1 << "]";
1258
0
                }
1259
0
                std::string err_msg = fmt::format(
1260
0
                        "cumulative compaction: the merged rows({}), filtered rows({})"
1261
0
                        " is not equal to missed rows({}) in rowid conversion,"
1262
0
                        " tablet_id: {}, table_id:{}",
1263
0
                        _stats.merged_rows, _stats.filtered_rows, missed_rows_size,
1264
0
                        _tablet->tablet_id(), _tablet->table_id());
1265
0
                LOG(WARNING) << err_msg;
1266
0
                if (config::enable_mow_compaction_correctness_check_core) {
1267
0
                    CHECK(false) << err_msg;
1268
0
                } else if (config::enable_mow_compaction_correctness_check_fail) {
1269
0
                    return Status::InternalError<false>(err_msg);
1270
0
                } else {
1271
0
                    DCHECK(false) << err_msg;
1272
0
                }
1273
0
            }
1274
12
        }
1275
1276
12
        if (location_map) {
1277
0
            RETURN_IF_ERROR(tablet()->check_rowid_conversion(_output_rowset, *location_map));
1278
0
            location_map->clear();
1279
0
        }
1280
1281
12
        {
1282
12
            std::lock_guard<std::mutex> wrlock_(tablet()->get_rowset_update_lock());
1283
12
            std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1284
12
            SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1285
1286
            // Here we calculate the delete bitmaps of all rowsets that are committed but not yet published, to reduce the calculation pressure
1287
            // of the publish phase.
1288
            // All rowsets that need to be recalculated have been published, so we don't need to acquire the lock.
1289
            // Step1: collect the delete bitmaps of all of this tablet's committed rowsets
1290
12
            CommitTabletTxnInfoVec commit_tablet_txn_info_vec {};
1291
12
            _engine.txn_manager()->get_all_commit_tablet_txn_info_by_tablet(
1292
12
                    *tablet(), &commit_tablet_txn_info_vec);
1293
1294
            // Step2: calculate all rowsets' delete bitmaps which are published during compaction.
1295
12
            for (auto& it : commit_tablet_txn_info_vec) {
1296
0
                if (!_check_if_includes_input_rowsets(it.rowset_ids)) {
1297
                    // When calculating the delete bitmap of all committed rowsets relative to the compaction,
1298
                    // there may be cases where the compacted rowsets are newer than the committed rowsets.
1299
                    // In that case, rowid conversion cannot be performed, otherwise data would be lost.
1300
                    // Therefore, we need to check whether every committed rowset has calculated the delete bitmap for
1301
                    // all compaction input rowsets.
1302
0
                    continue;
1303
0
                }
1304
0
                DeleteBitmap txn_output_delete_bitmap(_tablet->tablet_id());
1305
0
                tablet()->calc_compaction_output_rowset_delete_bitmap(
1306
0
                        _input_rowsets, *_rowid_conversion, 0, UINT64_MAX, missed_rows.get(),
1307
0
                        location_map.get(), *it.delete_bitmap.get(), &txn_output_delete_bitmap);
1308
0
                if (config::enable_merge_on_write_correctness_check) {
1309
0
                    RowsetIdUnorderedSet rowsetids;
1310
0
                    rowsetids.insert(_output_rowset->rowset_id());
1311
0
                    _tablet->add_sentinel_mark_to_delete_bitmap(&txn_output_delete_bitmap,
1312
0
                                                                rowsetids);
1313
0
                }
1314
0
                it.delete_bitmap->merge(txn_output_delete_bitmap);
1315
                // Step3: write back updated delete bitmap and tablet info.
1316
0
                it.rowset_ids.insert(_output_rowset->rowset_id());
1317
0
                _engine.txn_manager()->set_txn_related_delete_bitmap(
1318
0
                        it.partition_id, it.transaction_id, _tablet->tablet_id(),
1319
0
                        tablet()->tablet_uid(), true, it.delete_bitmap, it.rowset_ids,
1320
0
                        it.partial_update_info);
1321
0
            }
1322
1323
            // Convert the delete bitmap of the input rowsets to output rowset for
1324
            // incremental data.
1325
12
            tablet()->calc_compaction_output_rowset_delete_bitmap(
1326
12
                    _input_rowsets, *_rowid_conversion, version.second, UINT64_MAX,
1327
12
                    missed_rows.get(), location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1328
12
                    &output_rowset_delete_bitmap);
1329
1330
12
            if (location_map) {
1331
0
                RETURN_IF_ERROR(tablet()->check_rowid_conversion(_output_rowset, *location_map));
1332
0
            }
1333
1334
12
            tablet()->merge_delete_bitmap(output_rowset_delete_bitmap);
1335
12
            RETURN_IF_ERROR(tablet()->modify_rowsets(output_rowsets, _input_rowsets, true));
1336
12
        }
1337
26
    } else {
1338
26
        std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1339
26
        SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1340
26
        RETURN_IF_ERROR(tablet()->modify_rowsets(output_rowsets, _input_rowsets, true));
1341
26
    }
1342
1343
38
    if (config::tablet_rowset_stale_sweep_by_size &&
1344
38
        _tablet->tablet_meta()->all_stale_rs_metas().size() >=
1345
0
                config::tablet_rowset_stale_sweep_threshold_size) {
1346
0
        tablet()->delete_expired_stale_rowset();
1347
0
    }
1348
1349
38
    int64_t cur_max_version = 0;
1350
38
    {
1351
38
        std::shared_lock rlock(_tablet->get_header_lock());
1352
38
        cur_max_version = _tablet->max_version_unlocked();
1353
38
        tablet()->save_meta();
1354
38
    }
1355
38
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1356
38
        _tablet->enable_unique_key_merge_on_write()) {
1357
12
        auto st = TabletMetaManager::remove_old_version_delete_bitmap(
1358
12
                tablet()->data_dir(), _tablet->tablet_id(), cur_max_version);
1359
12
        if (!st.ok()) {
1360
0
            LOG(WARNING) << "failed to remove old version delete bitmap, st: " << st;
1361
0
        }
1362
12
    }
1363
38
    DBUG_EXECUTE_IF("CumulativeCompaction.modify_rowsets.delete_expired_stale_rowset",
1364
38
                    { tablet()->delete_expired_stale_rowset(); });
1365
38
    _tablet->prefill_dbm_agg_cache_after_compaction(_output_rowset);
1366
38
    return Status::OK();
1367
38
}
1368
1369
bool CompactionMixin::_check_if_includes_input_rowsets(
1370
0
        const RowsetIdUnorderedSet& commit_rowset_ids_set) const {
1371
0
    std::vector<RowsetId> commit_rowset_ids {};
1372
0
    commit_rowset_ids.insert(commit_rowset_ids.end(), commit_rowset_ids_set.begin(),
1373
0
                             commit_rowset_ids_set.end());
1374
0
    std::sort(commit_rowset_ids.begin(), commit_rowset_ids.end());
1375
0
    std::vector<RowsetId> input_rowset_ids {};
1376
0
    for (const auto& rowset : _input_rowsets) {
1377
0
        input_rowset_ids.emplace_back(rowset->rowset_meta()->rowset_id());
1378
0
    }
1379
0
    std::sort(input_rowset_ids.begin(), input_rowset_ids.end());
1380
0
    return std::includes(commit_rowset_ids.begin(), commit_rowset_ids.end(),
1381
0
                         input_rowset_ids.begin(), input_rowset_ids.end());
1382
0
}
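The containment test above relies on std::includes, which requires both ranges to be sorted; a minimal sketch with hypothetical integer ids standing in for RowsetId:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
        // Hypothetical ids: the committed rowset id set must contain every compaction input id.
        std::vector<int> commit_ids {8, 1, 5, 2, 3};
        std::vector<int> input_ids {5, 2};
        // Sort first, exactly as the function above sorts both id vectors.
        std::sort(commit_ids.begin(), commit_ids.end());
        std::sort(input_ids.begin(), input_ids.end());
        assert(std::includes(commit_ids.begin(), commit_ids.end(),
                             input_ids.begin(), input_ids.end()));
        return 0;
    }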
1383
1384
30
void CompactionMixin::update_compaction_level() {
1385
30
    auto* cumu_policy = tablet()->cumulative_compaction_policy();
1386
30
    if (cumu_policy && cumu_policy->name() == CUMULATIVE_TIME_SERIES_POLICY) {
1387
0
        int64_t compaction_level =
1388
0
                cumu_policy->get_compaction_level(tablet(), _input_rowsets, _output_rowset);
1389
0
        _output_rowset->rowset_meta()->set_compaction_level(compaction_level);
1390
0
    }
1391
30
}
1392
1393
9.68k
Status Compaction::check_correctness() {
1394
    // 1. check row number
1395
9.68k
    if (_input_row_num != _output_rowset->num_rows() + _stats.merged_rows + _stats.filtered_rows) {
1396
0
        return Status::Error<CHECK_LINES_ERROR>(
1397
0
                "row_num does not match between cumulative input and output! tablet={}, "
1398
0
                "input_row_num={}, merged_row_num={}, filtered_row_num={}, output_row_num={}",
1399
0
                _tablet->tablet_id(), _input_row_num, _stats.merged_rows, _stats.filtered_rows,
1400
0
                _output_rowset->num_rows());
1401
0
    }
1402
    // 2. check variant column path stats
1403
9.68k
    RETURN_IF_ERROR(vectorized::schema_util::VariantCompactionUtil::check_path_stats(
1404
9.68k
            _input_rowsets, _output_rowset, _tablet));
1405
9.68k
    return Status::OK();
1406
9.68k
}
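The row-number check verifies the invariant input_row_num == output_rows + merged_rows + filtered_rows. A worked example with hypothetical counts:

    #include <cassert>
    #include <cstdint>

    int main() {
        // Hypothetical counts for one compaction run.
        int64_t input_row_num = 1000;  // rows read from all input rowsets
        int64_t merged_rows = 120;     // duplicate keys merged away
        int64_t filtered_rows = 30;    // rows removed by delete predicates
        int64_t output_rows = 850;     // rows written to the output rowset
        // Any other output_rows value would trip the CHECK_LINES_ERROR above.
        assert(input_row_num == output_rows + merged_rows + filtered_rows);
        return 0;
    }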
1407
1408
98
int64_t CompactionMixin::get_compaction_permits() {
1409
98
    int64_t permits = 0;
1410
908
    for (auto&& rowset : _input_rowsets) {
1411
908
        permits += rowset->rowset_meta()->get_compaction_score();
1412
908
    }
1413
98
    return permits;
1414
98
}
1415
1416
28
int64_t CompactionMixin::calc_input_rowsets_total_size() const {
1417
28
    int64_t input_rowsets_total_size = 0;
1418
94
    for (const auto& rowset : _input_rowsets) {
1419
94
        const auto& rowset_meta = rowset->rowset_meta();
1420
94
        auto total_size = rowset_meta->total_disk_size();
1421
94
        input_rowsets_total_size += total_size;
1422
94
    }
1423
28
    return input_rowsets_total_size;
1424
28
}
1425
1426
28
int64_t CompactionMixin::calc_input_rowsets_row_num() const {
1427
28
    int64_t input_rowsets_row_num = 0;
1428
94
    for (const auto& rowset : _input_rowsets) {
1429
94
        const auto& rowset_meta = rowset->rowset_meta();
1430
94
        auto row_num = rowset_meta->num_rows();
1431
94
        input_rowsets_row_num += row_num;
1432
94
    }
1433
28
    return input_rowsets_row_num;
1434
28
}
1435
1436
9.49k
void Compaction::_load_segment_to_cache() {
1437
    // Load new rowset's segments to cache.
1438
9.49k
    SegmentCacheHandle handle;
1439
9.49k
    auto st = SegmentLoader::instance()->load_segments(
1440
9.49k
            std::static_pointer_cast<BetaRowset>(_output_rowset), &handle, true);
1441
9.49k
    if (!st.ok()) {
1442
0
        LOG(WARNING) << "failed to load segment to cache! output rowset version="
1443
0
                     << _output_rowset->start_version() << "-" << _output_rowset->end_version()
1444
0
                     << ".";
1445
0
    }
1446
9.49k
}
1447
1448
9.53k
Status CloudCompactionMixin::build_basic_info() {
1449
9.53k
    _output_version =
1450
9.53k
            Version(_input_rowsets.front()->start_version(), _input_rowsets.back()->end_version());
1451
1452
9.53k
    _newest_write_timestamp = _input_rowsets.back()->newest_write_timestamp();
1453
1454
9.53k
    std::vector<RowsetMetaSharedPtr> rowset_metas(_input_rowsets.size());
1455
9.53k
    std::transform(_input_rowsets.begin(), _input_rowsets.end(), rowset_metas.begin(),
1456
73.2k
                   [](const RowsetSharedPtr& rowset) { return rowset->rowset_meta(); });
1457
9.53k
    if (is_index_change_compaction()) {
1458
493
        RETURN_IF_ERROR(rebuild_tablet_schema());
1459
9.04k
    } else {
1460
9.04k
        _cur_tablet_schema = _tablet->tablet_schema_with_merged_max_schema_version(rowset_metas);
1461
9.04k
    }
1462
1463
    // If enable_vertical_compact_variant_subcolumns is true, we need to compact the variant subcolumns in separate column groups,
1464
    // so get_extended_compaction_schema will extend the schema for variant columns.
1465
9.58k
    if (_enable_vertical_compact_variant_subcolumns) {
1466
9.58k
        RETURN_IF_ERROR(
1467
9.58k
                vectorized::schema_util::VariantCompactionUtil::get_extended_compaction_schema(
1468
9.58k
                        _input_rowsets, _cur_tablet_schema));
1469
9.58k
    }
1470
9.53k
    return Status::OK();
1471
9.53k
}
1472
1473
9.46k
int64_t CloudCompactionMixin::get_compaction_permits() {
1474
9.46k
    int64_t permits = 0;
1475
73.5k
    for (auto&& rowset : _input_rowsets) {
1476
73.5k
        permits += rowset->rowset_meta()->get_compaction_score();
1477
73.5k
    }
1478
9.46k
    return permits;
1479
9.46k
}
1480
1481
CloudCompactionMixin::CloudCompactionMixin(CloudStorageEngine& engine, CloudTabletSPtr tablet,
1482
                                           const std::string& label)
1483
87.0k
        : Compaction(tablet, label), _engine(engine) {
1484
87.0k
    auto uuid = UUIDGenerator::instance()->next_uuid();
1485
87.0k
    std::stringstream ss;
1486
87.0k
    ss << uuid;
1487
87.0k
    _uuid = ss.str();
1488
87.0k
}
1489
1490
9.59k
Status CloudCompactionMixin::execute_compact_impl(int64_t permits) {
1491
9.59k
    OlapStopWatch watch;
1492
1493
9.59k
    RETURN_IF_ERROR(build_basic_info());
1494
1495
9.59k
    LOG(INFO) << "start " << compaction_name() << ". tablet=" << _tablet->tablet_id()
1496
9.59k
              << ", output_version=" << _output_version << ", permits: " << permits;
1497
1498
9.59k
    RETURN_IF_ERROR(merge_input_rowsets());
1499
1500
9.59k
    DBUG_EXECUTE_IF("CloudFullCompaction::modify_rowsets.wrong_rowset_id", {
1501
9.59k
        DCHECK(compaction_type() == ReaderType::READER_FULL_COMPACTION);
1502
9.59k
        RowsetId id;
1503
9.59k
        id.version = 2;
1504
9.59k
        id.hi = _output_rowset->rowset_meta()->rowset_id().hi + ((int64_t)(1) << 56);
1505
9.59k
        id.mi = _output_rowset->rowset_meta()->rowset_id().mi;
1506
9.59k
        id.lo = _output_rowset->rowset_meta()->rowset_id().lo;
1507
9.59k
        _output_rowset->rowset_meta()->set_rowset_id(id);
1508
9.59k
        LOG(INFO) << "[Debug wrong rowset id]:"
1509
9.59k
                  << _output_rowset->rowset_meta()->rowset_id().to_string();
1510
9.59k
    })
1511
1512
    // Currently, the compaction level is only updated under the time series policy.
1513
9.59k
    update_compaction_level();
1514
1515
9.59k
    RETURN_IF_ERROR(_engine.meta_mgr().commit_rowset(*_output_rowset->rowset_meta().get(), _uuid));
1516
1517
    // 4. modify rowsets in memory
1518
9.59k
    RETURN_IF_ERROR(modify_rowsets());
1519
1520
    // update compaction status data
1521
9.48k
    auto tablet = std::static_pointer_cast<CloudTablet>(_tablet);
1522
9.48k
    tablet->local_read_time_us.fetch_add(_stats.cloud_local_read_time);
1523
9.48k
    tablet->remote_read_time_us.fetch_add(_stats.cloud_remote_read_time);
1524
9.48k
    tablet->exec_compaction_time_us.fetch_add(watch.get_elapse_time_us());
1525
1526
9.48k
    return Status::OK();
1527
9.59k
}
1528
1529
9.43k
int64_t CloudCompactionMixin::initiator() const {
1530
9.43k
    return HashUtil::hash64(_uuid.data(), _uuid.size(), 0) & std::numeric_limits<int64_t>::max();
1531
9.43k
}
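The initiator id is derived by hashing the compaction uuid and clearing the sign bit, so it is always non-negative; a sketch of the masking trick, with std::hash used only as a stand-in for HashUtil::hash64:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <limits>
    #include <string>

    int main() {
        std::string uuid = "d2c1f0e4-...";  // hypothetical compaction uuid
        uint64_t h = std::hash<std::string> {}(uuid);
        // AND-ing with int64_t max (0x7fff...f) clears the top bit, so the cast below
        // can never produce a negative initiator id.
        int64_t initiator = static_cast<int64_t>(h & std::numeric_limits<int64_t>::max());
        std::cout << "initiator=" << initiator << std::endl;
        return 0;
    }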
1532
1533
namespace cloud {
1534
size_t truncate_rowsets_by_txn_size(std::vector<RowsetSharedPtr>& rowsets, int64_t& kept_size_bytes,
1535
9.82k
                                    int64_t& truncated_size_bytes) {
1536
9.82k
    if (rowsets.empty()) {
1537
1
        kept_size_bytes = 0;
1538
1
        truncated_size_bytes = 0;
1539
1
        return 0;
1540
1
    }
1541
1542
9.81k
    int64_t max_size = config::compaction_txn_max_size_bytes;
1543
9.81k
    int64_t cumulative_meta_size = 0;
1544
9.81k
    size_t keep_count = 0;
1545
1546
86.1k
    for (size_t i = 0; i < rowsets.size(); ++i) {
1547
76.3k
        const auto& rs = rowsets[i];
1548
1549
        // Estimate rowset meta size using doris_rowset_meta_to_cloud
1550
76.3k
        auto cloud_meta = cloud::doris_rowset_meta_to_cloud(rs->rowset_meta()->get_rowset_pb(true));
1551
76.3k
        int64_t rowset_meta_size = cloud_meta.ByteSizeLong();
1552
1553
76.3k
        cumulative_meta_size += rowset_meta_size;
1554
1555
76.3k
        if (keep_count > 0 && cumulative_meta_size > max_size) {
1556
            // Rollback and stop
1557
4
            cumulative_meta_size -= rowset_meta_size;
1558
4
            break;
1559
4
        }
1560
1561
76.3k
        keep_count++;
1562
76.3k
    }
1563
1564
    // Ensure at least 1 rowset is kept
1565
9.81k
    if (keep_count == 0) {
1566
0
        keep_count = 1;
1567
        // Recalculate size for the first rowset
1568
0
        const auto& rs = rowsets[0];
1569
0
        auto cloud_meta = cloud::doris_rowset_meta_to_cloud(rs->rowset_meta()->get_rowset_pb());
1570
0
        cumulative_meta_size = cloud_meta.ByteSizeLong();
1571
0
    }
1572
1573
    // Calculate truncated size
1574
9.81k
    int64_t truncated_total_size = 0;
1575
9.81k
    size_t truncated_count = rowsets.size() - keep_count;
1576
9.81k
    if (truncated_count > 0) {
1577
35
        for (size_t i = keep_count; i < rowsets.size(); ++i) {
1578
31
            auto cloud_meta =
1579
31
                    cloud::doris_rowset_meta_to_cloud(rowsets[i]->rowset_meta()->get_rowset_pb());
1580
31
            truncated_total_size += cloud_meta.ByteSizeLong();
1581
31
        }
1582
4
        rowsets.resize(keep_count);
1583
4
    }
1584
1585
9.81k
    kept_size_bytes = cumulative_meta_size;
1586
9.81k
    truncated_size_bytes = truncated_total_size;
1587
9.81k
    return truncated_count;
1588
9.82k
}
1589
} // namespace cloud
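The keep/rollback policy above (accumulate rowsets until the cumulative serialized meta size would exceed the budget, always keep at least the first one, and report what was cut) can be sketched with plain integers standing in for the serialized rowset-meta sizes. truncate_by_size, max_size, and the sample sizes below are all illustrative; max_size plays the role of config::compaction_txn_max_size_bytes:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    size_t truncate_by_size(std::vector<int64_t>& sizes, int64_t max_size,
                            int64_t& kept_bytes, int64_t& truncated_bytes) {
        kept_bytes = 0;
        truncated_bytes = 0;
        size_t keep_count = 0;
        for (int64_t s : sizes) {
            kept_bytes += s;
            if (keep_count > 0 && kept_bytes > max_size) {
                kept_bytes -= s;  // roll back the entry that overflowed the budget
                break;
            }
            ++keep_count;  // the first entry is always kept, even if it alone exceeds max_size
        }
        size_t truncated_count = sizes.size() - keep_count;
        for (size_t i = keep_count; i < sizes.size(); ++i) {
            truncated_bytes += sizes[i];
        }
        sizes.resize(keep_count);
        return truncated_count;
    }

With sizes {40, 40, 40} and max_size 100, the sketch keeps the first two entries (kept_bytes 80) and truncates one (truncated_bytes 40), mirroring how the real function trims _input_rowsets before the compaction transaction is committed.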
1590
1591
9.31k
size_t CloudCompactionMixin::apply_txn_size_truncation_and_log(const std::string& compaction_name) {
1592
9.31k
    if (_input_rowsets.empty()) {
1593
1
        return 0;
1594
1
    }
1595
1596
9.31k
    int64_t original_count = _input_rowsets.size();
1597
9.31k
    int64_t original_start_version = _input_rowsets.front()->start_version();
1598
9.31k
    int64_t original_end_version = _input_rowsets.back()->end_version();
1599
1600
9.31k
    int64_t final_size = 0;
1601
9.31k
    int64_t truncated_size = 0;
1602
9.31k
    size_t truncated_count =
1603
9.31k
            cloud::truncate_rowsets_by_txn_size(_input_rowsets, final_size, truncated_size);
1604
1605
9.31k
    if (truncated_count > 0) {
1606
2
        int64_t original_size = final_size + truncated_size;
1607
2
        LOG(INFO) << compaction_name << " txn size estimation truncate"
1608
2
                  << ", tablet_id=" << _tablet->tablet_id() << ", original_version_range=["
1609
2
                  << original_start_version << "-" << original_end_version
1610
2
                  << "], final_version_range=[" << _input_rowsets.front()->start_version() << "-"
1611
2
                  << _input_rowsets.back()->end_version()
1612
2
                  << "], original_rowset_count=" << original_count
1613
2
                  << ", final_rowset_count=" << _input_rowsets.size()
1614
2
                  << ", truncated_rowset_count=" << truncated_count
1615
2
                  << ", original_size_bytes=" << original_size
1616
2
                  << ", final_size_bytes=" << final_size
1617
2
                  << ", truncated_size_bytes=" << truncated_size
1618
2
                  << ", threshold_bytes=" << config::compaction_txn_max_size_bytes;
1619
2
    }
1620
1621
9.31k
    return truncated_count;
1622
9.31k
}
1623
1624
9.45k
Status CloudCompactionMixin::execute_compact() {
1625
9.45k
    TEST_INJECTION_POINT("Compaction::do_compaction");
1626
9.45k
    int64_t permits = get_compaction_permits();
1627
9.45k
    HANDLE_EXCEPTION_IF_CATCH_EXCEPTION(
1628
9.45k
            execute_compact_impl(permits), [&](const doris::Exception& ex) {
1629
9.45k
                auto st = garbage_collection();
1630
9.45k
                if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1631
9.45k
                    _tablet->enable_unique_key_merge_on_write() && !st.ok()) {
1632
                    // If compaction fails, the BE will try to abort the compaction job; the delete bitmap
1633
                    // lock is released if the abort succeeds. If the abort fails, the delete bitmap
1634
                    // lock is not released; in this situation, the BE needs to send this RPC to the MS
1635
                    // to try to release the delete bitmap lock.
1636
9.45k
                    _engine.meta_mgr().remove_delete_bitmap_update_lock(
1637
9.45k
                            _tablet->table_id(), COMPACTION_DELETE_BITMAP_LOCK_ID, initiator(),
1638
9.45k
                            _tablet->tablet_id());
1639
9.45k
                }
1640
9.53k
            });
1641
1642
9.53k
    DorisMetrics::instance()->remote_compaction_read_rows_total->increment(_input_row_num);
1643
9.53k
    DorisMetrics::instance()->remote_compaction_write_rows_total->increment(
1644
9.53k
            _output_rowset->num_rows());
1645
9.53k
    DorisMetrics::instance()->remote_compaction_write_bytes_total->increment(
1646
9.53k
            _output_rowset->total_disk_size());
1647
1648
9.53k
    _load_segment_to_cache();
1649
9.53k
    return Status::OK();
1650
9.45k
}
1651
1652
0
Status CloudCompactionMixin::modify_rowsets() {
1653
0
    return Status::OK();
1654
0
}
1655
1656
9.65k
Status CloudCompactionMixin::set_storage_resource_from_input_rowsets(RowsetWriterContext& ctx) {
1657
    // Set storage resource from input rowsets by iterating backwards to find the first rowset
1658
    // with non-empty resource_id. This handles two scenarios:
1659
    // 1. Hole rowsets compaction: Multiple hole rowsets may lack storage resource.
1660
    //    Example: [0-1, 2-2, 3-3, 4-4, 5-5] where 2-5 are hole rowsets.
1661
    //    If 0-1 lacks resource_id, then 2-5 also lack resource_id.
1662
    // 2. Schema change: New tablet may have later version empty rowsets without resource_id,
1663
    //    but middle rowsets get resource_id after historical rowsets are converted.
1664
    //    We iterate backwards to find the most recent rowset with valid resource_id.
1665
1666
9.66k
    for (const auto& rowset : std::ranges::reverse_view(_input_rowsets)) {
1667
9.66k
        const auto& resource_id = rowset->rowset_meta()->resource_id();
1668
1669
9.67k
        if (!resource_id.empty()) {
1670
9.67k
            ctx.storage_resource = *DORIS_TRY(rowset->rowset_meta()->remote_storage_resource());
1671
9.67k
            return Status::OK();
1672
9.67k
        }
1673
1674
        // Validate that non-empty rowsets (num_segments > 0) must have valid resource_id
1675
        // Only hole rowsets or empty rowsets are allowed to have empty resource_id
1676
18.4E
        if (rowset->num_segments() > 0) {
1677
0
            auto error_msg = fmt::format(
1678
0
                    "Non-empty rowset must have valid resource_id. "
1679
0
                    "rowset_id={}, version=[{}-{}], is_hole_rowset={}, num_segments={}, "
1680
0
                    "tablet_id={}, table_id={}",
1681
0
                    rowset->rowset_id().to_string(), rowset->start_version(), rowset->end_version(),
1682
0
                    rowset->is_hole_rowset(), rowset->num_segments(), _tablet->tablet_id(),
1683
0
                    _tablet->table_id());
1684
1685
0
#ifndef BE_TEST
1686
0
            DCHECK(false) << error_msg;
1687
0
#endif
1688
1689
0
            return Status::InternalError<false>(error_msg);
1690
0
        }
1691
18.4E
    }
1692
1693
18.4E
    return Status::OK();
1694
9.65k
}
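The backward scan above simply picks the most recent input rowset that still carries a storage resource id; a minimal sketch with hypothetical resource ids, where empty strings model hole or empty rowsets:

    #include <iostream>
    #include <ranges>
    #include <string>
    #include <vector>

    int main() {
        // Resource ids ordered oldest to newest; the trailing rowsets have none.
        std::vector<std::string> resource_ids {"s3_res_1", "s3_res_2", "", ""};
        for (const auto& id : std::ranges::reverse_view(resource_ids)) {
            if (!id.empty()) {
                // The newest rowset with a resource id wins, here "s3_res_2".
                std::cout << "use storage resource: " << id << std::endl;
                break;
            }
        }
        return 0;
    }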
1695
1696
9.65k
Status CloudCompactionMixin::construct_output_rowset_writer(RowsetWriterContext& ctx) {
1697
    // only do index compaction for dup_keys and unique_keys with mow enabled
1698
9.65k
    if (_enable_inverted_index_compaction && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1699
9.15k
                                                _tablet->enable_unique_key_merge_on_write()) ||
1700
9.15k
                                               _tablet->keys_type() == KeysType::DUP_KEYS))) {
1701
7.85k
        construct_index_compaction_columns(ctx);
1702
7.85k
    }
1703
1704
    // Reuse the storage resource of the most recent input rowset that has one.
1705
9.65k
    RETURN_IF_ERROR(set_storage_resource_from_input_rowsets(ctx));
1706
1707
9.65k
    ctx.txn_id = boost::uuids::hash_value(UUIDGenerator::instance()->next_uuid()) &
1708
9.65k
                 std::numeric_limits<int64_t>::max(); // MUST be positive
1709
9.65k
    ctx.txn_expiration = _expiration;
1710
1711
9.65k
    ctx.version = _output_version;
1712
9.65k
    ctx.rowset_state = VISIBLE;
1713
9.65k
    ctx.segments_overlap = NONOVERLAPPING;
1714
9.65k
    ctx.tablet_schema = _cur_tablet_schema;
1715
9.65k
    ctx.newest_write_timestamp = _newest_write_timestamp;
1716
9.65k
    ctx.write_type = DataWriteType::TYPE_COMPACTION;
1717
9.65k
    ctx.compaction_type = compaction_type();
1718
9.65k
    ctx.allow_packed_file = false;
1719
1720
    // We presume that the data involved in cumulative compaction is sufficiently 'hot'
1721
    // and should always be retained in the cache.
1722
    // TODO(gavin): Ensure that the retention of hot data is implemented with precision.
1723
1724
9.65k
    ctx.write_file_cache = should_cache_compaction_output();
1725
9.65k
    ctx.file_cache_ttl_sec = _tablet->ttl_seconds();
1726
9.65k
    ctx.approximate_bytes_to_write = _input_rowsets_total_size;
1727
9.65k
    ctx.tablet = _tablet;
1728
9.65k
    ctx.job_id = _uuid;
1729
1730
9.65k
    _output_rs_writer = DORIS_TRY(_tablet->create_rowset_writer(ctx, _is_vertical));
1731
9.65k
    RETURN_IF_ERROR(
1732
9.65k
            _engine.meta_mgr().prepare_rowset(*_output_rs_writer->rowset_meta().get(), _uuid));
1733
9.65k
    return Status::OK();
1734
9.65k
}
1735
1736
110
Status CloudCompactionMixin::garbage_collection() {
1737
110
    if (!config::enable_file_cache) {
1738
0
        return Status::OK();
1739
0
    }
1740
110
    if (_output_rs_writer) {
1741
110
        auto* beta_rowset_writer = dynamic_cast<BaseBetaRowsetWriter*>(_output_rs_writer.get());
1742
110
        DCHECK(beta_rowset_writer);
1743
110
        for (const auto& [_, file_writer] : beta_rowset_writer->get_file_writers()) {
1744
96
            auto file_key = io::BlockFileCache::hash(file_writer->path().filename().native());
1745
96
            auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key);
1746
96
            file_cache->remove_if_cached_async(file_key);
1747
96
        }
1748
110
        for (const auto& [_, index_writer] : beta_rowset_writer->index_file_writers()) {
1749
0
            for (const auto& file_name : index_writer->get_index_file_names()) {
1750
0
                auto file_key = io::BlockFileCache::hash(file_name);
1751
0
                auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key);
1752
0
                file_cache->remove_if_cached_async(file_key);
1753
0
            }
1754
0
        }
1755
110
    }
1756
110
    return Status::OK();
1757
110
}
1758
1759
9.62k
void CloudCompactionMixin::update_compaction_level() {
1760
    // For index change compaction, the compaction level should not be changed,
1761
    // because the input rowset count is 1.
1762
9.62k
    if (is_index_change_compaction()) {
1763
495
        DCHECK(_input_rowsets.size() == 1);
1764
495
        _output_rowset->rowset_meta()->set_compaction_level(
1765
495
                _input_rowsets.back()->rowset_meta()->compaction_level());
1766
9.12k
    } else {
1767
9.12k
        auto compaction_policy = _tablet->tablet_meta()->compaction_policy();
1768
9.12k
        auto cumu_policy = _engine.cumu_compaction_policy(compaction_policy);
1769
9.17k
        if (cumu_policy && cumu_policy->name() == CUMULATIVE_TIME_SERIES_POLICY) {
1770
3
            int64_t compaction_level = cumu_policy->get_compaction_level(
1771
3
                    cloud_tablet(), _input_rowsets, _output_rowset);
1772
3
            _output_rowset->rowset_meta()->set_compaction_level(compaction_level);
1773
3
        }
1774
9.12k
    }
1775
9.62k
}
1776
1777
// Hole rowsets should be skipped, otherwise the count will be wrong in the MS.
1778
9.56k
int64_t CloudCompactionMixin::num_input_rowsets() const {
1779
9.56k
    int64_t count = 0;
1780
74.4k
    for (const auto& r : _input_rowsets) {
1781
74.4k
        if (!r->is_hole_rowset()) {
1782
74.3k
            count++;
1783
74.3k
        }
1784
74.4k
    }
1785
9.56k
    return count;
1786
9.56k
}
1787
1788
9.68k
bool CloudCompactionMixin::should_cache_compaction_output() {
1789
9.68k
    if (compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1790
9.51k
        return true;
1791
9.51k
    }
1792
1793
169
    if (compaction_type() == ReaderType::READER_BASE_COMPACTION) {
1794
85
        double input_rowsets_hit_cache_ratio = 0.0;
1795
1796
85
        int64_t _input_rowsets_cached_size =
1797
85
                _input_rowsets_cached_data_size + _input_rowsets_cached_index_size;
1798
85
        if (_input_rowsets_total_size > 0) {
1799
71
            input_rowsets_hit_cache_ratio =
1800
71
                    double(_input_rowsets_cached_size) / double(_input_rowsets_total_size);
1801
71
        }
1802
1803
85
        LOG(INFO) << "CloudBaseCompaction should_cache_compaction_output"
1804
85
                  << ", tablet_id=" << _tablet->tablet_id()
1805
85
                  << ", input_rowsets_hit_cache_ratio=" << input_rowsets_hit_cache_ratio
1806
85
                  << ", _input_rowsets_cached_size=" << _input_rowsets_cached_size
1807
85
                  << ", _input_rowsets_total_size=" << _input_rowsets_total_size
1808
85
                  << ", enable_file_cache_keep_base_compaction_output="
1809
85
                  << config::enable_file_cache_keep_base_compaction_output
1810
85
                  << ", file_cache_keep_base_compaction_output_min_hit_ratio="
1811
85
                  << config::file_cache_keep_base_compaction_output_min_hit_ratio;
1812
1813
85
        if (config::enable_file_cache_keep_base_compaction_output) {
1814
0
            return true;
1815
0
        }
1816
1817
85
        if (input_rowsets_hit_cache_ratio >
1818
85
            config::file_cache_keep_base_compaction_output_min_hit_ratio) {
1819
63
            return true;
1820
63
        }
1821
85
    }
1822
106
    return false;
1823
169
}
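For base compaction the decision hinges on the fraction of input bytes already resident in the file cache; a worked example with hypothetical sizes, where min_hit_ratio stands in for config::file_cache_keep_base_compaction_output_min_hit_ratio:

    #include <cstdint>
    #include <iostream>

    int main() {
        // Hypothetical input: 700 MB of 1000 MB already cached (data + index).
        int64_t cached_bytes = 700LL * 1024 * 1024;
        int64_t total_bytes = 1000LL * 1024 * 1024;
        double min_hit_ratio = 0.5;  // illustrative threshold
        double hit_ratio = total_bytes > 0 ? double(cached_bytes) / double(total_bytes) : 0.0;
        // 0.7 > 0.5, so the base compaction output would be written into the file cache.
        std::cout << std::boolalpha << (hit_ratio > min_hit_ratio) << std::endl;
        return 0;
    }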
1824
1825
#include "common/compile_check_end.h"
1826
} // namespace doris