Coverage Report

Created: 2025-05-09 17:05

/root/doris/be/src/olap/compaction.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "olap/compaction.h"
19
20
#include <fmt/format.h>
21
#include <gen_cpp/olap_file.pb.h>
22
#include <glog/logging.h>
23
24
#include <algorithm>
25
#include <cstdlib>
26
#include <list>
27
#include <map>
28
#include <memory>
29
#include <mutex>
30
#include <nlohmann/json.hpp>
31
#include <numeric>
32
#include <ostream>
33
#include <set>
34
#include <shared_mutex>
35
#include <utility>
36
37
#include "common/config.h"
38
#include "common/status.h"
39
#include "io/fs/file_system.h"
40
#include "io/fs/file_writer.h"
41
#include "io/fs/remote_file_system.h"
42
#include "io/io_common.h"
43
#include "olap/cumulative_compaction_policy.h"
44
#include "olap/cumulative_compaction_time_series_policy.h"
45
#include "olap/data_dir.h"
46
#include "olap/olap_common.h"
47
#include "olap/olap_define.h"
48
#include "olap/rowset/beta_rowset.h"
49
#include "olap/rowset/rowset.h"
50
#include "olap/rowset/rowset_meta.h"
51
#include "olap/rowset/rowset_writer.h"
52
#include "olap/rowset/rowset_writer_context.h"
53
#include "olap/rowset/segment_v2/inverted_index_compaction.h"
54
#include "olap/rowset/segment_v2/inverted_index_file_reader.h"
55
#include "olap/rowset/segment_v2/inverted_index_file_writer.h"
56
#include "olap/rowset/segment_v2/inverted_index_fs_directory.h"
57
#include "olap/storage_engine.h"
58
#include "olap/storage_policy.h"
59
#include "olap/tablet.h"
60
#include "olap/tablet_meta.h"
61
#include "olap/tablet_meta_manager.h"
62
#include "olap/task/engine_checksum_task.h"
63
#include "olap/txn_manager.h"
64
#include "olap/utils.h"
65
#include "runtime/memory/mem_tracker_limiter.h"
66
#include "runtime/thread_context.h"
67
#include "util/time.h"
68
#include "util/trace.h"
69
70
using std::vector;
71
72
namespace doris {
73
using namespace ErrorCode;
74
75
Compaction::Compaction(const TabletSharedPtr& tablet, const std::string& label)
76
        : _tablet(tablet),
77
          _input_rowsets_size(0),
78
          _input_row_num(0),
79
          _input_num_segments(0),
80
          _input_index_size(0),
81
1.61k
          _state(CompactionState::INITED) {
82
1.61k
    _mem_tracker = MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::COMPACTION, label);
83
1.61k
    init_profile(label);
84
1.61k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
85
1.61k
    _rowid_conversion = std::make_unique<RowIdConversion>();
86
1.61k
}
87
88
1.61k
Compaction::~Compaction() {
89
1.61k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_mem_tracker);
90
1.61k
    _output_rs_writer.reset();
91
1.61k
    _tablet.reset();
92
1.61k
    _input_rowsets.clear();
93
1.61k
    _output_rowset.reset();
94
1.61k
    _cur_tablet_schema.reset();
95
1.61k
    _rowid_conversion.reset();
96
1.61k
}
97
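// Note (annotation, not part of compaction.cpp): SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER
// switches the calling thread's memory tracker for the remainder of the enclosing scope,
// so allocations made in the constructor and destructor above are attributed to this
// compaction's COMPACTION-type tracker rather than to the caller's tracker.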
98
1.61k
void Compaction::init_profile(const std::string& label) {
99
1.61k
    _profile = std::make_unique<RuntimeProfile>(label);
100
101
1.61k
    _input_rowsets_data_size_counter =
102
1.61k
            ADD_COUNTER(_profile, "input_rowsets_data_size", TUnit::BYTES);
103
1.61k
    _input_rowsets_counter = ADD_COUNTER(_profile, "input_rowsets_count", TUnit::UNIT);
104
1.61k
    _input_row_num_counter = ADD_COUNTER(_profile, "input_row_num", TUnit::UNIT);
105
1.61k
    _input_segments_num_counter = ADD_COUNTER(_profile, "input_segments_num", TUnit::UNIT);
106
1.61k
    _merged_rows_counter = ADD_COUNTER(_profile, "merged_rows", TUnit::UNIT);
107
1.61k
    _filtered_rows_counter = ADD_COUNTER(_profile, "filtered_rows", TUnit::UNIT);
108
1.61k
    _output_rowset_data_size_counter =
109
1.61k
            ADD_COUNTER(_profile, "output_rowset_data_size", TUnit::BYTES);
110
1.61k
    _output_row_num_counter = ADD_COUNTER(_profile, "output_row_num", TUnit::UNIT);
111
1.61k
    _output_segments_num_counter = ADD_COUNTER(_profile, "output_segments_num", TUnit::UNIT);
112
1.61k
    _merge_rowsets_latency_timer = ADD_TIMER(_profile, "merge_rowsets_latency");
113
1.61k
}
114
115
328
Status Compaction::compact() {
116
328
    RETURN_IF_ERROR(prepare_compact());
117
324
    RETURN_IF_ERROR(execute_compact());
118
324
    return Status::OK();
119
324
}
120
121
1.36k
Status Compaction::execute_compact() {
122
1.36k
    Status st = execute_compact_impl();
123
1.36k
    if (!st.ok()) {
124
0
        gc_output_rowset();
125
0
    }
126
1.36k
    return st;
127
1.36k
}
128
129
1.36k
Status Compaction::do_compaction(int64_t permits) {
130
1.36k
    uint32_t checksum_before;
131
1.36k
    uint32_t checksum_after;
132
1.36k
    if (config::enable_compaction_checksum) {
133
0
        EngineChecksumTask checksum_task(_tablet->tablet_id(), _tablet->schema_hash(),
134
0
                                         _input_rowsets.back()->end_version(), &checksum_before);
135
0
        RETURN_IF_ERROR(checksum_task.execute());
136
0
    }
137
138
1.36k
    _tablet->data_dir()->disks_compaction_score_increment(permits);
139
1.36k
    _tablet->data_dir()->disks_compaction_num_increment(1);
140
1.36k
    Status st = do_compaction_impl(permits);
141
1.36k
    _tablet->data_dir()->disks_compaction_score_increment(-permits);
142
1.36k
    _tablet->data_dir()->disks_compaction_num_increment(-1);
143
144
1.36k
    if (config::enable_compaction_checksum) {
145
0
        EngineChecksumTask checksum_task(_tablet->tablet_id(), _tablet->schema_hash(),
146
0
                                         _input_rowsets.back()->end_version(), &checksum_after);
147
0
        RETURN_IF_ERROR(checksum_task.execute());
148
0
        if (checksum_before != checksum_after) {
149
0
            LOG(WARNING) << "Compaction tablet=" << _tablet->tablet_id()
150
0
                         << " checksum not consistent"
151
0
                         << ", before=" << checksum_before << ", checksum_after=" << checksum_after;
152
0
        }
153
0
    }
154
1.36k
    if (st.ok()) {
155
1.36k
        _load_segment_to_cache();
156
1.36k
    }
157
1.36k
    return st;
158
1.36k
}
159
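// Note (annotation, not part of compaction.cpp): the score/num increment-decrement pair
// above brackets do_compaction_impl() so the disk's compaction load is only counted while
// the task runs. A minimal RAII sketch of the same bracketing, assuming only the standard
// library (illustrative, not the method used here):
//
//   #include <functional>
//   struct ScopeGuard {
//       std::function<void()> on_exit;
//       ~ScopeGuard() { on_exit(); }  // the decrement runs on every exit path
//   };
//   // ScopeGuard guard{[&] { data_dir->disks_compaction_score_increment(-permits); }};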
160
799
bool Compaction::should_vertical_compaction() {
161
    // conditions under which vertical compaction is not used
162
799
    if (!config::enable_vertical_compaction) {
163
0
        return false;
164
0
    }
165
799
    return true;
166
799
}
167
168
799
int64_t Compaction::get_avg_segment_rows() {
169
    // take care of empty rowsets
170
    // _input_rowsets_size is the total disk_size of the input rowsets; this is the
171
    // final size after encoding and compression, so the expected destination
172
    // segment file size on disk is config::vertical_compaction_max_segment_size
173
799
    const auto& meta = _tablet->tablet_meta();
174
799
    if (meta->compaction_policy() == CUMULATIVE_TIME_SERIES_POLICY) {
175
1
        int64_t compaction_goal_size_mbytes = meta->time_series_compaction_goal_size_mbytes();
176
1
        return (compaction_goal_size_mbytes * 1024 * 1024 * 2) /
177
1
               (_input_rowsets_size / (_input_row_num + 1) + 1);
178
1
    }
179
798
    return config::vertical_compaction_max_segment_size /
180
798
           (_input_rowsets_size / (_input_row_num + 1) + 1);
181
799
}
182
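// Worked example (annotation with illustrative numbers, not from this report): under the
// time-series policy the goal size is doubled and divided by the average row size. Suppose
// time_series_compaction_goal_size_mbytes = 512 and the average-row-size term
// (_input_rowsets_size / (_input_row_num + 1) + 1) evaluates to ~101 bytes:
//
//   (512LL * 1024 * 1024 * 2) / 101  ~=  10.6 million rows per output segment
//
// The two "+ 1" terms guard against division by zero when the input is empty.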
183
6.94k
bool Compaction::is_rowset_tidy(std::string& pre_max_key, const RowsetSharedPtr& rhs) {
184
6.94k
    size_t min_tidy_size = config::ordered_data_compaction_min_segment_size;
185
6.94k
    if (rhs->num_segments() == 0) {
186
6.49k
        return true;
187
6.49k
    }
188
453
    if (rhs->is_segments_overlapping()) {
189
0
        return false;
190
0
    }
191
    // check segment size
192
453
    auto beta_rowset = reinterpret_cast<BetaRowset*>(rhs.get());
193
453
    std::vector<size_t> segments_size;
194
453
    RETURN_FALSE_IF_ERROR(beta_rowset->get_segments_size(&segments_size));
195
458
    for (auto segment_size : segments_size) {
196
        // if a segment is too small, we need to do compaction
197
458
        if (segment_size < min_tidy_size) {
198
448
            return false;
199
448
        }
200
458
    }
201
5
    std::string min_key;
202
5
    auto ret = rhs->first_key(&min_key);
203
5
    if (!ret) {
204
0
        return false;
205
0
    }
206
5
    if (min_key <= pre_max_key) {
207
0
        return false;
208
0
    }
209
5
    CHECK(rhs->last_key(&pre_max_key));
210
211
5
    return true;
212
5
}
213
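// Annotation (not part of compaction.cpp): is_rowset_tidy() accepts a rowset for the
// link-based path only when it is non-overlapping, its segments are large enough, and its
// key range starts strictly after the previous rowset's max key. A minimal standalone model
// of that ordering test, assuming string keys and one min/max pair per rowset:
//
//   bool ranges_are_tidy(const std::vector<std::pair<std::string, std::string>>& ranges) {
//       std::string pre_max_key;
//       for (const auto& [min_key, max_key] : ranges) {
//           if (min_key <= pre_max_key) return false;  // overlaps or touches predecessor
//           pre_max_key = max_key;                     // advance the running max key
//       }
//       return true;
//   }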
214
568
Status Compaction::do_compact_ordered_rowsets() {
215
568
    build_basic_info();
216
568
    RowsetWriterContext ctx;
217
568
    RETURN_IF_ERROR(construct_output_rowset_writer(ctx));
218
219
568
    LOG(INFO) << "start to do ordered data compaction, tablet=" << _tablet->tablet_id()
220
568
              << ", output_version=" << _output_version;
221
    // link data to new rowset
222
568
    auto seg_id = 0;
223
568
    std::vector<KeyBoundsPB> segment_key_bounds;
224
5.49k
    for (auto rowset : _input_rowsets) {
225
5.49k
        RETURN_IF_ERROR(rowset->link_files_to(_tablet->tablet_path(),
226
5.49k
                                              _output_rs_writer->rowset_id(), seg_id));
227
5.49k
        seg_id += rowset->num_segments();
228
229
5.49k
        std::vector<KeyBoundsPB> key_bounds;
230
5.49k
        RETURN_IF_ERROR(rowset->get_segments_key_bounds(&key_bounds));
231
5.49k
        segment_key_bounds.insert(segment_key_bounds.end(), key_bounds.begin(), key_bounds.end());
232
5.49k
    }
233
    // build output rowset
234
568
    RowsetMetaSharedPtr rowset_meta = std::make_shared<RowsetMeta>();
235
568
    rowset_meta->set_num_rows(_input_row_num);
236
568
    rowset_meta->set_total_disk_size(_input_rowsets_size);
237
568
    rowset_meta->set_data_disk_size(_input_rowsets_size);
238
568
    rowset_meta->set_index_disk_size(_input_index_size);
239
568
    rowset_meta->set_empty(_input_row_num == 0);
240
568
    rowset_meta->set_num_segments(_input_num_segments);
241
568
    rowset_meta->set_segments_overlap(NONOVERLAPPING);
242
568
    rowset_meta->set_rowset_state(VISIBLE);
243
244
568
    rowset_meta->set_segments_key_bounds(segment_key_bounds);
245
568
    _output_rowset = _output_rs_writer->manual_build(rowset_meta);
246
568
    return Status::OK();
247
568
}
248
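// Note (annotation): ordered compaction never rewrites segment data; it hard-links each
// input segment into the output rowset and renumbers it. For example, inputs with 2, 1,
// and 3 segments yield output segment ids 0-1, 2, and 3-5, since seg_id advances by
// rowset->num_segments() per input rowset.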
249
1.36k
void Compaction::build_basic_info() {
250
17.9k
    for (auto& rowset : _input_rowsets) {
251
17.9k
        _input_rowsets_size += rowset->data_disk_size();
252
17.9k
        _input_index_size += rowset->index_disk_size();
253
17.9k
        _input_row_num += rowset->num_rows();
254
17.9k
        _input_num_segments += rowset->num_segments();
255
17.9k
    }
256
1.36k
    COUNTER_UPDATE(_input_rowsets_data_size_counter, _input_rowsets_size);
257
1.36k
    COUNTER_UPDATE(_input_row_num_counter, _input_row_num);
258
1.36k
    COUNTER_UPDATE(_input_segments_num_counter, _input_num_segments);
259
260
1.36k
    _output_version =
261
1.36k
            Version(_input_rowsets.front()->start_version(), _input_rowsets.back()->end_version());
262
263
1.36k
    _newest_write_timestamp = _input_rowsets.back()->newest_write_timestamp();
264
265
1.36k
    std::vector<RowsetMetaSharedPtr> rowset_metas(_input_rowsets.size());
266
1.36k
    std::transform(_input_rowsets.begin(), _input_rowsets.end(), rowset_metas.begin(),
267
17.9k
                   [](const RowsetSharedPtr& rowset) { return rowset->rowset_meta(); });
268
1.36k
    _cur_tablet_schema = _tablet->tablet_schema_with_merged_max_schema_version(rowset_metas);
269
1.36k
}
270
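// Note (annotation): build_basic_info() assumes _input_rowsets is sorted and contiguous by
// version, so the output version is the span [front().start_version(), back().end_version()];
// e.g. inputs [5-5], [6-8], [9-9] produce output version [5-9].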
271
1.36k
bool Compaction::handle_ordered_data_compaction() {
272
1.36k
    if (!config::enable_ordered_data_compaction) {
273
0
        return false;
274
0
    }
275
1.36k
    if (compaction_type() == ReaderType::READER_COLD_DATA_COMPACTION ||
276
1.36k
        compaction_type() == ReaderType::READER_FULL_COMPACTION) {
277
        // The remote file system and full compaction do not support linking files.
278
41
        return false;
279
41
    }
280
1.32k
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
281
1.32k
        _tablet->enable_unique_key_merge_on_write()) {
282
361
        return false;
283
361
    }
284
285
965
    if (_tablet->tablet_meta()->tablet_schema()->skip_write_index_on_load()) {
286
        // Indexes are expected to be created through normal compaction
287
0
        return false;
288
0
    }
289
290
    // check delete version: if the compaction type is base compaction and
291
    // any input rowset has a delete version, use the original compaction path
292
965
    if (compaction_type() == ReaderType::READER_BASE_COMPACTION) {
293
34
        for (auto& rowset : _input_rowsets) {
294
34
            if (rowset->rowset_meta()->has_delete_predicate()) {
295
12
                return false;
296
12
            }
297
34
        }
298
12
    }
299
300
    // check if rowsets are tidy so we can just modify the meta and link
301
    // files to handle compaction
302
953
    auto input_size = _input_rowsets.size();
303
953
    std::string pre_max_key;
304
7.44k
    for (auto i = 0; i < input_size; ++i) {
305
6.94k
        if (!is_rowset_tidy(pre_max_key, _input_rowsets[i])) {
306
448
            if (i <= input_size / 2) {
307
385
                return false;
308
385
            } else {
309
63
                _input_rowsets.resize(i);
310
63
                break;
311
63
            }
312
448
        }
313
6.94k
    }
314
    // most rowsets of the current compaction are nonoverlapping,
315
    // so just handle the nonoverlapping rowsets
316
568
    auto st = do_compact_ordered_rowsets();
317
568
    if (!st.ok()) {
318
0
        LOG(WARNING) << "failed to compact ordered rowsets: " << st;
319
0
        _pending_rs_guard.drop();
320
0
    }
321
322
568
    return st.ok();
323
953
}
324
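// Note on the truncation heuristic above (annotation): a non-tidy rowset in the first half
// of the inputs abandons ordered compaction, while one in the second half merely truncates
// the input list. E.g. with 10 inputs, a first non-tidy rowset at i = 3 (<= 10 / 2) returns
// false; at i = 7 the input is resized to the first 7 rowsets and linking proceeds.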
325
799
int64_t Compaction::merge_way_num() {
326
799
    int64_t way_num = 0;
327
12.4k
    for (auto&& rowset : _input_rowsets) {
328
12.4k
        way_num += rowset->rowset_meta()->get_merge_way_num();
329
12.4k
    }
330
331
799
    return way_num;
332
799
}
333
334
1.36k
Status Compaction::do_compaction_impl(int64_t permits) {
335
1.36k
    OlapStopWatch watch;
336
337
1.36k
    if (handle_ordered_data_compaction()) {
338
567
        RETURN_IF_ERROR(modify_rowsets());
339
340
567
        int64_t now = UnixMillis();
341
567
        if (compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
342
            // Under TIME_SERIES_POLICY, generating an empty rowset doesn't require updating the timestamp.
343
567
            if (!(_tablet->tablet_meta()->compaction_policy() == CUMULATIVE_TIME_SERIES_POLICY &&
344
567
                  _output_rowset->num_segments() == 0)) {
345
564
                _tablet->set_last_cumu_compaction_success_time(now);
346
564
            }
347
567
        } else if (compaction_type() == ReaderType::READER_BASE_COMPACTION) {
348
0
            _tablet->set_last_base_compaction_success_time(now);
349
0
        } else if (compaction_type() == ReaderType::READER_FULL_COMPACTION) {
350
0
            _tablet->set_last_full_compaction_success_time(now);
351
0
        }
352
567
        auto cumu_policy = _tablet->cumulative_compaction_policy();
353
567
        LOG(INFO) << "succeed to do ordered data " << compaction_name()
354
567
                  << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
355
567
                  << ", disk=" << _tablet->data_dir()->path()
356
567
                  << ", segments=" << _input_num_segments << ", input_row_num=" << _input_row_num
357
567
                  << ", output_row_num=" << _output_rowset->num_rows()
358
567
                  << ", input_rowset_size=" << _input_rowsets_size
359
567
                  << ", output_rowset_size=" << _output_rowset->data_disk_size()
360
567
                  << ". elapsed time=" << watch.get_elapse_second()
361
567
                  << "s. cumulative_compaction_policy="
362
567
                  << (cumu_policy == nullptr ? "quick" : cumu_policy->name());
363
567
        return Status::OK();
364
567
    }
365
799
    build_basic_info();
366
367
799
    VLOG_DEBUG << "dump tablet schema: " << _cur_tablet_schema->dump_structure();
368
369
799
    LOG(INFO) << "start " << compaction_name() << ". tablet=" << _tablet->tablet_id()
370
799
              << ", output_version=" << _output_version << ", permits: " << permits;
371
799
    bool vertical_compaction = should_vertical_compaction();
372
799
    RowsetWriterContext ctx;
373
799
    RETURN_IF_ERROR(construct_input_rowset_readers());
374
799
    RETURN_IF_ERROR(construct_output_rowset_writer(ctx, vertical_compaction));
375
376
    // 2. write merged rows to output rowset
377
    // Test results show that the merger has a low memory footprint; there is no need to track its mem pool
378
799
    Merger::Statistics stats;
379
    // if ctx.columns_to_do_index_compaction is non-empty, it means we need to do inverted index compaction;
380
    // the row ID conversion matrix is needed for inverted index compaction.
381
799
    if (!ctx.columns_to_do_index_compaction.empty() ||
382
799
        (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
383
799
         _tablet->enable_unique_key_merge_on_write())) {
384
380
        stats.rowid_conversion = _rowid_conversion.get();
385
380
    }
386
799
    int64_t way_num = merge_way_num();
387
388
799
    Status res;
389
799
    {
390
799
        SCOPED_TIMER(_merge_rowsets_latency_timer);
391
799
        if (vertical_compaction) {
392
799
            res = Merger::vertical_merge_rowsets(_tablet, compaction_type(), _cur_tablet_schema,
393
799
                                                 _input_rs_readers, _output_rs_writer.get(),
394
799
                                                 get_avg_segment_rows(), way_num, &stats);
395
799
        } else {
396
0
            res = Merger::vmerge_rowsets(_tablet, compaction_type(), _cur_tablet_schema,
397
0
                                         _input_rs_readers, _output_rs_writer.get(), &stats);
398
0
        }
399
799
    }
400
401
799
    _tablet->last_compaction_status = res;
402
403
799
    if (!res.ok()) {
404
0
        LOG(WARNING) << "fail to do " << compaction_name() << ". res=" << res
405
0
                     << ", tablet=" << _tablet->tablet_id()
406
0
                     << ", output_version=" << _output_version;
407
0
        return res;
408
0
    }
409
799
    COUNTER_UPDATE(_merged_rows_counter, stats.merged_rows);
410
799
    COUNTER_UPDATE(_filtered_rows_counter, stats.filtered_rows);
411
412
799
    RETURN_NOT_OK_STATUS_WITH_WARN(_output_rs_writer->build(_output_rowset),
413
799
                                   fmt::format("rowset writer build failed. output_version: {}",
414
799
                                               _output_version.to_string()));
415
    // Now we support delete in cumu compaction. To make all data in rowsets whose version
416
    // is below output_version deletable in a future base compaction, we should carry
417
    // all delete predicates in the output rowset.
418
    // Output start version > 2 means we must set the delete predicate in the output rowset
419
799
    if (allow_delete_in_cumu_compaction() && _output_rowset->version().first > 2) {
420
0
        DeletePredicatePB delete_predicate;
421
0
        std::accumulate(
422
0
                _input_rs_readers.begin(), _input_rs_readers.end(), &delete_predicate,
423
0
                [](DeletePredicatePB* delete_predicate, const RowsetReaderSharedPtr& reader) {
424
0
                    if (reader->rowset()->rowset_meta()->has_delete_predicate()) {
425
0
                        delete_predicate->MergeFrom(
426
0
                                reader->rowset()->rowset_meta()->delete_predicate());
427
0
                    }
428
0
                    return delete_predicate;
429
0
                });
430
        // now version in delete_predicate is deprecated
431
0
        if (!delete_predicate.in_predicates().empty() ||
432
0
            !delete_predicate.sub_predicates_v2().empty() ||
433
0
            !delete_predicate.sub_predicates().empty()) {
434
0
            _output_rowset->rowset_meta()->set_delete_predicate(std::move(delete_predicate));
435
0
        }
436
0
    }
437
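// Annotation (not part of compaction.cpp): the std::accumulate call above threads one
// DeletePredicatePB* through every reader and MergeFrom()s each input predicate into it;
// the equivalent explicit loop, assuming the same types, is:
//
//   DeletePredicatePB merged;
//   for (const auto& reader : _input_rs_readers) {
//       const auto& meta = *reader->rowset()->rowset_meta();
//       if (meta.has_delete_predicate()) merged.MergeFrom(meta.delete_predicate());
//   }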
438
799
    COUNTER_UPDATE(_output_rowset_data_size_counter, _output_rowset->data_disk_size());
439
799
    COUNTER_UPDATE(_output_row_num_counter, _output_rowset->num_rows());
440
799
    COUNTER_UPDATE(_output_segments_num_counter, _output_rowset->num_segments());
441
442
    // 3. check correctness
443
799
    RETURN_IF_ERROR(check_correctness(stats));
444
445
799
    if (_input_row_num > 0 && stats.rowid_conversion && config::inverted_index_compaction_enable &&
446
799
        !ctx.columns_to_do_index_compaction.empty()) {
447
0
        OlapStopWatch inverted_watch;
448
449
        // translation vec
450
        // <<dest_idx_num, dest_docId>>
451
        // the first level vector: index indicates src segment.
452
        // the second level vector: index indicates row id of source segment,
453
        // value indicates row id of destination segment.
454
        // <UINT32_MAX, UINT32_MAX> indicates the current row no longer exists.
455
0
        std::vector<std::vector<std::pair<uint32_t, uint32_t>>> trans_vec =
456
0
                stats.rowid_conversion->get_rowid_conversion_map();
457
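// Illustration of the map retrieved above (annotation): trans_vec[src_seg][src_row] ==
// {dest_seg, dest_row}; e.g. trans_vec[0][5] == {1, 42} means row 5 of source segment 0
// now lives at row 42 of destination segment 1, and {UINT32_MAX, UINT32_MAX} marks a row
// dropped by the merge.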
458
        // source rowset,segment -> index_id
459
0
        std::map<std::pair<RowsetId, uint32_t>, uint32_t> src_seg_to_id_map =
460
0
                stats.rowid_conversion->get_src_segment_to_id_map();
461
        // dest rowset id
462
0
        RowsetId dest_rowset_id = stats.rowid_conversion->get_dst_rowset_id();
463
        // dest segment id -> num rows
464
0
        std::vector<uint32_t> dest_segment_num_rows;
465
0
        RETURN_IF_ERROR(_output_rs_writer->get_segment_num_rows(&dest_segment_num_rows));
466
467
0
        auto src_segment_num = src_seg_to_id_map.size();
468
0
        auto dest_segment_num = dest_segment_num_rows.size();
469
470
0
        if (dest_segment_num > 0) {
471
            // src index files
472
            // format: rowsetId_segmentId
473
0
            std::vector<std::string> src_index_files(src_segment_num);
474
0
            for (const auto& m : src_seg_to_id_map) {
475
0
                std::pair<RowsetId, uint32_t> p = m.first;
476
0
                src_index_files[m.second] = p.first.to_string() + "_" + std::to_string(p.second);
477
0
            }
478
479
            // dest index files
480
            // format: rowsetId_segmentId
481
0
            std::vector<std::string> dest_index_files(dest_segment_num);
482
0
            for (int i = 0; i < dest_segment_num; ++i) {
483
0
                auto prefix = dest_rowset_id.to_string() + "_" + std::to_string(i);
484
0
                dest_index_files[i] = prefix;
485
0
            }
486
487
            // Only write info files when debug index compaction is enabled.
488
            // The files are used to debug index compaction and work with index_tool.
489
0
            if (config::debug_inverted_index_compaction) {
490
0
                auto write_json_to_file = [&](const nlohmann::json& json_obj,
491
0
                                              const std::string& file_name) {
492
0
                    io::FileWriterPtr file_writer;
493
0
                    std::string file_path =
494
0
                            fmt::format("{}/{}.json", std::string(getenv("LOG_DIR")), file_name);
495
0
                    RETURN_IF_ERROR(
496
0
                            io::global_local_filesystem()->create_file(file_path, &file_writer));
497
0
                    RETURN_IF_ERROR(file_writer->append(json_obj.dump()));
498
0
                    RETURN_IF_ERROR(file_writer->append("\n"));
499
0
                    return file_writer->close();
500
0
                };
501
502
                // Convert trans_vec to JSON and write it to a file
503
0
                nlohmann::json trans_vec_json = trans_vec;
504
0
                auto output_version = _output_version.to_string().substr(
505
0
                        1, _output_version.to_string().size() - 2);
506
0
                RETURN_IF_ERROR(write_json_to_file(
507
0
                        trans_vec_json,
508
0
                        fmt::format("trans_vec_{}_{}", _tablet->tablet_id(), output_version)));
509
510
0
                nlohmann::json src_index_files_json = src_index_files;
511
0
                RETURN_IF_ERROR(write_json_to_file(
512
0
                        src_index_files_json,
513
0
                        fmt::format("src_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
514
515
0
                nlohmann::json dest_index_files_json = dest_index_files;
516
0
                RETURN_IF_ERROR(write_json_to_file(
517
0
                        dest_index_files_json,
518
0
                        fmt::format("dest_idx_dirs_{}_{}", _tablet->tablet_id(), output_version)));
519
520
0
                nlohmann::json dest_segment_num_rows_json = dest_segment_num_rows;
521
0
                RETURN_IF_ERROR(
522
0
                        write_json_to_file(dest_segment_num_rows_json,
523
0
                                           fmt::format("dest_seg_num_rows_{}_{}",
524
0
                                                       _tablet->tablet_id(), output_version)));
525
0
            }
526
527
            // create index writers to compact indexes
528
0
            const auto& fs = _output_rowset->rowset_meta()->fs();
529
0
            const auto& tablet_path = _tablet->tablet_path();
530
531
            // src index dirs
532
            // format: rowsetId_segmentId
533
0
            std::vector<std::unique_ptr<InvertedIndexFileReader>> inverted_index_file_readers(
534
0
                    src_segment_num);
535
0
            for (const auto& m : src_seg_to_id_map) {
536
0
                std::pair<RowsetId, uint32_t> p = m.first;
537
0
                auto segment_file_name =
538
0
                        p.first.to_string() + "_" + std::to_string(p.second) + ".dat";
539
0
                auto inverted_index_file_reader = std::make_unique<InvertedIndexFileReader>(
540
0
                        fs, tablet_path, segment_file_name,
541
0
                        _cur_tablet_schema->get_inverted_index_storage_format());
542
0
                bool open_idx_file_cache = false;
543
0
                auto st = inverted_index_file_reader->init(config::inverted_index_read_buffer_size,
544
0
                                                           open_idx_file_cache);
545
0
                if (!st.ok()) {
546
0
                    LOG(ERROR) << "init inverted index "
547
0
                               << InvertedIndexDescriptor::get_index_file_name(segment_file_name)
548
0
                               << " failed in compaction when init inverted index file reader";
549
0
                    return st;
550
0
                }
551
0
                inverted_index_file_readers[m.second] = std::move(inverted_index_file_reader);
552
0
            }
553
554
            // dest index files
555
            // format: rowsetId_segmentId
556
0
            std::vector<std::unique_ptr<InvertedIndexFileWriter>> inverted_index_file_writers(
557
0
                    dest_segment_num);
558
559
            // Some columns have already been indexed
560
            // key: seg_id, value: inverted index file size
561
0
            std::unordered_map<int, int64_t> compacted_idx_file_size;
562
0
            for (int seg_id = 0; seg_id < dest_segment_num; ++seg_id) {
563
0
                auto prefix = dest_rowset_id.to_string() + "_" + std::to_string(seg_id) + ".dat";
564
0
                auto inverted_index_file_reader = std::make_unique<InvertedIndexFileReader>(
565
0
                        fs, tablet_path, prefix,
566
0
                        _cur_tablet_schema->get_inverted_index_storage_format());
567
0
                bool open_idx_file_cache = false;
568
0
                auto st = inverted_index_file_reader->init(config::inverted_index_read_buffer_size,
569
0
                                                           open_idx_file_cache);
570
0
                if (st.ok()) {
571
0
                    auto index_not_need_to_compact =
572
0
                            DORIS_TRY(inverted_index_file_reader->get_all_directories());
573
                    // V1: each index is a separate file
574
                    // V2: all indexes are in a single file
575
0
                    if (_cur_tablet_schema->get_inverted_index_storage_format() !=
576
0
                        doris::InvertedIndexStorageFormatPB::V1) {
577
0
                        int64_t fsize = 0;
578
0
                        st = fs->file_size(InvertedIndexDescriptor::get_index_file_name(prefix),
579
0
                                           &fsize);
580
0
                        if (!st.ok()) {
581
0
                            LOG(ERROR) << "file size error in index compaction, error:" << st.msg();
582
0
                            return st;
583
0
                        }
584
0
                        compacted_idx_file_size[seg_id] = fsize;
585
0
                    }
586
0
                    auto inverted_index_file_writer = std::make_unique<InvertedIndexFileWriter>(
587
0
                            fs, tablet_path, prefix,
588
0
                            _cur_tablet_schema->get_inverted_index_storage_format());
589
0
                    RETURN_NOT_OK_STATUS_WITH_WARN(
590
0
                            inverted_index_file_writer->initialize(index_not_need_to_compact),
591
0
                            "failed to initialize inverted_index_file_writer for " +
592
0
                                    inverted_index_file_writer->get_index_file_name());
593
0
                    inverted_index_file_writers[seg_id] = std::move(inverted_index_file_writer);
594
0
                } else if (st.is<ErrorCode::INVERTED_INDEX_FILE_NOT_FOUND>()) {
595
0
                    auto inverted_index_file_writer = std::make_unique<InvertedIndexFileWriter>(
596
0
                            fs, tablet_path, prefix,
597
0
                            _cur_tablet_schema->get_inverted_index_storage_format());
598
0
                    inverted_index_file_writers[seg_id] = std::move(inverted_index_file_writer);
599
                    // no index file
600
0
                    compacted_idx_file_size[seg_id] = 0;
601
0
                } else {
602
0
                    LOG(ERROR) << "init inverted index "
603
0
                               << InvertedIndexDescriptor::get_index_file_name(prefix)
604
0
                               << " failed in compaction when create inverted index file writer";
605
0
                    return st;
606
0
                }
607
0
            }
608
609
            // we choose the first destination segment name as the temporary index writer path
610
            // Used to distinguish between different index compactions
611
0
            auto index_tmp_path = tablet_path + "/" + dest_rowset_id.to_string() + "_" + "tmp";
612
0
            LOG(INFO) << "start index compaction"
613
0
                      << ". tablet=" << _tablet->tablet_id()
614
0
                      << ", source index size=" << src_segment_num
615
0
                      << ", destination index size=" << dest_segment_num << ".";
616
617
0
            auto error_handler = [this](int64_t index_id, int64_t column_uniq_id) {
618
0
                LOG(WARNING) << "failed to do index compaction"
619
0
                             << ". tablet=" << _tablet->tablet_id()
620
0
                             << ". column uniq id=" << column_uniq_id << ". index_id=" << index_id;
621
0
                for (auto& rowset : _input_rowsets) {
622
0
                    rowset->set_skip_index_compaction(column_uniq_id);
623
0
                    LOG(INFO) << "mark skipping inverted index compaction next time"
624
0
                              << ". tablet=" << _tablet->tablet_id()
625
0
                              << ", rowset=" << rowset->rowset_id()
626
0
                              << ", column uniq id=" << column_uniq_id << ", index_id=" << index_id;
627
0
                }
628
0
            };
629
630
0
            Status status = Status::OK();
631
0
            for (auto&& column_uniq_id : ctx.columns_to_do_index_compaction) {
632
0
                auto col = _cur_tablet_schema->column_by_uid(column_uniq_id);
633
0
                const auto* index_meta = _cur_tablet_schema->get_inverted_index(col);
634
635
                // if index properties differ, index compaction may need to be skipped.
636
0
                bool is_continue = false;
637
0
                std::optional<std::map<std::string, std::string>> first_properties;
638
0
                for (const auto& rowset : _input_rowsets) {
639
0
                    const auto* tablet_index = rowset->tablet_schema()->get_inverted_index(col);
640
                    // no inverted index or index id is different from current index id
641
0
                    if (tablet_index == nullptr ||
642
0
                        tablet_index->index_id() != index_meta->index_id()) {
643
0
                        error_handler(index_meta->index_id(), column_uniq_id);
644
0
                        status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
645
0
                                "index ids are different, skip index compaction");
646
0
                        is_continue = true;
647
0
                        break;
648
0
                    }
649
0
                    const auto& properties = tablet_index->properties();
650
0
                    if (!first_properties.has_value()) {
651
0
                        first_properties = properties;
652
0
                    } else {
653
0
                        if (properties != first_properties.value()) {
654
0
                            error_handler(index_meta->index_id(), column_uniq_id);
655
0
                            status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(
656
0
                                    "if index properties are different, index compaction needs to "
657
0
                                    "be "
658
0
                                    "skipped.");
659
0
                            is_continue = true;
660
0
                            break;
661
0
                        }
662
0
                    }
663
0
                }
664
0
                if (is_continue) {
665
0
                    continue;
666
0
                }
667
668
0
                std::vector<lucene::store::Directory*> dest_index_dirs(dest_segment_num);
669
0
                try {
670
0
                    std::vector<std::unique_ptr<DorisCompoundReader>> src_idx_dirs(src_segment_num);
671
0
                    for (int src_segment_id = 0; src_segment_id < src_segment_num;
672
0
                         src_segment_id++) {
673
0
                        auto res = inverted_index_file_readers[src_segment_id]->open(index_meta);
674
0
                        DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_reader", {
675
0
                            res = ResultError(Status::Error<
676
0
                                              ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
677
0
                                    "debug point: Compaction::open_index_file_reader error"));
678
0
                        })
679
0
                        if (!res.has_value()) {
680
0
                            throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
681
0
                                            res.error().msg());
682
0
                        }
683
0
                        src_idx_dirs[src_segment_id] = std::move(res.value());
684
0
                    }
685
0
                    for (int dest_segment_id = 0; dest_segment_id < dest_segment_num;
686
0
                         dest_segment_id++) {
687
0
                        auto res = inverted_index_file_writers[dest_segment_id]->open(index_meta);
688
0
                        DBUG_EXECUTE_IF("Compaction::open_inverted_index_file_writer", {
689
0
                            res = ResultError(
690
0
                                    Status::Error<ErrorCode::INVERTED_INDEX_CLUCENE_ERROR>(
691
0
                                            "debug point: "
692
0
                                            "Compaction::open_inverted_index_file_writer error"));
693
0
                        })
694
0
                        if (!res.has_value()) {
695
0
                            throw Exception(ErrorCode::INVERTED_INDEX_COMPACTION_ERROR,
696
0
                                            res.error().msg());
697
0
                        }
698
0
                        dest_index_dirs[dest_segment_id] = res.value();
699
0
                    }
700
0
                    auto st = compact_column(index_meta->index_id(), src_idx_dirs, dest_index_dirs,
701
0
                                             fs, index_tmp_path, trans_vec, dest_segment_num_rows);
702
0
                    if (!st.ok()) {
703
0
                        error_handler(index_meta->index_id(), column_uniq_id);
704
0
                        status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(st.msg());
705
0
                    }
706
0
                } catch (CLuceneError& e) {
707
0
                    error_handler(index_meta->index_id(), column_uniq_id);
708
0
                    status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
709
0
                } catch (const Exception& e) {
710
0
                    error_handler(index_meta->index_id(), column_uniq_id);
711
0
                    status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(e.what());
712
0
                }
713
0
            }
714
0
            uint64_t inverted_index_file_size = 0;
715
0
            for (int seg_id = 0; seg_id < dest_segment_num; ++seg_id) {
716
0
                auto inverted_index_file_writer = inverted_index_file_writers[seg_id].get();
717
0
                if (Status st = inverted_index_file_writer->close(); !st.ok()) {
718
0
                    status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(st.msg());
719
0
                } else {
720
0
                    inverted_index_file_size += inverted_index_file_writer->get_index_file_size();
721
0
                    inverted_index_file_size -= compacted_idx_file_size[seg_id];
722
0
                }
723
0
            }
724
            // check index compaction status. If status is not ok, we should return error and end this compaction round.
725
0
            if (!status.ok()) {
726
0
                return status;
727
0
            }
728
729
            // index compaction should update total disk size and index disk size
730
0
            _output_rowset->rowset_meta()->set_data_disk_size(_output_rowset->data_disk_size() +
731
0
                                                              inverted_index_file_size);
732
0
            _output_rowset->rowset_meta()->set_total_disk_size(_output_rowset->data_disk_size() +
733
0
                                                               inverted_index_file_size);
734
0
            _output_rowset->rowset_meta()->set_index_disk_size(_output_rowset->index_disk_size() +
735
0
                                                               inverted_index_file_size);
736
737
0
            COUNTER_UPDATE(_output_rowset_data_size_counter, _output_rowset->data_disk_size());
738
0
            LOG(INFO) << "succeed to do index compaction"
739
0
                      << ". tablet=" << _tablet->tablet_id()
740
0
                      << ", input row number=" << _input_row_num
741
0
                      << ", output row number=" << _output_rowset->num_rows()
742
0
                      << ", input_rowset_size=" << _input_rowsets_size
743
0
                      << ", output_rowset_size=" << _output_rowset->data_disk_size()
744
0
                      << ", inverted index file size=" << inverted_index_file_size
745
0
                      << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
746
0
        } else {
747
0
            LOG(INFO) << "skip doing index compaction due to no output segments"
748
0
                      << ". tablet=" << _tablet->tablet_id()
749
0
                      << ", input row number=" << _input_row_num
750
0
                      << ", output row number=" << _output_rowset->num_rows()
751
0
                      << ". elapsed time=" << inverted_watch.get_elapse_second() << "s.";
752
0
        }
753
0
    }
754
755
    // 4. modify rowsets in memory
756
799
    RETURN_IF_ERROR(modify_rowsets(&stats));
757
758
    // 5. update last success compaction time
759
799
    int64_t now = UnixMillis();
760
    // TODO(yingchun): do this judgment in the Tablet class
761
799
    if (compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
762
        // Under TIME_SERIES_POLICY, generating an empty rowset doesn't require updating the timestamp.
763
745
        if (!(_tablet->tablet_meta()->compaction_policy() == CUMULATIVE_TIME_SERIES_POLICY &&
764
745
              _output_rowset->num_segments() == 0)) {
765
745
            _tablet->set_last_cumu_compaction_success_time(now);
766
745
        }
767
745
    } else if (compaction_type() == ReaderType::READER_BASE_COMPACTION) {
768
13
        _tablet->set_last_base_compaction_success_time(now);
769
41
    } else if (compaction_type() == ReaderType::READER_FULL_COMPACTION) {
770
39
        _tablet->set_last_full_compaction_success_time(now);
771
39
    }
772
773
799
    int64_t current_max_version;
774
799
    {
775
799
        std::shared_lock rdlock(_tablet->get_header_lock());
776
799
        RowsetSharedPtr max_rowset = _tablet->rowset_with_max_version();
777
799
        if (max_rowset == nullptr) {
778
0
            current_max_version = -1;
779
799
        } else {
780
799
            current_max_version = _tablet->rowset_with_max_version()->end_version();
781
799
        }
782
799
    }
783
784
799
    auto cumu_policy = _tablet->cumulative_compaction_policy();
785
799
    DCHECK(cumu_policy);
786
799
    LOG(INFO) << "succeed to do " << compaction_name() << " is_vertical=" << vertical_compaction
787
799
              << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version
788
799
              << ", current_max_version=" << current_max_version
789
799
              << ", disk=" << _tablet->data_dir()->path() << ", segments=" << _input_num_segments
790
799
              << ", input_rowset_size=" << _input_rowsets_size
791
799
              << ", output_rowset_size=" << _output_rowset->data_disk_size()
792
799
              << ", input_row_num=" << _input_row_num
793
799
              << ", output_row_num=" << _output_rowset->num_rows()
794
799
              << ", filtered_row_num=" << stats.filtered_rows
795
799
              << ", merged_row_num=" << stats.merged_rows
796
799
              << ". elapsed time=" << watch.get_elapse_second()
797
799
              << "s. cumulative_compaction_policy=" << cumu_policy->name()
798
799
              << ", compact_row_per_second=" << int(_input_row_num / watch.get_elapse_second());
799
800
799
    return Status::OK();
801
799
}
802
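// Summary of do_compaction_impl() above (annotation): (1) try the ordered-data fast path,
// otherwise build basic info and input readers; (2) merge rows into the output rowset
// (vertical or horizontal merge); (3) check row-count correctness and optionally compact
// inverted indexes; (4) modify the tablet's rowsets in memory; (5) record the last
// success timestamp for the compaction type.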
803
1.36k
Status Compaction::construct_output_rowset_writer(RowsetWriterContext& ctx, bool is_vertical) {
804
1.36k
    ctx.version = _output_version;
805
1.36k
    ctx.rowset_state = VISIBLE;
806
1.36k
    ctx.segments_overlap = NONOVERLAPPING;
807
1.36k
    ctx.tablet_schema = _cur_tablet_schema;
808
1.36k
    ctx.newest_write_timestamp = _newest_write_timestamp;
809
1.36k
    ctx.write_type = DataWriteType::TYPE_COMPACTION;
810
1.36k
    if (config::inverted_index_compaction_enable &&
811
1.36k
        (((_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
812
1
           _tablet->enable_unique_key_merge_on_write()) ||
813
1
          _tablet->keys_type() == KeysType::DUP_KEYS)) &&
814
1.36k
        _cur_tablet_schema->get_inverted_index_storage_format() ==
815
1
                InvertedIndexStorageFormatPB::V1) {
816
4
        for (const auto& index : _cur_tablet_schema->indexes()) {
817
4
            if (index.index_type() == IndexType::INVERTED) {
818
4
                auto col_unique_ids = index.col_unique_ids();
819
                // check if column unique ids are empty to avoid a crash
820
4
                if (col_unique_ids.empty()) {
821
0
                    LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] index["
822
0
                                 << index.index_id()
823
0
                                 << "] has no column unique id, will skip index compaction."
824
0
                                 << " tablet_schema=" << _cur_tablet_schema->dump_full_schema();
825
0
                    continue;
826
0
                }
827
4
                auto col_unique_id = col_unique_ids[0];
828
4
                if (!_cur_tablet_schema->has_column_unique_id(col_unique_id)) {
829
0
                    LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id["
830
0
                                 << col_unique_id << "] not found, will skip index compaction";
831
0
                    continue;
832
0
                }
833
                // Avoid doing inverted index compaction on non-slice type columns
834
4
                if (!field_is_slice_type(_cur_tablet_schema->column_by_uid(col_unique_id).type())) {
835
2
                    continue;
836
2
                }
837
                // NOTE: src_rs may still be building its index, so it may not contain inverted index info.
838
2
                bool all_have_inverted_index = std::all_of(
839
6
                        _input_rowsets.begin(), _input_rowsets.end(), [&](const auto& src_rs) {
840
6
                            BetaRowsetSharedPtr rowset =
841
6
                                    std::static_pointer_cast<BetaRowset>(src_rs);
842
6
                            if (rowset == nullptr) {
843
0
                                LOG(WARNING) << "tablet[" << _tablet->tablet_id()
844
0
                                             << "] rowset is null, will skip index compaction";
845
0
                                return false;
846
0
                            }
847
6
                            if (rowset->is_skip_index_compaction(col_unique_id)) {
848
0
                                LOG(WARNING)
849
0
                                        << "tablet[" << _tablet->tablet_id() << "] rowset["
850
0
                                        << rowset->rowset_id() << "] column_unique_id["
851
0
                                        << col_unique_id
852
0
                                        << "] skip inverted index compaction due to last failure";
853
0
                                return false;
854
0
                            }
855
6
                            auto fs = rowset->rowset_meta()->fs();
856
857
6
                            const auto* index_meta =
858
6
                                    rowset->tablet_schema()->get_inverted_index(col_unique_id, "");
859
6
                            if (index_meta == nullptr) {
860
0
                                LOG(WARNING) << "tablet[" << _tablet->tablet_id()
861
0
                                             << "] column_unique_id[" << col_unique_id
862
0
                                             << "] index meta is null, will skip index compaction";
863
0
                                return false;
864
0
                            }
865
26
                            for (auto i = 0; i < rowset->num_segments(); i++) {
866
20
                                std::string index_file_path;
867
20
                                try {
868
20
                                    auto segment_file = rowset->segment_file_path(i);
869
20
                                    io::Path segment_path(segment_file);
870
20
                                    auto inverted_index_file_reader =
871
20
                                            std::make_unique<InvertedIndexFileReader>(
872
20
                                                    fs, segment_path.parent_path(),
873
20
                                                    segment_path.filename(),
874
20
                                                    _cur_tablet_schema
875
20
                                                            ->get_inverted_index_storage_format());
876
20
                                    bool open_idx_file_cache = false;
877
20
                                    auto st = inverted_index_file_reader->init(
878
20
                                            config::inverted_index_read_buffer_size,
879
20
                                            open_idx_file_cache);
880
20
                                    index_file_path =
881
20
                                            inverted_index_file_reader->get_index_file_path(
882
20
                                                    index_meta);
883
20
                                    if (!st.ok()) {
884
0
                                        LOG(WARNING) << "init index " << index_file_path
885
0
                                                     << " error:" << st;
886
0
                                        return false;
887
0
                                    }
888
889
20
                                    bool exists = false;
890
20
                                    if (!inverted_index_file_reader
891
20
                                                 ->index_file_exist(index_meta, &exists)
892
20
                                                 .ok()) {
893
0
                                        LOG(ERROR) << index_file_path << " fs->exists error";
894
0
                                        return false;
895
0
                                    }
896
897
20
                                    if (!exists) {
898
0
                                        LOG(WARNING)
899
0
                                                << "tablet[" << _tablet->tablet_id()
900
0
                                                << "] column_unique_id[" << col_unique_id << "],"
901
0
                                                << index_file_path
902
0
                                                << " is not exists, will skip index compaction";
903
0
                                        return false;
904
0
                                    }
905
906
                                    // check index meta
907
20
                                    auto result = inverted_index_file_reader->open(index_meta);
908
20
                                    if (!result.has_value()) {
909
0
                                        LOG(WARNING) << "open index " << index_file_path
910
0
                                                     << " error:" << result.error();
911
0
                                        return false;
912
0
                                    }
913
20
                                    auto reader = std::move(result.value());
914
20
                                    std::vector<std::string> files;
915
20
                                    reader->list(&files);
916
20
                                    reader->close();
917
918
20
                                    DBUG_EXECUTE_IF(
919
20
                                            "Compaction::construct_skip_inverted_index_index_"
920
20
                                            "reader_"
921
20
                                            "close_error",
922
20
                                            {
923
20
                                                _CLTHROWA(CL_ERR_IO,
924
20
                                                          "debug point: reader close error");
925
20
                                            })
926
927
                                    // why 3?
928
                                    // a slice-type index file has at least 3 files: null_bitmap, segments_N, segments.gen
929
20
                                    if (files.size() < 3) {
930
0
                                        LOG(WARNING) << "tablet[" << _tablet->tablet_id()
931
0
                                                     << "] column_unique_id[" << col_unique_id
932
0
                                                     << "]," << index_file_path
933
0
                                                     << " is corrupted, will skip index compaction";
934
0
                                        return false;
935
0
                                    }
936
20
                                } catch (CLuceneError& err) {
937
0
                                    LOG(WARNING) << "tablet[" << _tablet->tablet_id()
938
0
                                                 << "] column_unique_id[" << col_unique_id
939
0
                                                 << "] open index[" << index_file_path
940
0
                                                 << "], will skip index compaction, error:"
941
0
                                                 << err.what();
942
0
                                    return false;
943
0
                                }
944
20
                            }
945
6
                            return true;
946
6
                        });
947
2
                if (all_have_inverted_index) {
948
2
                    ctx.columns_to_do_index_compaction.insert(col_unique_id);
949
2
                }
950
2
            }
951
4
        }
952
1
    }
953
1.36k
    if (compaction_type() == ReaderType::READER_COLD_DATA_COMPACTION) {
954
        // write output rowset to storage policy resource
955
2
        auto storage_policy = get_storage_policy(_tablet->storage_policy_id());
956
2
        if (storage_policy == nullptr) {
957
0
            return Status::InternalError("could not find storage_policy, storage_policy_id={}",
958
0
                                         _tablet->storage_policy_id());
959
0
        }
960
2
        auto resource = get_storage_resource(storage_policy->resource_id);
961
2
        if (resource.fs == nullptr) {
962
0
            return Status::InternalError("could not find resource, resouce_id={}",
963
0
                                         storage_policy->resource_id);
964
0
        }
965
2
        DCHECK(atol(resource.fs->id().c_str()) == storage_policy->resource_id);
966
2
        DCHECK(resource.fs->type() != io::FileSystemType::LOCAL);
967
2
        ctx.fs = std::move(resource.fs);
968
2
    }
969
1.36k
    _output_rs_writer = DORIS_TRY(_tablet->create_rowset_writer(ctx, is_vertical));
970
1.36k
    _pending_rs_guard = StorageEngine::instance()->add_pending_rowset(ctx);
971
1.36k
    return Status::OK();
972
1.36k
}
973
974
799
Status Compaction::construct_input_rowset_readers() {
975
12.4k
    for (auto& rowset : _input_rowsets) {
976
12.4k
        RowsetReaderSharedPtr rs_reader;
977
12.4k
        RETURN_IF_ERROR(rowset->create_reader(&rs_reader));
978
12.4k
        _input_rs_readers.push_back(std::move(rs_reader));
979
12.4k
    }
980
799
    return Status::OK();
981
799
}
982
983
1.32k
Status Compaction::modify_rowsets(const Merger::Statistics* stats) {
984
1.32k
    std::vector<RowsetSharedPtr> output_rowsets;
985
1.32k
    output_rowsets.push_back(_output_rowset);
986
987
1.32k
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
988
1.32k
        _tablet->enable_unique_key_merge_on_write() &&
989
1.32k
        _tablet->tablet_schema()->cluster_key_idxes().empty()) {
990
361
        Version version = _tablet->max_version();
991
361
        DeleteBitmap output_rowset_delete_bitmap(_tablet->tablet_id());
992
361
        std::unique_ptr<RowLocationSet> missed_rows;
993
361
        if (config::enable_missing_rows_correctness_check && !allow_delete_in_cumu_compaction() &&
994
361
            compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
995
360
            missed_rows = std::make_unique<RowLocationSet>();
996
360
            LOG(INFO) << "RowLocation Set inited succ for tablet:" << _tablet->tablet_id();
997
360
        }
998
361
        std::unique_ptr<std::map<RowsetSharedPtr, RowLocationPairList>> location_map;
999
361
        if (config::enable_rowid_conversion_correctness_check) {
1000
0
            location_map = std::make_unique<std::map<RowsetSharedPtr, RowLocationPairList>>();
1001
0
            LOG(INFO) << "Location Map inited succ for tablet:" << _tablet->tablet_id();
1002
0
        }
1003
        // Convert the delete bitmap of the input rowsets to the output rowset.
1004
        // New loads are not blocked, so some keys of the input rowsets might
1005
        // be deleted in the meantime. The delete bitmap of this incremental
1006
        // data is handled later.
1007
        // TODO(LiaoXin): check if there are duplicate keys
1008
361
        std::size_t missed_rows_size = 0;
1009
361
        _tablet->calc_compaction_output_rowset_delete_bitmap(
1010
361
                _input_rowsets, *_rowid_conversion, 0, version.second + 1, missed_rows.get(),
1011
361
                location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1012
361
                &output_rowset_delete_bitmap);
1013
361
        if (missed_rows) {
1014
360
            missed_rows_size = missed_rows->size();
1015
            // Suppose a heavy schema change process on BE converting tablet A to tablet B.
1016
            // 1. During the schema change double write, new loads write versions [X-Y] on tablet B.
1017
            // 2. Rowsets with versions [a],[a+1],...,[b-1],[b] on tablet B are picked for cumu compaction (X<=a<b<=Y). (Cumu compaction
1018
            //    on the new tablet during schema change double write is allowed after https://github.com/apache/doris/pull/16470.)
1019
            // 3. Schema change removes all rowsets on tablet B before version Z (b<=Z<=Y) before it begins to convert historical rowsets.
1020
            // 4. Schema change finishes.
1021
            // 5. Cumu compaction begins on the new tablet with versions [a],...,[b]. If there are duplicate keys between these rowsets,
1022
            //    the compaction check will fail, because these rowsets skipped delete bitmap calculation in both the commit phase and
1023
            //    the publish phase (tablet B was in NOT_READY state when they were written).
1024
1025
            // Since the cumu compaction will ultimately fail in this situation anyway (`Tablet::modify_rowsets` checks whether the rowsets in
1026
            // `to_delete` (_input_rowsets) still exist in the tablet's `_rs_version_map`), we can simply skip the missed-rows check here.
1027
360
            bool need_to_check_missed_rows = true;
1028
360
            {
1029
360
                std::shared_lock rlock(_tablet->get_header_lock());
1030
360
                need_to_check_missed_rows =
1031
360
                        std::all_of(_input_rowsets.begin(), _input_rowsets.end(),
1032
5.97k
                                    [&](const RowsetSharedPtr& rowset) {
1033
5.97k
                                        return _tablet->rowset_exists_unlocked(rowset);
1034
5.97k
                                    });
1035
360
            }
1036
1037
360
            if (_tablet->tablet_state() == TABLET_RUNNING && stats != nullptr &&
1038
360
                stats->merged_rows != missed_rows_size && need_to_check_missed_rows) {
1039
0
                std::stringstream ss;
1040
0
                ss << "cumulative compaction: the merged rows(" << stats->merged_rows
1041
0
                   << ") is not equal to missed rows(" << missed_rows_size
1042
0
                   << ") in rowid conversion, tablet_id: " << _tablet->tablet_id()
1043
0
                   << ", table_id:" << _tablet->table_id();
1044
0
                if (missed_rows_size == 0) {
1045
0
                    ss << ", debug info: ";
1046
0
                    DeleteBitmap subset_map(_tablet->tablet_id());
1047
0
                    for (auto rs : _input_rowsets) {
1048
0
                        _tablet->tablet_meta()->delete_bitmap().subset(
1049
0
                                {rs->rowset_id(), 0, 0},
1050
0
                                {rs->rowset_id(), rs->num_segments(), version.second + 1},
1051
0
                                &subset_map);
1052
0
                        ss << "(rowset id: " << rs->rowset_id()
1053
0
                           << ", delete bitmap cardinality: " << subset_map.cardinality() << ")";
1054
0
                    }
1055
0
                    ss << ", version[0-" << version.second + 1 << "]";
1056
0
                }
1057
0
                DCHECK(false) << ss.str();
1058
0
                LOG(WARNING) << ss.str();
1059
0
            }
1060
360
        }
1061
1062
361
        if (config::enable_rowid_conversion_correctness_check) {
1063
0
            RETURN_IF_ERROR(_tablet->check_rowid_conversion(_output_rowset, *location_map));
1064
0
            location_map->clear();
1065
0
        }
1066
1067
361
        {
1068
361
            std::lock_guard<std::mutex> wrlock_(_tablet->get_rowset_update_lock());
1069
361
            std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1070
361
            SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1071
1072
            // Here we calculate the delete bitmaps of all rowsets that are committed but not yet published, to reduce the calculation pressure
1073
            // of the publish phase.
1074
            // All rowsets that need recalculation have already been published, so we don't need to acquire the lock.
1075
            // Step1: collect the delete bitmaps of all of this tablet's committed rowsets.
1076
361
            CommitTabletTxnInfoVec commit_tablet_txn_info_vec {};
1077
361
            StorageEngine::instance()->txn_manager()->get_all_commit_tablet_txn_info_by_tablet(
1078
361
                    _tablet, &commit_tablet_txn_info_vec);
1079
1080
            // Step2: calculate the delete bitmaps of all rowsets published during the compaction.
1081
361
            for (auto& it : commit_tablet_txn_info_vec) {
1082
38
                if (!_check_if_includes_input_rowsets(it.rowset_ids)) {
1083
                    // When calculating the delete bitmap of all committed rowsets relative to the compaction,
1084
                    // there may be cases where the compacted rowsets are newer than the committed rowsets.
1085
                    // In that case, row id conversion cannot be performed, otherwise data would be lost.
1086
                    // Therefore, we need to check that every committed rowset has calculated its delete bitmap for
1087
                    // all compaction input rowsets.
1088
0
                    continue;
1089
0
                }
1090
38
                DeleteBitmap txn_output_delete_bitmap(_tablet->tablet_id());
1091
38
                _tablet->calc_compaction_output_rowset_delete_bitmap(
1092
38
                        _input_rowsets, *_rowid_conversion, 0, UINT64_MAX, missed_rows.get(),
1093
38
                        location_map.get(), *it.delete_bitmap.get(), &txn_output_delete_bitmap);
1094
38
                if (config::enable_merge_on_write_correctness_check) {
1095
38
                    RowsetIdUnorderedSet rowsetids;
1096
38
                    rowsetids.insert(_output_rowset->rowset_id());
1097
38
                    _tablet->add_sentinel_mark_to_delete_bitmap(&txn_output_delete_bitmap,
1098
38
                                                                rowsetids);
1099
38
                }
1100
38
                it.delete_bitmap->merge(txn_output_delete_bitmap);
1101
                // Step3: write back the updated delete bitmap and tablet info.
1102
38
                it.rowset_ids.insert(_output_rowset->rowset_id());
1103
38
                StorageEngine::instance()->txn_manager()->set_txn_related_delete_bitmap(
1104
38
                        it.partition_id, it.transaction_id, _tablet->tablet_id(),
1105
38
                        _tablet->tablet_uid(), true, it.delete_bitmap, it.rowset_ids,
1106
38
                        it.partial_update_info);
1107
38
            }
1108
1109
            // Convert the delete bitmap of the input rowsets to the output rowset for
1110
            // the incremental data.
1111
361
            _tablet->calc_compaction_output_rowset_delete_bitmap(
1112
361
                    _input_rowsets, *_rowid_conversion, version.second, UINT64_MAX,
1113
361
                    missed_rows.get(), location_map.get(), _tablet->tablet_meta()->delete_bitmap(),
1114
361
                    &output_rowset_delete_bitmap);
1115
1116
361
            if (missed_rows) {
1117
360
                DCHECK_EQ(missed_rows->size(), missed_rows_size);
1118
360
                if (missed_rows->size() != missed_rows_size) {
1119
0
                    LOG(WARNING) << "missed rows don't match, before: " << missed_rows_size
1120
0
                                 << " after: " << missed_rows->size();
1121
0
                }
1122
360
            }
1123
1124
361
            if (location_map) {
1125
0
                RETURN_IF_ERROR(_tablet->check_rowid_conversion(_output_rowset, *location_map));
1126
0
            }
1127
1128
361
            _tablet->merge_delete_bitmap(output_rowset_delete_bitmap);
1129
361
            RETURN_IF_ERROR(_tablet->modify_rowsets(output_rowsets, _input_rowsets, true));
1130
361
        }
1131
964
    } else {
1132
964
        std::lock_guard<std::shared_mutex> wrlock(_tablet->get_header_lock());
1133
964
        SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD);
1134
964
        RETURN_IF_ERROR(_tablet->modify_rowsets(output_rowsets, _input_rowsets, true));
1135
964
    }
1136
1137
1.32k
    if (config::tablet_rowset_stale_sweep_by_size &&
1138
1.32k
        _tablet->tablet_meta()->all_stale_rs_metas().size() >=
1139
0
                config::tablet_rowset_stale_sweep_threshold_size) {
1140
0
        _tablet->delete_expired_stale_rowset();
1141
0
    }
1142
1143
1.32k
    int64_t cur_max_version = 0;
1144
1.32k
    {
1145
1.32k
        std::shared_lock rlock(_tablet->get_header_lock());
1146
1.32k
        cur_max_version = _tablet->max_version_unlocked().second;
1147
1.32k
        _tablet->save_meta();
1148
1.32k
    }
1149
1.32k
    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
1150
1.32k
        _tablet->enable_unique_key_merge_on_write()) {
1151
361
        auto st = TabletMetaManager::remove_old_version_delete_bitmap(
1152
361
                _tablet->data_dir(), _tablet->tablet_id(), cur_max_version);
1153
361
        if (!st.ok()) {
1154
0
            LOG(WARNING) << "failed to remove old version delete bitmap, st: " << st;
1155
0
        }
1156
361
    }
1157
1.32k
    return Status::OK();
1158
1.32k
}
1159
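Much of modify_rowsets above is about carrying delete bitmaps across the merge through the row id conversion. The sketch below illustrates the underlying idea under heavily simplified assumptions: plain tuples stand in for RowLocation/DeleteBitmap and a std::map stands in for RowIdConversion. Each deleted input row is remapped to its location in the output rowset; rows with no mapping (they were merged or filtered away) are collected as "missed rows", which is the quantity the merged_rows consistency check above compares against.

#include <cstdint>
#include <map>
#include <set>
#include <tuple>

// (rowset id, segment id, row id) -- a simplified stand-in for RowLocation.
using RowLoc = std::tuple<int64_t, uint32_t, uint32_t>;

// Input-to-output row mapping produced while merging; a missing key means
// the row did not survive the merge.
using RowIdConv = std::map<RowLoc, RowLoc>;

void convert_delete_bitmap(const std::set<RowLoc>& input_deletes, const RowIdConv& conv,
                           std::set<RowLoc>* output_deletes, std::set<RowLoc>* missed_rows) {
    for (const RowLoc& loc : input_deletes) {
        auto it = conv.find(loc);
        if (it == conv.end()) {
            if (missed_rows != nullptr) missed_rows->insert(loc); // merged/filtered away
            continue;
        }
        output_deletes->insert(it->second); // same logical row, new location
    }
}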
1160
bool Compaction::_check_if_includes_input_rowsets(
1161
38
        const RowsetIdUnorderedSet& commit_rowset_ids_set) const {
1162
38
    std::vector<RowsetId> commit_rowset_ids {};
1163
38
    commit_rowset_ids.insert(commit_rowset_ids.end(), commit_rowset_ids_set.begin(),
1164
38
                             commit_rowset_ids_set.end());
1165
38
    std::sort(commit_rowset_ids.begin(), commit_rowset_ids.end());
1166
38
    std::vector<RowsetId> input_rowset_ids {};
1167
838
    for (const auto& rowset : _input_rowsets) {
1168
838
        input_rowset_ids.emplace_back(rowset->rowset_meta()->rowset_id());
1169
838
    }
1170
38
    std::sort(input_rowset_ids.begin(), input_rowset_ids.end());
1171
38
    return std::includes(commit_rowset_ids.begin(), commit_rowset_ids.end(),
1172
38
                         input_rowset_ids.begin(), input_rowset_ids.end());
1173
38
}
1174
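The subset test above is std::sort plus std::includes, which requires both ranges to be sorted with the same ordering; it answers whether every compaction input rowset is covered by the committed transaction's rowset set. A standalone illustration with made-up integer ids in place of RowsetId:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> commit_ids {1, 2, 3, 5, 8}; // sorted, like commit_rowset_ids
    std::vector<int> input_ids  {2, 5, 8};       // sorted, like input_rowset_ids
    // true iff every input id also appears among the committed ids
    assert(std::includes(commit_ids.begin(), commit_ids.end(),
                         input_ids.begin(), input_ids.end()));
    return 0;
}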
1175
0
void Compaction::gc_output_rowset() {
1176
0
    if (_state != CompactionState::SUCCESS && _output_rowset != nullptr) {
1177
0
        if (!_output_rowset->is_local()) {
1178
0
            _tablet->record_unused_remote_rowset(_output_rowset->rowset_id(),
1179
0
                                                 _output_rowset->rowset_meta()->resource_id(),
1180
0
                                                 _output_rowset->num_segments());
1181
0
            return;
1182
0
        }
1183
0
        StorageEngine::instance()->add_unused_rowset(_output_rowset);
1184
0
    }
1185
0
}
1186
1187
// Find the longest consecutive version path in "rowsets", starting from the beginning.
1188
// The versions immediately before and after the first missing version are saved in missing_version,
1189
// if missing_version is not null.
1190
Status Compaction::find_longest_consecutive_version(std::vector<RowsetSharedPtr>* rowsets,
1191
1.35k
                                                    std::vector<Version>* missing_version) {
1192
1.35k
    if (rowsets->empty()) {
1193
2
        return Status::OK();
1194
2
    }
1195
1.34k
    RowsetSharedPtr prev_rowset = rowsets->front();
1196
1.34k
    size_t i = 1;
1197
19.2k
    for (; i < rowsets->size(); ++i) {
1198
17.9k
        RowsetSharedPtr rowset = (*rowsets)[i];
1199
17.9k
        if (rowset->start_version() != prev_rowset->end_version() + 1) {
1200
2
            if (missing_version != nullptr) {
1201
0
                missing_version->push_back(prev_rowset->version());
1202
0
                missing_version->push_back(rowset->version());
1203
0
            }
1204
2
            break;
1205
2
        }
1206
17.9k
        prev_rowset = rowset;
1207
17.9k
    }
1208
1209
1.34k
    rowsets->resize(i);
1210
1.34k
    return Status::OK();
1211
1.35k
}
1212
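A minimal sketch of the prefix scan above, using plain (start, end) pairs instead of RowsetSharedPtr: rowsets are kept while each one starts exactly one past the previous end, the list is truncated at the first gap, and the versions on both sides of the gap are reported. check_version_continuity below performs the same adjacency scan but returns an error instead of truncating.

#include <cstdint>
#include <utility>
#include <vector>

using Version = std::pair<int64_t, int64_t>; // [start, end]

void longest_consecutive_prefix(std::vector<Version>* rowsets, std::vector<Version>* missing) {
    if (rowsets->empty()) return;
    size_t i = 1;
    for (; i < rowsets->size(); ++i) {
        if ((*rowsets)[i].first != (*rowsets)[i - 1].second + 1) {
            if (missing != nullptr) {
                missing->push_back((*rowsets)[i - 1]); // version before the gap
                missing->push_back((*rowsets)[i]);     // version after the gap
            }
            break;
        }
    }
    rowsets->resize(i); // keep only the consecutive prefix
}
// e.g. {[2,5],[6,9],[11,12]} is truncated to {[2,5],[6,9]} and missing
// receives [6,9] and [11,12].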
1213
264
Status Compaction::check_version_continuity(const std::vector<RowsetSharedPtr>& rowsets) {
1214
264
    if (rowsets.empty()) {
1215
17
        return Status::OK();
1216
17
    }
1217
247
    RowsetSharedPtr prev_rowset = rowsets.front();
1218
794
    for (size_t i = 1; i < rowsets.size(); ++i) {
1219
547
        RowsetSharedPtr rowset = rowsets[i];
1220
547
        if (rowset->start_version() != prev_rowset->end_version() + 1) {
1221
0
            return Status::Error<CUMULATIVE_MISS_VERSION>(
1222
0
                    "There are missed versions among rowsets. prev_rowset version={}-{}, rowset "
1223
0
                    "version={}-{}",
1224
0
                    prev_rowset->start_version(), prev_rowset->end_version(),
1225
0
                    rowset->start_version(), rowset->end_version());
1226
0
        }
1227
547
        prev_rowset = rowset;
1228
547
    }
1229
1230
247
    return Status::OK();
1231
247
}
1232
1233
799
Status Compaction::check_correctness(const Merger::Statistics& stats) {
1234
    // 1. check row number
1235
799
    if (_input_row_num != _output_rowset->num_rows() + stats.merged_rows + stats.filtered_rows) {
1236
0
        return Status::Error<CHECK_LINES_ERROR>(
1237
0
                "row_num does not match between cumulative input and output! tablet={}, "
1238
0
                "input_row_num={}, merged_row_num={}, filtered_row_num={}, output_row_num={}",
1239
0
                _tablet->tablet_id(), _input_row_num, stats.merged_rows, stats.filtered_rows,
1240
0
                _output_rowset->num_rows());
1241
0
    }
1242
799
    return Status::OK();
1243
799
}
1244
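The check above is a conservation law over the merge: every input row is either written to the output, merged away (duplicate key under the unique/aggregate models), or filtered (e.g. by a delete predicate). A worked example with made-up numbers:

#include <cassert>
#include <cstdint>

int main() {
    int64_t input_rows = 1000; // rows across all input rowsets
    int64_t merged = 120;      // duplicates collapsed during the merge
    int64_t filtered = 30;     // rows dropped by predicates
    int64_t output_rows = 850; // rows written to the output rowset
    assert(input_rows == output_rows + merged + filtered);
    return 0;
}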
1245
1.36k
int64_t Compaction::get_compaction_permits() {
1246
1.36k
    int64_t permits = 0;
1247
18.2k
    for (auto rowset : _input_rowsets) {
1248
18.2k
        permits += rowset->rowset_meta()->get_compaction_score();
1249
18.2k
    }
1250
1.36k
    return permits;
1251
1.36k
}
1252
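Summing per-rowset compaction scores is a plain fold; with integer scores it could equally be written with std::accumulate. A sketch with made-up values:

#include <cstdint>
#include <numeric>
#include <vector>

int64_t get_compaction_permits(const std::vector<int64_t>& scores) {
    return std::accumulate(scores.begin(), scores.end(), int64_t{0});
}
// e.g. get_compaction_permits({3, 1, 7}) == 11 permits requested before the
// compaction task is allowed to run.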
1253
1.36k
void Compaction::_load_segment_to_cache() {
1254
    // Load the new rowset's segments into the cache.
1255
1.36k
    SegmentCacheHandle handle;
1256
1.36k
    auto st = SegmentLoader::instance()->load_segments(
1257
1.36k
            std::static_pointer_cast<BetaRowset>(_output_rowset), &handle, true);
1258
1.36k
    if (!st.ok()) {
1259
0
        LOG(WARNING) << "failed to load segment to cache! output rowset version="
1260
0
                     << _output_rowset->start_version() << "-" << _output_rowset->end_version()
1261
0
                     << ".";
1262
0
    }
1263
1.36k
}
1264
1265
#ifdef BE_TEST
1266
void Compaction::set_input_rowset(const std::vector<RowsetSharedPtr>& rowsets) {
1267
    _input_rowsets = rowsets;
1268
}
1269
1270
RowsetSharedPtr Compaction::output_rowset() {
1271
    return _output_rowset;
1272
}
1273
#endif
1274
1275
} // namespace doris