Coverage Report

Created: 2026-04-01 15:23

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/storage/olap_server.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include <gen_cpp/Types_types.h>
19
#include <gen_cpp/olap_file.pb.h>
20
#include <glog/logging.h>
21
#include <rapidjson/prettywriter.h>
22
#include <rapidjson/stringbuffer.h>
23
#include <stdint.h>
24
#include <sys/types.h>
25
26
#include <algorithm>
27
#include <atomic>
28
// IWYU pragma: no_include <bits/chrono.h>
29
#include <gen_cpp/FrontendService.h>
30
#include <gen_cpp/internal_service.pb.h>
31
32
#include <chrono> // IWYU pragma: keep
33
#include <cmath>
34
#include <condition_variable>
35
#include <cstdint>
36
#include <ctime>
37
#include <functional>
38
#include <map>
39
#include <memory>
40
#include <mutex>
41
#include <ostream>
42
#include <random>
43
#include <shared_mutex>
44
#include <string>
45
#include <thread>
46
#include <unordered_set>
47
#include <utility>
48
#include <vector>
49
50
#include "agent/utils.h"
51
#include "common/config.h"
52
#include "common/logging.h"
53
#include "common/metrics/doris_metrics.h"
54
#include "common/metrics/metrics.h"
55
#include "common/status.h"
56
#include "cpp/sync_point.h"
57
#include "io/fs/file_writer.h" // IWYU pragma: keep
58
#include "io/fs/path.h"
59
#include "load/memtable/memtable_flush_executor.h"
60
#include "runtime/memory/cache_manager.h"
61
#include "runtime/memory/global_memory_arbitrator.h"
62
#include "storage/compaction/cold_data_compaction.h"
63
#include "storage/compaction/compaction_permit_limiter.h"
64
#include "storage/compaction/cumulative_compaction.h"
65
#include "storage/compaction/cumulative_compaction_policy.h"
66
#include "storage/compaction/cumulative_compaction_time_series_policy.h"
67
#include "storage/compaction/single_replica_compaction.h"
68
#include "storage/data_dir.h"
69
#include "storage/olap_common.h"
70
#include "storage/olap_define.h"
71
#include "storage/rowset/segcompaction.h"
72
#include "storage/schema_change/schema_change.h"
73
#include "storage/storage_engine.h"
74
#include "storage/storage_policy.h"
75
#include "storage/tablet/base_tablet.h"
76
#include "storage/tablet/tablet.h"
77
#include "storage/tablet/tablet_manager.h"
78
#include "storage/tablet/tablet_meta.h"
79
#include "storage/tablet/tablet_meta_manager.h"
80
#include "storage/tablet/tablet_schema.h"
81
#include "storage/task/engine_publish_version_task.h"
82
#include "storage/task/index_builder.h"
83
#include "util/client_cache.h"
84
#include "util/countdown_latch.h"
85
#include "util/debug_points.h"
86
#include "util/mem_info.h"
87
#include "util/thread.h"
88
#include "util/threadpool.h"
89
#include "util/thrift_rpc_helper.h"
90
#include "util/time.h"
91
#include "util/uid_util.h"
92
#include "util/work_thread_pool.hpp"
93
94
using std::string;
95
96
namespace doris {
97
#include "common/compile_check_begin.h"
98
using io::Path;
99
100
// number of running SCHEMA-CHANGE threads
101
volatile uint32_t g_schema_change_active_threads = 0;
102
bvar::Status<int64_t> g_cumu_compaction_task_num_per_round("cumu_compaction_task_num_per_round", 0);
103
bvar::Status<int64_t> g_base_compaction_task_num_per_round("base_compaction_task_num_per_round", 0);
104
105
static const uint64_t DEFAULT_SEED = 104729;
106
static const uint64_t MOD_PRIME = 7652413;
107
108
0
CompactionSubmitRegistry::CompactionSubmitRegistry(CompactionSubmitRegistry&& r) {
109
0
    std::swap(_tablet_submitted_cumu_compaction, r._tablet_submitted_cumu_compaction);
110
0
    std::swap(_tablet_submitted_base_compaction, r._tablet_submitted_base_compaction);
111
0
    std::swap(_tablet_submitted_full_compaction, r._tablet_submitted_full_compaction);
112
0
}
113
114
0
CompactionSubmitRegistry CompactionSubmitRegistry::create_snapshot() {
115
    // full compaction is not engaged in this method
116
0
    std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex);
117
0
    CompactionSubmitRegistry registry;
118
0
    registry._tablet_submitted_base_compaction = _tablet_submitted_base_compaction;
119
0
    registry._tablet_submitted_cumu_compaction = _tablet_submitted_cumu_compaction;
120
0
    return registry;
121
0
}
122
123
7
void CompactionSubmitRegistry::reset(const std::vector<DataDir*>& stores) {
124
    // full compaction is not engaged in this method
125
7
    for (const auto& store : stores) {
126
0
        _tablet_submitted_cumu_compaction[store] = {};
127
0
        _tablet_submitted_base_compaction[store] = {};
128
0
    }
129
7
}
130
131
uint32_t CompactionSubmitRegistry::count_executing_compaction(DataDir* dir,
132
2
                                                              CompactionType compaction_type) {
133
    // non-lock, used in snapshot
134
2
    const auto& compaction_tasks = _get_tablet_set(dir, compaction_type);
135
2
    return cast_set<uint32_t>(std::count_if(
136
2
            compaction_tasks.begin(), compaction_tasks.end(),
137
10
            [](const auto& task) { return task->compaction_stage == CompactionStage::EXECUTING; }));
138
2
}
139
140
1
uint32_t CompactionSubmitRegistry::count_executing_cumu_and_base(DataDir* dir) {
141
    // non-lock, used in snapshot
142
1
    return count_executing_compaction(dir, CompactionType::BASE_COMPACTION) +
143
1
           count_executing_compaction(dir, CompactionType::CUMULATIVE_COMPACTION);
144
1
}
145
146
0
bool CompactionSubmitRegistry::has_compaction_task(DataDir* dir, CompactionType compaction_type) {
147
    // non-lock, used in snapshot
148
0
    return !_get_tablet_set(dir, compaction_type).empty();
149
0
}
150
151
std::vector<TabletSharedPtr> CompactionSubmitRegistry::pick_topn_tablets_for_compaction(
152
        TabletManager* tablet_mgr, DataDir* data_dir, CompactionType compaction_type,
153
0
        const CumuCompactionPolicyTable& cumu_compaction_policies, uint32_t* disk_max_score) {
154
    // non-lock, used in snapshot
155
0
    return tablet_mgr->find_best_tablets_to_compaction(compaction_type, data_dir,
156
0
                                                       _get_tablet_set(data_dir, compaction_type),
157
0
                                                       disk_max_score, cumu_compaction_policies);
158
0
}
159
160
21
bool CompactionSubmitRegistry::insert(TabletSharedPtr tablet, CompactionType compaction_type) {
161
21
    std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex);
162
21
    auto& tablet_set = _get_tablet_set(tablet->data_dir(), compaction_type);
163
21
    bool already_exist = !(tablet_set.insert(tablet).second);
164
21
    return already_exist;
165
21
}
166
167
void CompactionSubmitRegistry::remove(TabletSharedPtr tablet, CompactionType compaction_type,
168
7
                                      std::function<void()> wakeup_cb) {
169
7
    std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex);
170
7
    auto& tablet_set = _get_tablet_set(tablet->data_dir(), compaction_type);
171
7
    size_t removed = tablet_set.erase(tablet);
172
7
    if (removed == 1) {
173
7
        wakeup_cb();
174
7
    }
175
7
}
176
177
CompactionSubmitRegistry::TabletSet& CompactionSubmitRegistry::_get_tablet_set(
178
30
        DataDir* dir, CompactionType compaction_type) {
179
30
    switch (compaction_type) {
180
1
    case CompactionType::BASE_COMPACTION:
181
1
        return _tablet_submitted_base_compaction[dir];
182
29
    case CompactionType::CUMULATIVE_COMPACTION:
183
29
        return _tablet_submitted_cumu_compaction[dir];
184
0
    case CompactionType::FULL_COMPACTION:
185
0
        return _tablet_submitted_full_compaction[dir];
186
0
    default:
187
0
        CHECK(false) << "invalid compaction type";
188
30
    }
189
30
}
190
191
0
static int32_t get_cumu_compaction_threads_num(size_t data_dirs_num) {
192
0
    int32_t threads_num = config::max_cumu_compaction_threads;
193
0
    if (threads_num == -1) {
194
0
        int32_t num_cores = doris::CpuInfo::num_cores();
195
0
        threads_num = std::max(cast_set<int32_t>(data_dirs_num), num_cores / 6);
196
0
    }
197
0
    threads_num = threads_num <= 0 ? 1 : threads_num;
198
0
    return threads_num;
199
0
}
200
201
0
static int32_t get_base_compaction_threads_num(size_t data_dirs_num) {
202
0
    int32_t threads_num = config::max_base_compaction_threads;
203
0
    if (threads_num == -1) {
204
0
        threads_num = cast_set<int32_t>(data_dirs_num);
205
0
    }
206
0
    threads_num = threads_num <= 0 ? 1 : threads_num;
207
0
    return threads_num;
208
0
}
209
210
0
static int32_t get_single_replica_compaction_threads_num(size_t data_dirs_num) {
211
0
    int32_t threads_num = config::max_single_replica_compaction_threads;
212
0
    if (threads_num == -1) {
213
0
        threads_num = cast_set<int32_t>(data_dirs_num);
214
0
    }
215
0
    threads_num = threads_num <= 0 ? 1 : threads_num;
216
0
    return threads_num;
217
0
}
218
219
0
Status StorageEngine::start_bg_threads(std::shared_ptr<WorkloadGroup> wg_sptr) {
220
0
    RETURN_IF_ERROR(Thread::create(
221
0
            "StorageEngine", "unused_rowset_monitor_thread",
222
0
            [this]() { this->_unused_rowset_monitor_thread_callback(); },
223
0
            &_unused_rowset_monitor_thread));
224
0
    LOG(INFO) << "unused rowset monitor thread started";
225
226
0
    RETURN_IF_ERROR(Thread::create(
227
0
            "StorageEngine", "evict_querying_rowset_thread",
228
0
            [this]() { this->_evict_quring_rowset_thread_callback(); },
229
0
            &_evict_quering_rowset_thread));
230
0
    LOG(INFO) << "evict quering thread started";
231
232
    // start thread for monitoring the snapshot and trash folder
233
0
    RETURN_IF_ERROR(Thread::create(
234
0
            "StorageEngine", "garbage_sweeper_thread",
235
0
            [this]() { this->_garbage_sweeper_thread_callback(); }, &_garbage_sweeper_thread));
236
0
    LOG(INFO) << "garbage sweeper thread started";
237
238
    // start thread for monitoring the tablet with io error
239
0
    RETURN_IF_ERROR(Thread::create(
240
0
            "StorageEngine", "disk_stat_monitor_thread",
241
0
            [this]() { this->_disk_stat_monitor_thread_callback(); }, &_disk_stat_monitor_thread));
242
0
    LOG(INFO) << "disk stat monitor thread started";
243
244
    // convert store map to vector
245
0
    std::vector<DataDir*> data_dirs = get_stores();
246
247
0
    auto base_compaction_threads = get_base_compaction_threads_num(data_dirs.size());
248
0
    auto cumu_compaction_threads = get_cumu_compaction_threads_num(data_dirs.size());
249
0
    auto single_replica_compaction_threads =
250
0
            get_single_replica_compaction_threads_num(data_dirs.size());
251
252
0
    RETURN_IF_ERROR(ThreadPoolBuilder("BaseCompactionTaskThreadPool")
253
0
                            .set_min_threads(base_compaction_threads)
254
0
                            .set_max_threads(base_compaction_threads)
255
0
                            .build(&_base_compaction_thread_pool));
256
0
    RETURN_IF_ERROR(ThreadPoolBuilder("CumuCompactionTaskThreadPool")
257
0
                            .set_min_threads(cumu_compaction_threads)
258
0
                            .set_max_threads(cumu_compaction_threads)
259
0
                            .build(&_cumu_compaction_thread_pool));
260
0
    RETURN_IF_ERROR(ThreadPoolBuilder("SingleReplicaCompactionTaskThreadPool")
261
0
                            .set_min_threads(single_replica_compaction_threads)
262
0
                            .set_max_threads(single_replica_compaction_threads)
263
0
                            .build(&_single_replica_compaction_thread_pool));
264
265
0
    if (config::enable_segcompaction) {
266
0
        RETURN_IF_ERROR(ThreadPoolBuilder("SegCompactionTaskThreadPool")
267
0
                                .set_min_threads(config::segcompaction_num_threads)
268
0
                                .set_max_threads(config::segcompaction_num_threads)
269
0
                                .build(&_seg_compaction_thread_pool));
270
0
    }
271
0
    RETURN_IF_ERROR(ThreadPoolBuilder("ColdDataCompactionTaskThreadPool")
272
0
                            .set_min_threads(config::cold_data_compaction_thread_num)
273
0
                            .set_max_threads(config::cold_data_compaction_thread_num)
274
0
                            .build(&_cold_data_compaction_thread_pool));
275
276
    // compaction tasks producer thread
277
0
    RETURN_IF_ERROR(Thread::create(
278
0
            "StorageEngine", "compaction_tasks_producer_thread",
279
0
            [this]() { this->_compaction_tasks_producer_callback(); },
280
0
            &_compaction_tasks_producer_thread));
281
0
    LOG(INFO) << "compaction tasks producer thread started";
282
283
0
    RETURN_IF_ERROR(Thread::create(
284
0
            "StorageEngine", "_update_replica_infos_thread",
285
0
            [this]() { this->_update_replica_infos_callback(); }, &_update_replica_infos_thread));
286
0
    LOG(INFO) << "tablet replicas info update thread started";
287
288
0
    int32_t max_checkpoint_thread_num = config::max_meta_checkpoint_threads;
289
0
    if (max_checkpoint_thread_num < 0) {
290
0
        max_checkpoint_thread_num = cast_set<int32_t>(data_dirs.size());
291
0
    }
292
0
    RETURN_IF_ERROR(ThreadPoolBuilder("TabletMetaCheckpointTaskThreadPool")
293
0
                            .set_max_threads(max_checkpoint_thread_num)
294
0
                            .build(&_tablet_meta_checkpoint_thread_pool));
295
296
0
    RETURN_IF_ERROR(Thread::create(
297
0
            "StorageEngine", "tablet_checkpoint_tasks_producer_thread",
298
0
            [this, data_dirs]() { this->_tablet_checkpoint_callback(data_dirs); },
299
0
            &_tablet_checkpoint_tasks_producer_thread));
300
0
    LOG(INFO) << "tablet checkpoint tasks producer thread started";
301
302
0
    RETURN_IF_ERROR(Thread::create(
303
0
            "StorageEngine", "tablet_path_check_thread",
304
0
            [this]() { this->_tablet_path_check_callback(); }, &_tablet_path_check_thread));
305
0
    LOG(INFO) << "tablet path check thread started";
306
307
    // path scan and gc thread
308
0
    if (config::path_gc_check) {
309
0
        for (auto data_dir : get_stores()) {
310
0
            std::shared_ptr<Thread> path_gc_thread;
311
0
            RETURN_IF_ERROR(Thread::create(
312
0
                    "StorageEngine", "path_gc_thread",
313
0
                    [this, data_dir]() { this->_path_gc_thread_callback(data_dir); },
314
0
                    &path_gc_thread));
315
0
            _path_gc_threads.emplace_back(path_gc_thread);
316
0
        }
317
0
        LOG(INFO) << "path gc threads started. number:" << get_stores().size();
318
0
    }
319
320
0
    RETURN_IF_ERROR(ThreadPoolBuilder("CooldownTaskThreadPool")
321
0
                            .set_min_threads(config::cooldown_thread_num)
322
0
                            .set_max_threads(config::cooldown_thread_num)
323
0
                            .build(&_cooldown_thread_pool));
324
0
    LOG(INFO) << "cooldown thread pool started";
325
326
0
    RETURN_IF_ERROR(Thread::create(
327
0
            "StorageEngine", "cooldown_tasks_producer_thread",
328
0
            [this]() { this->_cooldown_tasks_producer_callback(); },
329
0
            &_cooldown_tasks_producer_thread));
330
0
    LOG(INFO) << "cooldown tasks producer thread started";
331
332
0
    RETURN_IF_ERROR(Thread::create(
333
0
            "StorageEngine", "remove_unused_remote_files_thread",
334
0
            [this]() { this->_remove_unused_remote_files_callback(); },
335
0
            &_remove_unused_remote_files_thread));
336
0
    LOG(INFO) << "remove unused remote files thread started";
337
338
0
    RETURN_IF_ERROR(Thread::create(
339
0
            "StorageEngine", "cold_data_compaction_producer_thread",
340
0
            [this]() { this->_cold_data_compaction_producer_callback(); },
341
0
            &_cold_data_compaction_producer_thread));
342
0
    LOG(INFO) << "cold data compaction producer thread started";
343
344
    // add tablet publish version thread pool
345
0
    RETURN_IF_ERROR(ThreadPoolBuilder("TabletPublishTxnThreadPool")
346
0
                            .set_min_threads(config::tablet_publish_txn_max_thread)
347
0
                            .set_max_threads(config::tablet_publish_txn_max_thread)
348
0
                            .build(&_tablet_publish_txn_thread_pool));
349
350
0
    RETURN_IF_ERROR(Thread::create(
351
0
            "StorageEngine", "async_publish_version_thread",
352
0
            [this]() { this->_async_publish_callback(); }, &_async_publish_thread));
353
0
    LOG(INFO) << "async publish thread started";
354
355
0
    RETURN_IF_ERROR(Thread::create(
356
0
            "StorageEngine", "check_tablet_delete_bitmap_score_thread",
357
0
            [this]() { this->_check_tablet_delete_bitmap_score_callback(); },
358
0
            &_check_delete_bitmap_score_thread));
359
0
    LOG(INFO) << "check tablet delete bitmap score thread started";
360
361
0
    _start_adaptive_thread_controller();
362
363
0
    LOG(INFO) << "all storage engine's background threads are started.";
364
0
    return Status::OK();
365
0
}
366
367
0
void StorageEngine::_garbage_sweeper_thread_callback() {
368
0
    uint32_t max_interval = config::max_garbage_sweep_interval;
369
0
    uint32_t min_interval = config::min_garbage_sweep_interval;
370
371
0
    if (max_interval < min_interval || min_interval <= 0) {
372
0
        LOG(WARNING) << "garbage sweep interval config is illegal: [max=" << max_interval
373
0
                     << " min=" << min_interval << "].";
374
0
        min_interval = 1;
375
0
        max_interval = max_interval >= min_interval ? max_interval : min_interval;
376
0
        LOG(INFO) << "force reset garbage sweep interval. "
377
0
                  << "max_interval=" << max_interval << ", min_interval=" << min_interval;
378
0
    }
379
380
0
    const double pi = M_PI;
381
0
    double usage = 1.0;
382
    // After the program starts, the first round of cleaning starts after min_interval.
383
0
    uint32_t curr_interval = min_interval;
384
0
    do {
385
        // Function properties:
386
        // when usage < 0.6,          ratio close to 1.(interval close to max_interval)
387
        // when usage at [0.6, 0.75], ratio is rapidly decreasing from 0.87 to 0.27.
388
        // when usage > 0.75,         ratio is slowly decreasing.
389
        // when usage > 0.8,          ratio close to min_interval.
390
        // when usage = 0.88,         ratio is approximately 0.0057.
391
0
        double ratio = (1.1 * (pi / 2 - std::atan(usage * 100 / 5 - 14)) - 0.28) / pi;
392
0
        ratio = ratio > 0 ? ratio : 0;
393
        // TODO(dx): fix it
394
0
        auto curr_interval_not_work = uint32_t(max_interval * ratio);
395
0
        curr_interval_not_work = std::max(curr_interval_not_work, min_interval);
396
0
        curr_interval_not_work = std::min(curr_interval_not_work, max_interval);
397
398
        // start clean trash and update usage.
399
0
        Status res = start_trash_sweep(&usage);
400
0
        if (res.ok() && _need_clean_trash.exchange(false, std::memory_order_relaxed)) {
401
0
            res = start_trash_sweep(&usage, true);
402
0
        }
403
404
0
        if (!res.ok()) {
405
0
            LOG(WARNING) << "one or more errors occur when sweep trash."
406
0
                         << "see previous message for detail. err code=" << res;
407
            // do nothing. continue next loop.
408
0
        }
409
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(curr_interval)));
410
0
}
411
412
0
void StorageEngine::_disk_stat_monitor_thread_callback() {
413
0
    int32_t interval = config::disk_stat_monitor_interval;
414
0
    do {
415
0
        _start_disk_stat_monitor();
416
417
0
        interval = config::disk_stat_monitor_interval;
418
0
        if (interval <= 0) {
419
0
            LOG(WARNING) << "disk_stat_monitor_interval config is illegal: " << interval
420
0
                         << ", force set to 1";
421
0
            interval = 1;
422
0
        }
423
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
424
0
}
425
426
0
void StorageEngine::_unused_rowset_monitor_thread_callback() {
427
0
    int32_t interval = config::unused_rowset_monitor_interval;
428
0
    do {
429
0
        start_delete_unused_rowset();
430
431
0
        interval = config::unused_rowset_monitor_interval;
432
0
        if (interval <= 0) {
433
0
            LOG(WARNING) << "unused_rowset_monitor_interval config is illegal: " << interval
434
0
                         << ", force set to 1";
435
0
            interval = 1;
436
0
        }
437
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
438
0
}
439
440
0
int32_t StorageEngine::_auto_get_interval_by_disk_capacity(DataDir* data_dir) {
441
0
    double disk_used = data_dir->get_usage(0);
442
0
    double remain_used = 1 - disk_used;
443
0
    DCHECK(remain_used >= 0 && remain_used <= 1);
444
0
    DCHECK(config::path_gc_check_interval_second >= 0);
445
0
    int32_t ret = 0;
446
0
    if (remain_used > 0.9) {
447
        // if config::path_gc_check_interval_second == 24h
448
0
        ret = config::path_gc_check_interval_second;
449
0
    } else if (remain_used > 0.7) {
450
        // 12h
451
0
        ret = config::path_gc_check_interval_second / 2;
452
0
    } else if (remain_used > 0.5) {
453
        // 6h
454
0
        ret = config::path_gc_check_interval_second / 4;
455
0
    } else if (remain_used > 0.3) {
456
        // 4h
457
0
        ret = config::path_gc_check_interval_second / 6;
458
0
    } else {
459
        // 3h
460
0
        ret = config::path_gc_check_interval_second / 8;
461
0
    }
462
0
    return ret;
463
0
}
464
465
0
void StorageEngine::_path_gc_thread_callback(DataDir* data_dir) {
466
0
    LOG(INFO) << "try to start path gc thread!";
467
0
    time_t last_exec_time = 0;
468
0
    do {
469
0
        time_t current_time = time(nullptr);
470
471
0
        int32_t interval = _auto_get_interval_by_disk_capacity(data_dir);
472
0
        DBUG_EXECUTE_IF("_path_gc_thread_callback.interval.eq.1ms", {
473
0
            LOG(INFO) << "debug point change interval eq 1ms";
474
0
            interval = 1;
475
0
            while (DebugPoints::instance()->is_enable("_path_gc_thread_callback.always.do")) {
476
0
                data_dir->perform_path_gc();
477
0
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
478
0
            }
479
0
        });
480
0
        if (interval <= 0) {
481
0
            LOG(WARNING) << "path gc thread check interval config is illegal:" << interval
482
0
                         << " will be forced set to half hour";
483
0
            interval = 1800; // 0.5 hour
484
0
        }
485
0
        if (current_time - last_exec_time >= interval) {
486
0
            LOG(INFO) << "try to perform path gc! disk remain [" << 1 - data_dir->get_usage(0)
487
0
                      << "] internal [" << interval << "]";
488
0
            data_dir->perform_path_gc();
489
0
            last_exec_time = time(nullptr);
490
0
        }
491
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(5)));
492
0
    LOG(INFO) << "stop path gc thread!";
493
0
}
494
495
0
void StorageEngine::_tablet_checkpoint_callback(const std::vector<DataDir*>& data_dirs) {
496
0
    int64_t interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs;
497
0
    do {
498
0
        for (auto data_dir : data_dirs) {
499
0
            LOG(INFO) << "begin to produce tablet meta checkpoint tasks, data_dir="
500
0
                      << data_dir->path();
501
0
            auto st = _tablet_meta_checkpoint_thread_pool->submit_func(
502
0
                    [data_dir, this]() { _tablet_manager->do_tablet_meta_checkpoint(data_dir); });
503
0
            if (!st.ok()) {
504
0
                LOG(WARNING) << "submit tablet checkpoint tasks failed.";
505
0
            }
506
0
        }
507
0
        interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs;
508
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
509
0
}
510
511
0
void StorageEngine::_tablet_path_check_callback() {
512
0
    struct TabletIdComparator {
513
0
        bool operator()(Tablet* a, Tablet* b) { return a->tablet_id() < b->tablet_id(); }
514
0
    };
515
516
0
    using TabletQueue = std::priority_queue<Tablet*, std::vector<Tablet*>, TabletIdComparator>;
517
518
0
    int64_t interval = config::tablet_path_check_interval_seconds;
519
0
    if (interval <= 0) {
520
0
        return;
521
0
    }
522
523
0
    int64_t last_tablet_id = 0;
524
0
    do {
525
0
        int32_t batch_size = config::tablet_path_check_batch_size;
526
0
        if (batch_size <= 0) {
527
0
            if (_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))) {
528
0
                break;
529
0
            }
530
0
            continue;
531
0
        }
532
533
0
        LOG(INFO) << "start to check tablet path";
534
535
0
        auto all_tablets = _tablet_manager->get_all_tablet(
536
0
                [](Tablet* t) { return t->is_used() && t->tablet_state() == TABLET_RUNNING; });
537
538
0
        TabletQueue big_id_tablets;
539
0
        TabletQueue small_id_tablets;
540
0
        for (auto tablet : all_tablets) {
541
0
            auto tablet_id = tablet->tablet_id();
542
0
            TabletQueue* belong_tablets = nullptr;
543
0
            if (tablet_id > last_tablet_id) {
544
0
                if (big_id_tablets.size() < batch_size ||
545
0
                    big_id_tablets.top()->tablet_id() > tablet_id) {
546
0
                    belong_tablets = &big_id_tablets;
547
0
                }
548
0
            } else if (big_id_tablets.size() < batch_size) {
549
0
                if (small_id_tablets.size() < batch_size ||
550
0
                    small_id_tablets.top()->tablet_id() > tablet_id) {
551
0
                    belong_tablets = &small_id_tablets;
552
0
                }
553
0
            }
554
0
            if (belong_tablets != nullptr) {
555
0
                belong_tablets->push(tablet.get());
556
0
                if (belong_tablets->size() > batch_size) {
557
0
                    belong_tablets->pop();
558
0
                }
559
0
            }
560
0
        }
561
562
0
        int32_t need_small_id_tablet_size =
563
0
                batch_size - static_cast<int32_t>(big_id_tablets.size());
564
565
0
        if (!big_id_tablets.empty()) {
566
0
            last_tablet_id = big_id_tablets.top()->tablet_id();
567
0
        }
568
0
        while (!big_id_tablets.empty()) {
569
0
            big_id_tablets.top()->check_tablet_path_exists();
570
0
            big_id_tablets.pop();
571
0
        }
572
573
0
        if (!small_id_tablets.empty() && need_small_id_tablet_size > 0) {
574
0
            while (static_cast<int32_t>(small_id_tablets.size()) > need_small_id_tablet_size) {
575
0
                small_id_tablets.pop();
576
0
            }
577
578
0
            last_tablet_id = small_id_tablets.top()->tablet_id();
579
0
            while (!small_id_tablets.empty()) {
580
0
                small_id_tablets.top()->check_tablet_path_exists();
581
0
                small_id_tablets.pop();
582
0
            }
583
0
        }
584
585
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
586
0
}
587
588
6
void StorageEngine::_adjust_compaction_thread_num() {
589
6
    TEST_SYNC_POINT_RETURN_WITH_VOID("StorageEngine::_adjust_compaction_thread_num.return_void");
590
0
    auto base_compaction_threads_num = get_base_compaction_threads_num(_store_map.size());
591
0
    if (_base_compaction_thread_pool->max_threads() != base_compaction_threads_num) {
592
0
        int old_max_threads = _base_compaction_thread_pool->max_threads();
593
0
        Status status = _base_compaction_thread_pool->set_max_threads(base_compaction_threads_num);
594
0
        if (status.ok()) {
595
0
            VLOG_NOTICE << "update base compaction thread pool max_threads from " << old_max_threads
596
0
                        << " to " << base_compaction_threads_num;
597
0
        }
598
0
    }
599
0
    if (_base_compaction_thread_pool->min_threads() != base_compaction_threads_num) {
600
0
        int old_min_threads = _base_compaction_thread_pool->min_threads();
601
0
        Status status = _base_compaction_thread_pool->set_min_threads(base_compaction_threads_num);
602
0
        if (status.ok()) {
603
0
            VLOG_NOTICE << "update base compaction thread pool min_threads from " << old_min_threads
604
0
                        << " to " << base_compaction_threads_num;
605
0
        }
606
0
    }
607
608
0
    auto cumu_compaction_threads_num = get_cumu_compaction_threads_num(_store_map.size());
609
0
    if (_cumu_compaction_thread_pool->max_threads() != cumu_compaction_threads_num) {
610
0
        int old_max_threads = _cumu_compaction_thread_pool->max_threads();
611
0
        Status status = _cumu_compaction_thread_pool->set_max_threads(cumu_compaction_threads_num);
612
0
        if (status.ok()) {
613
0
            VLOG_NOTICE << "update cumu compaction thread pool max_threads from " << old_max_threads
614
0
                        << " to " << cumu_compaction_threads_num;
615
0
        }
616
0
    }
617
0
    if (_cumu_compaction_thread_pool->min_threads() != cumu_compaction_threads_num) {
618
0
        int old_min_threads = _cumu_compaction_thread_pool->min_threads();
619
0
        Status status = _cumu_compaction_thread_pool->set_min_threads(cumu_compaction_threads_num);
620
0
        if (status.ok()) {
621
0
            VLOG_NOTICE << "update cumu compaction thread pool min_threads from " << old_min_threads
622
0
                        << " to " << cumu_compaction_threads_num;
623
0
        }
624
0
    }
625
626
0
    auto single_replica_compaction_threads_num =
627
0
            get_single_replica_compaction_threads_num(_store_map.size());
628
0
    if (_single_replica_compaction_thread_pool->max_threads() !=
629
0
        single_replica_compaction_threads_num) {
630
0
        int old_max_threads = _single_replica_compaction_thread_pool->max_threads();
631
0
        Status status = _single_replica_compaction_thread_pool->set_max_threads(
632
0
                single_replica_compaction_threads_num);
633
0
        if (status.ok()) {
634
0
            VLOG_NOTICE << "update single replica compaction thread pool max_threads from "
635
0
                        << old_max_threads << " to " << single_replica_compaction_threads_num;
636
0
        }
637
0
    }
638
0
    if (_single_replica_compaction_thread_pool->min_threads() !=
639
0
        single_replica_compaction_threads_num) {
640
0
        int old_min_threads = _single_replica_compaction_thread_pool->min_threads();
641
0
        Status status = _single_replica_compaction_thread_pool->set_min_threads(
642
0
                single_replica_compaction_threads_num);
643
0
        if (status.ok()) {
644
0
            VLOG_NOTICE << "update single replica compaction thread pool min_threads from "
645
0
                        << old_min_threads << " to " << single_replica_compaction_threads_num;
646
0
        }
647
0
    }
648
0
}
649
650
7
void StorageEngine::_compaction_tasks_producer_callback() {
651
7
    LOG(INFO) << "try to start compaction producer process!";
652
653
7
    std::vector<DataDir*> data_dirs = get_stores();
654
7
    _compaction_submit_registry.reset(data_dirs);
655
656
7
    int round = 0;
657
7
    CompactionType compaction_type;
658
659
    // Used to record the time when the score metric was last updated.
660
    // The update of the score metric is accompanied by the logic of selecting the tablet.
661
    // If there is no slot available, the logic of selecting the tablet will be terminated,
662
    // which causes the score metric update to be terminated.
663
    // In order to avoid this situation, we need to update the score regularly.
664
7
    int64_t last_cumulative_score_update_time = 0;
665
7
    int64_t last_base_score_update_time = 0;
666
7
    static const int64_t check_score_interval_ms = 5000; // 5 secs
667
668
7
    int64_t interval = config::generate_compaction_tasks_interval_ms;
669
7
    do {
670
7
        int64_t cur_time = UnixMillis();
671
7
        if (!config::disable_auto_compaction &&
672
7
            (!config::enable_compaction_pause_on_high_memory ||
673
6
             !GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE))) {
674
6
            _adjust_compaction_thread_num();
675
676
6
            bool check_score = false;
677
6
            if (round < config::cumulative_compaction_rounds_for_each_base_compaction_round) {
678
6
                compaction_type = CompactionType::CUMULATIVE_COMPACTION;
679
6
                round++;
680
6
                if (cur_time - last_cumulative_score_update_time >= check_score_interval_ms) {
681
6
                    check_score = true;
682
6
                    last_cumulative_score_update_time = cur_time;
683
6
                }
684
6
            } else {
685
0
                compaction_type = CompactionType::BASE_COMPACTION;
686
0
                round = 0;
687
0
                if (cur_time - last_base_score_update_time >= check_score_interval_ms) {
688
0
                    check_score = true;
689
0
                    last_base_score_update_time = cur_time;
690
0
                }
691
0
            }
692
6
            std::unique_ptr<ThreadPool>& thread_pool =
693
6
                    (compaction_type == CompactionType::CUMULATIVE_COMPACTION)
694
6
                            ? _cumu_compaction_thread_pool
695
6
                            : _base_compaction_thread_pool;
696
6
            bvar::Status<int64_t>& g_compaction_task_num_per_round =
697
6
                    (compaction_type == CompactionType::CUMULATIVE_COMPACTION)
698
6
                            ? g_cumu_compaction_task_num_per_round
699
6
                            : g_base_compaction_task_num_per_round;
700
6
            if (config::compaction_num_per_round != -1) {
701
0
                _compaction_num_per_round = config::compaction_num_per_round;
702
6
            } else if (thread_pool->get_queue_size() == 0) {
703
                // If all tasks in the thread pool queue are executed,
704
                // double the number of tasks generated each time,
705
                // with a maximum of config::max_automatic_compaction_num_per_round tasks per generation.
706
2
                if (_compaction_num_per_round < config::max_automatic_compaction_num_per_round) {
707
1
                    _compaction_num_per_round *= 2;
708
1
                    g_compaction_task_num_per_round.set_value(_compaction_num_per_round);
709
1
                }
710
4
            } else if (thread_pool->get_queue_size() > _compaction_num_per_round / 2) {
711
                // If all tasks in the thread pool is greater than
712
                // half of the tasks submitted in the previous round,
713
                // reduce the number of tasks generated each time by half, with a minimum of 1.
714
3
                if (_compaction_num_per_round > 1) {
715
1
                    _compaction_num_per_round /= 2;
716
1
                    g_compaction_task_num_per_round.set_value(_compaction_num_per_round);
717
1
                }
718
3
            }
719
6
            std::vector<TabletSharedPtr> tablets_compaction =
720
6
                    _generate_compaction_tasks(compaction_type, data_dirs, check_score);
721
6
            if (tablets_compaction.size() == 0) {
722
6
                std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex);
723
6
                _wakeup_producer_flag = 0;
724
                // It is necessary to wake up the thread on timeout to prevent deadlock
725
                // in case of no running compaction task.
726
6
                _compaction_producer_sleep_cv.wait_for(
727
6
                        lock, std::chrono::milliseconds(2000),
728
12
                        [this] { return _wakeup_producer_flag == 1; });
729
6
                continue;
730
6
            }
731
732
0
            for (const auto& tablet : tablets_compaction) {
733
0
                if (compaction_type == CompactionType::BASE_COMPACTION) {
734
0
                    tablet->set_last_base_compaction_schedule_time(UnixMillis());
735
0
                } else if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) {
736
0
                    tablet->set_last_cumu_compaction_schedule_time(UnixMillis());
737
0
                } else if (compaction_type == CompactionType::FULL_COMPACTION) {
738
0
                    tablet->set_last_full_compaction_schedule_time(UnixMillis());
739
0
                }
740
0
                Status st = _submit_compaction_task(tablet, compaction_type, false);
741
0
                if (!st.ok()) {
742
0
                    LOG(WARNING) << "failed to submit compaction task for tablet: "
743
0
                                 << tablet->tablet_id() << ", err: " << st;
744
0
                }
745
0
            }
746
0
            interval = config::generate_compaction_tasks_interval_ms;
747
1
        } else {
748
1
            interval = 5000; // 5s to check disable_auto_compaction
749
1
        }
750
751
        // wait some seconds for ut test
752
1
        {
753
1
            std ::vector<std ::any> args {};
754
1
            args.emplace_back(1);
755
1
            doris ::SyncPoint ::get_instance()->process(
756
1
                    "StorageEngine::_compaction_tasks_producer_callback", std ::move(args));
757
1
        }
758
1
        int64_t end_time = UnixMillis();
759
1
        DorisMetrics::instance()->compaction_producer_callback_a_round_time->set_value(end_time -
760
1
                                                                                       cur_time);
761
7
    } while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(interval)));
762
7
}
763
764
0
void StorageEngine::_update_replica_infos_callback() {
765
#ifdef GOOGLE_PROFILER
766
    ProfilerRegisterThread();
767
#endif
768
0
    LOG(INFO) << "start to update replica infos!";
769
770
0
    int64_t interval = config::update_replica_infos_interval_seconds;
771
0
    do {
772
0
        auto all_tablets = _tablet_manager->get_all_tablet([](Tablet* t) {
773
0
            return t->is_used() && t->tablet_state() == TABLET_RUNNING &&
774
0
                   !t->tablet_meta()->tablet_schema()->disable_auto_compaction() &&
775
0
                   t->tablet_meta()->tablet_schema()->enable_single_replica_compaction();
776
0
        });
777
0
        ClusterInfo* cluster_info = ExecEnv::GetInstance()->cluster_info();
778
0
        if (cluster_info == nullptr) {
779
0
            LOG(WARNING) << "Have not get FE Master heartbeat yet";
780
0
            std::this_thread::sleep_for(std::chrono::seconds(2));
781
0
            continue;
782
0
        }
783
0
        TNetworkAddress master_addr = cluster_info->master_fe_addr;
784
0
        if (master_addr.hostname == "" || master_addr.port == 0) {
785
0
            LOG(WARNING) << "Have not get FE Master heartbeat yet";
786
0
            std::this_thread::sleep_for(std::chrono::seconds(2));
787
0
            continue;
788
0
        }
789
790
0
        int start = 0;
791
0
        int tablet_size = cast_set<int>(all_tablets.size());
792
        // The while loop may take a long time, we should skip it when stop
793
0
        while (start < tablet_size && _stop_background_threads_latch.count() > 0) {
794
0
            int batch_size = std::min(100, tablet_size - start);
795
0
            int end = start + batch_size;
796
0
            TGetTabletReplicaInfosRequest request;
797
0
            TGetTabletReplicaInfosResult result;
798
0
            for (int i = start; i < end; i++) {
799
0
                request.tablet_ids.emplace_back(all_tablets[i]->tablet_id());
800
0
            }
801
0
            Status rpc_st = ThriftRpcHelper::rpc<FrontendServiceClient>(
802
0
                    master_addr.hostname, master_addr.port,
803
0
                    [&request, &result](FrontendServiceConnection& client) {
804
0
                        client->getTabletReplicaInfos(result, request);
805
0
                    });
806
807
0
            if (!rpc_st.ok()) {
808
0
                LOG(WARNING) << "Failed to get tablet replica infos, encounter rpc failure, "
809
0
                                "tablet start: "
810
0
                             << start << " end: " << end;
811
0
                continue;
812
0
            }
813
814
0
            std::unique_lock<std::mutex> lock(_peer_replica_infos_mutex);
815
0
            for (const auto& it : result.tablet_replica_infos) {
816
0
                auto tablet_id = it.first;
817
0
                auto tablet = _tablet_manager->get_tablet(tablet_id);
818
0
                if (tablet == nullptr) {
819
0
                    VLOG_CRITICAL << "tablet ptr is nullptr";
820
0
                    continue;
821
0
                }
822
823
0
                VLOG_NOTICE << tablet_id << " tablet has " << it.second.size() << " replicas";
824
0
                uint64_t min_modulo = MOD_PRIME;
825
0
                TReplicaInfo peer_replica;
826
0
                for (const auto& replica : it.second) {
827
0
                    int64_t peer_replica_id = replica.replica_id;
828
0
                    uint64_t modulo = HashUtil::hash64(&peer_replica_id, sizeof(peer_replica_id),
829
0
                                                       DEFAULT_SEED) %
830
0
                                      MOD_PRIME;
831
0
                    if (modulo < min_modulo) {
832
0
                        peer_replica = replica;
833
0
                        min_modulo = modulo;
834
0
                    }
835
0
                }
836
0
                VLOG_NOTICE << "tablet " << tablet_id << ", peer replica host is "
837
0
                            << peer_replica.host;
838
0
                _peer_replica_infos[tablet_id] = peer_replica;
839
0
            }
840
0
            _token = result.token;
841
0
            VLOG_NOTICE << "get tablet replica infos from fe, size is " << end - start
842
0
                        << " token = " << result.token;
843
0
            start = end;
844
0
        }
845
0
        interval = config::update_replica_infos_interval_seconds;
846
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
847
0
}
848
849
Status StorageEngine::_submit_single_replica_compaction_task(TabletSharedPtr tablet,
850
0
                                                             CompactionType compaction_type) {
851
    // For single replica compaction, the local version to be merged is determined based on the version fetched from the peer replica.
852
    // Therefore, it is currently not possible to determine whether it should be a base compaction or cumulative compaction.
853
    // As a result, the tablet needs to be pushed to both the _tablet_submitted_cumu_compaction and the _tablet_submitted_base_compaction simultaneously.
854
0
    bool already_exist =
855
0
            _compaction_submit_registry.insert(tablet, CompactionType::CUMULATIVE_COMPACTION);
856
0
    if (already_exist) {
857
0
        return Status::AlreadyExist<false>(
858
0
                "compaction task has already been submitted, tablet_id={}", tablet->tablet_id());
859
0
    }
860
861
0
    already_exist = _compaction_submit_registry.insert(tablet, CompactionType::BASE_COMPACTION);
862
0
    if (already_exist) {
863
0
        _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION);
864
0
        return Status::AlreadyExist<false>(
865
0
                "compaction task has already been submitted, tablet_id={}", tablet->tablet_id());
866
0
    }
867
868
0
    auto compaction = std::make_shared<SingleReplicaCompaction>(*this, tablet, compaction_type);
869
0
    DorisMetrics::instance()->single_compaction_request_total->increment(1);
870
0
    auto st = compaction->prepare_compact();
871
872
0
    auto clean_single_replica_compaction = [tablet, this]() {
873
0
        _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION);
874
0
        _pop_tablet_from_submitted_compaction(tablet, CompactionType::BASE_COMPACTION);
875
0
    };
876
877
0
    if (!st.ok()) {
878
0
        clean_single_replica_compaction();
879
0
        if (!st.is<ErrorCode::CUMULATIVE_NO_SUITABLE_VERSION>()) {
880
0
            LOG(WARNING) << "failed to prepare single replica compaction, tablet_id="
881
0
                         << tablet->tablet_id() << " : " << st;
882
0
            return st;
883
0
        }
884
0
        return Status::OK(); // No suitable version, regard as OK
885
0
    }
886
887
0
    auto submit_st = _single_replica_compaction_thread_pool->submit_func(
888
0
            [tablet, compaction = std::move(compaction),
889
0
             clean_single_replica_compaction]() mutable {
890
0
                tablet->execute_single_replica_compaction(*compaction);
891
0
                clean_single_replica_compaction();
892
0
            });
893
0
    if (!submit_st.ok()) {
894
0
        clean_single_replica_compaction();
895
0
        return Status::InternalError(
896
0
                "failed to submit single replica compaction task to thread pool, "
897
0
                "tablet_id={}",
898
0
                tablet->tablet_id());
899
0
    }
900
0
    return Status::OK();
901
0
}
902
903
void StorageEngine::get_tablet_rowset_versions(const PGetTabletVersionsRequest* request,
904
0
                                               PGetTabletVersionsResponse* response) {
905
0
    TabletSharedPtr tablet = _tablet_manager->get_tablet(request->tablet_id());
906
0
    if (tablet == nullptr) {
907
0
        response->mutable_status()->set_status_code(TStatusCode::CANCELLED);
908
0
        return;
909
0
    }
910
0
    std::vector<Version> local_versions = tablet->get_all_local_versions();
911
0
    for (const auto& local_version : local_versions) {
912
0
        auto version = response->add_versions();
913
0
        version->set_first(local_version.first);
914
0
        version->set_second(local_version.second);
915
0
    }
916
0
    response->mutable_status()->set_status_code(0);
917
0
}
918
919
bool need_generate_compaction_tasks(int task_cnt_per_disk, int thread_per_disk,
920
0
                                    CompactionType compaction_type, bool all_base) {
921
    // We need to reserve at least one Slot for cumulative compaction.
922
    // So when there is only one Slot, we have to judge whether there is a cumulative compaction
923
    // in the current submitted tasks.
924
    // If so, the last Slot can be assigned to Base compaction,
925
    // otherwise, this Slot needs to be reserved for cumulative compaction.
926
0
    if (task_cnt_per_disk >= thread_per_disk) {
927
        // Return if no available slot
928
0
        return false;
929
0
    } else if (task_cnt_per_disk >= thread_per_disk - 1) {
930
        // Only one slot left, check if it can be assigned to base compaction task.
931
0
        if (compaction_type == CompactionType::BASE_COMPACTION) {
932
0
            if (all_base) {
933
0
                return false;
934
0
            }
935
0
        }
936
0
    }
937
0
    return true;
938
0
}
939
940
0
int get_concurrent_per_disk(int max_score, int thread_per_disk) {
941
0
    if (!config::enable_compaction_priority_scheduling) {
942
0
        return thread_per_disk;
943
0
    }
944
945
0
    double load_average = 0;
946
0
    if (DorisMetrics::instance()->system_metrics() != nullptr) {
947
0
        load_average = DorisMetrics::instance()->system_metrics()->get_load_average_1_min();
948
0
    }
949
0
    int num_cores = doris::CpuInfo::num_cores();
950
0
    bool cpu_usage_high = load_average > num_cores * 0.8;
951
952
0
    auto process_memory_usage = doris::GlobalMemoryArbitrator::process_memory_usage();
953
0
    bool memory_usage_high = static_cast<double>(process_memory_usage) >
954
0
                             static_cast<double>(MemInfo::soft_mem_limit()) * 0.8;
955
956
0
    if (max_score <= config::low_priority_compaction_score_threshold &&
957
0
        (cpu_usage_high || memory_usage_high)) {
958
0
        return config::low_priority_compaction_task_num_per_disk;
959
0
    }
960
961
0
    return thread_per_disk;
962
0
}
963
964
0
int32_t disk_compaction_slot_num(const DataDir& data_dir) {
965
0
    return data_dir.is_ssd_disk() ? config::compaction_task_num_per_fast_disk
966
0
                                  : config::compaction_task_num_per_disk;
967
0
}
968
969
bool has_free_compaction_slot(CompactionSubmitRegistry* registry, DataDir* dir,
970
0
                              CompactionType compaction_type, uint32_t executing_cnt) {
971
0
    int32_t thread_per_disk = disk_compaction_slot_num(*dir);
972
0
    return need_generate_compaction_tasks(
973
0
            executing_cnt, thread_per_disk, compaction_type,
974
0
            !registry->has_compaction_task(dir, CompactionType::CUMULATIVE_COMPACTION));
975
0
}
976
977
std::vector<TabletSharedPtr> StorageEngine::_generate_compaction_tasks(
978
6
        CompactionType compaction_type, std::vector<DataDir*>& data_dirs, bool check_score) {
979
6
    TEST_SYNC_POINT_RETURN_WITH_VALUE("olap_server::_generate_compaction_tasks.return_empty",
980
0
                                      std::vector<TabletSharedPtr> {});
981
0
    _update_cumulative_compaction_policy();
982
0
    std::vector<TabletSharedPtr> tablets_compaction;
983
0
    uint32_t max_compaction_score = 0;
984
985
0
    std::random_device rd;
986
0
    std::mt19937 g(rd());
987
0
    std::shuffle(data_dirs.begin(), data_dirs.end(), g);
988
989
    // Copy _tablet_submitted_xxx_compaction map so that we don't need to hold _tablet_submitted_compaction_mutex
990
    // when traversing the data dir
991
0
    auto compaction_registry_snapshot = _compaction_submit_registry.create_snapshot();
992
0
    for (auto* data_dir : data_dirs) {
993
0
        bool need_pick_tablet = true;
994
0
        uint32_t executing_task_num =
995
0
                compaction_registry_snapshot.count_executing_cumu_and_base(data_dir);
996
0
        need_pick_tablet = has_free_compaction_slot(&compaction_registry_snapshot, data_dir,
997
0
                                                    compaction_type, executing_task_num);
998
0
        if (!need_pick_tablet && !check_score) {
999
0
            continue;
1000
0
        }
1001
1002
        // Even if need_pick_tablet is false, we still need to call find_best_tablet_to_compaction(),
1003
        // So that we can update the max_compaction_score metric.
1004
0
        if (!data_dir->reach_capacity_limit(0)) {
1005
0
            uint32_t disk_max_score = 0;
1006
0
            auto tablets = compaction_registry_snapshot.pick_topn_tablets_for_compaction(
1007
0
                    _tablet_manager.get(), data_dir, compaction_type,
1008
0
                    _cumulative_compaction_policies, &disk_max_score);
1009
0
            int concurrent_num =
1010
0
                    get_concurrent_per_disk(disk_max_score, disk_compaction_slot_num(*data_dir));
1011
0
            need_pick_tablet = need_generate_compaction_tasks(
1012
0
                    executing_task_num, concurrent_num, compaction_type,
1013
0
                    !compaction_registry_snapshot.has_compaction_task(
1014
0
                            data_dir, CompactionType::CUMULATIVE_COMPACTION));
1015
0
            for (const auto& tablet : tablets) {
1016
0
                if (tablet != nullptr) {
1017
0
                    if (need_pick_tablet) {
1018
0
                        tablets_compaction.emplace_back(tablet);
1019
0
                    }
1020
0
                    max_compaction_score = std::max(max_compaction_score, disk_max_score);
1021
0
                }
1022
0
            }
1023
0
        }
1024
0
    }
1025
1026
0
    if (max_compaction_score > 0) {
1027
0
        if (compaction_type == CompactionType::BASE_COMPACTION) {
1028
0
            DorisMetrics::instance()->tablet_base_max_compaction_score->set_value(
1029
0
                    max_compaction_score);
1030
0
        } else {
1031
0
            DorisMetrics::instance()->tablet_cumulative_max_compaction_score->set_value(
1032
0
                    max_compaction_score);
1033
0
        }
1034
0
    }
1035
0
    return tablets_compaction;
1036
6
}
1037
1038
0
void StorageEngine::_update_cumulative_compaction_policy() {
1039
0
    if (_cumulative_compaction_policies.empty()) {
1040
0
        _cumulative_compaction_policies[CUMULATIVE_SIZE_BASED_POLICY] =
1041
0
                CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy(
1042
0
                        CUMULATIVE_SIZE_BASED_POLICY);
1043
0
        _cumulative_compaction_policies[CUMULATIVE_TIME_SERIES_POLICY] =
1044
0
                CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy(
1045
0
                        CUMULATIVE_TIME_SERIES_POLICY);
1046
0
    }
1047
0
}
1048
1049
void StorageEngine::_pop_tablet_from_submitted_compaction(TabletSharedPtr tablet,
1050
7
                                                          CompactionType compaction_type) {
1051
7
    _compaction_submit_registry.remove(tablet, compaction_type, [this]() {
1052
7
        std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex);
1053
7
        _wakeup_producer_flag = 1;
1054
7
        _compaction_producer_sleep_cv.notify_one();
1055
7
    });
1056
7
}
1057
1058
Status StorageEngine::_submit_compaction_task(TabletSharedPtr tablet,
1059
21
                                              CompactionType compaction_type, bool force) {
1060
21
    if (tablet->tablet_meta()->tablet_schema()->enable_single_replica_compaction() &&
1061
21
        should_fetch_from_peer(tablet->tablet_id())) {
1062
0
        VLOG_CRITICAL << "start to submit single replica compaction task for tablet: "
1063
0
                      << tablet->tablet_id();
1064
0
        Status st = _submit_single_replica_compaction_task(tablet, compaction_type);
1065
0
        if (!st.ok()) {
1066
0
            LOG(WARNING) << "failed to submit single replica compaction task for tablet: "
1067
0
                         << tablet->tablet_id() << ", err: " << st;
1068
0
        }
1069
1070
0
        return Status::OK();
1071
0
    }
1072
21
    bool already_exist = _compaction_submit_registry.insert(tablet, compaction_type);
1073
21
    if (already_exist) {
1074
0
        return Status::AlreadyExist<false>(
1075
0
                "compaction task has already been submitted, tablet_id={}, compaction_type={}.",
1076
0
                tablet->tablet_id(), compaction_type);
1077
0
    }
1078
21
    tablet->compaction_stage = CompactionStage::PENDING;
1079
21
    std::shared_ptr<CompactionMixin> compaction;
1080
21
    int64_t permits = 0;
1081
21
    Status st = Tablet::prepare_compaction_and_calculate_permits(compaction_type, tablet,
1082
21
                                                                 compaction, permits);
1083
21
    if (st.ok() && permits > 0) {
1084
21
        if (!force) {
1085
21
            _permit_limiter.request(permits);
1086
21
        }
1087
21
        std::unique_ptr<ThreadPool>& thread_pool =
1088
21
                (compaction_type == CompactionType::CUMULATIVE_COMPACTION)
1089
21
                        ? _cumu_compaction_thread_pool
1090
21
                        : _base_compaction_thread_pool;
1091
21
        VLOG_CRITICAL << "compaction thread pool. type: "
1092
0
                      << (compaction_type == CompactionType::CUMULATIVE_COMPACTION ? "CUMU"
1093
0
                                                                                   : "BASE")
1094
0
                      << ", num_threads: " << thread_pool->num_threads()
1095
0
                      << ", num_threads_pending_start: " << thread_pool->num_threads_pending_start()
1096
0
                      << ", num_active_threads: " << thread_pool->num_active_threads()
1097
0
                      << ", max_threads: " << thread_pool->max_threads()
1098
0
                      << ", min_threads: " << thread_pool->min_threads()
1099
0
                      << ", num_total_queued_tasks: " << thread_pool->get_queue_size();
1100
21
        auto status = thread_pool->submit_func([=, compaction = std::move(compaction), this]() {
1101
7
            _handle_compaction(std::move(tablet), std::move(compaction), compaction_type, permits,
1102
7
                               force);
1103
7
        });
1104
21
        if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) [[likely]] {
1105
21
            DorisMetrics::instance()->cumulative_compaction_task_pending_total->set_value(
1106
21
                    _cumu_compaction_thread_pool->get_queue_size());
1107
21
        } else if (compaction_type == CompactionType::BASE_COMPACTION) {
1108
0
            DorisMetrics::instance()->base_compaction_task_pending_total->set_value(
1109
0
                    _base_compaction_thread_pool->get_queue_size());
1110
0
        }
1111
21
        if (!st.ok()) {
1112
0
            if (!force) {
1113
0
                _permit_limiter.release(permits);
1114
0
            }
1115
0
            _pop_tablet_from_submitted_compaction(tablet, compaction_type);
1116
0
            tablet->compaction_stage = CompactionStage::NOT_SCHEDULED;
1117
0
            return Status::InternalError(
1118
0
                    "failed to submit compaction task to thread pool, "
1119
0
                    "tablet_id={}, compaction_type={}.",
1120
0
                    tablet->tablet_id(), compaction_type);
1121
0
        }
1122
21
        return Status::OK();
1123
21
    } else {
1124
0
        _pop_tablet_from_submitted_compaction(tablet, compaction_type);
1125
0
        tablet->compaction_stage = CompactionStage::NOT_SCHEDULED;
1126
0
        if (!st.ok()) {
1127
0
            return Status::InternalError(
1128
0
                    "failed to prepare compaction task and calculate permits, "
1129
0
                    "tablet_id={}, compaction_type={}, "
1130
0
                    "permit={}, current_permit={}, status={}",
1131
0
                    tablet->tablet_id(), compaction_type, permits, _permit_limiter.usage(),
1132
0
                    st.to_string());
1133
0
        }
1134
0
        return st;
1135
0
    }
1136
21
}
1137
1138
void StorageEngine::_handle_compaction(TabletSharedPtr tablet,
1139
                                       std::shared_ptr<CompactionMixin> compaction,
1140
                                       CompactionType compaction_type, int64_t permits,
1141
7
                                       bool force) {
1142
7
    if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) [[likely]] {
1143
7
        DorisMetrics::instance()->cumulative_compaction_task_running_total->increment(1);
1144
7
        DorisMetrics::instance()->cumulative_compaction_task_pending_total->set_value(
1145
7
                _cumu_compaction_thread_pool->get_queue_size());
1146
7
    } else if (compaction_type == CompactionType::BASE_COMPACTION) {
1147
0
        DorisMetrics::instance()->base_compaction_task_running_total->increment(1);
1148
0
        DorisMetrics::instance()->base_compaction_task_pending_total->set_value(
1149
0
                _base_compaction_thread_pool->get_queue_size());
1150
0
    }
1151
7
    bool is_large_task = true;
1152
7
    Defer defer {[&]() {
1153
7
        DBUG_EXECUTE_IF("StorageEngine._submit_compaction_task.sleep", { sleep(5); })
1154
7
        if (!force) {
1155
7
            _permit_limiter.release(permits);
1156
7
        }
1157
7
        _pop_tablet_from_submitted_compaction(tablet, compaction_type);
1158
7
        tablet->compaction_stage = CompactionStage::NOT_SCHEDULED;
1159
7
        if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) {
1160
7
            std::lock_guard<std::mutex> lock(_cumu_compaction_delay_mtx);
1161
7
            _cumu_compaction_thread_pool_used_threads--;
1162
7
            if (!is_large_task) {
1163
0
                _cumu_compaction_thread_pool_small_tasks_running--;
1164
0
            }
1165
7
            DorisMetrics::instance()->cumulative_compaction_task_running_total->increment(-1);
1166
7
            DorisMetrics::instance()->cumulative_compaction_task_pending_total->set_value(
1167
7
                    _cumu_compaction_thread_pool->get_queue_size());
1168
7
        } else if (compaction_type == CompactionType::BASE_COMPACTION) {
1169
0
            DorisMetrics::instance()->base_compaction_task_running_total->increment(-1);
1170
0
            DorisMetrics::instance()->base_compaction_task_pending_total->set_value(
1171
0
                    _base_compaction_thread_pool->get_queue_size());
1172
0
        }
1173
7
    }};
1174
7
    do {
1175
7
        if (compaction->compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1176
7
            std::lock_guard<std::mutex> lock(_cumu_compaction_delay_mtx);
1177
7
            _cumu_compaction_thread_pool_used_threads++;
1178
7
            if (config::large_cumu_compaction_task_min_thread_num > 1 &&
1179
7
                _cumu_compaction_thread_pool->max_threads() >=
1180
0
                        config::large_cumu_compaction_task_min_thread_num) {
1181
                // Determine if this is a large task based on configured thresholds
1182
0
                is_large_task = (compaction->calc_input_rowsets_total_size() >
1183
0
                                         config::large_cumu_compaction_task_bytes_threshold ||
1184
0
                                 compaction->calc_input_rowsets_row_num() >
1185
0
                                         config::large_cumu_compaction_task_row_num_threshold);
1186
1187
                // Small task. No delay needed
1188
0
                if (!is_large_task) {
1189
0
                    _cumu_compaction_thread_pool_small_tasks_running++;
1190
0
                    break;
1191
0
                }
1192
                // Deal with large task
1193
0
                if (_should_delay_large_task()) {
1194
0
                    LOG_WARNING(
1195
0
                            "failed to do CumulativeCompaction, cumu thread pool is "
1196
0
                            "intensive, delay large task.")
1197
0
                            .tag("tablet_id", tablet->tablet_id())
1198
0
                            .tag("input_rows", compaction->calc_input_rowsets_row_num())
1199
0
                            .tag("input_rowsets_total_size",
1200
0
                                 compaction->calc_input_rowsets_total_size())
1201
0
                            .tag("config::large_cumu_compaction_task_bytes_threshold",
1202
0
                                 config::large_cumu_compaction_task_bytes_threshold)
1203
0
                            .tag("config::large_cumu_compaction_task_row_num_threshold",
1204
0
                                 config::large_cumu_compaction_task_row_num_threshold)
1205
0
                            .tag("remaining threads", _cumu_compaction_thread_pool_used_threads)
1206
0
                            .tag("small_tasks_running",
1207
0
                                 _cumu_compaction_thread_pool_small_tasks_running);
1208
                    // Delay this task and sleep 5s for this tablet
1209
0
                    long now = duration_cast<std::chrono::milliseconds>(
1210
0
                                       std::chrono::system_clock::now().time_since_epoch())
1211
0
                                       .count();
1212
0
                    tablet->set_last_cumu_compaction_failure_time(now);
1213
0
                    return;
1214
0
                }
1215
0
            }
1216
7
        }
1217
7
    } while (false);
1218
7
    if (!tablet->can_do_compaction(tablet->data_dir()->path_hash(), compaction_type)) {
1219
0
        LOG(INFO) << "Tablet state has been changed, no need to begin this compaction "
1220
0
                     "task, tablet_id="
1221
0
                  << tablet->tablet_id() << ", tablet_state=" << tablet->tablet_state();
1222
0
        return;
1223
0
    }
1224
7
    tablet->compaction_stage = CompactionStage::EXECUTING;
1225
7
    TEST_SYNC_POINT_RETURN_WITH_VOID("olap_server::execute_compaction");
1226
1
    tablet->execute_compaction(*compaction);
1227
1
}
1228
1229
Status StorageEngine::submit_compaction_task(TabletSharedPtr tablet, CompactionType compaction_type,
1230
0
                                             bool force, bool eager) {
1231
0
    if (!eager) {
1232
0
        DCHECK(compaction_type == CompactionType::BASE_COMPACTION ||
1233
0
               compaction_type == CompactionType::CUMULATIVE_COMPACTION);
1234
0
        auto compaction_registry_snapshot = _compaction_submit_registry.create_snapshot();
1235
0
        auto stores = get_stores();
1236
1237
0
        bool is_busy = std::none_of(
1238
0
                stores.begin(), stores.end(),
1239
0
                [&compaction_registry_snapshot, compaction_type](auto* data_dir) {
1240
0
                    return has_free_compaction_slot(
1241
0
                            &compaction_registry_snapshot, data_dir, compaction_type,
1242
0
                            compaction_registry_snapshot.count_executing_cumu_and_base(data_dir));
1243
0
                });
1244
0
        if (is_busy) {
1245
0
            LOG_EVERY_N(WARNING, 100)
1246
0
                    << "Too busy to submit a compaction task, tablet=" << tablet->get_table_id();
1247
0
            return Status::OK();
1248
0
        }
1249
0
    }
1250
0
    _update_cumulative_compaction_policy();
1251
    // alter table tableName set ("compaction_policy"="time_series")
1252
    // if atler table's compaction  policy, we need to modify tablet compaction policy shared ptr
1253
0
    if (tablet->get_cumulative_compaction_policy() == nullptr ||
1254
0
        tablet->get_cumulative_compaction_policy()->name() !=
1255
0
                tablet->tablet_meta()->compaction_policy()) {
1256
0
        tablet->set_cumulative_compaction_policy(
1257
0
                _cumulative_compaction_policies.at(tablet->tablet_meta()->compaction_policy()));
1258
0
    }
1259
0
    tablet->set_skip_compaction(false);
1260
0
    return _submit_compaction_task(tablet, compaction_type, force);
1261
0
}
1262
1263
Status StorageEngine::_handle_seg_compaction(std::shared_ptr<SegcompactionWorker> worker,
1264
                                             SegCompactionCandidatesSharedPtr segments,
1265
11
                                             uint64_t submission_time) {
1266
    // note: be aware that worker->_writer maybe released when the task is cancelled
1267
11
    uint64_t exec_queue_time = GetCurrentTimeMicros() - submission_time;
1268
11
    LOG(INFO) << "segcompaction thread pool queue time(ms): " << exec_queue_time / 1000;
1269
11
    worker->compact_segments(segments);
1270
    // return OK here. error will be reported via BetaRowsetWriter::_segcompaction_status
1271
11
    return Status::OK();
1272
11
}
1273
1274
Status StorageEngine::submit_seg_compaction_task(std::shared_ptr<SegcompactionWorker> worker,
1275
11
                                                 SegCompactionCandidatesSharedPtr segments) {
1276
11
    uint64_t submission_time = GetCurrentTimeMicros();
1277
11
    return _seg_compaction_thread_pool->submit_func([this, worker, segments, submission_time] {
1278
11
        static_cast<void>(_handle_seg_compaction(worker, segments, submission_time));
1279
11
    });
1280
11
}
1281
1282
0
Status StorageEngine::process_index_change_task(const TAlterInvertedIndexReq& request) {
1283
0
    auto tablet_id = request.tablet_id;
1284
0
    TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id);
1285
0
    DBUG_EXECUTE_IF("StorageEngine::process_index_change_task_tablet_nullptr",
1286
0
                    { tablet = nullptr; })
1287
0
    if (tablet == nullptr) {
1288
0
        LOG(WARNING) << "tablet: " << tablet_id << " not exist";
1289
0
        return Status::InternalError("tablet not exist, tablet_id={}.", tablet_id);
1290
0
    }
1291
1292
0
    IndexBuilderSharedPtr index_builder = std::make_shared<IndexBuilder>(
1293
0
            *this, tablet, request.columns, request.alter_inverted_indexes, request.is_drop_op);
1294
0
    RETURN_IF_ERROR(_handle_index_change(index_builder));
1295
0
    return Status::OK();
1296
0
}
1297
1298
0
Status StorageEngine::_handle_index_change(IndexBuilderSharedPtr index_builder) {
1299
0
    RETURN_IF_ERROR(index_builder->init());
1300
0
    RETURN_IF_ERROR(index_builder->do_build_inverted_index());
1301
0
    return Status::OK();
1302
0
}
1303
1304
0
void StorageEngine::_cooldown_tasks_producer_callback() {
1305
0
    int64_t interval = config::generate_cooldown_task_interval_sec;
1306
    // the cooldown replica may be slow to upload it's meta file, so we should wait
1307
    // until it has done uploaded
1308
0
    int64_t skip_failed_interval = interval * 10;
1309
0
    do {
1310
        // these tables are ordered by priority desc
1311
0
        std::vector<TabletSharedPtr> tablets;
1312
0
        std::vector<RowsetSharedPtr> rowsets;
1313
        // TODO(luwei) : a more efficient way to get cooldown tablets
1314
0
        auto cur_time = time(nullptr);
1315
        // we should skip all the tablets which are not running and those pending to do cooldown
1316
        // also tablets once failed to do follow cooldown
1317
0
        auto skip_tablet = [this, skip_failed_interval,
1318
0
                            cur_time](const TabletSharedPtr& tablet) -> bool {
1319
0
            bool is_skip =
1320
0
                    cur_time - tablet->last_failed_follow_cooldown_time() < skip_failed_interval ||
1321
0
                    TABLET_RUNNING != tablet->tablet_state();
1322
0
            if (is_skip) {
1323
0
                return is_skip;
1324
0
            }
1325
0
            std::lock_guard<std::mutex> lock(_running_cooldown_mutex);
1326
0
            return _running_cooldown_tablets.find(tablet->tablet_id()) !=
1327
0
                   _running_cooldown_tablets.end();
1328
0
        };
1329
0
        _tablet_manager->get_cooldown_tablets(&tablets, &rowsets, std::move(skip_tablet));
1330
0
        LOG(INFO) << "cooldown producer get tablet num: " << tablets.size();
1331
0
        int max_priority = cast_set<int>(tablets.size());
1332
0
        int index = 0;
1333
0
        for (const auto& tablet : tablets) {
1334
0
            {
1335
0
                std::lock_guard<std::mutex> lock(_running_cooldown_mutex);
1336
0
                _running_cooldown_tablets.insert(tablet->tablet_id());
1337
0
            }
1338
0
            PriorityThreadPool::Task task;
1339
0
            RowsetSharedPtr rowset = std::move(rowsets[index++]);
1340
0
            task.work_function = [tablet, rowset, task_size = tablets.size(), this]() {
1341
0
                Status st = tablet->cooldown(rowset);
1342
0
                {
1343
0
                    std::lock_guard<std::mutex> lock(_running_cooldown_mutex);
1344
0
                    _running_cooldown_tablets.erase(tablet->tablet_id());
1345
0
                }
1346
0
                if (!st.ok()) {
1347
0
                    LOG(WARNING) << "failed to cooldown, tablet: " << tablet->tablet_id()
1348
0
                                 << " err: " << st;
1349
0
                } else {
1350
0
                    LOG(INFO) << "succeed to cooldown, tablet: " << tablet->tablet_id()
1351
0
                              << " cooldown progress ("
1352
0
                              << task_size - _cooldown_thread_pool->get_queue_size() << "/"
1353
0
                              << task_size << ")";
1354
0
                }
1355
0
            };
1356
0
            task.priority = max_priority--;
1357
0
            bool submited = _cooldown_thread_pool->offer(std::move(task));
1358
1359
0
            if (!submited) {
1360
0
                LOG(INFO) << "failed to submit cooldown task";
1361
0
            }
1362
0
        }
1363
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
1364
0
}
1365
1366
0
void StorageEngine::_remove_unused_remote_files_callback() {
1367
0
    while (!_stop_background_threads_latch.wait_for(
1368
0
            std::chrono::seconds(config::remove_unused_remote_files_interval_sec))) {
1369
0
        LOG(INFO) << "begin to remove unused remote files";
1370
0
        do_remove_unused_remote_files();
1371
0
    }
1372
0
}
1373
1374
static void collect_tablet_unused_remote_files(
1375
        Tablet* t, TConfirmUnusedRemoteFilesRequest& req,
1376
        std::unordered_map<int64_t, std::pair<StorageResource, std::vector<io::FileInfo>>>& buffer,
1377
0
        int64_t& num_files_in_buffer, PendingRowsetSet& pending_remote_rowsets) {
1378
0
    auto storage_resource = get_resource_by_storage_policy_id(t->storage_policy_id());
1379
0
    if (!storage_resource) {
1380
0
        LOG(WARNING) << "encounter error when remove unused remote files, tablet_id="
1381
0
                     << t->tablet_id() << " : " << storage_resource.error();
1382
0
        return;
1383
0
    }
1384
1385
    // TODO(plat1ko): Support path v1
1386
0
    if (storage_resource->path_version > 0) {
1387
0
        return;
1388
0
    }
1389
1390
0
    std::vector<io::FileInfo> files;
1391
    // FIXME(plat1ko): What if user reset resource in storage policy to another resource?
1392
    //  Maybe we should also list files in previously uploaded resources.
1393
0
    bool exists = true;
1394
0
    auto st = storage_resource->fs->list(storage_resource->remote_tablet_path(t->tablet_id()), true,
1395
0
                                         &files, &exists);
1396
0
    if (!st.ok()) {
1397
0
        LOG(WARNING) << "encounter error when remove unused remote files, tablet_id="
1398
0
                     << t->tablet_id() << " : " << st;
1399
0
        return;
1400
0
    }
1401
0
    if (!exists || files.empty()) {
1402
0
        return;
1403
0
    }
1404
    // get all cooldowned rowsets
1405
0
    RowsetIdUnorderedSet cooldowned_rowsets;
1406
0
    UniqueId cooldown_meta_id;
1407
0
    {
1408
0
        std::shared_lock rlock(t->get_header_lock());
1409
0
        for (const auto& [_, rs_meta] : t->tablet_meta()->all_rs_metas()) {
1410
0
            if (!rs_meta->is_local()) {
1411
0
                cooldowned_rowsets.insert(rs_meta->rowset_id());
1412
0
            }
1413
0
        }
1414
0
        if (cooldowned_rowsets.empty()) {
1415
0
            return;
1416
0
        }
1417
0
        cooldown_meta_id = t->tablet_meta()->cooldown_meta_id();
1418
0
    }
1419
0
    auto [cooldown_term, cooldown_replica_id] = t->cooldown_conf();
1420
0
    if (cooldown_replica_id != t->replica_id()) {
1421
0
        return;
1422
0
    }
1423
    // {cooldown_replica_id}.{cooldown_term}.meta
1424
0
    std::string remote_meta_path =
1425
0
            cooldown_tablet_meta_filename(cooldown_replica_id, cooldown_term);
1426
    // filter out the paths that should be reserved
1427
0
    auto filter = [&](io::FileInfo& info) {
1428
0
        std::string_view filename = info.file_name;
1429
0
        if (filename.ends_with(".meta")) {
1430
0
            return filename == remote_meta_path;
1431
0
        }
1432
0
        auto rowset_id = extract_rowset_id(filename);
1433
0
        if (rowset_id.hi == 0) {
1434
0
            return false;
1435
0
        }
1436
0
        return cooldowned_rowsets.contains(rowset_id) || pending_remote_rowsets.contains(rowset_id);
1437
0
    };
1438
0
    files.erase(std::remove_if(files.begin(), files.end(), std::move(filter)), files.end());
1439
0
    if (files.empty()) {
1440
0
        return;
1441
0
    }
1442
0
    files.shrink_to_fit();
1443
0
    num_files_in_buffer += files.size();
1444
0
    buffer.insert({t->tablet_id(), {*storage_resource, std::move(files)}});
1445
0
    auto& info = req.confirm_list.emplace_back();
1446
0
    info.__set_tablet_id(t->tablet_id());
1447
0
    info.__set_cooldown_replica_id(cooldown_replica_id);
1448
0
    info.__set_cooldown_meta_id(cooldown_meta_id.to_thrift());
1449
0
}
1450
1451
static void confirm_and_remove_unused_remote_files(
1452
        const TConfirmUnusedRemoteFilesRequest& req,
1453
        std::unordered_map<int64_t, std::pair<StorageResource, std::vector<io::FileInfo>>>& buffer,
1454
0
        const int64_t num_files_in_buffer) {
1455
0
    TConfirmUnusedRemoteFilesResult result;
1456
0
    LOG(INFO) << "begin to confirm unused remote files. num_tablets=" << buffer.size()
1457
0
              << " num_files=" << num_files_in_buffer;
1458
0
    auto st = MasterServerClient::instance()->confirm_unused_remote_files(req, &result);
1459
0
    if (!st.ok()) {
1460
0
        LOG(WARNING) << st;
1461
0
        return;
1462
0
    }
1463
0
    for (auto id : result.confirmed_tablets) {
1464
0
        if (auto it = buffer.find(id); LIKELY(it != buffer.end())) {
1465
0
            auto& storage_resource = it->second.first;
1466
0
            auto& files = it->second.second;
1467
0
            std::vector<io::Path> paths;
1468
0
            paths.reserve(files.size());
1469
            // delete unused files
1470
0
            LOG(INFO) << "delete unused files. root_path=" << storage_resource.fs->root_path()
1471
0
                      << " tablet_id=" << id;
1472
0
            io::Path dir = storage_resource.remote_tablet_path(id);
1473
0
            for (auto& file : files) {
1474
0
                auto file_path = dir / file.file_name;
1475
0
                LOG(INFO) << "delete unused file: " << file_path.native();
1476
0
                paths.push_back(std::move(file_path));
1477
0
            }
1478
0
            st = storage_resource.fs->batch_delete(paths);
1479
0
            if (!st.ok()) {
1480
0
                LOG(WARNING) << "failed to delete unused files, tablet_id=" << id << " : " << st;
1481
0
            }
1482
0
            buffer.erase(it);
1483
0
        }
1484
0
    }
1485
0
}
1486
1487
0
void StorageEngine::do_remove_unused_remote_files() {
1488
0
    auto tablets = tablet_manager()->get_all_tablet([](Tablet* t) {
1489
0
        return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() &&
1490
0
               t->tablet_state() == TABLET_RUNNING &&
1491
0
               t->cooldown_conf_unlocked().cooldown_replica_id == t->replica_id();
1492
0
    });
1493
0
    TConfirmUnusedRemoteFilesRequest req;
1494
0
    req.__isset.confirm_list = true;
1495
    // tablet_id -> [storage_resource, unused_remote_files]
1496
0
    using unused_remote_files_buffer_t =
1497
0
            std::unordered_map<int64_t, std::pair<StorageResource, std::vector<io::FileInfo>>>;
1498
0
    unused_remote_files_buffer_t buffer;
1499
0
    int64_t num_files_in_buffer = 0;
1500
    // assume a filename is 0.1KB, buffer size should not larger than 100MB
1501
0
    constexpr int64_t max_files_in_buffer = 1000000;
1502
1503
    // batch confirm to reduce FE's overhead
1504
0
    auto next_confirm_time = std::chrono::steady_clock::now() +
1505
0
                             std::chrono::seconds(config::confirm_unused_remote_files_interval_sec);
1506
0
    for (auto& t : tablets) {
1507
0
        if (t.use_count() <= 1 // this means tablet has been dropped
1508
0
            || t->cooldown_conf_unlocked().cooldown_replica_id != t->replica_id() ||
1509
0
            t->tablet_state() != TABLET_RUNNING) {
1510
0
            continue;
1511
0
        }
1512
0
        collect_tablet_unused_remote_files(t.get(), req, buffer, num_files_in_buffer,
1513
0
                                           _pending_remote_rowsets);
1514
0
        if (num_files_in_buffer > 0 && (num_files_in_buffer > max_files_in_buffer ||
1515
0
                                        std::chrono::steady_clock::now() > next_confirm_time)) {
1516
0
            confirm_and_remove_unused_remote_files(req, buffer, num_files_in_buffer);
1517
0
            buffer.clear();
1518
0
            req.confirm_list.clear();
1519
0
            num_files_in_buffer = 0;
1520
0
            next_confirm_time =
1521
0
                    std::chrono::steady_clock::now() +
1522
0
                    std::chrono::seconds(config::confirm_unused_remote_files_interval_sec);
1523
0
        }
1524
0
    }
1525
0
    if (num_files_in_buffer > 0) {
1526
0
        confirm_and_remove_unused_remote_files(req, buffer, num_files_in_buffer);
1527
0
    }
1528
0
}
1529
1530
0
void StorageEngine::_cold_data_compaction_producer_callback() {
1531
0
    while (!_stop_background_threads_latch.wait_for(
1532
0
            std::chrono::seconds(config::cold_data_compaction_interval_sec))) {
1533
0
        if (config::disable_auto_compaction ||
1534
0
            GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE)) {
1535
0
            continue;
1536
0
        }
1537
1538
0
        std::unordered_set<int64_t> copied_tablet_submitted;
1539
0
        {
1540
0
            std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1541
0
            copied_tablet_submitted = _cold_compaction_tablet_submitted;
1542
0
        }
1543
0
        int64_t n = config::cold_data_compaction_thread_num - copied_tablet_submitted.size();
1544
0
        if (n <= 0) {
1545
0
            continue;
1546
0
        }
1547
0
        auto tablets = _tablet_manager->get_all_tablet([&copied_tablet_submitted](Tablet* t) {
1548
0
            return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() &&
1549
0
                   t->tablet_state() == TABLET_RUNNING &&
1550
0
                   !copied_tablet_submitted.contains(t->tablet_id()) &&
1551
0
                   !t->tablet_meta()->tablet_schema()->disable_auto_compaction();
1552
0
        });
1553
0
        std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_compact;
1554
0
        tablet_to_compact.reserve(n + 1);
1555
0
        std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_follow;
1556
0
        tablet_to_follow.reserve(n + 1);
1557
1558
0
        for (auto& t : tablets) {
1559
0
            if (t->replica_id() == t->cooldown_conf_unlocked().cooldown_replica_id) {
1560
0
                auto score = t->calc_cold_data_compaction_score();
1561
0
                if (score < config::cold_data_compaction_score_threshold) {
1562
0
                    continue;
1563
0
                }
1564
0
                tablet_to_compact.emplace_back(t, score);
1565
0
                if (tablet_to_compact.size() > n) {
1566
0
                    std::sort(tablet_to_compact.begin(), tablet_to_compact.end(),
1567
0
                              [](auto& a, auto& b) { return a.second > b.second; });
1568
0
                    tablet_to_compact.pop_back();
1569
0
                }
1570
0
                continue;
1571
0
            }
1572
            // else, need to follow
1573
0
            {
1574
0
                std::lock_guard lock(_running_cooldown_mutex);
1575
0
                if (_running_cooldown_tablets.contains(t->table_id())) {
1576
                    // already in cooldown queue
1577
0
                    continue;
1578
0
                }
1579
0
            }
1580
            // TODO(plat1ko): some avoidance strategy if failed to follow
1581
0
            auto score = t->calc_cold_data_compaction_score();
1582
0
            tablet_to_follow.emplace_back(t, score);
1583
1584
0
            if (tablet_to_follow.size() > n) {
1585
0
                std::sort(tablet_to_follow.begin(), tablet_to_follow.end(),
1586
0
                          [](auto& a, auto& b) { return a.second > b.second; });
1587
0
                tablet_to_follow.pop_back();
1588
0
            }
1589
0
        }
1590
1591
0
        for (auto& [tablet, score] : tablet_to_compact) {
1592
0
            LOG(INFO) << "submit cold data compaction. tablet_id=" << tablet->tablet_id()
1593
0
                      << " score=" << score;
1594
0
            static_cast<void>(
1595
0
                    _cold_data_compaction_thread_pool->submit_func([t = std::move(tablet), this]() {
1596
0
                        _handle_cold_data_compaction(std::move(t));
1597
0
                    }));
1598
0
        }
1599
1600
0
        for (auto& [tablet, score] : tablet_to_follow) {
1601
0
            LOG(INFO) << "submit to follow cooldown meta. tablet_id=" << tablet->tablet_id()
1602
0
                      << " score=" << score;
1603
0
            static_cast<void>(_cold_data_compaction_thread_pool->submit_func(
1604
0
                    [t = std::move(tablet), this]() { _follow_cooldown_meta(std::move(t)); }));
1605
0
        }
1606
0
    }
1607
0
}
1608
1609
0
void StorageEngine::_handle_cold_data_compaction(TabletSharedPtr t) {
1610
0
    auto compaction = std::make_shared<ColdDataCompaction>(*this, t);
1611
0
    {
1612
0
        std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1613
0
        _cold_compaction_tablet_submitted.insert(t->tablet_id());
1614
0
    }
1615
0
    Defer defer {[&] {
1616
0
        std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1617
0
        _cold_compaction_tablet_submitted.erase(t->tablet_id());
1618
0
    }};
1619
0
    std::unique_lock cold_compaction_lock(t->get_cold_compaction_lock(), std::try_to_lock);
1620
0
    if (!cold_compaction_lock.owns_lock()) {
1621
0
        LOG(WARNING) << "try cold_compaction_lock failed, tablet_id=" << t->tablet_id();
1622
0
        return;
1623
0
    }
1624
0
    _update_cumulative_compaction_policy();
1625
0
    if (t->get_cumulative_compaction_policy() == nullptr ||
1626
0
        t->get_cumulative_compaction_policy()->name() != t->tablet_meta()->compaction_policy()) {
1627
0
        t->set_cumulative_compaction_policy(
1628
0
                _cumulative_compaction_policies.at(t->tablet_meta()->compaction_policy()));
1629
0
    }
1630
1631
0
    auto st = compaction->prepare_compact();
1632
0
    if (!st.ok()) {
1633
0
        LOG(WARNING) << "failed to prepare cold data compaction. tablet_id=" << t->tablet_id()
1634
0
                     << " err=" << st;
1635
0
        return;
1636
0
    }
1637
1638
0
    st = compaction->execute_compact();
1639
0
    if (!st.ok()) {
1640
0
        LOG(WARNING) << "failed to execute cold data compaction. tablet_id=" << t->tablet_id()
1641
0
                     << " err=" << st;
1642
0
        return;
1643
0
    }
1644
0
}
1645
1646
0
void StorageEngine::_follow_cooldown_meta(TabletSharedPtr t) {
1647
0
    {
1648
0
        std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1649
0
        _cold_compaction_tablet_submitted.insert(t->tablet_id());
1650
0
    }
1651
0
    auto st = t->cooldown();
1652
0
    {
1653
0
        std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1654
0
        _cold_compaction_tablet_submitted.erase(t->tablet_id());
1655
0
    }
1656
0
    if (!st.ok()) {
1657
        // The cooldown of the replica may be relatively slow
1658
        // resulting in a short period of time where following cannot be successful
1659
0
        LOG_EVERY_N(WARNING, 5) << "failed to cooldown. tablet_id=" << t->tablet_id()
1660
0
                                << " err=" << st;
1661
0
    }
1662
0
}
1663
1664
void StorageEngine::add_async_publish_task(int64_t partition_id, int64_t tablet_id,
1665
                                           int64_t publish_version, int64_t transaction_id,
1666
2.05k
                                           bool is_recovery) {
1667
2.05k
    if (!is_recovery) {
1668
2.05k
        bool exists = false;
1669
2.05k
        {
1670
2.05k
            std::shared_lock<std::shared_mutex> rlock(_async_publish_lock);
1671
2.05k
            if (auto tablet_iter = _async_publish_tasks.find(tablet_id);
1672
2.05k
                tablet_iter != _async_publish_tasks.end()) {
1673
2.05k
                if (auto iter = tablet_iter->second.find(publish_version);
1674
2.05k
                    iter != tablet_iter->second.end()) {
1675
20
                    exists = true;
1676
20
                }
1677
2.05k
            }
1678
2.05k
        }
1679
2.05k
        if (exists) {
1680
20
            return;
1681
20
        }
1682
2.03k
        TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id);
1683
2.03k
        if (tablet == nullptr) {
1684
0
            LOG(INFO) << "tablet may be dropped when add async publish task, tablet_id: "
1685
0
                      << tablet_id;
1686
0
            return;
1687
0
        }
1688
2.03k
        PendingPublishInfoPB pending_publish_info_pb;
1689
2.03k
        pending_publish_info_pb.set_partition_id(partition_id);
1690
2.03k
        pending_publish_info_pb.set_transaction_id(transaction_id);
1691
2.03k
        static_cast<void>(TabletMetaManager::save_pending_publish_info(
1692
2.03k
                tablet->data_dir(), tablet->tablet_id(), publish_version,
1693
2.03k
                pending_publish_info_pb.SerializeAsString()));
1694
2.03k
    }
1695
2.05k
    LOG(INFO) << "add pending publish task, tablet_id: " << tablet_id
1696
2.03k
              << " version: " << publish_version << " txn_id:" << transaction_id
1697
2.03k
              << " is_recovery: " << is_recovery;
1698
2.03k
    std::unique_lock<std::shared_mutex> wlock(_async_publish_lock);
1699
2.03k
    _async_publish_tasks[tablet_id][publish_version] = {transaction_id, partition_id};
1700
2.03k
}
1701
1702
3
int64_t StorageEngine::get_pending_publish_min_version(int64_t tablet_id) {
1703
3
    std::shared_lock<std::shared_mutex> rlock(_async_publish_lock);
1704
3
    auto iter = _async_publish_tasks.find(tablet_id);
1705
3
    if (iter == _async_publish_tasks.end()) {
1706
0
        return INT64_MAX;
1707
0
    }
1708
3
    if (iter->second.empty()) {
1709
0
        return INT64_MAX;
1710
0
    }
1711
3
    return iter->second.begin()->first;
1712
3
}
1713
1714
10
void StorageEngine::_process_async_publish() {
1715
    // tablet, publish_version
1716
10
    std::vector<std::pair<TabletSharedPtr, int64_t>> need_removed_tasks;
1717
10
    {
1718
10
        std::unique_lock<std::shared_mutex> wlock(_async_publish_lock);
1719
10
        for (auto tablet_iter = _async_publish_tasks.begin();
1720
20
             tablet_iter != _async_publish_tasks.end();) {
1721
10
            if (tablet_iter->second.empty()) {
1722
1
                tablet_iter = _async_publish_tasks.erase(tablet_iter);
1723
1
                continue;
1724
1
            }
1725
9
            int64_t tablet_id = tablet_iter->first;
1726
9
            TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id);
1727
9
            if (!tablet) {
1728
1
                LOG(WARNING) << "tablet does not exist when async publush, tablet_id: "
1729
1
                             << tablet_id;
1730
1
                tablet_iter = _async_publish_tasks.erase(tablet_iter);
1731
1
                continue;
1732
1
            }
1733
1734
8
            auto task_iter = tablet_iter->second.begin();
1735
8
            int64_t version = task_iter->first;
1736
8
            int64_t transaction_id = task_iter->second.first;
1737
8
            int64_t partition_id = task_iter->second.second;
1738
8
            int64_t max_version = tablet->max_version().second;
1739
1740
8
            if (version <= max_version) {
1741
6
                need_removed_tasks.emplace_back(tablet, version);
1742
6
                tablet_iter->second.erase(task_iter);
1743
6
                tablet_iter++;
1744
6
                continue;
1745
6
            }
1746
2
            if (version != max_version + 1) {
1747
1
                int32_t max_version_config = tablet->max_version_config();
1748
                // Keep only the most recent versions
1749
31
                while (tablet_iter->second.size() > max_version_config) {
1750
30
                    need_removed_tasks.emplace_back(tablet, version);
1751
30
                    task_iter = tablet_iter->second.erase(task_iter);
1752
30
                    version = task_iter->first;
1753
30
                }
1754
1
                tablet_iter++;
1755
1
                continue;
1756
1
            }
1757
1758
1
            auto async_publish_task = std::make_shared<AsyncTabletPublishTask>(
1759
1
                    *this, tablet, partition_id, transaction_id, version);
1760
1
            static_cast<void>(_tablet_publish_txn_thread_pool->submit_func(
1761
1
                    [=]() { async_publish_task->handle(); }));
1762
1
            tablet_iter->second.erase(task_iter);
1763
1
            need_removed_tasks.emplace_back(tablet, version);
1764
1
            tablet_iter++;
1765
1
        }
1766
10
    }
1767
37
    for (auto& [tablet, publish_version] : need_removed_tasks) {
1768
37
        static_cast<void>(TabletMetaManager::remove_pending_publish_info(
1769
37
                tablet->data_dir(), tablet->tablet_id(), publish_version));
1770
37
    }
1771
10
}
1772
1773
0
void StorageEngine::_async_publish_callback() {
1774
0
    while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(30))) {
1775
0
        _process_async_publish();
1776
0
    }
1777
0
}
1778
1779
0
void StorageEngine::_check_tablet_delete_bitmap_score_callback() {
1780
0
    LOG(INFO) << "try to start check tablet delete bitmap score!";
1781
0
    while (!_stop_background_threads_latch.wait_for(
1782
0
            std::chrono::seconds(config::check_tablet_delete_bitmap_interval_seconds))) {
1783
0
        if (!config::enable_check_tablet_delete_bitmap_score) {
1784
0
            return;
1785
0
        }
1786
0
        uint64_t max_delete_bitmap_score = 0;
1787
0
        uint64_t max_base_rowset_delete_bitmap_score = 0;
1788
0
        _tablet_manager->get_topn_tablet_delete_bitmap_score(&max_delete_bitmap_score,
1789
0
                                                             &max_base_rowset_delete_bitmap_score);
1790
0
        if (max_delete_bitmap_score > 0) {
1791
0
            _tablet_max_delete_bitmap_score_metrics->set_value(max_delete_bitmap_score);
1792
0
        }
1793
0
        if (max_base_rowset_delete_bitmap_score > 0) {
1794
0
            _tablet_max_base_rowset_delete_bitmap_score_metrics->set_value(
1795
0
                    max_base_rowset_delete_bitmap_score);
1796
0
        }
1797
0
    }
1798
0
}
1799
#include "common/compile_check_end.h"
1800
} // namespace doris