Coverage Report

Created: 2025-07-25 19:50

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/root/doris/be/src/olap/olap_server.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include <gen_cpp/Types_types.h>
19
#include <gen_cpp/olap_file.pb.h>
20
#include <glog/logging.h>
21
#include <rapidjson/prettywriter.h>
22
#include <rapidjson/stringbuffer.h>
23
#include <stdint.h>
24
#include <sys/types.h>
25
26
#include <algorithm>
27
#include <atomic>
28
// IWYU pragma: no_include <bits/chrono.h>
29
#include <chrono> // IWYU pragma: keep
30
#include <cmath>
31
#include <condition_variable>
32
#include <cstdint>
33
#include <ctime>
34
#include <functional>
35
#include <map>
36
#include <memory>
37
#include <mutex>
38
#include <ostream>
39
#include <random>
40
#include <shared_mutex>
41
#include <string>
42
#include <thread>
43
#include <unordered_set>
44
#include <utility>
45
#include <vector>
46
47
#include "agent/utils.h"
48
#include "common/config.h"
49
#include "common/logging.h"
50
#include "common/status.h"
51
#include "cpp/sync_point.h"
52
#include "gen_cpp/FrontendService.h"
53
#include "gen_cpp/internal_service.pb.h"
54
#include "io/fs/file_writer.h" // IWYU pragma: keep
55
#include "io/fs/path.h"
56
#include "olap/base_tablet.h"
57
#include "olap/cold_data_compaction.h"
58
#include "olap/compaction_permit_limiter.h"
59
#include "olap/cumulative_compaction.h"
60
#include "olap/cumulative_compaction_policy.h"
61
#include "olap/cumulative_compaction_time_series_policy.h"
62
#include "olap/data_dir.h"
63
#include "olap/olap_common.h"
64
#include "olap/olap_define.h"
65
#include "olap/rowset/segcompaction.h"
66
#include "olap/schema_change.h"
67
#include "olap/single_replica_compaction.h"
68
#include "olap/storage_engine.h"
69
#include "olap/storage_policy.h"
70
#include "olap/tablet.h"
71
#include "olap/tablet_manager.h"
72
#include "olap/tablet_meta.h"
73
#include "olap/tablet_meta_manager.h"
74
#include "olap/tablet_schema.h"
75
#include "olap/task/engine_publish_version_task.h"
76
#include "olap/task/index_builder.h"
77
#include "runtime/client_cache.h"
78
#include "runtime/memory/cache_manager.h"
79
#include "runtime/memory/global_memory_arbitrator.h"
80
#include "util/countdown_latch.h"
81
#include "util/debug_points.h"
82
#include "util/doris_metrics.h"
83
#include "util/mem_info.h"
84
#include "util/metrics.h"
85
#include "util/thread.h"
86
#include "util/threadpool.h"
87
#include "util/thrift_rpc_helper.h"
88
#include "util/time.h"
89
#include "util/uid_util.h"
90
#include "util/work_thread_pool.hpp"
91
92
using std::string;
93
94
namespace doris {
95
#include "common/compile_check_begin.h"
96
using io::Path;
97
98
// number of running SCHEMA-CHANGE threads
99
volatile uint32_t g_schema_change_active_threads = 0;
100
bvar::Status<int64_t> g_cumu_compaction_task_num_per_round("cumu_compaction_task_num_per_round", 0);
101
bvar::Status<int64_t> g_base_compaction_task_num_per_round("base_compaction_task_num_per_round", 0);
102
103
static const uint64_t DEFAULT_SEED = 104729;
104
static const uint64_t MOD_PRIME = 7652413;
105
106
0
CompactionSubmitRegistry::CompactionSubmitRegistry(CompactionSubmitRegistry&& r) {
107
0
    std::swap(_tablet_submitted_cumu_compaction, r._tablet_submitted_cumu_compaction);
108
0
    std::swap(_tablet_submitted_base_compaction, r._tablet_submitted_base_compaction);
109
0
    std::swap(_tablet_submitted_full_compaction, r._tablet_submitted_full_compaction);
110
0
}
111
112
0
CompactionSubmitRegistry CompactionSubmitRegistry::create_snapshot() {
113
    // full compaction is not engaged in this method
114
0
    std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex);
115
0
    CompactionSubmitRegistry registry;
116
0
    registry._tablet_submitted_base_compaction = _tablet_submitted_base_compaction;
117
0
    registry._tablet_submitted_cumu_compaction = _tablet_submitted_cumu_compaction;
118
0
    return registry;
119
0
}
120
121
7
void CompactionSubmitRegistry::reset(const std::vector<DataDir*>& stores) {
122
    // full compaction is not engaged in this method
123
7
    for (const auto& store : stores) {
124
0
        _tablet_submitted_cumu_compaction[store] = {};
125
0
        _tablet_submitted_base_compaction[store] = {};
126
0
    }
127
7
}
128
129
uint32_t CompactionSubmitRegistry::count_executing_compaction(DataDir* dir,
130
2
                                                              CompactionType compaction_type) {
131
    // non-lock, used in snapshot
132
2
    const auto& compaction_tasks = _get_tablet_set(dir, compaction_type);
133
2
    return cast_set<uint32_t>(std::count_if(
134
2
            compaction_tasks.begin(), compaction_tasks.end(),
135
10
            [](const auto& task) { return task->compaction_stage == CompactionStage::EXECUTING; }));
136
2
}
137
138
1
uint32_t CompactionSubmitRegistry::count_executing_cumu_and_base(DataDir* dir) {
139
    // non-lock, used in snapshot
140
1
    return count_executing_compaction(dir, CompactionType::BASE_COMPACTION) +
141
1
           count_executing_compaction(dir, CompactionType::CUMULATIVE_COMPACTION);
142
1
}
143
144
0
bool CompactionSubmitRegistry::has_compaction_task(DataDir* dir, CompactionType compaction_type) {
145
    // non-lock, used in snapshot
146
0
    return !_get_tablet_set(dir, compaction_type).empty();
147
0
}
148
149
std::vector<TabletSharedPtr> CompactionSubmitRegistry::pick_topn_tablets_for_compaction(
150
        TabletManager* tablet_mgr, DataDir* data_dir, CompactionType compaction_type,
151
0
        const CumuCompactionPolicyTable& cumu_compaction_policies, uint32_t* disk_max_score) {
152
    // non-lock, used in snapshot
153
0
    return tablet_mgr->find_best_tablets_to_compaction(compaction_type, data_dir,
154
0
                                                       _get_tablet_set(data_dir, compaction_type),
155
0
                                                       disk_max_score, cumu_compaction_policies);
156
0
}
157
158
21
bool CompactionSubmitRegistry::insert(TabletSharedPtr tablet, CompactionType compaction_type) {
159
21
    std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex);
160
21
    auto& tablet_set = _get_tablet_set(tablet->data_dir(), compaction_type);
161
21
    bool already_exist = !(tablet_set.insert(tablet).second);
162
21
    return already_exist;
163
21
}
164
165
void CompactionSubmitRegistry::remove(TabletSharedPtr tablet, CompactionType compaction_type,
166
7
                                      std::function<void()> wakeup_cb) {
167
7
    std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex);
168
7
    auto& tablet_set = _get_tablet_set(tablet->data_dir(), compaction_type);
169
7
    size_t removed = tablet_set.erase(tablet);
170
7
    if (removed == 1) {
171
7
        wakeup_cb();
172
7
    }
173
7
}
174
175
CompactionSubmitRegistry::TabletSet& CompactionSubmitRegistry::_get_tablet_set(
176
30
        DataDir* dir, CompactionType compaction_type) {
177
30
    switch (compaction_type) {
178
1
    case CompactionType::BASE_COMPACTION:
179
1
        return _tablet_submitted_base_compaction[dir];
180
29
    case CompactionType::CUMULATIVE_COMPACTION:
181
29
        return _tablet_submitted_cumu_compaction[dir];
182
0
    case CompactionType::FULL_COMPACTION:
183
0
        return _tablet_submitted_full_compaction[dir];
184
0
    default:
185
0
        CHECK(false) << "invalid compaction type";
186
30
    }
187
30
}
188
189
0
static int32_t get_cumu_compaction_threads_num(size_t data_dirs_num) {
190
0
    int32_t threads_num = config::max_cumu_compaction_threads;
191
0
    if (threads_num == -1) {
192
0
        int32_t num_cores = doris::CpuInfo::num_cores();
193
0
        threads_num = std::max(cast_set<int32_t>(data_dirs_num), num_cores / 6);
194
0
    }
195
0
    threads_num = threads_num <= 0 ? 1 : threads_num;
196
0
    return threads_num;
197
0
}
198
199
0
static int32_t get_base_compaction_threads_num(size_t data_dirs_num) {
200
0
    int32_t threads_num = config::max_base_compaction_threads;
201
0
    if (threads_num == -1) {
202
0
        threads_num = cast_set<int32_t>(data_dirs_num);
203
0
    }
204
0
    threads_num = threads_num <= 0 ? 1 : threads_num;
205
0
    return threads_num;
206
0
}
207
208
0
static int32_t get_single_replica_compaction_threads_num(size_t data_dirs_num) {
209
0
    int32_t threads_num = config::max_single_replica_compaction_threads;
210
0
    if (threads_num == -1) {
211
0
        threads_num = cast_set<int32_t>(data_dirs_num);
212
0
    }
213
0
    threads_num = threads_num <= 0 ? 1 : threads_num;
214
0
    return threads_num;
215
0
}
216
217
0
Status StorageEngine::start_bg_threads(std::shared_ptr<WorkloadGroup> wg_sptr) {
218
0
    RETURN_IF_ERROR(Thread::create(
219
0
            "StorageEngine", "unused_rowset_monitor_thread",
220
0
            [this]() { this->_unused_rowset_monitor_thread_callback(); },
221
0
            &_unused_rowset_monitor_thread));
222
0
    LOG(INFO) << "unused rowset monitor thread started";
223
224
0
    RETURN_IF_ERROR(Thread::create(
225
0
            "StorageEngine", "evict_querying_rowset_thread",
226
0
            [this]() { this->_evict_quring_rowset_thread_callback(); },
227
0
            &_evict_quering_rowset_thread));
228
0
    LOG(INFO) << "evict quering thread started";
229
230
    // start thread for monitoring the snapshot and trash folder
231
0
    RETURN_IF_ERROR(Thread::create(
232
0
            "StorageEngine", "garbage_sweeper_thread",
233
0
            [this]() { this->_garbage_sweeper_thread_callback(); }, &_garbage_sweeper_thread));
234
0
    LOG(INFO) << "garbage sweeper thread started";
235
236
    // start thread for monitoring the tablet with io error
237
0
    RETURN_IF_ERROR(Thread::create(
238
0
            "StorageEngine", "disk_stat_monitor_thread",
239
0
            [this]() { this->_disk_stat_monitor_thread_callback(); }, &_disk_stat_monitor_thread));
240
0
    LOG(INFO) << "disk stat monitor thread started";
241
242
    // convert store map to vector
243
0
    std::vector<DataDir*> data_dirs = get_stores();
244
245
0
    auto base_compaction_threads = get_base_compaction_threads_num(data_dirs.size());
246
0
    auto cumu_compaction_threads = get_cumu_compaction_threads_num(data_dirs.size());
247
0
    auto single_replica_compaction_threads =
248
0
            get_single_replica_compaction_threads_num(data_dirs.size());
249
250
0
    RETURN_IF_ERROR(ThreadPoolBuilder("BaseCompactionTaskThreadPool")
251
0
                            .set_min_threads(base_compaction_threads)
252
0
                            .set_max_threads(base_compaction_threads)
253
0
                            .build(&_base_compaction_thread_pool));
254
0
    RETURN_IF_ERROR(ThreadPoolBuilder("CumuCompactionTaskThreadPool")
255
0
                            .set_min_threads(cumu_compaction_threads)
256
0
                            .set_max_threads(cumu_compaction_threads)
257
0
                            .build(&_cumu_compaction_thread_pool));
258
0
    RETURN_IF_ERROR(ThreadPoolBuilder("SingleReplicaCompactionTaskThreadPool")
259
0
                            .set_min_threads(single_replica_compaction_threads)
260
0
                            .set_max_threads(single_replica_compaction_threads)
261
0
                            .build(&_single_replica_compaction_thread_pool));
262
263
0
    if (config::enable_segcompaction) {
264
0
        RETURN_IF_ERROR(ThreadPoolBuilder("SegCompactionTaskThreadPool")
265
0
                                .set_min_threads(config::segcompaction_num_threads)
266
0
                                .set_max_threads(config::segcompaction_num_threads)
267
0
                                .build(&_seg_compaction_thread_pool));
268
0
    }
269
0
    RETURN_IF_ERROR(ThreadPoolBuilder("ColdDataCompactionTaskThreadPool")
270
0
                            .set_min_threads(config::cold_data_compaction_thread_num)
271
0
                            .set_max_threads(config::cold_data_compaction_thread_num)
272
0
                            .build(&_cold_data_compaction_thread_pool));
273
274
    // compaction tasks producer thread
275
0
    RETURN_IF_ERROR(Thread::create(
276
0
            "StorageEngine", "compaction_tasks_producer_thread",
277
0
            [this]() { this->_compaction_tasks_producer_callback(); },
278
0
            &_compaction_tasks_producer_thread));
279
0
    LOG(INFO) << "compaction tasks producer thread started";
280
281
0
    RETURN_IF_ERROR(Thread::create(
282
0
            "StorageEngine", "_update_replica_infos_thread",
283
0
            [this]() { this->_update_replica_infos_callback(); }, &_update_replica_infos_thread));
284
0
    LOG(INFO) << "tablet replicas info update thread started";
285
286
0
    int32_t max_checkpoint_thread_num = config::max_meta_checkpoint_threads;
287
0
    if (max_checkpoint_thread_num < 0) {
288
0
        max_checkpoint_thread_num = cast_set<int32_t>(data_dirs.size());
289
0
    }
290
0
    RETURN_IF_ERROR(ThreadPoolBuilder("TabletMetaCheckpointTaskThreadPool")
291
0
                            .set_max_threads(max_checkpoint_thread_num)
292
0
                            .build(&_tablet_meta_checkpoint_thread_pool));
293
294
0
    RETURN_IF_ERROR(Thread::create(
295
0
            "StorageEngine", "tablet_checkpoint_tasks_producer_thread",
296
0
            [this, data_dirs]() { this->_tablet_checkpoint_callback(data_dirs); },
297
0
            &_tablet_checkpoint_tasks_producer_thread));
298
0
    LOG(INFO) << "tablet checkpoint tasks producer thread started";
299
300
0
    RETURN_IF_ERROR(Thread::create(
301
0
            "StorageEngine", "tablet_path_check_thread",
302
0
            [this]() { this->_tablet_path_check_callback(); }, &_tablet_path_check_thread));
303
0
    LOG(INFO) << "tablet path check thread started";
304
305
    // path scan and gc thread
306
0
    if (config::path_gc_check) {
307
0
        for (auto data_dir : get_stores()) {
308
0
            scoped_refptr<Thread> path_gc_thread;
309
0
            RETURN_IF_ERROR(Thread::create(
310
0
                    "StorageEngine", "path_gc_thread",
311
0
                    [this, data_dir]() { this->_path_gc_thread_callback(data_dir); },
312
0
                    &path_gc_thread));
313
0
            _path_gc_threads.emplace_back(path_gc_thread);
314
0
        }
315
0
        LOG(INFO) << "path gc threads started. number:" << get_stores().size();
316
0
    }
317
318
0
    RETURN_IF_ERROR(ThreadPoolBuilder("CooldownTaskThreadPool")
319
0
                            .set_min_threads(config::cooldown_thread_num)
320
0
                            .set_max_threads(config::cooldown_thread_num)
321
0
                            .build(&_cooldown_thread_pool));
322
0
    LOG(INFO) << "cooldown thread pool started";
323
324
0
    RETURN_IF_ERROR(Thread::create(
325
0
            "StorageEngine", "cooldown_tasks_producer_thread",
326
0
            [this]() { this->_cooldown_tasks_producer_callback(); },
327
0
            &_cooldown_tasks_producer_thread));
328
0
    LOG(INFO) << "cooldown tasks producer thread started";
329
330
0
    RETURN_IF_ERROR(Thread::create(
331
0
            "StorageEngine", "remove_unused_remote_files_thread",
332
0
            [this]() { this->_remove_unused_remote_files_callback(); },
333
0
            &_remove_unused_remote_files_thread));
334
0
    LOG(INFO) << "remove unused remote files thread started";
335
336
0
    RETURN_IF_ERROR(Thread::create(
337
0
            "StorageEngine", "cold_data_compaction_producer_thread",
338
0
            [this]() { this->_cold_data_compaction_producer_callback(); },
339
0
            &_cold_data_compaction_producer_thread));
340
0
    LOG(INFO) << "cold data compaction producer thread started";
341
342
    // add tablet publish version thread pool
343
0
    RETURN_IF_ERROR(ThreadPoolBuilder("TabletPublishTxnThreadPool")
344
0
                            .set_min_threads(config::tablet_publish_txn_max_thread)
345
0
                            .set_max_threads(config::tablet_publish_txn_max_thread)
346
0
                            .build(&_tablet_publish_txn_thread_pool));
347
348
0
    RETURN_IF_ERROR(Thread::create(
349
0
            "StorageEngine", "async_publish_version_thread",
350
0
            [this]() { this->_async_publish_callback(); }, &_async_publish_thread));
351
0
    LOG(INFO) << "async publish thread started";
352
353
0
    RETURN_IF_ERROR(Thread::create(
354
0
            "StorageEngine", "check_tablet_delete_bitmap_score_thread",
355
0
            [this]() { this->_check_tablet_delete_bitmap_score_callback(); },
356
0
            &_check_delete_bitmap_score_thread));
357
0
    LOG(INFO) << "check tablet delete bitmap score thread started";
358
359
0
    LOG(INFO) << "all storage engine's background threads are started.";
360
0
    return Status::OK();
361
0
}
362
363
0
void StorageEngine::_garbage_sweeper_thread_callback() {
364
0
    uint32_t max_interval = config::max_garbage_sweep_interval;
365
0
    uint32_t min_interval = config::min_garbage_sweep_interval;
366
367
0
    if (max_interval < min_interval || min_interval <= 0) {
368
0
        LOG(WARNING) << "garbage sweep interval config is illegal: [max=" << max_interval
369
0
                     << " min=" << min_interval << "].";
370
0
        min_interval = 1;
371
0
        max_interval = max_interval >= min_interval ? max_interval : min_interval;
372
0
        LOG(INFO) << "force reset garbage sweep interval. "
373
0
                  << "max_interval=" << max_interval << ", min_interval=" << min_interval;
374
0
    }
375
376
0
    const double pi = M_PI;
377
0
    double usage = 1.0;
378
    // After the program starts, the first round of cleaning starts after min_interval.
379
0
    uint32_t curr_interval = min_interval;
380
0
    do {
381
        // Function properties:
382
        // when usage < 0.6,          ratio close to 1.(interval close to max_interval)
383
        // when usage at [0.6, 0.75], ratio is rapidly decreasing from 0.87 to 0.27.
384
        // when usage > 0.75,         ratio is slowly decreasing.
385
        // when usage > 0.8,          ratio close to min_interval.
386
        // when usage = 0.88,         ratio is approximately 0.0057.
387
0
        double ratio = (1.1 * (pi / 2 - std::atan(usage * 100 / 5 - 14)) - 0.28) / pi;
388
0
        ratio = ratio > 0 ? ratio : 0;
389
0
        curr_interval = uint32_t(max_interval * ratio);
390
0
        curr_interval = std::max(curr_interval, min_interval);
391
0
        curr_interval = std::min(curr_interval, max_interval);
392
393
        // start clean trash and update usage.
394
0
        Status res = start_trash_sweep(&usage);
395
0
        if (res.ok() && _need_clean_trash.exchange(false, std::memory_order_relaxed)) {
396
0
            res = start_trash_sweep(&usage, true);
397
0
        }
398
399
0
        if (!res.ok()) {
400
0
            LOG(WARNING) << "one or more errors occur when sweep trash."
401
0
                         << "see previous message for detail. err code=" << res;
402
            // do nothing. continue next loop.
403
0
        }
404
0
        LOG(INFO) << "trash thread check usage=" << usage << " ratio=" << ratio
405
0
                  << " curr_interval=" << curr_interval;
406
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(curr_interval)));
407
0
}
408
409
0
void StorageEngine::_disk_stat_monitor_thread_callback() {
410
0
    int32_t interval = config::disk_stat_monitor_interval;
411
0
    do {
412
0
        _start_disk_stat_monitor();
413
414
0
        interval = config::disk_stat_monitor_interval;
415
0
        if (interval <= 0) {
416
0
            LOG(WARNING) << "disk_stat_monitor_interval config is illegal: " << interval
417
0
                         << ", force set to 1";
418
0
            interval = 1;
419
0
        }
420
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
421
0
}
422
423
0
void StorageEngine::_unused_rowset_monitor_thread_callback() {
424
0
    int32_t interval = config::unused_rowset_monitor_interval;
425
0
    do {
426
0
        start_delete_unused_rowset();
427
428
0
        interval = config::unused_rowset_monitor_interval;
429
0
        if (interval <= 0) {
430
0
            LOG(WARNING) << "unused_rowset_monitor_interval config is illegal: " << interval
431
0
                         << ", force set to 1";
432
0
            interval = 1;
433
0
        }
434
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
435
0
}
436
437
0
int32_t StorageEngine::_auto_get_interval_by_disk_capacity(DataDir* data_dir) {
438
0
    double disk_used = data_dir->get_usage(0);
439
0
    double remain_used = 1 - disk_used;
440
0
    DCHECK(remain_used >= 0 && remain_used <= 1);
441
0
    DCHECK(config::path_gc_check_interval_second >= 0);
442
0
    int32_t ret = 0;
443
0
    if (remain_used > 0.9) {
444
        // if config::path_gc_check_interval_second == 24h
445
0
        ret = config::path_gc_check_interval_second;
446
0
    } else if (remain_used > 0.7) {
447
        // 12h
448
0
        ret = config::path_gc_check_interval_second / 2;
449
0
    } else if (remain_used > 0.5) {
450
        // 6h
451
0
        ret = config::path_gc_check_interval_second / 4;
452
0
    } else if (remain_used > 0.3) {
453
        // 4h
454
0
        ret = config::path_gc_check_interval_second / 6;
455
0
    } else {
456
        // 3h
457
0
        ret = config::path_gc_check_interval_second / 8;
458
0
    }
459
0
    return ret;
460
0
}
461
462
0
void StorageEngine::_path_gc_thread_callback(DataDir* data_dir) {
463
0
    LOG(INFO) << "try to start path gc thread!";
464
0
    time_t last_exec_time = 0;
465
0
    do {
466
0
        time_t current_time = time(nullptr);
467
468
0
        int32_t interval = _auto_get_interval_by_disk_capacity(data_dir);
469
0
        DBUG_EXECUTE_IF("_path_gc_thread_callback.interval.eq.1ms", {
470
0
            LOG(INFO) << "debug point change interval eq 1ms";
471
0
            interval = 1;
472
0
            while (DebugPoints::instance()->is_enable("_path_gc_thread_callback.always.do")) {
473
0
                data_dir->perform_path_gc();
474
0
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
475
0
            }
476
0
        });
477
0
        if (interval <= 0) {
478
0
            LOG(WARNING) << "path gc thread check interval config is illegal:" << interval
479
0
                         << " will be forced set to half hour";
480
0
            interval = 1800; // 0.5 hour
481
0
        }
482
0
        if (current_time - last_exec_time >= interval) {
483
0
            LOG(INFO) << "try to perform path gc! disk remain [" << 1 - data_dir->get_usage(0)
484
0
                      << "] internal [" << interval << "]";
485
0
            data_dir->perform_path_gc();
486
0
            last_exec_time = time(nullptr);
487
0
        }
488
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(5)));
489
0
    LOG(INFO) << "stop path gc thread!";
490
0
}
491
492
0
void StorageEngine::_tablet_checkpoint_callback(const std::vector<DataDir*>& data_dirs) {
493
0
    int64_t interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs;
494
0
    do {
495
0
        for (auto data_dir : data_dirs) {
496
0
            LOG(INFO) << "begin to produce tablet meta checkpoint tasks, data_dir="
497
0
                      << data_dir->path();
498
0
            auto st = _tablet_meta_checkpoint_thread_pool->submit_func(
499
0
                    [data_dir, this]() { _tablet_manager->do_tablet_meta_checkpoint(data_dir); });
500
0
            if (!st.ok()) {
501
0
                LOG(WARNING) << "submit tablet checkpoint tasks failed.";
502
0
            }
503
0
        }
504
0
        interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs;
505
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
506
0
}
507
508
0
void StorageEngine::_tablet_path_check_callback() {
509
0
    struct TabletIdComparator {
510
0
        bool operator()(Tablet* a, Tablet* b) { return a->tablet_id() < b->tablet_id(); }
511
0
    };
512
513
0
    using TabletQueue = std::priority_queue<Tablet*, std::vector<Tablet*>, TabletIdComparator>;
514
515
0
    int64_t interval = config::tablet_path_check_interval_seconds;
516
0
    if (interval <= 0) {
517
0
        return;
518
0
    }
519
520
0
    int64_t last_tablet_id = 0;
521
0
    do {
522
0
        int32_t batch_size = config::tablet_path_check_batch_size;
523
0
        if (batch_size <= 0) {
524
0
            if (_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))) {
525
0
                break;
526
0
            }
527
0
            continue;
528
0
        }
529
530
0
        LOG(INFO) << "start to check tablet path";
531
532
0
        auto all_tablets = _tablet_manager->get_all_tablet(
533
0
                [](Tablet* t) { return t->is_used() && t->tablet_state() == TABLET_RUNNING; });
534
535
0
        TabletQueue big_id_tablets;
536
0
        TabletQueue small_id_tablets;
537
0
        for (auto tablet : all_tablets) {
538
0
            auto tablet_id = tablet->tablet_id();
539
0
            TabletQueue* belong_tablets = nullptr;
540
0
            if (tablet_id > last_tablet_id) {
541
0
                if (big_id_tablets.size() < batch_size ||
542
0
                    big_id_tablets.top()->tablet_id() > tablet_id) {
543
0
                    belong_tablets = &big_id_tablets;
544
0
                }
545
0
            } else if (big_id_tablets.size() < batch_size) {
546
0
                if (small_id_tablets.size() < batch_size ||
547
0
                    small_id_tablets.top()->tablet_id() > tablet_id) {
548
0
                    belong_tablets = &small_id_tablets;
549
0
                }
550
0
            }
551
0
            if (belong_tablets != nullptr) {
552
0
                belong_tablets->push(tablet.get());
553
0
                if (belong_tablets->size() > batch_size) {
554
0
                    belong_tablets->pop();
555
0
                }
556
0
            }
557
0
        }
558
559
0
        int32_t need_small_id_tablet_size =
560
0
                batch_size - static_cast<int32_t>(big_id_tablets.size());
561
562
0
        if (!big_id_tablets.empty()) {
563
0
            last_tablet_id = big_id_tablets.top()->tablet_id();
564
0
        }
565
0
        while (!big_id_tablets.empty()) {
566
0
            big_id_tablets.top()->check_tablet_path_exists();
567
0
            big_id_tablets.pop();
568
0
        }
569
570
0
        if (!small_id_tablets.empty() && need_small_id_tablet_size > 0) {
571
0
            while (static_cast<int32_t>(small_id_tablets.size()) > need_small_id_tablet_size) {
572
0
                small_id_tablets.pop();
573
0
            }
574
575
0
            last_tablet_id = small_id_tablets.top()->tablet_id();
576
0
            while (!small_id_tablets.empty()) {
577
0
                small_id_tablets.top()->check_tablet_path_exists();
578
0
                small_id_tablets.pop();
579
0
            }
580
0
        }
581
582
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
583
0
}
584
585
6
void StorageEngine::_adjust_compaction_thread_num() {
586
6
    TEST_SYNC_POINT_RETURN_WITH_VOID("StorageEngine::_adjust_compaction_thread_num.return_void");
587
0
    auto base_compaction_threads_num = get_base_compaction_threads_num(_store_map.size());
588
0
    if (_base_compaction_thread_pool->max_threads() != base_compaction_threads_num) {
589
0
        int old_max_threads = _base_compaction_thread_pool->max_threads();
590
0
        Status status = _base_compaction_thread_pool->set_max_threads(base_compaction_threads_num);
591
0
        if (status.ok()) {
592
0
            VLOG_NOTICE << "update base compaction thread pool max_threads from " << old_max_threads
593
0
                        << " to " << base_compaction_threads_num;
594
0
        }
595
0
    }
596
0
    if (_base_compaction_thread_pool->min_threads() != base_compaction_threads_num) {
597
0
        int old_min_threads = _base_compaction_thread_pool->min_threads();
598
0
        Status status = _base_compaction_thread_pool->set_min_threads(base_compaction_threads_num);
599
0
        if (status.ok()) {
600
0
            VLOG_NOTICE << "update base compaction thread pool min_threads from " << old_min_threads
601
0
                        << " to " << base_compaction_threads_num;
602
0
        }
603
0
    }
604
605
0
    auto cumu_compaction_threads_num = get_cumu_compaction_threads_num(_store_map.size());
606
0
    if (_cumu_compaction_thread_pool->max_threads() != cumu_compaction_threads_num) {
607
0
        int old_max_threads = _cumu_compaction_thread_pool->max_threads();
608
0
        Status status = _cumu_compaction_thread_pool->set_max_threads(cumu_compaction_threads_num);
609
0
        if (status.ok()) {
610
0
            VLOG_NOTICE << "update cumu compaction thread pool max_threads from " << old_max_threads
611
0
                        << " to " << cumu_compaction_threads_num;
612
0
        }
613
0
    }
614
0
    if (_cumu_compaction_thread_pool->min_threads() != cumu_compaction_threads_num) {
615
0
        int old_min_threads = _cumu_compaction_thread_pool->min_threads();
616
0
        Status status = _cumu_compaction_thread_pool->set_min_threads(cumu_compaction_threads_num);
617
0
        if (status.ok()) {
618
0
            VLOG_NOTICE << "update cumu compaction thread pool min_threads from " << old_min_threads
619
0
                        << " to " << cumu_compaction_threads_num;
620
0
        }
621
0
    }
622
623
0
    auto single_replica_compaction_threads_num =
624
0
            get_single_replica_compaction_threads_num(_store_map.size());
625
0
    if (_single_replica_compaction_thread_pool->max_threads() !=
626
0
        single_replica_compaction_threads_num) {
627
0
        int old_max_threads = _single_replica_compaction_thread_pool->max_threads();
628
0
        Status status = _single_replica_compaction_thread_pool->set_max_threads(
629
0
                single_replica_compaction_threads_num);
630
0
        if (status.ok()) {
631
0
            VLOG_NOTICE << "update single replica compaction thread pool max_threads from "
632
0
                        << old_max_threads << " to " << single_replica_compaction_threads_num;
633
0
        }
634
0
    }
635
0
    if (_single_replica_compaction_thread_pool->min_threads() !=
636
0
        single_replica_compaction_threads_num) {
637
0
        int old_min_threads = _single_replica_compaction_thread_pool->min_threads();
638
0
        Status status = _single_replica_compaction_thread_pool->set_min_threads(
639
0
                single_replica_compaction_threads_num);
640
0
        if (status.ok()) {
641
0
            VLOG_NOTICE << "update single replica compaction thread pool min_threads from "
642
0
                        << old_min_threads << " to " << single_replica_compaction_threads_num;
643
0
        }
644
0
    }
645
0
}
646
647
7
void StorageEngine::_compaction_tasks_producer_callback() {
648
7
    LOG(INFO) << "try to start compaction producer process!";
649
650
7
    std::vector<DataDir*> data_dirs = get_stores();
651
7
    _compaction_submit_registry.reset(data_dirs);
652
653
7
    int round = 0;
654
7
    CompactionType compaction_type;
655
656
    // Used to record the time when the score metric was last updated.
657
    // The update of the score metric is accompanied by the logic of selecting the tablet.
658
    // If there is no slot available, the logic of selecting the tablet will be terminated,
659
    // which causes the score metric update to be terminated.
660
    // In order to avoid this situation, we need to update the score regularly.
661
7
    int64_t last_cumulative_score_update_time = 0;
662
7
    int64_t last_base_score_update_time = 0;
663
7
    static const int64_t check_score_interval_ms = 5000; // 5 secs
664
665
7
    int64_t interval = config::generate_compaction_tasks_interval_ms;
666
7
    do {
667
7
        int64_t cur_time = UnixMillis();
668
7
        if (!config::disable_auto_compaction &&
669
7
            (!config::enable_compaction_pause_on_high_memory ||
670
6
             !GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE))) {
671
6
            _adjust_compaction_thread_num();
672
673
6
            bool check_score = false;
674
6
            if (round < config::cumulative_compaction_rounds_for_each_base_compaction_round) {
675
6
                compaction_type = CompactionType::CUMULATIVE_COMPACTION;
676
6
                round++;
677
6
                if (cur_time - last_cumulative_score_update_time >= check_score_interval_ms) {
678
6
                    check_score = true;
679
6
                    last_cumulative_score_update_time = cur_time;
680
6
                }
681
6
            } else {
682
0
                compaction_type = CompactionType::BASE_COMPACTION;
683
0
                round = 0;
684
0
                if (cur_time - last_base_score_update_time >= check_score_interval_ms) {
685
0
                    check_score = true;
686
0
                    last_base_score_update_time = cur_time;
687
0
                }
688
0
            }
689
6
            std::unique_ptr<ThreadPool>& thread_pool =
690
6
                    (compaction_type == CompactionType::CUMULATIVE_COMPACTION)
691
6
                            ? _cumu_compaction_thread_pool
692
6
                            : _base_compaction_thread_pool;
693
6
            bvar::Status<int64_t>& g_compaction_task_num_per_round =
694
6
                    (compaction_type == CompactionType::CUMULATIVE_COMPACTION)
695
6
                            ? g_cumu_compaction_task_num_per_round
696
6
                            : g_base_compaction_task_num_per_round;
697
6
            if (config::compaction_num_per_round != -1) {
698
0
                _compaction_num_per_round = config::compaction_num_per_round;
699
6
            } else if (thread_pool->get_queue_size() == 0) {
700
                // If all tasks in the thread pool queue are executed,
701
                // double the number of tasks generated each time,
702
                // with a maximum of config::max_automatic_compaction_num_per_round tasks per generation.
703
2
                if (_compaction_num_per_round < config::max_automatic_compaction_num_per_round) {
704
1
                    _compaction_num_per_round *= 2;
705
1
                    g_compaction_task_num_per_round.set_value(_compaction_num_per_round);
706
1
                }
707
4
            } else if (thread_pool->get_queue_size() > _compaction_num_per_round / 2) {
708
                // If all tasks in the thread pool is greater than
709
                // half of the tasks submitted in the previous round,
710
                // reduce the number of tasks generated each time by half, with a minimum of 1.
711
3
                if (_compaction_num_per_round > 1) {
712
1
                    _compaction_num_per_round /= 2;
713
1
                    g_compaction_task_num_per_round.set_value(_compaction_num_per_round);
714
1
                }
715
3
            }
716
6
            std::vector<TabletSharedPtr> tablets_compaction =
717
6
                    _generate_compaction_tasks(compaction_type, data_dirs, check_score);
718
6
            if (tablets_compaction.size() == 0) {
719
6
                std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex);
720
6
                _wakeup_producer_flag = 0;
721
                // It is necessary to wake up the thread on timeout to prevent deadlock
722
                // in case of no running compaction task.
723
6
                _compaction_producer_sleep_cv.wait_for(
724
6
                        lock, std::chrono::milliseconds(2000),
725
12
                        [this] { return _wakeup_producer_flag == 1; });
726
6
                continue;
727
6
            }
728
729
0
            for (const auto& tablet : tablets_compaction) {
730
0
                if (compaction_type == CompactionType::BASE_COMPACTION) {
731
0
                    tablet->set_last_base_compaction_schedule_time(UnixMillis());
732
0
                } else if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) {
733
0
                    tablet->set_last_cumu_compaction_schedule_time(UnixMillis());
734
0
                } else if (compaction_type == CompactionType::FULL_COMPACTION) {
735
0
                    tablet->set_last_full_compaction_schedule_time(UnixMillis());
736
0
                }
737
0
                Status st = _submit_compaction_task(tablet, compaction_type, false);
738
0
                if (!st.ok()) {
739
0
                    LOG(WARNING) << "failed to submit compaction task for tablet: "
740
0
                                 << tablet->tablet_id() << ", err: " << st;
741
0
                }
742
0
            }
743
0
            interval = config::generate_compaction_tasks_interval_ms;
744
1
        } else {
745
1
            interval = 5000; // 5s to check disable_auto_compaction
746
1
        }
747
748
        // wait some seconds for ut test
749
1
        {
750
1
            std ::vector<std ::any> args {};
751
1
            args.emplace_back(1);
752
1
            doris ::SyncPoint ::get_instance()->process(
753
1
                    "StorageEngine::_compaction_tasks_producer_callback", std ::move(args));
754
1
        }
755
1
        int64_t end_time = UnixMillis();
756
1
        DorisMetrics::instance()->compaction_producer_callback_a_round_time->set_value(end_time -
757
1
                                                                                       cur_time);
758
7
    } while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(interval)));
759
7
}
760
761
0
void StorageEngine::_update_replica_infos_callback() {
762
#ifdef GOOGLE_PROFILER
763
    ProfilerRegisterThread();
764
#endif
765
0
    LOG(INFO) << "start to update replica infos!";
766
767
0
    int64_t interval = config::update_replica_infos_interval_seconds;
768
0
    do {
769
0
        auto all_tablets = _tablet_manager->get_all_tablet([](Tablet* t) {
770
0
            return t->is_used() && t->tablet_state() == TABLET_RUNNING &&
771
0
                   !t->tablet_meta()->tablet_schema()->disable_auto_compaction() &&
772
0
                   t->tablet_meta()->tablet_schema()->enable_single_replica_compaction();
773
0
        });
774
0
        ClusterInfo* cluster_info = ExecEnv::GetInstance()->cluster_info();
775
0
        if (cluster_info == nullptr) {
776
0
            LOG(WARNING) << "Have not get FE Master heartbeat yet";
777
0
            std::this_thread::sleep_for(std::chrono::seconds(2));
778
0
            continue;
779
0
        }
780
0
        TNetworkAddress master_addr = cluster_info->master_fe_addr;
781
0
        if (master_addr.hostname == "" || master_addr.port == 0) {
782
0
            LOG(WARNING) << "Have not get FE Master heartbeat yet";
783
0
            std::this_thread::sleep_for(std::chrono::seconds(2));
784
0
            continue;
785
0
        }
786
787
0
        int start = 0;
788
0
        int tablet_size = cast_set<int>(all_tablets.size());
789
        // The while loop may take a long time, we should skip it when stop
790
0
        while (start < tablet_size && _stop_background_threads_latch.count() > 0) {
791
0
            int batch_size = std::min(100, tablet_size - start);
792
0
            int end = start + batch_size;
793
0
            TGetTabletReplicaInfosRequest request;
794
0
            TGetTabletReplicaInfosResult result;
795
0
            for (int i = start; i < end; i++) {
796
0
                request.tablet_ids.emplace_back(all_tablets[i]->tablet_id());
797
0
            }
798
0
            Status rpc_st = ThriftRpcHelper::rpc<FrontendServiceClient>(
799
0
                    master_addr.hostname, master_addr.port,
800
0
                    [&request, &result](FrontendServiceConnection& client) {
801
0
                        client->getTabletReplicaInfos(result, request);
802
0
                    });
803
804
0
            if (!rpc_st.ok()) {
805
0
                LOG(WARNING) << "Failed to get tablet replica infos, encounter rpc failure, "
806
0
                                "tablet start: "
807
0
                             << start << " end: " << end;
808
0
                continue;
809
0
            }
810
811
0
            std::unique_lock<std::mutex> lock(_peer_replica_infos_mutex);
812
0
            for (const auto& it : result.tablet_replica_infos) {
813
0
                auto tablet_id = it.first;
814
0
                auto tablet = _tablet_manager->get_tablet(tablet_id);
815
0
                if (tablet == nullptr) {
816
0
                    VLOG_CRITICAL << "tablet ptr is nullptr";
817
0
                    continue;
818
0
                }
819
820
0
                VLOG_NOTICE << tablet_id << " tablet has " << it.second.size() << " replicas";
821
0
                uint64_t min_modulo = MOD_PRIME;
822
0
                TReplicaInfo peer_replica;
823
0
                for (const auto& replica : it.second) {
824
0
                    int64_t peer_replica_id = replica.replica_id;
825
0
                    uint64_t modulo = HashUtil::hash64(&peer_replica_id, sizeof(peer_replica_id),
826
0
                                                       DEFAULT_SEED) %
827
0
                                      MOD_PRIME;
828
0
                    if (modulo < min_modulo) {
829
0
                        peer_replica = replica;
830
0
                        min_modulo = modulo;
831
0
                    }
832
0
                }
833
0
                VLOG_NOTICE << "tablet " << tablet_id << ", peer replica host is "
834
0
                            << peer_replica.host;
835
0
                _peer_replica_infos[tablet_id] = peer_replica;
836
0
            }
837
0
            _token = result.token;
838
0
            VLOG_NOTICE << "get tablet replica infos from fe, size is " << end - start
839
0
                        << " token = " << result.token;
840
0
            start = end;
841
0
        }
842
0
        interval = config::update_replica_infos_interval_seconds;
843
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
844
0
}
845
846
Status StorageEngine::_submit_single_replica_compaction_task(TabletSharedPtr tablet,
847
0
                                                             CompactionType compaction_type) {
848
    // For single replica compaction, the local version to be merged is determined based on the version fetched from the peer replica.
849
    // Therefore, it is currently not possible to determine whether it should be a base compaction or cumulative compaction.
850
    // As a result, the tablet needs to be pushed to both the _tablet_submitted_cumu_compaction and the _tablet_submitted_base_compaction simultaneously.
851
0
    bool already_exist =
852
0
            _compaction_submit_registry.insert(tablet, CompactionType::CUMULATIVE_COMPACTION);
853
0
    if (already_exist) {
854
0
        return Status::AlreadyExist<false>(
855
0
                "compaction task has already been submitted, tablet_id={}", tablet->tablet_id());
856
0
    }
857
858
0
    already_exist = _compaction_submit_registry.insert(tablet, CompactionType::BASE_COMPACTION);
859
0
    if (already_exist) {
860
0
        _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION);
861
0
        return Status::AlreadyExist<false>(
862
0
                "compaction task has already been submitted, tablet_id={}", tablet->tablet_id());
863
0
    }
864
865
0
    auto compaction = std::make_shared<SingleReplicaCompaction>(*this, tablet, compaction_type);
866
0
    DorisMetrics::instance()->single_compaction_request_total->increment(1);
867
0
    auto st = compaction->prepare_compact();
868
869
0
    auto clean_single_replica_compaction = [tablet, this]() {
870
0
        _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION);
871
0
        _pop_tablet_from_submitted_compaction(tablet, CompactionType::BASE_COMPACTION);
872
0
    };
873
874
0
    if (!st.ok()) {
875
0
        clean_single_replica_compaction();
876
0
        if (!st.is<ErrorCode::CUMULATIVE_NO_SUITABLE_VERSION>()) {
877
0
            LOG(WARNING) << "failed to prepare single replica compaction, tablet_id="
878
0
                         << tablet->tablet_id() << " : " << st;
879
0
            return st;
880
0
        }
881
0
        return Status::OK(); // No suitable version, regard as OK
882
0
    }
883
884
0
    auto submit_st = _single_replica_compaction_thread_pool->submit_func(
885
0
            [tablet, compaction = std::move(compaction),
886
0
             clean_single_replica_compaction]() mutable {
887
0
                tablet->execute_single_replica_compaction(*compaction);
888
0
                clean_single_replica_compaction();
889
0
            });
890
0
    if (!submit_st.ok()) {
891
0
        clean_single_replica_compaction();
892
0
        return Status::InternalError(
893
0
                "failed to submit single replica compaction task to thread pool, "
894
0
                "tablet_id={}",
895
0
                tablet->tablet_id());
896
0
    }
897
0
    return Status::OK();
898
0
}
899
900
void StorageEngine::get_tablet_rowset_versions(const PGetTabletVersionsRequest* request,
901
0
                                               PGetTabletVersionsResponse* response) {
902
0
    TabletSharedPtr tablet = _tablet_manager->get_tablet(request->tablet_id());
903
0
    if (tablet == nullptr) {
904
0
        response->mutable_status()->set_status_code(TStatusCode::CANCELLED);
905
0
        return;
906
0
    }
907
0
    std::vector<Version> local_versions = tablet->get_all_local_versions();
908
0
    for (const auto& local_version : local_versions) {
909
0
        auto version = response->add_versions();
910
0
        version->set_first(local_version.first);
911
0
        version->set_second(local_version.second);
912
0
    }
913
0
    response->mutable_status()->set_status_code(0);
914
0
}
915
916
bool need_generate_compaction_tasks(int task_cnt_per_disk, int thread_per_disk,
917
0
                                    CompactionType compaction_type, bool all_base) {
918
    // We need to reserve at least one Slot for cumulative compaction.
919
    // So when there is only one Slot, we have to judge whether there is a cumulative compaction
920
    // in the current submitted tasks.
921
    // If so, the last Slot can be assigned to Base compaction,
922
    // otherwise, this Slot needs to be reserved for cumulative compaction.
923
0
    if (task_cnt_per_disk >= thread_per_disk) {
924
        // Return if no available slot
925
0
        return false;
926
0
    } else if (task_cnt_per_disk >= thread_per_disk - 1) {
927
        // Only one slot left, check if it can be assigned to base compaction task.
928
0
        if (compaction_type == CompactionType::BASE_COMPACTION) {
929
0
            if (all_base) {
930
0
                return false;
931
0
            }
932
0
        }
933
0
    }
934
0
    return true;
935
0
}
936
937
0
int get_concurrent_per_disk(int max_score, int thread_per_disk) {
938
0
    if (!config::enable_compaction_priority_scheduling) {
939
0
        return thread_per_disk;
940
0
    }
941
942
0
    double load_average = 0;
943
0
    if (DorisMetrics::instance()->system_metrics() != nullptr) {
944
0
        load_average = DorisMetrics::instance()->system_metrics()->get_load_average_1_min();
945
0
    }
946
0
    int num_cores = doris::CpuInfo::num_cores();
947
0
    bool cpu_usage_high = load_average > num_cores * 0.8;
948
949
0
    auto process_memory_usage = doris::GlobalMemoryArbitrator::process_memory_usage();
950
0
    bool memory_usage_high = static_cast<double>(process_memory_usage) >
951
0
                             static_cast<double>(MemInfo::soft_mem_limit()) * 0.8;
952
953
0
    if (max_score <= config::low_priority_compaction_score_threshold &&
954
0
        (cpu_usage_high || memory_usage_high)) {
955
0
        return config::low_priority_compaction_task_num_per_disk;
956
0
    }
957
958
0
    return thread_per_disk;
959
0
}
960
961
0
int32_t disk_compaction_slot_num(const DataDir& data_dir) {
962
0
    return data_dir.is_ssd_disk() ? config::compaction_task_num_per_fast_disk
963
0
                                  : config::compaction_task_num_per_disk;
964
0
}
965
966
bool has_free_compaction_slot(CompactionSubmitRegistry* registry, DataDir* dir,
967
0
                              CompactionType compaction_type, uint32_t executing_cnt) {
968
0
    int32_t thread_per_disk = disk_compaction_slot_num(*dir);
969
0
    return need_generate_compaction_tasks(
970
0
            executing_cnt, thread_per_disk, compaction_type,
971
0
            !registry->has_compaction_task(dir, CompactionType::CUMULATIVE_COMPACTION));
972
0
}
973
974
std::vector<TabletSharedPtr> StorageEngine::_generate_compaction_tasks(
975
6
        CompactionType compaction_type, std::vector<DataDir*>& data_dirs, bool check_score) {
976
6
    TEST_SYNC_POINT_RETURN_WITH_VALUE("olap_server::_generate_compaction_tasks.return_empty",
977
0
                                      std::vector<TabletSharedPtr> {});
978
0
    _update_cumulative_compaction_policy();
979
0
    std::vector<TabletSharedPtr> tablets_compaction;
980
0
    uint32_t max_compaction_score = 0;
981
982
0
    std::random_device rd;
983
0
    std::mt19937 g(rd());
984
0
    std::shuffle(data_dirs.begin(), data_dirs.end(), g);
985
986
    // Copy _tablet_submitted_xxx_compaction map so that we don't need to hold _tablet_submitted_compaction_mutex
987
    // when traversing the data dir
988
0
    auto compaction_registry_snapshot = _compaction_submit_registry.create_snapshot();
989
0
    for (auto* data_dir : data_dirs) {
990
0
        bool need_pick_tablet = true;
991
0
        uint32_t executing_task_num =
992
0
                compaction_registry_snapshot.count_executing_cumu_and_base(data_dir);
993
0
        need_pick_tablet = has_free_compaction_slot(&compaction_registry_snapshot, data_dir,
994
0
                                                    compaction_type, executing_task_num);
995
0
        if (!need_pick_tablet && !check_score) {
996
0
            continue;
997
0
        }
998
999
        // Even if need_pick_tablet is false, we still need to call find_best_tablet_to_compaction(),
1000
        // So that we can update the max_compaction_score metric.
1001
0
        if (!data_dir->reach_capacity_limit(0)) {
1002
0
            uint32_t disk_max_score = 0;
1003
0
            auto tablets = compaction_registry_snapshot.pick_topn_tablets_for_compaction(
1004
0
                    _tablet_manager.get(), data_dir, compaction_type,
1005
0
                    _cumulative_compaction_policies, &disk_max_score);
1006
0
            int concurrent_num =
1007
0
                    get_concurrent_per_disk(disk_max_score, disk_compaction_slot_num(*data_dir));
1008
0
            need_pick_tablet = need_generate_compaction_tasks(
1009
0
                    executing_task_num, concurrent_num, compaction_type,
1010
0
                    !compaction_registry_snapshot.has_compaction_task(
1011
0
                            data_dir, CompactionType::CUMULATIVE_COMPACTION));
1012
0
            for (const auto& tablet : tablets) {
1013
0
                if (tablet != nullptr) {
1014
0
                    if (need_pick_tablet) {
1015
0
                        tablets_compaction.emplace_back(tablet);
1016
0
                    }
1017
0
                    max_compaction_score = std::max(max_compaction_score, disk_max_score);
1018
0
                }
1019
0
            }
1020
0
        }
1021
0
    }
1022
1023
0
    if (max_compaction_score > 0) {
1024
0
        if (compaction_type == CompactionType::BASE_COMPACTION) {
1025
0
            DorisMetrics::instance()->tablet_base_max_compaction_score->set_value(
1026
0
                    max_compaction_score);
1027
0
        } else {
1028
0
            DorisMetrics::instance()->tablet_cumulative_max_compaction_score->set_value(
1029
0
                    max_compaction_score);
1030
0
        }
1031
0
    }
1032
0
    return tablets_compaction;
1033
6
}
1034
1035
0
void StorageEngine::_update_cumulative_compaction_policy() {
1036
0
    if (_cumulative_compaction_policies.empty()) {
1037
0
        _cumulative_compaction_policies[CUMULATIVE_SIZE_BASED_POLICY] =
1038
0
                CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy(
1039
0
                        CUMULATIVE_SIZE_BASED_POLICY);
1040
0
        _cumulative_compaction_policies[CUMULATIVE_TIME_SERIES_POLICY] =
1041
0
                CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy(
1042
0
                        CUMULATIVE_TIME_SERIES_POLICY);
1043
0
    }
1044
0
}
1045
1046
void StorageEngine::_pop_tablet_from_submitted_compaction(TabletSharedPtr tablet,
1047
7
                                                          CompactionType compaction_type) {
1048
7
    _compaction_submit_registry.remove(tablet, compaction_type, [this]() {
1049
7
        std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex);
1050
7
        _wakeup_producer_flag = 1;
1051
7
        _compaction_producer_sleep_cv.notify_one();
1052
7
    });
1053
7
}
1054
1055
Status StorageEngine::_submit_compaction_task(TabletSharedPtr tablet,
1056
21
                                              CompactionType compaction_type, bool force) {
1057
21
    if (tablet->tablet_meta()->tablet_schema()->enable_single_replica_compaction() &&
1058
21
        should_fetch_from_peer(tablet->tablet_id())) {
1059
0
        VLOG_CRITICAL << "start to submit single replica compaction task for tablet: "
1060
0
                      << tablet->tablet_id();
1061
0
        Status st = _submit_single_replica_compaction_task(tablet, compaction_type);
1062
0
        if (!st.ok()) {
1063
0
            LOG(WARNING) << "failed to submit single replica compaction task for tablet: "
1064
0
                         << tablet->tablet_id() << ", err: " << st;
1065
0
        }
1066
1067
0
        return Status::OK();
1068
0
    }
1069
21
    bool already_exist = _compaction_submit_registry.insert(tablet, compaction_type);
1070
21
    if (already_exist) {
1071
0
        return Status::AlreadyExist<false>(
1072
0
                "compaction task has already been submitted, tablet_id={}, compaction_type={}.",
1073
0
                tablet->tablet_id(), compaction_type);
1074
0
    }
1075
21
    tablet->compaction_stage = CompactionStage::PENDING;
1076
21
    std::shared_ptr<CompactionMixin> compaction;
1077
21
    int64_t permits = 0;
1078
21
    Status st = Tablet::prepare_compaction_and_calculate_permits(compaction_type, tablet,
1079
21
                                                                 compaction, permits);
1080
21
    if (st.ok() && permits > 0) {
1081
21
        if (!force) {
1082
21
            _permit_limiter.request(permits);
1083
21
        }
1084
21
        std::unique_ptr<ThreadPool>& thread_pool =
1085
21
                (compaction_type == CompactionType::CUMULATIVE_COMPACTION)
1086
21
                        ? _cumu_compaction_thread_pool
1087
21
                        : _base_compaction_thread_pool;
1088
21
        VLOG_CRITICAL << "compaction thread pool. type: "
1089
0
                      << (compaction_type == CompactionType::CUMULATIVE_COMPACTION ? "CUMU"
1090
0
                                                                                   : "BASE")
1091
0
                      << ", num_threads: " << thread_pool->num_threads()
1092
0
                      << ", num_threads_pending_start: " << thread_pool->num_threads_pending_start()
1093
0
                      << ", num_active_threads: " << thread_pool->num_active_threads()
1094
0
                      << ", max_threads: " << thread_pool->max_threads()
1095
0
                      << ", min_threads: " << thread_pool->min_threads()
1096
0
                      << ", num_total_queued_tasks: " << thread_pool->get_queue_size();
1097
21
        auto status = thread_pool->submit_func([tablet, compaction = std::move(compaction),
1098
21
                                                compaction_type, permits, force, this]() {
1099
7
            if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) [[likely]] {
1100
7
                DorisMetrics::instance()->cumulative_compaction_task_running_total->increment(1);
1101
7
                DorisMetrics::instance()->cumulative_compaction_task_pending_total->set_value(
1102
7
                        _cumu_compaction_thread_pool->get_queue_size());
1103
7
            } else if (compaction_type == CompactionType::BASE_COMPACTION) {
1104
0
                DorisMetrics::instance()->base_compaction_task_running_total->increment(1);
1105
0
                DorisMetrics::instance()->base_compaction_task_pending_total->set_value(
1106
0
                        _base_compaction_thread_pool->get_queue_size());
1107
0
            }
1108
7
            bool is_large_task = true;
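            // This Defer runs on every exit path of the task body: it releases the
            // permits (unless forced), removes the tablet from the submit registry,
            // resets the compaction stage, and updates the running/pending metrics
            // plus the cumulative thread-pool bookkeeping.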
1109
7
            Defer defer {[&]() {
1110
7
                DBUG_EXECUTE_IF("StorageEngine._submit_compaction_task.sleep", { sleep(5); })
1111
7
                if (!force) {
1112
7
                    _permit_limiter.release(permits);
1113
7
                }
1114
7
                _pop_tablet_from_submitted_compaction(tablet, compaction_type);
1115
7
                tablet->compaction_stage = CompactionStage::NOT_SCHEDULED;
1116
7
                if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) {
1117
7
                    std::lock_guard<std::mutex> lock(_cumu_compaction_delay_mtx);
1118
7
                    _cumu_compaction_thread_pool_used_threads--;
1119
7
                    if (!is_large_task) {
1120
0
                        _cumu_compaction_thread_pool_small_tasks_running--;
1121
0
                    }
1122
7
                    DorisMetrics::instance()->cumulative_compaction_task_running_total->increment(
1123
7
                            -1);
1124
7
                    DorisMetrics::instance()->cumulative_compaction_task_pending_total->set_value(
1125
7
                            _cumu_compaction_thread_pool->get_queue_size());
1126
7
                } else if (compaction_type == CompactionType::BASE_COMPACTION) {
1127
0
                    DorisMetrics::instance()->base_compaction_task_running_total->increment(-1);
1128
0
                    DorisMetrics::instance()->base_compaction_task_pending_total->set_value(
1129
0
                            _base_compaction_thread_pool->get_queue_size());
1130
0
                }
1131
7
            }};
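            // The do { ... } while (false) block exists only so the small-task case
            // can break out early after updating the bookkeeping, while delayed large
            // tasks return before reaching the compaction itself.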
1132
7
            do {
1133
7
                if (compaction->compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) {
1134
7
                    std::lock_guard<std::mutex> lock(_cumu_compaction_delay_mtx);
1135
7
                    _cumu_compaction_thread_pool_used_threads++;
1136
7
                    if (config::large_cumu_compaction_task_min_thread_num > 1 &&
1137
7
                        _cumu_compaction_thread_pool->max_threads() >=
1138
0
                                config::large_cumu_compaction_task_min_thread_num) {
1139
                        // Determine if this is a large task based on configured thresholds
1140
0
                        is_large_task =
1141
0
                                (compaction->calc_input_rowsets_total_size() >
1142
0
                                         config::large_cumu_compaction_task_bytes_threshold ||
1143
0
                                 compaction->calc_input_rowsets_row_num() >
1144
0
                                         config::large_cumu_compaction_task_row_num_threshold);
1145
1146
                        // Small task. No delay needed
1147
0
                        if (!is_large_task) {
1148
0
                            _cumu_compaction_thread_pool_small_tasks_running++;
1149
0
                            break;
1150
0
                        }
1151
                        // Deal with large task
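                        // If the pool is already saturated, back off: record a
                        // compaction failure time so the producer skips this tablet
                        // for a while, then return without executing the compaction.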
1152
0
                        if (_should_delay_large_task()) {
1153
0
                            LOG_WARNING(
1154
0
                                    "failed to do CumulativeCompaction, cumu thread pool is "
1155
0
                                    "intensive, delay large task.")
1156
0
                                    .tag("tablet_id", tablet->tablet_id())
1157
0
                                    .tag("input_rows", compaction->calc_input_rowsets_row_num())
1158
0
                                    .tag("input_rowsets_total_size",
1159
0
                                         compaction->calc_input_rowsets_total_size())
1160
0
                                    .tag("config::large_cumu_compaction_task_bytes_threshold",
1161
0
                                         config::large_cumu_compaction_task_bytes_threshold)
1162
0
                                    .tag("config::large_cumu_compaction_task_row_num_threshold",
1163
0
                                         config::large_cumu_compaction_task_row_num_threshold)
1164
0
                                    .tag("remaining threads",
1165
0
                                         _cumu_compaction_thread_pool_used_threads)
1166
0
                                    .tag("small_tasks_running",
1167
0
                                         _cumu_compaction_thread_pool_small_tasks_running);
1168
                            // Delay this task and sleep 5s for this tablet
1169
0
                            long now = duration_cast<std::chrono::milliseconds>(
1170
0
                                               std::chrono::system_clock::now().time_since_epoch())
1171
0
                                               .count();
1172
0
                            tablet->set_last_cumu_compaction_failure_time(now);
1173
0
                            return;
1174
0
                        }
1175
0
                    }
1176
7
                }
1177
7
            } while (false);
1178
7
            if (!tablet->can_do_compaction(tablet->data_dir()->path_hash(), compaction_type)) {
1179
0
                LOG(INFO) << "Tablet state has been changed, no need to begin this compaction "
1180
0
                             "task, tablet_id="
1181
0
                          << tablet->tablet_id() << ", tablet_state=" << tablet->tablet_state();
1182
0
                return;
1183
0
            }
1184
7
            tablet->compaction_stage = CompactionStage::EXECUTING;
1185
7
            TEST_SYNC_POINT_RETURN_WITH_VOID("olap_server::execute_compaction");
1186
1
            tablet->execute_compaction(*compaction);
1187
1
        });
1188
21
        if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) [[likely]] {
1189
21
            DorisMetrics::instance()->cumulative_compaction_task_pending_total->set_value(
1190
21
                    _cumu_compaction_thread_pool->get_queue_size());
1191
21
        } else if (compaction_type == CompactionType::BASE_COMPACTION) {
1192
0
            DorisMetrics::instance()->base_compaction_task_pending_total->set_value(
1193
0
                    _base_compaction_thread_pool->get_queue_size());
1194
0
        }
1195
21
        if (!st.ok()) {
1196
0
            if (!force) {
1197
0
                _permit_limiter.release(permits);
1198
0
            }
1199
0
            _pop_tablet_from_submitted_compaction(tablet, compaction_type);
1200
0
            tablet->compaction_stage = CompactionStage::NOT_SCHEDULED;
1201
0
            return Status::InternalError(
1202
0
                    "failed to submit compaction task to thread pool, "
1203
0
                    "tablet_id={}, compaction_type={}.",
1204
0
                    tablet->tablet_id(), compaction_type);
1205
0
        }
1206
21
        return Status::OK();
1207
21
    } else {
1208
0
        _pop_tablet_from_submitted_compaction(tablet, compaction_type);
1209
0
        tablet->compaction_stage = CompactionStage::NOT_SCHEDULED;
1210
0
        if (!st.ok()) {
1211
0
            return Status::InternalError(
1212
0
                    "failed to prepare compaction task and calculate permits, "
1213
0
                    "tablet_id={}, compaction_type={}, "
1214
0
                    "permit={}, current_permit={}, status={}",
1215
0
                    tablet->tablet_id(), compaction_type, permits, _permit_limiter.usage(),
1216
0
                    st.to_string());
1217
0
        }
1218
0
        return st;
1219
0
    }
1220
21
}
1221
1222
Status StorageEngine::submit_compaction_task(TabletSharedPtr tablet, CompactionType compaction_type,
1223
0
                                             bool force, bool eager) {
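    // Non-eager submissions first check whether any data dir still has a free
    // compaction slot; if every store is saturated, the request is dropped with a
    // rate-limited warning and OK is returned.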
1224
0
    if (!eager) {
1225
0
        DCHECK(compaction_type == CompactionType::BASE_COMPACTION ||
1226
0
               compaction_type == CompactionType::CUMULATIVE_COMPACTION);
1227
0
        auto compaction_registry_snapshot = _compaction_submit_registry.create_snapshot();
1228
0
        auto stores = get_stores();
1229
1230
0
        bool is_busy = std::none_of(
1231
0
                stores.begin(), stores.end(),
1232
0
                [&compaction_registry_snapshot, compaction_type](auto* data_dir) {
1233
0
                    return has_free_compaction_slot(
1234
0
                            &compaction_registry_snapshot, data_dir, compaction_type,
1235
0
                            compaction_registry_snapshot.count_executing_cumu_and_base(data_dir));
1236
0
                });
1237
0
        if (is_busy) {
1238
0
            LOG_EVERY_N(WARNING, 100)
1239
0
                    << "Too busy to submit a compaction task, tablet=" << tablet->get_table_id();
1240
0
            return Status::OK();
1241
0
        }
1242
0
    }
1243
0
    _update_cumulative_compaction_policy();
1244
    // alter table tableName set ("compaction_policy"="time_series")
1245
    // if the table's compaction policy was altered, we need to update the tablet's compaction policy shared ptr
1246
0
    if (tablet->get_cumulative_compaction_policy() == nullptr ||
1247
0
        tablet->get_cumulative_compaction_policy()->name() !=
1248
0
                tablet->tablet_meta()->compaction_policy()) {
1249
0
        tablet->set_cumulative_compaction_policy(
1250
0
                _cumulative_compaction_policies.at(tablet->tablet_meta()->compaction_policy()));
1251
0
    }
1252
0
    tablet->set_skip_compaction(false);
1253
0
    return _submit_compaction_task(tablet, compaction_type, force);
1254
0
}
1255
1256
Status StorageEngine::_handle_seg_compaction(std::shared_ptr<SegcompactionWorker> worker,
1257
                                             SegCompactionCandidatesSharedPtr segments,
1258
11
                                             uint64_t submission_time) {
1259
    // note: be aware that worker->_writer may be released when the task is cancelled
1260
11
    uint64_t exec_queue_time = GetCurrentTimeMicros() - submission_time;
1261
11
    LOG(INFO) << "segcompaction thread pool queue time(ms): " << exec_queue_time / 1000;
1262
11
    worker->compact_segments(segments);
1263
    // return OK here. error will be reported via BetaRowsetWriter::_segcompaction_status
1264
11
    return Status::OK();
1265
11
}
1266
1267
Status StorageEngine::submit_seg_compaction_task(std::shared_ptr<SegcompactionWorker> worker,
1268
11
                                                 SegCompactionCandidatesSharedPtr segments) {
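    // Record the submission time so _handle_seg_compaction can report how long the
    // task waited in the segcompaction thread pool queue.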
1269
11
    uint64_t submission_time = GetCurrentTimeMicros();
1270
11
    return _seg_compaction_thread_pool->submit_func([this, worker, segments, submission_time] {
1271
11
        static_cast<void>(_handle_seg_compaction(worker, segments, submission_time));
1272
11
    });
1273
11
}
1274
1275
0
Status StorageEngine::process_index_change_task(const TAlterInvertedIndexReq& request) {
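    // Build (or drop) inverted indexes for the requested tablet; the actual work is
    // delegated to an IndexBuilder via _handle_index_change.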
1276
0
    auto tablet_id = request.tablet_id;
1277
0
    TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id);
1278
0
    DBUG_EXECUTE_IF("StorageEngine::process_index_change_task_tablet_nullptr",
1279
0
                    { tablet = nullptr; })
1280
0
    if (tablet == nullptr) {
1281
0
        LOG(WARNING) << "tablet: " << tablet_id << " not exist";
1282
0
        return Status::InternalError("tablet not exist, tablet_id={}.", tablet_id);
1283
0
    }
1284
1285
0
    IndexBuilderSharedPtr index_builder = std::make_shared<IndexBuilder>(
1286
0
            *this, tablet, request.columns, request.alter_inverted_indexes, request.is_drop_op);
1287
0
    RETURN_IF_ERROR(_handle_index_change(index_builder));
1288
0
    return Status::OK();
1289
0
}
1290
1291
0
Status StorageEngine::_handle_index_change(IndexBuilderSharedPtr index_builder) {
1292
0
    RETURN_IF_ERROR(index_builder->init());
1293
0
    RETURN_IF_ERROR(index_builder->do_build_inverted_index());
1294
0
    return Status::OK();
1295
0
}
1296
1297
0
void StorageEngine::_cooldown_tasks_producer_callback() {
1298
0
    int64_t interval = config::generate_cooldown_task_interval_sec;
1299
    // the cooldown replica may be slow to upload its meta file, so we should wait
1300
    // until it has finished uploading
1301
0
    int64_t skip_failed_interval = interval * 10;
1302
0
    do {
1303
        // these tablets are ordered by priority desc
1304
0
        std::vector<TabletSharedPtr> tablets;
1305
0
        std::vector<RowsetSharedPtr> rowsets;
1306
        // TODO(luwei): a more efficient way to get cooldown tablets
1307
0
        auto cur_time = time(nullptr);
1308
        // we should skip all the tablets that are not running and those already pending cooldown,
1309
        // as well as tablets that recently failed to follow cooldown
1310
0
        auto skip_tablet = [this, skip_failed_interval,
1311
0
                            cur_time](const TabletSharedPtr& tablet) -> bool {
1312
0
            bool is_skip =
1313
0
                    cur_time - tablet->last_failed_follow_cooldown_time() < skip_failed_interval ||
1314
0
                    TABLET_RUNNING != tablet->tablet_state();
1315
0
            if (is_skip) {
1316
0
                return is_skip;
1317
0
            }
1318
0
            std::lock_guard<std::mutex> lock(_running_cooldown_mutex);
1319
0
            return _running_cooldown_tablets.find(tablet->tablet_id()) !=
1320
0
                   _running_cooldown_tablets.end();
1321
0
        };
1322
0
        _tablet_manager->get_cooldown_tablets(&tablets, &rowsets, std::move(skip_tablet));
1323
0
        LOG(INFO) << "cooldown producer get tablet num: " << tablets.size();
1324
0
        int max_priority = cast_set<int>(tablets.size());
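        // Since the tablets are ordered by priority, earlier tablets are assigned a
        // larger priority value when offered to the cooldown thread pool.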
1325
0
        int index = 0;
1326
0
        for (const auto& tablet : tablets) {
1327
0
            {
1328
0
                std::lock_guard<std::mutex> lock(_running_cooldown_mutex);
1329
0
                _running_cooldown_tablets.insert(tablet->tablet_id());
1330
0
            }
1331
0
            PriorityThreadPool::Task task;
1332
0
            RowsetSharedPtr rowset = std::move(rowsets[index++]);
1333
0
            task.work_function = [tablet, rowset, task_size = tablets.size(), this]() {
1334
0
                Status st = tablet->cooldown(rowset);
1335
0
                {
1336
0
                    std::lock_guard<std::mutex> lock(_running_cooldown_mutex);
1337
0
                    _running_cooldown_tablets.erase(tablet->tablet_id());
1338
0
                }
1339
0
                if (!st.ok()) {
1340
0
                    LOG(WARNING) << "failed to cooldown, tablet: " << tablet->tablet_id()
1341
0
                                 << " err: " << st;
1342
0
                } else {
1343
0
                    LOG(INFO) << "succeed to cooldown, tablet: " << tablet->tablet_id()
1344
0
                              << " cooldown progress ("
1345
0
                              << task_size - _cooldown_thread_pool->get_queue_size() << "/"
1346
0
                              << task_size << ")";
1347
0
                }
1348
0
            };
1349
0
            task.priority = max_priority--;
1350
0
            bool submitted = _cooldown_thread_pool->offer(std::move(task));
1351
1352
0
            if (!submitted) {
1353
0
                LOG(INFO) << "failed to submit cooldown task";
1354
0
            }
1355
0
        }
1356
0
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
1357
0
}
1358
1359
0
void StorageEngine::_remove_unused_remote_files_callback() {
1360
0
    while (!_stop_background_threads_latch.wait_for(
1361
0
            std::chrono::seconds(config::remove_unused_remote_files_interval_sec))) {
1362
0
        LOG(INFO) << "begin to remove unused remote files";
1363
0
        do_remove_unused_remote_files();
1364
0
    }
1365
0
}
1366
1367
0
void StorageEngine::do_remove_unused_remote_files() {
1368
0
    auto tablets = tablet_manager()->get_all_tablet([](Tablet* t) {
1369
0
        return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() &&
1370
0
               t->tablet_state() == TABLET_RUNNING &&
1371
0
               t->cooldown_conf_unlocked().cooldown_replica_id == t->replica_id();
1372
0
    });
1373
0
    TConfirmUnusedRemoteFilesRequest req;
1374
0
    req.__isset.confirm_list = true;
1375
    // tablet_id -> [storage_resource, unused_remote_files]
1376
0
    using unused_remote_files_buffer_t =
1377
0
            std::unordered_map<int64_t, std::pair<StorageResource, std::vector<io::FileInfo>>>;
1378
0
    unused_remote_files_buffer_t buffer;
1379
0
    int64_t num_files_in_buffer = 0;
1380
    // assume a filename is 0.1KB, so the buffer size should not be larger than 100MB
1381
0
    constexpr int64_t max_files_in_buffer = 1000000;
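    // Overall flow: for each candidate tablet, list its remote files and drop the
    // ones that still belong to a cooldowned or pending rowset (or are the current
    // cooldown meta file); buffer the remainder, then periodically ask the FE via
    // confirm_unused_remote_files to confirm the tablets before deleting the files
    // in batches.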
1382
1383
0
    auto calc_unused_remote_files = [&req, &buffer, &num_files_in_buffer, this](Tablet* t) {
1384
0
        auto storage_resource = get_resource_by_storage_policy_id(t->storage_policy_id());
1385
0
        if (!storage_resource) {
1386
0
            LOG(WARNING) << "encounter error when remove unused remote files, tablet_id="
1387
0
                         << t->tablet_id() << " : " << storage_resource.error();
1388
0
            return;
1389
0
        }
1390
1391
        // TODO(plat1ko): Support path v1
1392
0
        if (storage_resource->path_version > 0) {
1393
0
            return;
1394
0
        }
1395
1396
0
        std::vector<io::FileInfo> files;
1397
        // FIXME(plat1ko): What if user reset resource in storage policy to another resource?
1398
        //  Maybe we should also list files in previously uploaded resources.
1399
0
        bool exists = true;
1400
0
        auto st = storage_resource->fs->list(storage_resource->remote_tablet_path(t->tablet_id()),
1401
0
                                             true, &files, &exists);
1402
0
        if (!st.ok()) {
1403
0
            LOG(WARNING) << "encounter error when remove unused remote files, tablet_id="
1404
0
                         << t->tablet_id() << " : " << st;
1405
0
            return;
1406
0
        }
1407
0
        if (!exists || files.empty()) {
1408
0
            return;
1409
0
        }
1410
        // get all cooldowned rowsets
1411
0
        RowsetIdUnorderedSet cooldowned_rowsets;
1412
0
        UniqueId cooldown_meta_id;
1413
0
        {
1414
0
            std::shared_lock rlock(t->get_header_lock());
1415
0
            for (auto&& rs_meta : t->tablet_meta()->all_rs_metas()) {
1416
0
                if (!rs_meta->is_local()) {
1417
0
                    cooldowned_rowsets.insert(rs_meta->rowset_id());
1418
0
                }
1419
0
            }
1420
0
            if (cooldowned_rowsets.empty()) {
1421
0
                return;
1422
0
            }
1423
0
            cooldown_meta_id = t->tablet_meta()->cooldown_meta_id();
1424
0
        }
1425
0
        auto [cooldown_term, cooldown_replica_id] = t->cooldown_conf();
1426
0
        if (cooldown_replica_id != t->replica_id()) {
1427
0
            return;
1428
0
        }
1429
        // {cooldown_replica_id}.{cooldown_term}.meta
1430
0
        std::string remote_meta_path =
1431
0
                cooldown_tablet_meta_filename(cooldown_replica_id, cooldown_term);
1432
        // filter out the paths that should be reserved
1433
0
        auto filter = [&, this](io::FileInfo& info) {
1434
0
            std::string_view filename = info.file_name;
1435
0
            if (filename.ends_with(".meta")) {
1436
0
                return filename == remote_meta_path;
1437
0
            }
1438
0
            auto rowset_id = extract_rowset_id(filename);
1439
0
            if (rowset_id.hi == 0) {
1440
0
                return false;
1441
0
            }
1442
0
            return cooldowned_rowsets.contains(rowset_id) ||
1443
0
                   pending_remote_rowsets().contains(rowset_id);
1444
0
        };
1445
0
        files.erase(std::remove_if(files.begin(), files.end(), std::move(filter)), files.end());
1446
0
        if (files.empty()) {
1447
0
            return;
1448
0
        }
1449
0
        files.shrink_to_fit();
1450
0
        num_files_in_buffer += files.size();
1451
0
        buffer.insert({t->tablet_id(), {*storage_resource, std::move(files)}});
1452
0
        auto& info = req.confirm_list.emplace_back();
1453
0
        info.__set_tablet_id(t->tablet_id());
1454
0
        info.__set_cooldown_replica_id(cooldown_replica_id);
1455
0
        info.__set_cooldown_meta_id(cooldown_meta_id.to_thrift());
1456
0
    };
1457
1458
0
    auto confirm_and_remove_files = [&buffer, &req, &num_files_in_buffer]() {
1459
0
        TConfirmUnusedRemoteFilesResult result;
1460
0
        LOG(INFO) << "begin to confirm unused remote files. num_tablets=" << buffer.size()
1461
0
                  << " num_files=" << num_files_in_buffer;
1462
0
        auto st = MasterServerClient::instance()->confirm_unused_remote_files(req, &result);
1463
0
        if (!st.ok()) {
1464
0
            LOG(WARNING) << st;
1465
0
            return;
1466
0
        }
1467
0
        for (auto id : result.confirmed_tablets) {
1468
0
            if (auto it = buffer.find(id); LIKELY(it != buffer.end())) {
1469
0
                auto& storage_resource = it->second.first;
1470
0
                auto& files = it->second.second;
1471
0
                std::vector<io::Path> paths;
1472
0
                paths.reserve(files.size());
1473
                // delete unused files
1474
0
                LOG(INFO) << "delete unused files. root_path=" << storage_resource.fs->root_path()
1475
0
                          << " tablet_id=" << id;
1476
0
                io::Path dir = storage_resource.remote_tablet_path(id);
1477
0
                for (auto& file : files) {
1478
0
                    auto file_path = dir / file.file_name;
1479
0
                    LOG(INFO) << "delete unused file: " << file_path.native();
1480
0
                    paths.push_back(std::move(file_path));
1481
0
                }
1482
0
                st = storage_resource.fs->batch_delete(paths);
1483
0
                if (!st.ok()) {
1484
0
                    LOG(WARNING) << "failed to delete unused files, tablet_id=" << id << " : "
1485
0
                                 << st;
1486
0
                }
1487
0
                buffer.erase(it);
1488
0
            }
1489
0
        }
1490
0
    };
1491
1492
    // batch confirm to reduce FE's overhead
1493
0
    auto next_confirm_time = std::chrono::steady_clock::now() +
1494
0
                             std::chrono::seconds(config::confirm_unused_remote_files_interval_sec);
1495
0
    for (auto& t : tablets) {
1496
0
        if (t.use_count() <= 1 // this means tablet has been dropped
1497
0
            || t->cooldown_conf_unlocked().cooldown_replica_id != t->replica_id() ||
1498
0
            t->tablet_state() != TABLET_RUNNING) {
1499
0
            continue;
1500
0
        }
1501
0
        calc_unused_remote_files(t.get());
1502
0
        if (num_files_in_buffer > 0 && (num_files_in_buffer > max_files_in_buffer ||
1503
0
                                        std::chrono::steady_clock::now() > next_confirm_time)) {
1504
0
            confirm_and_remove_files();
1505
0
            buffer.clear();
1506
0
            req.confirm_list.clear();
1507
0
            num_files_in_buffer = 0;
1508
0
            next_confirm_time =
1509
0
                    std::chrono::steady_clock::now() +
1510
0
                    std::chrono::seconds(config::confirm_unused_remote_files_interval_sec);
1511
0
        }
1512
0
    }
1513
0
    if (num_files_in_buffer > 0) {
1514
0
        confirm_and_remove_files();
1515
0
    }
1516
0
}
1517
1518
0
void StorageEngine::_cold_data_compaction_producer_callback() {
1519
0
    while (!_stop_background_threads_latch.wait_for(
1520
0
            std::chrono::seconds(config::cold_data_compaction_interval_sec))) {
1521
0
        if (config::disable_auto_compaction ||
1522
0
            GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE)) {
1523
0
            continue;
1524
0
        }
1525
1526
0
        std::unordered_set<int64_t> copied_tablet_submitted;
1527
0
        {
1528
0
            std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1529
0
            copied_tablet_submitted = _cold_compaction_tablet_submitted;
1530
0
        }
1531
0
        int64_t n = config::cold_data_compaction_thread_num - copied_tablet_submitted.size();
1532
0
        if (n <= 0) {
1533
0
            continue;
1534
0
        }
1535
0
        auto tablets = _tablet_manager->get_all_tablet([&copied_tablet_submitted](Tablet* t) {
1536
0
            return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() &&
1537
0
                   t->tablet_state() == TABLET_RUNNING &&
1538
0
                   !copied_tablet_submitted.contains(t->tablet_id()) &&
1539
0
                   !t->tablet_meta()->tablet_schema()->disable_auto_compaction();
1540
0
        });
1541
0
        std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_compact;
1542
0
        tablet_to_compact.reserve(n + 1);
1543
0
        std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_follow;
1544
0
        tablet_to_follow.reserve(n + 1);
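        // Keep at most n tablets in each bucket, retaining the ones with the highest
        // cold data compaction score: once a vector exceeds n entries it is sorted
        // descending by score and the lowest-scored entry is popped (a simple top-n
        // selection).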
1545
1546
0
        for (auto& t : tablets) {
1547
0
            if (t->replica_id() == t->cooldown_conf_unlocked().cooldown_replica_id) {
1548
0
                auto score = t->calc_cold_data_compaction_score();
1549
0
                if (score < 4) {
1550
0
                    continue;
1551
0
                }
1552
0
                tablet_to_compact.emplace_back(t, score);
1553
0
                if (tablet_to_compact.size() > n) {
1554
0
                    std::sort(tablet_to_compact.begin(), tablet_to_compact.end(),
1555
0
                              [](auto& a, auto& b) { return a.second > b.second; });
1556
0
                    tablet_to_compact.pop_back();
1557
0
                }
1558
0
                continue;
1559
0
            }
1560
            // else, need to follow
1561
0
            {
1562
0
                std::lock_guard lock(_running_cooldown_mutex);
1563
0
                if (_running_cooldown_tablets.contains(t->table_id())) {
1564
                    // already in cooldown queue
1565
0
                    continue;
1566
0
                }
1567
0
            }
1568
            // TODO(plat1ko): some avoidance strategy if failed to follow
1569
0
            auto score = t->calc_cold_data_compaction_score();
1570
0
            tablet_to_follow.emplace_back(t, score);
1571
1572
0
            if (tablet_to_follow.size() > n) {
1573
0
                std::sort(tablet_to_follow.begin(), tablet_to_follow.end(),
1574
0
                          [](auto& a, auto& b) { return a.second > b.second; });
1575
0
                tablet_to_follow.pop_back();
1576
0
            }
1577
0
        }
1578
1579
0
        for (auto& [tablet, score] : tablet_to_compact) {
1580
0
            LOG(INFO) << "submit cold data compaction. tablet_id=" << tablet->tablet_id()
1581
0
                      << " score=" << score;
1582
0
            static_cast<void>(_cold_data_compaction_thread_pool->submit_func(
1583
0
                    [&, t = std::move(tablet), this]() {
1584
0
                        auto compaction = std::make_shared<ColdDataCompaction>(*this, t);
1585
0
                        {
1586
0
                            std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1587
0
                            _cold_compaction_tablet_submitted.insert(t->tablet_id());
1588
0
                        }
1589
0
                        Defer defer {[&] {
1590
0
                            std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1591
0
                            _cold_compaction_tablet_submitted.erase(t->tablet_id());
1592
0
                        }};
1593
0
                        std::unique_lock cold_compaction_lock(t->get_cold_compaction_lock(),
1594
0
                                                              std::try_to_lock);
1595
0
                        if (!cold_compaction_lock.owns_lock()) {
1596
0
                            LOG(WARNING) << "try cold_compaction_lock failed, tablet_id="
1597
0
                                         << t->tablet_id();
1598
0
                            return;
1599
0
                        }
1600
0
                        _update_cumulative_compaction_policy();
1601
0
                        if (t->get_cumulative_compaction_policy() == nullptr ||
1602
0
                            t->get_cumulative_compaction_policy()->name() !=
1603
0
                                    t->tablet_meta()->compaction_policy()) {
1604
0
                            t->set_cumulative_compaction_policy(_cumulative_compaction_policies.at(
1605
0
                                    t->tablet_meta()->compaction_policy()));
1606
0
                        }
1607
1608
0
                        auto st = compaction->prepare_compact();
1609
0
                        if (!st.ok()) {
1610
0
                            LOG(WARNING) << "failed to prepare cold data compaction. tablet_id="
1611
0
                                         << t->tablet_id() << " err=" << st;
1612
0
                            return;
1613
0
                        }
1614
1615
0
                        st = compaction->execute_compact();
1616
0
                        if (!st.ok()) {
1617
0
                            LOG(WARNING) << "failed to execute cold data compaction. tablet_id="
1618
0
                                         << t->tablet_id() << " err=" << st;
1619
0
                            return;
1620
0
                        }
1621
0
                    }));
1622
0
        }
1623
1624
0
        for (auto& [tablet, score] : tablet_to_follow) {
1625
0
            LOG(INFO) << "submit to follow cooldown meta. tablet_id=" << tablet->tablet_id()
1626
0
                      << " score=" << score;
1627
0
            static_cast<void>(_cold_data_compaction_thread_pool->submit_func([&,
1628
0
                                                                              t = std::move(
1629
0
                                                                                      tablet)]() {
1630
0
                {
1631
0
                    std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1632
0
                    _cold_compaction_tablet_submitted.insert(t->tablet_id());
1633
0
                }
1634
0
                auto st = t->cooldown();
1635
0
                {
1636
0
                    std::lock_guard lock(_cold_compaction_tablet_submitted_mtx);
1637
0
                    _cold_compaction_tablet_submitted.erase(t->tablet_id());
1638
0
                }
1639
0
                if (!st.ok()) {
1640
                    // The cooldown of the replica may be relatively slow
1641
                    // resulting in a short period of time during which following cannot succeed
1642
0
                    LOG_EVERY_N(WARNING, 5)
1643
0
                            << "failed to cooldown. tablet_id=" << t->tablet_id() << " err=" << st;
1644
0
                }
1645
0
            }));
1646
0
        }
1647
0
    }
1648
0
}
1649
1650
void StorageEngine::add_async_publish_task(int64_t partition_id, int64_t tablet_id,
1651
                                           int64_t publish_version, int64_t transaction_id,
1652
2.05k
                                           bool is_recovery) {
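    // For a fresh task (not recovery), skip versions already queued for this tablet
    // and persist a PendingPublishInfoPB to the tablet meta storage so the pending
    // publish can presumably be recovered after a restart; recovery calls only
    // re-register the in-memory task.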
1653
2.05k
    if (!is_recovery) {
1654
2.05k
        bool exists = false;
1655
2.05k
        {
1656
2.05k
            std::shared_lock<std::shared_mutex> rlock(_async_publish_lock);
1657
2.05k
            if (auto tablet_iter = _async_publish_tasks.find(tablet_id);
1658
2.05k
                tablet_iter != _async_publish_tasks.end()) {
1659
2.05k
                if (auto iter = tablet_iter->second.find(publish_version);
1660
2.05k
                    iter != tablet_iter->second.end()) {
1661
20
                    exists = true;
1662
20
                }
1663
2.05k
            }
1664
2.05k
        }
1665
2.05k
        if (exists) {
1666
20
            return;
1667
20
        }
1668
2.03k
        TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id);
1669
2.03k
        if (tablet == nullptr) {
1670
0
            LOG(INFO) << "tablet may be dropped when add async publish task, tablet_id: "
1671
0
                      << tablet_id;
1672
0
            return;
1673
0
        }
1674
2.03k
        PendingPublishInfoPB pending_publish_info_pb;
1675
2.03k
        pending_publish_info_pb.set_partition_id(partition_id);
1676
2.03k
        pending_publish_info_pb.set_transaction_id(transaction_id);
1677
2.03k
        static_cast<void>(TabletMetaManager::save_pending_publish_info(
1678
2.03k
                tablet->data_dir(), tablet->tablet_id(), publish_version,
1679
2.03k
                pending_publish_info_pb.SerializeAsString()));
1680
2.03k
    }
1681
2.05k
    LOG(INFO) << "add pending publish task, tablet_id: " << tablet_id
1682
2.03k
              << " version: " << publish_version << " txn_id:" << transaction_id
1683
2.03k
              << " is_recovery: " << is_recovery;
1684
2.03k
    std::unique_lock<std::shared_mutex> wlock(_async_publish_lock);
1685
2.03k
    _async_publish_tasks[tablet_id][publish_version] = {transaction_id, partition_id};
1686
2.03k
}
1687
1688
3
int64_t StorageEngine::get_pending_publish_min_version(int64_t tablet_id) {
1689
3
    std::shared_lock<std::shared_mutex> rlock(_async_publish_lock);
1690
3
    auto iter = _async_publish_tasks.find(tablet_id);
1691
3
    if (iter == _async_publish_tasks.end()) {
1692
0
        return INT64_MAX;
1693
0
    }
1694
3
    if (iter->second.empty()) {
1695
0
        return INT64_MAX;
1696
0
    }
1697
3
    return iter->second.begin()->first;
1698
3
}
1699
1700
10
void StorageEngine::_process_async_publish() {
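    // For each tablet with pending async publish tasks, look at its smallest pending
    // version: versions already covered by the tablet are dropped, the version equal
    // to max_version + 1 is submitted to the publish txn thread pool, and when a
    // version gap exists the per-tablet queue is trimmed down to max_version_config
    // entries. Removed tasks also get their persisted pending publish info cleaned up.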
1701
    // tablet, publish_version
1702
10
    std::vector<std::pair<TabletSharedPtr, int64_t>> need_removed_tasks;
1703
10
    {
1704
10
        std::unique_lock<std::shared_mutex> wlock(_async_publish_lock);
1705
10
        for (auto tablet_iter = _async_publish_tasks.begin();
1706
20
             tablet_iter != _async_publish_tasks.end();) {
1707
10
            if (tablet_iter->second.empty()) {
1708
1
                tablet_iter = _async_publish_tasks.erase(tablet_iter);
1709
1
                continue;
1710
1
            }
1711
9
            int64_t tablet_id = tablet_iter->first;
1712
9
            TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id);
1713
9
            if (!tablet) {
1714
1
                LOG(WARNING) << "tablet does not exist when async publush, tablet_id: "
1715
1
                             << tablet_id;
1716
1
                tablet_iter = _async_publish_tasks.erase(tablet_iter);
1717
1
                continue;
1718
1
            }
1719
1720
8
            auto task_iter = tablet_iter->second.begin();
1721
8
            int64_t version = task_iter->first;
1722
8
            int64_t transaction_id = task_iter->second.first;
1723
8
            int64_t partition_id = task_iter->second.second;
1724
8
            int64_t max_version = tablet->max_version().second;
1725
1726
8
            if (version <= max_version) {
1727
6
                need_removed_tasks.emplace_back(tablet, version);
1728
6
                tablet_iter->second.erase(task_iter);
1729
6
                tablet_iter++;
1730
6
                continue;
1731
6
            }
1732
2
            if (version != max_version + 1) {
1733
1
                int32_t max_version_config = tablet->max_version_config();
1734
                // Keep only the most recent versions
1735
31
                while (tablet_iter->second.size() > max_version_config) {
1736
30
                    need_removed_tasks.emplace_back(tablet, version);
1737
30
                    task_iter = tablet_iter->second.erase(task_iter);
1738
30
                    version = task_iter->first;
1739
30
                }
1740
1
                tablet_iter++;
1741
1
                continue;
1742
1
            }
1743
1744
1
            auto async_publish_task = std::make_shared<AsyncTabletPublishTask>(
1745
1
                    *this, tablet, partition_id, transaction_id, version);
1746
1
            static_cast<void>(_tablet_publish_txn_thread_pool->submit_func(
1747
1
                    [=]() { async_publish_task->handle(); }));
1748
1
            tablet_iter->second.erase(task_iter);
1749
1
            need_removed_tasks.emplace_back(tablet, version);
1750
1
            tablet_iter++;
1751
1
        }
1752
10
    }
1753
37
    for (auto& [tablet, publish_version] : need_removed_tasks) {
1754
37
        static_cast<void>(TabletMetaManager::remove_pending_publish_info(
1755
37
                tablet->data_dir(), tablet->tablet_id(), publish_version));
1756
37
    }
1757
10
}
1758
1759
0
void StorageEngine::_async_publish_callback() {
1760
0
    while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(30))) {
1761
0
        _process_async_publish();
1762
0
    }
1763
0
}
1764
1765
0
void StorageEngine::_check_tablet_delete_bitmap_score_callback() {
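    // Periodically sample the highest tablet delete bitmap scores and expose them as
    // metrics; the loop returns immediately if the check is disabled via
    // config::enable_check_tablet_delete_bitmap_score.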
1766
0
    LOG(INFO) << "try to start check tablet delete bitmap score!";
1767
0
    while (!_stop_background_threads_latch.wait_for(
1768
0
            std::chrono::seconds(config::check_tablet_delete_bitmap_interval_seconds))) {
1769
0
        if (!config::enable_check_tablet_delete_bitmap_score) {
1770
0
            return;
1771
0
        }
1772
0
        uint64_t max_delete_bitmap_score = 0;
1773
0
        uint64_t max_base_rowset_delete_bitmap_score = 0;
1774
0
        _tablet_manager->get_topn_tablet_delete_bitmap_score(&max_delete_bitmap_score,
1775
0
                                                             &max_base_rowset_delete_bitmap_score);
1776
0
        if (max_delete_bitmap_score > 0) {
1777
0
            _tablet_max_delete_bitmap_score_metrics->set_value(max_delete_bitmap_score);
1778
0
        }
1779
0
        if (max_base_rowset_delete_bitmap_score > 0) {
1780
0
            _tablet_max_base_rowset_delete_bitmap_score_metrics->set_value(
1781
0
                    max_base_rowset_delete_bitmap_score);
1782
0
        }
1783
0
    }
1784
0
}
1785
#include "common/compile_check_end.h"
1786
} // namespace doris