Coverage Report

Created: 2026-04-15 06:32

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/storage/storage_engine.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "storage/storage_engine.h"
19
20
// IWYU pragma: no_include <bthread/errno.h>
21
#include <fmt/format.h>
22
#include <gen_cpp/AgentService_types.h>
23
#include <gen_cpp/FrontendService.h>
24
#include <gen_cpp/Types_types.h>
25
#include <glog/logging.h>
26
#include <rapidjson/document.h>
27
#include <rapidjson/encodings.h>
28
#include <rapidjson/prettywriter.h>
29
#include <rapidjson/stringbuffer.h>
30
#include <sys/resource.h>
31
#include <thrift/protocol/TDebugProtocol.h>
32
33
#include <algorithm>
34
#include <boost/algorithm/string/case_conv.hpp>
35
#include <boost/container/detail/std_fwd.hpp>
36
#include <cassert>
37
#include <cerrno> // IWYU pragma: keep
38
#include <chrono>
39
#include <cstdlib>
40
#include <cstring>
41
#include <filesystem>
42
#include <iterator>
43
#include <memory>
44
#include <mutex>
45
#include <ostream>
46
#include <set>
47
#include <thread>
48
#include <unordered_set>
49
#include <utility>
50
51
#include "agent/task_worker_pool.h"
52
#include "cloud/cloud_storage_engine.h"
53
#include "common/config.h"
54
#include "common/logging.h"
55
#include "common/metrics/doris_metrics.h"
56
#include "common/metrics/metrics.h"
57
#include "common/status.h"
58
#include "core/assert_cast.h"
59
#include "io/fs/local_file_system.h"
60
#include "load/memtable/memtable_flush_executor.h"
61
#include "load/stream_load/stream_load_recorder.h"
62
#include "runtime/exec_env.h"
63
#include "runtime/memory/global_memory_arbitrator.h"
64
#include "storage/binlog.h"
65
#include "storage/compaction/single_replica_compaction.h"
66
#include "storage/data_dir.h"
67
#include "storage/id_manager.h"
68
#include "storage/olap_common.h"
69
#include "storage/olap_define.h"
70
#include "storage/rowset/rowset_fwd.h"
71
#include "storage/rowset/rowset_meta.h"
72
#include "storage/rowset/rowset_meta_manager.h"
73
#include "storage/rowset/unique_rowset_id_generator.h"
74
#include "storage/snapshot/snapshot_manager.h"
75
#include "storage/tablet/tablet_manager.h"
76
#include "storage/tablet/tablet_meta.h"
77
#include "storage/tablet/tablet_meta_manager.h"
78
#include "storage/txn/txn_manager.h"
79
#include "util/client_cache.h"
80
#include "util/mem_info.h"
81
#include "util/stopwatch.hpp"
82
#include "util/thread.h"
83
#include "util/threadpool.h"
84
#include "util/thrift_rpc_helper.h"
85
#include "util/uid_util.h"
86
#include "util/work_thread_pool.hpp"
87
88
using std::filesystem::directory_iterator;
89
using std::filesystem::path;
90
using std::map;
91
using std::set;
92
using std::string;
93
using std::stringstream;
94
using std::vector;
95
96
namespace doris {
97
using namespace ErrorCode;
98
extern void get_round_robin_stores(int64_t curr_index, const std::vector<DirInfo>& dir_infos,
99
                                   std::vector<DataDir*>& stores);
100
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(unused_rowsets_count, MetricUnit::ROWSETS);
101
bvar::Status<int64_t> g_max_rowsets_with_useless_delete_bitmap(
102
        "max_rowsets_with_useless_delete_bitmap", 0);
103
bvar::Status<int64_t> g_max_rowsets_with_useless_delete_bitmap_version(
104
        "max_rowsets_with_useless_delete_bitmap_version", 0);
105
106
namespace {
107
bvar::Adder<uint64_t> unused_rowsets_counter("ununsed_rowsets_counter");
108
};
109
110
BaseStorageEngine::BaseStorageEngine(Type type, const UniqueId& backend_uid)
111
520
        : _type(type),
112
520
          _rowset_id_generator(std::make_unique<UniqueRowsetIdGenerator>(backend_uid)),
113
520
          _stop_background_threads_latch(1) {
114
520
    _memory_limitation_bytes_for_schema_change = static_cast<int64_t>(
115
520
            static_cast<double>(MemInfo::soft_mem_limit()) * config::schema_change_mem_limit_frac);
116
520
    _tablet_max_delete_bitmap_score_metrics =
117
520
            std::make_shared<bvar::Status<size_t>>("tablet_max", "delete_bitmap_score", 0);
118
520
    _tablet_max_base_rowset_delete_bitmap_score_metrics = std::make_shared<bvar::Status<size_t>>(
119
520
            "tablet_max_base_rowset", "delete_bitmap_score", 0);
120
520
}
121
122
516
BaseStorageEngine::~BaseStorageEngine() = default;
123
124
232k
RowsetId BaseStorageEngine::next_rowset_id() {
125
232k
    return _rowset_id_generator->next_id();
126
232k
}
127
128
25.2k
StorageEngine& BaseStorageEngine::to_local() {
129
25.2k
    CHECK_EQ(_type, Type::LOCAL);
130
25.2k
    return *static_cast<StorageEngine*>(this);
131
25.2k
}
132
133
1.98M
CloudStorageEngine& BaseStorageEngine::to_cloud() {
134
1.98M
    CHECK_EQ(_type, Type::CLOUD);
135
1.98M
    return *static_cast<CloudStorageEngine*>(this);
136
1.98M
}
137
138
31.9k
int64_t BaseStorageEngine::memory_limitation_bytes_per_thread_for_schema_change() const {
139
31.9k
    return std::max(_memory_limitation_bytes_for_schema_change / config::alter_tablet_worker_count,
140
31.9k
                    config::memory_limitation_per_thread_for_schema_change_bytes);
141
31.9k
}
142
143
596
void BaseStorageEngine::notify_build_index_task_begin() {
144
596
    _running_build_index_tasks.fetch_add(1, std::memory_order_relaxed);
145
596
}
146
147
595
void BaseStorageEngine::notify_build_index_task_end() {
148
595
    auto old_value = _running_build_index_tasks.fetch_sub(1, std::memory_order_relaxed);
149
595
    DCHECK_GT(old_value, 0);
150
595
}
151
152
5
int32_t BaseStorageEngine::running_build_index_tasks() const {
153
5
    return std::max(1, _running_build_index_tasks.load(std::memory_order_relaxed));
154
5
}
155
156
605
int64_t BaseStorageEngine::memory_limitation_bytes_for_build_index() const {
157
605
    int64_t min_limit = config::build_index_min_memory_per_task_bytes;
158
605
    int64_t soft_mem_limit = MemInfo::soft_mem_limit();
159
160
605
    int64_t limit = static_cast<int64_t>(static_cast<double>(soft_mem_limit) *
161
605
                                         config::build_index_mem_limit_frac);
162
163
605
    int64_t process_memory_usage = GlobalMemoryArbitrator::process_memory_usage();
164
605
    int64_t remaining = soft_mem_limit - process_memory_usage;
165
605
    if (remaining < limit) {
166
586
        limit = std::max(remaining, min_limit);
167
586
    }
168
169
605
    int64_t high_watermark = soft_mem_limit * config::build_index_memory_high_watermark_pct / 100;
170
605
    int64_t low_watermark = soft_mem_limit * config::build_index_memory_low_watermark_pct / 100;
171
172
605
    if (process_memory_usage >= high_watermark) {
173
3
        limit = min_limit;
174
602
    } else if (process_memory_usage >= low_watermark) {
175
1
        limit = std::max(limit / 2, min_limit);
176
1
    }
177
178
605
    return std::max(limit, min_limit);
179
605
}
180
181
7
void BaseStorageEngine::_start_adaptive_thread_controller() {
182
7
    if (!config::enable_adaptive_flush_threads) {
183
0
        return;
184
0
    }
185
186
7
    auto* system_metrics = DorisMetrics::instance()->system_metrics();
187
7
    auto* s3_upload_pool = ExecEnv::GetInstance()->s3_file_upload_thread_pool();
188
189
7
    _adaptive_thread_controller.init(system_metrics, s3_upload_pool);
190
191
7
    if (_memtable_flush_executor) {
192
7
        auto* flush_pool = _memtable_flush_executor->flush_pool();
193
7
        auto* high_prio_pool = _memtable_flush_executor->high_prio_flush_pool();
194
7
        _adaptive_thread_controller.add("flush", {flush_pool, high_prio_pool},
195
7
                                        AdaptiveThreadPoolController::make_flush_adjust_func(
196
7
                                                &_adaptive_thread_controller, flush_pool),
197
7
                                        config::max_flush_thread_num_per_cpu,
198
7
                                        config::min_flush_thread_num_per_cpu);
199
7
    }
200
7
}
201
202
45
Status BaseStorageEngine::init_stream_load_recorder(const std::string& stream_load_record_path) {
203
45
    LOG(INFO) << "stream load record path: " << stream_load_record_path;
204
    // init stream load record rocksdb
205
45
    _stream_load_recorder = StreamLoadRecorder::create_shared(stream_load_record_path);
206
45
    if (_stream_load_recorder == nullptr) {
207
0
        RETURN_NOT_OK_STATUS_WITH_WARN(
208
0
                Status::MemoryAllocFailed("allocate memory for StreamLoadRecorder failed"),
209
0
                "new StreamLoadRecorder failed");
210
0
    }
211
45
    auto st = _stream_load_recorder->init();
212
45
    if (!st.ok()) {
213
0
        RETURN_NOT_OK_STATUS_WITH_WARN(
214
0
                Status::IOError("open StreamLoadRecorder rocksdb failed, path={}",
215
0
                                stream_load_record_path),
216
0
                "init StreamLoadRecorder failed");
217
0
    }
218
45
    return Status::OK();
219
45
}
220
221
0
void CompactionSubmitRegistry::jsonfy_compaction_status(std::string* result) {
222
0
    rapidjson::Document root;
223
0
    root.SetObject();
224
225
0
    auto add_node = [&root](const std::string& name, const Registry& registry) {
226
0
        rapidjson::Value compaction_name;
227
0
        compaction_name.SetString(name.c_str(), cast_set<uint32_t>(name.length()),
228
0
                                  root.GetAllocator());
229
0
        rapidjson::Document path_obj;
230
0
        path_obj.SetObject();
231
0
        for (const auto& it : registry) {
232
0
            const auto& dir = it.first->path();
233
0
            rapidjson::Value path_key;
234
0
            path_key.SetString(dir.c_str(), cast_set<uint32_t>(dir.length()), root.GetAllocator());
235
236
0
            rapidjson::Document arr;
237
0
            arr.SetArray();
238
239
0
            for (const auto& tablet : it.second) {
240
0
                rapidjson::Value tablet_id;
241
0
                auto tablet_id_str = std::to_string(tablet->tablet_id());
242
0
                tablet_id.SetString(tablet_id_str.c_str(),
243
0
                                    cast_set<uint32_t>(tablet_id_str.length()),
244
0
                                    root.GetAllocator());
245
0
                arr.PushBack(tablet_id, root.GetAllocator());
246
0
            }
247
0
            path_obj.AddMember(path_key, arr, root.GetAllocator());
248
0
        }
249
0
        root.AddMember(compaction_name, path_obj, root.GetAllocator());
250
0
    };
251
252
0
    std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex);
253
0
    add_node("BaseCompaction", _tablet_submitted_base_compaction);
254
0
    add_node("CumulativeCompaction", _tablet_submitted_cumu_compaction);
255
0
    add_node("FullCompaction", _tablet_submitted_full_compaction);
256
257
0
    rapidjson::StringBuffer str_buf;
258
0
    rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(str_buf);
259
0
    root.Accept(writer);
260
0
    *result = std::string(str_buf.GetString());
261
0
}
262
263
44
static Status _validate_options(const EngineOptions& options) {
264
44
    if (options.store_paths.empty()) {
265
0
        return Status::InternalError("store paths is empty");
266
0
    }
267
44
    return Status::OK();
268
44
}
269
270
44
Status StorageEngine::open() {
271
44
    RETURN_IF_ERROR(_validate_options(_options));
272
44
    LOG(INFO) << "starting backend using uid:" << _options.backend_uid.to_string();
273
44
    RETURN_NOT_OK_STATUS_WITH_WARN(_open(), "open engine failed");
274
44
    LOG(INFO) << "success to init storage engine.";
275
44
    return Status::OK();
276
44
}
277
278
StorageEngine::StorageEngine(const EngineOptions& options)
279
360
        : BaseStorageEngine(Type::LOCAL, options.backend_uid),
280
360
          _options(options),
281
360
          _available_storage_medium_type_count(0),
282
360
          _is_all_cluster_id_exist(true),
283
360
          _stopped(false),
284
360
          _tablet_manager(new TabletManager(*this, config::tablet_map_shard_size)),
285
360
          _txn_manager(new TxnManager(*this, config::txn_map_shard_size, config::txn_shard_size)),
286
360
          _default_rowset_type(BETA_ROWSET),
287
360
          _create_tablet_idx_lru_cache(
288
360
                  new CreateTabletRRIdxCache(config::partition_disk_index_lru_size)),
289
360
          _snapshot_mgr(std::make_unique<SnapshotManager>(*this)) {
290
360
    REGISTER_HOOK_METRIC(unused_rowsets_count, [this]() {
291
        // std::lock_guard<std::mutex> lock(_gc_mutex);
292
360
        return _unused_rowsets.size();
293
360
    });
294
295
360
    _broken_paths = options.broken_paths;
296
360
}
297
298
357
StorageEngine::~StorageEngine() {
299
357
    stop();
300
357
}
301
302
44
static Status load_data_dirs(const std::vector<DataDir*>& data_dirs) {
303
44
    std::unique_ptr<ThreadPool> pool;
304
305
44
    int num_threads = config::load_data_dirs_threads;
306
44
    if (num_threads <= 0) {
307
44
        num_threads = cast_set<int>(data_dirs.size());
308
44
    }
309
310
44
    auto st = ThreadPoolBuilder("load_data_dir")
311
44
                      .set_min_threads(num_threads)
312
44
                      .set_max_threads(num_threads)
313
44
                      .build(&pool);
314
44
    CHECK(st.ok()) << st;
315
316
44
    std::mutex result_mtx;
317
44
    Status result;
318
319
55
    for (auto* data_dir : data_dirs) {
320
55
        st = pool->submit_func([&, data_dir] {
321
55
            SCOPED_INIT_THREAD_CONTEXT();
322
55
            {
323
55
                std::lock_guard lock(result_mtx);
324
55
                if (!result.ok()) { // Some data dir has failed
325
0
                    return;
326
0
                }
327
55
            }
328
329
55
            auto st = data_dir->load();
330
55
            if (!st.ok()) {
331
0
                LOG(WARNING) << "error occured when init load tables. res=" << st
332
0
                             << ", data dir=" << data_dir->path();
333
0
                std::lock_guard lock(result_mtx);
334
0
                result = std::move(st);
335
0
            }
336
55
        });
337
338
55
        if (!st.ok()) {
339
0
            return st;
340
0
        }
341
55
    }
342
343
44
    pool->wait();
344
345
44
    return result;
346
44
}
347
348
44
Status StorageEngine::_open() {
349
    // init store_map
350
44
    RETURN_NOT_OK_STATUS_WITH_WARN(_init_store_map(), "_init_store_map failed");
351
352
44
    _effective_cluster_id = config::cluster_id;
353
44
    RETURN_NOT_OK_STATUS_WITH_WARN(_check_all_root_path_cluster_id(), "fail to check cluster id");
354
355
44
    _update_storage_medium_type_count();
356
357
44
    RETURN_NOT_OK_STATUS_WITH_WARN(_check_file_descriptor_number(), "check fd number failed");
358
359
44
    auto dirs = get_stores();
360
44
    RETURN_IF_ERROR(load_data_dirs(dirs));
361
362
44
    _disk_num = cast_set<int>(dirs.size());
363
44
    _memtable_flush_executor = std::make_unique<MemTableFlushExecutor>();
364
44
    _memtable_flush_executor->init(_disk_num);
365
366
44
    _calc_delete_bitmap_executor = std::make_unique<CalcDeleteBitmapExecutor>();
367
44
    _calc_delete_bitmap_executor->init(config::calc_delete_bitmap_max_thread);
368
369
44
    _calc_delete_bitmap_executor_for_load = std::make_unique<CalcDeleteBitmapExecutor>();
370
44
    _calc_delete_bitmap_executor_for_load->init(
371
44
            config::calc_delete_bitmap_for_load_max_thread > 0
372
44
                    ? config::calc_delete_bitmap_for_load_max_thread
373
44
                    : std::max(1, CpuInfo::num_cores() / 2));
374
375
44
    _parse_default_rowset_type();
376
377
44
    return Status::OK();
378
44
}
379
380
44
Status StorageEngine::_init_store_map() {
381
44
    std::vector<std::thread> threads;
382
44
    std::mutex error_msg_lock;
383
44
    std::string error_msg;
384
55
    for (auto& path : _options.store_paths) {
385
55
        auto store = std::make_unique<DataDir>(*this, path.path, path.capacity_bytes,
386
55
                                               path.storage_medium);
387
55
        threads.emplace_back([store = store.get(), &error_msg_lock, &error_msg]() {
388
55
            SCOPED_INIT_THREAD_CONTEXT();
389
55
            auto st = store->init();
390
55
            if (!st.ok()) {
391
0
                {
392
0
                    std::lock_guard<std::mutex> l(error_msg_lock);
393
0
                    error_msg.append(st.to_string() + ";");
394
0
                }
395
0
                LOG(WARNING) << "Store load failed, status=" << st.to_string()
396
0
                             << ", path=" << store->path();
397
0
            }
398
55
        });
399
55
        _store_map.emplace(store->path(), std::move(store));
400
55
    }
401
55
    for (auto& thread : threads) {
402
55
        thread.join();
403
55
    }
404
405
    // All store paths MUST init successfully
406
44
    if (!error_msg.empty()) {
407
0
        return Status::InternalError("init path failed, error={}", error_msg);
408
0
    }
409
410
44
    RETURN_NOT_OK_STATUS_WITH_WARN(init_stream_load_recorder(_options.store_paths[0].path),
411
44
                                   "init StreamLoadRecorder failed");
412
413
44
    return Status::OK();
414
44
}
415
416
1.92k
void StorageEngine::_update_storage_medium_type_count() {
417
1.92k
    set<TStorageMedium::type> available_storage_medium_types;
418
419
1.92k
    std::lock_guard<std::mutex> l(_store_lock);
420
2.16k
    for (auto& it : _store_map) {
421
2.16k
        if (it.second->is_used()) {
422
2.16k
            available_storage_medium_types.insert(it.second->storage_medium());
423
2.16k
        }
424
2.16k
    }
425
426
1.92k
    _available_storage_medium_type_count =
427
1.92k
            cast_set<uint32_t>(available_storage_medium_types.size());
428
1.92k
}
429
430
44
Status StorageEngine::_judge_and_update_effective_cluster_id(int32_t cluster_id) {
431
44
    if (cluster_id == -1 && _effective_cluster_id == -1) {
432
        // maybe this is a new cluster, cluster id will get from heartbeat message
433
40
        return Status::OK();
434
40
    } else if (cluster_id != -1 && _effective_cluster_id == -1) {
435
4
        _effective_cluster_id = cluster_id;
436
4
        return Status::OK();
437
4
    } else if (cluster_id == -1 && _effective_cluster_id != -1) {
438
        // _effective_cluster_id is the right effective cluster id
439
0
        return Status::OK();
440
0
    } else {
441
0
        if (cluster_id != _effective_cluster_id) {
442
0
            RETURN_NOT_OK_STATUS_WITH_WARN(
443
0
                    Status::Corruption("multiple cluster ids is not equal. one={}, other={}",
444
0
                                       _effective_cluster_id, cluster_id),
445
0
                    "cluster id not equal");
446
0
        }
447
0
    }
448
449
0
    return Status::OK();
450
44
}
451
452
884
std::vector<DataDir*> StorageEngine::get_stores(bool include_unused) {
453
884
    std::vector<DataDir*> stores;
454
884
    stores.reserve(_store_map.size());
455
456
884
    std::lock_guard<std::mutex> l(_store_lock);
457
884
    if (include_unused) {
458
152
        for (auto&& [_, store] : _store_map) {
459
151
            stores.push_back(store.get());
460
151
        }
461
732
    } else {
462
799
        for (auto&& [_, store] : _store_map) {
463
799
            if (store->is_used()) {
464
799
                stores.push_back(store.get());
465
799
            }
466
799
        }
467
732
    }
468
884
    return stores;
469
884
}
470
471
Status StorageEngine::get_all_data_dir_info(std::vector<DataDirInfo>* data_dir_infos,
472
371
                                            bool need_update) {
473
371
    Status res = Status::OK();
474
371
    data_dir_infos->clear();
475
476
371
    MonotonicStopWatch timer;
477
371
    timer.start();
478
479
    // 1. update available capacity of each data dir
480
    // get all root path info and construct a path map.
481
    // path -> DataDirInfo
482
371
    std::map<std::string, DataDirInfo> path_map;
483
371
    {
484
371
        std::lock_guard<std::mutex> l(_store_lock);
485
417
        for (auto& it : _store_map) {
486
417
            if (need_update) {
487
353
                RETURN_IF_ERROR(it.second->update_capacity());
488
353
            }
489
417
            path_map.emplace(it.first, it.second->get_dir_info());
490
417
        }
491
371
    }
492
493
    // 2. get total tablets' size of each data dir
494
371
    size_t tablet_count = 0;
495
371
    _tablet_manager->update_root_path_info(&path_map, &tablet_count);
496
497
    // 3. update metrics in DataDir
498
417
    for (auto& path : path_map) {
499
417
        std::lock_guard<std::mutex> l(_store_lock);
500
417
        auto data_dir = _store_map.find(path.first);
501
417
        DCHECK(data_dir != _store_map.end());
502
417
        data_dir->second->update_local_data_size(path.second.local_used_capacity);
503
417
        data_dir->second->update_remote_data_size(path.second.remote_used_capacity);
504
417
    }
505
506
    // add path info to data_dir_infos
507
417
    for (auto& entry : path_map) {
508
417
        data_dir_infos->emplace_back(entry.second);
509
417
    }
510
511
371
    timer.stop();
512
371
    LOG(INFO) << "get root path info cost: " << timer.elapsed_time() / 1000000
513
371
              << " ms. tablet counter: " << tablet_count;
514
515
371
    return res;
516
371
}
517
518
64
int64_t StorageEngine::get_file_or_directory_size(const std::string& file_path) {
519
64
    if (!std::filesystem::exists(file_path)) {
520
64
        return 0;
521
64
    }
522
0
    if (!std::filesystem::is_directory(file_path)) {
523
0
        return std::filesystem::file_size(file_path);
524
0
    }
525
0
    int64_t sum_size = 0;
526
0
    for (const auto& it : std::filesystem::directory_iterator(file_path)) {
527
0
        sum_size += get_file_or_directory_size(it.path());
528
0
    }
529
0
    return sum_size;
530
0
}
531
532
1.88k
void StorageEngine::_start_disk_stat_monitor() {
533
2.10k
    for (auto& it : _store_map) {
534
2.10k
        it.second->health_check();
535
2.10k
    }
536
537
1.88k
    _update_storage_medium_type_count();
538
539
1.88k
    _exit_if_too_many_disks_are_failed();
540
1.88k
}
541
542
// TODO(lingbin): Should be in EnvPosix?
543
44
Status StorageEngine::_check_file_descriptor_number() {
544
44
    struct rlimit l;
545
44
    int ret = getrlimit(RLIMIT_NOFILE, &l);
546
44
    if (ret != 0) {
547
0
        LOG(WARNING) << "call getrlimit() failed. errno=" << strerror(errno)
548
0
                     << ", use default configuration instead.";
549
0
        return Status::OK();
550
0
    }
551
44
    if (getenv("SKIP_CHECK_ULIMIT") == nullptr) {
552
44
        LOG(INFO) << "will check 'ulimit' value.";
553
44
    } else if (std::string(getenv("SKIP_CHECK_ULIMIT")) == "true") {
554
0
        LOG(INFO) << "the 'ulimit' value check is skipped"
555
0
                  << ", the SKIP_CHECK_ULIMIT env value is " << getenv("SKIP_CHECK_ULIMIT");
556
0
        return Status::OK();
557
0
    } else {
558
0
        LOG(INFO) << "the SKIP_CHECK_ULIMIT env value is " << getenv("SKIP_CHECK_ULIMIT")
559
0
                  << ", will check ulimit value.";
560
0
    }
561
44
    if (l.rlim_cur < config::min_file_descriptor_number) {
562
0
        LOG(ERROR) << "File descriptor number is less than " << config::min_file_descriptor_number
563
0
                   << ". Please use (ulimit -n) to set a value equal or greater than "
564
0
                   << config::min_file_descriptor_number;
565
0
        return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
566
0
                "file descriptors limit {} is small than {}", l.rlim_cur,
567
0
                config::min_file_descriptor_number);
568
0
    }
569
44
    return Status::OK();
570
44
}
571
572
44
Status StorageEngine::_check_all_root_path_cluster_id() {
573
44
    int32_t cluster_id = -1;
574
55
    for (auto& it : _store_map) {
575
55
        int32_t tmp_cluster_id = it.second->cluster_id();
576
55
        if (it.second->cluster_id_incomplete()) {
577
47
            _is_all_cluster_id_exist = false;
578
47
        } else if (tmp_cluster_id == cluster_id) {
579
            // both have right cluster id, do nothing
580
4
        } else if (cluster_id == -1) {
581
4
            cluster_id = tmp_cluster_id;
582
4
        } else {
583
0
            RETURN_NOT_OK_STATUS_WITH_WARN(
584
0
                    Status::Corruption("multiple cluster ids is not equal. one={}, other={}",
585
0
                                       cluster_id, tmp_cluster_id),
586
0
                    "cluster id not equal");
587
0
        }
588
55
    }
589
590
    // judge and get effective cluster id
591
44
    RETURN_IF_ERROR(_judge_and_update_effective_cluster_id(cluster_id));
592
593
    // write cluster id into cluster_id_path if get effective cluster id success
594
44
    if (_effective_cluster_id != -1 && !_is_all_cluster_id_exist) {
595
0
        RETURN_IF_ERROR(set_cluster_id(_effective_cluster_id));
596
0
    }
597
598
44
    return Status::OK();
599
44
}
600
601
2
Status StorageEngine::set_cluster_id(int32_t cluster_id) {
602
2
    std::lock_guard<std::mutex> l(_store_lock);
603
2
    for (auto& it : _store_map) {
604
2
        RETURN_IF_ERROR(it.second->set_cluster_id(cluster_id));
605
2
    }
606
2
    _effective_cluster_id = cluster_id;
607
2
    _is_all_cluster_id_exist = true;
608
2
    return Status::OK();
609
2
}
610
611
int StorageEngine::_get_and_set_next_disk_index(int64_t partition_id,
612
6.30k
                                                TStorageMedium::type storage_medium) {
613
6.30k
    auto key = CreateTabletRRIdxCache::get_key(partition_id, storage_medium);
614
6.30k
    int curr_index = _create_tablet_idx_lru_cache->get_index(key);
615
    // -1, lru can't find key
616
6.30k
    if (curr_index == -1) {
617
1.31k
        curr_index = std::max(0, _last_use_index[storage_medium] + 1);
618
1.31k
    }
619
6.30k
    _last_use_index[storage_medium] = curr_index;
620
6.30k
    _create_tablet_idx_lru_cache->set_index(key, std::max(0, curr_index + 1));
621
6.30k
    return curr_index;
622
6.30k
}
623
624
void StorageEngine::_get_candidate_stores(TStorageMedium::type storage_medium,
625
6.30k
                                          std::vector<DirInfo>& dir_infos) {
626
6.30k
    std::vector<double> usages;
627
6.30k
    for (auto& it : _store_map) {
628
6.30k
        DataDir* data_dir = it.second.get();
629
6.30k
        if (data_dir->is_used()) {
630
6.30k
            if ((_available_storage_medium_type_count == 1 ||
631
6.30k
                 data_dir->storage_medium() == storage_medium) &&
632
6.30k
                !data_dir->reach_capacity_limit(0)) {
633
6.30k
                double usage = data_dir->get_usage(0);
634
6.30k
                DirInfo dir_info;
635
6.30k
                dir_info.data_dir = data_dir;
636
6.30k
                dir_info.usage = usage;
637
6.30k
                dir_info.available_level = 0;
638
6.30k
                usages.push_back(usage);
639
6.30k
                dir_infos.push_back(dir_info);
640
6.30k
            }
641
6.30k
        }
642
6.30k
    }
643
644
6.30k
    if (dir_infos.size() <= 1) {
645
6.30k
        return;
646
6.30k
    }
647
648
1
    std::sort(usages.begin(), usages.end());
649
1
    if (usages.back() < 0.7) {
650
1
        return;
651
1
    }
652
653
0
    std::vector<double> level_min_usages;
654
0
    level_min_usages.push_back(usages[0]);
655
0
    for (auto usage : usages) {
656
        // usage < 0.7 consider as one level, give a small skew
657
0
        if (usage < 0.7 - (config::high_disk_avail_level_diff_usages / 2.0)) {
658
0
            continue;
659
0
        }
660
661
        // at high usages,  default 15% is one level
662
        // for example: there disk usages are:   0.66,  0.72,  0.83
663
        // then level_min_usages = [0.66, 0.83], divide disks into 2 levels:  [0.66, 0.72], [0.83]
664
0
        if (usage >= level_min_usages.back() + config::high_disk_avail_level_diff_usages) {
665
0
            level_min_usages.push_back(usage);
666
0
        }
667
0
    }
668
0
    for (auto& dir_info : dir_infos) {
669
0
        double usage = dir_info.usage;
670
0
        for (size_t i = 1; i < level_min_usages.size() && usage >= level_min_usages[i]; i++) {
671
0
            dir_info.available_level++;
672
0
        }
673
674
        // when usage is too high, no matter consider balance now,
675
        // make it a higher level.
676
        // for example, two disks and usages are: 0.85 and 0.92, then let tablets fall on the first disk.
677
        // by default, storage_flood_stage_usage_percent = 90
678
0
        if (usage > config::storage_flood_stage_usage_percent / 100.0) {
679
0
            dir_info.available_level++;
680
0
        }
681
0
    }
682
0
}
683
684
std::vector<DataDir*> StorageEngine::get_stores_for_create_tablet(
685
6.30k
        int64_t partition_id, TStorageMedium::type storage_medium) {
686
6.30k
    std::vector<DirInfo> dir_infos;
687
6.30k
    int curr_index = 0;
688
6.30k
    std::vector<DataDir*> stores;
689
6.30k
    {
690
6.30k
        std::lock_guard<std::mutex> l(_store_lock);
691
6.30k
        curr_index = _get_and_set_next_disk_index(partition_id, storage_medium);
692
6.30k
        _get_candidate_stores(storage_medium, dir_infos);
693
6.30k
    }
694
695
6.30k
    std::sort(dir_infos.begin(), dir_infos.end());
696
6.30k
    get_round_robin_stores(curr_index, dir_infos, stores);
697
698
6.30k
    return stores;
699
6.30k
}
700
701
// maintain in stores LOW,MID,HIGH level round robin
702
void get_round_robin_stores(int64_t curr_index, const std::vector<DirInfo>& dir_infos,
703
6.30k
                            std::vector<DataDir*>& stores) {
704
12.6k
    for (size_t i = 0; i < dir_infos.size();) {
705
6.30k
        size_t end = i + 1;
706
6.30k
        while (end < dir_infos.size() &&
707
6.30k
               dir_infos[i].available_level == dir_infos[end].available_level) {
708
1
            end++;
709
1
        }
710
        // data dirs [i, end) have the same tablet size, round robin range [i, end)
711
6.30k
        size_t count = end - i;
712
12.6k
        for (size_t k = 0; k < count; k++) {
713
6.30k
            size_t index = i + ((k + curr_index) % count);
714
6.30k
            stores.push_back(dir_infos[index].data_dir);
715
6.30k
        }
716
6.30k
        i = end;
717
6.30k
    }
718
6.30k
}
719
720
176
DataDir* StorageEngine::get_store(const std::string& path) {
721
    // _store_map is unchanged, no need to lock
722
176
    auto it = _store_map.find(path);
723
176
    if (it == _store_map.end()) {
724
0
        return nullptr;
725
0
    }
726
176
    return it->second.get();
727
176
}
728
729
1.88k
static bool too_many_disks_are_failed(uint32_t unused_num, uint32_t total_num) {
730
1.88k
    return ((total_num == 0) ||
731
1.88k
            (unused_num * 100 / total_num > config::max_percentage_of_error_disk));
732
1.88k
}
733
734
1.88k
void StorageEngine::_exit_if_too_many_disks_are_failed() {
735
1.88k
    uint32_t unused_root_path_num = 0;
736
1.88k
    uint32_t total_root_path_num = 0;
737
738
1.88k
    {
739
        // TODO(yingchun): _store_map is only updated in main and ~StorageEngine, maybe we can remove it?
740
1.88k
        std::lock_guard<std::mutex> l(_store_lock);
741
1.88k
        if (_store_map.empty()) {
742
0
            return;
743
0
        }
744
745
2.10k
        for (auto& it : _store_map) {
746
2.10k
            ++total_root_path_num;
747
2.10k
            if (it.second->is_used()) {
748
2.10k
                continue;
749
2.10k
            }
750
0
            ++unused_root_path_num;
751
0
        }
752
1.88k
    }
753
754
1.88k
    if (too_many_disks_are_failed(unused_root_path_num, total_root_path_num)) {
755
0
        LOG(FATAL) << "meet too many error disks, process exit. "
756
0
                   << "max_ratio_allowed=" << config::max_percentage_of_error_disk << "%"
757
0
                   << ", error_disk_count=" << unused_root_path_num
758
0
                   << ", total_disk_count=" << total_root_path_num;
759
0
        exit(0);
760
0
    }
761
1.88k
}
762
763
360
void StorageEngine::stop() {
764
360
    if (_stopped) {
765
3
        LOG(WARNING) << "Storage engine is stopped twice.";
766
3
        return;
767
3
    }
768
    // trigger the waiting threads
769
357
    notify_listeners();
770
771
357
    {
772
357
        std::lock_guard<std::mutex> l(_store_lock);
773
357
        for (auto& store_pair : _store_map) {
774
50
            store_pair.second->stop_bg_worker();
775
50
        }
776
357
    }
777
778
357
    _stop_background_threads_latch.count_down();
779
357
#define THREAD_JOIN(thread) \
780
3.92k
    if (thread) {           \
781
33
        thread->join();     \
782
33
    }
783
784
357
    THREAD_JOIN(_compaction_tasks_producer_thread);
785
357
    THREAD_JOIN(_update_replica_infos_thread);
786
357
    THREAD_JOIN(_unused_rowset_monitor_thread);
787
357
    THREAD_JOIN(_garbage_sweeper_thread);
788
357
    THREAD_JOIN(_disk_stat_monitor_thread);
789
357
    THREAD_JOIN(_cache_clean_thread);
790
357
    THREAD_JOIN(_tablet_checkpoint_tasks_producer_thread);
791
357
    THREAD_JOIN(_async_publish_thread);
792
357
    THREAD_JOIN(_cold_data_compaction_producer_thread);
793
357
    THREAD_JOIN(_cooldown_tasks_producer_thread);
794
357
    THREAD_JOIN(_check_delete_bitmap_score_thread);
795
357
#undef THREAD_JOIN
796
797
357
#define THREADS_JOIN(threads)            \
798
357
    for (const auto& thread : threads) { \
799
5
        if (thread) {                    \
800
5
            thread->join();              \
801
5
        }                                \
802
5
    }
803
804
357
    THREADS_JOIN(_path_gc_threads);
805
357
#undef THREADS_JOIN
806
807
357
    if (_base_compaction_thread_pool) {
808
9
        _base_compaction_thread_pool->shutdown();
809
9
    }
810
357
    if (_cumu_compaction_thread_pool) {
811
10
        _cumu_compaction_thread_pool->shutdown();
812
10
    }
813
357
    if (_single_replica_compaction_thread_pool) {
814
3
        _single_replica_compaction_thread_pool->shutdown();
815
3
    }
816
817
357
    if (_seg_compaction_thread_pool) {
818
14
        _seg_compaction_thread_pool->shutdown();
819
14
    }
820
357
    if (_tablet_meta_checkpoint_thread_pool) {
821
3
        _tablet_meta_checkpoint_thread_pool->shutdown();
822
3
    }
823
357
    if (_cold_data_compaction_thread_pool) {
824
3
        _cold_data_compaction_thread_pool->shutdown();
825
3
    }
826
827
357
    if (_cooldown_thread_pool) {
828
3
        _cooldown_thread_pool->shutdown();
829
3
    }
830
831
357
    _adaptive_thread_controller.stop();
832
357
    _memtable_flush_executor.reset(nullptr);
833
357
    _calc_delete_bitmap_executor.reset(nullptr);
834
357
    _calc_delete_bitmap_executor_for_load.reset();
835
836
357
    _stopped = true;
837
357
    LOG(INFO) << "Storage engine is stopped.";
838
357
}
839
840
12
void StorageEngine::clear_transaction_task(const TTransactionId transaction_id) {
841
    // clear transaction task may not contains partitions ids, we should get partition id from txn manager.
842
12
    std::vector<int64_t> partition_ids;
843
12
    _txn_manager->get_partition_ids(transaction_id, &partition_ids);
844
12
    clear_transaction_task(transaction_id, partition_ids);
845
12
}
846
847
void StorageEngine::clear_transaction_task(const TTransactionId transaction_id,
848
14
                                           const std::vector<TPartitionId>& partition_ids) {
849
14
    LOG(INFO) << "begin to clear transaction task. transaction_id=" << transaction_id;
850
851
14
    for (const TPartitionId& partition_id : partition_ids) {
852
14
        std::map<TabletInfo, RowsetSharedPtr> tablet_infos;
853
14
        _txn_manager->get_txn_related_tablets(transaction_id, partition_id, &tablet_infos);
854
855
        // each tablet
856
272
        for (auto& tablet_info : tablet_infos) {
857
            // should use tablet uid to ensure clean txn correctly
858
272
            TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_info.first.tablet_id,
859
272
                                                                 tablet_info.first.tablet_uid);
860
            // The tablet may be dropped or altered, leave a INFO log and go on process other tablet
861
272
            if (tablet == nullptr) {
862
0
                LOG(INFO) << "tablet is no longer exist. tablet_id=" << tablet_info.first.tablet_id
863
0
                          << ", tablet_uid=" << tablet_info.first.tablet_uid;
864
0
                continue;
865
0
            }
866
272
            Status s = _txn_manager->delete_txn(partition_id, tablet, transaction_id);
867
272
            if (!s.ok()) {
868
0
                LOG(WARNING) << "failed to clear transaction. txn_id=" << transaction_id
869
0
                             << ", partition_id=" << partition_id
870
0
                             << ", tablet_id=" << tablet_info.first.tablet_id
871
0
                             << ", status=" << s.to_string();
872
0
            }
873
272
        }
874
14
    }
875
14
    LOG(INFO) << "finish to clear transaction task. transaction_id=" << transaction_id;
876
14
}
877
878
56
Status StorageEngine::start_trash_sweep(double* usage, bool ignore_guard) {
879
56
    Status res = Status::OK();
880
881
56
    std::unique_lock<std::mutex> l(_trash_sweep_lock, std::defer_lock);
882
56
    if (!l.try_lock()) {
883
0
        LOG(INFO) << "trash and snapshot sweep is running.";
884
0
        if (ignore_guard) {
885
0
            _need_clean_trash.store(true, std::memory_order_relaxed);
886
0
        }
887
0
        return res;
888
0
    }
889
890
56
    LOG(INFO) << "start trash and snapshot sweep. is_clean=" << ignore_guard;
891
892
56
    const int32_t snapshot_expire = config::snapshot_expire_time_sec;
893
56
    const int32_t trash_expire = config::trash_file_expire_time_sec;
894
    // the guard space should be lower than storage_flood_stage_usage_percent,
895
    // so here we multiply 0.9
896
    // if ignore_guard is true, set guard_space to 0.
897
56
    const double guard_space =
898
56
            ignore_guard ? 0 : config::storage_flood_stage_usage_percent / 100.0 * 0.9;
899
56
    std::vector<DataDirInfo> data_dir_infos;
900
56
    RETURN_NOT_OK_STATUS_WITH_WARN(get_all_data_dir_info(&data_dir_infos, false),
901
56
                                   "failed to get root path stat info when sweep trash.")
902
56
    std::sort(data_dir_infos.begin(), data_dir_infos.end(), DataDirInfoLessAvailability());
903
904
56
    time_t now = time(nullptr); //获取UTC时间
905
56
    tm local_tm_now;
906
56
    local_tm_now.tm_isdst = 0;
907
56
    if (localtime_r(&now, &local_tm_now) == nullptr) {
908
0
        return Status::Error<OS_ERROR>("fail to localtime_r time. time={}", now);
909
0
    }
910
56
    const time_t local_now = mktime(&local_tm_now); //得到当地日历时间
911
912
56
    double tmp_usage = 0.0;
913
64
    for (DataDirInfo& info : data_dir_infos) {
914
64
        LOG(INFO) << "Start to sweep path " << info.path;
915
64
        if (!info.is_used) {
916
0
            continue;
917
0
        }
918
919
64
        double curr_usage =
920
64
                (double)(info.disk_capacity - info.available) / (double)info.disk_capacity;
921
64
        tmp_usage = std::max(tmp_usage, curr_usage);
922
923
64
        Status curr_res = Status::OK();
924
64
        auto snapshot_path = fmt::format("{}/{}", info.path, SNAPSHOT_PREFIX);
925
64
        curr_res = _do_sweep(snapshot_path, local_now, snapshot_expire);
926
64
        if (!curr_res.ok()) {
927
0
            LOG(WARNING) << "failed to sweep snapshot. path=" << snapshot_path
928
0
                         << ", err_code=" << curr_res;
929
0
            res = curr_res;
930
0
        }
931
932
64
        auto trash_path = fmt::format("{}/{}", info.path, TRASH_PREFIX);
933
64
        curr_res = _do_sweep(trash_path, local_now, curr_usage > guard_space ? 0 : trash_expire);
934
64
        if (!curr_res.ok()) {
935
0
            LOG(WARNING) << "failed to sweep trash. path=" << trash_path
936
0
                         << ", err_code=" << curr_res;
937
0
            res = curr_res;
938
0
        }
939
64
    }
940
941
56
    if (usage != nullptr) {
942
56
        *usage = tmp_usage; // update usage
943
56
    }
944
945
    // clear expire incremental rowset, move deleted tablet to trash
946
56
    RETURN_IF_ERROR(_tablet_manager->start_trash_sweep());
947
948
    // clean rubbish transactions
949
56
    _clean_unused_txns();
950
951
    // clean unused rowset metas in OlapMeta
952
56
    _clean_unused_rowset_metas();
953
954
    // clean unused binlog metas in OlapMeta
955
56
    _clean_unused_binlog_metas();
956
957
    // cleand unused delete bitmap for deleted tablet
958
56
    _clean_unused_delete_bitmap();
959
960
    // cleand unused pending publish info for deleted tablet
961
56
    _clean_unused_pending_publish_info();
962
963
    // clean unused partial update info for finished txns
964
56
    _clean_unused_partial_update_info();
965
966
    // clean unused rowsets in remote storage backends
967
64
    for (auto data_dir : get_stores()) {
968
64
        data_dir->perform_remote_rowset_gc();
969
64
        data_dir->perform_remote_tablet_gc();
970
64
        data_dir->update_trash_capacity();
971
64
    }
972
973
56
    return res;
974
56
}
975
976
56
void StorageEngine::_clean_unused_rowset_metas() {
977
56
    std::vector<RowsetMetaSharedPtr> invalid_rowset_metas;
978
56
    auto clean_rowset_func = [this, &invalid_rowset_metas](TabletUid tablet_uid, RowsetId rowset_id,
979
29.6k
                                                           std::string_view meta_str) -> bool {
980
        // return false will break meta iterator, return true to skip this error
981
29.6k
        RowsetMetaSharedPtr rowset_meta(new RowsetMeta());
982
29.6k
        bool parsed = rowset_meta->init(meta_str);
983
29.6k
        if (!parsed) {
984
0
            LOG(WARNING) << "parse rowset meta string failed for rowset_id:" << rowset_id;
985
0
            invalid_rowset_metas.push_back(rowset_meta);
986
0
            return true;
987
0
        }
988
29.6k
        if (rowset_meta->tablet_uid() != tablet_uid) {
989
0
            LOG(WARNING) << "tablet uid is not equal, skip the rowset"
990
0
                         << ", rowset_id=" << rowset_meta->rowset_id()
991
0
                         << ", in_put_tablet_uid=" << tablet_uid
992
0
                         << ", tablet_uid in rowset meta=" << rowset_meta->tablet_uid();
993
0
            invalid_rowset_metas.push_back(rowset_meta);
994
0
            return true;
995
0
        }
996
997
29.6k
        TabletSharedPtr tablet = _tablet_manager->get_tablet(rowset_meta->tablet_id());
998
29.6k
        if (tablet == nullptr) {
999
            // tablet may be dropped
1000
            // TODO(cmy): this is better to be a VLOG, because drop table is a very common case.
1001
            // leave it as INFO log for observation. Maybe change it in future.
1002
748
            LOG(INFO) << "failed to find tablet " << rowset_meta->tablet_id()
1003
748
                      << " for rowset: " << rowset_meta->rowset_id() << ", tablet may be dropped";
1004
748
            invalid_rowset_metas.push_back(rowset_meta);
1005
748
            return true;
1006
748
        }
1007
28.9k
        if (tablet->tablet_uid() != rowset_meta->tablet_uid()) {
1008
            // In this case, we get the tablet using the tablet id recorded in the rowset meta.
1009
            // but the uid in the tablet is different from the one recorded in the rowset meta.
1010
            // How this happened:
1011
            // Replica1 of Tablet A exists on BE1. Because of the clone task, a new replica2 is createed on BE2,
1012
            // and then replica1 deleted from BE1. After some time, we created replica again on BE1,
1013
            // which will creates a new tablet with the same id but a different uid.
1014
            // And in the historical version, when we deleted the replica, we did not delete the corresponding rowset meta,
1015
            // thus causing the original rowset meta to remain(with same tablet id but different uid).
1016
0
            LOG(WARNING) << "rowset's tablet uid " << rowset_meta->tablet_uid()
1017
0
                         << " does not equal to tablet uid: " << tablet->tablet_uid();
1018
0
            invalid_rowset_metas.push_back(rowset_meta);
1019
0
            return true;
1020
0
        }
1021
28.9k
        if (rowset_meta->rowset_state() == RowsetStatePB::VISIBLE &&
1022
28.9k
            (!tablet->rowset_meta_is_useful(rowset_meta)) &&
1023
28.9k
            !check_rowset_id_in_unused_rowsets(rowset_id)) {
1024
112
            LOG(INFO) << "rowset meta is not used any more, remove it. rowset_id="
1025
112
                      << rowset_meta->rowset_id();
1026
112
            invalid_rowset_metas.push_back(rowset_meta);
1027
112
        }
1028
28.9k
        return true;
1029
28.9k
    };
1030
56
    auto data_dirs = get_stores();
1031
64
    for (auto data_dir : data_dirs) {
1032
64
        static_cast<void>(
1033
64
                RowsetMetaManager::traverse_rowset_metas(data_dir->get_meta(), clean_rowset_func));
1034
        // 1. delete delete_bitmap
1035
64
        std::set<int64_t> tablets_to_save_meta;
1036
860
        for (auto& rowset_meta : invalid_rowset_metas) {
1037
860
            TabletSharedPtr tablet = _tablet_manager->get_tablet(rowset_meta->tablet_id());
1038
860
            if (tablet && tablet->tablet_meta()->enable_unique_key_merge_on_write()) {
1039
104
                tablet->tablet_meta()->remove_rowset_delete_bitmap(rowset_meta->rowset_id(),
1040
104
                                                                   rowset_meta->version());
1041
104
                tablets_to_save_meta.emplace(tablet->tablet_id());
1042
104
            }
1043
860
        }
1044
64
        for (const auto& tablet_id : tablets_to_save_meta) {
1045
22
            auto tablet = _tablet_manager->get_tablet(tablet_id);
1046
22
            if (tablet) {
1047
22
                std::shared_lock rlock(tablet->get_header_lock());
1048
22
                tablet->save_meta();
1049
22
            }
1050
22
        }
1051
        // 2. delete rowset meta
1052
860
        for (auto& rowset_meta : invalid_rowset_metas) {
1053
860
            static_cast<void>(RowsetMetaManager::remove(
1054
860
                    data_dir->get_meta(), rowset_meta->tablet_uid(), rowset_meta->rowset_id()));
1055
860
        }
1056
64
        LOG(INFO) << "remove " << invalid_rowset_metas.size()
1057
64
                  << " invalid rowset meta from dir: " << data_dir->path();
1058
64
        invalid_rowset_metas.clear();
1059
64
    }
1060
56
}
1061
1062
56
void StorageEngine::_clean_unused_binlog_metas() {
1063
56
    std::vector<std::string> unused_binlog_key_suffixes;
1064
56
    auto unused_binlog_collector = [this, &unused_binlog_key_suffixes](std::string_view key,
1065
56
                                                                       std::string_view value,
1066
248
                                                                       bool need_check) -> bool {
1067
248
        if (need_check) {
1068
248
            BinlogMetaEntryPB binlog_meta_pb;
1069
248
            if (UNLIKELY(!binlog_meta_pb.ParseFromArray(value.data(),
1070
248
                                                        cast_set<int>(value.size())))) {
1071
0
                LOG(WARNING) << "parse rowset meta string failed for binlog meta key: " << key;
1072
248
            } else if (_tablet_manager->get_tablet(binlog_meta_pb.tablet_id()) == nullptr) {
1073
0
                LOG(INFO) << "failed to find tablet " << binlog_meta_pb.tablet_id()
1074
0
                          << " for binlog rowset: " << binlog_meta_pb.rowset_id()
1075
0
                          << ", tablet may be dropped";
1076
248
            } else {
1077
248
                return false;
1078
248
            }
1079
248
        }
1080
1081
0
        unused_binlog_key_suffixes.emplace_back(key.substr(kBinlogMetaPrefix.size()));
1082
0
        return true;
1083
248
    };
1084
56
    auto data_dirs = get_stores();
1085
64
    for (auto data_dir : data_dirs) {
1086
64
        static_cast<void>(RowsetMetaManager::traverse_binlog_metas(data_dir->get_meta(),
1087
64
                                                                   unused_binlog_collector));
1088
64
        for (const auto& suffix : unused_binlog_key_suffixes) {
1089
0
            static_cast<void>(RowsetMetaManager::remove_binlog(data_dir->get_meta(), suffix));
1090
0
        }
1091
64
        LOG(INFO) << "remove " << unused_binlog_key_suffixes.size()
1092
64
                  << " invalid binlog meta from dir: " << data_dir->path();
1093
64
        unused_binlog_key_suffixes.clear();
1094
64
    }
1095
56
}
1096
1097
56
void StorageEngine::_clean_unused_delete_bitmap() {
1098
56
    std::unordered_set<int64_t> removed_tablets;
1099
56
    auto clean_delete_bitmap_func = [this, &removed_tablets](int64_t tablet_id, int64_t version,
1100
372
                                                             std::string_view val) -> bool {
1101
372
        TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id);
1102
372
        if (tablet == nullptr) {
1103
0
            if (removed_tablets.insert(tablet_id).second) {
1104
0
                LOG(INFO) << "clean ununsed delete bitmap for deleted tablet, tablet_id: "
1105
0
                          << tablet_id;
1106
0
            }
1107
0
        }
1108
372
        return true;
1109
372
    };
1110
56
    auto data_dirs = get_stores();
1111
64
    for (auto data_dir : data_dirs) {
1112
64
        static_cast<void>(TabletMetaManager::traverse_delete_bitmap(data_dir->get_meta(),
1113
64
                                                                    clean_delete_bitmap_func));
1114
64
        for (auto id : removed_tablets) {
1115
0
            static_cast<void>(
1116
0
                    TabletMetaManager::remove_old_version_delete_bitmap(data_dir, id, INT64_MAX));
1117
0
        }
1118
64
        LOG(INFO) << "removed invalid delete bitmap from dir: " << data_dir->path()
1119
64
                  << ", deleted tablets size: " << removed_tablets.size();
1120
64
        removed_tablets.clear();
1121
64
    }
1122
56
}
1123
1124
56
void StorageEngine::_clean_unused_pending_publish_info() {
1125
56
    std::vector<std::pair<int64_t, int64_t>> removed_infos;
1126
56
    auto clean_pending_publish_info_func = [this, &removed_infos](int64_t tablet_id,
1127
56
                                                                  int64_t publish_version,
1128
56
                                                                  std::string_view info) -> bool {
1129
0
        TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id);
1130
0
        if (tablet == nullptr) {
1131
0
            removed_infos.emplace_back(tablet_id, publish_version);
1132
0
        }
1133
0
        return true;
1134
0
    };
1135
56
    auto data_dirs = get_stores();
1136
64
    for (auto data_dir : data_dirs) {
1137
64
        static_cast<void>(TabletMetaManager::traverse_pending_publish(
1138
64
                data_dir->get_meta(), clean_pending_publish_info_func));
1139
64
        for (auto& [tablet_id, publish_version] : removed_infos) {
1140
0
            static_cast<void>(TabletMetaManager::remove_pending_publish_info(data_dir, tablet_id,
1141
0
                                                                             publish_version));
1142
0
        }
1143
64
        LOG(INFO) << "removed invalid pending publish info from dir: " << data_dir->path()
1144
64
                  << ", deleted pending publish info size: " << removed_infos.size();
1145
64
        removed_infos.clear();
1146
64
    }
1147
56
}
1148
1149
56
void StorageEngine::_clean_unused_partial_update_info() {
1150
56
    std::vector<std::tuple<int64_t, int64_t, int64_t>> remove_infos;
1151
56
    auto unused_partial_update_info_collector =
1152
56
            [this, &remove_infos](int64_t tablet_id, int64_t partition_id, int64_t txn_id,
1153
56
                                  std::string_view value) -> bool {
1154
0
        TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id);
1155
0
        if (tablet == nullptr) {
1156
0
            remove_infos.emplace_back(tablet_id, partition_id, txn_id);
1157
0
            return true;
1158
0
        }
1159
0
        TxnState txn_state =
1160
0
                _txn_manager->get_txn_state(partition_id, txn_id, tablet_id, tablet->tablet_uid());
1161
0
        if (txn_state == TxnState::NOT_FOUND || txn_state == TxnState::ABORTED ||
1162
0
            txn_state == TxnState::DELETED) {
1163
0
            remove_infos.emplace_back(tablet_id, partition_id, txn_id);
1164
0
            return true;
1165
0
        }
1166
0
        return true;
1167
0
    };
1168
56
    auto data_dirs = get_stores();
1169
64
    for (auto* data_dir : data_dirs) {
1170
64
        static_cast<void>(RowsetMetaManager::traverse_partial_update_info(
1171
64
                data_dir->get_meta(), unused_partial_update_info_collector));
1172
64
        static_cast<void>(
1173
64
                RowsetMetaManager::remove_partial_update_infos(data_dir->get_meta(), remove_infos));
1174
64
    }
1175
56
}
1176
1177
0
void StorageEngine::gc_binlogs(const std::unordered_map<int64_t, int64_t>& gc_tablet_infos) {
1178
0
    for (auto [tablet_id, version] : gc_tablet_infos) {
1179
0
        LOG(INFO) << fmt::format("start to gc binlogs for tablet_id: {}, version: {}", tablet_id,
1180
0
                                 version);
1181
1182
0
        TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id);
1183
0
        if (tablet == nullptr) {
1184
0
            LOG(WARNING) << fmt::format("tablet_id: {} not found", tablet_id);
1185
0
            continue;
1186
0
        }
1187
0
        tablet->gc_binlogs(version);
1188
0
    }
1189
0
}
1190
1191
56
void StorageEngine::_clean_unused_txns() {
1192
56
    std::set<TabletInfo> tablet_infos;
1193
56
    _txn_manager->get_all_related_tablets(&tablet_infos);
1194
974
    for (auto& tablet_info : tablet_infos) {
1195
974
        TabletSharedPtr tablet =
1196
974
                _tablet_manager->get_tablet(tablet_info.tablet_id, tablet_info.tablet_uid, true);
1197
974
        if (tablet == nullptr) {
1198
            // TODO(ygl) :  should check if tablet still in meta, it's a improvement
1199
            // case 1: tablet still in meta, just remove from memory
1200
            // case 2: tablet not in meta store, remove rowset from meta
1201
            // currently just remove them from memory
1202
            // nullptr to indicate not remove them from meta store
1203
0
            _txn_manager->force_rollback_tablet_related_txns(nullptr, tablet_info.tablet_id,
1204
0
                                                             tablet_info.tablet_uid);
1205
0
        }
1206
974
    }
1207
56
}
1208
1209
Status StorageEngine::_do_sweep(const std::string& scan_root, const time_t& local_now,
1210
128
                                const int32_t expire) {
1211
128
    Status res = Status::OK();
1212
128
    bool exists = true;
1213
128
    RETURN_IF_ERROR(io::global_local_filesystem()->exists(scan_root, &exists));
1214
128
    if (!exists) {
1215
        // dir not existed. no need to sweep trash.
1216
94
        return res;
1217
94
    }
1218
1219
34
    int curr_sweep_batch_size = 0;
1220
34
    try {
1221
        // Sort pathes by name, that is by delete time.
1222
34
        std::vector<path> sorted_pathes;
1223
34
        std::copy(directory_iterator(scan_root), directory_iterator(),
1224
34
                  std::back_inserter(sorted_pathes));
1225
34
        std::sort(sorted_pathes.begin(), sorted_pathes.end());
1226
34
        for (const auto& sorted_path : sorted_pathes) {
1227
4
            string dir_name = sorted_path.filename().string();
1228
4
            string str_time = dir_name.substr(0, dir_name.find('.'));
1229
4
            tm local_tm_create;
1230
4
            local_tm_create.tm_isdst = 0;
1231
4
            if (strptime(str_time.c_str(), "%Y%m%d%H%M%S", &local_tm_create) == nullptr) {
1232
0
                res = Status::Error<OS_ERROR>("fail to strptime time. time={}", str_time);
1233
0
                continue;
1234
0
            }
1235
1236
4
            int32_t actual_expire = expire;
1237
            // try get timeout in dir name, the old snapshot dir does not contain timeout
1238
            // eg: 20190818221123.3.86400, the 86400 is timeout, in second
1239
4
            size_t pos = dir_name.find('.', str_time.size() + 1);
1240
4
            if (pos != string::npos) {
1241
4
                actual_expire = std::stoi(dir_name.substr(pos + 1));
1242
4
            }
1243
4
            VLOG_TRACE << "get actual expire time " << actual_expire << " of dir: " << dir_name;
1244
1245
4
            string path_name = sorted_path.string();
1246
4
            if (difftime(local_now, mktime(&local_tm_create)) >= actual_expire) {
1247
0
                res = io::global_local_filesystem()->delete_directory(path_name);
1248
0
                LOG(INFO) << "do sweep delete directory " << path_name << " local_now " << local_now
1249
0
                          << "actual_expire " << actual_expire << " res " << res;
1250
0
                if (!res.ok()) {
1251
0
                    continue;
1252
0
                }
1253
1254
0
                curr_sweep_batch_size++;
1255
0
                if (config::garbage_sweep_batch_size > 0 &&
1256
0
                    curr_sweep_batch_size >= config::garbage_sweep_batch_size) {
1257
0
                    curr_sweep_batch_size = 0;
1258
0
                    std::this_thread::sleep_for(std::chrono::milliseconds(1));
1259
0
                }
1260
4
            } else {
1261
                // Because files are ordered by filename, i.e. by create time, so all the left files are not expired.
1262
4
                break;
1263
4
            }
1264
4
        }
1265
34
    } catch (...) {
1266
0
        res = Status::Error<IO_ERROR>("Exception occur when scan directory. path_desc={}",
1267
0
                                      scan_root);
1268
0
    }
1269
1270
34
    return res;
1271
34
}
1272
1273
// invalid rowset type config will return ALPHA_ROWSET for system to run smoothly
1274
44
void StorageEngine::_parse_default_rowset_type() {
1275
44
    std::string default_rowset_type_config = config::default_rowset_type;
1276
44
    boost::to_upper(default_rowset_type_config);
1277
44
    if (default_rowset_type_config == "BETA") {
1278
44
        _default_rowset_type = BETA_ROWSET;
1279
44
    } else if (default_rowset_type_config == "ALPHA") {
1280
0
        _default_rowset_type = ALPHA_ROWSET;
1281
0
        LOG(WARNING) << "default_rowset_type in be.conf should be set to beta, alpha is not "
1282
0
                        "supported any more";
1283
0
    } else {
1284
0
        LOG(FATAL) << "unknown value " << default_rowset_type_config
1285
0
                   << " in default_rowset_type in be.conf";
1286
0
    }
1287
44
}
1288
1289
317
void StorageEngine::start_delete_unused_rowset() {
1290
317
    DBUG_EXECUTE_IF("StorageEngine::start_delete_unused_rowset.block", DBUG_BLOCK);
1291
317
    LOG(INFO) << "start to delete unused rowset, size: " << _unused_rowsets.size()
1292
317
              << ", unused delete bitmap size: " << _unused_delete_bitmap.size();
1293
317
    std::vector<RowsetSharedPtr> unused_rowsets_copy;
1294
317
    unused_rowsets_copy.reserve(_unused_rowsets.size());
1295
317
    auto due_to_use_count = 0;
1296
317
    auto due_to_not_delete_file = 0;
1297
317
    auto due_to_delayed_expired_ts = 0;
1298
317
    std::set<int64_t> tablets_to_save_meta;
1299
317
    {
1300
317
        std::lock_guard<std::mutex> lock(_gc_mutex);
1301
18.0k
        for (auto it = _unused_rowsets.begin(); it != _unused_rowsets.end();) {
1302
17.6k
            auto&& rs = it->second;
1303
17.6k
            if (rs.use_count() == 1 && rs->need_delete_file()) {
1304
                // remote rowset data will be reclaimed by `remove_unused_remote_files`
1305
17.6k
                if (rs->is_local()) {
1306
17.6k
                    unused_rowsets_copy.push_back(std::move(rs));
1307
17.6k
                }
1308
17.6k
                it = _unused_rowsets.erase(it);
1309
17.6k
            } else {
1310
0
                if (rs.use_count() != 1) {
1311
0
                    ++due_to_use_count;
1312
0
                } else if (!rs->need_delete_file()) {
1313
0
                    ++due_to_not_delete_file;
1314
0
                } else {
1315
0
                    ++due_to_delayed_expired_ts;
1316
0
                }
1317
0
                ++it;
1318
0
            }
1319
17.6k
        }
1320
        // check remove delete bitmaps
1321
317
        for (auto it = _unused_delete_bitmap.begin(); it != _unused_delete_bitmap.end();) {
1322
0
            auto tablet_id = std::get<0>(*it);
1323
0
            auto tablet = _tablet_manager->get_tablet(tablet_id);
1324
0
            if (tablet == nullptr) {
1325
0
                it = _unused_delete_bitmap.erase(it);
1326
0
                continue;
1327
0
            }
1328
0
            auto& rowset_ids = std::get<1>(*it);
1329
0
            auto& key_ranges = std::get<2>(*it);
1330
0
            bool find_unused_rowset = false;
1331
0
            for (const auto& rowset_id : rowset_ids) {
1332
0
                if (_unused_rowsets.find(rowset_id) != _unused_rowsets.end()) {
1333
0
                    VLOG_DEBUG << "can not remove pre rowset delete bitmap because rowset is in use"
1334
0
                               << ", tablet_id=" << tablet_id
1335
0
                               << ", rowset_id=" << rowset_id.to_string();
1336
0
                    find_unused_rowset = true;
1337
0
                    break;
1338
0
                }
1339
0
            }
1340
0
            if (find_unused_rowset) {
1341
0
                ++it;
1342
0
                continue;
1343
0
            }
1344
0
            tablet->tablet_meta()->delete_bitmap().remove(key_ranges);
1345
0
            tablets_to_save_meta.emplace(tablet_id);
1346
0
            it = _unused_delete_bitmap.erase(it);
1347
0
        }
1348
317
    }
1349
317
    LOG(INFO) << "collected " << unused_rowsets_copy.size() << " unused rowsets to remove, skipped "
1350
317
              << due_to_use_count << " rowsets due to use count > 1, skipped "
1351
317
              << due_to_not_delete_file << " rowsets due to don't need to delete file, skipped "
1352
317
              << due_to_delayed_expired_ts << " rowsets due to delayed expired timestamp. left "
1353
317
              << _unused_delete_bitmap.size() << " unused delete bitmap.";
1354
17.6k
    for (auto&& rs : unused_rowsets_copy) {
1355
17.6k
        VLOG_NOTICE << "start to remove rowset:" << rs->rowset_id()
1356
0
                    << ", version:" << rs->version();
1357
        // delete delete_bitmap of unused rowsets
1358
17.6k
        if (auto tablet = _tablet_manager->get_tablet(rs->rowset_meta()->tablet_id());
1359
17.6k
            tablet && tablet->enable_unique_key_merge_on_write()) {
1360
3.70k
            tablet->tablet_meta()->remove_rowset_delete_bitmap(rs->rowset_id(), rs->version());
1361
3.70k
            tablets_to_save_meta.emplace(tablet->tablet_id());
1362
3.70k
        }
1363
17.6k
        Status status = rs->remove();
1364
17.6k
        unused_rowsets_counter << -1;
1365
17.6k
        VLOG_NOTICE << "remove rowset:" << rs->rowset_id() << " finished. status:" << status;
1366
17.6k
    }
1367
317
    for (const auto& tablet_id : tablets_to_save_meta) {
1368
195
        auto tablet = _tablet_manager->get_tablet(tablet_id);
1369
195
        if (tablet) {
1370
195
            std::shared_lock rlock(tablet->get_header_lock());
1371
195
            tablet->save_meta();
1372
195
        }
1373
195
    }
1374
317
    LOG(INFO) << "removed all collected unused rowsets";
1375
317
}
1376
1377
17.8k
void StorageEngine::add_unused_rowset(RowsetSharedPtr rowset) {
1378
17.8k
    if (rowset == nullptr) {
1379
5
        return;
1380
5
    }
1381
17.8k
    VLOG_NOTICE << "add unused rowset, rowset id:" << rowset->rowset_id()
1382
39
                << ", version:" << rowset->version();
1383
17.8k
    std::lock_guard<std::mutex> lock(_gc_mutex);
1384
17.8k
    auto it = _unused_rowsets.find(rowset->rowset_id());
1385
17.8k
    if (it == _unused_rowsets.end()) {
1386
17.8k
        rowset->set_need_delete_file();
1387
17.8k
        rowset->close();
1388
17.8k
        _unused_rowsets[rowset->rowset_id()] = std::move(rowset);
1389
17.8k
        unused_rowsets_counter << 1;
1390
17.8k
    }
1391
17.8k
}
1392
1393
void StorageEngine::add_unused_delete_bitmap_key_ranges(int64_t tablet_id,
1394
                                                        const std::vector<RowsetId>& rowsets,
1395
0
                                                        const DeleteBitmapKeyRanges& key_ranges) {
1396
0
    VLOG_NOTICE << "add unused delete bitmap key ranges, tablet id:" << tablet_id;
1397
0
    std::lock_guard<std::mutex> lock(_gc_mutex);
1398
0
    _unused_delete_bitmap.push_back(std::make_tuple(tablet_id, rowsets, key_ranges));
1399
0
}
1400
1401
// TODO(zc): refactor this funciton
1402
6.30k
Status StorageEngine::create_tablet(const TCreateTabletReq& request, RuntimeProfile* profile) {
1403
    // Get all available stores, use ref_root_path if the caller specified
1404
6.30k
    std::vector<DataDir*> stores;
1405
6.30k
    {
1406
6.30k
        SCOPED_TIMER(ADD_TIMER(profile, "GetStores"));
1407
6.30k
        stores = get_stores_for_create_tablet(request.partition_id, request.storage_medium);
1408
6.30k
    }
1409
6.30k
    if (stores.empty()) {
1410
0
        return Status::Error<CE_CMD_PARAMS_ERROR>(
1411
0
                "there is no available disk that can be used to create tablet.");
1412
0
    }
1413
6.30k
    return _tablet_manager->create_tablet(request, stores, profile);
1414
6.30k
}
1415
1416
Result<BaseTabletSPtr> StorageEngine::get_tablet(int64_t tablet_id, SyncRowsetStats* sync_stats,
1417
389k
                                                 bool force_use_only_cached, bool cache_on_miss) {
1418
389k
    BaseTabletSPtr tablet;
1419
389k
    std::string err;
1420
389k
    tablet = _tablet_manager->get_tablet(tablet_id, true, &err);
1421
389k
    if (tablet == nullptr) {
1422
43
        return unexpected(
1423
43
                Status::InternalError("failed to get tablet: {}, reason: {}", tablet_id, err));
1424
43
    }
1425
389k
    return tablet;
1426
389k
}
1427
1428
Status StorageEngine::get_tablet_meta(int64_t tablet_id, TabletMetaSharedPtr* tablet_meta,
1429
42
                                      bool force_use_only_cached) {
1430
42
    if (tablet_meta == nullptr) {
1431
0
        return Status::InvalidArgument("tablet_meta output is null");
1432
0
    }
1433
1434
42
    auto res = get_tablet(tablet_id, nullptr, force_use_only_cached, true);
1435
42
    if (!res.has_value()) {
1436
42
        return res.error();
1437
42
    }
1438
1439
0
    *tablet_meta = res.value()->tablet_meta();
1440
0
    return Status::OK();
1441
42
}
1442
1443
Status StorageEngine::obtain_shard_path(TStorageMedium::type storage_medium, int64_t path_hash,
1444
                                        std::string* shard_path, DataDir** store,
1445
0
                                        int64_t partition_id) {
1446
0
    LOG(INFO) << "begin to process obtain root path. storage_medium=" << storage_medium;
1447
1448
0
    if (shard_path == nullptr) {
1449
0
        return Status::Error<CE_CMD_PARAMS_ERROR>(
1450
0
                "invalid output parameter which is null pointer.");
1451
0
    }
1452
1453
0
    auto stores = get_stores_for_create_tablet(partition_id, storage_medium);
1454
0
    if (stores.empty()) {
1455
0
        return Status::Error<NO_AVAILABLE_ROOT_PATH>(
1456
0
                "no available disk can be used to create tablet.");
1457
0
    }
1458
1459
0
    *store = nullptr;
1460
0
    if (path_hash != -1) {
1461
0
        for (auto data_dir : stores) {
1462
0
            if (data_dir->path_hash() == path_hash) {
1463
0
                *store = data_dir;
1464
0
                break;
1465
0
            }
1466
0
        }
1467
0
    }
1468
0
    if (*store == nullptr) {
1469
0
        *store = stores[0];
1470
0
    }
1471
1472
0
    uint64_t shard = (*store)->get_shard();
1473
1474
0
    std::stringstream root_path_stream;
1475
0
    root_path_stream << (*store)->path() << "/" << DATA_PREFIX << "/" << shard;
1476
0
    *shard_path = root_path_stream.str();
1477
1478
0
    LOG(INFO) << "success to process obtain root path. path=" << *shard_path;
1479
0
    return Status::OK();
1480
0
}
1481
1482
Status StorageEngine::load_header(const string& shard_path, const TCloneReq& request,
1483
0
                                  bool restore) {
1484
0
    LOG(INFO) << "begin to process load headers."
1485
0
              << "tablet_id=" << request.tablet_id << ", schema_hash=" << request.schema_hash;
1486
0
    Status res = Status::OK();
1487
1488
0
    DataDir* store = nullptr;
1489
0
    {
1490
        // TODO(zc)
1491
0
        try {
1492
0
            auto store_path =
1493
0
                    std::filesystem::path(shard_path).parent_path().parent_path().string();
1494
0
            store = get_store(store_path);
1495
0
            if (store == nullptr) {
1496
0
                return Status::Error<INVALID_ROOT_PATH>("invalid shard path, path={}", shard_path);
1497
0
            }
1498
0
        } catch (...) {
1499
0
            return Status::Error<INVALID_ROOT_PATH>("invalid shard path, path={}", shard_path);
1500
0
        }
1501
0
    }
1502
1503
0
    std::stringstream schema_hash_path_stream;
1504
0
    schema_hash_path_stream << shard_path << "/" << request.tablet_id << "/" << request.schema_hash;
1505
    // not surely, reload and restore tablet action call this api
1506
    // reset tablet uid here
1507
1508
0
    string header_path = TabletMeta::construct_header_file_path(schema_hash_path_stream.str(),
1509
0
                                                                request.tablet_id);
1510
0
    res = _tablet_manager->load_tablet_from_dir(store, request.tablet_id, request.schema_hash,
1511
0
                                                schema_hash_path_stream.str(), false, restore);
1512
0
    if (!res.ok()) {
1513
0
        LOG(WARNING) << "fail to process load headers. res=" << res;
1514
0
        return res;
1515
0
    }
1516
1517
0
    LOG(INFO) << "success to process load headers.";
1518
0
    return res;
1519
0
}
1520
1521
29
void BaseStorageEngine::register_report_listener(ReportWorker* listener) {
1522
29
    std::lock_guard<std::mutex> l(_report_mtx);
1523
29
    if (std::find(_report_listeners.begin(), _report_listeners.end(), listener) !=
1524
29
        _report_listeners.end()) [[unlikely]] {
1525
0
        return;
1526
0
    }
1527
29
    _report_listeners.push_back(listener);
1528
29
}
1529
1530
13
void BaseStorageEngine::deregister_report_listener(ReportWorker* listener) {
1531
13
    std::lock_guard<std::mutex> l(_report_mtx);
1532
13
    if (auto it = std::find(_report_listeners.begin(), _report_listeners.end(), listener);
1533
13
        it != _report_listeners.end()) {
1534
13
        _report_listeners.erase(it);
1535
13
    }
1536
13
}
1537
1538
371
void BaseStorageEngine::notify_listeners() {
1539
371
    std::lock_guard<std::mutex> l(_report_mtx);
1540
371
    for (auto& listener : _report_listeners) {
1541
56
        listener->notify();
1542
56
    }
1543
371
}
1544
1545
2
bool BaseStorageEngine::notify_listener(std::string_view name) {
1546
2
    bool found = false;
1547
2
    std::lock_guard<std::mutex> l(_report_mtx);
1548
5
    for (auto& listener : _report_listeners) {
1549
5
        if (listener->name() == name) {
1550
2
            listener->notify();
1551
2
            found = true;
1552
2
        }
1553
5
    }
1554
2
    return found;
1555
2
}
1556
1557
7
void BaseStorageEngine::_evict_quring_rowset_thread_callback() {
1558
7
    int32_t interval = config::quering_rowsets_evict_interval;
1559
434
    do {
1560
434
        _evict_querying_rowset();
1561
434
        interval = config::quering_rowsets_evict_interval;
1562
434
        if (interval <= 0) {
1563
0
            LOG(WARNING) << "quering_rowsets_evict_interval config is illegal: " << interval
1564
0
                         << ", force set to 1";
1565
0
            interval = 1;
1566
0
        }
1567
434
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
1568
7
}
1569
1570
// check whether any unused rowsets's id equal to rowset_id
1571
4.65k
bool StorageEngine::check_rowset_id_in_unused_rowsets(const RowsetId& rowset_id) {
1572
4.65k
    std::lock_guard<std::mutex> lock(_gc_mutex);
1573
4.65k
    return _unused_rowsets.contains(rowset_id);
1574
4.65k
}
1575
1576
1.44k
PendingRowsetGuard StorageEngine::add_pending_rowset(const RowsetWriterContext& ctx) {
1577
1.44k
    if (ctx.is_local_rowset()) {
1578
1.44k
        return _pending_local_rowsets.add(ctx.rowset_id);
1579
1.44k
    }
1580
2
    return _pending_remote_rowsets.add(ctx.rowset_id);
1581
1.44k
}
1582
1583
bool StorageEngine::get_peer_replica_info(int64_t tablet_id, TReplicaInfo* replica,
1584
0
                                          std::string* token) {
1585
0
    TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id);
1586
0
    if (tablet == nullptr) {
1587
0
        LOG(WARNING) << "tablet is no longer exist: tablet_id=" << tablet_id;
1588
0
        return false;
1589
0
    }
1590
0
    std::unique_lock<std::mutex> lock(_peer_replica_infos_mutex);
1591
0
    if (_peer_replica_infos.contains(tablet_id) &&
1592
0
        _peer_replica_infos[tablet_id].replica_id != tablet->replica_id()) {
1593
0
        *replica = _peer_replica_infos[tablet_id];
1594
0
        *token = _token;
1595
0
        return true;
1596
0
    }
1597
0
    return false;
1598
0
}
1599
1600
0
bool StorageEngine::get_peers_replica_backends(int64_t tablet_id, std::vector<TBackend>* backends) {
1601
0
    TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id);
1602
0
    if (tablet == nullptr) {
1603
0
        LOG(WARNING) << "tablet is no longer exist: tablet_id=" << tablet_id;
1604
0
        return false;
1605
0
    }
1606
0
    int64_t cur_time = UnixMillis();
1607
0
    if (cur_time - _last_get_peers_replica_backends_time_ms < 10000) {
1608
0
        LOG_WARNING("failed to get peers replica backens.")
1609
0
                .tag("tablet_id", tablet_id)
1610
0
                .tag("last time", _last_get_peers_replica_backends_time_ms)
1611
0
                .tag("cur time", cur_time);
1612
0
        return false;
1613
0
    }
1614
0
    LOG_INFO("start get peers replica backends info.").tag("tablet id", tablet_id);
1615
0
    ClusterInfo* cluster_info = ExecEnv::GetInstance()->cluster_info();
1616
0
    if (cluster_info == nullptr) {
1617
0
        LOG(WARNING) << "Have not get FE Master heartbeat yet";
1618
0
        return false;
1619
0
    }
1620
0
    TNetworkAddress master_addr = cluster_info->master_fe_addr;
1621
0
    if (master_addr.hostname.empty() || master_addr.port == 0) {
1622
0
        LOG(WARNING) << "Have not get FE Master heartbeat yet";
1623
0
        return false;
1624
0
    }
1625
0
    TGetTabletReplicaInfosRequest request;
1626
0
    TGetTabletReplicaInfosResult result;
1627
0
    request.tablet_ids.emplace_back(tablet_id);
1628
0
    Status rpc_st = ThriftRpcHelper::rpc<FrontendServiceClient>(
1629
0
            master_addr.hostname, master_addr.port,
1630
0
            [&request, &result](FrontendServiceConnection& client) {
1631
0
                client->getTabletReplicaInfos(result, request);
1632
0
            });
1633
1634
0
    if (!rpc_st.ok()) {
1635
0
        LOG(WARNING) << "Failed to get tablet replica infos, encounter rpc failure, "
1636
0
                        "tablet id: "
1637
0
                     << tablet_id;
1638
0
        return false;
1639
0
    }
1640
0
    std::unique_lock<std::mutex> lock(_peer_replica_infos_mutex);
1641
0
    if (result.tablet_replica_infos.contains(tablet_id)) {
1642
0
        std::vector<TReplicaInfo> reps = result.tablet_replica_infos[tablet_id];
1643
0
        if (reps.empty()) [[unlikely]] {
1644
0
            VLOG_DEBUG << "get_peers_replica_backends reps is empty, maybe this tablet is in "
1645
0
                          "schema change. Go to FE to see more info. Tablet id: "
1646
0
                       << tablet_id;
1647
0
        }
1648
0
        for (const auto& rep : reps) {
1649
0
            if (rep.replica_id != tablet->replica_id()) {
1650
0
                TBackend backend;
1651
0
                backend.__set_host(rep.host);
1652
0
                backend.__set_be_port(rep.be_port);
1653
0
                backend.__set_http_port(rep.http_port);
1654
0
                backend.__set_brpc_port(rep.brpc_port);
1655
0
                if (rep.__isset.is_alive) {
1656
0
                    backend.__set_is_alive(rep.is_alive);
1657
0
                }
1658
0
                if (rep.__isset.backend_id) {
1659
0
                    backend.__set_id(rep.backend_id);
1660
0
                }
1661
0
                backends->emplace_back(backend);
1662
0
                std::stringstream backend_string;
1663
0
                backend.printTo(backend_string);
1664
0
                LOG_INFO("get 1 peer replica backend info.")
1665
0
                        .tag("tablet id", tablet_id)
1666
0
                        .tag("backend info", backend_string.str());
1667
0
            }
1668
0
        }
1669
0
        _last_get_peers_replica_backends_time_ms = UnixMillis();
1670
0
        LOG_INFO("succeed get peers replica backends info.")
1671
0
                .tag("tablet id", tablet_id)
1672
0
                .tag("replica num", backends->size());
1673
0
        return true;
1674
0
    }
1675
0
    return false;
1676
0
}
1677
1678
0
bool StorageEngine::should_fetch_from_peer(int64_t tablet_id) {
1679
#ifdef BE_TEST
1680
    if (tablet_id % 2 == 0) {
1681
        return true;
1682
    }
1683
    return false;
1684
#endif
1685
0
    TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id);
1686
0
    if (tablet == nullptr) {
1687
0
        LOG(WARNING) << "tablet is no longer exist: tablet_id=" << tablet_id;
1688
0
        return false;
1689
0
    }
1690
0
    std::unique_lock<std::mutex> lock(_peer_replica_infos_mutex);
1691
0
    if (_peer_replica_infos.contains(tablet_id)) {
1692
0
        return _peer_replica_infos[tablet_id].replica_id != tablet->replica_id();
1693
0
    }
1694
0
    return false;
1695
0
}
1696
1697
// Return json:
1698
// {
1699
//   "CumulativeCompaction": {
1700
//          "/home/disk1" : [10001, 10002],
1701
//          "/home/disk2" : [10003]
1702
//   },
1703
//   "BaseCompaction": {
1704
//          "/home/disk1" : [10001, 10002],
1705
//          "/home/disk2" : [10003]
1706
//   }
1707
// }
1708
0
void StorageEngine::get_compaction_status_json(std::string* result) {
1709
0
    _compaction_submit_registry.jsonfy_compaction_status(result);
1710
0
}
1711
1712
161
void BaseStorageEngine::add_quering_rowset(RowsetSharedPtr rs) {
1713
161
    std::lock_guard<std::mutex> lock(_quering_rowsets_mutex);
1714
161
    _querying_rowsets.emplace(rs->rowset_id(), rs);
1715
161
}
1716
1717
25
RowsetSharedPtr BaseStorageEngine::get_quering_rowset(RowsetId rs_id) {
1718
25
    std::lock_guard<std::mutex> lock(_quering_rowsets_mutex);
1719
25
    auto it = _querying_rowsets.find(rs_id);
1720
25
    if (it != _querying_rowsets.end()) {
1721
25
        return it->second;
1722
25
    }
1723
0
    return nullptr;
1724
25
}
1725
1726
434
void BaseStorageEngine::_evict_querying_rowset() {
1727
434
    {
1728
434
        std::lock_guard<std::mutex> lock(_quering_rowsets_mutex);
1729
1.25k
        for (auto it = _querying_rowsets.begin(); it != _querying_rowsets.end();) {
1730
825
            uint64_t now = UnixSeconds();
1731
            // We delay the GC time of this rowset since it's maybe still needed, see #20732
1732
825
            if (now > it->second->delayed_expired_timestamp()) {
1733
25
                it = _querying_rowsets.erase(it);
1734
800
            } else {
1735
800
                ++it;
1736
800
            }
1737
825
        }
1738
434
    }
1739
1740
434
    uint64_t now = UnixSeconds();
1741
434
    ExecEnv::GetInstance()->get_id_manager()->gc_expired_id_file_map(now);
1742
434
}
1743
1744
4
bool BaseStorageEngine::_should_delay_large_task() {
1745
4
    DCHECK_GE(_cumu_compaction_thread_pool->max_threads(),
1746
4
              _cumu_compaction_thread_pool_used_threads);
1747
4
    DCHECK_GE(_cumu_compaction_thread_pool_small_tasks_running, 0);
1748
    // Case 1: Multiple threads available => accept large task
1749
4
    if (_cumu_compaction_thread_pool->max_threads() - _cumu_compaction_thread_pool_used_threads >
1750
4
        0) {
1751
2
        return false; // No delay needed
1752
2
    }
1753
    // Case 2: Only one thread left => accept large task only if another small task is already running
1754
2
    if (_cumu_compaction_thread_pool_small_tasks_running > 0) {
1755
1
        return false; // No delay needed
1756
1
    }
1757
    // Case 3: Only one thread left, this is a large task, and no small tasks are running
1758
    // Delay this task to reserve capacity for potential small tasks
1759
1
    return true; // Delay this large task
1760
2
}
1761
1762
5
bool StorageEngine::add_broken_path(std::string path) {
1763
5
    std::lock_guard<std::mutex> lock(_broken_paths_mutex);
1764
5
    auto success = _broken_paths.emplace(path).second;
1765
5
    if (success) {
1766
4
        static_cast<void>(_persist_broken_paths());
1767
4
    }
1768
5
    return success;
1769
5
}
1770
1771
3
bool StorageEngine::remove_broken_path(std::string path) {
1772
3
    std::lock_guard<std::mutex> lock(_broken_paths_mutex);
1773
3
    auto count = _broken_paths.erase(path);
1774
3
    if (count > 0) {
1775
3
        static_cast<void>(_persist_broken_paths());
1776
3
    }
1777
3
    return count > 0;
1778
3
}
1779
1780
7
Status StorageEngine::_persist_broken_paths() {
1781
7
    std::string config_value;
1782
7
    for (const std::string& path : _broken_paths) {
1783
6
        config_value += path + ";";
1784
6
    }
1785
1786
7
    if (config_value.length() > 0) {
1787
5
        auto st = config::set_config("broken_storage_path", config_value, true);
1788
5
        LOG(INFO) << "persist broken_storage_path " << config_value << st;
1789
5
        return st;
1790
5
    }
1791
1792
2
    return Status::OK();
1793
7
}
1794
1795
0
Status StorageEngine::submit_clone_task(Tablet* tablet, int64_t version) {
1796
0
    std::vector<TBackend> backends;
1797
0
    if (!get_peers_replica_backends(tablet->tablet_id(), &backends)) {
1798
0
        return Status::Error<ErrorCode::INTERNAL_ERROR, false>(
1799
0
                "get_peers_replica_backends failed.");
1800
0
    }
1801
0
    TAgentTaskRequest task;
1802
0
    TCloneReq req;
1803
0
    req.__set_tablet_id(tablet->tablet_id());
1804
0
    req.__set_schema_hash(tablet->schema_hash());
1805
0
    req.__set_src_backends(backends);
1806
0
    req.__set_version(version);
1807
0
    req.__set_replica_id(tablet->replica_id());
1808
0
    req.__set_partition_id(tablet->partition_id());
1809
0
    req.__set_table_id(tablet->table_id());
1810
0
    task.__set_task_type(TTaskType::CLONE);
1811
0
    task.__set_clone_req(req);
1812
0
    task.__set_priority(TPriority::HIGH);
1813
0
    task.__set_signature(tablet->tablet_id());
1814
0
    LOG_INFO("BE start to submit missing rowset clone task.")
1815
0
            .tag("tablet_id", tablet->tablet_id())
1816
0
            .tag("version", version)
1817
0
            .tag("replica_id", tablet->replica_id())
1818
0
            .tag("partition_id", tablet->partition_id())
1819
0
            .tag("table_id", tablet->table_id());
1820
0
    RETURN_IF_ERROR(assert_cast<PriorTaskWorkerPool*>(workers->at(TTaskType::CLONE).get())
1821
0
                            ->submit_high_prior_and_cancel_low(task));
1822
0
    return Status::OK();
1823
0
}
1824
1825
6.30k
int CreateTabletRRIdxCache::get_index(const std::string& key) {
1826
6.30k
    auto* lru_handle = lookup(key);
1827
6.30k
    if (lru_handle) {
1828
4.98k
        Defer release([cache = this, lru_handle] { cache->release(lru_handle); });
1829
4.98k
        auto* value = (CacheValue*)LRUCachePolicy::value(lru_handle);
1830
4.98k
        VLOG_DEBUG << "use create tablet idx cache key=" << key << " value=" << value->idx;
1831
4.98k
        return value->idx;
1832
4.98k
    }
1833
1.31k
    return -1;
1834
6.30k
}
1835
1836
6.30k
void CreateTabletRRIdxCache::set_index(const std::string& key, int next_idx) {
1837
6.30k
    assert(next_idx >= 0);
1838
6.30k
    auto* value = new CacheValue;
1839
6.30k
    value->idx = next_idx;
1840
6.30k
    auto* lru_handle = insert(key, value, 1, sizeof(int), CachePriority::NORMAL);
1841
6.30k
    release(lru_handle);
1842
6.30k
}
1843
} // namespace doris