Coverage Report

Created: 2025-04-14 21:27

/root/doris/be/src/olap/data_dir.cpp
Columns: Line | Count | Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "olap/data_dir.h"
19
20
#include <fmt/core.h>
21
#include <fmt/format.h>
22
#include <gen_cpp/FrontendService_types.h>
23
#include <gen_cpp/Types_types.h>
24
#include <gen_cpp/olap_file.pb.h>
25
26
#include <atomic>
27
#include <cstdio>
28
// IWYU pragma: no_include <bits/chrono.h>
29
#include <chrono> // IWYU pragma: keep
30
#include <cstddef>
31
#include <filesystem>
32
#include <memory>
33
#include <new>
34
#include <roaring/roaring.hh>
35
#include <set>
36
#include <sstream>
37
#include <string>
38
#include <thread>
39
#include <utility>
40
41
#include "common/config.h"
42
#include "common/logging.h"
43
#include "io/fs/file_reader.h"
44
#include "io/fs/file_writer.h"
45
#include "io/fs/local_file_system.h"
46
#include "io/fs/path.h"
47
#include "olap/delete_handler.h"
48
#include "olap/olap_common.h"
49
#include "olap/olap_define.h"
50
#include "olap/olap_meta.h"
51
#include "olap/rowset/beta_rowset.h"
52
#include "olap/rowset/pending_rowset_helper.h"
53
#include "olap/rowset/rowset.h"
54
#include "olap/rowset/rowset_id_generator.h"
55
#include "olap/rowset/rowset_meta.h"
56
#include "olap/rowset/rowset_meta_manager.h"
57
#include "olap/storage_engine.h"
58
#include "olap/storage_policy.h"
59
#include "olap/tablet.h"
60
#include "olap/tablet_manager.h"
61
#include "olap/tablet_meta_manager.h"
62
#include "olap/txn_manager.h"
63
#include "olap/utils.h" // for check_dir_existed
64
#include "service/backend_options.h"
65
#include "util/doris_metrics.h"
66
#include "util/string_util.h"
67
#include "util/uid_util.h"
68
69
namespace doris {
70
using namespace ErrorCode;
71
72
namespace {
73
74
66
Status read_cluster_id(const std::string& cluster_id_path, int32_t* cluster_id) {
75
66
    bool exists = false;
76
66
    RETURN_IF_ERROR(io::global_local_filesystem()->exists(cluster_id_path, &exists));
77
66
    *cluster_id = -1;
78
66
    if (exists) {
79
0
        io::FileReaderSPtr reader;
80
0
        RETURN_IF_ERROR(io::global_local_filesystem()->open_file(cluster_id_path, &reader));
81
0
        size_t fsize = reader->size();
82
0
        if (fsize > 0) {
83
0
            std::string content;
84
0
            content.resize(fsize, '\0');
85
0
            size_t bytes_read = 0;
86
0
            RETURN_IF_ERROR(reader->read_at(0, {content.data(), fsize}, &bytes_read));
87
0
            DCHECK_EQ(fsize, bytes_read);
88
0
            *cluster_id = std::stoi(content);
89
0
        }
90
0
    }
91
66
    return Status::OK();
92
66
}
93
94
0
Status _write_cluster_id_to_path(const std::string& path, int32_t cluster_id) {
95
0
    bool exists = false;
96
0
    RETURN_IF_ERROR(io::global_local_filesystem()->exists(path, &exists));
97
0
    if (!exists) {
98
0
        io::FileWriterPtr file_writer;
99
0
        RETURN_IF_ERROR(io::global_local_filesystem()->create_file(path, &file_writer));
100
0
        RETURN_IF_ERROR(file_writer->append(std::to_string(cluster_id)));
101
0
        RETURN_IF_ERROR(file_writer->close());
102
0
    }
103
0
    return Status::OK();
104
0
}
105
106
} // namespace
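The two helpers above persist the cluster id as a small plain-text file under the storage root. Below is a minimal standalone sketch of the same round trip using only the standard library; the function names and the /tmp path are hypothetical, not Doris APIs.

#include <cstdint>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>

// Write the cluster id as plain text, but only if the file does not already exist,
// mirroring _write_cluster_id_to_path() above.
void write_cluster_id_file(const std::string& path, int32_t cluster_id) {
    if (!std::filesystem::exists(path)) {
        std::ofstream out(path);
        out << cluster_id;
    }
}

// Read it back; -1 means "no cluster id assigned yet", as in read_cluster_id() above.
int32_t read_cluster_id_file(const std::string& path) {
    if (!std::filesystem::exists(path)) {
        return -1;
    }
    std::ifstream in(path);
    std::string content((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>());
    return content.empty() ? -1 : std::stoi(content);
}

int main() {
    const std::string path = "/tmp/doris_cluster_id_demo"; // hypothetical location
    write_cluster_id_file(path, 12345);
    std::cout << read_cluster_id_file(path) << std::endl;  // prints 12345
}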
107
108
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(disks_total_capacity, MetricUnit::BYTES);
109
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(disks_avail_capacity, MetricUnit::BYTES);
110
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(disks_local_used_capacity, MetricUnit::BYTES);
111
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(disks_remote_used_capacity, MetricUnit::BYTES);
112
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(disks_trash_used_capacity, MetricUnit::BYTES);
113
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(disks_state, MetricUnit::BYTES);
114
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(disks_compaction_score, MetricUnit::NOUNIT);
115
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(disks_compaction_num, MetricUnit::NOUNIT);
116
117
DataDir::DataDir(StorageEngine& engine, const std::string& path, int64_t capacity_bytes,
118
                 TStorageMedium::type storage_medium)
119
        : _engine(engine),
120
          _path(path),
121
          _available_bytes(0),
122
          _disk_capacity_bytes(0),
123
          _trash_used_bytes(0),
124
          _storage_medium(storage_medium),
125
          _is_used(false),
126
          _cluster_id(-1),
127
147
          _to_be_deleted(false) {
128
147
    _data_dir_metric_entity = DorisMetrics::instance()->metric_registry()->register_entity(
129
147
            std::string("data_dir.") + path, {{"path", path}});
130
147
    INT_GAUGE_METRIC_REGISTER(_data_dir_metric_entity, disks_total_capacity);
131
147
    INT_GAUGE_METRIC_REGISTER(_data_dir_metric_entity, disks_avail_capacity);
132
147
    INT_GAUGE_METRIC_REGISTER(_data_dir_metric_entity, disks_local_used_capacity);
133
147
    INT_GAUGE_METRIC_REGISTER(_data_dir_metric_entity, disks_remote_used_capacity);
134
147
    INT_GAUGE_METRIC_REGISTER(_data_dir_metric_entity, disks_trash_used_capacity);
135
147
    INT_GAUGE_METRIC_REGISTER(_data_dir_metric_entity, disks_state);
136
147
    INT_GAUGE_METRIC_REGISTER(_data_dir_metric_entity, disks_compaction_score);
137
147
    INT_GAUGE_METRIC_REGISTER(_data_dir_metric_entity, disks_compaction_num);
138
147
}
139
140
147
DataDir::~DataDir() {
141
147
    DorisMetrics::instance()->metric_registry()->deregister_entity(_data_dir_metric_entity);
142
147
    delete _meta;
143
147
}
144
145
66
Status DataDir::init(bool init_meta) {
146
66
    bool exists = false;
147
66
    RETURN_IF_ERROR(io::global_local_filesystem()->exists(_path, &exists));
148
66
    if (!exists) {
149
0
        RETURN_NOT_OK_STATUS_WITH_WARN(Status::IOError("opendir failed, path={}", _path),
150
0
                                       "check file exist failed");
151
0
    }
152
153
66
    RETURN_NOT_OK_STATUS_WITH_WARN(update_capacity(), "update_capacity failed");
154
66
    RETURN_NOT_OK_STATUS_WITH_WARN(_init_cluster_id(), "_init_cluster_id failed");
155
66
    RETURN_NOT_OK_STATUS_WITH_WARN(_init_capacity_and_create_shards(),
156
66
                                   "_init_capacity_and_create_shards failed");
157
66
    if (init_meta) {
158
66
        RETURN_NOT_OK_STATUS_WITH_WARN(_init_meta(), "_init_meta failed");
159
66
    }
160
161
66
    _is_used = true;
162
66
    return Status::OK();
163
66
}
164
165
42
void DataDir::stop_bg_worker() {
166
42
    _stop_bg_worker = true;
167
42
}
168
169
66
Status DataDir::_init_cluster_id() {
170
66
    auto cluster_id_path = fmt::format("{}/{}", _path, CLUSTER_ID_PREFIX);
171
66
    RETURN_IF_ERROR(read_cluster_id(cluster_id_path, &_cluster_id));
172
66
    if (_cluster_id == -1) {
173
66
        _cluster_id_incomplete = true;
174
66
    }
175
66
    return Status::OK();
176
66
}
177
178
66
Status DataDir::_init_capacity_and_create_shards() {
179
66
    RETURN_IF_ERROR(io::global_local_filesystem()->get_space_info(_path, &_disk_capacity_bytes,
180
66
                                                                  &_available_bytes));
181
66
    auto data_path = fmt::format("{}/{}", _path, DATA_PREFIX);
182
66
    bool exists = false;
183
66
    RETURN_IF_ERROR(io::global_local_filesystem()->exists(data_path, &exists));
184
66
    if (!exists) {
185
66
        RETURN_IF_ERROR(io::global_local_filesystem()->create_directory(data_path));
186
66
    }
187
67.6k
    for (int i = 0; i < MAX_SHARD_NUM; ++i) {
188
67.5k
        auto shard_path = fmt::format("{}/{}", data_path, i);
189
67.5k
        RETURN_IF_ERROR(io::global_local_filesystem()->exists(shard_path, &exists));
190
67.5k
        if (!exists) {
191
67.5k
            RETURN_IF_ERROR(io::global_local_filesystem()->create_directory(shard_path));
192
67.5k
        }
193
67.5k
    }
194
195
66
    return Status::OK();
196
66
}
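A minimal sketch of the shard-directory layout created by _init_capacity_and_create_shards() above, assuming only std::filesystem; the real code goes through io::global_local_filesystem() and MAX_SHARD_NUM, and the path and shard count here are made up.

#include <filesystem>
#include <string>

int main() {
    namespace fs = std::filesystem;
    const std::string data_path = "/tmp/doris_data_demo"; // stand-in for "<storage_root>/data"
    const int max_shard_num = 8;                          // MAX_SHARD_NUM is larger in Doris

    fs::create_directories(data_path);
    for (int i = 0; i < max_shard_num; ++i) {
        fs::path shard_path = fs::path(data_path) / std::to_string(i);
        if (!fs::exists(shard_path)) {
            // one numeric sub-directory per shard, e.g. .../data/0, .../data/1, ...
            fs::create_directory(shard_path);
        }
    }
}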
197
198
67
Status DataDir::_init_meta() {
199
    // init path hash
200
67
    _path_hash = hash_of_path(BackendOptions::get_localhost(), _path);
201
67
    LOG(INFO) << "path: " << _path << ", hash: " << _path_hash;
202
203
    // init meta
204
67
    _meta = new (std::nothrow) OlapMeta(_path);
205
67
    if (_meta == nullptr) {
206
0
        RETURN_NOT_OK_STATUS_WITH_WARN(
207
0
                Status::MemoryAllocFailed("allocate memory for OlapMeta failed"),
208
0
                "new OlapMeta failed");
209
0
    }
210
67
    Status res = _meta->init();
211
67
    if (!res.ok()) {
212
0
        RETURN_NOT_OK_STATUS_WITH_WARN(Status::IOError("open rocksdb failed, path={}", _path),
213
0
                                       "init OlapMeta failed");
214
0
    }
215
67
    return Status::OK();
216
67
}
217
218
0
Status DataDir::set_cluster_id(int32_t cluster_id) {
219
0
    if (_cluster_id != -1 && _cluster_id != cluster_id) {
220
0
        LOG(ERROR) << "going to set cluster id to already assigned store, cluster_id="
221
0
                   << _cluster_id << ", new_cluster_id=" << cluster_id;
222
0
        return Status::InternalError("going to set cluster id to already assigned store");
223
0
    }
224
0
    if (!_cluster_id_incomplete) {
225
0
        return Status::OK();
226
0
    }
227
0
    auto cluster_id_path = fmt::format("{}/{}", _path, CLUSTER_ID_PREFIX);
228
0
    return _write_cluster_id_to_path(cluster_id_path, cluster_id);
229
0
}
230
231
0
void DataDir::health_check() {
232
    // check disk
233
0
    if (_is_used) {
234
0
        Status res = _read_and_write_test_file();
235
0
        if (!res && res.is<IO_ERROR>()) {
236
0
            LOG(WARNING) << "store read/write test file occur IO Error. path=" << _path
237
0
                         << ", err: " << res;
238
0
            _engine.add_broken_path(_path);
239
0
            _is_used = !res.is<IO_ERROR>();
240
0
        }
241
0
    }
242
0
    disks_state->set_value(_is_used ? 1 : 0);
243
0
}
244
245
0
Status DataDir::_read_and_write_test_file() {
246
0
    auto test_file = fmt::format("{}/{}", _path, kTestFilePath);
247
0
    return read_write_test_file(test_file);
248
0
}
249
250
304
void DataDir::register_tablet(Tablet* tablet) {
251
304
    TabletInfo tablet_info(tablet->tablet_id(), tablet->tablet_uid());
252
253
304
    std::lock_guard<std::mutex> l(_mutex);
254
304
    _tablet_set.emplace(std::move(tablet_info));
255
304
}
256
257
255
void DataDir::deregister_tablet(Tablet* tablet) {
258
255
    TabletInfo tablet_info(tablet->tablet_id(), tablet->tablet_uid());
259
260
255
    std::lock_guard<std::mutex> l(_mutex);
261
255
    _tablet_set.erase(tablet_info);
262
255
}
263
264
0
void DataDir::clear_tablets(std::vector<TabletInfo>* tablet_infos) {
265
0
    std::lock_guard<std::mutex> l(_mutex);
266
267
0
    tablet_infos->insert(tablet_infos->end(), _tablet_set.begin(), _tablet_set.end());
268
0
    _tablet_set.clear();
269
0
}
270
271
0
std::string DataDir::get_absolute_shard_path(int64_t shard_id) {
272
0
    return fmt::format("{}/{}/{}", _path, DATA_PREFIX, shard_id);
273
0
}
274
275
std::string DataDir::get_absolute_tablet_path(int64_t shard_id, int64_t tablet_id,
276
0
                                              int32_t schema_hash) {
277
0
    return fmt::format("{}/{}/{}", get_absolute_shard_path(shard_id), tablet_id, schema_hash);
278
0
}
279
280
0
void DataDir::find_tablet_in_trash(int64_t tablet_id, std::vector<std::string>* paths) {
281
    // path: /root_path/trash/time_label/tablet_id/schema_hash
282
0
    auto trash_path = fmt::format("{}/{}", _path, TRASH_PREFIX);
283
0
    bool exists = true;
284
0
    std::vector<io::FileInfo> sub_dirs;
285
0
    Status st = io::global_local_filesystem()->list(trash_path, false, &sub_dirs, &exists);
286
0
    if (!st) {
287
0
        return;
288
0
    }
289
290
0
    for (auto& sub_dir : sub_dirs) {
291
        // sub dir is time_label
292
0
        if (sub_dir.is_file) {
293
0
            continue;
294
0
        }
295
0
        auto sub_path = fmt::format("{}/{}", trash_path, sub_dir.file_name);
296
0
        auto tablet_path = fmt::format("{}/{}", sub_path, tablet_id);
297
0
        st = io::global_local_filesystem()->exists(tablet_path, &exists);
298
0
        if (st && exists) {
299
0
            paths->emplace_back(std::move(tablet_path));
300
0
        }
301
0
    }
302
0
}
303
304
std::string DataDir::get_root_path_from_schema_hash_path_in_trash(
305
0
        const std::string& schema_hash_dir_in_trash) {
306
0
    return io::Path(schema_hash_dir_in_trash)
307
0
            .parent_path()
308
0
            .parent_path()
309
0
            .parent_path()
310
0
            .parent_path()
311
0
            .string();
312
0
}
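A short worked example of why four parent_path() calls recover the storage root, given the trash layout /root_path/trash/time_label/tablet_id/schema_hash documented above; the concrete path below is invented for illustration.

#include <filesystem>
#include <iostream>

int main() {
    std::filesystem::path schema_hash_dir =
            "/data1/doris.HDD/trash/20250414212700.0/10001/1234567890";
    std::filesystem::path root = schema_hash_dir
                                         .parent_path()  // .../trash/<time_label>/<tablet_id>
                                         .parent_path()  // .../trash/<time_label>
                                         .parent_path()  // .../trash
                                         .parent_path(); // /data1/doris.HDD (storage root)
    std::cout << root << std::endl;                      // prints "/data1/doris.HDD"
}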
313
314
42
Status DataDir::_check_incompatible_old_format_tablet() {
315
42
    auto check_incompatible_old_func = [](int64_t tablet_id, int32_t schema_hash,
316
42
                                          std::string_view value) -> bool {
317
        // if strict checking of incompatible old formats is enabled, fail fatally
318
0
        if (config::storage_strict_check_incompatible_old_format) {
319
0
            throw Exception(Status::FatalError(
320
0
                    "There are incompatible old format metas, current version does not support and "
321
0
                    "it may lead to data missing!!! tablet_id = {} schema_hash = {}",
322
0
                    tablet_id, schema_hash));
323
0
        } else {
324
0
            LOG(WARNING)
325
0
                    << "There are incompatible old format metas, current version does not support "
326
0
                    << "and it may lead to data missing!!! "
327
0
                    << "tablet_id = " << tablet_id << " schema_hash = " << schema_hash;
328
0
        }
329
0
        return false;
330
0
    };
331
332
    // scan for the old header prefix; check_incompatible_old_func is only invoked when old-format metas exist in olap_meta
333
42
    Status check_incompatible_old_status = TabletMetaManager::traverse_headers(
334
42
            _meta, check_incompatible_old_func, OLD_HEADER_PREFIX);
335
42
    if (!check_incompatible_old_status) {
336
0
        LOG(WARNING) << "check incompatible old format meta fails, it may lead to data missing!!! "
337
0
                     << _path;
338
42
    } else {
339
42
        LOG(INFO) << "successfully check incompatible old format meta " << _path;
340
42
    }
341
42
    return check_incompatible_old_status;
342
42
}
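The traverse_* helpers used here and in load() below share one visitor convention: the callback returns true to continue the scan and false to stop it. Below is a standalone sketch of that convention with a plain std::map standing in for OlapMeta; all names are hypothetical.

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Iterate key/value pairs until the visitor asks to stop, like traverse_headers().
void traverse(const std::map<std::string, std::string>& meta,
              const std::function<bool(const std::string&, const std::string&)>& visit) {
    for (const auto& [key, value] : meta) {
        if (!visit(key, value)) {
            break; // returning false breaks the iteration
        }
    }
}

int main() {
    std::map<std::string, std::string> meta = {{"tbh_10001_1", "meta_a"}, {"tbh_10002_1", "meta_b"}};
    traverse(meta, [](const std::string& key, const std::string& value) {
        std::cout << key << " -> " << value << std::endl;
        return true; // keep scanning; return false here to abort early
    });
}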
343
344
// TODO(ygl): deal with rowsets and tablets when load failed
345
42
Status DataDir::load() {
346
42
    LOG(INFO) << "start to load tablets from " << _path;
347
348
    // load rowset meta from meta env and create rowset
349
    // COMMITTED: add to txn manager
350
    // VISIBLE: add to tablet
351
    // if loading any rowset fails, the whole data dir will not be loaded
352
353
    // the incompatible old format must be checked first; loading old metas may lead to data loss
354
42
    RETURN_IF_ERROR(_check_incompatible_old_format_tablet());
355
356
42
    std::vector<RowsetMetaSharedPtr> dir_rowset_metas;
357
42
    LOG(INFO) << "begin loading rowset from meta";
358
42
    auto load_rowset_func = [&dir_rowset_metas, this](TabletUid tablet_uid, RowsetId rowset_id,
359
42
                                                      std::string_view meta_str) -> bool {
360
0
        RowsetMetaSharedPtr rowset_meta(new RowsetMeta());
361
0
        bool parsed = rowset_meta->init(meta_str);
362
0
        if (!parsed) {
363
0
            LOG(WARNING) << "parse rowset meta string failed for rowset_id:" << rowset_id;
364
            // returning false would break the meta iterator; return true to skip this error and continue
365
0
            return true;
366
0
        }
367
368
0
        if (rowset_meta->has_delete_predicate()) {
369
            // copy the v1 delete sub-predicates so they can be checked after conversion
370
0
            auto orig_delete_sub_pred = rowset_meta->delete_predicate().sub_predicates();
371
0
            auto* delete_pred = rowset_meta->mutable_delete_pred_pb();
372
373
0
            if ((!delete_pred->sub_predicates().empty() &&
374
0
                 delete_pred->sub_predicates_v2().empty()) ||
375
0
                (!delete_pred->in_predicates().empty() &&
376
0
                 delete_pred->in_predicates()[0].has_column_unique_id())) {
377
                // convert and rewrite the predicate only when the v2 sub-predicates are not set, or an IN predicate still needs its column unique id
378
0
                RETURN_IF_ERROR(DeleteHandler::convert_to_sub_pred_v2(
379
0
                        delete_pred, rowset_meta->tablet_schema()));
380
0
                LOG(INFO) << fmt::format(
381
0
                        "convert rowset with old delete pred: rowset_id={}, tablet_id={}",
382
0
                        rowset_id.to_string(), tablet_uid.to_string());
383
0
                CHECK_EQ(orig_delete_sub_pred.size(), delete_pred->sub_predicates().size())
384
0
                        << "inconsistent sub predicate v1 after conversion";
385
0
                for (size_t i = 0; i < orig_delete_sub_pred.size(); ++i) {
386
0
                    CHECK_STREQ(orig_delete_sub_pred.Get(i).c_str(),
387
0
                                delete_pred->sub_predicates().Get(i).c_str())
388
0
                            << "inconsistent sub predicate v1 after conversion";
389
0
                }
390
0
                std::string result;
391
0
                rowset_meta->serialize(&result);
392
0
                std::string key =
393
0
                        ROWSET_PREFIX + tablet_uid.to_string() + "_" + rowset_id.to_string();
394
0
                RETURN_IF_ERROR(_meta->put(META_COLUMN_FAMILY_INDEX, key, result));
395
0
            }
396
0
        }
397
398
0
        if (rowset_meta->partition_id() == 0) {
399
0
            LOG(WARNING) << "rs tablet=" << rowset_meta->tablet_id() << " rowset_id=" << rowset_id
400
0
                         << " load from meta but partition id eq 0";
401
0
        }
402
403
0
        dir_rowset_metas.push_back(rowset_meta);
404
0
        return true;
405
0
    };
406
42
    MonotonicStopWatch rs_timer;
407
42
    rs_timer.start();
408
42
    Status load_rowset_status = RowsetMetaManager::traverse_rowset_metas(_meta, load_rowset_func);
409
42
    rs_timer.stop();
410
42
    if (!load_rowset_status) {
411
0
        LOG(WARNING) << "errors when load rowset meta from meta env, skip this data dir:" << _path;
412
42
    } else {
413
42
        LOG(INFO) << "load rowset from meta finished, cost: "
414
42
                  << rs_timer.elapsed_time_milliseconds() << " ms, data dir: " << _path;
415
42
    }
416
417
    // load tablet
418
    // create tablet from tablet meta and add it to tablet mgr
419
42
    LOG(INFO) << "begin loading tablet from meta";
420
42
    std::set<int64_t> tablet_ids;
421
42
    std::set<int64_t> failed_tablet_ids;
422
42
    auto load_tablet_func = [this, &tablet_ids, &failed_tablet_ids](
423
42
                                    int64_t tablet_id, int32_t schema_hash,
424
42
                                    std::string_view value) -> bool {
425
0
        Status status = _engine.tablet_manager()->load_tablet_from_meta(
426
0
                this, tablet_id, schema_hash, value, false, false, false, false);
427
0
        if (!status.ok() && !status.is<TABLE_ALREADY_DELETED_ERROR>() &&
428
0
            !status.is<ENGINE_INSERT_OLD_TABLET>()) {
429
            // load_tablet_from_meta() may return Status::Error<TABLE_ALREADY_DELETED_ERROR>()
430
            // which means the tablet status is DELETED
431
            // This may happen when the tablet was just deleted before the BE restarted,
432
            // but it has not been cleared from rocksdb. At this time, restarting the BE
433
            // will read the tablet in the DELETE state from rocksdb. These tablets have been
434
            // added to the garbage collection queue and will be automatically deleted afterwards.
435
            // Therefore, we believe that this situation is not a failure.
436
437
            // Besides, load_tablet_from_meta() may return Status::Error<ENGINE_INSERT_OLD_TABLET>()
438
            // when BE is restarting and the older tablet have been added to the
439
            // garbage collection queue but not deleted yet.
440
            // In this case, since the data_dirs are parallel loaded, a later loaded tablet
441
            // may be older than previously loaded one, which should not be acknowledged as a
442
            // failure.
443
0
            LOG(WARNING) << "load tablet from header failed. status:" << status
444
0
                         << ", tablet=" << tablet_id << "." << schema_hash;
445
0
            failed_tablet_ids.insert(tablet_id);
446
0
        } else {
447
0
            tablet_ids.insert(tablet_id);
448
0
        }
449
0
        return true;
450
0
    };
451
42
    MonotonicStopWatch tablet_timer;
452
42
    tablet_timer.start();
453
42
    Status load_tablet_status = TabletMetaManager::traverse_headers(_meta, load_tablet_func);
454
42
    tablet_timer.stop();
455
42
    if (!failed_tablet_ids.empty()) {
456
0
        LOG(WARNING) << "load tablets from header failed"
457
0
                     << ", loaded tablet: " << tablet_ids.size()
458
0
                     << ", error tablet: " << failed_tablet_ids.size() << ", path: " << _path;
459
0
        if (!config::ignore_load_tablet_failure) {
460
0
            throw Exception(Status::FatalError(
461
0
                    "load tablets encounter failure. stop BE process. path: {}", _path));
462
0
        }
463
0
    }
464
42
    if (!load_tablet_status) {
465
0
        LOG(WARNING) << "there is failure when loading tablet headers"
466
0
                     << ", loaded tablet: " << tablet_ids.size()
467
0
                     << ", error tablet: " << failed_tablet_ids.size() << ", path: " << _path;
468
42
    } else {
469
42
        LOG(INFO) << "load tablet from meta finished"
470
42
                  << ", loaded tablet: " << tablet_ids.size()
471
42
                  << ", error tablet: " << failed_tablet_ids.size()
472
42
                  << ", cost: " << tablet_timer.elapsed_time_milliseconds()
473
42
                  << " ms, path: " << _path;
474
42
    }
475
476
42
    for (int64_t tablet_id : tablet_ids) {
477
0
        TabletSharedPtr tablet = _engine.tablet_manager()->get_tablet(tablet_id);
478
0
        if (tablet && tablet->set_tablet_schema_into_rowset_meta()) {
479
0
            RETURN_IF_ERROR(TabletMetaManager::save(this, tablet->tablet_id(),
480
0
                                                    tablet->schema_hash(), tablet->tablet_meta()));
481
0
        }
482
0
    }
483
484
42
    auto load_pending_publish_info_func =
485
42
            [&engine = _engine](int64_t tablet_id, int64_t publish_version, std::string_view info) {
486
0
                PendingPublishInfoPB pending_publish_info_pb;
487
0
                bool parsed = pending_publish_info_pb.ParseFromArray(info.data(), info.size());
488
0
                if (!parsed) {
489
0
                    LOG(WARNING) << "parse pending publish info failed, tablet_id: " << tablet_id
490
0
                                 << " publish_version: " << publish_version;
491
0
                }
492
0
                engine.add_async_publish_task(pending_publish_info_pb.partition_id(), tablet_id,
493
0
                                              publish_version,
494
0
                                              pending_publish_info_pb.transaction_id(), true);
495
0
                return true;
496
0
            };
497
42
    MonotonicStopWatch pending_publish_timer;
498
42
    pending_publish_timer.start();
499
42
    RETURN_IF_ERROR(
500
42
            TabletMetaManager::traverse_pending_publish(_meta, load_pending_publish_info_func));
501
42
    pending_publish_timer.stop();
502
42
    LOG(INFO) << "load pending publish task from meta finished, cost: "
503
42
              << pending_publish_timer.elapsed_time_milliseconds() << " ms, data dir: " << _path;
504
505
42
    int64_t rowset_partition_id_eq_0_num = 0;
506
42
    for (auto rowset_meta : dir_rowset_metas) {
507
0
        if (rowset_meta->partition_id() == 0) {
508
0
            ++rowset_partition_id_eq_0_num;
509
0
        }
510
0
    }
511
42
    if (rowset_partition_id_eq_0_num > config::ignore_invalid_partition_id_rowset_num) {
512
0
        throw Exception(Status::FatalError(
513
0
                "rowset partition id eq 0 is {} bigger than config {}, be exit, plz check be.INFO",
514
0
                rowset_partition_id_eq_0_num, config::ignore_invalid_partition_id_rowset_num));
515
0
    }
516
517
    // traverse rowset
518
    // 1. add committed rowset to txn map
519
    // 2. add visible rowset to tablet
520
    // ignore any errors when loading tablets or rowsets, because the FE will repair them after the report
521
42
    int64_t invalid_rowset_counter = 0;
522
42
    for (auto&& rowset_meta : dir_rowset_metas) {
523
0
        TabletSharedPtr tablet = _engine.tablet_manager()->get_tablet(rowset_meta->tablet_id());
524
        // the tablet may have been dropped while its related rowset meta was not removed
525
0
        if (tablet == nullptr) {
526
0
            VLOG_NOTICE << "could not find tablet id: " << rowset_meta->tablet_id()
527
0
                        << ", schema hash: " << rowset_meta->tablet_schema_hash()
528
0
                        << ", for rowset: " << rowset_meta->rowset_id() << ", skip this rowset";
529
0
            ++invalid_rowset_counter;
530
0
            continue;
531
0
        }
532
533
0
        if (rowset_meta->partition_id() == 0) {
534
0
            LOG(WARNING) << "skip tablet_id=" << tablet->tablet_id()
535
0
                         << " rowset: " << rowset_meta->rowset_id()
536
0
                         << " txn: " << rowset_meta->txn_id();
537
0
            continue;
538
0
        }
539
540
0
        RowsetSharedPtr rowset;
541
0
        Status create_status = tablet->create_rowset(rowset_meta, &rowset);
542
0
        if (!create_status) {
543
0
            LOG(WARNING) << "could not create rowset from rowsetmeta: "
544
0
                         << " rowset_id: " << rowset_meta->rowset_id()
545
0
                         << " rowset_type: " << rowset_meta->rowset_type()
546
0
                         << " rowset_state: " << rowset_meta->rowset_state();
547
0
            continue;
548
0
        }
549
0
        if (rowset_meta->rowset_state() == RowsetStatePB::COMMITTED &&
550
0
            rowset_meta->tablet_uid() == tablet->tablet_uid()) {
551
0
            if (!rowset_meta->tablet_schema()) {
552
0
                rowset_meta->set_tablet_schema(tablet->tablet_schema());
553
0
                RETURN_IF_ERROR(RowsetMetaManager::save(_meta, rowset_meta->tablet_uid(),
554
0
                                                        rowset_meta->rowset_id(),
555
0
                                                        rowset_meta->get_rowset_pb(), false));
556
0
            }
557
0
            Status commit_txn_status = _engine.txn_manager()->commit_txn(
558
0
                    _meta, rowset_meta->partition_id(), rowset_meta->txn_id(),
559
0
                    rowset_meta->tablet_id(), rowset_meta->tablet_uid(), rowset_meta->load_id(),
560
0
                    rowset, _engine.pending_local_rowsets().add(rowset_meta->rowset_id()), true);
561
0
            if (commit_txn_status || commit_txn_status.is<PUSH_TRANSACTION_ALREADY_EXIST>()) {
562
0
                LOG(INFO) << "successfully to add committed rowset: " << rowset_meta->rowset_id()
563
0
                          << " to tablet: " << rowset_meta->tablet_id()
564
0
                          << " schema hash: " << rowset_meta->tablet_schema_hash()
565
0
                          << " for txn: " << rowset_meta->txn_id();
566
567
0
            } else if (commit_txn_status.is<ErrorCode::INTERNAL_ERROR>()) {
568
0
                LOG(WARNING) << "failed to add committed rowset: " << rowset_meta->rowset_id()
569
0
                             << " to tablet: " << rowset_meta->tablet_id()
570
0
                             << " for txn: " << rowset_meta->txn_id()
571
0
                             << " error: " << commit_txn_status;
572
0
                return commit_txn_status;
573
0
            } else {
574
0
                LOG(WARNING) << "failed to add committed rowset: " << rowset_meta->rowset_id()
575
0
                             << " to tablet: " << rowset_meta->tablet_id()
576
0
                             << " for txn: " << rowset_meta->txn_id()
577
0
                             << " error: " << commit_txn_status;
578
0
            }
579
0
        } else if (rowset_meta->rowset_state() == RowsetStatePB::VISIBLE &&
580
0
                   rowset_meta->tablet_uid() == tablet->tablet_uid()) {
581
0
            if (!rowset_meta->tablet_schema()) {
582
0
                rowset_meta->set_tablet_schema(tablet->tablet_schema());
583
0
                RETURN_IF_ERROR(RowsetMetaManager::save(_meta, rowset_meta->tablet_uid(),
584
0
                                                        rowset_meta->rowset_id(),
585
0
                                                        rowset_meta->get_rowset_pb(), false));
586
0
            }
587
0
            Status publish_status = tablet->add_rowset(rowset);
588
0
            if (!publish_status && !publish_status.is<PUSH_VERSION_ALREADY_EXIST>()) {
589
0
                LOG(WARNING) << "add visible rowset to tablet failed rowset_id:"
590
0
                             << rowset->rowset_id() << " tablet id: " << rowset_meta->tablet_id()
591
0
                             << " txn id:" << rowset_meta->txn_id()
592
0
                             << " start_version: " << rowset_meta->version().first
593
0
                             << " end_version: " << rowset_meta->version().second;
594
0
            }
595
0
        } else {
596
0
            LOG(WARNING) << "find invalid rowset: " << rowset_meta->rowset_id()
597
0
                         << " with tablet id: " << rowset_meta->tablet_id()
598
0
                         << " tablet uid: " << rowset_meta->tablet_uid()
599
0
                         << " schema hash: " << rowset_meta->tablet_schema_hash()
600
0
                         << " txn: " << rowset_meta->txn_id()
601
0
                         << " current valid tablet uid: " << tablet->tablet_uid();
602
0
            ++invalid_rowset_counter;
603
0
        }
604
0
    }
605
606
42
    int64_t dbm_cnt {0};
607
42
    int64_t unknown_dbm_cnt {0};
608
42
    auto load_delete_bitmap_func = [this, &dbm_cnt, &unknown_dbm_cnt](int64_t tablet_id,
609
42
                                                                      int64_t version,
610
42
                                                                      std::string_view val) {
611
0
        TabletSharedPtr tablet = _engine.tablet_manager()->get_tablet(tablet_id);
612
0
        if (!tablet) {
613
0
            return true;
614
0
        }
615
0
        const std::vector<RowsetMetaSharedPtr>& all_rowsets = tablet->tablet_meta()->all_rs_metas();
616
0
        RowsetIdUnorderedSet rowset_ids;
617
0
        for (auto& rowset_meta : all_rowsets) {
618
0
            rowset_ids.insert(rowset_meta->rowset_id());
619
0
        }
620
621
0
        DeleteBitmapPB delete_bitmap_pb;
622
0
        delete_bitmap_pb.ParseFromArray(val.data(), val.size());
623
0
        int rst_ids_size = delete_bitmap_pb.rowset_ids_size();
624
0
        int seg_ids_size = delete_bitmap_pb.segment_ids_size();
625
0
        int seg_maps_size = delete_bitmap_pb.segment_delete_bitmaps_size();
626
0
        CHECK(rst_ids_size == seg_ids_size && seg_ids_size == seg_maps_size);
627
628
0
        for (size_t i = 0; i < rst_ids_size; ++i) {
629
0
            RowsetId rst_id;
630
0
            rst_id.init(delete_bitmap_pb.rowset_ids(i));
631
            // only process the rowset in _rs_metas
632
0
            if (rowset_ids.find(rst_id) == rowset_ids.end()) {
633
0
                ++unknown_dbm_cnt;
634
0
                continue;
635
0
            }
636
0
            ++dbm_cnt;
637
0
            auto seg_id = delete_bitmap_pb.segment_ids(i);
638
0
            auto iter = tablet->tablet_meta()->delete_bitmap().delete_bitmap.find(
639
0
                    {rst_id, seg_id, version});
640
            // This version of delete bitmap already exists
641
0
            if (iter != tablet->tablet_meta()->delete_bitmap().delete_bitmap.end()) {
642
0
                continue;
643
0
            }
644
0
            auto bitmap = delete_bitmap_pb.segment_delete_bitmaps(i).data();
645
0
            tablet->tablet_meta()->delete_bitmap().delete_bitmap[{rst_id, seg_id, version}] =
646
0
                    roaring::Roaring::read(bitmap);
647
0
        }
648
0
        return true;
649
0
    };
650
42
    MonotonicStopWatch dbm_timer;
651
42
    dbm_timer.start();
652
42
    RETURN_IF_ERROR(TabletMetaManager::traverse_delete_bitmap(_meta, load_delete_bitmap_func));
653
42
    dbm_timer.stop();
654
655
42
    LOG(INFO) << "load delete bitmap from meta finished, cost: "
656
42
              << dbm_timer.elapsed_time_milliseconds() << " ms, data dir: " << _path;
657
658
    // At startup, we only count these invalid rowsets but do not actually delete them.
659
    // The actual delete operation is in StorageEngine::_clean_unused_rowset_metas,
660
    // which is run periodically by the background cleanup thread.
661
42
    LOG(INFO) << "finish to load tablets from " << _path
662
42
              << ", total rowset meta: " << dir_rowset_metas.size()
663
42
              << ", invalid rowset num: " << invalid_rowset_counter
664
42
              << ", visible/stale rowsets' delete bitmap count: " << dbm_cnt
665
42
              << ", invalid rowsets' delete bitmap count: " << unknown_dbm_cnt;
666
667
42
    return Status::OK();
668
42
}
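load() above restores per-segment delete bitmaps with roaring::Roaring::read(). A small serialize/deserialize round-trip sketch, assuming the CRoaring C++ wrapper from <roaring/roaring.hh>; the row ids are made up.

#include <roaring/roaring.hh>

#include <iostream>
#include <string>

int main() {
    roaring::Roaring bitmap; // rows deleted in one segment
    bitmap.add(3);
    bitmap.add(42);

    // Serialize: roughly what would be stored in DeleteBitmapPB.segment_delete_bitmaps.
    std::string buf(bitmap.getSizeInBytes(), '\0');
    bitmap.write(buf.data());

    // Deserialize: the counterpart of roaring::Roaring::read() in load_delete_bitmap_func.
    roaring::Roaring restored = roaring::Roaring::read(buf.data());
    std::cout << restored.cardinality() << std::endl; // prints 2
    std::cout << restored.contains(42) << std::endl;  // prints 1
}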
669
670
// gc unused local tablet dir
671
20
void DataDir::_perform_tablet_gc(const std::string& tablet_schema_hash_path, int16_t shard_id) {
672
20
    if (_stop_bg_worker) {
673
0
        return;
674
0
    }
675
676
20
    TTabletId tablet_id = -1;
677
20
    TSchemaHash schema_hash = -1;
678
20
    bool is_valid = TabletManager::get_tablet_id_and_schema_hash_from_path(
679
20
            tablet_schema_hash_path, &tablet_id, &schema_hash);
680
20
    if (!is_valid || tablet_id < 1 || schema_hash < 1) [[unlikely]] {
681
0
        LOG(WARNING) << "[path gc] unknown path: " << tablet_schema_hash_path;
682
0
        return;
683
0
    }
684
685
20
    auto tablet = _engine.tablet_manager()->get_tablet(tablet_id);
686
20
    if (!tablet || tablet->data_dir() != this) {
687
10
        if (tablet) {
688
0
            LOG(INFO) << "The tablet in path " << tablet_schema_hash_path
689
0
                      << " is not same with the running one: " << tablet->tablet_path()
690
0
                      << ", might be the old tablet after migration, try to move it to trash";
691
0
        }
692
10
        _engine.tablet_manager()->try_delete_unused_tablet_path(this, tablet_id, schema_hash,
693
10
                                                                tablet_schema_hash_path, shard_id);
694
10
        return;
695
10
    }
696
697
10
    _perform_rowset_gc(tablet_schema_hash_path);
698
10
}
699
700
// gc unused local rowsets under tablet dir
701
10
void DataDir::_perform_rowset_gc(const std::string& tablet_schema_hash_path) {
702
10
    if (_stop_bg_worker) {
703
0
        return;
704
0
    }
705
706
10
    TTabletId tablet_id = -1;
707
10
    TSchemaHash schema_hash = -1;
708
10
    bool is_valid = doris::TabletManager::get_tablet_id_and_schema_hash_from_path(
709
10
            tablet_schema_hash_path, &tablet_id, &schema_hash);
710
10
    if (!is_valid || tablet_id < 1 || schema_hash < 1) [[unlikely]] {
711
0
        LOG(WARNING) << "[path gc] unknown path: " << tablet_schema_hash_path;
712
0
        return;
713
0
    }
714
715
10
    auto tablet = _engine.tablet_manager()->get_tablet(tablet_id);
716
10
    if (!tablet) {
717
        // Could not find the tablet; it may have been dropped and will be reclaimed
718
        // the next time `_perform_path_gc_by_tablet` runs
719
0
        return;
720
0
    }
721
722
10
    if (tablet->data_dir() != this) {
723
        // The currently running tablet is not in this data_dir; it may be a leftover after migration
724
        // and will be reclaimed the next time `_perform_path_gc_by_tablet` runs
725
0
        return;
726
0
    }
727
728
10
    bool exists;
729
10
    std::vector<io::FileInfo> files;
730
10
    auto st = io::global_local_filesystem()->list(tablet_schema_hash_path, true, &files, &exists);
731
10
    if (!st.ok()) [[unlikely]] {
732
0
        LOG(WARNING) << "[path gc] fail to list tablet path " << tablet_schema_hash_path << " : "
733
0
                     << st;
734
0
        return;
735
0
    }
736
737
    // Rowset files excluding pending rowsets
738
10
    std::vector<std::pair<RowsetId, std::string /* filename */>> rowsets_not_pending;
739
480
    for (auto&& file : files) {
740
480
        auto rowset_id = extract_rowset_id(file.file_name);
741
480
        if (rowset_id.hi == 0) {
742
0
            continue; // Not a rowset
743
0
        }
744
745
480
        if (_engine.pending_local_rowsets().contains(rowset_id)) {
746
80
            continue; // Pending rowset file
747
80
        }
748
749
400
        rowsets_not_pending.emplace_back(rowset_id, std::move(file.file_name));
750
400
    }
751
752
10
    RowsetIdUnorderedSet rowsets_in_version_map;
753
10
    tablet->traverse_rowsets(
754
30
            [&rowsets_in_version_map](auto& rs) { rowsets_in_version_map.insert(rs->rowset_id()); },
755
10
            true);
756
757
80
    auto reclaim_rowset_file = [](const std::string& path) {
758
80
        auto st = io::global_local_filesystem()->delete_file(path);
759
80
        if (!st.ok()) [[unlikely]] {
760
0
            LOG(WARNING) << "[path gc] failed to delete garbage rowset file: " << st;
761
0
            return;
762
0
        }
763
80
        LOG(INFO) << "[path gc] delete garbage path: " << path; // Audit log
764
80
    };
765
766
100
    auto should_reclaim = [&, this](const RowsetId& rowset_id) {
767
100
        return !rowsets_in_version_map.contains(rowset_id) &&
768
100
               !_engine.check_rowset_id_in_unused_rowsets(rowset_id) &&
769
100
               RowsetMetaManager::exists(get_meta(), tablet->tablet_uid(), rowset_id)
770
40
                       .is<META_KEY_NOT_FOUND>();
771
100
    };
772
773
    // rowset_id -> is_garbage
774
10
    std::unordered_map<RowsetId, bool> checked_rowsets;
775
400
    for (auto&& [rowset_id, filename] : rowsets_not_pending) {
776
400
        if (_stop_bg_worker) {
777
0
            return;
778
0
        }
779
780
400
        if (auto it = checked_rowsets.find(rowset_id); it != checked_rowsets.end()) {
781
300
            if (it->second) { // Is checked garbage rowset
782
60
                reclaim_rowset_file(tablet_schema_hash_path + '/' + filename);
783
60
            }
784
300
            continue;
785
300
        }
786
787
100
        if (should_reclaim(rowset_id)) {
788
20
            if (config::path_gc_check_step > 0 &&
789
20
                ++_path_gc_step % config::path_gc_check_step == 0) {
790
0
                std::this_thread::sleep_for(
791
0
                        std::chrono::milliseconds(config::path_gc_check_step_interval_ms));
792
0
            }
793
20
            reclaim_rowset_file(tablet_schema_hash_path + '/' + filename);
794
20
            checked_rowsets.emplace(rowset_id, true);
795
80
        } else {
796
80
            checked_rowsets.emplace(rowset_id, false);
797
80
        }
798
100
    }
799
10
}
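A standalone sketch of the memoization pattern used above: should_reclaim() is evaluated once per rowset id and the verdict is reused for every file of that rowset. The ids and filenames below are invented.

#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

int main() {
    // Several files usually belong to the same rowset (one per segment or index).
    std::vector<std::pair<std::string, std::string>> files = {
            {"rs_1", "rs_1_0.dat"}, {"rs_1", "rs_1_1.dat"}, {"rs_2", "rs_2_0.dat"}};

    auto should_reclaim = [](const std::string& rowset_id) {
        std::cout << "expensive check for " << rowset_id << std::endl; // e.g. a meta lookup
        return rowset_id == "rs_1";
    };

    std::unordered_map<std::string, bool> checked; // rowset_id -> is_garbage
    for (const auto& [rowset_id, filename] : files) {
        auto it = checked.find(rowset_id);
        if (it == checked.end()) {
            it = checked.emplace(rowset_id, should_reclaim(rowset_id)).first; // check once per rowset
        }
        if (it->second) {
            std::cout << "reclaim " << filename << std::endl;
        }
    }
}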
800
801
1
void DataDir::perform_path_gc() {
802
1
    if (_stop_bg_worker) {
803
0
        return;
804
0
    }
805
806
1
    LOG(INFO) << "start to gc data dir " << _path;
807
1
    auto data_path = fmt::format("{}/{}", _path, DATA_PREFIX);
808
1
    std::vector<io::FileInfo> shards;
809
1
    bool exists = true;
810
1
    const auto& fs = io::global_local_filesystem();
811
1
    auto st = fs->list(data_path, false, &shards, &exists);
812
1
    if (!st.ok()) [[unlikely]] {
813
0
        LOG(WARNING) << "failed to scan data dir: " << st;
814
0
        return;
815
0
    }
816
817
4
    for (const auto& shard : shards) {
818
4
        if (_stop_bg_worker) {
819
0
            break;
820
0
        }
821
822
4
        if (shard.is_file) {
823
0
            continue;
824
0
        }
825
826
4
        auto shard_path = fmt::format("{}/{}", data_path, shard.file_name);
827
4
        std::vector<io::FileInfo> tablet_ids;
828
4
        st = io::global_local_filesystem()->list(shard_path, false, &tablet_ids, &exists);
829
4
        if (!st.ok()) [[unlikely]] {
830
0
            LOG(WARNING) << "fail to walk dir, shard_path=" << shard_path << " : " << st;
831
0
            continue;
832
0
        }
833
834
20
        for (const auto& tablet_id : tablet_ids) {
835
20
            if (_stop_bg_worker) {
836
0
                break;
837
0
            }
838
839
20
            if (tablet_id.is_file) {
840
0
                continue;
841
0
            }
842
843
20
            auto tablet_id_path = fmt::format("{}/{}", shard_path, tablet_id.file_name);
844
20
            std::vector<io::FileInfo> schema_hashes;
845
20
            st = fs->list(tablet_id_path, false, &schema_hashes, &exists);
846
20
            if (!st.ok()) [[unlikely]] {
847
0
                LOG(WARNING) << "fail to walk dir, tablet_id_path=" << tablet_id_path << " : "
848
0
                             << st;
849
0
                continue;
850
0
            }
851
852
20
            for (auto&& schema_hash : schema_hashes) {
853
20
                if (schema_hash.is_file) {
854
0
                    continue;
855
0
                }
856
857
20
                if (config::path_gc_check_step > 0 &&
858
20
                    ++_path_gc_step % config::path_gc_check_step == 0) {
859
0
                    std::this_thread::sleep_for(
860
0
                            std::chrono::milliseconds(config::path_gc_check_step_interval_ms));
861
0
                }
862
20
                int16_t shard_id = -1;
863
20
                try {
864
20
                    shard_id = std::stoi(shard.file_name);
865
20
                } catch (const std::exception&) {
866
0
                    LOG(WARNING) << "failed to stoi shard_id, shard name=" << shard.file_name;
867
0
                    continue;
868
0
                }
869
20
                _perform_tablet_gc(tablet_id_path + '/' + schema_hash.file_name, shard_id);
870
20
            }
871
20
        }
872
4
    }
873
874
1
    LOG(INFO) << "gc data dir path: " << _path << " finished";
875
1
}
876
877
147
Status DataDir::update_capacity() {
878
147
    RETURN_IF_ERROR(io::global_local_filesystem()->get_space_info(_path, &_disk_capacity_bytes,
879
147
                                                                  &_available_bytes));
880
136
    disks_total_capacity->set_value(_disk_capacity_bytes);
881
136
    disks_avail_capacity->set_value(_available_bytes);
882
136
    LOG(INFO) << "path: " << _path << " total capacity: " << _disk_capacity_bytes
883
136
              << ", available capacity: " << _available_bytes << ", usage: " << get_usage(0)
884
136
              << ", in_use: " << is_used();
885
886
136
    return Status::OK();
887
147
}
888
889
0
void DataDir::update_trash_capacity() {
890
0
    auto trash_path = fmt::format("{}/{}", _path, TRASH_PREFIX);
891
0
    try {
892
0
        _trash_used_bytes = _engine.get_file_or_directory_size(trash_path);
893
0
    } catch (const std::filesystem::filesystem_error& e) {
894
0
        LOG(WARNING) << "update trash capacity failed, path: " << _path << ", err: " << e.what();
895
0
        return;
896
0
    }
897
0
    disks_trash_used_capacity->set_value(_trash_used_bytes);
898
0
    LOG(INFO) << "path: " << _path << " trash capacity: " << _trash_used_bytes;
899
0
}
900
901
0
void DataDir::update_local_data_size(int64_t size) {
902
0
    disks_local_used_capacity->set_value(size);
903
0
}
904
905
0
void DataDir::update_remote_data_size(int64_t size) {
906
0
    disks_remote_used_capacity->set_value(size);
907
0
}
908
909
0
size_t DataDir::tablet_size() const {
910
0
    std::lock_guard<std::mutex> l(_mutex);
911
0
    return _tablet_set.size();
912
0
}
913
914
307
bool DataDir::reach_capacity_limit(int64_t incoming_data_size) {
915
307
    double used_pct = get_usage(incoming_data_size);
916
307
    int64_t left_bytes = _available_bytes - incoming_data_size;
917
307
    if (used_pct >= config::storage_flood_stage_usage_percent / 100.0 &&
918
307
        left_bytes <= config::storage_flood_stage_left_capacity_bytes) {
919
0
        LOG(WARNING) << "reach capacity limit. used pct: " << used_pct
920
0
                     << ", left bytes: " << left_bytes << ", path: " << _path;
921
0
        return true;
922
0
    }
923
307
    return false;
924
307
}
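A worked example of the flood-stage condition above with made-up numbers: the write is rejected only when both the relative usage and the absolute remaining space cross their thresholds. The usage formula below is an assumption about what get_usage() computes, not its actual implementation.

#include <cstdint>
#include <iostream>

int main() {
    const double flood_stage_usage_percent = 90.0;              // cf. config::storage_flood_stage_usage_percent
    const int64_t flood_stage_left_capacity_bytes = 1LL << 30;  // cf. config::storage_flood_stage_left_capacity_bytes

    const int64_t disk_capacity_bytes = 100LL * (1LL << 30);    // 100 GiB disk
    const int64_t available_bytes = 5LL * (1LL << 30);          // 5 GiB free
    const int64_t incoming_data_size = 4LL * (1LL << 30);       // incoming 4 GiB write

    // Assumed usage definition: fraction of the disk used after the incoming write lands.
    double used_pct =
            1.0 - double(available_bytes - incoming_data_size) / double(disk_capacity_bytes);
    int64_t left_bytes = available_bytes - incoming_data_size;

    bool reject = used_pct >= flood_stage_usage_percent / 100.0 &&
                  left_bytes <= flood_stage_left_capacity_bytes;
    std::cout << "used_pct=" << used_pct << ", left_bytes=" << left_bytes
              << ", reject=" << reject << std::endl; // used_pct=0.99, 1 GiB left -> reject=1
}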
925
926
0
void DataDir::disks_compaction_score_increment(int64_t delta) {
927
0
    disks_compaction_score->increment(delta);
928
0
}
929
930
0
void DataDir::disks_compaction_num_increment(int64_t delta) {
931
0
    disks_compaction_num->increment(delta);
932
0
}
933
934
235
Status DataDir::move_to_trash(const std::string& tablet_path) {
935
235
    if (config::trash_file_expire_time_sec <= 0) {
936
235
        LOG(INFO) << "delete tablet dir " << tablet_path
937
235
                  << " directly due to trash_file_expire_time_sec is 0";
938
235
        RETURN_IF_ERROR(io::global_local_filesystem()->delete_directory(tablet_path));
939
235
        return delete_tablet_parent_path_if_empty(tablet_path);
940
235
    }
941
942
0
    Status res = Status::OK();
943
    // 1. get timestamp string
944
0
    string time_str;
945
0
    if ((res = gen_timestamp_string(&time_str)) != Status::OK()) {
946
0
        LOG(WARNING) << "failed to generate time_string when move file to trash.err code=" << res;
947
0
        return res;
948
0
    }
949
950
    // 2. generate new file path
951
    // a global counter to avoid file name duplication.
952
0
    static std::atomic<uint64_t> delete_counter(0);
953
0
    auto trash_root_path =
954
0
            fmt::format("{}/{}/{}.{}", _path, TRASH_PREFIX, time_str, delete_counter++);
955
0
    auto fs_tablet_path = io::Path(tablet_path);
956
0
    auto trash_tablet_path = trash_root_path /
957
0
                             fs_tablet_path.parent_path().filename() /* tablet_id */ /
958
0
                             fs_tablet_path.filename() /* schema_hash */;
959
960
    // 3. create target dir, or the rename() function will fail.
961
0
    auto trash_tablet_parent = trash_tablet_path.parent_path();
962
    // create dir if not exists
963
0
    bool exists = true;
964
0
    RETURN_IF_ERROR(io::global_local_filesystem()->exists(trash_tablet_parent, &exists));
965
0
    if (!exists) {
966
0
        RETURN_IF_ERROR(io::global_local_filesystem()->create_directory(trash_tablet_parent));
967
0
    }
968
969
    // 4. move tablet to trash
970
0
    VLOG_NOTICE << "move file to trash. " << tablet_path << " -> " << trash_tablet_path;
971
0
    if (rename(tablet_path.c_str(), trash_tablet_path.c_str()) < 0) {
972
0
        return Status::Error<OS_ERROR>("move file to trash failed. file={}, target={}, err={}",
973
0
                                       tablet_path, trash_tablet_path.native(), Errno::str());
974
0
    }
975
976
    // 5. check parent dir of source file, delete it when empty
977
0
    RETURN_IF_ERROR(delete_tablet_parent_path_if_empty(tablet_path));
978
979
0
    return Status::OK();
980
0
}
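A sketch of how a unique trash directory name can be derived from a timestamp plus a process-wide counter, mirroring move_to_trash() above. The time format and the paths are assumptions for illustration, not necessarily what gen_timestamp_string() produces.

#include <atomic>
#include <chrono>
#include <cstdint>
#include <ctime>
#include <filesystem>
#include <iostream>
#include <string>

std::string timestamp_string() {
    std::time_t now = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
    char buf[32];
    std::strftime(buf, sizeof(buf), "%Y%m%d%H%M%S", std::localtime(&now));
    return buf;
}

int main() {
    static std::atomic<uint64_t> delete_counter(0); // avoids name collisions within one second
    std::filesystem::path root = "/data1/doris.HDD";
    std::filesystem::path tablet_path = root / "data/0/10001/1234567890"; // shard/tablet_id/schema_hash

    std::filesystem::path trash_root =
            root / "trash" / (timestamp_string() + "." + std::to_string(delete_counter++));
    std::filesystem::path trash_tablet_path = trash_root /
                                              tablet_path.parent_path().filename() /* tablet_id */ /
                                              tablet_path.filename() /* schema_hash */;
    std::cout << trash_tablet_path << std::endl;
    // e.g. "/data1/doris.HDD/trash/20250414212700.0/10001/1234567890"
}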
981
982
235
Status DataDir::delete_tablet_parent_path_if_empty(const std::string& tablet_path) {
983
235
    auto fs_tablet_path = io::Path(tablet_path);
984
235
    std::string source_parent_dir = fs_tablet_path.parent_path(); // tablet_id level
985
235
    std::vector<io::FileInfo> sub_files;
986
235
    bool exists = true;
987
235
    RETURN_IF_ERROR(
988
235
            io::global_local_filesystem()->list(source_parent_dir, false, &sub_files, &exists));
989
235
    if (sub_files.empty()) {
990
235
        LOG(INFO) << "remove empty dir " << source_parent_dir;
991
        // no need to exam return status
992
235
        RETURN_IF_ERROR(io::global_local_filesystem()->delete_directory(source_parent_dir));
993
235
    }
994
235
    return Status::OK();
995
235
}
996
997
0
void DataDir::perform_remote_rowset_gc() {
998
0
    std::vector<std::pair<std::string, std::string>> gc_kvs;
999
0
    auto traverse_remote_rowset_func = [&gc_kvs](std::string_view key,
1000
0
                                                 std::string_view value) -> bool {
1001
0
        gc_kvs.emplace_back(key, value);
1002
0
        return true;
1003
0
    };
1004
0
    static_cast<void>(_meta->iterate(META_COLUMN_FAMILY_INDEX, REMOTE_ROWSET_GC_PREFIX,
1005
0
                                     traverse_remote_rowset_func));
1006
0
    std::vector<std::string> deleted_keys;
1007
0
    for (auto& [key, val] : gc_kvs) {
1008
0
        auto rowset_id = key.substr(REMOTE_ROWSET_GC_PREFIX.size());
1009
0
        RemoteRowsetGcPB gc_pb;
1010
0
        if (!gc_pb.ParseFromString(val)) {
1011
0
            LOG(WARNING) << "malformed RemoteRowsetGcPB. rowset_id=" << rowset_id;
1012
0
            deleted_keys.push_back(std::move(key));
1013
0
            continue;
1014
0
        }
1015
1016
0
        auto storage_resource = get_storage_resource(gc_pb.resource_id());
1017
0
        if (!storage_resource) {
1018
0
            LOG(WARNING) << "Cannot get file system: " << gc_pb.resource_id();
1019
0
            continue;
1020
0
        }
1021
1022
0
        std::vector<io::Path> seg_paths;
1023
0
        seg_paths.reserve(gc_pb.num_segments());
1024
0
        for (int i = 0; i < gc_pb.num_segments(); ++i) {
1025
0
            seg_paths.emplace_back(
1026
0
                    storage_resource->first.remote_segment_path(gc_pb.tablet_id(), rowset_id, i));
1027
0
        }
1028
1029
0
        auto& fs = storage_resource->first.fs;
1030
0
        LOG(INFO) << "delete remote rowset. root_path=" << fs->root_path()
1031
0
                  << ", rowset_id=" << rowset_id;
1032
0
        auto st = fs->batch_delete(seg_paths);
1033
0
        if (st.ok()) {
1034
0
            deleted_keys.push_back(std::move(key));
1035
0
            unused_remote_rowset_num << -1;
1036
0
        } else {
1037
0
            LOG(WARNING) << "failed to delete remote rowset. err=" << st;
1038
0
        }
1039
0
    }
1040
0
    for (const auto& key : deleted_keys) {
1041
0
        static_cast<void>(_meta->remove(META_COLUMN_FAMILY_INDEX, key));
1042
0
    }
1043
0
}
1044
1045
0
void DataDir::perform_remote_tablet_gc() {
1046
0
    std::vector<std::pair<std::string, std::string>> tablet_gc_kvs;
1047
0
    auto traverse_remote_tablet_func = [&tablet_gc_kvs](std::string_view key,
1048
0
                                                        std::string_view value) -> bool {
1049
0
        tablet_gc_kvs.emplace_back(key, value);
1050
0
        return true;
1051
0
    };
1052
0
    static_cast<void>(_meta->iterate(META_COLUMN_FAMILY_INDEX, REMOTE_TABLET_GC_PREFIX,
1053
0
                                     traverse_remote_tablet_func));
1054
0
    std::vector<std::string> deleted_keys;
1055
0
    for (auto& [key, val] : tablet_gc_kvs) {
1056
0
        auto tablet_id = key.substr(REMOTE_TABLET_GC_PREFIX.size());
1057
0
        RemoteTabletGcPB gc_pb;
1058
0
        if (!gc_pb.ParseFromString(val)) {
1059
0
            LOG(WARNING) << "malformed RemoteTabletGcPB. tablet_id=" << tablet_id;
1060
0
            deleted_keys.push_back(std::move(key));
1061
0
            continue;
1062
0
        }
1063
0
        bool success = true;
1064
0
        for (auto& resource_id : gc_pb.resource_ids()) {
1065
0
            auto fs = get_filesystem(resource_id);
1066
0
            if (!fs) {
1067
0
                LOG(WARNING) << "could not get file system. resource_id=" << resource_id;
1068
0
                success = false;
1069
0
                continue;
1070
0
            }
1071
0
            LOG(INFO) << "delete remote rowsets of tablet. root_path=" << fs->root_path()
1072
0
                      << ", tablet_id=" << tablet_id;
1073
0
            auto st = fs->delete_directory(DATA_PREFIX + '/' + tablet_id);
1074
0
            if (!st.ok()) {
1075
0
                LOG(WARNING) << "failed to delete all remote rowset in tablet. err=" << st;
1076
0
                success = false;
1077
0
            }
1078
0
        }
1079
0
        if (success) {
1080
0
            deleted_keys.push_back(std::move(key));
1081
0
        }
1082
0
    }
1083
0
    for (const auto& key : deleted_keys) {
1084
0
        static_cast<void>(_meta->remove(META_COLUMN_FAMILY_INDEX, key));
1085
0
    }
1086
0
}
1087
1088
} // namespace doris