/root/doris/cloud/src/recycler/checker.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "recycler/checker.h" |
19 | | |
20 | | #include <aws/s3/S3Client.h> |
21 | | #include <aws/s3/model/ListObjectsV2Request.h> |
22 | | #include <butil/endpoint.h> |
23 | | #include <butil/strings/string_split.h> |
24 | | #include <fmt/core.h> |
25 | | #include <gen_cpp/cloud.pb.h> |
26 | | #include <gen_cpp/olap_file.pb.h> |
27 | | #include <glog/logging.h> |
28 | | |
29 | | #include <chrono> |
30 | | #include <cstdint> |
31 | | #include <memory> |
32 | | #include <mutex> |
33 | | #include <sstream> |
34 | | #include <string_view> |
35 | | #include <unordered_set> |
36 | | #include <vector> |
37 | | |
38 | | #include "common/bvars.h" |
39 | | #include "common/config.h" |
40 | | #include "common/encryption_util.h" |
41 | | #include "common/logging.h" |
42 | | #include "common/util.h" |
43 | | #include "cpp/sync_point.h" |
44 | | #include "meta-service/keys.h" |
45 | | #include "meta-service/txn_kv.h" |
46 | | #include "meta-service/txn_kv_error.h" |
47 | | #include "recycler/hdfs_accessor.h" |
48 | | #include "recycler/s3_accessor.h" |
49 | | #include "recycler/storage_vault_accessor.h" |
50 | | #ifdef UNIT_TEST |
51 | | #include "../test/mock_accessor.h" |
52 | | #endif |
53 | | #include "recycler/util.h" |
54 | | |
55 | | namespace doris::cloud { |
56 | | namespace config { |
57 | | extern int32_t brpc_listen_port; |
58 | | extern int32_t scan_instances_interval_seconds; |
59 | | extern int32_t recycle_job_lease_expired_ms; |
60 | | extern int32_t recycle_concurrency; |
61 | | extern std::vector<std::string> recycle_whitelist; |
62 | | extern std::vector<std::string> recycle_blacklist; |
63 | | extern bool enable_inverted_check; |
64 | | } // namespace config |
65 | | |
66 | 5 | Checker::Checker(std::shared_ptr<TxnKv> txn_kv) : txn_kv_(std::move(txn_kv)) { |
67 | 5 | ip_port_ = std::string(butil::my_ip_cstr()) + ":" + std::to_string(config::brpc_listen_port); |
68 | 5 | } |
69 | | |
70 | 5 | Checker::~Checker() { |
71 | 5 | if (!stopped()) { |
72 | 1 | stop(); |
73 | 1 | } |
74 | 5 | } |
75 | | |
// Launch all background worker threads:
//   - one instance scanner that periodically enumerates instances and
//     enqueues them for checking,
//   - one lease thread that renews ownership of running check jobs,
//   - one inspect thread that alarms on overdue checks,
//   - `recycle_concurrency` checker threads that consume the pending queue.
// Returns 0 on success.
int Checker::start() {
    DCHECK(txn_kv_);
    instance_filter_.reset(config::recycle_whitelist, config::recycle_blacklist);

    // launch instance scanner
    auto scanner_func = [this]() {
        // Delay the first scan after process start.
        std::this_thread::sleep_for(
                std::chrono::seconds(config::recycler_sleep_before_scheduling_seconds));
        while (!stopped()) {
            std::vector<InstanceInfoPB> instances;
            get_all_instances(txn_kv_.get(), instances);
            LOG(INFO) << "Checker get instances: " << [&instances] {
                std::stringstream ss;
                for (auto& i : instances) ss << ' ' << i.instance_id();
                return ss.str();
            }();
            if (!instances.empty()) {
                // enqueue instances
                std::lock_guard lock(mtx_);
                for (auto& instance : instances) {
                    if (instance_filter_.filter_out(instance.instance_id())) continue;
                    if (instance.status() == InstanceInfoPB::DELETED) continue;
                    using namespace std::chrono;
                    // Record enqueue time so workers can report queueing latency.
                    auto enqueue_time_s =
                            duration_cast<seconds>(system_clock::now().time_since_epoch()).count();
                    auto [_, success] =
                            pending_instance_map_.insert({instance.instance_id(), enqueue_time_s});
                    // skip instance already in pending queue
                    if (success) {
                        pending_instance_queue_.push_back(std::move(instance));
                    }
                }
                pending_instance_cond_.notify_all();
            }
            {
                // Sleep until the next scan round, waking early on stop().
                std::unique_lock lock(mtx_);
                notifier_.wait_for(lock,
                                   std::chrono::seconds(config::scan_instances_interval_seconds),
                                   [&]() { return stopped(); });
            }
        }
    };
    workers_.emplace_back(scanner_func);
    // Launch lease thread
    workers_.emplace_back([this] { lease_check_jobs(); });
    // Launch inspect thread
    workers_.emplace_back([this] { inspect_instance_check_interval(); });

    // launch check workers
    auto checker_func = [this]() {
        while (!stopped()) {
            // fetch instance to check
            InstanceInfoPB instance;
            long enqueue_time_s = 0;
            {
                std::unique_lock lock(mtx_);
                pending_instance_cond_.wait(lock, [&]() -> bool {
                    return !pending_instance_queue_.empty() || stopped();
                });
                if (stopped()) {
                    return;
                }
                instance = std::move(pending_instance_queue_.front());
                pending_instance_queue_.pop_front();
                enqueue_time_s = pending_instance_map_[instance.instance_id()];
                pending_instance_map_.erase(instance.instance_id());
            }
            const auto& instance_id = instance.instance_id();
            {
                std::lock_guard lock(mtx_);
                // skip instance in recycling
                if (working_instance_map_.count(instance_id)) continue;
            }
            auto checker = std::make_shared<InstanceChecker>(txn_kv_, instance.instance_id());
            if (checker->init(instance) != 0) {
                LOG(WARNING) << "failed to init instance checker, instance_id="
                             << instance.instance_id();
                continue;
            }
            std::string check_job_key;
            job_check_key({instance.instance_id()}, &check_job_key);
            // Try to claim the distributed check job; losing the race to
            // another checker process simply skips this instance.
            int ret = prepare_instance_recycle_job(txn_kv_.get(), check_job_key,
                                                   instance.instance_id(), ip_port_,
                                                   config::check_object_interval_seconds * 1000);
            if (ret != 0) { // Prepare failed
                continue;
            } else {
                std::lock_guard lock(mtx_);
                working_instance_map_.emplace(instance_id, checker);
            }
            if (stopped()) return;
            using namespace std::chrono;
            auto ctime_ms =
                    duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
            g_bvar_checker_enqueue_cost_s.put(instance_id, ctime_ms / 1000 - enqueue_time_s);

            // Run every enabled check; any non-zero result marks the job failed.
            bool success {true};

            if (int ret = checker->do_check(); ret != 0) {
                success = false;
            }

            if (config::enable_inverted_check) {
                if (int ret = checker->do_inverted_check(); ret != 0) {
                    success = false;
                }
            }

            if (config::enable_delete_bitmap_inverted_check) {
                if (int ret = checker->do_delete_bitmap_inverted_check(); ret != 0) {
                    success = false;
                }
            }

            if (config::enable_delete_bitmap_storage_optimize_check) {
                if (int ret = checker->do_delete_bitmap_storage_optimize_check(); ret != 0) {
                    success = false;
                }
            }

            if (config::enable_mow_compaction_key_check) {
                if (int ret = checker->do_mow_compaction_key_check(); ret != 0) {
                    success = false;
                }
            }

            // If instance checker has been aborted, don't finish this job
            if (!checker->stopped()) {
                finish_instance_recycle_job(txn_kv_.get(), check_job_key, instance.instance_id(),
                                            ip_port_, success, ctime_ms);
            }
            {
                std::lock_guard lock(mtx_);
                working_instance_map_.erase(instance.instance_id());
            }
        }
    };
    int num_threads = config::recycle_concurrency; // FIXME: use a new config entry?
    for (int i = 0; i < num_threads; ++i) {
        workers_.emplace_back(checker_func);
    }
    return 0;
}
219 | | |
220 | 5 | void Checker::stop() { |
221 | 5 | stopped_ = true; |
222 | 5 | notifier_.notify_all(); |
223 | 5 | pending_instance_cond_.notify_all(); |
224 | 5 | { |
225 | 5 | std::lock_guard lock(mtx_); |
226 | 5 | for (auto& [_, checker] : working_instance_map_) { |
227 | 0 | checker->stop(); |
228 | 0 | } |
229 | 5 | } |
230 | 20 | for (auto& w : workers_) { |
231 | 20 | if (w.joinable()) w.join(); |
232 | 20 | } |
233 | 5 | } |
234 | | |
235 | 4 | void Checker::lease_check_jobs() { |
236 | 54 | while (!stopped()) { |
237 | 50 | std::vector<std::string> instances; |
238 | 50 | instances.reserve(working_instance_map_.size()); |
239 | 50 | { |
240 | 50 | std::lock_guard lock(mtx_); |
241 | 50 | for (auto& [id, _] : working_instance_map_) { |
242 | 30 | instances.push_back(id); |
243 | 30 | } |
244 | 50 | } |
245 | 50 | for (auto& i : instances) { |
246 | 30 | std::string check_job_key; |
247 | 30 | job_check_key({i}, &check_job_key); |
248 | 30 | int ret = lease_instance_recycle_job(txn_kv_.get(), check_job_key, i, ip_port_); |
249 | 30 | if (ret == 1) { |
250 | 0 | std::lock_guard lock(mtx_); |
251 | 0 | if (auto it = working_instance_map_.find(i); it != working_instance_map_.end()) { |
252 | 0 | it->second->stop(); |
253 | 0 | } |
254 | 0 | } |
255 | 30 | } |
256 | 50 | { |
257 | 50 | std::unique_lock lock(mtx_); |
258 | 50 | notifier_.wait_for(lock, |
259 | 50 | std::chrono::milliseconds(config::recycle_job_lease_expired_ms / 3), |
260 | 100 | [&]() { return stopped(); }); |
261 | 50 | } |
262 | 50 | } |
263 | 4 | } |
264 | | |
// Convenience alarm logger for check-interval problems; undefined below.
#define LOG_CHECK_INTERVAL_ALARM LOG(WARNING) << "Err for check interval: "
// Inspect whether an instance's last check creation time is dangerously close
// to the S3 bucket lifecycle expiration, and log an alarm if data could
// expire before the next successful check.
void Checker::do_inspect(const InstanceInfoPB& instance) {
    std::string check_job_key = job_check_key({instance.instance_id()});
    std::unique_ptr<Transaction> txn;
    std::string val;
    TxnErrorCode err = txn_kv_->create_txn(&txn);
    if (err != TxnErrorCode::TXN_OK) {
        LOG_CHECK_INTERVAL_ALARM << "failed to create txn";
        return;
    }
    err = txn->get(check_job_key, &val);
    // TXN_KEY_NOT_FOUND is tolerated: the instance may never have been checked.
    if (err != TxnErrorCode::TXN_OK && err != TxnErrorCode::TXN_KEY_NOT_FOUND) {
        LOG_CHECK_INTERVAL_ALARM << "failed to get kv, err=" << err
                                 << " key=" << hex(check_job_key);
        return;
    }
    auto checker = InstanceChecker(txn_kv_, instance.instance_id());
    if (checker.init(instance) != 0) {
        LOG_CHECK_INTERVAL_ALARM << "failed to init instance checker, instance_id="
                                 << instance.instance_id();
        return;
    }

    int64_t bucket_lifecycle_days = 0;
    if (checker.get_bucket_lifecycle(&bucket_lifecycle_days) != 0) {
        LOG_CHECK_INTERVAL_ALARM << "failed to get bucket lifecycle, instance_id="
                                 << instance.instance_id();
        return;
    }
    DCHECK(bucket_lifecycle_days > 0);

    if (bucket_lifecycle_days == INT64_MAX) {
        // No s3 bucket (may all accessors are HdfsAccessor), skip inspect
        return;
    }

    int64_t last_ctime_ms = -1;
    auto job_status = JobRecyclePB::IDLE;
    // Parse the stored job info from `val`; returns false when no
    // last_ctime_ms is recorded, capturing ctime and status otherwise.
    auto has_last_ctime = [&]() {
        JobRecyclePB job_info;
        if (!job_info.ParseFromString(val)) {
            LOG_CHECK_INTERVAL_ALARM << "failed to parse JobRecyclePB, key=" << hex(check_job_key);
        }
        DCHECK(job_info.instance_id() == instance.instance_id());
        if (!job_info.has_last_ctime_ms()) return false;
        last_ctime_ms = job_info.last_ctime_ms();
        job_status = job_info.status();
        g_bvar_checker_last_success_time_ms.put(instance.instance_id(),
                                                job_info.last_success_time_ms());
        return true;
    };
    using namespace std::chrono;
    auto now = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
    if (err == TxnErrorCode::TXN_KEY_NOT_FOUND || !has_last_ctime()) {
        // Use instance's ctime for instances that do not have job's last ctime
        last_ctime_ms = instance.ctime();
    }
    DCHECK(now - last_ctime_ms >= 0);
    // Alarm threshold: lifecycle minus a reserved buffer of days, expressed
    // in milliseconds (86400000 ms per day).
    int64_t expiration_ms =
            bucket_lifecycle_days > config::reserved_buffer_days
                    ? (bucket_lifecycle_days - config::reserved_buffer_days) * 86400000
                    : bucket_lifecycle_days * 86400000;
    TEST_SYNC_POINT_CALLBACK("Checker:do_inspect", &last_ctime_ms);
    if (now - last_ctime_ms >= expiration_ms) {
        LOG_CHECK_INTERVAL_ALARM << "check risks, instance_id: " << instance.instance_id()
                                 << " last_ctime_ms: " << last_ctime_ms
                                 << " job_status: " << job_status
                                 << " bucket_lifecycle_days: " << bucket_lifecycle_days
                                 << " reserved_buffer_days: " << config::reserved_buffer_days
                                 << " expiration_ms: " << expiration_ms;
    }
}
#undef LOG_CHECK_INTERVAL_ALARM
338 | 4 | void Checker::inspect_instance_check_interval() { |
339 | 7 | while (!stopped()) { |
340 | 3 | LOG(INFO) << "start to inspect instance check interval"; |
341 | 3 | std::vector<InstanceInfoPB> instances; |
342 | 3 | get_all_instances(txn_kv_.get(), instances); |
343 | 30 | for (const auto& instance : instances) { |
344 | 30 | if (instance_filter_.filter_out(instance.instance_id())) continue; |
345 | 30 | if (stopped()) return; |
346 | 30 | if (instance.status() == InstanceInfoPB::DELETED) continue; |
347 | 30 | do_inspect(instance); |
348 | 30 | } |
349 | 3 | { |
350 | 3 | std::unique_lock lock(mtx_); |
351 | 3 | notifier_.wait_for(lock, std::chrono::seconds(config::scan_instances_interval_seconds), |
352 | 6 | [&]() { return stopped(); }); |
353 | 3 | } |
354 | 3 | } |
355 | 4 | } |
356 | | |
357 | | // return 0 for success get a key, 1 for key not found, negative for error |
358 | 11 | int key_exist(TxnKv* txn_kv, std::string_view key) { |
359 | 11 | std::unique_ptr<Transaction> txn; |
360 | 11 | TxnErrorCode err = txn_kv->create_txn(&txn); |
361 | 11 | if (err != TxnErrorCode::TXN_OK) { |
362 | 0 | LOG(WARNING) << "failed to init txn, err=" << err; |
363 | 0 | return -1; |
364 | 0 | } |
365 | 11 | std::string val; |
366 | 11 | switch (txn->get(key, &val)) { |
367 | 11 | case TxnErrorCode::TXN_OK: |
368 | 11 | return 0; |
369 | 0 | case TxnErrorCode::TXN_KEY_NOT_FOUND: |
370 | 0 | return 1; |
371 | 0 | default: |
372 | 0 | return -1; |
373 | 11 | } |
374 | 11 | } |
375 | | |
// Bind the checker to a KV store handle and the instance it will examine.
InstanceChecker::InstanceChecker(std::shared_ptr<TxnKv> txn_kv, const std::string& instance_id)
        : txn_kv_(std::move(txn_kv)), instance_id_(instance_id) {}
378 | | |
379 | 72 | int InstanceChecker::init(const InstanceInfoPB& instance) { |
380 | 72 | int ret = init_obj_store_accessors(instance); |
381 | 72 | if (ret != 0) { |
382 | 0 | return ret; |
383 | 0 | } |
384 | | |
385 | 72 | return init_storage_vault_accessors(instance); |
386 | 72 | } |
387 | | |
// Build an accessor for each legacy object-store info attached to the
// instance and register it in accessor_map_ keyed by resource id.
// Under UNIT_TEST a MockAccessor is substituted for the real S3 accessor.
// Returns 0 on success, non-zero if any accessor fails to initialize.
int InstanceChecker::init_obj_store_accessors(const InstanceInfoPB& instance) {
    for (const auto& obj_info : instance.obj_info()) {
#ifdef UNIT_TEST
        auto accessor = std::make_shared<MockAccessor>();
#else
        auto s3_conf = S3Conf::from_obj_store_info(obj_info);
        if (!s3_conf) {
            LOG(WARNING) << "failed to init object accessor, instance_id=" << instance_id_;
            return -1;
        }

        std::shared_ptr<S3Accessor> accessor;
        int ret = S3Accessor::create(std::move(*s3_conf), &accessor);
        if (ret != 0) {
            LOG(WARNING) << "failed to init object accessor. instance_id=" << instance_id_
                         << " resource_id=" << obj_info.id();
            return ret;
        }
#endif

        accessor_map_.emplace(obj_info.id(), std::move(accessor));
    }

    return 0;
}
413 | | |
// Build an accessor for each storage vault (HDFS or object store) bound to
// the instance and register it in accessor_map_ keyed by vault id.
// Returns 0 on success (including when the instance has no vaults),
// non-zero on deserialization or accessor-init failure.
int InstanceChecker::init_storage_vault_accessors(const InstanceInfoPB& instance) {
    if (instance.resource_ids().empty()) {
        return 0;
    }

    // Scan all storage vault KVs of this instance with prefetch enabled.
    FullRangeGetIteratorOptions opts(txn_kv_);
    opts.prefetch = true;
    auto it = txn_kv_->full_range_get(storage_vault_key({instance_id_, ""}),
                                      storage_vault_key({instance_id_, "\xff"}), std::move(opts));

    for (auto kv = it->next(); kv.has_value(); kv = it->next()) {
        auto [k, v] = *kv;
        StorageVaultPB vault;
        if (!vault.ParseFromArray(v.data(), v.size())) {
            LOG(WARNING) << "malformed storage vault, unable to deserialize key=" << hex(k);
            return -1;
        }

        if (vault.has_hdfs_info()) {
            auto accessor = std::make_shared<HdfsAccessor>(vault.hdfs_info());
            int ret = accessor->init();
            if (ret != 0) {
                LOG(WARNING) << "failed to init hdfs accessor. instance_id=" << instance_id_
                             << " resource_id=" << vault.id() << " name=" << vault.name();
                return ret;
            }

            accessor_map_.emplace(vault.id(), std::move(accessor));
        } else if (vault.has_obj_info()) {
#ifdef UNIT_TEST
            auto accessor = std::make_shared<MockAccessor>();
#else
            auto s3_conf = S3Conf::from_obj_store_info(vault.obj_info());
            if (!s3_conf) {
                LOG(WARNING) << "failed to init object accessor, instance_id=" << instance_id_;
                return -1;
            }

            std::shared_ptr<S3Accessor> accessor;
            int ret = S3Accessor::create(std::move(*s3_conf), &accessor);
            if (ret != 0) {
                LOG(WARNING) << "failed to init s3 accessor. instance_id=" << instance_id_
                             << " resource_id=" << vault.id() << " name=" << vault.name();
                return ret;
            }
#endif

            accessor_map_.emplace(vault.id(), std::move(accessor));
        }
    }

    // An exhausted iterator that is not valid indicates a scan error.
    if (!it->is_valid()) {
        LOG_WARNING("failed to get storage vault kv");
        return -1;
    }
    return 0;
}
471 | | |
// Forward check: for every visible rowset recorded in meta, verify that all
// of its segment files still exist in the storage backend.
// Returns 0 when everything is consistent, 1 when some rowset lost data,
// -1 on unexpected error (missing accessor, list/txn failure, ...).
int InstanceChecker::do_check() {
    TEST_SYNC_POINT("InstanceChecker.do_check");
    LOG(INFO) << "begin to check instance objects instance_id=" << instance_id_;
    int check_ret = 0;
    long num_scanned = 0;
    long num_scanned_with_segment = 0;
    long num_rowset_loss = 0;
    long instance_volume = 0;
    using namespace std::chrono;
    auto start_time = steady_clock::now();
    // Deferred statistics reporting: runs whenever this function returns.
    std::unique_ptr<int, std::function<void(int*)>> defer_log_statistics((int*)0x01, [&](int*) {
        auto cost = duration<float>(steady_clock::now() - start_time).count();
        LOG(INFO) << "check instance objects finished, cost=" << cost
                  << "s. instance_id=" << instance_id_ << " num_scanned=" << num_scanned
                  << " num_scanned_with_segment=" << num_scanned_with_segment
                  << " num_rowset_loss=" << num_rowset_loss
                  << " instance_volume=" << instance_volume;
        g_bvar_checker_num_scanned.put(instance_id_, num_scanned);
        g_bvar_checker_num_scanned_with_segment.put(instance_id_, num_scanned_with_segment);
        g_bvar_checker_num_check_failed.put(instance_id_, num_rowset_loss);
        g_bvar_checker_check_cost_s.put(instance_id_, static_cast<long>(cost));
        // FIXME(plat1ko): What if some list operation failed?
        g_bvar_checker_instance_volume.put(instance_id_, instance_volume);
    });

    // Cache of the file listing of the tablet currently being scanned, so
    // consecutive rowsets of the same tablet reuse one list_directory call.
    struct TabletFiles {
        int64_t tablet_id {0};
        std::unordered_set<std::string> files;
    };
    TabletFiles tablet_files_cache;

    // Verify all segment files of one rowset exist; updates the counters and
    // check_ret captured above.
    auto check_rowset_objects = [&, this](const doris::RowsetMetaCloudPB& rs_meta,
                                          std::string_view key) {
        if (rs_meta.num_segments() == 0) {
            return;
        }

        ++num_scanned_with_segment;
        if (tablet_files_cache.tablet_id != rs_meta.tablet_id()) {
            long tablet_volume = 0;
            // Clear cache
            tablet_files_cache.tablet_id = 0;
            tablet_files_cache.files.clear();
            // Get all file paths under this tablet directory
            auto find_it = accessor_map_.find(rs_meta.resource_id());
            if (find_it == accessor_map_.end()) {
                LOG_WARNING("resource id not found in accessor map")
                        .tag("resource_id", rs_meta.resource_id())
                        .tag("tablet_id", rs_meta.tablet_id())
                        .tag("rowset_id", rs_meta.rowset_id_v2());
                check_ret = -1;
                return;
            }

            std::unique_ptr<ListIterator> list_iter;
            int ret = find_it->second->list_directory(tablet_path_prefix(rs_meta.tablet_id()),
                                                      &list_iter);
            if (ret != 0) { // No need to log, because S3Accessor has logged this error
                check_ret = -1;
                return;
            }

            for (auto file = list_iter->next(); file.has_value(); file = list_iter->next()) {
                tablet_files_cache.files.insert(std::move(file->path));
                tablet_volume += file->size;
            }
            tablet_files_cache.tablet_id = rs_meta.tablet_id();
            instance_volume += tablet_volume;
        }

        bool data_loss = false;
        for (int i = 0; i < rs_meta.num_segments(); ++i) {
            auto path = segment_path(rs_meta.tablet_id(), rs_meta.rowset_id_v2(), i);
            if (tablet_files_cache.files.contains(path)) {
                continue;
            }

            // File missing: re-read the rowset key, since the rowset may have
            // been recycled concurrently (expected, not data loss).
            if (1 == key_exist(txn_kv_.get(), key)) {
                // Rowset has been deleted instead of data loss
                break;
            }
            data_loss = true;
            TEST_SYNC_POINT_CALLBACK("InstanceChecker.do_check1", &path);
            LOG(WARNING) << "object not exist, path=" << path << " key=" << hex(key);
        }

        if (data_loss) {
            ++num_rowset_loss;
        }
    };

    // scan visible rowsets
    auto start_key = meta_rowset_key({instance_id_, 0, 0});
    auto end_key = meta_rowset_key({instance_id_, INT64_MAX, 0});

    std::unique_ptr<RangeGetIterator> it;
    do {
        std::unique_ptr<Transaction> txn;
        TxnErrorCode err = txn_kv_->create_txn(&txn);
        if (err != TxnErrorCode::TXN_OK) {
            LOG(WARNING) << "failed to init txn, err=" << err;
            return -1;
        }

        err = txn->get(start_key, end_key, &it);
        if (err != TxnErrorCode::TXN_OK) {
            LOG(WARNING) << "internal error, failed to get rowset meta, err=" << err;
            return -1;
        }
        num_scanned += it->size();

        while (it->has_next() && !stopped()) {
            auto [k, v] = it->next();
            // Remember the last key of this batch to resume the next range get.
            if (!it->has_next()) start_key = k;

            doris::RowsetMetaCloudPB rs_meta;
            if (!rs_meta.ParseFromArray(v.data(), v.size())) {
                ++num_rowset_loss;
                LOG(WARNING) << "malformed rowset meta. key=" << hex(k) << " val=" << hex(v);
                continue;
            }
            check_rowset_objects(rs_meta, k);
        }
        start_key.push_back('\x00'); // Update to next smallest key for iteration
    } while (it->more() && !stopped());

    return num_rowset_loss > 0 ? 1 : check_ret;
}
600 | | |
601 | 34 | int InstanceChecker::get_bucket_lifecycle(int64_t* lifecycle_days) { |
602 | | // If there are multiple buckets, return the minimum lifecycle. |
603 | 34 | int64_t min_lifecycle_days = INT64_MAX; |
604 | 34 | int64_t tmp_liefcycle_days = 0; |
605 | 34 | for (const auto& [id, accessor] : accessor_map_) { |
606 | 34 | if (accessor->type() != AccessorType::S3) { |
607 | 34 | continue; |
608 | 34 | } |
609 | | |
610 | 0 | auto* s3_accessor = static_cast<S3Accessor*>(accessor.get()); |
611 | |
|
612 | 0 | if (s3_accessor->check_versioning() != 0) { |
613 | 0 | return -1; |
614 | 0 | } |
615 | | |
616 | 0 | if (s3_accessor->get_life_cycle(&tmp_liefcycle_days) != 0) { |
617 | 0 | return -1; |
618 | 0 | } |
619 | | |
620 | 0 | if (tmp_liefcycle_days < min_lifecycle_days) { |
621 | 0 | min_lifecycle_days = tmp_liefcycle_days; |
622 | 0 | } |
623 | 0 | } |
624 | 34 | *lifecycle_days = min_lifecycle_days; |
625 | 34 | return 0; |
626 | 34 | } |
627 | | |
// Inverted check: walk every file under "data/" in storage and verify that a
// matching rowset meta exists, detecting leaked (garbage) files.
// Returns 0 when clean, 1 when some leaked file was found, -1 on error.
// NOTE: only runs when the instance has a single accessor.
int InstanceChecker::do_inverted_check() {
    if (accessor_map_.size() > 1) {
        LOG(INFO) << "currently not support inverted check for multi accessor. instance_id="
                  << instance_id_;
        return 0;
    }

    LOG(INFO) << "begin to inverted check objects instance_id=" << instance_id_;
    int check_ret = 0;
    long num_scanned = 0;
    long num_file_leak = 0;
    using namespace std::chrono;
    auto start_time = steady_clock::now();
    // Deferred statistics reporting: runs whenever this function returns.
    std::unique_ptr<int, std::function<void(int*)>> defer_log_statistics((int*)0x01, [&](int*) {
        g_bvar_inverted_checker_num_scanned.put(instance_id_, num_scanned);
        g_bvar_inverted_checker_num_check_failed.put(instance_id_, num_file_leak);
        auto cost = duration<float>(steady_clock::now() - start_time).count();
        LOG(INFO) << "inverted check instance objects finished, cost=" << cost
                  << "s. instance_id=" << instance_id_ << " num_scanned=" << num_scanned
                  << " num_file_leak=" << num_file_leak;
    });

    // Cache of all rowset ids of the tablet currently being scanned, so
    // consecutive files of the same tablet reuse one KV range scan.
    struct TabletRowsets {
        int64_t tablet_id {0};
        std::unordered_set<std::string> rowset_ids;
    };
    TabletRowsets tablet_rowsets_cache;

    // Return 0 if check success, return 1 if file is garbage data, negative if error occurred
    auto check_segment_file = [&](const std::string& obj_key) {
        std::vector<std::string> str;
        butil::SplitString(obj_key, '/', &str);
        // data/{tablet_id}/{rowset_id}_{seg_num}.dat
        if (str.size() < 3) {
            return -1;
        }

        int64_t tablet_id = atol(str[1].c_str());
        if (tablet_id <= 0) {
            LOG(WARNING) << "failed to parse tablet_id, key=" << obj_key;
            return -1;
        }

        std::string rowset_id;
        if (auto pos = str.back().find('_'); pos != std::string::npos) {
            rowset_id = str.back().substr(0, pos);
        } else {
            LOG(WARNING) << "failed to parse rowset_id, key=" << obj_key;
            return -1;
        }

        // Fast path: this tablet's rowset ids are already cached.
        if (tablet_rowsets_cache.tablet_id == tablet_id) {
            if (tablet_rowsets_cache.rowset_ids.contains(rowset_id)) {
                return 0;
            } else {
                LOG(WARNING) << "rowset not exists, key=" << obj_key;
                return -1;
            }
        }
        // Get all rowset id of this tablet
        tablet_rowsets_cache.tablet_id = tablet_id;
        tablet_rowsets_cache.rowset_ids.clear();
        std::unique_ptr<Transaction> txn;
        TxnErrorCode err = txn_kv_->create_txn(&txn);
        if (err != TxnErrorCode::TXN_OK) {
            LOG(WARNING) << "failed to create txn";
            return -1;
        }
        std::unique_ptr<RangeGetIterator> it;
        auto begin = meta_rowset_key({instance_id_, tablet_id, 0});
        auto end = meta_rowset_key({instance_id_, tablet_id, INT64_MAX});
        do {
            TxnErrorCode err = txn->get(begin, end, &it);
            if (err != TxnErrorCode::TXN_OK) {
                LOG(WARNING) << "failed to get rowset kv, err=" << err;
                return -1;
            }
            if (!it->has_next()) {
                break;
            }
            while (it->has_next()) {
                // recycle corresponding resources
                auto [k, v] = it->next();
                doris::RowsetMetaCloudPB rowset;
                if (!rowset.ParseFromArray(v.data(), v.size())) {
                    LOG(WARNING) << "malformed rowset meta value, key=" << hex(k);
                    return -1;
                }
                tablet_rowsets_cache.rowset_ids.insert(rowset.rowset_id_v2());
                if (!it->has_next()) {
                    begin = k;
                    begin.push_back('\x00'); // Update to next smallest key for iteration
                    break;
                }
            }
        } while (it->more() && !stopped());

        if (!tablet_rowsets_cache.rowset_ids.contains(rowset_id)) {
            // Garbage data leak
            LOG(WARNING) << "rowset should be recycled, key=" << obj_key;
            return 1;
        }

        return 0;
    };

    // TODO(Xiaocc): Currently we haven't implemented one generator-like s3 accessor list function
    // so we choose to skip here.
    TEST_SYNC_POINT_RETURN_WITH_VALUE("InstanceChecker::do_inverted_check", (int)0);

    for (auto& [_, accessor] : accessor_map_) {
        std::unique_ptr<ListIterator> list_iter;
        int ret = accessor->list_directory("data", &list_iter);
        if (ret != 0) {
            return -1;
        }

        for (auto file = list_iter->next(); file.has_value(); file = list_iter->next()) {
            ++num_scanned;
            int ret = check_segment_file(file->path);
            if (ret != 0) {
                LOG(WARNING) << "failed to check segment file, uri=" << accessor->uri()
                             << " path=" << file->path;
                if (ret == 1) {
                    ++num_file_leak;
                } else {
                    check_ret = -1;
                }
            }
        }

        if (!list_iter->is_valid()) {
            LOG(WARNING) << "failed to list data directory. uri=" << accessor->uri();
            return -1;
        }
    }
    return num_file_leak > 0 ? 1 : check_ret;
}
766 | | |
767 | 2 | int InstanceChecker::traverse_mow_tablet(const std::function<int(int64_t)>& check_func) { |
768 | 2 | std::unique_ptr<RangeGetIterator> it; |
769 | 2 | auto begin = meta_rowset_key({instance_id_, 0, 0}); |
770 | 2 | auto end = meta_rowset_key({instance_id_, std::numeric_limits<int64_t>::max(), 0}); |
771 | 37 | do { |
772 | 37 | std::unique_ptr<Transaction> txn; |
773 | 37 | TxnErrorCode err = txn_kv_->create_txn(&txn); |
774 | 37 | if (err != TxnErrorCode::TXN_OK) { |
775 | 0 | LOG(WARNING) << "failed to create txn"; |
776 | 0 | return -1; |
777 | 0 | } |
778 | 37 | err = txn->get(begin, end, &it, false, 1); |
779 | 37 | if (err != TxnErrorCode::TXN_OK) { |
780 | 0 | LOG(WARNING) << "failed to get rowset kv, err=" << err; |
781 | 0 | return -1; |
782 | 0 | } |
783 | 37 | if (!it->has_next()) { |
784 | 2 | break; |
785 | 2 | } |
786 | 70 | while (it->has_next() && !stopped()) { |
787 | 35 | auto [k, v] = it->next(); |
788 | 35 | std::string_view k1 = k; |
789 | 35 | k1.remove_prefix(1); |
790 | 35 | std::vector<std::tuple<std::variant<int64_t, std::string>, int, int>> out; |
791 | 35 | decode_key(&k1, &out); |
792 | | // 0x01 "meta" ${instance_id} "rowset" ${tablet_id} ${version} -> RowsetMetaCloudPB |
793 | 35 | auto tablet_id = std::get<int64_t>(std::get<0>(out[3])); |
794 | | |
795 | 35 | if (!it->has_next()) { |
796 | | // Update to next smallest key for iteration |
797 | | // scan for next tablet in this instance |
798 | 35 | begin = meta_rowset_key({instance_id_, tablet_id + 1, 0}); |
799 | 35 | } |
800 | | |
801 | 35 | TabletMetaCloudPB tablet_meta; |
802 | 35 | int ret = get_tablet_meta(txn_kv_.get(), instance_id_, tablet_id, tablet_meta); |
803 | 35 | if (ret < 0) { |
804 | 0 | LOG(WARNING) << fmt::format( |
805 | 0 | "failed to get_tablet_meta in do_delete_bitmap_integrity_check(), " |
806 | 0 | "instance_id={}, tablet_id={}", |
807 | 0 | instance_id_, tablet_id); |
808 | 0 | return ret; |
809 | 0 | } |
810 | | |
811 | 35 | if (tablet_meta.enable_unique_key_merge_on_write()) { |
812 | | // only check merge-on-write table |
813 | 25 | int ret = check_func(tablet_id); |
814 | 25 | if (ret < 0) { |
815 | | // return immediately when encounter unexpected error, |
816 | | // otherwise, we continue to check the next tablet |
817 | 0 | return ret; |
818 | 0 | } |
819 | 25 | } |
820 | 35 | } |
821 | 35 | } while (it->more() && !stopped()); |
822 | 2 | return 0; |
823 | 2 | } |
824 | | |
825 | | int InstanceChecker::traverse_rowset_delete_bitmaps( |
826 | | int64_t tablet_id, std::string rowset_id, |
827 | 60 | const std::function<int(int64_t, std::string_view, int64_t, int64_t)>& callback) { |
828 | 60 | std::unique_ptr<RangeGetIterator> it; |
829 | 60 | auto begin = meta_delete_bitmap_key({instance_id_, tablet_id, rowset_id, 0, 0}); |
830 | 60 | auto end = meta_delete_bitmap_key({instance_id_, tablet_id, rowset_id, |
831 | 60 | std::numeric_limits<int64_t>::max(), |
832 | 60 | std::numeric_limits<int64_t>::max()}); |
833 | 60 | do { |
834 | 60 | std::unique_ptr<Transaction> txn; |
835 | 60 | TxnErrorCode err = txn_kv_->create_txn(&txn); |
836 | 60 | if (err != TxnErrorCode::TXN_OK) { |
837 | 0 | LOG(WARNING) << "failed to create txn"; |
838 | 0 | return -1; |
839 | 0 | } |
840 | 60 | err = txn->get(begin, end, &it); |
841 | 60 | if (err != TxnErrorCode::TXN_OK) { |
842 | 0 | LOG(WARNING) << "failed to get rowset kv, err=" << err; |
843 | 0 | return -1; |
844 | 0 | } |
845 | 60 | if (!it->has_next()) { |
846 | 0 | break; |
847 | 0 | } |
848 | 150 | while (it->has_next() && !stopped()) { |
849 | 150 | auto [k, v] = it->next(); |
850 | 150 | std::string_view k1 = k; |
851 | 150 | k1.remove_prefix(1); |
852 | 150 | std::vector<std::tuple<std::variant<int64_t, std::string>, int, int>> out; |
853 | 150 | decode_key(&k1, &out); |
854 | | // 0x01 "meta" ${instance_id} "delete_bitmap" ${tablet_id} ${rowset_id} ${version} ${segment_id} -> roaringbitmap |
855 | 150 | auto version = std::get<std::int64_t>(std::get<0>(out[5])); |
856 | 150 | auto segment_id = std::get<std::int64_t>(std::get<0>(out[6])); |
857 | | |
858 | 150 | int ret = callback(tablet_id, rowset_id, version, segment_id); |
859 | 150 | if (ret != 0) { |
860 | 10 | return ret; |
861 | 10 | } |
862 | | |
863 | 140 | if (!it->has_next()) { |
864 | 50 | begin = k; |
865 | 50 | begin.push_back('\x00'); // Update to next smallest key for iteration |
866 | 50 | break; |
867 | 50 | } |
868 | 140 | } |
869 | 60 | } while (it->more() && !stopped()); |
870 | | |
871 | 50 | return 0; |
872 | 60 | } |
873 | | |
// Scans all rowset meta kvs of `tablet_id` and invokes `collect_cb` on each
// parsed RowsetMetaCloudPB. Returns 0 on success, -1 on txn failure or a
// malformed rowset meta value.
// NOTE(review): unlike the other traversals in this file, a single txn is
// reused across all pages of the range read — presumably acceptable because
// one tablet's rowset range is small; confirm against txn lifetime limits.
int InstanceChecker::collect_tablet_rowsets(
        int64_t tablet_id, const std::function<void(const doris::RowsetMetaCloudPB&)>& collect_cb) {
    std::unique_ptr<Transaction> txn;
    TxnErrorCode err = txn_kv_->create_txn(&txn);
    if (err != TxnErrorCode::TXN_OK) {
        LOG(WARNING) << "failed to create txn";
        return -1;
    }
    std::unique_ptr<RangeGetIterator> it;
    // Key range [tablet_id, tablet_id + 1) covers exactly this tablet's rowsets
    auto begin = meta_rowset_key({instance_id_, tablet_id, 0});
    auto end = meta_rowset_key({instance_id_, tablet_id + 1, 0});

    int64_t rowsets_num {0};
    do {
        TxnErrorCode err = txn->get(begin, end, &it);
        if (err != TxnErrorCode::TXN_OK) {
            LOG(WARNING) << "failed to get rowset kv, err=" << err;
            return -1;
        }
        if (!it->has_next()) {
            break;
        }
        while (it->has_next() && !stopped()) {
            auto [k, v] = it->next();
            doris::RowsetMetaCloudPB rowset;
            if (!rowset.ParseFromArray(v.data(), v.size())) {
                LOG(WARNING) << "malformed rowset meta value, key=" << hex(k);
                return -1;
            }

            ++rowsets_num;
            collect_cb(rowset);

            if (!it->has_next()) {
                // Last kv of this page: restart the next page right after it
                begin = k;
                begin.push_back('\x00'); // Update to next smallest key for iteration
                break;
            }
        }
    } while (it->more() && !stopped());

    LOG(INFO) << fmt::format(
            "[delete bitmap checker] successfully collect rowsets for instance_id={}, "
            "tablet_id={}, rowsets_num={}",
            instance_id_, tablet_id, rowsets_num);
    return 0;
}
921 | | |
// Inverted check over the whole delete bitmap key space of this instance:
// for every delete bitmap key, verify that (a) its tablet is a merge-on-write
// tablet (otherwise counted as "abnormal") and (b) a visible rowset with the
// same rowset id still exists in the meta service (otherwise counted as
// "leaked"). Returns 1 if any abnormal/leaked bitmaps were found, 0 if the
// check passed, and a negative value on unexpected errors.
int InstanceChecker::do_delete_bitmap_inverted_check() {
    LOG(INFO) << fmt::format(
            "[delete bitmap checker] begin to do_delete_bitmap_inverted_check for instance_id={}",
            instance_id_);

    // number of delete bitmap keys being scanned
    int64_t total_delete_bitmap_keys {0};
    // number of delete bitmaps which belongs to non mow tablet
    int64_t abnormal_delete_bitmaps {0};
    // number of delete bitmaps which doesn't have corresponding rowset in MS
    int64_t leaked_delete_bitmaps {0};

    auto start_time = std::chrono::steady_clock::now();
    // RAII defer: publish bvar statistics and log a summary on every exit
    // path (the (int*)0x01 token is a dummy non-null pointer, never freed).
    std::unique_ptr<int, std::function<void(int*)>> defer_log_statistics((int*)0x01, [&](int*) {
        g_bvar_inverted_checker_leaked_delete_bitmaps.put(instance_id_, leaked_delete_bitmaps);
        g_bvar_inverted_checker_abnormal_delete_bitmaps.put(instance_id_, abnormal_delete_bitmaps);
        g_bvar_inverted_checker_delete_bitmaps_scanned.put(instance_id_, total_delete_bitmap_keys);

        auto cost = std::chrono::duration_cast<std::chrono::milliseconds>(
                            std::chrono::steady_clock::now() - start_time)
                            .count();
        if (leaked_delete_bitmaps > 0 || abnormal_delete_bitmaps > 0) {
            LOG(WARNING) << fmt::format(
                    "[delete bitmap check fails] delete bitmap inverted check for instance_id={}, "
                    "cost={} ms, total_delete_bitmap_keys={}, leaked_delete_bitmaps={}, "
                    "abnormal_delete_bitmaps={}",
                    instance_id_, cost, total_delete_bitmap_keys, leaked_delete_bitmaps,
                    abnormal_delete_bitmaps);
        } else {
            LOG(INFO) << fmt::format(
                    "[delete bitmap checker] delete bitmap inverted check for instance_id={}, "
                    "passed. cost={} ms, total_delete_bitmap_keys={}",
                    instance_id_, cost, total_delete_bitmap_keys);
        }
    });

    // Per-tablet cache: delete bitmap keys are ordered by tablet id, so the
    // tablet meta and its visible rowset-id set only need to be fetched when
    // the tablet id changes between consecutive keys.
    struct TabletsRowsetsCache {
        int64_t tablet_id {-1};
        bool enable_merge_on_write {false};
        std::unordered_set<std::string> rowsets {};
    } tablet_rowsets_cache {};

    std::unique_ptr<RangeGetIterator> it;
    auto begin = meta_delete_bitmap_key({instance_id_, 0, "", 0, 0});
    auto end =
            meta_delete_bitmap_key({instance_id_, std::numeric_limits<int64_t>::max(), "", 0, 0});
    do {
        std::unique_ptr<Transaction> txn;
        TxnErrorCode err = txn_kv_->create_txn(&txn);
        if (err != TxnErrorCode::TXN_OK) {
            LOG(WARNING) << "failed to create txn";
            return -1;
        }
        err = txn->get(begin, end, &it);
        if (err != TxnErrorCode::TXN_OK) {
            LOG(WARNING) << "failed to get rowset kv, err=" << err;
            return -1;
        }
        if (!it->has_next()) {
            break;
        }
        while (it->has_next() && !stopped()) {
            auto [k, v] = it->next();
            std::string_view k1 = k;
            k1.remove_prefix(1);
            std::vector<std::tuple<std::variant<int64_t, std::string>, int, int>> out;
            decode_key(&k1, &out);
            // 0x01 "meta" ${instance_id} "delete_bitmap" ${tablet_id} ${rowset_id} ${version} ${segment_id} -> roaringbitmap
            auto tablet_id = std::get<int64_t>(std::get<0>(out[3]));
            auto rowset_id = std::get<std::string>(std::get<0>(out[4]));
            auto version = std::get<std::int64_t>(std::get<0>(out[5]));
            auto segment_id = std::get<std::int64_t>(std::get<0>(out[6]));

            ++total_delete_bitmap_keys;

            if (!it->has_next()) {
                // Last kv of this page: continue the next page right after it
                begin = k;
                begin.push_back('\x00'); // Update to next smallest key for iteration
            }

            // Refresh the cache when entering a new tablet's key range
            if (tablet_rowsets_cache.tablet_id == -1 ||
                tablet_rowsets_cache.tablet_id != tablet_id) {
                TabletMetaCloudPB tablet_meta;
                int ret = get_tablet_meta(txn_kv_.get(), instance_id_, tablet_id, tablet_meta);
                if (ret < 0) {
                    LOG(WARNING) << fmt::format(
                            "[delete bitmap checker] failed to get_tablet_meta in "
                            "do_delete_bitmap_inverted_check(), instance_id={}, tablet_id={}",
                            instance_id_, tablet_id);
                    return ret;
                }

                tablet_rowsets_cache.tablet_id = tablet_id;
                tablet_rowsets_cache.enable_merge_on_write =
                        tablet_meta.enable_unique_key_merge_on_write();
                tablet_rowsets_cache.rowsets.clear();

                if (tablet_rowsets_cache.enable_merge_on_write) {
                    // only collect rowsets for merge-on-write tablet
                    auto collect_cb =
                            [&tablet_rowsets_cache](const doris::RowsetMetaCloudPB& rowset) {
                                tablet_rowsets_cache.rowsets.insert(rowset.rowset_id_v2());
                            };
                    ret = collect_tablet_rowsets(tablet_id, collect_cb);
                    if (ret < 0) {
                        return ret;
                    }
                }
            }
            DCHECK_EQ(tablet_id, tablet_rowsets_cache.tablet_id);

            // Case (a): delete bitmap on a tablet that is not merge-on-write
            if (!tablet_rowsets_cache.enable_merge_on_write) {
                // clang-format off
                TEST_SYNC_POINT_CALLBACK(
                        "InstanceChecker::do_delete_bitmap_inverted_check.get_abnormal_delete_bitmap",
                        &tablet_id, &rowset_id, &version, &segment_id);
                // clang-format on
                ++abnormal_delete_bitmaps;
                // log an error and continue to check the next delete bitmap
                LOG(WARNING) << fmt::format(
                        "[delete bitmap check fails] find a delete bitmap belongs to tablet "
                        "which is not a merge-on-write table! instance_id={}, tablet_id={}, "
                        "version={}, segment_id={}",
                        instance_id_, tablet_id, version, segment_id);
                continue;
            }

            // Case (b): delete bitmap whose rowset no longer exists in MS
            if (!tablet_rowsets_cache.rowsets.contains(rowset_id)) {
                TEST_SYNC_POINT_CALLBACK(
                        "InstanceChecker::do_delete_bitmap_inverted_check.get_leaked_delete_bitmap",
                        &tablet_id, &rowset_id, &version, &segment_id);
                ++leaked_delete_bitmaps;
                // log an error and continue to check the next delete bitmap
                LOG(WARNING) << fmt::format(
                        "[delete bitmap check fails] can't find corresponding rowset for delete "
                        "bitmap instance_id={}, tablet_id={}, rowset_id={}, version={}, "
                        "segment_id={}",
                        instance_id_, tablet_id, rowset_id, version, segment_id);
            }
        }
    } while (it->more() && !stopped());

    return (leaked_delete_bitmaps > 0 || abnormal_delete_bitmaps > 0) ? 1 : 0;
}
1066 | | |
// Storage-optimize check for one merge-on-write tablet: after BE sweeps stale
// rowsets, rowsets fully below the newest compacted rowset should not keep
// delete bitmaps older than that compaction's end version. Returns 1 when the
// number of abnormal rowsets exceeds the tolerated threshold, 0 otherwise,
// negative on unexpected errors.
int InstanceChecker::check_delete_bitmap_storage_optimize(int64_t tablet_id) {
    using Version = std::pair<int64_t, int64_t>;
    // Minimal per-rowset info needed for this check
    struct RowsetDigest {
        std::string rowset_id;
        Version version;
        doris::SegmentsOverlapPB segments_overlap;

        // Order rowsets by start version
        bool operator<(const RowsetDigest& other) const {
            return version.first < other.version.first;
        }

        // A rowset came from compaction if it spans multiple versions, or is
        // single-version with non-overlapping segments
        bool produced_by_compaction() const {
            return (version.first < version.second) ||
                   ((version.first == version.second) && segments_overlap == NONOVERLAPPING);
        }
    };

    // number of rowsets which may have problems
    int64_t abnormal_rowsets_num {0};

    std::vector<RowsetDigest> tablet_rowsets {};
    // Get all visible rowsets of this tablet
    auto collect_cb = [&tablet_rowsets](const doris::RowsetMetaCloudPB& rowset) {
        if (rowset.start_version() == 0 && rowset.end_version() == 1) {
            // ignore dummy rowset [0-1]
            return;
        }
        tablet_rowsets.emplace_back(
                rowset.rowset_id_v2(),
                std::make_pair<int64_t, int64_t>(rowset.start_version(), rowset.end_version()),
                rowset.segments_overlap_pb());
    };
    if (int ret = collect_tablet_rowsets(tablet_id, collect_cb); ret != 0) {
        return ret;
    }

    std::sort(tablet_rowsets.begin(), tablet_rowsets.end());

    // find right-most rowset which is produced by compaction
    auto it = std::find_if(
            tablet_rowsets.crbegin(), tablet_rowsets.crend(),
            [](const RowsetDigest& rowset) { return rowset.produced_by_compaction(); });
    if (it == tablet_rowsets.crend()) {
        LOG(INFO) << fmt::format(
                "[delete bitmap checker] skip to check delete bitmap storage optimize for "
                "tablet_id={} because it doesn't have compacted rowsets.",
                tablet_id);
        return 0;
    }

    int64_t start_version = it->version.first;
    int64_t pre_min_version = it->version.second;

    // after BE sweeping stale rowsets, all rowsets in this tablet before
    // should not have delete bitmaps with versions lower than `pre_min_version`
    if (config::delete_bitmap_storage_optimize_check_version_gap > 0) {
        // Relax the bound by a configured gap to avoid false positives while
        // stale-rowset sweeping is still in flight
        pre_min_version -= config::delete_bitmap_storage_optimize_check_version_gap;
        if (pre_min_version <= 1) {
            LOG(INFO) << fmt::format(
                    "[delete bitmap checker] skip to check delete bitmap storage optimize for "
                    "tablet_id={} because pre_min_version is too small.",
                    tablet_id);
            return 0;
        }
    }

    // Flags any delete bitmap whose version precedes the relaxed lower bound
    auto check_func = [pre_min_version, instance_id = instance_id_](
                              int64_t tablet_id, std::string_view rowset_id, int64_t version,
                              int64_t segment_id) -> int {
        if (version < pre_min_version) {
            LOG(WARNING) << fmt::format(
                    "[delete bitmap check fails] delete bitmap storage optimize check fail for "
                    "instance_id={}, tablet_id={}, rowset_id={}, found delete bitmap with "
                    "version={} < pre_min_version={}",
                    instance_id, tablet_id, rowset_id, version, pre_min_version);
            return 1;
        }
        return 0;
    };

    for (const auto& rowset : tablet_rowsets) {
        // check for all rowsets before the max compacted rowset
        if (rowset.version.second < start_version) {
            auto rowset_id = rowset.rowset_id;
            int ret = traverse_rowset_delete_bitmaps(tablet_id, rowset_id, check_func);
            if (ret < 0) {
                return ret;
            }

            if (ret != 0) {
                ++abnormal_rowsets_num;
                TEST_SYNC_POINT_CALLBACK(
                        "InstanceChecker::check_delete_bitmap_storage_optimize.get_abnormal_rowset",
                        &tablet_id, &rowset_id);
            }
        }
    }

    LOG(INFO) << fmt::format(
            "[delete bitmap checker] finish check delete bitmap storage optimize for "
            "instance_id={}, tablet_id={}, rowsets_num={}, abnormal_rowsets_num={}, "
            "pre_min_version={}",
            instance_id_, tablet_id, tablet_rowsets.size(), abnormal_rowsets_num, pre_min_version);

    // NOTE(review): `> 1` tolerates exactly one abnormal rowset per tablet —
    // looks intentional (the oldest surviving rowset may legitimately lag),
    // but confirm this is not an off-by-one for `> 0`.
    return (abnormal_rowsets_num > 1 ? 1 : 0);
}
1173 | | |
1174 | 2 | int InstanceChecker::do_delete_bitmap_storage_optimize_check() { |
1175 | 2 | int64_t total_tablets_num {0}; |
1176 | 2 | int64_t failed_tablets_num {0}; |
1177 | | |
1178 | | // check that for every visible rowset, there exists at least delete one bitmap in MS |
1179 | 25 | int ret = traverse_mow_tablet([&](int64_t tablet_id) { |
1180 | 25 | ++total_tablets_num; |
1181 | 25 | int res = check_delete_bitmap_storage_optimize(tablet_id); |
1182 | 25 | failed_tablets_num += (res != 0); |
1183 | 25 | return res; |
1184 | 25 | }); |
1185 | | |
1186 | 2 | if (ret < 0) { |
1187 | 0 | return ret; |
1188 | 0 | } |
1189 | | |
1190 | 2 | LOG(INFO) << fmt::format( |
1191 | 2 | "[delete bitmap checker] check delete bitmap storage optimize for instance_id={}, " |
1192 | 2 | "total_tablets_num={}, failed_tablets_num={}", |
1193 | 2 | instance_id_, total_tablets_num, failed_tablets_num); |
1194 | | |
1195 | 2 | return (failed_tablets_num > 0) ? 1 : 0; |
1196 | 2 | } |
1197 | | |
1198 | 3 | int InstanceChecker::do_mow_compaction_key_check() { |
1199 | 3 | std::unique_ptr<RangeGetIterator> it; |
1200 | 3 | std::string begin = mow_tablet_compaction_key({instance_id_, 0, 0}); |
1201 | 3 | std::string end = mow_tablet_compaction_key({instance_id_, INT64_MAX, 0}); |
1202 | 3 | MowTabletCompactionPB mow_tablet_compaction; |
1203 | 3 | do { |
1204 | 3 | std::unique_ptr<Transaction> txn; |
1205 | 3 | TxnErrorCode err = txn_kv_->create_txn(&txn); |
1206 | 3 | if (err != TxnErrorCode::TXN_OK) { |
1207 | 0 | LOG(WARNING) << "failed to create txn"; |
1208 | 0 | return -1; |
1209 | 0 | } |
1210 | 3 | err = txn->get(begin, end, &it); |
1211 | 3 | if (err != TxnErrorCode::TXN_OK) { |
1212 | 0 | LOG(WARNING) << "failed to get mow tablet compaction key, err=" << err; |
1213 | 0 | return -1; |
1214 | 0 | } |
1215 | 3 | int64_t now = duration_cast<std::chrono::seconds>( |
1216 | 3 | std::chrono::system_clock::now().time_since_epoch()) |
1217 | 3 | .count(); |
1218 | 3 | while (it->has_next() && !stopped()) { |
1219 | 2 | auto [k, v] = it->next(); |
1220 | 2 | std::string_view k1 = k; |
1221 | 2 | k1.remove_prefix(1); |
1222 | 2 | std::vector<std::tuple<std::variant<int64_t, std::string>, int, int>> out; |
1223 | 2 | decode_key(&k1, &out); |
1224 | | // 0x01 "meta" ${instance_id} "mow_tablet_comp" ${table_id} ${initiator} |
1225 | 2 | auto table_id = std::get<int64_t>(std::get<0>(out[3])); |
1226 | 2 | auto initiator = std::get<int64_t>(std::get<0>(out[4])); |
1227 | 2 | if (!mow_tablet_compaction.ParseFromArray(v.data(), v.size())) [[unlikely]] { |
1228 | 0 | LOG(WARNING) << "failed to parse MowTabletCompactionPB"; |
1229 | 0 | return -1; |
1230 | 0 | } |
1231 | 2 | int64_t expiration = mow_tablet_compaction.expiration(); |
1232 | | //check compaction key failed should meet both following two condition: |
1233 | | //1.compaction key is expired |
1234 | | //2.table lock key is not found or key is not expired |
1235 | 2 | if (expiration < now - config::compaction_key_check_expiration_diff_seconds) { |
1236 | 2 | std::string lock_key = |
1237 | 2 | meta_delete_bitmap_update_lock_key({instance_id_, table_id, -1}); |
1238 | 2 | std::string lock_val; |
1239 | 2 | err = txn->get(lock_key, &lock_val); |
1240 | 2 | std::string reason = ""; |
1241 | 2 | if (err == TxnErrorCode::TXN_KEY_NOT_FOUND) { |
1242 | 0 | reason = "table lock key not found"; |
1243 | |
|
1244 | 2 | } else { |
1245 | 2 | DeleteBitmapUpdateLockPB lock_info; |
1246 | 2 | if (!lock_info.ParseFromString(lock_val)) [[unlikely]] { |
1247 | 0 | LOG(WARNING) << "failed to parse DeleteBitmapUpdateLockPB"; |
1248 | 0 | return -1; |
1249 | 0 | } |
1250 | 2 | if (lock_info.expiration() > now || lock_info.lock_id() != -1) { |
1251 | 2 | reason = "table lock is not expired,lock_id=" + |
1252 | 2 | std::to_string(lock_info.lock_id()); |
1253 | 2 | } |
1254 | 2 | } |
1255 | 2 | if (reason != "") { |
1256 | 2 | LOG(WARNING) << fmt::format( |
1257 | 2 | "[compaction key check fails] compaction key check fail for " |
1258 | 2 | "instance_id={}, table_id={}, initiator={}, expiration={}, now={}, " |
1259 | 2 | "reason={}", |
1260 | 2 | instance_id_, table_id, initiator, expiration, now, reason); |
1261 | 2 | return -1; |
1262 | 2 | } |
1263 | 2 | } |
1264 | 2 | } |
1265 | 1 | begin = it->next_begin_key(); // Update to next smallest key for iteration |
1266 | 1 | } while (it->more() && !stopped()); |
1267 | 1 | return 0; |
1268 | 3 | } |
1269 | | |
1270 | | } // namespace doris::cloud |