/root/doris/cloud/src/recycler/recycler.h
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #pragma once |
19 | | |
20 | | #include <gen_cpp/cloud.pb.h> |
21 | | #include <glog/logging.h> |
22 | | |
23 | | #include <atomic> |
24 | | #include <condition_variable> |
25 | | #include <cstdint> |
26 | | #include <deque> |
27 | | #include <functional> |
28 | | #include <memory> |
29 | | #include <string> |
30 | | #include <string_view> |
31 | | #include <thread> |
32 | | #include <utility> |
33 | | |
34 | | #include "common/bvars.h" |
35 | | #include "meta-service/txn_lazy_committer.h" |
36 | | #include "meta-store/versionstamp.h" |
37 | | #include "recycler/storage_vault_accessor.h" |
38 | | #include "recycler/white_black_list.h" |
39 | | #include "snapshot/snapshot_manager.h" |
40 | | |
41 | | namespace brpc { |
42 | | class Server; |
43 | | } // namespace brpc |
44 | | |
45 | | namespace doris::cloud { |
46 | | class TxnKv; |
47 | | class InstanceRecycler; |
48 | | class StorageVaultAccessor; |
49 | | class Checker; |
50 | | class SimpleThreadPool; |
51 | | class RecyclerMetricsContext; |
52 | | class TabletRecyclerMetricsContext; |
53 | | class SegmentRecyclerMetricsContext; |
54 | | struct RecyclerThreadPoolGroup { |
55 | 8 | RecyclerThreadPoolGroup() = default; |
56 | | RecyclerThreadPoolGroup(std::shared_ptr<SimpleThreadPool> s3_producer_pool, |
57 | | std::shared_ptr<SimpleThreadPool> recycle_tablet_pool, |
58 | | std::shared_ptr<SimpleThreadPool> group_recycle_function_pool) |
59 | | : s3_producer_pool(std::move(s3_producer_pool)), |
60 | | recycle_tablet_pool(std::move(recycle_tablet_pool)), |
61 | 10 | group_recycle_function_pool(std::move(group_recycle_function_pool)) {} |
62 | 216 | ~RecyclerThreadPoolGroup() = default; |
63 | 99 | RecyclerThreadPoolGroup(const RecyclerThreadPoolGroup&) = default; |
64 | | RecyclerThreadPoolGroup& operator=(const RecyclerThreadPoolGroup& other) = default; |
65 | 10 | RecyclerThreadPoolGroup& operator=(RecyclerThreadPoolGroup&& other) = default; |
66 | 99 | RecyclerThreadPoolGroup(RecyclerThreadPoolGroup&&) = default; |
67 | | // used for accessor.delete_files, accessor.delete_directory |
68 | | std::shared_ptr<SimpleThreadPool> s3_producer_pool; |
69 | | // used for InstanceRecycler::recycle_tablet |
70 | | std::shared_ptr<SimpleThreadPool> recycle_tablet_pool; |
71 | | std::shared_ptr<SimpleThreadPool> group_recycle_function_pool; |
72 | | }; |
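// Illustrative sketch, not part of this header: one way the pool group might be
// assembled before being handed to the recycler. The pools are taken as parameters
// because SimpleThreadPool's constructor is declared elsewhere and its exact API is
// not shown here (an assumption); the group itself is copyable and movable, so it
// can be passed to each InstanceRecycler by value.
static RecyclerThreadPoolGroup make_thread_pool_group_sketch(
        std::shared_ptr<SimpleThreadPool> s3_producer_pool,
        std::shared_ptr<SimpleThreadPool> recycle_tablet_pool,
        std::shared_ptr<SimpleThreadPool> group_recycle_function_pool) {
    return {std::move(s3_producer_pool), std::move(recycle_tablet_pool),
            std::move(group_recycle_function_pool)};
}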
73 | | |
74 | | class Recycler { |
75 | | public: |
76 | | explicit Recycler(std::shared_ptr<TxnKv> txn_kv); |
77 | | ~Recycler(); |
78 | | |
79 | | // returns 0 for success otherwise error |
80 | | int start(brpc::Server* server); |
81 | | |
82 | | void stop(); |
83 | | |
84 | 294 | bool stopped() const { return stopped_.load(std::memory_order_acquire); } |
85 | | |
86 | | private: |
87 | | void recycle_callback(); |
88 | | |
89 | | void instance_scanner_callback(); |
90 | | |
91 | | void lease_recycle_jobs(); |
92 | | |
93 | | void check_recycle_tasks(); |
94 | | |
95 | | private: |
96 | | friend class RecyclerServiceImpl; |
97 | | |
98 | | std::shared_ptr<TxnKv> txn_kv_; |
99 | | std::atomic_bool stopped_ {false}; |
100 | | |
101 | | std::vector<std::thread> workers_; |
102 | | |
103 | | std::mutex mtx_; |
104 | | // notify recycle workers |
105 | | std::condition_variable pending_instance_cond_; |
106 | | std::deque<InstanceInfoPB> pending_instance_queue_; |
107 | | std::unordered_set<std::string> pending_instance_set_; |
108 | | std::unordered_map<std::string, std::shared_ptr<InstanceRecycler>> recycling_instance_map_; |
109 | | // notify instance scanner and lease thread |
110 | | std::condition_variable notifier_; |
111 | | |
112 | | std::string ip_port_; |
113 | | |
114 | | WhiteBlackList instance_filter_; |
115 | | std::unique_ptr<Checker> checker_; |
116 | | |
117 | | RecyclerThreadPoolGroup _thread_pool_group; |
118 | | |
119 | | std::shared_ptr<TxnLazyCommitter> txn_lazy_committer_; |
120 | | std::shared_ptr<SnapshotManager> snapshot_manager_; |
121 | | }; |
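// Illustrative sketch, not part of this header: the typical lifecycle implied by the
// public API above. How the TxnKv and the brpc::Server are created and configured is
// elided; the shutdown-wait step is a placeholder comment, not real code from this repo.
static int run_recycler_sketch(std::shared_ptr<TxnKv> txn_kv, brpc::Server* server) {
    Recycler recycler(std::move(txn_kv));
    if (recycler.start(server) != 0) { // returns 0 for success otherwise error
        return -1;
    }
    // ... wait for a shutdown signal (elided) ...
    recycler.stop(); // signal the workers to stop
    return 0;
}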
122 | | |
123 | | enum class RowsetRecyclingState { |
124 | | FORMAL_ROWSET, |
125 | | TMP_ROWSET, |
126 | | }; |
127 | | |
128 | | class RecyclerMetricsContext { |
129 | | public: |
130 | 2 | RecyclerMetricsContext() = default; |
131 | | |
132 | | RecyclerMetricsContext(std::string instance_id, std::string operation_type) |
133 | 372 | : operation_type(std::move(operation_type)), instance_id(std::move(instance_id)) { |
134 | 372 | start(); |
135 | 372 | } |
136 | | |
137 | 373 | ~RecyclerMetricsContext() = default; |
138 | | |
139 | | std::atomic_ullong total_need_recycle_data_size = 0; |
140 | | std::atomic_ullong total_need_recycle_num = 0; |
141 | | |
142 | | std::atomic_ullong total_recycled_data_size = 0; |
143 | | std::atomic_ullong total_recycled_num = 0; |
144 | | |
145 | | std::string operation_type; |
146 | | std::string instance_id; |
147 | | |
148 | | double start_time = 0; |
149 | | |
150 | 372 | void start() { |
151 | 372 | start_time = duration_cast<std::chrono::milliseconds>( |
152 | 372 | std::chrono::system_clock::now().time_since_epoch()) |
153 | 372 | .count(); |
154 | 372 | } |
155 | | |
156 | 192 | double duration() const { |
157 | 192 | return duration_cast<std::chrono::milliseconds>( |
158 | 192 | std::chrono::system_clock::now().time_since_epoch()) |
159 | 192 | .count() - |
160 | 192 | start_time; |
161 | 192 | } |
162 | | |
163 | 20 | void reset() { |
164 | 20 | total_need_recycle_data_size = 0; |
165 | 20 | total_need_recycle_num = 0; |
166 | 20 | total_recycled_data_size = 0; |
167 | 20 | total_recycled_num = 0; |
168 | 20 | start_time = duration_cast<std::chrono::milliseconds>( |
169 | 20 | std::chrono::system_clock::now().time_since_epoch()) |
170 | 20 | .count(); |
171 | 20 | } |
172 | | |
173 | 0 | void finish_report() { |
174 | 0 | if (!operation_type.empty()) { |
175 | 0 | double cost = duration(); |
176 | 0 | g_bvar_recycler_instance_last_round_recycle_elpased_ts.put( |
177 | 0 | {instance_id, operation_type}, cost); |
178 | 0 | g_bvar_recycler_instance_recycle_round.put({instance_id, operation_type}, 1); |
179 | 0 | LOG(INFO) << "recycle instance: " << instance_id |
180 | 0 | << ", operation type: " << operation_type << ", cost: " << cost |
181 | 0 | << " ms, total recycled num: " << total_recycled_num.load() |
182 | 0 | << ", total recycled data size: " << total_recycled_data_size.load() |
183 | 0 | << " bytes"; |
184 | 0 | if (cost != 0) { |
185 | 0 | if (total_recycled_num.load() != 0) { |
186 | 0 | g_bvar_recycler_instance_recycle_time_per_resource.put( |
187 | 0 | {instance_id, operation_type}, cost / total_recycled_num.load()); |
188 | 0 | } |
189 | 0 | g_bvar_recycler_instance_recycle_bytes_per_ms.put( |
190 | 0 | {instance_id, operation_type}, total_recycled_data_size.load() / cost); |
191 | 0 | } |
192 | 0 | } |
193 | 0 | } |
194 | | |
195 | | // `is_begin` is used to initialize the total number of items that need to be recycled |
196 | 24.5k | void report(bool is_begin = false) { |
197 | 24.5k | if (!operation_type.empty()) { |
198 | | // initial report: publish the totals that still need to be recycled |
199 | 24.5k | if (is_begin) { |
200 | 0 | auto value = total_need_recycle_num.load(); |
201 | |
202 | 0 | g_bvar_recycler_instance_last_round_to_recycle_bytes.put( |
203 | 0 | {instance_id, operation_type}, total_need_recycle_data_size.load()); |
204 | 0 | g_bvar_recycler_instance_last_round_to_recycle_num.put( |
205 | 0 | {instance_id, operation_type}, value); |
206 | 24.5k | } else { |
207 | 24.5k | g_bvar_recycler_instance_last_round_recycled_bytes.put( |
208 | 24.5k | {instance_id, operation_type}, total_recycled_data_size.load()); |
209 | 24.5k | g_bvar_recycler_instance_recycle_total_bytes_since_started.put( |
210 | 24.5k | {instance_id, operation_type}, total_recycled_data_size.load()); |
211 | 24.5k | g_bvar_recycler_instance_last_round_recycled_num.put({instance_id, operation_type}, |
212 | 24.5k | total_recycled_num.load()); |
213 | 24.5k | g_bvar_recycler_instance_recycle_total_num_since_started.put( |
214 | 24.5k | {instance_id, operation_type}, total_recycled_num.load()); |
215 | 24.5k | } |
216 | 24.5k | } |
217 | 24.5k | } |
218 | | }; |
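// Illustrative sketch, not part of this header: how a recycle routine might drive
// RecyclerMetricsContext. It records the totals up front with report(true), bumps the
// counters as items are recycled, and publishes the elapsed-time/rate bvars at the end.
// The instance id, operation type string and the item list are invented for illustration.
static void recycle_with_metrics_sketch(const std::vector<int64_t>& item_sizes) {
    RecyclerMetricsContext ctx("demo_instance", "recycle_rowsets");
    ctx.total_need_recycle_num = item_sizes.size();
    for (int64_t s : item_sizes) ctx.total_need_recycle_data_size += s;
    ctx.report(/*is_begin=*/true); // publish the "to recycle" gauges
    for (int64_t s : item_sizes) {
        // ... delete the underlying data (elided) ...
        ctx.total_recycled_num++;
        ctx.total_recycled_data_size += s;
    }
    ctx.report();        // publish the recycled bytes/num gauges
    ctx.finish_report(); // publish elapsed time and per-resource rates
}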
219 | | |
220 | | class TabletRecyclerMetricsContext : public RecyclerMetricsContext { |
221 | | public: |
222 | 99 | TabletRecyclerMetricsContext() : RecyclerMetricsContext("global_recycler", "recycle_tablet") {} |
223 | | }; |
224 | | |
225 | | class SegmentRecyclerMetricsContext : public RecyclerMetricsContext { |
226 | | public: |
227 | | SegmentRecyclerMetricsContext() |
228 | 99 | : RecyclerMetricsContext("global_recycler", "recycle_segment") {} |
229 | | }; |
230 | | |
231 | | class InstanceRecycler { |
232 | | public: |
233 | | explicit InstanceRecycler(std::shared_ptr<TxnKv> txn_kv, const InstanceInfoPB& instance, |
234 | | RecyclerThreadPoolGroup thread_pool_group, |
235 | | std::shared_ptr<TxnLazyCommitter> txn_lazy_committer); |
236 | | ~InstanceRecycler(); |
237 | | |
238 | | // returns 0 for success otherwise error |
239 | | int init(); |
240 | | |
241 | 0 | void stop() { stopped_.store(true, std::memory_order_release); } |
242 | 51 | bool stopped() const { return stopped_.load(std::memory_order_acquire); } |
243 | | |
244 | | // returns 0 for success otherwise error |
245 | | int do_recycle(); |
246 | | |
247 | | // remove all kv and data in this instance; ONLY to be called when the instance has been deleted |
248 | | // returns 0 for success otherwise error |
249 | | int recycle_deleted_instance(); |
250 | | |
251 | | // scan and recycle expired indexes: |
252 | | // 1. dropped table, dropped mv |
253 | | // 2. half-successfully created table/index |
254 | | // returns 0 for success otherwise error |
255 | | int recycle_indexes(); |
256 | | |
257 | | // scan and recycle expired partitions: |
258 | | // 1. dropped parttion |
259 | | // 2. half-success partition when create |
260 | | // returns 0 for success otherwise error |
261 | | int recycle_partitions(); |
262 | | |
263 | | // scan and recycle expired rowsets: |
264 | | // 1. prepare_rowset will produce recycle_rowset before uploading data to remote storage (memo) |
265 | | // 2. compaction will change the input rowsets to recycle_rowset |
266 | | // returns 0 for success otherwise error |
267 | | int recycle_rowsets(); |
268 | | |
269 | | // like `recycle_rowsets`, but for versioned rowsets. |
270 | | int recycle_versioned_rowsets(); |
271 | | |
272 | | // scan and recycle expired tmp rowsets: |
273 | | // 1. commit_rowset produces a tmp_rowset once data upload (load or compaction) to remote storage finishes |
274 | | // returns 0 for success otherwise error |
275 | | int recycle_tmp_rowsets(); |
276 | | |
277 | | /** |
278 | | * recycle all tablets belonging to the index specified by `index_id` |
279 | | * |
280 | | * @param partition_id if positive, only recycle tablets in this partition belonging to the specified index |
281 | | * @return 0 for success otherwise error |
282 | | */ |
283 | | int recycle_tablets(int64_t table_id, int64_t index_id, RecyclerMetricsContext& ctx, |
284 | | int64_t partition_id = -1); |
285 | | |
286 | | /** |
287 | | * recycle all rowsets belonging to the tablet specified by `tablet_id` |
288 | | * |
289 | | * @return 0 for success otherwise error |
290 | | */ |
291 | | int recycle_tablet(int64_t tablet_id, RecyclerMetricsContext& metrics_context); |
292 | | |
293 | | /** |
294 | | * like `recycle_tablet`, but for versioned tablet |
295 | | */ |
296 | | int recycle_versioned_tablet(int64_t tablet_id, RecyclerMetricsContext& metrics_context); |
297 | | |
298 | | // scan and recycle useless partition version kv |
299 | | int recycle_versions(); |
300 | | |
301 | | // scan and recycle the orphan partitions |
302 | | int recycle_orphan_partitions(); |
303 | | |
304 | | // scan and abort timeout txn label |
305 | | // returns 0 for success otherwise error |
306 | | int abort_timeout_txn(); |
307 | | |
308 | | //scan and recycle expire txn label |
309 | | // returns 0 for success otherwise error |
310 | | int recycle_expired_txn_label(); |
311 | | |
312 | | // scan and recycle finished or timeout copy jobs |
313 | | // returns 0 for success otherwise error |
314 | | int recycle_copy_jobs(); |
315 | | |
316 | | // scan and recycle dropped internal stage |
317 | | // returns 0 for success otherwise error |
318 | | int recycle_stage(); |
319 | | |
320 | | // scan and recycle expired stage objects |
321 | | // returns 0 for success otherwise error |
322 | | int recycle_expired_stage_objects(); |
323 | | |
324 | | // scan and recycle operation logs |
325 | | // returns 0 for success otherwise error |
326 | | int recycle_operation_logs(); |
327 | | |
328 | | // scan and recycle expired restore jobs |
329 | | // returns 0 for success otherwise error |
330 | | int recycle_restore_jobs(); |
331 | | |
332 | | bool check_recycle_tasks(); |
333 | | |
334 | | int scan_and_statistics_indexes(); |
335 | | |
336 | | int scan_and_statistics_partitions(); |
337 | | |
338 | | int scan_and_statistics_rowsets(); |
339 | | |
340 | | int scan_and_statistics_tmp_rowsets(); |
341 | | |
342 | | int scan_and_statistics_abort_timeout_txn(); |
343 | | |
344 | | int scan_and_statistics_expired_txn_label(); |
345 | | |
346 | | int scan_and_statistics_copy_jobs(); |
347 | | |
348 | | int scan_and_statistics_stage(); |
349 | | |
350 | | int scan_and_statistics_expired_stage_objects(); |
351 | | |
352 | | int scan_and_statistics_versions(); |
353 | | |
354 | | int scan_and_statistics_restore_jobs(); |
355 | | |
356 | 10 | void TEST_add_accessor(std::string_view id, std::shared_ptr<StorageVaultAccessor> accessor) { |
357 | 10 | accessor_map_.insert({std::string(id), std::move(accessor)}); |
358 | 10 | } |
359 | | |
360 | | private: |
361 | | // returns 0 for success otherwise error |
362 | | int init_obj_store_accessors(); |
363 | | |
364 | | // returns 0 for success otherwise error |
365 | | int init_storage_vault_accessors(); |
366 | | |
367 | | /** |
368 | | * Scan key-value pairs between [`begin`, `end`), and perform `recycle_func` on each key-value pair. |
369 | | * |
370 | | * @param recycle_func defines how to recycle resources corresponding to a key-value pair. Returns 0 if the recycling is successful. |
371 | | * @param loop_done is called after `RangeGetIterator` has no next kv. Usually used to perform a batch recycling. Returns 0 if success. |
372 | | * @return 0 if all corresponding resources are recycled successfully, otherwise non-zero |
373 | | */ |
374 | | int scan_and_recycle(std::string begin, std::string_view end, |
375 | | std::function<int(std::string_view k, std::string_view v)> recycle_func, |
376 | | std::function<int()> loop_done = nullptr); |
377 | | |
378 | | // return 0 for success otherwise error |
379 | | int delete_rowset_data(const doris::RowsetMetaCloudPB& rs_meta_pb); |
380 | | |
381 | | // return 0 for success otherwise error |
382 | | // NOTE: this function should ONLY be called when the file paths cannot be calculated |
383 | | int delete_rowset_data(const std::string& resource_id, int64_t tablet_id, |
384 | | const std::string& rowset_id); |
385 | | |
386 | | // return 0 for success otherwise error |
387 | | int delete_rowset_data(const std::map<std::string, doris::RowsetMetaCloudPB>& rowsets, |
388 | | RowsetRecyclingState type, RecyclerMetricsContext& metrics_context); |
389 | | |
390 | | /** |
391 | | * Get stage storage info from instance and init StorageVaultAccessor |
392 | | * @return 0 if accessor is successfully inited, 1 if stage not found, negative for error |
393 | | */ |
394 | | int init_copy_job_accessor(const std::string& stage_id, const StagePB::StageType& stage_type, |
395 | | std::shared_ptr<StorageVaultAccessor>* accessor); |
396 | | |
397 | | void register_recycle_task(const std::string& task_name, int64_t start_time); |
398 | | |
399 | | void unregister_recycle_task(const std::string& task_name); |
400 | | |
401 | | // scan all tablets and collect statistics metrics |
402 | | int scan_tablets_and_statistics(int64_t tablet_id, int64_t index_id, |
403 | | RecyclerMetricsContext& metrics_context, |
404 | | int64_t partition_id = -1, bool is_empty_tablet = false); |
405 | | |
406 | | // scan all rowsets of a tablet and collect statistics metrics |
407 | | int scan_tablet_and_statistics(int64_t tablet_id, RecyclerMetricsContext& metrics_context); |
408 | | |
409 | | // Recycle operation log and the log key. |
410 | | // |
411 | | // The log_key is constructed from the log_version and instance_id. |
412 | | // Both `operation_log` and `log_key` will be removed in the same transaction, to ensure atomicity. |
413 | | int recycle_operation_log(Versionstamp log_version, OperationLogPB operation_log); |
414 | | |
415 | | // Recycle rowset meta and data, return 0 for success otherwise error |
416 | | // |
417 | | // This function will decrease the rowset ref count and remove the rowset meta and data if the ref count is 1. |
418 | | int recycle_rowset_meta_and_data(std::string_view recycle_rowset_key, |
419 | | const RowsetMetaCloudPB& rowset_meta); |
420 | | |
421 | | // Check whether the instance has any snapshots; returns 0 for success otherwise error. |
422 | | int has_cluster_snapshots(bool* any); |
423 | | |
424 | | private: |
425 | | std::atomic_bool stopped_ {false}; |
426 | | std::shared_ptr<TxnKv> txn_kv_; |
427 | | std::string instance_id_; |
428 | | InstanceInfoPB instance_info_; |
429 | | |
430 | | // TODO(plat1ko): Add new accessor to map in runtime for new created storage vaults |
431 | | std::unordered_map<std::string, std::shared_ptr<StorageVaultAccessor>> accessor_map_; |
432 | | using InvertedIndexInfo = |
433 | | std::pair<InvertedIndexStorageFormatPB, std::vector<std::pair<int64_t, std::string>>>; |
434 | | |
435 | | class InvertedIndexIdCache; |
436 | | std::unique_ptr<InvertedIndexIdCache> inverted_index_id_cache_; |
437 | | |
438 | | std::mutex recycled_tablets_mtx_; |
439 | | // Stores recycled tablets; deleting rowset data of these tablets can be skipped because that data has already been deleted. |
440 | | std::unordered_set<int64_t> recycled_tablets_; |
441 | | |
442 | | std::mutex recycle_tasks_mutex; |
443 | | // <task_name, start_time> |
444 | | std::map<std::string, int64_t> running_recycle_tasks; |
445 | | |
446 | | RecyclerThreadPoolGroup _thread_pool_group; |
447 | | |
448 | | std::shared_ptr<TxnLazyCommitter> txn_lazy_committer_; |
449 | | |
450 | | TabletRecyclerMetricsContext tablet_metrics_context_; |
451 | | SegmentRecyclerMetricsContext segment_metrics_context_; |
452 | | }; |
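// Illustrative sketch, not part of this header: driving a single InstanceRecycler
// according to the declared contract (init() first, then do_recycle(); both return 0 on
// success). The caller is assumed to already hold the instance metadata, the thread pool
// group and the lazy committer; how those are obtained is elided.
static int recycle_one_instance_sketch(std::shared_ptr<TxnKv> txn_kv,
                                       const InstanceInfoPB& instance,
                                       RecyclerThreadPoolGroup pools,
                                       std::shared_ptr<TxnLazyCommitter> committer) {
    InstanceRecycler recycler(std::move(txn_kv), instance, std::move(pools),
                              std::move(committer));
    if (int ret = recycler.init(); ret != 0) {
        return ret; // storage vault accessors could not be initialized
    }
    return recycler.do_recycle(); // runs the recycle_* sub-tasks
}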
453 | | |
454 | | // Helper class to check if operation logs can be recycled based on snapshots and versionstamps |
455 | | class OperationLogRecycleChecker { |
456 | | public: |
457 | | OperationLogRecycleChecker(std::string_view instance_id, TxnKv* txn_kv) |
458 | 24 | : instance_id_(instance_id), txn_kv_(txn_kv) {} |
459 | | |
460 | | // Initialize the checker by loading snapshots and setting max version stamp |
461 | | int init(); |
462 | | |
463 | | // Check if an operation log can be recycled |
464 | | bool can_recycle(const Versionstamp& log_versionstamp, int64_t log_min_timestamp) const; |
465 | | |
466 | 0 | Versionstamp max_versionstamp() const { return max_versionstamp_; } |
467 | | |
468 | | private: |
469 | | std::string_view instance_id_; |
470 | | TxnKv* txn_kv_; |
471 | | Versionstamp max_versionstamp_; |
472 | | std::map<Versionstamp, size_t> snapshot_indexes_; |
473 | | std::vector<std::pair<SnapshotPB, Versionstamp>> snapshots_; |
474 | | }; |
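// Illustrative sketch, not part of this header: using OperationLogRecycleChecker while
// scanning operation logs. init() loads the snapshots once, then each scanned log is
// tested with can_recycle() before its kv is removed. The versionstamp and timestamp
// would come from the scanned log entry; here they are parameters for illustration.
static bool may_drop_operation_log_sketch(std::string_view instance_id, TxnKv* txn_kv,
                                          const Versionstamp& log_versionstamp,
                                          int64_t log_min_timestamp) {
    OperationLogRecycleChecker checker(instance_id, txn_kv);
    if (checker.init() != 0) {
        return false; // snapshots could not be loaded; keep the log
    }
    return checker.can_recycle(log_versionstamp, log_min_timestamp);
}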
475 | | |
476 | | } // namespace doris::cloud |