/root/doris/cloud/src/recycler/recycler.h
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #pragma once |
19 | | |
20 | | #include <gen_cpp/cloud.pb.h> |
21 | | #include <glog/logging.h> |
22 | | |
23 | | #include <atomic> |
24 | | #include <condition_variable> |
25 | | #include <cstdint> |
26 | | #include <deque> |
27 | | #include <functional> |
28 | | #include <memory> |
29 | | #include <string> |
30 | | #include <string_view> |
31 | | #include <thread> |
32 | | #include <unordered_map> |
33 | | #include <unordered_set> |
34 | | #include <utility> |
35 | | |
36 | | #include "common/bvars.h" |
37 | | #include "meta-service/delete_bitmap_lock_white_list.h" |
38 | | #include "meta-service/txn_lazy_committer.h" |
39 | | #include "meta-store/versionstamp.h" |
40 | | #include "recycler/snapshot_chain_compactor.h" |
41 | | #include "recycler/snapshot_data_migrator.h" |
42 | | #include "recycler/storage_vault_accessor.h" |
43 | | #include "recycler/white_black_list.h" |
44 | | #include "snapshot/snapshot_manager.h" |
45 | | |
46 | | namespace brpc { |
47 | | class Server; |
48 | | } // namespace brpc |
49 | | |
50 | | namespace doris::cloud { |
51 | | class TxnKv; |
52 | | class InstanceRecycler; |
53 | | class StorageVaultAccessor; |
54 | | class Checker; |
55 | | class SimpleThreadPool; |
56 | | class RecyclerMetricsContext; |
57 | | class TabletRecyclerMetricsContext; |
58 | | class SegmentRecyclerMetricsContext; |
59 | | struct RecyclerThreadPoolGroup { |
60 | 8 | RecyclerThreadPoolGroup() = default; |
61 | | RecyclerThreadPoolGroup(std::shared_ptr<SimpleThreadPool> s3_producer_pool, |
62 | | std::shared_ptr<SimpleThreadPool> recycle_tablet_pool, |
63 | | std::shared_ptr<SimpleThreadPool> group_recycle_function_pool) |
64 | | : s3_producer_pool(std::move(s3_producer_pool)), |
65 | | recycle_tablet_pool(std::move(recycle_tablet_pool)), |
66 | 11 | group_recycle_function_pool(std::move(group_recycle_function_pool)) {} |
67 | 267 | ~RecyclerThreadPoolGroup() = default; |
68 | 124 | RecyclerThreadPoolGroup(const RecyclerThreadPoolGroup&) = default; |
69 | | RecyclerThreadPoolGroup& operator=(const RecyclerThreadPoolGroup& other) = default; |
70 | 11 | RecyclerThreadPoolGroup& operator=(RecyclerThreadPoolGroup&& other) = default; |
71 | 124 | RecyclerThreadPoolGroup(RecyclerThreadPoolGroup&&) = default; |
72 | | // used for accessor.delete_files, accessor.delete_directory |
73 | | std::shared_ptr<SimpleThreadPool> s3_producer_pool; |
74 | | // used for InstanceRecycler::recycle_tablet |
75 | | std::shared_ptr<SimpleThreadPool> recycle_tablet_pool; |
76 | | std::shared_ptr<SimpleThreadPool> group_recycle_function_pool; |
77 | | }; |
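 | | // Construction sketch (illustrative only; the three pools are assumed to be |
 | | // created by the recycler startup code and are not shown here): |
 | | // |
 | | //   std::shared_ptr<SimpleThreadPool> s3_pool;      // hypothetical, created elsewhere |
 | | //   std::shared_ptr<SimpleThreadPool> tablet_pool;  // hypothetical, created elsewhere |
 | | //   std::shared_ptr<SimpleThreadPool> group_pool;   // hypothetical, created elsewhere |
 | | //   RecyclerThreadPoolGroup pools(s3_pool, tablet_pool, group_pool); |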
78 | | |
79 | | class Recycler { |
80 | | public: |
81 | | explicit Recycler(std::shared_ptr<TxnKv> txn_kv); |
82 | | ~Recycler(); |
83 | | |
84 | | // returns 0 for success otherwise error |
85 | | int start(brpc::Server* server); |
86 | | |
87 | | void stop(); |
88 | | |
89 | 288 | bool stopped() const { return stopped_.load(std::memory_order_acquire); } |
90 | | |
91 | | private: |
92 | | void recycle_callback(); |
93 | | |
94 | | void instance_scanner_callback(); |
95 | | |
96 | | void lease_recycle_jobs(); |
97 | | |
98 | | void check_recycle_tasks(); |
99 | | |
100 | | private: |
101 | | friend class RecyclerServiceImpl; |
102 | | |
103 | | std::shared_ptr<TxnKv> txn_kv_; |
104 | | std::atomic_bool stopped_ {false}; |
105 | | |
106 | | std::vector<std::thread> workers_; |
107 | | |
108 | | std::mutex mtx_; |
109 | | // notify recycle workers |
110 | | std::condition_variable pending_instance_cond_; |
111 | | std::deque<InstanceInfoPB> pending_instance_queue_; |
112 | | std::unordered_set<std::string> pending_instance_set_; |
113 | | std::unordered_map<std::string, std::shared_ptr<InstanceRecycler>> recycling_instance_map_; |
114 | | // notify instance scanner and lease thread |
115 | | std::condition_variable notifier_; |
116 | | |
117 | | std::string ip_port_; |
118 | | |
119 | | WhiteBlackList instance_filter_; |
120 | | std::unique_ptr<Checker> checker_; |
121 | | |
122 | | RecyclerThreadPoolGroup _thread_pool_group; |
123 | | |
124 | | std::shared_ptr<TxnLazyCommitter> txn_lazy_committer_; |
125 | | std::shared_ptr<SnapshotManager> snapshot_manager_; |
126 | | std::shared_ptr<SnapshotDataMigrator> snapshot_data_migrator_; |
127 | | std::shared_ptr<SnapshotChainCompactor> snapshot_chain_compactor_; |
128 | | }; |
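 | | // Lifecycle sketch (illustrative only; assumes a TxnKv implementation and a |
 | | // brpc::Server are already set up elsewhere): |
 | | // |
 | | //   auto recycler = std::make_unique<Recycler>(txn_kv);  // txn_kv: std::shared_ptr<TxnKv> |
 | | //   if (recycler->start(server) != 0) {                  // server: brpc::Server* |
 | | //       LOG(WARNING) << "failed to start recycler"; |
 | | //   } |
 | | //   // ... on shutdown ... |
 | | //   recycler->stop(); |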
129 | | |
130 | | enum class RowsetRecyclingState { |
131 | | FORMAL_ROWSET, |
132 | | TMP_ROWSET, |
133 | | }; |
134 | | |
135 | | // Represents a single rowset deletion task for batch delete |
136 | | struct RowsetDeleteTask { |
137 | | RowsetMetaCloudPB rowset_meta; |
138 | | std::string recycle_rowset_key; // Primary key marking "pending recycle" |
139 | | std::string non_versioned_rowset_key; // Legacy non-versioned rowset meta key |
140 | | std::string versioned_rowset_key; // Versioned meta rowset key |
141 | | std::string rowset_ref_count_key; |
142 | | }; |
143 | | |
144 | | class RecyclerMetricsContext { |
145 | | public: |
146 | 9 | RecyclerMetricsContext() = default; |
147 | | |
148 | | RecyclerMetricsContext(std::string instance_id, std::string operation_type) |
149 | 451 | : operation_type(std::move(operation_type)), instance_id(std::move(instance_id)) { |
150 | 451 | start(); |
151 | 451 | } |
152 | | |
153 | 459 | ~RecyclerMetricsContext() = default; |
154 | | |
155 | | std::atomic_ullong total_need_recycle_data_size = 0; |
156 | | std::atomic_ullong total_need_recycle_num = 0; |
157 | | |
158 | | std::atomic_ullong total_recycled_data_size = 0; |
159 | | std::atomic_ullong total_recycled_num = 0; |
160 | | |
161 | | std::string operation_type; |
162 | | std::string instance_id; |
163 | | |
164 | | double start_time = 0; |
165 | | |
166 | 450 | void start() { |
167 | 450 | start_time = duration_cast<std::chrono::milliseconds>( |
168 | 450 | std::chrono::system_clock::now().time_since_epoch()) |
169 | 450 | .count(); |
170 | 450 | } |
171 | | |
172 | 210 | double duration() const { |
173 | 210 | return duration_cast<std::chrono::milliseconds>( |
174 | 210 | std::chrono::system_clock::now().time_since_epoch()) |
175 | 210 | .count() - |
176 | 210 | start_time; |
177 | 210 | } |
178 | | |
179 | 20 | void reset() { |
180 | 20 | total_need_recycle_data_size = 0; |
181 | 20 | total_need_recycle_num = 0; |
182 | 20 | total_recycled_data_size = 0; |
183 | 20 | total_recycled_num = 0; |
184 | 20 | start_time = duration_cast<std::chrono::milliseconds>( |
185 | 20 | std::chrono::system_clock::now().time_since_epoch()) |
186 | 20 | .count(); |
187 | 20 | } |
188 | | |
189 | 211 | void finish_report() { |
190 | 211 | if (!operation_type.empty()) { |
191 | 211 | double cost = duration(); |
192 | 211 | g_bvar_recycler_instance_last_round_recycle_elpased_ts.put( |
193 | 211 | {instance_id, operation_type}, cost); |
194 | 211 | g_bvar_recycler_instance_recycle_round.put({instance_id, operation_type}, 1); |
195 | 211 | LOG(INFO) << "recycle instance: " << instance_id |
196 | 211 | << ", operation type: " << operation_type << ", cost: " << cost |
197 | 211 | << " ms, total recycled num: " << total_recycled_num.load() |
198 | 211 | << ", total recycled data size: " << total_recycled_data_size.load() |
199 | 211 | << " bytes"; |
200 | 211 | if (cost != 0) { |
201 | 194 | if (total_recycled_num.load() != 0) { |
202 | 36 | g_bvar_recycler_instance_recycle_time_per_resource.put( |
203 | 36 | {instance_id, operation_type}, cost / total_recycled_num.load()); |
204 | 36 | } |
205 | 194 | g_bvar_recycler_instance_recycle_bytes_per_ms.put( |
206 | 194 | {instance_id, operation_type}, total_recycled_data_size.load() / cost); |
207 | 194 | } |
208 | 211 | } |
209 | 211 | } |
210 | | |
211 | | // `is_begin` is used to initialize the total number of items that need to be recycled |
212 | 24.6k | void report(bool is_begin = false) { |
213 | 24.6k | if (!operation_type.empty()) { |
214 | | // is init |
215 | 24.6k | if (is_begin) { |
216 | 0 | auto value = total_need_recycle_num.load(); |
217 | | |
218 | 0 | g_bvar_recycler_instance_last_round_to_recycle_bytes.put( |
219 | 0 | {instance_id, operation_type}, total_need_recycle_data_size.load()); |
220 | 0 | g_bvar_recycler_instance_last_round_to_recycle_num.put( |
221 | 0 | {instance_id, operation_type}, value); |
222 | 24.6k | } else { |
223 | 24.6k | g_bvar_recycler_instance_last_round_recycled_bytes.put( |
224 | 24.6k | {instance_id, operation_type}, total_recycled_data_size.load()); |
225 | 24.6k | g_bvar_recycler_instance_recycle_total_bytes_since_started.put( |
226 | 24.6k | {instance_id, operation_type}, total_recycled_data_size.load()); |
227 | 24.6k | g_bvar_recycler_instance_last_round_recycled_num.put({instance_id, operation_type}, |
228 | 24.6k | total_recycled_num.load()); |
229 | 24.6k | g_bvar_recycler_instance_recycle_total_num_since_started.put( |
230 | 24.6k | {instance_id, operation_type}, total_recycled_num.load()); |
231 | 24.6k | } |
232 | 24.6k | } |
233 | 24.6k | } |
234 | | }; |
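 | | // Typical reporting flow (illustrative only, based on the members above; the |
 | | // operation type string and counters are examples, not prescribed values): |
 | | // |
 | | //   RecyclerMetricsContext ctx(instance_id, "recycle_rowsets"); |
 | | //   ctx.total_need_recycle_num = num_to_recycle; |
 | | //   ctx.report(true);                        // publish the "to recycle" totals once |
 | | //   for (/* each recycled item */;;) { |
 | | //       ctx.total_recycled_num.fetch_add(1); |
 | | //       ctx.total_recycled_data_size.fetch_add(bytes); |
 | | //       ctx.report();                        // publish incremental progress |
 | | //   } |
 | | //   ctx.finish_report();                     // publish elapsed time and per-item rates |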
235 | | |
236 | | class TabletRecyclerMetricsContext : public RecyclerMetricsContext { |
237 | | public: |
238 | 124 | TabletRecyclerMetricsContext() : RecyclerMetricsContext("global_recycler", "recycle_tablet") {} |
239 | | }; |
240 | | |
241 | | class SegmentRecyclerMetricsContext : public RecyclerMetricsContext { |
242 | | public: |
243 | | SegmentRecyclerMetricsContext() |
244 | 124 | : RecyclerMetricsContext("global_recycler", "recycle_segment") {} |
245 | | }; |
246 | | |
247 | | class InstanceRecycler { |
248 | | public: |
249 | | struct PackedFileRecycleStats { |
250 | | int64_t num_scanned = 0; // packed-file kv scanned |
251 | | int64_t num_corrected = 0; // packed-file kv corrected |
252 | | int64_t num_deleted = 0; // packed-file kv deleted |
253 | | int64_t num_failed = 0; // packed-file kv failed |
254 | | int64_t bytes_deleted = 0; // packed-file kv bytes deleted from txn-kv |
255 | | int64_t num_object_deleted = 0; // packed-file objects deleted from storage (vault/HDFS) |
256 | | int64_t bytes_object_deleted = 0; // bytes deleted from storage objects |
257 | | int64_t rowset_scan_count = 0; // rowset metas scanned during correction |
258 | | }; |
259 | | |
260 | | explicit InstanceRecycler(std::shared_ptr<TxnKv> txn_kv, const InstanceInfoPB& instance, |
261 | | RecyclerThreadPoolGroup thread_pool_group, |
262 | | std::shared_ptr<TxnLazyCommitter> txn_lazy_committer); |
263 | | ~InstanceRecycler(); |
264 | | |
265 | 0 | std::string_view instance_id() const { return instance_id_; } |
266 | 3 | const InstanceInfoPB& instance_info() const { return instance_info_; } |
267 | | |
268 | | // returns 0 for success otherwise error |
269 | | int init(); |
270 | | |
271 | 0 | void stop() { stopped_.store(true, std::memory_order_release); } |
272 | 80 | bool stopped() const { return stopped_.load(std::memory_order_acquire); } |
273 | | |
274 | | // returns 0 for success otherwise error |
275 | | int do_recycle(); |
276 | | |
277 | | // remove all kv and data in this instance; ONLY to be called when the instance has been deleted |
278 | | // returns 0 for success otherwise error |
279 | | int recycle_deleted_instance(); |
280 | | |
281 | | // scan and recycle expired indexes: |
282 | | // 1. dropped table, dropped mv |
283 | | // 2. half-success table/index left over from creation |
284 | | // returns 0 for success otherwise error |
285 | | int recycle_indexes(); |
286 | | |
287 | | // scan and recycle expired partitions: |
288 | | // 1. dropped partition |
289 | | // 2. half-success partition left over from creation |
290 | | // returns 0 for success otherwise error |
291 | | int recycle_partitions(); |
292 | | |
293 | | // scan and recycle expired rowsets: |
294 | | // 1. prepare_rowset produces a recycle_rowset before data is uploaded to remote storage (memo) |
295 | | // 2. compaction converts its input rowsets into recycle_rowset |
296 | | // returns 0 for success otherwise error |
297 | | int recycle_rowsets(); |
298 | | |
299 | | // like `recycle_rowsets`, but for versioned rowsets. |
300 | | int recycle_versioned_rowsets(); |
301 | | |
302 | | // scan and recycle expired tmp rowsets: |
303 | | // 1. commit_rowset produces a tmp_rowset when it finishes uploading data (load or compaction) to remote storage |
304 | | // returns 0 for success otherwise error |
305 | | int recycle_tmp_rowsets(); |
306 | | |
307 | | /** |
308 | | * recycle all tablets belonging to the index specified by `index_id` |
309 | | * |
310 | | * @param partition_id if positive, only recycle tablets in this partition belonging to the specified index |
311 | | * @return 0 for success otherwise error |
312 | | */ |
313 | | int recycle_tablets(int64_t table_id, int64_t index_id, RecyclerMetricsContext& ctx, |
314 | | int64_t partition_id = -1); |
315 | | |
316 | | /** |
317 | | * recycle all rowsets belonging to the tablet specified by `tablet_id` |
318 | | * |
319 | | * @return 0 for success otherwise error |
320 | | */ |
321 | | int recycle_tablet(int64_t tablet_id, RecyclerMetricsContext& metrics_context); |
322 | | |
323 | | /** |
324 | | * like `recycle_tablet`, but for versioned tablet |
325 | | */ |
326 | | int recycle_versioned_tablet(int64_t tablet_id, RecyclerMetricsContext& metrics_context); |
327 | | |
328 | | // scan and recycle useless partition version kv |
329 | | int recycle_versions(); |
330 | | |
331 | | // scan and recycle the orphan partitions |
332 | | int recycle_orphan_partitions(); |
333 | | |
334 | | // scan and abort timed-out txn labels |
335 | | // returns 0 for success otherwise error |
336 | | int abort_timeout_txn(); |
337 | | |
338 | | // scan and recycle expired txn labels |
339 | | // returns 0 for success otherwise error |
340 | | int recycle_expired_txn_label(); |
341 | | |
342 | | // scan and recycle finished or timeout copy jobs |
343 | | // returns 0 for success otherwise error |
344 | | int recycle_copy_jobs(); |
345 | | |
346 | | // scan and recycle dropped internal stage |
347 | | // returns 0 for success otherwise error |
348 | | int recycle_stage(); |
349 | | |
350 | | // scan and recycle expired stage objects |
351 | | // returns 0 for success otherwise error |
352 | | int recycle_expired_stage_objects(); |
353 | | |
354 | | // scan and recycle operation logs |
355 | | // returns 0 for success otherwise error |
356 | | int recycle_operation_logs(); |
357 | | |
358 | | // scan and recycle expired restore jobs |
359 | | // returns 0 for success otherwise error |
360 | | int recycle_restore_jobs(); |
361 | | |
362 | | /** |
363 | | * Scan packed-file metadata, correct reference counters, and recycle unused packed files. |
364 | | * |
365 | | * @return 0 on success, non-zero error code otherwise |
366 | | */ |
367 | | int recycle_packed_files(); |
368 | | |
369 | | // scan and recycle snapshots |
370 | | // returns 0 for success otherwise error |
371 | | int recycle_cluster_snapshots(); |
372 | | |
373 | | // scan and recycle ref rowsets for deleted instance |
374 | | // returns 0 for success otherwise error |
375 | | int recycle_ref_rowsets(bool* has_unrecycled_rowsets); |
376 | | |
377 | | bool check_recycle_tasks(); |
378 | | |
379 | | int scan_and_statistics_indexes(); |
380 | | |
381 | | int scan_and_statistics_partitions(); |
382 | | |
383 | | int scan_and_statistics_rowsets(); |
384 | | |
385 | | int scan_and_statistics_tmp_rowsets(); |
386 | | |
387 | | int scan_and_statistics_abort_timeout_txn(); |
388 | | |
389 | | int scan_and_statistics_expired_txn_label(); |
390 | | |
391 | | int scan_and_statistics_copy_jobs(); |
392 | | |
393 | | int scan_and_statistics_stage(); |
394 | | |
395 | | int scan_and_statistics_expired_stage_objects(); |
396 | | |
397 | | int scan_and_statistics_versions(); |
398 | | |
399 | | int scan_and_statistics_restore_jobs(); |
400 | | |
401 | | /** |
402 | | * Decode the key of a packed-file metadata record into the persisted object path. |
403 | | * |
404 | | * @param key raw key persisted in txn-kv |
405 | | * @param packed_path output object storage path referenced by the key |
406 | | * @return true if decoding succeeds, false otherwise |
407 | | */ |
408 | | static bool decode_packed_file_key(std::string_view key, std::string* packed_path); |
409 | | |
410 | 29 | void TEST_add_accessor(std::string_view id, std::shared_ptr<StorageVaultAccessor> accessor) { |
411 | 29 | accessor_map_.insert({std::string(id), std::move(accessor)}); |
412 | 29 | } |
413 | | |
414 | | // Recycle snapshot meta and data, return 0 for success otherwise error. |
415 | | int recycle_snapshot_meta_and_data(const std::string& resource_id, |
416 | | Versionstamp snapshot_version, |
417 | | const SnapshotPB& snapshot_pb); |
418 | | |
419 | | private: |
420 | | // returns 0 for success otherwise error |
421 | | int init_obj_store_accessors(); |
422 | | |
423 | | // returns 0 for success otherwise error |
424 | | int init_storage_vault_accessors(); |
425 | | |
426 | | /** |
427 | | * Scan key-value pairs between [`begin`, `end`), and perform `recycle_func` on each key-value pair. |
428 | | * |
429 | | * @param recycle_func defines how to recycle resources corresponding to a key-value pair. Returns 0 if the recycling is successful. |
430 | | * @param loop_done is called after `RangeGetIterator` has no next kv. Usually used to perform a batch recycling. Returns 0 if success. |
431 | | * @return 0 if all corresponding resources are recycled successfully, otherwise non-zero |
432 | | */ |
433 | | int scan_and_recycle(std::string begin, std::string_view end, |
434 | | std::function<int(std::string_view k, std::string_view v)> recycle_func, |
435 | | std::function<int()> loop_done = nullptr); |
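 | | // Call-pattern sketch (illustrative only; begin_key/end_key and the lambda |
 | | // bodies are hypothetical, matching the signature above): |
 | | // |
 | | //   int ret = scan_and_recycle(begin_key, end_key, |
 | | //           [&](std::string_view k, std::string_view v) -> int { |
 | | //               // recycle the resource described by this kv; 0 means success |
 | | //               return 0; |
 | | //           }, |
 | | //           [&]() -> int { |
 | | //               // batch recycling performed once the current iterator is exhausted |
 | | //               return 0; |
 | | //           }); |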
436 | | |
437 | | // return 0 for success otherwise error |
438 | | int delete_rowset_data(const doris::RowsetMetaCloudPB& rs_meta_pb); |
439 | | |
440 | | // return 0 for success otherwise error |
441 | | // NOTE: this function should ONLY be called when the file paths cannot be calculated |
442 | | int delete_rowset_data(const std::string& resource_id, int64_t tablet_id, |
443 | | const std::string& rowset_id); |
444 | | |
445 | | // return 0 for success otherwise error |
446 | | int delete_rowset_data(const std::map<std::string, doris::RowsetMetaCloudPB>& rowsets, |
447 | | RowsetRecyclingState type, RecyclerMetricsContext& metrics_context); |
448 | | |
449 | | // return 0 for success otherwise error |
450 | | int decrement_packed_file_ref_counts(const doris::RowsetMetaCloudPB& rs_meta_pb); |
451 | | |
452 | | int delete_packed_file_and_kv(const std::string& packed_file_path, |
453 | | const std::string& packed_key, |
454 | | const cloud::PackedFileInfoPB& packed_info); |
455 | | |
456 | | /** |
457 | | * Get stage storage info from instance and init StorageVaultAccessor |
458 | | * @return 0 if accessor is successfully inited, 1 if stage not found, negative for error |
459 | | */ |
460 | | int init_copy_job_accessor(const std::string& stage_id, const StagePB::StageType& stage_type, |
461 | | std::shared_ptr<StorageVaultAccessor>* accessor); |
462 | | |
463 | | void register_recycle_task(const std::string& task_name, int64_t start_time); |
464 | | |
465 | | void unregister_recycle_task(const std::string& task_name); |
466 | | |
467 | | // scan all tablets and collect statistics metrics |
468 | | int scan_tablets_and_statistics(int64_t tablet_id, int64_t index_id, |
469 | | RecyclerMetricsContext& metrics_context, |
470 | | int64_t partition_id = -1, bool is_empty_tablet = false); |
471 | | |
472 | | // scan all rowsets of a tablet and collect statistics metrics |
473 | | int scan_tablet_and_statistics(int64_t tablet_id, RecyclerMetricsContext& metrics_context); |
474 | | |
475 | | // Recycle operation log and the log keys. The log keys are specified by `raw_keys`. |
476 | | // |
477 | | // Both `operation_log` and `raw_keys` will be removed in the same transaction, to ensure atomicity. |
478 | | int recycle_operation_log(Versionstamp log_version, const std::vector<std::string>& raw_keys, |
479 | | OperationLogPB operation_log); |
480 | | |
481 | | // Recycle rowset meta and data, return 0 for success otherwise error |
482 | | // |
483 | | // Both recycle_rowset_key and non_versioned_rowset_key will be removed in the same transaction. |
484 | | // |
485 | | // This function will decrease the rowset ref count and remove the rowset meta and data if the ref count is 1. |
486 | | int recycle_rowset_meta_and_data(std::string_view recycle_rowset_key, |
487 | | const RowsetMetaCloudPB& rowset_meta, |
488 | | std::string_view non_versioned_rowset_key = ""); |
489 | | |
490 | | // Classify rowset task by ref_count, return 0 to add to batch delete, 1 if handled (ref>1), -1 on error |
491 | | int classify_rowset_task_by_ref_count(RowsetDeleteTask& task, |
492 | | std::vector<RowsetDeleteTask>& batch_delete_tasks); |
493 | | |
494 | | // Cleanup metadata for deleted rowsets, return 0 for success otherwise error |
495 | | int cleanup_rowset_metadata(const std::vector<RowsetDeleteTask>& tasks); |
496 | | |
497 | | // Whether the instance has any snapshots, return 0 for success otherwise error. |
498 | | int has_cluster_snapshots(bool* any); |
499 | | |
500 | | // Whether the versioned keys need to be recycled |
501 | | bool should_recycle_versioned_keys() const; |
502 | | |
503 | | /** |
504 | | * Parse the path of a packed-file fragment and output the owning tablet and rowset identifiers. |
505 | | * |
506 | | * @param path packed-file fragment path to decode |
507 | | * @param tablet_id output tablet identifier extracted from the path |
508 | | * @param rowset_id output rowset identifier extracted from the path |
509 | | * @return true if both identifiers are successfully parsed, false otherwise |
510 | | */ |
511 | | static bool parse_packed_slice_path(std::string_view path, int64_t* tablet_id, |
512 | | std::string* rowset_id); |
513 | | // Check whether a rowset referenced by a packed file still exists in metadata. |
514 | | // @param stats optional recycle statistics collector. |
515 | | int check_rowset_exists(int64_t tablet_id, const std::string& rowset_id, bool* exists, |
516 | | PackedFileRecycleStats* stats = nullptr); |
517 | | int check_recycle_and_tmp_rowset_exists(int64_t tablet_id, const std::string& rowset_id, |
518 | | int64_t txn_id, bool* recycle_exists, bool* tmp_exists); |
519 | | /** |
520 | | * Resolve which storage accessor should be used for a packed file. |
521 | | * |
522 | | * @param hint preferred storage resource identifier persisted with the file |
523 | | * @return pair of the resolved resource identifier and accessor; the accessor can be null if unavailable |
524 | | */ |
525 | | std::pair<std::string, std::shared_ptr<StorageVaultAccessor>> resolve_packed_file_accessor( |
526 | | const std::string& hint); |
527 | | // Recompute packed-file counters and lifecycle state after validating contained fragments. |
528 | | // @param stats optional recycle statistics collector. |
529 | | int correct_packed_file_info(cloud::PackedFileInfoPB* packed_info, bool* changed, |
530 | | const std::string& packed_file_path, |
531 | | PackedFileRecycleStats* stats = nullptr); |
532 | | // Correct and recycle a single packed-file record, updating metadata and accounting statistics. |
533 | | // @param stats optional recycle statistics collector. |
534 | | int process_single_packed_file(const std::string& packed_key, |
535 | | const std::string& packed_file_path, |
536 | | PackedFileRecycleStats* stats); |
537 | | // Process a packed-file KV while scanning and aggregate recycling statistics. |
538 | | int handle_packed_file_kv(std::string_view key, std::string_view value, |
539 | | PackedFileRecycleStats* stats, int* ret); |
540 | | |
541 | | // Abort the transaction/job associated with a rowset that is about to be recycled. |
542 | | // This function is called during rowset recycling to prevent data loss by ensuring that |
543 | | // the transaction/job cannot be committed after its rowset data has been deleted. |
544 | | // |
545 | | // Scenario: |
546 | | // When recycler detects an expired prepared rowset (e.g., from a failed load transaction/job), |
547 | | // it needs to recycle the rowset data. However, if the transaction/job is still active and gets |
548 | | // committed after the data is deleted, it would lead to data loss - the transaction/job would |
549 | | // reference non-existent data. |
550 | | // |
551 | | // Solution: |
552 | | // Before recycling the rowset data, this function aborts the associated transaction/job to ensure |
553 | | // it cannot be committed. This guarantees that: |
554 | | // 1. The transaction/job state is marked as ABORTED |
555 | | // 2. Any subsequent commit_rowset/commit_txn attempts will fail |
556 | | // 3. The rowset data can be safely deleted without risk of data loss |
557 | | // |
558 | | // Parameters: |
559 | | // txn_id: The transaction/job ID associated with the rowset to be recycled |
560 | | // |
561 | | // Returns: |
562 | | // 0 on success, -1 on failure |
563 | | int abort_txn_for_related_rowset(int64_t txn_id); |
564 | | int abort_job_for_related_rowset(const RowsetMetaCloudPB& rowset_meta); |
565 | | |
566 | | template <typename T> |
567 | | int abort_txn_or_job_for_recycle(T& rowset_meta_pb); |
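 | | // Ordering sketch (illustrative only, not an actual call site): the abort must |
 | | // succeed before any rowset data is deleted, per the scenario described above: |
 | | // |
 | | //   if (abort_txn_or_job_for_recycle(rowset_meta) != 0) { |
 | | //       return -1;  // cannot guarantee the txn/job won't commit later; keep the data |
 | | //   } |
 | | //   // safe to delete the rowset's data and meta from here on |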
568 | | |
569 | | private: |
570 | | std::atomic_bool stopped_ {false}; |
571 | | std::shared_ptr<TxnKv> txn_kv_; |
572 | | std::string instance_id_; |
573 | | InstanceInfoPB instance_info_; |
574 | | |
575 | | // TODO(plat1ko): Add new accessors to the map at runtime for newly created storage vaults |
576 | | std::unordered_map<std::string, std::shared_ptr<StorageVaultAccessor>> accessor_map_; |
577 | | using InvertedIndexInfo = |
578 | | std::pair<InvertedIndexStorageFormatPB, std::vector<std::pair<int64_t, std::string>>>; |
579 | | |
580 | | class InvertedIndexIdCache; |
581 | | std::unique_ptr<InvertedIndexIdCache> inverted_index_id_cache_; |
582 | | |
583 | | std::mutex recycled_tablets_mtx_; |
584 | | // Store recycled tablets; we can skip deleting rowset data of these tablets because their data has already been deleted. |
585 | | std::unordered_set<int64_t> recycled_tablets_; |
586 | | |
587 | | std::mutex recycle_tasks_mutex; |
588 | | // <task_name, start_time> |
589 | | std::map<std::string, int64_t> running_recycle_tasks; |
590 | | |
591 | | RecyclerThreadPoolGroup _thread_pool_group; |
592 | | |
593 | | std::shared_ptr<TxnLazyCommitter> txn_lazy_committer_; |
594 | | std::shared_ptr<SnapshotManager> snapshot_manager_; |
595 | | std::shared_ptr<DeleteBitmapLockWhiteList> delete_bitmap_lock_white_list_; |
596 | | std::shared_ptr<ResourceManager> resource_mgr_; |
597 | | |
598 | | TabletRecyclerMetricsContext tablet_metrics_context_; |
599 | | SegmentRecyclerMetricsContext segment_metrics_context_; |
600 | | }; |
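 | | // Per-instance usage sketch (illustrative only; txn_kv, instance_info, pools and |
 | | // committer are assumed to be created elsewhere by the driving Recycler): |
 | | // |
 | | //   auto ir = std::make_shared<InstanceRecycler>(txn_kv, instance_info, |
 | | //                                                thread_pool_group, txn_lazy_committer); |
 | | //   if (ir->init() != 0) { /* handle error */ } |
 | | //   if (ir->do_recycle() != 0) { /* some recycle sub-task failed */ } |
 | | //   ir->stop();  // request early termination, e.g. on shutdown |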
601 | | |
602 | | struct OperationLogReferenceInfo { |
603 | | bool referenced_by_instance = false; |
604 | | bool referenced_by_snapshot = false; |
605 | | Versionstamp referenced_snapshot_timestamp; |
606 | | }; |
607 | | |
608 | | // Helper class to check if operation logs can be recycled based on snapshots and versionstamps |
609 | | class OperationLogRecycleChecker { |
610 | | public: |
611 | | OperationLogRecycleChecker(std::string_view instance_id, TxnKv* txn_kv, |
612 | | const InstanceInfoPB& instance_info) |
613 | 27 | : instance_id_(instance_id), txn_kv_(txn_kv), instance_info_(instance_info) {} |
614 | | |
615 | | // Initialize the checker by loading snapshots and setting max version stamp |
616 | | int init(); |
617 | | |
618 | | // Check if an operation log can be recycled |
619 | | bool can_recycle(const Versionstamp& log_versionstamp, int64_t log_min_timestamp, |
620 | | OperationLogReferenceInfo* reference_info) const; |
621 | | |
622 | 0 | Versionstamp max_versionstamp() const { return max_versionstamp_; } |
623 | | |
624 | 23 | const std::vector<std::pair<SnapshotPB, Versionstamp>>& get_snapshots() const { |
625 | 23 | return snapshots_; |
626 | 23 | } |
627 | | |
628 | | private: |
629 | | std::string_view instance_id_; |
630 | | TxnKv* txn_kv_; |
631 | | const InstanceInfoPB& instance_info_; |
632 | | Versionstamp max_versionstamp_; |
633 | | Versionstamp source_snapshot_versionstamp_; |
634 | | std::map<Versionstamp, size_t> snapshot_indexes_; |
635 | | std::vector<std::pair<SnapshotPB, Versionstamp>> snapshots_; |
636 | | }; |
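 | | // Usage sketch (illustrative only; the log inputs are hypothetical values taken |
 | | // from an operation-log scan): |
 | | // |
 | | //   OperationLogRecycleChecker checker(instance_id, txn_kv.get(), instance_info); |
 | | //   if (checker.init() != 0) { /* handle error */ } |
 | | //   OperationLogReferenceInfo ref_info; |
 | | //   if (checker.can_recycle(log_versionstamp, log_min_timestamp, &ref_info)) { |
 | | //       // this operation log is no longer referenced and can be recycled |
 | | //   } |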
637 | | |
638 | | class SnapshotDataSizeCalculator { |
639 | | public: |
640 | | SnapshotDataSizeCalculator(std::string_view instance_id, std::shared_ptr<TxnKv> txn_kv) |
641 | 24 | : instance_id_(instance_id), txn_kv_(std::move(txn_kv)) {} |
642 | | |
643 | | void init(const std::vector<std::pair<SnapshotPB, Versionstamp>>& snapshots); |
644 | | |
645 | | int calculate_operation_log_data_size(const std::string_view& log_key, |
646 | | OperationLogPB& operation_log, |
647 | | OperationLogReferenceInfo& reference_info); |
648 | | |
649 | | int save_snapshot_data_size_with_retry(); |
650 | | |
651 | | private: |
652 | | int get_all_index_partitions(int64_t db_id, int64_t table_id, int64_t index_id, |
653 | | std::vector<int64_t>* partition_ids); |
654 | | int get_index_partition_data_size(int64_t db_id, int64_t table_id, int64_t index_id, |
655 | | int64_t partition_id, int64_t* data_size); |
656 | | int save_operation_log(const std::string_view& log_key, OperationLogPB& operation_log); |
657 | | int save_snapshot_data_size(); |
658 | | |
659 | | std::string_view instance_id_; |
660 | | std::shared_ptr<TxnKv> txn_kv_; |
661 | | |
662 | | int64_t instance_retained_data_size_ = 0; |
663 | | std::map<Versionstamp, int64_t> retained_data_size_; |
664 | | std::set<std::string> calculated_partitions_; |
665 | | }; |
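 | | // Usage sketch (illustrative only; feeding init() from |
 | | // OperationLogRecycleChecker::get_snapshots() is an assumption based on the |
 | | // matching types, and log_key/operation_log/ref_info are hypothetical inputs): |
 | | // |
 | | //   SnapshotDataSizeCalculator calc(instance_id, txn_kv); |
 | | //   calc.init(checker.get_snapshots()); |
 | | //   // for each retained operation log: |
 | | //   calc.calculate_operation_log_data_size(log_key, operation_log, ref_info); |
 | | //   // after the scan: |
 | | //   calc.save_snapshot_data_size_with_retry(); |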
666 | | |
667 | | } // namespace doris::cloud |