/root/doris/be/src/olap/olap_server.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include <gen_cpp/Types_types.h> |
19 | | #include <gen_cpp/olap_file.pb.h> |
20 | | #include <glog/logging.h> |
21 | | #include <rapidjson/prettywriter.h> |
22 | | #include <rapidjson/stringbuffer.h> |
23 | | #include <stdint.h> |
24 | | #include <sys/types.h> |
25 | | |
26 | | #include <algorithm> |
27 | | #include <atomic> |
28 | | // IWYU pragma: no_include <bits/chrono.h> |
29 | | #include <chrono> // IWYU pragma: keep |
30 | | #include <cmath> |
31 | | #include <condition_variable> |
32 | | #include <cstdint> |
33 | | #include <ctime> |
34 | | #include <functional> |
35 | | #include <map> |
36 | | #include <memory> |
37 | | #include <mutex> |
38 | | #include <ostream> |
39 | | #include <random> |
40 | | #include <shared_mutex> |
41 | | #include <string> |
42 | | #include <thread> |
43 | | #include <unordered_set> |
44 | | #include <utility> |
45 | | #include <vector> |
46 | | |
47 | | #include "agent/utils.h" |
48 | | #include "common/config.h" |
49 | | #include "common/logging.h" |
50 | | #include "common/status.h" |
51 | | #include "cpp/sync_point.h" |
52 | | #include "gen_cpp/FrontendService.h" |
53 | | #include "gen_cpp/internal_service.pb.h" |
54 | | #include "gutil/ref_counted.h" |
55 | | #include "io/fs/file_writer.h" // IWYU pragma: keep |
56 | | #include "io/fs/path.h" |
57 | | #include "olap/base_tablet.h" |
58 | | #include "olap/cold_data_compaction.h" |
59 | | #include "olap/compaction_permit_limiter.h" |
60 | | #include "olap/cumulative_compaction.h" |
61 | | #include "olap/cumulative_compaction_policy.h" |
62 | | #include "olap/cumulative_compaction_time_series_policy.h" |
63 | | #include "olap/data_dir.h" |
64 | | #include "olap/olap_common.h" |
65 | | #include "olap/olap_define.h" |
66 | | #include "olap/rowset/segcompaction.h" |
67 | | #include "olap/schema_change.h" |
68 | | #include "olap/single_replica_compaction.h" |
69 | | #include "olap/storage_engine.h" |
70 | | #include "olap/storage_policy.h" |
71 | | #include "olap/tablet.h" |
72 | | #include "olap/tablet_manager.h" |
73 | | #include "olap/tablet_meta.h" |
74 | | #include "olap/tablet_meta_manager.h" |
75 | | #include "olap/tablet_schema.h" |
76 | | #include "olap/task/engine_publish_version_task.h" |
77 | | #include "olap/task/index_builder.h" |
78 | | #include "runtime/client_cache.h" |
79 | | #include "runtime/memory/cache_manager.h" |
80 | | #include "runtime/memory/global_memory_arbitrator.h" |
81 | | #include "util/countdown_latch.h" |
82 | | #include "util/debug_points.h" |
83 | | #include "util/doris_metrics.h" |
84 | | #include "util/mem_info.h" |
85 | | #include "util/metrics.h" |
86 | | #include "util/thread.h" |
87 | | #include "util/threadpool.h" |
88 | | #include "util/thrift_rpc_helper.h" |
89 | | #include "util/time.h" |
90 | | #include "util/uid_util.h" |
91 | | #include "util/work_thread_pool.hpp" |
92 | | |
93 | | using std::string; |
94 | | |
95 | | namespace doris { |
96 | | |
97 | | using io::Path; |
98 | | |
99 | | // number of running SCHEMA-CHANGE threads |
100 | | volatile uint32_t g_schema_change_active_threads = 0; |
101 | | |
102 | | static const uint64_t DEFAULT_SEED = 104729; |
103 | | static const uint64_t MOD_PRIME = 7652413; |
104 | | |
105 | 0 | CompactionSubmitRegistry::CompactionSubmitRegistry(CompactionSubmitRegistry&& r) { |
106 | 0 | std::swap(_tablet_submitted_cumu_compaction, r._tablet_submitted_cumu_compaction); |
107 | 0 | std::swap(_tablet_submitted_base_compaction, r._tablet_submitted_base_compaction); |
108 | 0 | std::swap(_tablet_submitted_full_compaction, r._tablet_submitted_full_compaction); |
109 | 0 | } |
110 | | |
111 | 0 | CompactionSubmitRegistry CompactionSubmitRegistry::create_snapshot() { |
112 | | // full compaction is not engaged in this method |
113 | 0 | std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex); |
114 | 0 | CompactionSubmitRegistry registry; |
115 | 0 | registry._tablet_submitted_base_compaction = _tablet_submitted_base_compaction; |
116 | 0 | registry._tablet_submitted_cumu_compaction = _tablet_submitted_cumu_compaction; |
117 | 0 | return registry; |
118 | 0 | } |
119 | | |
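Note: create_snapshot() copies the submitted-tablet maps while holding _tablet_submitted_compaction_mutex, so the "non-lock, used in snapshot" methods below can walk the copy without taking the lock again. A minimal sketch of that copy-under-lock idea, using simplified stand-in types that are not part of this file:

    #include <cstdint>
    #include <mutex>
    #include <set>

    // Hypothetical mini-registry: snapshot() copies state under the lock, and the
    // caller then inspects the returned copy with no further locking.
    struct MiniRegistry {
        std::mutex mtx;
        std::set<int64_t> submitted; // stands in for the per-DataDir tablet sets

        std::set<int64_t> snapshot() {
            std::lock_guard<std::mutex> l(mtx);
            return submitted;
        }
    };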
120 | 1 | void CompactionSubmitRegistry::reset(const std::vector<DataDir*>& stores) { |
121 | | // full compaction is not engaged in this method |
122 | 1 | for (const auto& store : stores) { |
123 | 0 | _tablet_submitted_cumu_compaction[store] = {}; |
124 | 0 | _tablet_submitted_base_compaction[store] = {}; |
125 | 0 | } |
126 | 1 | } |
127 | | |
128 | | uint32_t CompactionSubmitRegistry::count_executing_compaction(DataDir* dir, |
129 | 2 | CompactionType compaction_type) { |
130 | | // non-lock, used in snapshot |
131 | 2 | const auto& compaction_tasks = _get_tablet_set(dir, compaction_type); |
132 | 10 | return std::count_if(compaction_tasks.begin(), compaction_tasks.end(), [](const auto& task) { |
133 | 10 | return task->compaction_stage == CompactionStage::EXECUTING; |
134 | 10 | }); |
135 | 2 | } |
136 | | |
137 | 1 | uint32_t CompactionSubmitRegistry::count_executing_cumu_and_base(DataDir* dir) { |
138 | | // non-lock, used in snapshot |
139 | 1 | return count_executing_compaction(dir, CompactionType::BASE_COMPACTION) + |
140 | 1 | count_executing_compaction(dir, CompactionType::CUMULATIVE_COMPACTION); |
141 | 1 | } |
142 | | |
143 | 0 | bool CompactionSubmitRegistry::has_compaction_task(DataDir* dir, CompactionType compaction_type) { |
144 | | // non-lock, used in snapshot |
145 | 0 | return !_get_tablet_set(dir, compaction_type).empty(); |
146 | 0 | } |
147 | | |
148 | | std::vector<TabletSharedPtr> CompactionSubmitRegistry::pick_topn_tablets_for_compaction( |
149 | | TabletManager* tablet_mgr, DataDir* data_dir, CompactionType compaction_type, |
150 | 0 | const CumuCompactionPolicyTable& cumu_compaction_policies, uint32_t* disk_max_score) { |
151 | | // non-lock, used in snapshot |
152 | 0 | return tablet_mgr->find_best_tablets_to_compaction(compaction_type, data_dir, |
153 | 0 | _get_tablet_set(data_dir, compaction_type), |
154 | 0 | disk_max_score, cumu_compaction_policies); |
155 | 0 | } |
156 | | |
157 | 21 | bool CompactionSubmitRegistry::insert(TabletSharedPtr tablet, CompactionType compaction_type) { |
158 | 21 | std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex); |
159 | 21 | auto& tablet_set = _get_tablet_set(tablet->data_dir(), compaction_type); |
160 | 21 | bool already_exist = !(tablet_set.insert(tablet).second); |
161 | 21 | return already_exist; |
162 | 21 | } |
163 | | |
164 | | void CompactionSubmitRegistry::remove(TabletSharedPtr tablet, CompactionType compaction_type, |
165 | 7 | std::function<void()> wakeup_cb) { |
166 | 7 | std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex); |
167 | 7 | auto& tablet_set = _get_tablet_set(tablet->data_dir(), compaction_type); |
168 | 7 | size_t removed = tablet_set.erase(tablet); |
169 | 7 | if (removed == 1) { |
170 | 7 | wakeup_cb(); |
171 | 7 | } |
172 | 7 | } |
173 | | |
174 | | CompactionSubmitRegistry::TabletSet& CompactionSubmitRegistry::_get_tablet_set( |
175 | 30 | DataDir* dir, CompactionType compaction_type) { |
176 | 30 | switch (compaction_type) { |
177 | 1 | case CompactionType::BASE_COMPACTION: |
178 | 1 | return _tablet_submitted_base_compaction[dir]; |
179 | 29 | case CompactionType::CUMULATIVE_COMPACTION: |
180 | 29 | return _tablet_submitted_cumu_compaction[dir]; |
181 | 0 | case CompactionType::FULL_COMPACTION: |
182 | 0 | return _tablet_submitted_full_compaction[dir]; |
183 | 0 | default: |
184 | 0 | CHECK(false) << "invalid compaction type"; |
185 | 30 | } |
186 | 30 | } |
187 | | |
188 | 0 | static int32_t get_cumu_compaction_threads_num(size_t data_dirs_num) { |
189 | 0 | int32_t threads_num = config::max_cumu_compaction_threads; |
190 | 0 | if (threads_num == -1) { |
191 | 0 | threads_num = data_dirs_num; |
192 | 0 | } |
193 | 0 | threads_num = threads_num <= 0 ? 1 : threads_num; |
194 | 0 | return threads_num; |
195 | 0 | } |
196 | | |
197 | 0 | static int32_t get_base_compaction_threads_num(size_t data_dirs_num) { |
198 | 0 | int32_t threads_num = config::max_base_compaction_threads; |
199 | 0 | if (threads_num == -1) { |
200 | 0 | threads_num = data_dirs_num; |
201 | 0 | } |
202 | 0 | threads_num = threads_num <= 0 ? 1 : threads_num; |
203 | 0 | return threads_num; |
204 | 0 | } |
205 | | |
206 | 0 | static int32_t get_single_replica_compaction_threads_num(size_t data_dirs_num) { |
207 | 0 | int32_t threads_num = config::max_single_replica_compaction_threads; |
208 | 0 | if (threads_num == -1) { |
209 | 0 | threads_num = data_dirs_num; |
210 | 0 | } |
211 | 0 | threads_num = threads_num <= 0 ? 1 : threads_num; |
212 | 0 | return threads_num; |
213 | 0 | } |
214 | | |
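Note: the three helpers above share one convention: a configured value of -1 means "one thread per data dir", and any non-positive result is clamped to 1. A minimal sketch of that shared rule (illustrative only, not a function in this file):

    #include <cstddef>
    #include <cstdint>

    // -1 => one thread per data dir; otherwise clamp to at least 1.
    int32_t resolve_compaction_threads(int32_t configured, size_t data_dirs_num) {
        int32_t n = (configured == -1) ? static_cast<int32_t>(data_dirs_num) : configured;
        return n <= 0 ? 1 : n;
    }
    // resolve_compaction_threads(-1, 4) == 4
    // resolve_compaction_threads(0, 4)  == 1
    // resolve_compaction_threads(8, 4)  == 8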
215 | 0 | Status StorageEngine::start_bg_threads() { |
216 | 0 | RETURN_IF_ERROR(Thread::create( |
217 | 0 | "StorageEngine", "unused_rowset_monitor_thread", |
218 | 0 | [this]() { this->_unused_rowset_monitor_thread_callback(); }, |
219 | 0 | &_unused_rowset_monitor_thread)); |
220 | 0 | LOG(INFO) << "unused rowset monitor thread started"; |
221 | |
222 | 0 | RETURN_IF_ERROR(Thread::create( |
223 | 0 | "StorageEngine", "evict_querying_rowset_thread", |
224 | 0 | [this]() { this->_evict_quring_rowset_thread_callback(); }, |
225 | 0 | &_evict_quering_rowset_thread)); |
226 | 0 | LOG(INFO) << "evict quering thread started"; |
227 | | |
228 | | // start thread for monitoring the snapshot and trash folder |
229 | 0 | RETURN_IF_ERROR(Thread::create( |
230 | 0 | "StorageEngine", "garbage_sweeper_thread", |
231 | 0 | [this]() { this->_garbage_sweeper_thread_callback(); }, &_garbage_sweeper_thread)); |
232 | 0 | LOG(INFO) << "garbage sweeper thread started"; |
233 | | |
234 | | // start thread for monitoring the tablet with io error |
235 | 0 | RETURN_IF_ERROR(Thread::create( |
236 | 0 | "StorageEngine", "disk_stat_monitor_thread", |
237 | 0 | [this]() { this->_disk_stat_monitor_thread_callback(); }, &_disk_stat_monitor_thread)); |
238 | 0 | LOG(INFO) << "disk stat monitor thread started"; |
239 | | |
240 | | // convert store map to vector |
241 | 0 | std::vector<DataDir*> data_dirs = get_stores(); |
242 | |
243 | 0 | auto base_compaction_threads = get_base_compaction_threads_num(data_dirs.size()); |
244 | 0 | auto cumu_compaction_threads = get_cumu_compaction_threads_num(data_dirs.size()); |
245 | 0 | auto single_replica_compaction_threads = |
246 | 0 | get_single_replica_compaction_threads_num(data_dirs.size()); |
247 | |
248 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("BaseCompactionTaskThreadPool") |
249 | 0 | .set_min_threads(base_compaction_threads) |
250 | 0 | .set_max_threads(base_compaction_threads) |
251 | 0 | .build(&_base_compaction_thread_pool)); |
252 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("CumuCompactionTaskThreadPool") |
253 | 0 | .set_min_threads(cumu_compaction_threads) |
254 | 0 | .set_max_threads(cumu_compaction_threads) |
255 | 0 | .build(&_cumu_compaction_thread_pool)); |
256 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("SingleReplicaCompactionTaskThreadPool") |
257 | 0 | .set_min_threads(single_replica_compaction_threads) |
258 | 0 | .set_max_threads(single_replica_compaction_threads) |
259 | 0 | .build(&_single_replica_compaction_thread_pool)); |
260 | | |
261 | 0 | if (config::enable_segcompaction) { |
262 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("SegCompactionTaskThreadPool") |
263 | 0 | .set_min_threads(config::segcompaction_num_threads) |
264 | 0 | .set_max_threads(config::segcompaction_num_threads) |
265 | 0 | .build(&_seg_compaction_thread_pool)); |
266 | 0 | } |
267 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("ColdDataCompactionTaskThreadPool") |
268 | 0 | .set_min_threads(config::cold_data_compaction_thread_num) |
269 | 0 | .set_max_threads(config::cold_data_compaction_thread_num) |
270 | 0 | .build(&_cold_data_compaction_thread_pool)); |
271 | | |
272 | | // compaction tasks producer thread |
273 | 0 | RETURN_IF_ERROR(Thread::create( |
274 | 0 | "StorageEngine", "compaction_tasks_producer_thread", |
275 | 0 | [this]() { this->_compaction_tasks_producer_callback(); }, |
276 | 0 | &_compaction_tasks_producer_thread)); |
277 | 0 | LOG(INFO) << "compaction tasks producer thread started"; |
278 | |
279 | 0 | RETURN_IF_ERROR(Thread::create( |
280 | 0 | "StorageEngine", "_update_replica_infos_thread", |
281 | 0 | [this]() { this->_update_replica_infos_callback(); }, &_update_replica_infos_thread)); |
282 | 0 | LOG(INFO) << "tablet replicas info update thread started"; |
283 | |
284 | 0 | int32_t max_checkpoint_thread_num = config::max_meta_checkpoint_threads; |
285 | 0 | if (max_checkpoint_thread_num < 0) { |
286 | 0 | max_checkpoint_thread_num = data_dirs.size(); |
287 | 0 | } |
288 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("TabletMetaCheckpointTaskThreadPool") |
289 | 0 | .set_max_threads(max_checkpoint_thread_num) |
290 | 0 | .build(&_tablet_meta_checkpoint_thread_pool)); |
291 | | |
292 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("MultiGetTaskThreadPool") |
293 | 0 | .set_min_threads(config::multi_get_max_threads) |
294 | 0 | .set_max_threads(config::multi_get_max_threads) |
295 | 0 | .build(&_bg_multi_get_thread_pool)); |
296 | 0 | RETURN_IF_ERROR(Thread::create( |
297 | 0 | "StorageEngine", "tablet_checkpoint_tasks_producer_thread", |
298 | 0 | [this, data_dirs]() { this->_tablet_checkpoint_callback(data_dirs); }, |
299 | 0 | &_tablet_checkpoint_tasks_producer_thread)); |
300 | 0 | LOG(INFO) << "tablet checkpoint tasks producer thread started"; |
301 | |
302 | 0 | RETURN_IF_ERROR(Thread::create( |
303 | 0 | "StorageEngine", "tablet_path_check_thread", |
304 | 0 | [this]() { this->_tablet_path_check_callback(); }, &_tablet_path_check_thread)); |
305 | 0 | LOG(INFO) << "tablet path check thread started"; |
306 | | |
307 | | // path scan and gc thread |
308 | 0 | if (config::path_gc_check) { |
309 | 0 | for (auto data_dir : get_stores()) { |
310 | 0 | scoped_refptr<Thread> path_gc_thread; |
311 | 0 | RETURN_IF_ERROR(Thread::create( |
312 | 0 | "StorageEngine", "path_gc_thread", |
313 | 0 | [this, data_dir]() { this->_path_gc_thread_callback(data_dir); }, |
314 | 0 | &path_gc_thread)); |
315 | 0 | _path_gc_threads.emplace_back(path_gc_thread); |
316 | 0 | } |
317 | 0 | LOG(INFO) << "path gc threads started. number:" << get_stores().size(); |
318 | 0 | } |
319 | | |
320 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("CooldownTaskThreadPool") |
321 | 0 | .set_min_threads(config::cooldown_thread_num) |
322 | 0 | .set_max_threads(config::cooldown_thread_num) |
323 | 0 | .build(&_cooldown_thread_pool)); |
324 | 0 | LOG(INFO) << "cooldown thread pool started"; |
325 | |
326 | 0 | RETURN_IF_ERROR(Thread::create( |
327 | 0 | "StorageEngine", "cooldown_tasks_producer_thread", |
328 | 0 | [this]() { this->_cooldown_tasks_producer_callback(); }, |
329 | 0 | &_cooldown_tasks_producer_thread)); |
330 | 0 | LOG(INFO) << "cooldown tasks producer thread started"; |
331 | |
332 | 0 | RETURN_IF_ERROR(Thread::create( |
333 | 0 | "StorageEngine", "remove_unused_remote_files_thread", |
334 | 0 | [this]() { this->_remove_unused_remote_files_callback(); }, |
335 | 0 | &_remove_unused_remote_files_thread)); |
336 | 0 | LOG(INFO) << "remove unused remote files thread started"; |
337 | |
338 | 0 | RETURN_IF_ERROR(Thread::create( |
339 | 0 | "StorageEngine", "cold_data_compaction_producer_thread", |
340 | 0 | [this]() { this->_cold_data_compaction_producer_callback(); }, |
341 | 0 | &_cold_data_compaction_producer_thread)); |
342 | 0 | LOG(INFO) << "cold data compaction producer thread started"; |
343 | | |
344 | | // add tablet publish version thread pool |
345 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("TabletPublishTxnThreadPool") |
346 | 0 | .set_min_threads(config::tablet_publish_txn_max_thread) |
347 | 0 | .set_max_threads(config::tablet_publish_txn_max_thread) |
348 | 0 | .build(&_tablet_publish_txn_thread_pool)); |
349 | | |
350 | 0 | RETURN_IF_ERROR(Thread::create( |
351 | 0 | "StorageEngine", "async_publish_version_thread", |
352 | 0 | [this]() { this->_async_publish_callback(); }, &_async_publish_thread)); |
353 | 0 | LOG(INFO) << "async publish thread started"; |
354 | |
355 | 0 | RETURN_IF_ERROR(Thread::create( |
356 | 0 | "StorageEngine", "check_tablet_delete_bitmap_score_thread", |
357 | 0 | [this]() { this->_check_tablet_delete_bitmap_score_callback(); }, |
358 | 0 | &_check_delete_bitmap_score_thread)); |
359 | 0 | LOG(INFO) << "check tablet delete bitmap score thread started"; |
360 | |
361 | 0 | LOG(INFO) << "all storage engine's background threads are started."; |
362 | 0 | return Status::OK(); |
363 | 0 | } |
364 | | |
365 | 0 | void StorageEngine::_garbage_sweeper_thread_callback() { |
366 | 0 | uint32_t max_interval = config::max_garbage_sweep_interval; |
367 | 0 | uint32_t min_interval = config::min_garbage_sweep_interval; |
368 | |
369 | 0 | if (max_interval < min_interval || min_interval <= 0) { |
370 | 0 | LOG(WARNING) << "garbage sweep interval config is illegal: [max=" << max_interval |
371 | 0 | << " min=" << min_interval << "]."; |
372 | 0 | min_interval = 1; |
373 | 0 | max_interval = max_interval >= min_interval ? max_interval : min_interval; |
374 | 0 | LOG(INFO) << "force reset garbage sweep interval. " |
375 | 0 | << "max_interval=" << max_interval << ", min_interval=" << min_interval; |
376 | 0 | } |
377 | |
378 | 0 | const double pi = M_PI; |
379 | 0 | double usage = 1.0; |
380 | | // After the program starts, the first round of cleaning starts after min_interval. |
381 | 0 | uint32_t curr_interval = min_interval; |
382 | 0 | do { |
383 | | // Function properties: |
384 | | // when usage < 0.6, ratio close to 1.(interval close to max_interval) |
385 | | // when usage at [0.6, 0.75], ratio is rapidly decreasing from 0.87 to 0.27. |
386 | | // when usage > 0.75, ratio is slowly decreasing. |
387 | | // when usage > 0.8, ratio close to min_interval. |
388 | | // when usage = 0.88, ratio is approximately 0.0057. |
389 | 0 | double ratio = (1.1 * (pi / 2 - std::atan(usage * 100 / 5 - 14)) - 0.28) / pi; |
390 | 0 | ratio = ratio > 0 ? ratio : 0; |
391 | 0 | curr_interval = uint32_t(max_interval * ratio); |
392 | 0 | curr_interval = std::max(curr_interval, min_interval); |
393 | 0 | curr_interval = std::min(curr_interval, max_interval); |
394 | | |
395 | | // start clean trash and update usage. |
396 | 0 | Status res = start_trash_sweep(&usage); |
397 | 0 | if (res.ok() && _need_clean_trash.exchange(false, std::memory_order_relaxed)) { |
398 | 0 | res = start_trash_sweep(&usage, true); |
399 | 0 | } |
400 | |
401 | 0 | if (!res.ok()) { |
402 | 0 | LOG(WARNING) << "one or more errors occur when sweep trash." |
403 | 0 | << "see previous message for detail. err code=" << res; |
404 | | // do nothing. continue next loop. |
405 | 0 | } |
406 | 0 | LOG(INFO) << "trash thread check usage=" << usage << " ratio=" << ratio |
407 | 0 | << " curr_interval=" << curr_interval; |
408 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(curr_interval))); |
409 | 0 | } |
410 | | |
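For reference, the ratio comments above can be checked numerically: the formula yields roughly 0.93 at usage 0.5, 0.85 at 0.6, 0.19 at 0.75, and 0.006 at 0.88, so curr_interval shrinks from near max_interval toward min_interval as disk usage grows. A standalone snippet (not part of the covered source) that prints these values:

    #include <cmath>
    #include <cstdio>
    #include <initializer_list>

    // Evaluates the same ratio formula as _garbage_sweeper_thread_callback().
    int main() {
        const double pi = M_PI;
        for (double usage : {0.5, 0.6, 0.75, 0.88}) {
            double ratio = (1.1 * (pi / 2 - std::atan(usage * 100 / 5 - 14)) - 0.28) / pi;
            std::printf("usage=%.2f ratio=%.4f\n", usage, ratio > 0 ? ratio : 0);
        }
        return 0;
    }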
411 | 0 | void StorageEngine::_disk_stat_monitor_thread_callback() { |
412 | 0 | int32_t interval = config::disk_stat_monitor_interval; |
413 | 0 | do { |
414 | 0 | _start_disk_stat_monitor(); |
415 | |
416 | 0 | interval = config::disk_stat_monitor_interval; |
417 | 0 | if (interval <= 0) { |
418 | 0 | LOG(WARNING) << "disk_stat_monitor_interval config is illegal: " << interval |
419 | 0 | << ", force set to 1"; |
420 | 0 | interval = 1; |
421 | 0 | } |
422 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
423 | 0 | } |
424 | | |
425 | 0 | void StorageEngine::_unused_rowset_monitor_thread_callback() { |
426 | 0 | int32_t interval = config::unused_rowset_monitor_interval; |
427 | 0 | do { |
428 | 0 | start_delete_unused_rowset(); |
429 | |
430 | 0 | interval = config::unused_rowset_monitor_interval; |
431 | 0 | if (interval <= 0) { |
432 | 0 | LOG(WARNING) << "unused_rowset_monitor_interval config is illegal: " << interval |
433 | 0 | << ", force set to 1"; |
434 | 0 | interval = 1; |
435 | 0 | } |
436 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
437 | 0 | } |
438 | | |
439 | 0 | int32_t StorageEngine::_auto_get_interval_by_disk_capacity(DataDir* data_dir) { |
440 | 0 | double disk_used = data_dir->get_usage(0); |
441 | 0 | double remain_used = 1 - disk_used; |
442 | 0 | DCHECK(remain_used >= 0 && remain_used <= 1); |
443 | 0 | DCHECK(config::path_gc_check_interval_second >= 0); |
444 | 0 | int32_t ret = 0; |
445 | 0 | if (remain_used > 0.9) { |
446 | | // if config::path_gc_check_interval_second == 24h |
447 | 0 | ret = config::path_gc_check_interval_second; |
448 | 0 | } else if (remain_used > 0.7) { |
449 | | // 12h |
450 | 0 | ret = config::path_gc_check_interval_second / 2; |
451 | 0 | } else if (remain_used > 0.5) { |
452 | | // 6h |
453 | 0 | ret = config::path_gc_check_interval_second / 4; |
454 | 0 | } else if (remain_used > 0.3) { |
455 | | // 4h |
456 | 0 | ret = config::path_gc_check_interval_second / 6; |
457 | 0 | } else { |
458 | | // 3h |
459 | 0 | ret = config::path_gc_check_interval_second / 8; |
460 | 0 | } |
461 | 0 | return ret; |
462 | 0 | } |
463 | | |
464 | 0 | void StorageEngine::_path_gc_thread_callback(DataDir* data_dir) { |
465 | 0 | LOG(INFO) << "try to start path gc thread!"; |
466 | 0 | int32_t last_exec_time = 0; |
467 | 0 | do { |
468 | 0 | int32_t current_time = time(nullptr); |
469 | |
470 | 0 | int32_t interval = _auto_get_interval_by_disk_capacity(data_dir); |
471 | 0 | DBUG_EXECUTE_IF("_path_gc_thread_callback.interval.eq.1ms", { |
472 | 0 | LOG(INFO) << "debug point change interval eq 1ms"; |
473 | 0 | interval = 1; |
474 | 0 | while (DebugPoints::instance()->is_enable("_path_gc_thread_callback.always.do")) { |
475 | 0 | data_dir->perform_path_gc(); |
476 | 0 | std::this_thread::sleep_for(std::chrono::milliseconds(10)); |
477 | 0 | } |
478 | 0 | }); |
479 | 0 | if (interval <= 0) { |
480 | 0 | LOG(WARNING) << "path gc thread check interval config is illegal:" << interval |
481 | 0 | << " will be forced set to half hour"; |
482 | 0 | interval = 1800; // 0.5 hour |
483 | 0 | } |
484 | 0 | if (current_time - last_exec_time >= interval) { |
485 | 0 | LOG(INFO) << "try to perform path gc! disk remain [" << 1 - data_dir->get_usage(0) |
486 | 0 | << "] internal [" << interval << "]"; |
487 | 0 | data_dir->perform_path_gc(); |
488 | 0 | last_exec_time = time(nullptr); |
489 | 0 | } |
490 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(5))); |
491 | 0 | LOG(INFO) << "stop path gc thread!"; |
492 | 0 | } |
493 | | |
494 | 0 | void StorageEngine::_tablet_checkpoint_callback(const std::vector<DataDir*>& data_dirs) { |
495 | 0 | int64_t interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs; |
496 | 0 | do { |
497 | 0 | for (auto data_dir : data_dirs) { |
498 | 0 | LOG(INFO) << "begin to produce tablet meta checkpoint tasks, data_dir=" |
499 | 0 | << data_dir->path(); |
500 | 0 | auto st = _tablet_meta_checkpoint_thread_pool->submit_func( |
501 | 0 | [data_dir, this]() { _tablet_manager->do_tablet_meta_checkpoint(data_dir); }); |
502 | 0 | if (!st.ok()) { |
503 | 0 | LOG(WARNING) << "submit tablet checkpoint tasks failed."; |
504 | 0 | } |
505 | 0 | } |
506 | 0 | interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs; |
507 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
508 | 0 | } |
509 | | |
510 | 0 | void StorageEngine::_tablet_path_check_callback() { |
511 | 0 | struct TabletIdComparator { |
512 | 0 | bool operator()(Tablet* a, Tablet* b) { return a->tablet_id() < b->tablet_id(); } |
513 | 0 | }; |
514 | |
515 | 0 | using TabletQueue = std::priority_queue<Tablet*, std::vector<Tablet*>, TabletIdComparator>; |
516 | |
517 | 0 | int64_t interval = config::tablet_path_check_interval_seconds; |
518 | 0 | if (interval <= 0) { |
519 | 0 | return; |
520 | 0 | } |
521 | | |
522 | 0 | int64_t last_tablet_id = 0; |
523 | 0 | do { |
524 | 0 | int32_t batch_size = config::tablet_path_check_batch_size; |
525 | 0 | if (batch_size <= 0) { |
526 | 0 | if (_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))) { |
527 | 0 | break; |
528 | 0 | } |
529 | 0 | continue; |
530 | 0 | } |
531 | | |
532 | 0 | LOG(INFO) << "start to check tablet path"; |
533 | |
534 | 0 | auto all_tablets = _tablet_manager->get_all_tablet( |
535 | 0 | [](Tablet* t) { return t->is_used() && t->tablet_state() == TABLET_RUNNING; }); |
536 | |
537 | 0 | TabletQueue big_id_tablets; |
538 | 0 | TabletQueue small_id_tablets; |
539 | 0 | for (auto tablet : all_tablets) { |
540 | 0 | auto tablet_id = tablet->tablet_id(); |
541 | 0 | TabletQueue* belong_tablets = nullptr; |
542 | 0 | if (tablet_id > last_tablet_id) { |
543 | 0 | if (big_id_tablets.size() < batch_size || |
544 | 0 | big_id_tablets.top()->tablet_id() > tablet_id) { |
545 | 0 | belong_tablets = &big_id_tablets; |
546 | 0 | } |
547 | 0 | } else if (big_id_tablets.size() < batch_size) { |
548 | 0 | if (small_id_tablets.size() < batch_size || |
549 | 0 | small_id_tablets.top()->tablet_id() > tablet_id) { |
550 | 0 | belong_tablets = &small_id_tablets; |
551 | 0 | } |
552 | 0 | } |
553 | 0 | if (belong_tablets != nullptr) { |
554 | 0 | belong_tablets->push(tablet.get()); |
555 | 0 | if (belong_tablets->size() > batch_size) { |
556 | 0 | belong_tablets->pop(); |
557 | 0 | } |
558 | 0 | } |
559 | 0 | } |
560 | |
561 | 0 | int32_t need_small_id_tablet_size = |
562 | 0 | batch_size - static_cast<int32_t>(big_id_tablets.size()); |
563 | |
564 | 0 | if (!big_id_tablets.empty()) { |
565 | 0 | last_tablet_id = big_id_tablets.top()->tablet_id(); |
566 | 0 | } |
567 | 0 | while (!big_id_tablets.empty()) { |
568 | 0 | big_id_tablets.top()->check_tablet_path_exists(); |
569 | 0 | big_id_tablets.pop(); |
570 | 0 | } |
571 | |
572 | 0 | if (!small_id_tablets.empty() && need_small_id_tablet_size > 0) { |
573 | 0 | while (static_cast<int32_t>(small_id_tablets.size()) > need_small_id_tablet_size) { |
574 | 0 | small_id_tablets.pop(); |
575 | 0 | } |
576 | |
577 | 0 | last_tablet_id = small_id_tablets.top()->tablet_id(); |
578 | 0 | while (!small_id_tablets.empty()) { |
579 | 0 | small_id_tablets.top()->check_tablet_path_exists(); |
580 | 0 | small_id_tablets.pop(); |
581 | 0 | } |
582 | 0 | } |
583 | |
584 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
585 | 0 | } |
586 | | |
587 | 0 | void StorageEngine::_adjust_compaction_thread_num() { |
588 | 0 | auto base_compaction_threads_num = get_base_compaction_threads_num(_store_map.size()); |
589 | 0 | if (_base_compaction_thread_pool->max_threads() != base_compaction_threads_num) { |
590 | 0 | int old_max_threads = _base_compaction_thread_pool->max_threads(); |
591 | 0 | Status status = _base_compaction_thread_pool->set_max_threads(base_compaction_threads_num); |
592 | 0 | if (status.ok()) { |
593 | 0 | VLOG_NOTICE << "update base compaction thread pool max_threads from " << old_max_threads |
594 | 0 | << " to " << base_compaction_threads_num; |
595 | 0 | } |
596 | 0 | } |
597 | 0 | if (_base_compaction_thread_pool->min_threads() != base_compaction_threads_num) { |
598 | 0 | int old_min_threads = _base_compaction_thread_pool->min_threads(); |
599 | 0 | Status status = _base_compaction_thread_pool->set_min_threads(base_compaction_threads_num); |
600 | 0 | if (status.ok()) { |
601 | 0 | VLOG_NOTICE << "update base compaction thread pool min_threads from " << old_min_threads |
602 | 0 | << " to " << base_compaction_threads_num; |
603 | 0 | } |
604 | 0 | } |
605 | |
606 | 0 | auto cumu_compaction_threads_num = get_cumu_compaction_threads_num(_store_map.size()); |
607 | 0 | if (_cumu_compaction_thread_pool->max_threads() != cumu_compaction_threads_num) { |
608 | 0 | int old_max_threads = _cumu_compaction_thread_pool->max_threads(); |
609 | 0 | Status status = _cumu_compaction_thread_pool->set_max_threads(cumu_compaction_threads_num); |
610 | 0 | if (status.ok()) { |
611 | 0 | VLOG_NOTICE << "update cumu compaction thread pool max_threads from " << old_max_threads |
612 | 0 | << " to " << cumu_compaction_threads_num; |
613 | 0 | } |
614 | 0 | } |
615 | 0 | if (_cumu_compaction_thread_pool->min_threads() != cumu_compaction_threads_num) { |
616 | 0 | int old_min_threads = _cumu_compaction_thread_pool->min_threads(); |
617 | 0 | Status status = _cumu_compaction_thread_pool->set_min_threads(cumu_compaction_threads_num); |
618 | 0 | if (status.ok()) { |
619 | 0 | VLOG_NOTICE << "update cumu compaction thread pool min_threads from " << old_min_threads |
620 | 0 | << " to " << cumu_compaction_threads_num; |
621 | 0 | } |
622 | 0 | } |
623 | |
624 | 0 | auto single_replica_compaction_threads_num = |
625 | 0 | get_single_replica_compaction_threads_num(_store_map.size()); |
626 | 0 | if (_single_replica_compaction_thread_pool->max_threads() != |
627 | 0 | single_replica_compaction_threads_num) { |
628 | 0 | int old_max_threads = _single_replica_compaction_thread_pool->max_threads(); |
629 | 0 | Status status = _single_replica_compaction_thread_pool->set_max_threads( |
630 | 0 | single_replica_compaction_threads_num); |
631 | 0 | if (status.ok()) { |
632 | 0 | VLOG_NOTICE << "update single replica compaction thread pool max_threads from " |
633 | 0 | << old_max_threads << " to " << single_replica_compaction_threads_num; |
634 | 0 | } |
635 | 0 | } |
636 | 0 | if (_single_replica_compaction_thread_pool->min_threads() != |
637 | 0 | single_replica_compaction_threads_num) { |
638 | 0 | int old_min_threads = _single_replica_compaction_thread_pool->min_threads(); |
639 | 0 | Status status = _single_replica_compaction_thread_pool->set_min_threads( |
640 | 0 | single_replica_compaction_threads_num); |
641 | 0 | if (status.ok()) { |
642 | 0 | VLOG_NOTICE << "update single replica compaction thread pool min_threads from " |
643 | 0 | << old_min_threads << " to " << single_replica_compaction_threads_num; |
644 | 0 | } |
645 | 0 | } |
646 | 0 | } |
647 | | |
648 | 1 | void StorageEngine::_compaction_tasks_producer_callback() { |
649 | 1 | LOG(INFO) << "try to start compaction producer process!"; |
650 | | |
651 | 1 | std::vector<DataDir*> data_dirs = get_stores(); |
652 | 1 | _compaction_submit_registry.reset(data_dirs); |
653 | | |
654 | 1 | int round = 0; |
655 | 1 | CompactionType compaction_type; |
656 | | |
657 | | // Used to record the time when the score metric was last updated. |
658 | | // The update of the score metric is accompanied by the logic of selecting the tablet. |
659 | | // If there is no slot available, the logic of selecting the tablet will be terminated, |
660 | | // which causes the score metric update to be terminated. |
661 | | // In order to avoid this situation, we need to update the score regularly. |
662 | 1 | int64_t last_cumulative_score_update_time = 0; |
663 | 1 | int64_t last_base_score_update_time = 0; |
664 | 1 | static const int64_t check_score_interval_ms = 5000; // 5 secs |
665 | | |
666 | 1 | int64_t interval = config::generate_compaction_tasks_interval_ms; |
667 | 1 | do { |
668 | 1 | int64_t cur_time = UnixMillis(); |
669 | 1 | if (!config::disable_auto_compaction && |
670 | 1 | (!config::enable_compaction_pause_on_high_memory || |
671 | 0 | !GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE))) { |
672 | 0 | _adjust_compaction_thread_num(); |
673 | |
674 | 0 | bool check_score = false; |
675 | 0 | int64_t cur_time = UnixMillis(); |
676 | 0 | if (round < config::cumulative_compaction_rounds_for_each_base_compaction_round) { |
677 | 0 | compaction_type = CompactionType::CUMULATIVE_COMPACTION; |
678 | 0 | round++; |
679 | 0 | if (cur_time - last_cumulative_score_update_time >= check_score_interval_ms) { |
680 | 0 | check_score = true; |
681 | 0 | last_cumulative_score_update_time = cur_time; |
682 | 0 | } |
683 | 0 | } else { |
684 | 0 | compaction_type = CompactionType::BASE_COMPACTION; |
685 | 0 | round = 0; |
686 | 0 | if (cur_time - last_base_score_update_time >= check_score_interval_ms) { |
687 | 0 | check_score = true; |
688 | 0 | last_base_score_update_time = cur_time; |
689 | 0 | } |
690 | 0 | } |
691 | 0 | std::vector<TabletSharedPtr> tablets_compaction = |
692 | 0 | _generate_compaction_tasks(compaction_type, data_dirs, check_score); |
693 | 0 | if (tablets_compaction.size() == 0) { |
694 | 0 | std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex); |
695 | 0 | _wakeup_producer_flag = 0; |
696 | | // It is necessary to wake up the thread on timeout to prevent deadlock |
697 | | // in case of no running compaction task. |
698 | 0 | _compaction_producer_sleep_cv.wait_for( |
699 | 0 | lock, std::chrono::milliseconds(2000), |
700 | 0 | [this] { return _wakeup_producer_flag == 1; }); |
701 | 0 | continue; |
702 | 0 | } |
703 | | |
704 | 0 | for (const auto& tablet : tablets_compaction) { |
705 | 0 | if (compaction_type == CompactionType::BASE_COMPACTION) { |
706 | 0 | tablet->set_last_base_compaction_schedule_time(UnixMillis()); |
707 | 0 | } else if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) { |
708 | 0 | tablet->set_last_cumu_compaction_schedule_time(UnixMillis()); |
709 | 0 | } else if (compaction_type == CompactionType::FULL_COMPACTION) { |
710 | 0 | tablet->set_last_full_compaction_schedule_time(UnixMillis()); |
711 | 0 | } |
712 | 0 | Status st = _submit_compaction_task(tablet, compaction_type, false); |
713 | 0 | if (!st.ok()) { |
714 | 0 | LOG(WARNING) << "failed to submit compaction task for tablet: " |
715 | 0 | << tablet->tablet_id() << ", err: " << st; |
716 | 0 | } |
717 | 0 | } |
718 | 0 | interval = config::generate_compaction_tasks_interval_ms; |
719 | 1 | } else { |
720 | 1 | interval = 5000; // 5s to check disable_auto_compaction |
721 | 1 | } |
722 | | |
723 | | // wait some seconds for ut test |
724 | 1 | { |
725 | 1 | std::vector<std::any> args {}; |
726 | 1 | args.emplace_back(1); |
727 | 1 | doris::SyncPoint::get_instance()->process( |
728 | 1 | "StorageEngine::_compaction_tasks_producer_callback", std::move(args)); |
729 | 1 | } |
730 | 1 | int64_t end_time = UnixMillis(); |
731 | 1 | DorisMetrics::instance()->compaction_producer_callback_a_round_time->set_value(end_time - |
732 | 1 | cur_time); |
733 | 1 | } while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(interval))); |
734 | 1 | } |
735 | | |
736 | 0 | void StorageEngine::_update_replica_infos_callback() { |
737 | | #ifdef GOOGLE_PROFILER |
738 | | ProfilerRegisterThread(); |
739 | | #endif |
740 | 0 | LOG(INFO) << "start to update replica infos!"; |
741 | |
742 | 0 | int64_t interval = config::update_replica_infos_interval_seconds; |
743 | 0 | do { |
744 | 0 | auto all_tablets = _tablet_manager->get_all_tablet([](Tablet* t) { |
745 | 0 | return t->is_used() && t->tablet_state() == TABLET_RUNNING && |
746 | 0 | !t->tablet_meta()->tablet_schema()->disable_auto_compaction() && |
747 | 0 | t->tablet_meta()->tablet_schema()->enable_single_replica_compaction(); |
748 | 0 | }); |
749 | 0 | ClusterInfo* cluster_info = ExecEnv::GetInstance()->cluster_info(); |
750 | 0 | if (cluster_info == nullptr) { |
751 | 0 | LOG(WARNING) << "Have not get FE Master heartbeat yet"; |
752 | 0 | std::this_thread::sleep_for(std::chrono::seconds(2)); |
753 | 0 | continue; |
754 | 0 | } |
755 | 0 | TNetworkAddress master_addr = cluster_info->master_fe_addr; |
756 | 0 | if (master_addr.hostname == "" || master_addr.port == 0) { |
757 | 0 | LOG(WARNING) << "Have not get FE Master heartbeat yet"; |
758 | 0 | std::this_thread::sleep_for(std::chrono::seconds(2)); |
759 | 0 | continue; |
760 | 0 | } |
761 | | |
762 | 0 | int start = 0; |
763 | 0 | int tablet_size = all_tablets.size(); |
764 | | // The while loop may take a long time, we should skip it when stop |
765 | 0 | while (start < tablet_size && _stop_background_threads_latch.count() > 0) { |
766 | 0 | int batch_size = std::min(100, tablet_size - start); |
767 | 0 | int end = start + batch_size; |
768 | 0 | TGetTabletReplicaInfosRequest request; |
769 | 0 | TGetTabletReplicaInfosResult result; |
770 | 0 | for (int i = start; i < end; i++) { |
771 | 0 | request.tablet_ids.emplace_back(all_tablets[i]->tablet_id()); |
772 | 0 | } |
773 | 0 | Status rpc_st = ThriftRpcHelper::rpc<FrontendServiceClient>( |
774 | 0 | master_addr.hostname, master_addr.port, |
775 | 0 | [&request, &result](FrontendServiceConnection& client) { |
776 | 0 | client->getTabletReplicaInfos(result, request); |
777 | 0 | }); |
778 | |
779 | 0 | if (!rpc_st.ok()) { |
780 | 0 | LOG(WARNING) << "Failed to get tablet replica infos, encounter rpc failure, " |
781 | 0 | "tablet start: " |
782 | 0 | << start << " end: " << end; |
783 | 0 | continue; |
784 | 0 | } |
785 | | |
786 | 0 | std::unique_lock<std::mutex> lock(_peer_replica_infos_mutex); |
787 | 0 | for (const auto& it : result.tablet_replica_infos) { |
788 | 0 | auto tablet_id = it.first; |
789 | 0 | auto tablet = _tablet_manager->get_tablet(tablet_id); |
790 | 0 | if (tablet == nullptr) { |
791 | 0 | VLOG_CRITICAL << "tablet ptr is nullptr"; |
792 | 0 | continue; |
793 | 0 | } |
794 | | |
795 | 0 | VLOG_NOTICE << tablet_id << " tablet has " << it.second.size() << " replicas"; |
796 | 0 | uint64_t min_modulo = MOD_PRIME; |
797 | 0 | TReplicaInfo peer_replica; |
798 | 0 | for (const auto& replica : it.second) { |
799 | 0 | int64_t peer_replica_id = replica.replica_id; |
800 | 0 | uint64_t modulo = HashUtil::hash64(&peer_replica_id, sizeof(peer_replica_id), |
801 | 0 | DEFAULT_SEED) % |
802 | 0 | MOD_PRIME; |
803 | 0 | if (modulo < min_modulo) { |
804 | 0 | peer_replica = replica; |
805 | 0 | min_modulo = modulo; |
806 | 0 | } |
807 | 0 | } |
808 | 0 | VLOG_NOTICE << "tablet " << tablet_id << ", peer replica host is " |
809 | 0 | << peer_replica.host; |
810 | 0 | _peer_replica_infos[tablet_id] = peer_replica; |
811 | 0 | } |
812 | 0 | _token = result.token; |
813 | 0 | VLOG_NOTICE << "get tablet replica infos from fe, size is " << end - start |
814 | 0 | << " token = " << result.token; |
815 | 0 | start = end; |
816 | 0 | } |
817 | 0 | interval = config::update_replica_infos_interval_seconds; |
818 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
819 | 0 | } |
820 | | |
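Note on the selection loop above: for each tablet, every replica_id is hashed with HashUtil::hash64 using DEFAULT_SEED, reduced modulo MOD_PRIME, and the replica with the smallest result is cached as the peer, so all BEs independently agree on the same peer for a given replica set. A minimal standalone sketch of that idea (illustrative only; std::hash stands in for HashUtil::hash64):

    #include <cstdint>
    #include <functional>
    #include <vector>

    // Deterministic "smallest hash modulo prime" pick, mirroring the loop above.
    int64_t pick_peer_replica(const std::vector<int64_t>& replica_ids) {
        const uint64_t kModPrime = 7652413; // MOD_PRIME in this file
        uint64_t min_modulo = kModPrime;
        int64_t chosen = -1;
        for (int64_t id : replica_ids) {
            uint64_t modulo = std::hash<int64_t> {}(id) % kModPrime;
            if (modulo < min_modulo) {
                min_modulo = modulo;
                chosen = id;
            }
        }
        return chosen; // same inputs -> same winner on every node
    }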
821 | | Status StorageEngine::_submit_single_replica_compaction_task(TabletSharedPtr tablet, |
822 | 0 | CompactionType compaction_type) { |
823 | | // For single replica compaction, the local version to be merged is determined based on the version fetched from the peer replica. |
824 | | // Therefore, it is currently not possible to determine whether it should be a base compaction or cumulative compaction. |
825 | | // As a result, the tablet needs to be pushed to both the _tablet_submitted_cumu_compaction and the _tablet_submitted_base_compaction simultaneously. |
826 | 0 | bool already_exist = |
827 | 0 | _compaction_submit_registry.insert(tablet, CompactionType::CUMULATIVE_COMPACTION); |
828 | 0 | if (already_exist) { |
829 | 0 | return Status::AlreadyExist<false>( |
830 | 0 | "compaction task has already been submitted, tablet_id={}", tablet->tablet_id()); |
831 | 0 | } |
832 | | |
833 | 0 | already_exist = _compaction_submit_registry.insert(tablet, CompactionType::BASE_COMPACTION); |
834 | 0 | if (already_exist) { |
835 | 0 | _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION); |
836 | 0 | return Status::AlreadyExist<false>( |
837 | 0 | "compaction task has already been submitted, tablet_id={}", tablet->tablet_id()); |
838 | 0 | } |
839 | | |
840 | 0 | auto compaction = std::make_shared<SingleReplicaCompaction>(*this, tablet, compaction_type); |
841 | 0 | DorisMetrics::instance()->single_compaction_request_total->increment(1); |
842 | 0 | auto st = compaction->prepare_compact(); |
843 | |
844 | 0 | auto clean_single_replica_compaction = [tablet, this]() { |
845 | 0 | _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION); |
846 | 0 | _pop_tablet_from_submitted_compaction(tablet, CompactionType::BASE_COMPACTION); |
847 | 0 | }; |
848 | |
849 | 0 | if (!st.ok()) { |
850 | 0 | clean_single_replica_compaction(); |
851 | 0 | if (!st.is<ErrorCode::CUMULATIVE_NO_SUITABLE_VERSION>()) { |
852 | 0 | LOG(WARNING) << "failed to prepare single replica compaction, tablet_id=" |
853 | 0 | << tablet->tablet_id() << " : " << st; |
854 | 0 | return st; |
855 | 0 | } |
856 | 0 | return Status::OK(); // No suitable version, regard as OK |
857 | 0 | } |
858 | | |
859 | 0 | auto submit_st = _single_replica_compaction_thread_pool->submit_func( |
860 | 0 | [tablet, compaction = std::move(compaction), |
861 | 0 | clean_single_replica_compaction]() mutable { |
862 | 0 | tablet->execute_single_replica_compaction(*compaction); |
863 | 0 | clean_single_replica_compaction(); |
864 | 0 | }); |
865 | 0 | if (!submit_st.ok()) { |
866 | 0 | clean_single_replica_compaction(); |
867 | 0 | return Status::InternalError( |
868 | 0 | "failed to submit single replica compaction task to thread pool, " |
869 | 0 | "tablet_id={}", |
870 | 0 | tablet->tablet_id()); |
871 | 0 | } |
872 | 0 | return Status::OK(); |
873 | 0 | } |
874 | | |
875 | | void StorageEngine::get_tablet_rowset_versions(const PGetTabletVersionsRequest* request, |
876 | 0 | PGetTabletVersionsResponse* response) { |
877 | 0 | TabletSharedPtr tablet = _tablet_manager->get_tablet(request->tablet_id()); |
878 | 0 | if (tablet == nullptr) { |
879 | 0 | response->mutable_status()->set_status_code(TStatusCode::CANCELLED); |
880 | 0 | return; |
881 | 0 | } |
882 | 0 | std::vector<Version> local_versions = tablet->get_all_local_versions(); |
883 | 0 | for (const auto& local_version : local_versions) { |
884 | 0 | auto version = response->add_versions(); |
885 | 0 | version->set_first(local_version.first); |
886 | 0 | version->set_second(local_version.second); |
887 | 0 | } |
888 | 0 | response->mutable_status()->set_status_code(0); |
889 | 0 | } |
890 | | |
891 | | bool need_generate_compaction_tasks(int task_cnt_per_disk, int thread_per_disk, |
892 | 0 | CompactionType compaction_type, bool all_base) { |
893 | | // We need to reserve at least one Slot for cumulative compaction. |
894 | | // So when there is only one Slot, we have to judge whether there is a cumulative compaction |
895 | | // in the current submitted tasks. |
896 | | // If so, the last Slot can be assigned to Base compaction, |
897 | | // otherwise, this Slot needs to be reserved for cumulative compaction. |
898 | 0 | if (task_cnt_per_disk >= thread_per_disk) { |
899 | | // Return if no available slot |
900 | 0 | return false; |
901 | 0 | } else if (task_cnt_per_disk >= thread_per_disk - 1) { |
902 | | // Only one slot left, check if it can be assigned to base compaction task. |
903 | 0 | if (compaction_type == CompactionType::BASE_COMPACTION) { |
904 | 0 | if (all_base) { |
905 | 0 | return false; |
906 | 0 | } |
907 | 0 | } |
908 | 0 | } |
909 | 0 | return true; |
910 | 0 | } |
911 | | |
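In other words: if every slot on the disk is busy the request is rejected; if exactly one slot is free it is reserved for cumulative compaction, so a base compaction request is rejected when no cumulative task has been submitted on that disk (all_base); otherwise the request goes through. A few illustrative checks, assuming the declarations above are visible and 4 slots per disk:

    #include <cassert>

    void check_slot_rule() {
        // All 4 slots busy: nothing more is scheduled.
        assert(!need_generate_compaction_tasks(4, 4, CompactionType::BASE_COMPACTION, false));
        // One slot left and no cumulative task submitted: keep it for cumulative compaction.
        assert(!need_generate_compaction_tasks(3, 4, CompactionType::BASE_COMPACTION, true));
        // One slot left but a cumulative task exists: base compaction may take it.
        assert(need_generate_compaction_tasks(3, 4, CompactionType::BASE_COMPACTION, false));
        // Free slots remain: cumulative compaction is always allowed.
        assert(need_generate_compaction_tasks(1, 4, CompactionType::CUMULATIVE_COMPACTION, false));
    }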
912 | 0 | int get_concurrent_per_disk(int max_score, int thread_per_disk) { |
913 | 0 | if (!config::enable_compaction_priority_scheduling) { |
914 | 0 | return thread_per_disk; |
915 | 0 | } |
916 | | |
917 | 0 | double load_average = 0; |
918 | 0 | if (DorisMetrics::instance()->system_metrics() != nullptr) { |
919 | 0 | load_average = DorisMetrics::instance()->system_metrics()->get_load_average_1_min(); |
920 | 0 | } |
921 | 0 | int num_cores = doris::CpuInfo::num_cores(); |
922 | 0 | bool cpu_usage_high = load_average > num_cores * 0.8; |
923 | |
924 | 0 | auto process_memory_usage = doris::GlobalMemoryArbitrator::process_memory_usage(); |
925 | 0 | bool memory_usage_high = process_memory_usage > MemInfo::soft_mem_limit() * 0.8; |
926 | |
|
927 | 0 | if (max_score <= config::low_priority_compaction_score_threshold && |
928 | 0 | (cpu_usage_high || memory_usage_high)) { |
929 | 0 | return config::low_priority_compaction_task_num_per_disk; |
930 | 0 | } |
931 | | |
932 | 0 | return thread_per_disk; |
933 | 0 | } |
934 | | |
935 | 0 | int32_t disk_compaction_slot_num(const DataDir& data_dir) { |
936 | 0 | return data_dir.is_ssd_disk() ? config::compaction_task_num_per_fast_disk |
937 | 0 | : config::compaction_task_num_per_disk; |
938 | 0 | } |
939 | | |
940 | | bool has_free_compaction_slot(CompactionSubmitRegistry* registry, DataDir* dir, |
941 | 0 | CompactionType compaction_type, uint32_t executing_cnt) { |
942 | 0 | int32_t thread_per_disk = disk_compaction_slot_num(*dir); |
943 | 0 | return need_generate_compaction_tasks( |
944 | 0 | executing_cnt, thread_per_disk, compaction_type, |
945 | 0 | !registry->has_compaction_task(dir, CompactionType::CUMULATIVE_COMPACTION)); |
946 | 0 | } |
947 | | |
948 | | std::vector<TabletSharedPtr> StorageEngine::_generate_compaction_tasks( |
949 | 0 | CompactionType compaction_type, std::vector<DataDir*>& data_dirs, bool check_score) { |
950 | 0 | _update_cumulative_compaction_policy(); |
951 | 0 | std::vector<TabletSharedPtr> tablets_compaction; |
952 | 0 | uint32_t max_compaction_score = 0; |
953 | |
954 | 0 | std::random_device rd; |
955 | 0 | std::mt19937 g(rd()); |
956 | 0 | std::shuffle(data_dirs.begin(), data_dirs.end(), g); |
957 | | |
958 | | // Copy _tablet_submitted_xxx_compaction map so that we don't need to hold _tablet_submitted_compaction_mutex |
959 | | // when traversing the data dir |
960 | 0 | auto compaction_registry_snapshot = _compaction_submit_registry.create_snapshot(); |
961 | 0 | for (auto* data_dir : data_dirs) { |
962 | 0 | bool need_pick_tablet = true; |
963 | 0 | uint32_t executing_task_num = |
964 | 0 | compaction_registry_snapshot.count_executing_cumu_and_base(data_dir); |
965 | 0 | need_pick_tablet = has_free_compaction_slot(&compaction_registry_snapshot, data_dir, |
966 | 0 | compaction_type, executing_task_num); |
967 | 0 | if (!need_pick_tablet && !check_score) { |
968 | 0 | continue; |
969 | 0 | } |
970 | | |
971 | | // Even if need_pick_tablet is false, we still need to call find_best_tablet_to_compaction(), |
972 | | // So that we can update the max_compaction_score metric. |
973 | 0 | if (!data_dir->reach_capacity_limit(0)) { |
974 | 0 | uint32_t disk_max_score = 0; |
975 | 0 | auto tablets = compaction_registry_snapshot.pick_topn_tablets_for_compaction( |
976 | 0 | _tablet_manager.get(), data_dir, compaction_type, |
977 | 0 | _cumulative_compaction_policies, &disk_max_score); |
978 | 0 | int concurrent_num = |
979 | 0 | get_concurrent_per_disk(disk_max_score, disk_compaction_slot_num(*data_dir)); |
980 | 0 | need_pick_tablet = need_generate_compaction_tasks( |
981 | 0 | executing_task_num, concurrent_num, compaction_type, |
982 | 0 | !compaction_registry_snapshot.has_compaction_task( |
983 | 0 | data_dir, CompactionType::CUMULATIVE_COMPACTION)); |
984 | 0 | for (const auto& tablet : tablets) { |
985 | 0 | if (tablet != nullptr) { |
986 | 0 | if (need_pick_tablet) { |
987 | 0 | tablets_compaction.emplace_back(tablet); |
988 | 0 | } |
989 | 0 | max_compaction_score = std::max(max_compaction_score, disk_max_score); |
990 | 0 | } |
991 | 0 | } |
992 | 0 | } |
993 | 0 | } |
994 | |
995 | 0 | if (max_compaction_score > 0) { |
996 | 0 | if (compaction_type == CompactionType::BASE_COMPACTION) { |
997 | 0 | DorisMetrics::instance()->tablet_base_max_compaction_score->set_value( |
998 | 0 | max_compaction_score); |
999 | 0 | } else { |
1000 | 0 | DorisMetrics::instance()->tablet_cumulative_max_compaction_score->set_value( |
1001 | 0 | max_compaction_score); |
1002 | 0 | } |
1003 | 0 | } |
1004 | 0 | return tablets_compaction; |
1005 | 0 | } |
1006 | | |
1007 | 0 | void StorageEngine::_update_cumulative_compaction_policy() { |
1008 | 0 | if (_cumulative_compaction_policies.empty()) { |
1009 | 0 | _cumulative_compaction_policies[CUMULATIVE_SIZE_BASED_POLICY] = |
1010 | 0 | CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy( |
1011 | 0 | CUMULATIVE_SIZE_BASED_POLICY); |
1012 | 0 | _cumulative_compaction_policies[CUMULATIVE_TIME_SERIES_POLICY] = |
1013 | 0 | CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy( |
1014 | 0 | CUMULATIVE_TIME_SERIES_POLICY); |
1015 | 0 | } |
1016 | 0 | } |
1017 | | |
1018 | | void StorageEngine::_pop_tablet_from_submitted_compaction(TabletSharedPtr tablet, |
1019 | 7 | CompactionType compaction_type) { |
1020 | 7 | _compaction_submit_registry.remove(tablet, compaction_type, [this]() { |
1021 | 7 | std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex); |
1022 | 7 | _wakeup_producer_flag = 1; |
1023 | 7 | _compaction_producer_sleep_cv.notify_one(); |
1024 | 7 | }); |
1025 | 7 | } |
1026 | | |
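Note: the wakeup callback registered here pairs with the producer loop in _compaction_tasks_producer_callback(), which waits on _compaction_producer_sleep_cv for up to 2000 ms with _wakeup_producer_flag as the predicate; freeing a compaction slot flips the flag and notifies, so the producer re-checks immediately instead of sleeping out the timeout. A minimal sketch of this flag-plus-condition-variable handshake, with hypothetical names:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    struct ProducerWakeup {
        std::mutex mtx;
        std::condition_variable cv;
        int wakeup_flag = 0;

        // Producer side: sleep up to 2s, or until a finished task flips the flag.
        void wait_for_free_slot() {
            std::unique_lock<std::mutex> lock(mtx);
            wakeup_flag = 0;
            cv.wait_for(lock, std::chrono::milliseconds(2000),
                        [this] { return wakeup_flag == 1; });
        }

        // Task cleanup side: what the wakeup_cb above effectively does.
        void notify_slot_freed() {
            std::lock_guard<std::mutex> lock(mtx);
            wakeup_flag = 1;
            cv.notify_one();
        }
    };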
1027 | | Status StorageEngine::_submit_compaction_task(TabletSharedPtr tablet, |
1028 | 21 | CompactionType compaction_type, bool force) { |
1029 | 21 | if (tablet->tablet_meta()->tablet_schema()->enable_single_replica_compaction() && |
1030 | 21 | should_fetch_from_peer(tablet->tablet_id())) { |
1031 | 0 | VLOG_CRITICAL << "start to submit single replica compaction task for tablet: " |
1032 | 0 | << tablet->tablet_id(); |
1033 | 0 | Status st = _submit_single_replica_compaction_task(tablet, compaction_type); |
1034 | 0 | if (!st.ok()) { |
1035 | 0 | LOG(WARNING) << "failed to submit single replica compaction task for tablet: " |
1036 | 0 | << tablet->tablet_id() << ", err: " << st; |
1037 | 0 | } |
1038 | |
1039 | 0 | return Status::OK(); |
1040 | 0 | } |
1041 | 21 | bool already_exist = _compaction_submit_registry.insert(tablet, compaction_type); |
1042 | 21 | if (already_exist) { |
1043 | 0 | return Status::AlreadyExist<false>( |
1044 | 0 | "compaction task has already been submitted, tablet_id={}, compaction_type={}.", |
1045 | 0 | tablet->tablet_id(), compaction_type); |
1046 | 0 | } |
1047 | 21 | tablet->compaction_stage = CompactionStage::PENDING; |
1048 | 21 | std::shared_ptr<CompactionMixin> compaction; |
1049 | 21 | int64_t permits = 0; |
1050 | 21 | Status st = Tablet::prepare_compaction_and_calculate_permits(compaction_type, tablet, |
1051 | 21 | compaction, permits); |
1052 | 21 | if (st.ok() && permits > 0) { |
1053 | 21 | if (!force) { |
1054 | 21 | _permit_limiter.request(permits); |
1055 | 21 | } |
1056 | 21 | std::unique_ptr<ThreadPool>& thread_pool = |
1057 | 21 | (compaction_type == CompactionType::CUMULATIVE_COMPACTION) |
1058 | 21 | ? _cumu_compaction_thread_pool |
1059 | 21 | : _base_compaction_thread_pool; |
1060 | 21 | VLOG_CRITICAL << "compaction thread pool. type: " |
1061 | 0 | << (compaction_type == CompactionType::CUMULATIVE_COMPACTION ? "CUMU" |
1062 | 0 | : "BASE") |
1063 | 0 | << ", num_threads: " << thread_pool->num_threads() |
1064 | 0 | << ", num_threads_pending_start: " << thread_pool->num_threads_pending_start() |
1065 | 0 | << ", num_active_threads: " << thread_pool->num_active_threads() |
1066 | 0 | << ", max_threads: " << thread_pool->max_threads() |
1067 | 0 | << ", min_threads: " << thread_pool->min_threads() |
1068 | 0 | << ", num_total_queued_tasks: " << thread_pool->get_queue_size(); |
1069 | 21 | auto st = thread_pool->submit_func([tablet, compaction = std::move(compaction), |
1070 | 21 | compaction_type, permits, force, this]() { |
1071 | 7 | if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) [[likely]] { |
1072 | 7 | DorisMetrics::instance()->cumulative_compaction_task_running_total->increment(1); |
1073 | 7 | DorisMetrics::instance()->cumulative_compaction_task_pending_total->set_value( |
1074 | 7 | _cumu_compaction_thread_pool->get_queue_size()); |
1075 | 7 | } else if (compaction_type == CompactionType::BASE_COMPACTION) { |
1076 | 0 | DorisMetrics::instance()->base_compaction_task_running_total->increment(1); |
1077 | 0 | DorisMetrics::instance()->base_compaction_task_pending_total->set_value( |
1078 | 0 | _base_compaction_thread_pool->get_queue_size()); |
1079 | 0 | } |
1080 | 7 | bool is_large_task = true; |
1081 | 7 | Defer defer {[&]() { |
1082 | 7 | DBUG_EXECUTE_IF("StorageEngine._submit_compaction_task.sleep", { sleep(5); }) |
1083 | 7 | if (!force) { |
1084 | 7 | _permit_limiter.release(permits); |
1085 | 7 | } |
1086 | 7 | _pop_tablet_from_submitted_compaction(tablet, compaction_type); |
1087 | 7 | tablet->compaction_stage = CompactionStage::NOT_SCHEDULED; |
1088 | 7 | if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) { |
1089 | 7 | std::lock_guard<std::mutex> lock(_cumu_compaction_delay_mtx); |
1090 | 7 | _cumu_compaction_thread_pool_used_threads--; |
1091 | 7 | if (!is_large_task) { |
1092 | 0 | _cumu_compaction_thread_pool_small_tasks_running--; |
1093 | 0 | } |
1094 | 7 | DorisMetrics::instance()->cumulative_compaction_task_running_total->increment( |
1095 | 7 | -1); |
1096 | 7 | DorisMetrics::instance()->cumulative_compaction_task_pending_total->set_value( |
1097 | 7 | _cumu_compaction_thread_pool->get_queue_size()); |
1098 | 7 | } else if (compaction_type == CompactionType::BASE_COMPACTION) { |
1099 | 0 | DorisMetrics::instance()->base_compaction_task_running_total->increment(-1); |
1100 | 0 | DorisMetrics::instance()->base_compaction_task_pending_total->set_value( |
1101 | 0 | _base_compaction_thread_pool->get_queue_size()); |
1102 | 0 | } |
1103 | 7 | }}; |
1104 | 7 | do { |
1105 | 7 | if (compaction->compaction_type() == ReaderType::READER_CUMULATIVE_COMPACTION) { |
1106 | 7 | std::lock_guard<std::mutex> lock(_cumu_compaction_delay_mtx); |
1107 | 7 | _cumu_compaction_thread_pool_used_threads++; |
1108 | 7 | if (config::large_cumu_compaction_task_min_thread_num > 1 && |
1109 | 7 | _cumu_compaction_thread_pool->max_threads() >= |
1110 | 0 | config::large_cumu_compaction_task_min_thread_num) { |
1111 | | // Determine if this is a large task based on configured thresholds |
1112 | 0 | is_large_task = |
1113 | 0 | (compaction->calc_input_rowsets_total_size() > |
1114 | 0 | config::large_cumu_compaction_task_bytes_threshold || |
1115 | 0 | compaction->calc_input_rowsets_row_num() > |
1116 | 0 | config::large_cumu_compaction_task_row_num_threshold); |
1117 | | |
1118 | | // Small task. No delay needed |
1119 | 0 | if (!is_large_task) { |
1120 | 0 | _cumu_compaction_thread_pool_small_tasks_running++; |
1121 | 0 | break; |
1122 | 0 | } |
1123 | | // Deal with large task |
1124 | 0 | if (_should_delay_large_task()) { |
1125 | 0 | LOG_WARNING( |
1126 | 0 | "failed to do CumulativeCompaction, cumu thread pool is " |
1127 | 0 | "intensive, delay large task.") |
1128 | 0 | .tag("tablet_id", tablet->tablet_id()) |
1129 | 0 | .tag("input_rows", compaction->calc_input_rowsets_row_num()) |
1130 | 0 | .tag("input_rowsets_total_size", |
1131 | 0 | compaction->calc_input_rowsets_total_size()) |
1132 | 0 | .tag("config::large_cumu_compaction_task_bytes_threshold", |
1133 | 0 | config::large_cumu_compaction_task_bytes_threshold) |
1134 | 0 | .tag("config::large_cumu_compaction_task_row_num_threshold", |
1135 | 0 | config::large_cumu_compaction_task_row_num_threshold) |
1136 | 0 | .tag("remaining threads", |
1137 | 0 | _cumu_compaction_thread_pool_used_threads) |
1138 | 0 | .tag("small_tasks_running", |
1139 | 0 | _cumu_compaction_thread_pool_small_tasks_running); |
1140 | | // Delay this task and sleep 5s for this tablet |
1141 | 0 | long now = duration_cast<std::chrono::milliseconds>( |
1142 | 0 | std::chrono::system_clock::now().time_since_epoch()) |
1143 | 0 | .count(); |
1144 | 0 | tablet->set_last_cumu_compaction_failure_time(now); |
1145 | 0 | return; |
1146 | 0 | } |
1147 | 0 | } |
1148 | 7 | } |
1149 | 7 | } while (false); |
1150 | 7 | if (!tablet->can_do_compaction(tablet->data_dir()->path_hash(), compaction_type)) { |
1151 | 0 | LOG(INFO) << "Tablet state has been changed, no need to begin this compaction " |
1152 | 0 | "task, tablet_id=" |
1153 | 0 | << tablet->tablet_id() << ", tablet_state=" << tablet->tablet_state(); |
1154 | 0 | return; |
1155 | 0 | } |
1156 | 7 | tablet->compaction_stage = CompactionStage::EXECUTING; |
1157 | 7 | TEST_SYNC_POINT_RETURN_WITH_VOID("olap_server::execute_compaction"); |
1158 | 1 | tablet->execute_compaction(*compaction); |
1159 | 1 | }); |
1160 | 21 | if (compaction_type == CompactionType::CUMULATIVE_COMPACTION) [[likely]] { |
1161 | 21 | DorisMetrics::instance()->cumulative_compaction_task_pending_total->set_value( |
1162 | 21 | _cumu_compaction_thread_pool->get_queue_size()); |
1163 | 21 | } else if (compaction_type == CompactionType::BASE_COMPACTION) { |
1164 | 0 | DorisMetrics::instance()->base_compaction_task_pending_total->set_value( |
1165 | 0 | _base_compaction_thread_pool->get_queue_size()); |
1166 | 0 | } |
1167 | 21 | if (!st.ok()) { |
1168 | 0 | if (!force) { |
1169 | 0 | _permit_limiter.release(permits); |
1170 | 0 | } |
1171 | 0 | _pop_tablet_from_submitted_compaction(tablet, compaction_type); |
1172 | 0 | tablet->compaction_stage = CompactionStage::NOT_SCHEDULED; |
1173 | 0 | return Status::InternalError( |
1174 | 0 | "failed to submit compaction task to thread pool, " |
1175 | 0 | "tablet_id={}, compaction_type={}.", |
1176 | 0 | tablet->tablet_id(), compaction_type); |
1177 | 0 | } |
1178 | 21 | return Status::OK(); |
1179 | 21 | } else { |
1180 | 0 | _pop_tablet_from_submitted_compaction(tablet, compaction_type); |
1181 | 0 | tablet->compaction_stage = CompactionStage::NOT_SCHEDULED; |
1182 | 0 | if (!st.ok()) { |
1183 | 0 | return Status::InternalError( |
1184 | 0 | "failed to prepare compaction task and calculate permits, " |
1185 | 0 | "tablet_id={}, compaction_type={}, " |
1186 | 0 | "permit={}, current_permit={}, status={}", |
1187 | 0 | tablet->tablet_id(), compaction_type, permits, _permit_limiter.usage(), |
1188 | 0 | st.to_string()); |
1189 | 0 | } |
1190 | 0 | return st; |
1191 | 0 | } |
1192 | 21 | } |
1193 | | |
1194 | | Status StorageEngine::submit_compaction_task(TabletSharedPtr tablet, CompactionType compaction_type, |
1195 | 0 | bool force, bool eager) { |
1196 | 0 | if (!eager) { |
1197 | 0 | DCHECK(compaction_type == CompactionType::BASE_COMPACTION || |
1198 | 0 | compaction_type == CompactionType::CUMULATIVE_COMPACTION); |
1199 | 0 | auto compaction_registry_snapshot = _compaction_submit_registry.create_snapshot(); |
1200 | 0 | auto stores = get_stores(); |
1201 | |
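| | // The BE is considered busy when none of its data dirs still has a free compaction |
| | // slot for this compaction type; in that case the submission is skipped. |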
1202 | 0 | bool is_busy = std::none_of( |
1203 | 0 | stores.begin(), stores.end(), |
1204 | 0 | [&compaction_registry_snapshot, compaction_type](auto* data_dir) { |
1205 | 0 | return has_free_compaction_slot( |
1206 | 0 | &compaction_registry_snapshot, data_dir, compaction_type, |
1207 | 0 | compaction_registry_snapshot.count_executing_cumu_and_base(data_dir)); |
1208 | 0 | }); |
1209 | 0 | if (is_busy) { |
1210 | 0 | LOG_EVERY_N(WARNING, 100) |
1211 | 0 | << "Too busy to submit a compaction task, table_id=" << tablet->get_table_id(); |
1212 | 0 | return Status::OK(); |
1213 | 0 | } |
1214 | 0 | } |
1215 | 0 | _update_cumulative_compaction_policy(); |
1216 | | // alter table tableName set ("compaction_policy"="time_series") |
1217 | | // if ALTER TABLE changed the compaction policy, update the tablet's compaction policy shared ptr to match |
1218 | 0 | if (tablet->get_cumulative_compaction_policy() == nullptr || |
1219 | 0 | tablet->get_cumulative_compaction_policy()->name() != |
1220 | 0 | tablet->tablet_meta()->compaction_policy()) { |
1221 | 0 | tablet->set_cumulative_compaction_policy( |
1222 | 0 | _cumulative_compaction_policies.at(tablet->tablet_meta()->compaction_policy())); |
1223 | 0 | } |
1224 | 0 | tablet->set_skip_compaction(false); |
1225 | 0 | return _submit_compaction_task(tablet, compaction_type, force); |
1226 | 0 | } |
1227 | | |
1228 | | Status StorageEngine::_handle_seg_compaction(std::shared_ptr<SegcompactionWorker> worker, |
1229 | | SegCompactionCandidatesSharedPtr segments, |
1230 | 11 | uint64_t submission_time) { |
1231 | | // note: be aware that worker->_writer may be released when the task is cancelled |
1232 | 11 | uint64_t exec_queue_time = GetCurrentTimeMicros() - submission_time; |
1233 | 11 | LOG(INFO) << "segcompaction thread pool queue time(ms): " << exec_queue_time / 1000; |
1234 | 11 | worker->compact_segments(segments); |
1235 | | // return OK here. error will be reported via BetaRowsetWriter::_segcompaction_status |
1236 | 11 | return Status::OK(); |
1237 | 11 | } |
1238 | | |
1239 | | Status StorageEngine::submit_seg_compaction_task(std::shared_ptr<SegcompactionWorker> worker, |
1240 | 11 | SegCompactionCandidatesSharedPtr segments) { |
1241 | 11 | uint64_t submission_time = GetCurrentTimeMicros(); |
1242 | 11 | return _seg_compaction_thread_pool->submit_func([this, worker, segments, submission_time] { |
1243 | 11 | static_cast<void>(_handle_seg_compaction(worker, segments, submission_time)); |
1244 | 11 | }); |
1245 | 11 | } |
1246 | | |
1247 | 0 | Status StorageEngine::process_index_change_task(const TAlterInvertedIndexReq& request) { |
1248 | 0 | auto tablet_id = request.tablet_id; |
1249 | 0 | TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id); |
1250 | 0 | DBUG_EXECUTE_IF("StorageEngine::process_index_change_task_tablet_nullptr", |
1251 | 0 | { tablet = nullptr; }) |
1252 | 0 | if (tablet == nullptr) { |
1253 | 0 | LOG(WARNING) << "tablet: " << tablet_id << " not exist"; |
1254 | 0 | return Status::InternalError("tablet not exist, tablet_id={}.", tablet_id); |
1255 | 0 | } |
1256 | | |
1257 | 0 | IndexBuilderSharedPtr index_builder = std::make_shared<IndexBuilder>( |
1258 | 0 | *this, tablet, request.columns, request.alter_inverted_indexes, request.is_drop_op); |
1259 | 0 | RETURN_IF_ERROR(_handle_index_change(index_builder)); |
1260 | 0 | return Status::OK(); |
1261 | 0 | } |
1262 | | |
1263 | 0 | Status StorageEngine::_handle_index_change(IndexBuilderSharedPtr index_builder) { |
1264 | 0 | RETURN_IF_ERROR(index_builder->init()); |
1265 | 0 | RETURN_IF_ERROR(index_builder->do_build_inverted_index()); |
1266 | 0 | return Status::OK(); |
1267 | 0 | } |
1268 | | |
1269 | 0 | void StorageEngine::_cooldown_tasks_producer_callback() { |
1270 | 0 | int64_t interval = config::generate_cooldown_task_interval_sec; |
1271 | | // the cooldown replica may be slow to upload its meta file, so we should wait |
1272 | | // until it has finished uploading |
1273 | 0 | int64_t skip_failed_interval = interval * 10; |
1274 | 0 | do { |
1275 | | // these tablets are ordered by priority desc |
1276 | 0 | std::vector<TabletSharedPtr> tablets; |
1277 | 0 | std::vector<RowsetSharedPtr> rowsets; |
1278 | | // TODO(luwei) : a more efficient way to get cooldown tablets |
1279 | 0 | auto cur_time = time(nullptr); |
1280 | | // skip tablets that are not in the running state, those already pending cooldown, |
1281 | | // and tablets that recently failed to follow cooldown |
1282 | 0 | auto skip_tablet = [this, skip_failed_interval, |
1283 | 0 | cur_time](const TabletSharedPtr& tablet) -> bool { |
1284 | 0 | bool is_skip = |
1285 | 0 | cur_time - tablet->last_failed_follow_cooldown_time() < skip_failed_interval || |
1286 | 0 | TABLET_RUNNING != tablet->tablet_state(); |
1287 | 0 | if (is_skip) { |
1288 | 0 | return is_skip; |
1289 | 0 | } |
1290 | 0 | std::lock_guard<std::mutex> lock(_running_cooldown_mutex); |
1291 | 0 | return _running_cooldown_tablets.find(tablet->tablet_id()) != |
1292 | 0 | _running_cooldown_tablets.end(); |
1293 | 0 | }; |
1294 | 0 | _tablet_manager->get_cooldown_tablets(&tablets, &rowsets, std::move(skip_tablet)); |
1295 | 0 | LOG(INFO) << "cooldown producer get tablet num: " << tablets.size(); |
1296 | 0 | int max_priority = tablets.size(); |
1297 | 0 | int index = 0; |
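| | // max_priority gives tablets earlier in the priority-ordered list a higher |
| | // thread-pool priority; index pairs each tablet with its corresponding rowset. |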
1298 | 0 | for (const auto& tablet : tablets) { |
1299 | 0 | { |
1300 | 0 | std::lock_guard<std::mutex> lock(_running_cooldown_mutex); |
1301 | 0 | _running_cooldown_tablets.insert(tablet->tablet_id()); |
1302 | 0 | } |
1303 | 0 | PriorityThreadPool::Task task; |
1304 | 0 | RowsetSharedPtr rowset = std::move(rowsets[index++]); |
1305 | 0 | task.work_function = [tablet, rowset, task_size = tablets.size(), this]() { |
1306 | 0 | Status st = tablet->cooldown(rowset); |
1307 | 0 | { |
1308 | 0 | std::lock_guard<std::mutex> lock(_running_cooldown_mutex); |
1309 | 0 | _running_cooldown_tablets.erase(tablet->tablet_id()); |
1310 | 0 | } |
1311 | 0 | if (!st.ok()) { |
1312 | 0 | LOG(WARNING) << "failed to cooldown, tablet: " << tablet->tablet_id() |
1313 | 0 | << " err: " << st; |
1314 | 0 | } else { |
1315 | 0 | LOG(INFO) << "succeed to cooldown, tablet: " << tablet->tablet_id() |
1316 | 0 | << " cooldown progress (" |
1317 | 0 | << task_size - _cooldown_thread_pool->get_queue_size() << "/" |
1318 | 0 | << task_size << ")"; |
1319 | 0 | } |
1320 | 0 | }; |
1321 | 0 | task.priority = max_priority--; |
1322 | 0 | bool submitted = _cooldown_thread_pool->offer(std::move(task)); |
1323 | |
1324 | 0 | if (!submitted) { |
1325 | 0 | LOG(INFO) << "failed to submit cooldown task"; |
1326 | 0 | } |
1327 | 0 | } |
1328 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
1329 | 0 | } |
1330 | | |
1331 | 0 | void StorageEngine::_remove_unused_remote_files_callback() { |
1332 | 0 | while (!_stop_background_threads_latch.wait_for( |
1333 | 0 | std::chrono::seconds(config::remove_unused_remote_files_interval_sec))) { |
1334 | 0 | LOG(INFO) << "begin to remove unused remote files"; |
1335 | 0 | do_remove_unused_remote_files(); |
1336 | 0 | } |
1337 | 0 | } |
1338 | | |
1339 | 0 | void StorageEngine::do_remove_unused_remote_files() { |
1340 | 0 | auto tablets = tablet_manager()->get_all_tablet([](Tablet* t) { |
1341 | 0 | return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() && |
1342 | 0 | t->tablet_state() == TABLET_RUNNING && |
1343 | 0 | t->cooldown_conf_unlocked().cooldown_replica_id == t->replica_id(); |
1344 | 0 | }); |
1345 | 0 | TConfirmUnusedRemoteFilesRequest req; |
1346 | 0 | req.__isset.confirm_list = true; |
1347 | | // tablet_id -> [storage_resource, unused_remote_files] |
1348 | 0 | using unused_remote_files_buffer_t = |
1349 | 0 | std::unordered_map<int64_t, std::pair<StorageResource, std::vector<io::FileInfo>>>; |
1350 | 0 | unused_remote_files_buffer_t buffer; |
1351 | 0 | int64_t num_files_in_buffer = 0; |
1352 | | // assuming a filename is about 0.1KB, the buffer size should not be larger than 100MB |
1353 | 0 | constexpr int64_t max_files_in_buffer = 1000000; |
1354 | |
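| | // For a single tablet: list its remote files, drop every file that is still |
| | // referenced, and stage the remainder in `buffer` together with an entry in the |
| | // confirm request sent to the FE. |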
1355 | 0 | auto calc_unused_remote_files = [&req, &buffer, &num_files_in_buffer, this](Tablet* t) { |
1356 | 0 | auto storage_resource = get_resource_by_storage_policy_id(t->storage_policy_id()); |
1357 | 0 | if (!storage_resource) { |
1358 | 0 | LOG(WARNING) << "encounter error when remove unused remote files, tablet_id=" |
1359 | 0 | << t->tablet_id() << " : " << storage_resource.error(); |
1360 | 0 | return; |
1361 | 0 | } |
1362 | | |
1363 | | // TODO(plat1ko): Support path v1 |
1364 | 0 | if (storage_resource->path_version > 0) { |
1365 | 0 | return; |
1366 | 0 | } |
1367 | | |
1368 | 0 | std::vector<io::FileInfo> files; |
1369 | | // FIXME(plat1ko): What if user reset resource in storage policy to another resource? |
1370 | | // Maybe we should also list files in previously uploaded resources. |
1371 | 0 | bool exists = true; |
1372 | 0 | auto st = storage_resource->fs->list(storage_resource->remote_tablet_path(t->tablet_id()), |
1373 | 0 | true, &files, &exists); |
1374 | 0 | if (!st.ok()) { |
1375 | 0 | LOG(WARNING) << "encounter error when remove unused remote files, tablet_id=" |
1376 | 0 | << t->tablet_id() << " : " << st; |
1377 | 0 | return; |
1378 | 0 | } |
1379 | 0 | if (!exists || files.empty()) { |
1380 | 0 | return; |
1381 | 0 | } |
1382 | | // get all cooldowned rowsets |
1383 | 0 | RowsetIdUnorderedSet cooldowned_rowsets; |
1384 | 0 | UniqueId cooldown_meta_id; |
1385 | 0 | { |
1386 | 0 | std::shared_lock rlock(t->get_header_lock()); |
1387 | 0 | for (auto&& rs_meta : t->tablet_meta()->all_rs_metas()) { |
1388 | 0 | if (!rs_meta->is_local()) { |
1389 | 0 | cooldowned_rowsets.insert(rs_meta->rowset_id()); |
1390 | 0 | } |
1391 | 0 | } |
1392 | 0 | if (cooldowned_rowsets.empty()) { |
1393 | 0 | return; |
1394 | 0 | } |
1395 | 0 | cooldown_meta_id = t->tablet_meta()->cooldown_meta_id(); |
1396 | 0 | } |
1397 | 0 | auto [cooldown_term, cooldown_replica_id] = t->cooldown_conf(); |
1398 | 0 | if (cooldown_replica_id != t->replica_id()) { |
1399 | 0 | return; |
1400 | 0 | } |
1401 | | // {cooldown_replica_id}.{cooldown_term}.meta |
1402 | 0 | std::string remote_meta_path = |
1403 | 0 | cooldown_tablet_meta_filename(cooldown_replica_id, cooldown_term); |
1404 | | // filter out the paths that should be reserved |
1405 | 0 | auto filter = [&, this](io::FileInfo& info) { |
1406 | 0 | std::string_view filename = info.file_name; |
1407 | 0 | if (filename.ends_with(".meta")) { |
1408 | 0 | return filename == remote_meta_path; |
1409 | 0 | } |
1410 | 0 | auto rowset_id = extract_rowset_id(filename); |
1411 | 0 | if (rowset_id.hi == 0) { |
1412 | 0 | return false; |
1413 | 0 | } |
1414 | 0 | return cooldowned_rowsets.contains(rowset_id) || |
1415 | 0 | pending_remote_rowsets().contains(rowset_id); |
1416 | 0 | }; |
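| | // remove_if drops the entries the filter marked as reserved (the current meta file |
| | // and files of cooldowned or pending rowsets), leaving only deletion candidates. |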
1417 | 0 | files.erase(std::remove_if(files.begin(), files.end(), std::move(filter)), files.end()); |
1418 | 0 | if (files.empty()) { |
1419 | 0 | return; |
1420 | 0 | } |
1421 | 0 | files.shrink_to_fit(); |
1422 | 0 | num_files_in_buffer += files.size(); |
1423 | 0 | buffer.insert({t->tablet_id(), {*storage_resource, std::move(files)}}); |
1424 | 0 | auto& info = req.confirm_list.emplace_back(); |
1425 | 0 | info.__set_tablet_id(t->tablet_id()); |
1426 | 0 | info.__set_cooldown_replica_id(cooldown_replica_id); |
1427 | 0 | info.__set_cooldown_meta_id(cooldown_meta_id.to_thrift()); |
1428 | 0 | }; |
1429 | |
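| | // Ask the FE to confirm which tablets' staged files are really unused, then |
| | // batch-delete the files of the confirmed tablets only. |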
1430 | 0 | auto confirm_and_remove_files = [&buffer, &req, &num_files_in_buffer]() { |
1431 | 0 | TConfirmUnusedRemoteFilesResult result; |
1432 | 0 | LOG(INFO) << "begin to confirm unused remote files. num_tablets=" << buffer.size() |
1433 | 0 | << " num_files=" << num_files_in_buffer; |
1434 | 0 | auto st = MasterServerClient::instance()->confirm_unused_remote_files(req, &result); |
1435 | 0 | if (!st.ok()) { |
1436 | 0 | LOG(WARNING) << st; |
1437 | 0 | return; |
1438 | 0 | } |
1439 | 0 | for (auto id : result.confirmed_tablets) { |
1440 | 0 | if (auto it = buffer.find(id); LIKELY(it != buffer.end())) { |
1441 | 0 | auto& storage_resource = it->second.first; |
1442 | 0 | auto& files = it->second.second; |
1443 | 0 | std::vector<io::Path> paths; |
1444 | 0 | paths.reserve(files.size()); |
1445 | | // delete unused files |
1446 | 0 | LOG(INFO) << "delete unused files. root_path=" << storage_resource.fs->root_path() |
1447 | 0 | << " tablet_id=" << id; |
1448 | 0 | io::Path dir = storage_resource.remote_tablet_path(id); |
1449 | 0 | for (auto& file : files) { |
1450 | 0 | auto file_path = dir / file.file_name; |
1451 | 0 | LOG(INFO) << "delete unused file: " << file_path.native(); |
1452 | 0 | paths.push_back(std::move(file_path)); |
1453 | 0 | } |
1454 | 0 | st = storage_resource.fs->batch_delete(paths); |
1455 | 0 | if (!st.ok()) { |
1456 | 0 | LOG(WARNING) << "failed to delete unused files, tablet_id=" << id << " : " |
1457 | 0 | << st; |
1458 | 0 | } |
1459 | 0 | buffer.erase(it); |
1460 | 0 | } |
1461 | 0 | } |
1462 | 0 | }; |
1463 | | |
1464 | | // batch confirm to reduce FE's overhead |
1465 | 0 | auto next_confirm_time = std::chrono::steady_clock::now() + |
1466 | 0 | std::chrono::seconds(config::confirm_unused_remote_files_interval_sec); |
1467 | 0 | for (auto& t : tablets) { |
1468 | 0 | if (t.use_count() <= 1 // this means tablet has been dropped |
1469 | 0 | || t->cooldown_conf_unlocked().cooldown_replica_id != t->replica_id() || |
1470 | 0 | t->tablet_state() != TABLET_RUNNING) { |
1471 | 0 | continue; |
1472 | 0 | } |
1473 | 0 | calc_unused_remote_files(t.get()); |
1474 | 0 | if (num_files_in_buffer > 0 && (num_files_in_buffer > max_files_in_buffer || |
1475 | 0 | std::chrono::steady_clock::now() > next_confirm_time)) { |
1476 | 0 | confirm_and_remove_files(); |
1477 | 0 | buffer.clear(); |
1478 | 0 | req.confirm_list.clear(); |
1479 | 0 | num_files_in_buffer = 0; |
1480 | 0 | next_confirm_time = |
1481 | 0 | std::chrono::steady_clock::now() + |
1482 | 0 | std::chrono::seconds(config::confirm_unused_remote_files_interval_sec); |
1483 | 0 | } |
1484 | 0 | } |
1485 | 0 | if (num_files_in_buffer > 0) { |
1486 | 0 | confirm_and_remove_files(); |
1487 | 0 | } |
1488 | 0 | } |
1489 | | |
1490 | 0 | void StorageEngine::_cold_data_compaction_producer_callback() { |
1491 | 0 | while (!_stop_background_threads_latch.wait_for( |
1492 | 0 | std::chrono::seconds(config::cold_data_compaction_interval_sec))) { |
1493 | 0 | if (config::disable_auto_compaction || |
1494 | 0 | GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE)) { |
1495 | 0 | continue; |
1496 | 0 | } |
1497 | | |
1498 | 0 | std::unordered_set<int64_t> copied_tablet_submitted; |
1499 | 0 | { |
1500 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1501 | 0 | copied_tablet_submitted = _cold_compaction_tablet_submitted; |
1502 | 0 | } |
1503 | 0 | int n = config::cold_data_compaction_thread_num - copied_tablet_submitted.size(); |
1504 | 0 | if (n <= 0) { |
1505 | 0 | continue; |
1506 | 0 | } |
1507 | 0 | auto tablets = _tablet_manager->get_all_tablet([&copied_tablet_submitted](Tablet* t) { |
1508 | 0 | return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() && |
1509 | 0 | t->tablet_state() == TABLET_RUNNING && |
1510 | 0 | !copied_tablet_submitted.contains(t->tablet_id()) && |
1511 | 0 | !t->tablet_meta()->tablet_schema()->disable_auto_compaction(); |
1512 | 0 | }); |
1513 | 0 | std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_compact; |
1514 | 0 | tablet_to_compact.reserve(n + 1); |
1515 | 0 | std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_follow; |
1516 | 0 | tablet_to_follow.reserve(n + 1); |
1517 | |
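| | // Each candidate list keeps at most n entries with the highest scores: once a list |
| | // exceeds n, it is sorted by score in descending order and the lowest entry dropped. |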
1518 | 0 | for (auto& t : tablets) { |
1519 | 0 | if (t->replica_id() == t->cooldown_conf_unlocked().cooldown_replica_id) { |
1520 | 0 | auto score = t->calc_cold_data_compaction_score(); |
1521 | 0 | if (score < 4) { |
1522 | 0 | continue; |
1523 | 0 | } |
1524 | 0 | tablet_to_compact.emplace_back(t, score); |
1525 | 0 | if (tablet_to_compact.size() > n) { |
1526 | 0 | std::sort(tablet_to_compact.begin(), tablet_to_compact.end(), |
1527 | 0 | [](auto& a, auto& b) { return a.second > b.second; }); |
1528 | 0 | tablet_to_compact.pop_back(); |
1529 | 0 | } |
1530 | 0 | continue; |
1531 | 0 | } |
1532 | | // else, need to follow |
1533 | 0 | { |
1534 | 0 | std::lock_guard lock(_running_cooldown_mutex); |
1535 | 0 | if (_running_cooldown_tablets.contains(t->tablet_id())) { |
1536 | | // already in cooldown queue |
1537 | 0 | continue; |
1538 | 0 | } |
1539 | 0 | } |
1540 | | // TODO(plat1ko): some avoidance strategy if failed to follow |
1541 | 0 | auto score = t->calc_cold_data_compaction_score(); |
1542 | 0 | tablet_to_follow.emplace_back(t, score); |
1543 | |
1544 | 0 | if (tablet_to_follow.size() > n) { |
1545 | 0 | std::sort(tablet_to_follow.begin(), tablet_to_follow.end(), |
1546 | 0 | [](auto& a, auto& b) { return a.second > b.second; }); |
1547 | 0 | tablet_to_follow.pop_back(); |
1548 | 0 | } |
1549 | 0 | } |
1550 | |
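| | // Each submitted task registers the tablet as submitted, tries the tablet's cold |
| | // compaction lock, refreshes the cumulative compaction policy if it changed, then |
| | // prepares and executes the cold data compaction. |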
1551 | 0 | for (auto& [tablet, score] : tablet_to_compact) { |
1552 | 0 | LOG(INFO) << "submit cold data compaction. tablet_id=" << tablet->tablet_id() |
1553 | 0 | << " score=" << score; |
1554 | 0 | static_cast<void>(_cold_data_compaction_thread_pool->submit_func( |
1555 | 0 | [&, t = std::move(tablet), this]() { |
1556 | 0 | auto compaction = std::make_shared<ColdDataCompaction>(*this, t); |
1557 | 0 | { |
1558 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1559 | 0 | _cold_compaction_tablet_submitted.insert(t->tablet_id()); |
1560 | 0 | } |
1561 | 0 | Defer defer {[&] { |
1562 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1563 | 0 | _cold_compaction_tablet_submitted.erase(t->tablet_id()); |
1564 | 0 | }}; |
1565 | 0 | std::unique_lock cold_compaction_lock(t->get_cold_compaction_lock(), |
1566 | 0 | std::try_to_lock); |
1567 | 0 | if (!cold_compaction_lock.owns_lock()) { |
1568 | 0 | LOG(WARNING) << "try cold_compaction_lock failed, tablet_id=" |
1569 | 0 | << t->tablet_id(); |
1570 | 0 | return; |
1571 | 0 | } |
1572 | 0 | _update_cumulative_compaction_policy(); |
1573 | 0 | if (t->get_cumulative_compaction_policy() == nullptr || |
1574 | 0 | t->get_cumulative_compaction_policy()->name() != |
1575 | 0 | t->tablet_meta()->compaction_policy()) { |
1576 | 0 | t->set_cumulative_compaction_policy(_cumulative_compaction_policies.at( |
1577 | 0 | t->tablet_meta()->compaction_policy())); |
1578 | 0 | } |
1579 | |
1580 | 0 | auto st = compaction->prepare_compact(); |
1581 | 0 | if (!st.ok()) { |
1582 | 0 | LOG(WARNING) << "failed to prepare cold data compaction. tablet_id=" |
1583 | 0 | << t->tablet_id() << " err=" << st; |
1584 | 0 | return; |
1585 | 0 | } |
1586 | | |
1587 | 0 | st = compaction->execute_compact(); |
1588 | 0 | if (!st.ok()) { |
1589 | 0 | LOG(WARNING) << "failed to execute cold data compaction. tablet_id=" |
1590 | 0 | << t->tablet_id() << " err=" << st; |
1591 | 0 | return; |
1592 | 0 | } |
1593 | 0 | })); |
1594 | 0 | } |
1595 | |
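| | // Replicas that are not the cooldown replica follow the cooldown meta instead of |
| | // compacting: the submitted task simply calls Tablet::cooldown(). |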
1596 | 0 | for (auto& [tablet, score] : tablet_to_follow) { |
1597 | 0 | LOG(INFO) << "submit to follow cooldown meta. tablet_id=" << tablet->tablet_id() |
1598 | 0 | << " score=" << score; |
1599 | 0 | static_cast<void>(_cold_data_compaction_thread_pool->submit_func([&, |
1600 | 0 | t = std::move( |
1601 | 0 | tablet)]() { |
1602 | 0 | { |
1603 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1604 | 0 | _cold_compaction_tablet_submitted.insert(t->tablet_id()); |
1605 | 0 | } |
1606 | 0 | auto st = t->cooldown(); |
1607 | 0 | { |
1608 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1609 | 0 | _cold_compaction_tablet_submitted.erase(t->tablet_id()); |
1610 | 0 | } |
1611 | 0 | if (!st.ok()) { |
1612 | | // The cooldown replica may upload its data relatively slowly, |
1613 | | // so following can fail for a short period of time |
1614 | 0 | LOG_EVERY_N(WARNING, 5) |
1615 | 0 | << "failed to cooldown. tablet_id=" << t->tablet_id() << " err=" << st; |
1616 | 0 | } |
1617 | 0 | })); |
1618 | 0 | } |
1619 | 0 | } |
1620 | 0 | } |
1621 | | |
1622 | | void StorageEngine::add_async_publish_task(int64_t partition_id, int64_t tablet_id, |
1623 | | int64_t publish_version, int64_t transaction_id, |
1624 | 2.05k | bool is_recovery) { |
1625 | 2.05k | if (!is_recovery) { |
1626 | 2.05k | bool exists = false; |
1627 | 2.05k | { |
1628 | 2.05k | std::shared_lock<std::shared_mutex> rlock(_async_publish_lock); |
1629 | 2.05k | if (auto tablet_iter = _async_publish_tasks.find(tablet_id); |
1630 | 2.05k | tablet_iter != _async_publish_tasks.end()) { |
1631 | 2.05k | if (auto iter = tablet_iter->second.find(publish_version); |
1632 | 2.05k | iter != tablet_iter->second.end()) { |
1633 | 20 | exists = true; |
1634 | 20 | } |
1635 | 2.05k | } |
1636 | 2.05k | } |
1637 | 2.05k | if (exists) { |
1638 | 20 | return; |
1639 | 20 | } |
1640 | 2.03k | TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id); |
1641 | 2.03k | if (tablet == nullptr) { |
1642 | 0 | LOG(INFO) << "tablet may be dropped when adding async publish task, tablet_id: " |
1643 | 0 | << tablet_id; |
1644 | 0 | return; |
1645 | 0 | } |
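| | // Persist the pending publish info so it can be replayed after a BE restart; |
| | // recovery calls skip this step, presumably because the info was already loaded |
| | // from the tablet meta store. |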
1646 | 2.03k | PendingPublishInfoPB pending_publish_info_pb; |
1647 | 2.03k | pending_publish_info_pb.set_partition_id(partition_id); |
1648 | 2.03k | pending_publish_info_pb.set_transaction_id(transaction_id); |
1649 | 2.03k | static_cast<void>(TabletMetaManager::save_pending_publish_info( |
1650 | 2.03k | tablet->data_dir(), tablet->tablet_id(), publish_version, |
1651 | 2.03k | pending_publish_info_pb.SerializeAsString())); |
1652 | 2.03k | } |
1653 | 2.03k | LOG(INFO) << "add pending publish task, tablet_id: " << tablet_id |
1654 | 2.03k | << " version: " << publish_version << " txn_id:" << transaction_id |
1655 | 2.03k | << " is_recovery: " << is_recovery; |
1656 | 2.03k | std::unique_lock<std::shared_mutex> wlock(_async_publish_lock); |
1657 | 2.03k | _async_publish_tasks[tablet_id][publish_version] = {transaction_id, partition_id}; |
1658 | 2.03k | } |
1659 | | |
1660 | 3 | int64_t StorageEngine::get_pending_publish_min_version(int64_t tablet_id) { |
1661 | 3 | std::shared_lock<std::shared_mutex> rlock(_async_publish_lock); |
1662 | 3 | auto iter = _async_publish_tasks.find(tablet_id); |
1663 | 3 | if (iter == _async_publish_tasks.end()) { |
1664 | 0 | return INT64_MAX; |
1665 | 0 | } |
1666 | 3 | if (iter->second.empty()) { |
1667 | 0 | return INT64_MAX; |
1668 | 0 | } |
1669 | 3 | return iter->second.begin()->first; |
1670 | 3 | } |
1671 | | |
1672 | 10 | void StorageEngine::_process_async_publish() { |
1673 | | // tablet, publish_version |
1674 | 10 | std::vector<std::pair<TabletSharedPtr, int64_t>> need_removed_tasks; |
1675 | 10 | { |
1676 | 10 | std::unique_lock<std::shared_mutex> wlock(_async_publish_lock); |
1677 | 10 | for (auto tablet_iter = _async_publish_tasks.begin(); |
1678 | 20 | tablet_iter != _async_publish_tasks.end();) { |
1679 | 10 | if (tablet_iter->second.empty()) { |
1680 | 1 | tablet_iter = _async_publish_tasks.erase(tablet_iter); |
1681 | 1 | continue; |
1682 | 1 | } |
1683 | 9 | int64_t tablet_id = tablet_iter->first; |
1684 | 9 | TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id); |
1685 | 9 | if (!tablet) { |
1686 | 1 | LOG(WARNING) << "tablet does not exist when async publish, tablet_id: " |
1687 | 1 | << tablet_id; |
1688 | 1 | tablet_iter = _async_publish_tasks.erase(tablet_iter); |
1689 | 1 | continue; |
1690 | 1 | } |
1691 | | |
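| | // Pending tasks for a tablet are kept in a map keyed by publish version, so |
| | // begin() is the smallest pending version. |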
1692 | 8 | auto task_iter = tablet_iter->second.begin(); |
1693 | 8 | int64_t version = task_iter->first; |
1694 | 8 | int64_t transaction_id = task_iter->second.first; |
1695 | 8 | int64_t partition_id = task_iter->second.second; |
1696 | 8 | int64_t max_version = tablet->max_version().second; |
1697 | | |
1698 | 8 | if (version <= max_version) { |
1699 | 6 | need_removed_tasks.emplace_back(tablet, version); |
1700 | 6 | tablet_iter->second.erase(task_iter); |
1701 | 6 | tablet_iter++; |
1702 | 6 | continue; |
1703 | 6 | } |
1704 | 2 | if (version != max_version + 1) { |
1705 | 1 | int32_t max_version_config = tablet->max_version_config(); |
1706 | | // Keep only the most recent versions |
1707 | 31 | while (tablet_iter->second.size() > max_version_config) { |
1708 | 30 | need_removed_tasks.emplace_back(tablet, version); |
1709 | 30 | task_iter = tablet_iter->second.erase(task_iter); |
1710 | 30 | version = task_iter->first; |
1711 | 30 | } |
1712 | 1 | tablet_iter++; |
1713 | 1 | continue; |
1714 | 1 | } |
1715 | | |
1716 | 1 | auto async_publish_task = std::make_shared<AsyncTabletPublishTask>( |
1717 | 1 | *this, tablet, partition_id, transaction_id, version); |
1718 | 1 | static_cast<void>(_tablet_publish_txn_thread_pool->submit_func( |
1719 | 1 | [=]() { async_publish_task->handle(); })); |
1720 | 1 | tablet_iter->second.erase(task_iter); |
1721 | 1 | need_removed_tasks.emplace_back(tablet, version); |
1722 | 1 | tablet_iter++; |
1723 | 1 | } |
1724 | 10 | } |
1725 | 37 | for (auto& [tablet, publish_version] : need_removed_tasks) { |
1726 | 37 | static_cast<void>(TabletMetaManager::remove_pending_publish_info( |
1727 | 37 | tablet->data_dir(), tablet->tablet_id(), publish_version)); |
1728 | 37 | } |
1729 | 10 | } |
1730 | | |
1731 | 0 | void StorageEngine::_async_publish_callback() { |
1732 | 0 | while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(30))) { |
1733 | 0 | _process_async_publish(); |
1734 | 0 | } |
1735 | 0 | } |
1736 | | |
1737 | 0 | void StorageEngine::_check_tablet_delete_bitmap_score_callback() { |
1738 | 0 | LOG(INFO) << "try to start checking tablet delete bitmap score!"; |
1739 | 0 | while (!_stop_background_threads_latch.wait_for( |
1740 | 0 | std::chrono::seconds(config::check_tablet_delete_bitmap_interval_seconds))) { |
1741 | 0 | if (!config::enable_check_tablet_delete_bitmap_score) { |
1742 | 0 | return; |
1743 | 0 | } |
1744 | 0 | uint64_t max_delete_bitmap_score = 0; |
1745 | 0 | uint64_t max_base_rowset_delete_bitmap_score = 0; |
1746 | 0 | std::vector<CloudTabletSPtr> tablets; |
1747 | 0 | _tablet_manager.get()->get_topn_tablet_delete_bitmap_score( |
1748 | 0 | &max_delete_bitmap_score, &max_base_rowset_delete_bitmap_score); |
1749 | 0 | if (max_delete_bitmap_score > 0) { |
1750 | 0 | _tablet_max_delete_bitmap_score_metrics->set_value(max_delete_bitmap_score); |
1751 | 0 | } |
1752 | 0 | if (max_base_rowset_delete_bitmap_score > 0) { |
1753 | 0 | _tablet_max_base_rowset_delete_bitmap_score_metrics->set_value( |
1754 | 0 | max_base_rowset_delete_bitmap_score); |
1755 | 0 | } |
1756 | 0 | } |
1757 | 0 | } |
1758 | | |
1759 | | } // namespace doris |