/root/doris/be/src/olap/olap_server.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include <gen_cpp/Types_types.h> |
19 | | #include <gen_cpp/olap_file.pb.h> |
20 | | #include <stdint.h> |
21 | | |
22 | | #include <algorithm> |
23 | | #include <atomic> |
24 | | // IWYU pragma: no_include <bits/chrono.h> |
25 | | #include <chrono> // IWYU pragma: keep |
26 | | #include <cmath> |
27 | | #include <condition_variable> |
28 | | #include <ctime> |
29 | | #include <functional> |
30 | | #include <map> |
31 | | #include <memory> |
32 | | #include <mutex> |
33 | | #include <ostream> |
34 | | #include <random> |
35 | | #include <shared_mutex> |
36 | | #include <string> |
37 | | #include <type_traits> |
38 | | #include <unordered_set> |
39 | | #include <utility> |
40 | | #include <vector> |
41 | | |
42 | | #include "agent/utils.h" |
43 | | #include "common/config.h" |
44 | | #include "common/logging.h" |
45 | | #include "common/status.h" |
46 | | #include "common/sync_point.h" |
47 | | #include "gen_cpp/BackendService.h" |
48 | | #include "gen_cpp/FrontendService.h" |
49 | | #include "gen_cpp/Types_constants.h" |
50 | | #include "gen_cpp/internal_service.pb.h" |
51 | | #include "gutil/ref_counted.h" |
52 | | #include "io/fs/file_writer.h" // IWYU pragma: keep |
53 | | #include "io/fs/path.h" |
54 | | #include "olap/base_tablet.h" |
55 | | #include "olap/cold_data_compaction.h" |
56 | | #include "olap/compaction_permit_limiter.h" |
57 | | #include "olap/cumulative_compaction_policy.h" |
58 | | #include "olap/cumulative_compaction_time_series_policy.h" |
59 | | #include "olap/data_dir.h" |
60 | | #include "olap/olap_common.h" |
61 | | #include "olap/rowset/segcompaction.h" |
62 | | #include "olap/schema_change.h" |
63 | | #include "olap/single_replica_compaction.h" |
64 | | #include "olap/storage_engine.h" |
65 | | #include "olap/storage_policy.h" |
66 | | #include "olap/tablet.h" |
67 | | #include "olap/tablet_manager.h" |
68 | | #include "olap/tablet_meta.h" |
69 | | #include "olap/tablet_meta_manager.h" |
70 | | #include "olap/tablet_schema.h" |
71 | | #include "olap/task/engine_publish_version_task.h" |
72 | | #include "olap/task/index_builder.h" |
73 | | #include "runtime/client_cache.h" |
74 | | #include "runtime/memory/cache_manager.h" |
75 | | #include "runtime/memory/global_memory_arbitrator.h" |
76 | | #include "service/brpc.h" |
77 | | #include "service/point_query_executor.h" |
78 | | #include "util/brpc_client_cache.h" |
79 | | #include "util/countdown_latch.h" |
80 | | #include "util/doris_metrics.h" |
81 | | #include "util/mem_info.h" |
82 | | #include "util/thread.h" |
83 | | #include "util/threadpool.h" |
84 | | #include "util/thrift_rpc_helper.h" |
85 | | #include "util/time.h" |
86 | | #include "util/uid_util.h" |
87 | | #include "util/work_thread_pool.hpp" |
88 | | |
89 | | using std::string; |
90 | | |
91 | | namespace doris { |
92 | | |
93 | | using io::Path; |
94 | | |
95 | | // number of running SCHEMA-CHANGE threads |
96 | | volatile uint32_t g_schema_change_active_threads = 0; |
97 | | |
98 | | static const uint64_t DEFAULT_SEED = 104729; |
99 | | static const uint64_t MOD_PRIME = 7652413; |
100 | | |
101 | 53 | static int32_t get_cumu_compaction_threads_num(size_t data_dirs_num) { |
102 | 53 | int32_t threads_num = config::max_cumu_compaction_threads; |
103 | 53 | if (threads_num == -1) { |
104 | 53 | threads_num = data_dirs_num; |
105 | 53 | } |
106 | 53 | threads_num = threads_num <= 0 ? 1 : threads_num; |
107 | 53 | return threads_num; |
108 | 53 | } |
109 | | |
110 | 53 | static int32_t get_base_compaction_threads_num(size_t data_dirs_num) { |
111 | 53 | int32_t threads_num = config::max_base_compaction_threads; |
112 | 53 | if (threads_num == -1) { |
113 | 0 | threads_num = data_dirs_num; |
114 | 0 | } |
115 | 53 | threads_num = threads_num <= 0 ? 1 : threads_num; |
116 | 53 | return threads_num; |
117 | 53 | } |
118 | | |
119 | 53 | static int32_t get_single_replica_compaction_threads_num(size_t data_dirs_num) { |
120 | 53 | int32_t threads_num = config::max_single_replica_compaction_threads; |
121 | 53 | if (threads_num == -1) { |
122 | 53 | threads_num = data_dirs_num; |
123 | 53 | } |
124 | 53 | threads_num = threads_num <= 0 ? 1 : threads_num; |
125 | 53 | return threads_num; |
126 | 53 | } |
127 | | |
// Spawn every background worker owned by StorageEngine: monitor threads,
// the compaction thread pools (bound to the workload group's cgroup CPU
// controller when one is available), checkpoint/cooldown/publish pools and
// their producer threads.
//
// @param wg_sptr  workload group whose cgroup CPU controller, if set, is
//                 attached to the compaction pools (the "g"-prefixed pools).
// @return first error from any Thread::create / ThreadPoolBuilder::build;
//         on success all threads/pools are running.
Status StorageEngine::start_bg_threads(std::shared_ptr<WorkloadGroup> wg_sptr) {
    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "unused_rowset_monitor_thread",
            [this]() { this->_unused_rowset_monitor_thread_callback(); },
            &_unused_rowset_monitor_thread));
    LOG(INFO) << "unused rowset monitor thread started";

    // start thread for monitoring the snapshot and trash folder
    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "garbage_sweeper_thread",
            [this]() { this->_garbage_sweeper_thread_callback(); }, &_garbage_sweeper_thread));
    LOG(INFO) << "garbage sweeper thread started";

    // start thread for monitoring the tablet with io error
    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "disk_stat_monitor_thread",
            [this]() { this->_disk_stat_monitor_thread_callback(); }, &_disk_stat_monitor_thread));
    LOG(INFO) << "disk stat monitor thread started";

    // convert store map to vector
    std::vector<DataDir*> data_dirs;
    for (auto& tmp_store : _store_map) {
        data_dirs.push_back(tmp_store.second);
    }

    // Pool sizes default to one thread per data dir when the configs are -1.
    auto base_compaction_threads = get_base_compaction_threads_num(data_dirs.size());
    auto cumu_compaction_threads = get_cumu_compaction_threads_num(data_dirs.size());
    auto single_replica_compaction_threads =
            get_single_replica_compaction_threads_num(data_dirs.size());

    // Two symmetric branches: with a live cgroup CPU controller the pools get
    // a "g" name prefix and the cgroup attached; otherwise plain pools.
    if (wg_sptr && wg_sptr->get_cgroup_cpu_ctl_wptr().lock()) {
        RETURN_IF_ERROR(ThreadPoolBuilder("gBaseCompactionTaskThreadPool")
                                .set_min_threads(base_compaction_threads)
                                .set_max_threads(base_compaction_threads)
                                .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr())
                                .build(&_base_compaction_thread_pool));
        RETURN_IF_ERROR(ThreadPoolBuilder("gCumuCompactionTaskThreadPool")
                                .set_min_threads(cumu_compaction_threads)
                                .set_max_threads(cumu_compaction_threads)
                                .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr())
                                .build(&_cumu_compaction_thread_pool));
        RETURN_IF_ERROR(ThreadPoolBuilder("gSingleReplicaCompactionTaskThreadPool")
                                .set_min_threads(single_replica_compaction_threads)
                                .set_max_threads(single_replica_compaction_threads)
                                .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr())
                                .build(&_single_replica_compaction_thread_pool));

        if (config::enable_segcompaction) {
            RETURN_IF_ERROR(ThreadPoolBuilder("gSegCompactionTaskThreadPool")
                                    .set_min_threads(config::segcompaction_num_threads)
                                    .set_max_threads(config::segcompaction_num_threads)
                                    .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr())
                                    .build(&_seg_compaction_thread_pool));
        }
        RETURN_IF_ERROR(ThreadPoolBuilder("gColdDataCompactionTaskThreadPool")
                                .set_min_threads(config::cold_data_compaction_thread_num)
                                .set_max_threads(config::cold_data_compaction_thread_num)
                                .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr())
                                .build(&_cold_data_compaction_thread_pool));
    } else {
        RETURN_IF_ERROR(ThreadPoolBuilder("BaseCompactionTaskThreadPool")
                                .set_min_threads(base_compaction_threads)
                                .set_max_threads(base_compaction_threads)
                                .build(&_base_compaction_thread_pool));
        RETURN_IF_ERROR(ThreadPoolBuilder("CumuCompactionTaskThreadPool")
                                .set_min_threads(cumu_compaction_threads)
                                .set_max_threads(cumu_compaction_threads)
                                .build(&_cumu_compaction_thread_pool));
        RETURN_IF_ERROR(ThreadPoolBuilder("SingleReplicaCompactionTaskThreadPool")
                                .set_min_threads(single_replica_compaction_threads)
                                .set_max_threads(single_replica_compaction_threads)
                                .build(&_single_replica_compaction_thread_pool));

        if (config::enable_segcompaction) {
            RETURN_IF_ERROR(ThreadPoolBuilder("SegCompactionTaskThreadPool")
                                    .set_min_threads(config::segcompaction_num_threads)
                                    .set_max_threads(config::segcompaction_num_threads)
                                    .build(&_seg_compaction_thread_pool));
        }
        RETURN_IF_ERROR(ThreadPoolBuilder("ColdDataCompactionTaskThreadPool")
                                .set_min_threads(config::cold_data_compaction_thread_num)
                                .set_max_threads(config::cold_data_compaction_thread_num)
                                .build(&_cold_data_compaction_thread_pool));
    }

    // compaction tasks producer thread
    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "compaction_tasks_producer_thread",
            [this]() { this->_compaction_tasks_producer_callback(); },
            &_compaction_tasks_producer_thread));
    LOG(INFO) << "compaction tasks producer thread started";

    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "_update_replica_infos_thread",
            [this]() { this->_update_replica_infos_callback(); }, &_update_replica_infos_thread));
    LOG(INFO) << "tablet replicas info update thread started";

    // Checkpoint pool: a negative config also means "one thread per data dir".
    int32_t max_checkpoint_thread_num = config::max_meta_checkpoint_threads;
    if (max_checkpoint_thread_num < 0) {
        max_checkpoint_thread_num = data_dirs.size();
    }
    RETURN_IF_ERROR(ThreadPoolBuilder("TabletMetaCheckpointTaskThreadPool")
                            .set_max_threads(max_checkpoint_thread_num)
                            .build(&_tablet_meta_checkpoint_thread_pool));

    RETURN_IF_ERROR(ThreadPoolBuilder("MultiGetTaskThreadPool")
                            .set_min_threads(config::multi_get_max_threads)
                            .set_max_threads(config::multi_get_max_threads)
                            .build(&_bg_multi_get_thread_pool));
    // Note: the lambda captures `data_dirs` by value so the producer owns a
    // stable copy of the dir list for its whole lifetime.
    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "tablet_checkpoint_tasks_producer_thread",
            [this, data_dirs]() { this->_tablet_checkpoint_callback(data_dirs); },
            &_tablet_checkpoint_tasks_producer_thread));
    LOG(INFO) << "tablet checkpoint tasks producer thread started";

    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "tablet_path_check_thread",
            [this]() { this->_tablet_path_check_callback(); }, &_tablet_path_check_thread));
    LOG(INFO) << "tablet path check thread started";

    // path scan and gc thread
    if (config::path_gc_check) {
        // One dedicated gc thread per data dir.
        for (auto data_dir : get_stores()) {
            scoped_refptr<Thread> path_gc_thread;
            RETURN_IF_ERROR(Thread::create(
                    "StorageEngine", "path_gc_thread",
                    [this, data_dir]() { this->_path_gc_thread_callback(data_dir); },
                    &path_gc_thread));
            _path_gc_threads.emplace_back(path_gc_thread);
        }
        LOG(INFO) << "path gc threads started. number:" << get_stores().size();
    }

    RETURN_IF_ERROR(ThreadPoolBuilder("CooldownTaskThreadPool")
                            .set_min_threads(config::cooldown_thread_num)
                            .set_max_threads(config::cooldown_thread_num)
                            .build(&_cooldown_thread_pool));
    LOG(INFO) << "cooldown thread pool started";

    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "cooldown_tasks_producer_thread",
            [this]() { this->_cooldown_tasks_producer_callback(); },
            &_cooldown_tasks_producer_thread));
    LOG(INFO) << "cooldown tasks producer thread started";

    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "remove_unused_remote_files_thread",
            [this]() { this->_remove_unused_remote_files_callback(); },
            &_remove_unused_remote_files_thread));
    LOG(INFO) << "remove unused remote files thread started";

    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "cold_data_compaction_producer_thread",
            [this]() { this->_cold_data_compaction_producer_callback(); },
            &_cold_data_compaction_producer_thread));
    LOG(INFO) << "cold data compaction producer thread started";

    // add tablet publish version thread pool
    RETURN_IF_ERROR(ThreadPoolBuilder("TabletPublishTxnThreadPool")
                            .set_min_threads(config::tablet_publish_txn_max_thread)
                            .set_max_threads(config::tablet_publish_txn_max_thread)
                            .build(&_tablet_publish_txn_thread_pool));

    RETURN_IF_ERROR(Thread::create(
            "StorageEngine", "async_publish_version_thread",
            [this]() { this->_async_publish_callback(); }, &_async_publish_thread));
    LOG(INFO) << "async publish thread started";

    LOG(INFO) << "all storage engine's background threads are started.";
    return Status::OK();
}
299 | | |
300 | 17 | void StorageEngine::_garbage_sweeper_thread_callback() { |
301 | 17 | uint32_t max_interval = config::max_garbage_sweep_interval; |
302 | 17 | uint32_t min_interval = config::min_garbage_sweep_interval; |
303 | | |
304 | 17 | if (!(max_interval >= min_interval && min_interval > 0)) { |
305 | 0 | LOG(WARNING) << "garbage sweep interval config is illegal: [max=" << max_interval |
306 | 0 | << " min=" << min_interval << "]."; |
307 | 0 | min_interval = 1; |
308 | 0 | max_interval = max_interval >= min_interval ? max_interval : min_interval; |
309 | 0 | LOG(INFO) << "force reset garbage sweep interval. " |
310 | 0 | << "max_interval=" << max_interval << ", min_interval=" << min_interval; |
311 | 0 | } |
312 | | |
313 | 17 | const double pi = M_PI; |
314 | 17 | double usage = 1.0; |
315 | | // After the program starts, the first round of cleaning starts after min_interval. |
316 | 17 | uint32_t curr_interval = min_interval; |
317 | 17 | do { |
318 | | // Function properties: |
319 | | // when usage < 0.6, ratio close to 1.(interval close to max_interval) |
320 | | // when usage at [0.6, 0.75], ratio is rapidly decreasing from 0.87 to 0.27. |
321 | | // when usage > 0.75, ratio is slowly decreasing. |
322 | | // when usage > 0.8, ratio close to min_interval. |
323 | | // when usage = 0.88, ratio is approximately 0.0057. |
324 | 17 | double ratio = (1.1 * (pi / 2 - std::atan(usage * 100 / 5 - 14)) - 0.28) / pi; |
325 | 17 | ratio = ratio > 0 ? ratio : 0; |
326 | 17 | auto curr_interval = uint32_t(max_interval * ratio); |
327 | 17 | curr_interval = std::max(curr_interval, min_interval); |
328 | 17 | curr_interval = std::min(curr_interval, max_interval); |
329 | | |
330 | | // start clean trash and update usage. |
331 | 17 | Status res = start_trash_sweep(&usage); |
332 | 17 | if (res.ok() && _need_clean_trash.exchange(false, std::memory_order_relaxed)) { |
333 | 0 | res = start_trash_sweep(&usage, true); |
334 | 0 | } |
335 | | |
336 | 17 | if (!res.ok()) { |
337 | 0 | LOG(WARNING) << "one or more errors occur when sweep trash." |
338 | 0 | << "see previous message for detail. err code=" << res; |
339 | | // do nothing. continue next loop. |
340 | 0 | } |
341 | 17 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(curr_interval))); |
342 | 17 | } |
343 | | |
344 | 17 | void StorageEngine::_disk_stat_monitor_thread_callback() { |
345 | 17 | int32_t interval = config::disk_stat_monitor_interval; |
346 | 25 | do { |
347 | 25 | _start_disk_stat_monitor(); |
348 | | |
349 | 25 | interval = config::disk_stat_monitor_interval; |
350 | 25 | if (interval <= 0) { |
351 | 0 | LOG(WARNING) << "disk_stat_monitor_interval config is illegal: " << interval |
352 | 0 | << ", force set to 1"; |
353 | 0 | interval = 1; |
354 | 0 | } |
355 | 25 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
356 | 17 | } |
357 | | |
358 | 0 | void StorageEngine::check_cumulative_compaction_config() { |
359 | 0 | int64_t promotion_size = config::compaction_promotion_size_mbytes; |
360 | 0 | int64_t promotion_min_size = config::compaction_promotion_min_size_mbytes; |
361 | 0 | int64_t compaction_min_size = config::compaction_min_size_mbytes; |
362 | | |
363 | | // check size_based_promotion_size must be greater than size_based_promotion_min_size and 2 * size_based_compaction_lower_bound_size |
364 | 0 | int64_t should_min_promotion_size = std::max(promotion_min_size, 2 * compaction_min_size); |
365 | |
|
366 | 0 | if (promotion_size < should_min_promotion_size) { |
367 | 0 | promotion_size = should_min_promotion_size; |
368 | 0 | LOG(WARNING) << "the config promotion_size is adjusted to " |
369 | 0 | "promotion_min_size or 2 * " |
370 | 0 | "compaction_min_size " |
371 | 0 | << should_min_promotion_size << ", because size_based_promotion_size is small"; |
372 | 0 | } |
373 | 0 | } |
374 | | |
375 | 17 | void StorageEngine::_unused_rowset_monitor_thread_callback() { |
376 | 17 | int32_t interval = config::unused_rowset_monitor_interval; |
377 | 18 | do { |
378 | 18 | start_delete_unused_rowset(); |
379 | | |
380 | 18 | interval = config::unused_rowset_monitor_interval; |
381 | 18 | if (interval <= 0) { |
382 | 0 | LOG(WARNING) << "unused_rowset_monitor_interval config is illegal: " << interval |
383 | 0 | << ", force set to 1"; |
384 | 0 | interval = 1; |
385 | 0 | } |
386 | 18 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
387 | 17 | } |
388 | | |
389 | 26 | int32_t StorageEngine::_auto_get_interval_by_disk_capacity(DataDir* data_dir) { |
390 | 26 | double disk_used = data_dir->get_usage(0); |
391 | 26 | double remain_used = 1 - disk_used; |
392 | 26 | DCHECK(remain_used >= 0 && remain_used <= 1); |
393 | 26 | DCHECK(config::path_gc_check_interval_second >= 0); |
394 | 26 | int32_t ret = 0; |
395 | 26 | if (remain_used > 0.9) { |
396 | | // if config::path_gc_check_interval_second == 24h |
397 | 0 | ret = config::path_gc_check_interval_second; |
398 | 26 | } else if (remain_used > 0.7) { |
399 | | // 12h |
400 | 0 | ret = config::path_gc_check_interval_second / 2; |
401 | 26 | } else if (remain_used > 0.5) { |
402 | | // 6h |
403 | 0 | ret = config::path_gc_check_interval_second / 4; |
404 | 26 | } else if (remain_used > 0.3) { |
405 | | // 4h |
406 | 26 | ret = config::path_gc_check_interval_second / 6; |
407 | 26 | } else { |
408 | | // 3h |
409 | 0 | ret = config::path_gc_check_interval_second / 8; |
410 | 0 | } |
411 | 26 | return ret; |
412 | 26 | } |
413 | | |
// Per-data-dir garbage collector for orphaned paths. Wakes every 5 seconds
// and runs data_dir->perform_path_gc() whenever the capacity-derived
// interval (_auto_get_interval_by_disk_capacity) has elapsed since the last
// run, so fuller disks are swept more frequently.
void StorageEngine::_path_gc_thread_callback(DataDir* data_dir) {
    LOG(INFO) << "try to start path gc thread!";
    // Unix timestamp (seconds) of the last gc run; 0 forces a run on the
    // first iteration.
    int32_t last_exec_time = 0;
    do {
        int32_t current_time = time(nullptr);

        int32_t interval = _auto_get_interval_by_disk_capacity(data_dir);
        // Test hook: shrink the interval to ~0 and optionally spin gc in a
        // tight loop while the second debug point stays enabled.
        DBUG_EXECUTE_IF("_path_gc_thread_callback.interval.eq.1ms", {
            LOG(INFO) << "debug point change interval eq 1ms";
            interval = 1;
            while (DebugPoints::instance()->is_enable("_path_gc_thread_callback.always.do")) {
                data_dir->perform_path_gc();
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
            }
        });
        if (interval <= 0) {
            LOG(WARNING) << "path gc thread check interval config is illegal:" << interval
                         << " will be forced set to half hour";
            interval = 1800; // 0.5 hour
        }
        if (current_time - last_exec_time >= interval) {
            LOG(INFO) << "try to perform path gc! disk remain [" << 1 - data_dir->get_usage(0)
                      << "] internal [" << interval << "]";
            data_dir->perform_path_gc();
            // Refresh after the sweep so long gc runs don't shorten the gap.
            last_exec_time = time(nullptr);
        }
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(5)));
    LOG(INFO) << "stop path gc thread!";
}
443 | | |
444 | 17 | void StorageEngine::_tablet_checkpoint_callback(const std::vector<DataDir*>& data_dirs) { |
445 | 17 | int64_t interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs; |
446 | 17 | do { |
447 | 18 | for (auto data_dir : data_dirs) { |
448 | 18 | LOG(INFO) << "begin to produce tablet meta checkpoint tasks, data_dir=" |
449 | 18 | << data_dir->path(); |
450 | 18 | auto st = _tablet_meta_checkpoint_thread_pool->submit_func( |
451 | 18 | [data_dir, this]() { _tablet_manager->do_tablet_meta_checkpoint(data_dir); }); |
452 | 18 | if (!st.ok()) { |
453 | 0 | LOG(WARNING) << "submit tablet checkpoint tasks failed."; |
454 | 0 | } |
455 | 18 | } |
456 | 17 | interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs; |
457 | 17 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
458 | 17 | } |
459 | | |
// Periodically verifies that running tablets' on-disk paths still exist,
// checking at most `tablet_path_check_batch_size` tablets per round and
// walking the id space in ascending order across rounds via `last_tablet_id`.
// Two bounded max-heaps (top = largest id) hold the batch candidates:
// `big_id_tablets` for ids above last round's cursor, `small_id_tablets`
// for wrap-around once the end of the id space is reached.
void StorageEngine::_tablet_path_check_callback() {
    struct TabletIdComparator {
        // Makes priority_queue a max-heap on tablet id, so top() is the
        // largest id retained and push+pop keeps the N smallest ids.
        bool operator()(Tablet* a, Tablet* b) { return a->tablet_id() < b->tablet_id(); }
    };

    using TabletQueue = std::priority_queue<Tablet*, std::vector<Tablet*>, TabletIdComparator>;

    // A non-positive interval disables the whole check permanently.
    int64_t interval = config::tablet_path_check_interval_seconds;
    if (interval <= 0) {
        return;
    }

    // Cursor: largest tablet id checked in the previous round.
    int64_t last_tablet_id = 0;
    do {
        // Batch size is re-read each round; non-positive pauses checking
        // for this round without exiting the thread.
        int32_t batch_size = config::tablet_path_check_batch_size;
        if (batch_size <= 0) {
            if (_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))) {
                break;
            }
            continue;
        }

        LOG(INFO) << "start to check tablet path";

        auto all_tablets = _tablet_manager->get_all_tablet(
                [](Tablet* t) { return t->is_used() && t->tablet_state() == TABLET_RUNNING; });

        TabletQueue big_id_tablets;
        TabletQueue small_id_tablets;
        for (auto tablet : all_tablets) {
            auto tablet_id = tablet->tablet_id();
            TabletQueue* belong_tablets = nullptr;
            if (tablet_id > last_tablet_id) {
                // Candidate for this round: admit while under capacity, or
                // when it displaces a larger id already in the heap.
                if (big_id_tablets.size() < batch_size ||
                    big_id_tablets.top()->tablet_id() > tablet_id) {
                    belong_tablets = &big_id_tablets;
                }
            } else if (big_id_tablets.size() < batch_size) {
                // Only collect wrap-around candidates while the big-id heap
                // is not full — otherwise none of them would be used anyway.
                if (small_id_tablets.size() < batch_size ||
                    small_id_tablets.top()->tablet_id() > tablet_id) {
                    belong_tablets = &small_id_tablets;
                }
            }
            if (belong_tablets != nullptr) {
                belong_tablets->push(tablet.get());
                // Evict the largest id to keep the heap bounded at batch_size.
                if (belong_tablets->size() > batch_size) {
                    belong_tablets->pop();
                }
            }
        }

        // Remaining quota to be filled from the wrap-around heap.
        int32_t need_small_id_tablet_size =
                batch_size - static_cast<int32_t>(big_id_tablets.size());

        // Advance the cursor to the largest id checked this round, then
        // drain and check the big-id heap.
        if (!big_id_tablets.empty()) {
            last_tablet_id = big_id_tablets.top()->tablet_id();
        }
        while (!big_id_tablets.empty()) {
            big_id_tablets.top()->check_tablet_path_exists();
            big_id_tablets.pop();
        }

        if (!small_id_tablets.empty() && need_small_id_tablet_size > 0) {
            // Trim to the quota (drops the largest ids first), then the new
            // top is the wrapped cursor for next round.
            while (static_cast<int32_t>(small_id_tablets.size()) > need_small_id_tablet_size) {
                small_id_tablets.pop();
            }

            last_tablet_id = small_id_tablets.top()->tablet_id();
            while (!small_id_tablets.empty()) {
                small_id_tablets.top()->check_tablet_path_exists();
                small_id_tablets.pop();
            }
        }

    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
}
536 | | |
537 | 36 | void StorageEngine::_adjust_compaction_thread_num() { |
538 | 36 | auto base_compaction_threads_num = get_base_compaction_threads_num(_store_map.size()); |
539 | 36 | if (_base_compaction_thread_pool->max_threads() != base_compaction_threads_num) { |
540 | 0 | int old_max_threads = _base_compaction_thread_pool->max_threads(); |
541 | 0 | Status status = _base_compaction_thread_pool->set_max_threads(base_compaction_threads_num); |
542 | 0 | if (status.ok()) { |
543 | 0 | VLOG_NOTICE << "update base compaction thread pool max_threads from " << old_max_threads |
544 | 0 | << " to " << base_compaction_threads_num; |
545 | 0 | } |
546 | 0 | } |
547 | 36 | if (_base_compaction_thread_pool->min_threads() != base_compaction_threads_num) { |
548 | 0 | int old_min_threads = _base_compaction_thread_pool->min_threads(); |
549 | 0 | Status status = _base_compaction_thread_pool->set_min_threads(base_compaction_threads_num); |
550 | 0 | if (status.ok()) { |
551 | 0 | VLOG_NOTICE << "update base compaction thread pool min_threads from " << old_min_threads |
552 | 0 | << " to " << base_compaction_threads_num; |
553 | 0 | } |
554 | 0 | } |
555 | | |
556 | 36 | auto cumu_compaction_threads_num = get_cumu_compaction_threads_num(_store_map.size()); |
557 | 36 | if (_cumu_compaction_thread_pool->max_threads() != cumu_compaction_threads_num) { |
558 | 0 | int old_max_threads = _cumu_compaction_thread_pool->max_threads(); |
559 | 0 | Status status = _cumu_compaction_thread_pool->set_max_threads(cumu_compaction_threads_num); |
560 | 0 | if (status.ok()) { |
561 | 0 | VLOG_NOTICE << "update cumu compaction thread pool max_threads from " << old_max_threads |
562 | 0 | << " to " << cumu_compaction_threads_num; |
563 | 0 | } |
564 | 0 | } |
565 | 36 | if (_cumu_compaction_thread_pool->min_threads() != cumu_compaction_threads_num) { |
566 | 0 | int old_min_threads = _cumu_compaction_thread_pool->min_threads(); |
567 | 0 | Status status = _cumu_compaction_thread_pool->set_min_threads(cumu_compaction_threads_num); |
568 | 0 | if (status.ok()) { |
569 | 0 | VLOG_NOTICE << "update cumu compaction thread pool min_threads from " << old_min_threads |
570 | 0 | << " to " << cumu_compaction_threads_num; |
571 | 0 | } |
572 | 0 | } |
573 | | |
574 | 36 | auto single_replica_compaction_threads_num = |
575 | 36 | get_single_replica_compaction_threads_num(_store_map.size()); |
576 | 36 | if (_single_replica_compaction_thread_pool->max_threads() != |
577 | 36 | single_replica_compaction_threads_num) { |
578 | 0 | int old_max_threads = _single_replica_compaction_thread_pool->max_threads(); |
579 | 0 | Status status = _single_replica_compaction_thread_pool->set_max_threads( |
580 | 0 | single_replica_compaction_threads_num); |
581 | 0 | if (status.ok()) { |
582 | 0 | VLOG_NOTICE << "update single replica compaction thread pool max_threads from " |
583 | 0 | << old_max_threads << " to " << single_replica_compaction_threads_num; |
584 | 0 | } |
585 | 0 | } |
586 | 36 | if (_single_replica_compaction_thread_pool->min_threads() != |
587 | 36 | single_replica_compaction_threads_num) { |
588 | 0 | int old_min_threads = _single_replica_compaction_thread_pool->min_threads(); |
589 | 0 | Status status = _single_replica_compaction_thread_pool->set_min_threads( |
590 | 0 | single_replica_compaction_threads_num); |
591 | 0 | if (status.ok()) { |
592 | 0 | VLOG_NOTICE << "update single replica compaction thread pool min_threads from " |
593 | 0 | << old_min_threads << " to " << single_replica_compaction_threads_num; |
594 | 0 | } |
595 | 0 | } |
596 | 36 | } |
597 | | |
// Producer loop for compaction tasks. Each round it picks a compaction type
// (N cumulative rounds per base round, per
// cumulative_compaction_rounds_for_each_base_compaction_round), asks
// _generate_compaction_tasks for candidate tablets, and submits them.
// Skipped entirely while auto compaction is disabled or process memory
// exceeds the soft limit.
void StorageEngine::_compaction_tasks_producer_callback() {
    LOG(INFO) << "try to start compaction producer process!";

    // Seed the per-data-dir submitted-tablet bookkeeping maps with empty sets.
    std::unordered_set<TabletSharedPtr> tablet_submitted_cumu;
    std::unordered_set<TabletSharedPtr> tablet_submitted_base;
    std::vector<DataDir*> data_dirs;
    for (auto& tmp_store : _store_map) {
        data_dirs.push_back(tmp_store.second);
        _tablet_submitted_cumu_compaction[tmp_store.second] = tablet_submitted_cumu;
        _tablet_submitted_base_compaction[tmp_store.second] = tablet_submitted_base;
    }

    // Round counter driving the cumulative:base scheduling ratio.
    int round = 0;
    CompactionType compaction_type;

    // Used to record the time when the score metric was last updated.
    // The update of the score metric is accompanied by the logic of selecting the tablet.
    // If there is no slot available, the logic of selecting the tablet will be terminated,
    // which causes the score metric update to be terminated.
    // In order to avoid this situation, we need to update the score regularly.
    int64_t last_cumulative_score_update_time = 0;
    int64_t last_base_score_update_time = 0;
    static const int64_t check_score_interval_ms = 5000; // 5 secs

    int64_t interval = config::generate_compaction_tasks_interval_ms;
    do {
        if (!config::disable_auto_compaction &&
            !GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE)) {
            // Pick up runtime changes to the compaction pool size configs.
            _adjust_compaction_thread_num();

            // Force a score refresh at most every check_score_interval_ms
            // per compaction type (see comment block above).
            bool check_score = false;
            int64_t cur_time = UnixMillis();
            if (round < config::cumulative_compaction_rounds_for_each_base_compaction_round) {
                compaction_type = CompactionType::CUMULATIVE_COMPACTION;
                round++;
                if (cur_time - last_cumulative_score_update_time >= check_score_interval_ms) {
                    check_score = true;
                    last_cumulative_score_update_time = cur_time;
                }
            } else {
                compaction_type = CompactionType::BASE_COMPACTION;
                round = 0;
                if (cur_time - last_base_score_update_time >= check_score_interval_ms) {
                    check_score = true;
                    last_base_score_update_time = cur_time;
                }
            }
            std::vector<TabletSharedPtr> tablets_compaction =
                    _generate_compaction_tasks(compaction_type, data_dirs, check_score);
            if (tablets_compaction.size() == 0) {
                // No candidates: sleep until a finishing compaction task
                // signals the condvar (sets _wakeup_producer_flag) or 2s pass.
                std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex);
                _wakeup_producer_flag = 0;
                // It is necessary to wake up the thread on timeout to prevent deadlock
                // in case of no running compaction task.
                _compaction_producer_sleep_cv.wait_for(
                        lock, std::chrono::milliseconds(2000),
                        [this] { return _wakeup_producer_flag == 1; });
                continue;
            }

            // Submit each selected tablet; failures are logged per tablet and
            // do not stop the remaining submissions.
            for (const auto& tablet : tablets_compaction) {
                if (compaction_type == CompactionType::BASE_COMPACTION) {
                    tablet->set_last_base_compaction_schedule_time(UnixMillis());
                }
                Status st = _submit_compaction_task(tablet, compaction_type, false);
                if (!st.ok()) {
                    LOG(WARNING) << "failed to submit compaction task for tablet: "
                                 << tablet->tablet_id() << ", err: " << st;
                }
            }
            interval = config::generate_compaction_tasks_interval_ms;
        } else {
            interval = 5000; // 5s to check disable_auto_compaction
        }
    } while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(interval)));
}
674 | | |
// Background thread: periodically fetches tablet replica locations from the FE
// master so that single replica compaction can pick a peer to fetch rowsets from.
// Only tablets that are used, RUNNING, have auto compaction enabled, and have
// single replica compaction enabled are refreshed. Results are stored in
// _peer_replica_infos (guarded by _peer_replica_infos_mutex); the FE token is
// cached in _token. Runs until _stop_background_threads_latch fires.
void StorageEngine::_update_replica_infos_callback() {
#ifdef GOOGLE_PROFILER
    ProfilerRegisterThread();
#endif
    LOG(INFO) << "start to update replica infos!";

    int64_t interval = config::update_replica_infos_interval_seconds;
    do {
        // Candidate tablets: single replica compaction must be possible on them.
        auto all_tablets = _tablet_manager->get_all_tablet([](Tablet* t) {
            return t->is_used() && t->tablet_state() == TABLET_RUNNING &&
                   !t->tablet_meta()->tablet_schema()->disable_auto_compaction() &&
                   t->tablet_meta()->tablet_schema()->enable_single_replica_compaction();
        });
        // Without a master heartbeat we cannot issue the RPC; back off briefly and retry.
        TMasterInfo* master_info = ExecEnv::GetInstance()->master_info();
        if (master_info == nullptr) {
            LOG(WARNING) << "Have not get FE Master heartbeat yet";
            std::this_thread::sleep_for(std::chrono::seconds(2));
            continue;
        }
        TNetworkAddress master_addr = master_info->network_address;
        if (master_addr.hostname == "" || master_addr.port == 0) {
            LOG(WARNING) << "Have not get FE Master heartbeat yet";
            std::this_thread::sleep_for(std::chrono::seconds(2));
            continue;
        }

        // Query the FE in batches of at most 100 tablet ids per RPC.
        int start = 0;
        int tablet_size = all_tablets.size();
        // The while loop may take a long time, we should skip it when stop
        while (start < tablet_size && _stop_background_threads_latch.count() > 0) {
            int batch_size = std::min(100, tablet_size - start);
            int end = start + batch_size;
            TGetTabletReplicaInfosRequest request;
            TGetTabletReplicaInfosResult result;
            for (int i = start; i < end; i++) {
                request.tablet_ids.emplace_back(all_tablets[i]->tablet_id());
            }
            Status rpc_st = ThriftRpcHelper::rpc<FrontendServiceClient>(
                    master_addr.hostname, master_addr.port,
                    [&request, &result](FrontendServiceConnection& client) {
                        client->getTabletReplicaInfos(result, request);
                    });

            if (!rpc_st.ok()) {
                // NOTE(review): `start` is not advanced here, so a persistently
                // failing batch is retried in a tight loop with no backoff until
                // shutdown — confirm this is intentional.
                LOG(WARNING) << "Failed to get tablet replica infos, encounter rpc failure, "
                                "tablet start: "
                             << start << " end: " << end;
                continue;
            }

            std::unique_lock<std::mutex> lock(_peer_replica_infos_mutex);
            for (const auto& it : result.tablet_replica_infos) {
                auto tablet_id = it.first;
                auto tablet = _tablet_manager->get_tablet(tablet_id);
                if (tablet == nullptr) {
                    VLOG_CRITICAL << "tablet ptr is nullptr";
                    continue;
                }

                VLOG_NOTICE << tablet_id << " tablet has " << it.second.size() << " replicas";
                // Deterministically pick one peer per tablet: the replica whose
                // hashed replica_id has the smallest value modulo MOD_PRIME. Every
                // BE computes the same choice without coordination.
                uint64_t min_modulo = MOD_PRIME;
                TReplicaInfo peer_replica;
                for (const auto& replica : it.second) {
                    int64_t peer_replica_id = replica.replica_id;
                    uint64_t modulo = HashUtil::hash64(&peer_replica_id, sizeof(peer_replica_id),
                                                       DEFAULT_SEED) %
                                      MOD_PRIME;
                    if (modulo < min_modulo) {
                        peer_replica = replica;
                        min_modulo = modulo;
                    }
                }
                VLOG_NOTICE << "tablet " << tablet_id << ", peer replica host is "
                            << peer_replica.host;
                _peer_replica_infos[tablet_id] = peer_replica;
            }
            _token = result.token;
            VLOG_NOTICE << "get tablet replica infos from fe, size is " << end - start
                        << " token = " << result.token;
            start = end;
        }
        interval = config::update_replica_infos_interval_seconds;
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
}
759 | | |
760 | | Status StorageEngine::_submit_single_replica_compaction_task(TabletSharedPtr tablet, |
761 | 0 | CompactionType compaction_type) { |
762 | | // For single replica compaction, the local version to be merged is determined based on the version fetched from the peer replica. |
763 | | // Therefore, it is currently not possible to determine whether it should be a base compaction or cumulative compaction. |
764 | | // As a result, the tablet needs to be pushed to both the _tablet_submitted_cumu_compaction and the _tablet_submitted_base_compaction simultaneously. |
765 | 0 | bool already_exist = |
766 | 0 | _push_tablet_into_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION); |
767 | 0 | if (already_exist) { |
768 | 0 | return Status::AlreadyExist<false>( |
769 | 0 | "compaction task has already been submitted, tablet_id={}", tablet->tablet_id()); |
770 | 0 | } |
771 | | |
772 | 0 | already_exist = _push_tablet_into_submitted_compaction(tablet, CompactionType::BASE_COMPACTION); |
773 | 0 | if (already_exist) { |
774 | 0 | _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION); |
775 | 0 | return Status::AlreadyExist<false>( |
776 | 0 | "compaction task has already been submitted, tablet_id={}", tablet->tablet_id()); |
777 | 0 | } |
778 | | |
779 | 0 | auto compaction = std::make_shared<SingleReplicaCompaction>(tablet, compaction_type); |
780 | 0 | DorisMetrics::instance()->single_compaction_request_total->increment(1); |
781 | 0 | auto st = compaction->prepare_compact(); |
782 | |
|
783 | 0 | auto clean_single_replica_compaction = [tablet, this]() { |
784 | 0 | _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION); |
785 | 0 | _pop_tablet_from_submitted_compaction(tablet, CompactionType::BASE_COMPACTION); |
786 | 0 | }; |
787 | |
|
788 | 0 | if (!st.ok()) { |
789 | 0 | clean_single_replica_compaction(); |
790 | 0 | if (!st.is<ErrorCode::CUMULATIVE_NO_SUITABLE_VERSION>()) { |
791 | 0 | LOG(WARNING) << "failed to prepare single replica compaction, tablet_id=" |
792 | 0 | << tablet->tablet_id() << " : " << st; |
793 | 0 | return st; |
794 | 0 | } |
795 | 0 | return Status::OK(); // No suitable version, regard as OK |
796 | 0 | } |
797 | | |
798 | 0 | auto submit_st = _single_replica_compaction_thread_pool->submit_func( |
799 | 0 | [tablet, compaction = std::move(compaction), |
800 | 0 | clean_single_replica_compaction]() mutable { |
801 | 0 | tablet->execute_single_replica_compaction(*compaction); |
802 | 0 | clean_single_replica_compaction(); |
803 | 0 | }); |
804 | 0 | if (!submit_st.ok()) { |
805 | 0 | clean_single_replica_compaction(); |
806 | 0 | return Status::InternalError( |
807 | 0 | "failed to submit single replica compaction task to thread pool, " |
808 | 0 | "tablet_id={}", |
809 | 0 | tablet->tablet_id()); |
810 | 0 | } |
811 | 0 | return Status::OK(); |
812 | 0 | } |
813 | | |
814 | | void StorageEngine::get_tablet_rowset_versions(const PGetTabletVersionsRequest* request, |
815 | 0 | PGetTabletVersionsResponse* response) { |
816 | 0 | TabletSharedPtr tablet = _tablet_manager->get_tablet(request->tablet_id()); |
817 | 0 | if (tablet == nullptr) { |
818 | 0 | response->mutable_status()->set_status_code(TStatusCode::CANCELLED); |
819 | 0 | return; |
820 | 0 | } |
821 | 0 | std::vector<Version> local_versions = tablet->get_all_local_versions(); |
822 | 0 | for (const auto& local_version : local_versions) { |
823 | 0 | auto version = response->add_versions(); |
824 | 0 | version->set_first(local_version.first); |
825 | 0 | version->set_second(local_version.second); |
826 | 0 | } |
827 | 0 | response->mutable_status()->set_status_code(0); |
828 | 0 | } |
829 | | |
830 | | int StorageEngine::_get_executing_compaction_num( |
831 | 75 | std::unordered_set<TabletSharedPtr>& compaction_tasks) { |
832 | 75 | int num = 0; |
833 | 75 | for (const auto& task : compaction_tasks) { |
834 | 10 | if (task->compaction_stage == CompactionStage::EXECUTING) { |
835 | 2 | num++; |
836 | 2 | } |
837 | 10 | } |
838 | 75 | return num; |
839 | 75 | } |
840 | | |
841 | | bool need_generate_compaction_tasks(int task_cnt_per_disk, int thread_per_disk, |
842 | 74 | CompactionType compaction_type, bool all_base) { |
843 | 74 | if (task_cnt_per_disk >= thread_per_disk) { |
844 | | // Return if no available slot |
845 | 0 | return false; |
846 | 74 | } else if (task_cnt_per_disk >= thread_per_disk - 1) { |
847 | | // Only one slot left, check if it can be assigned to base compaction task. |
848 | 0 | if (compaction_type == CompactionType::BASE_COMPACTION) { |
849 | 0 | if (all_base) { |
850 | 0 | return false; |
851 | 0 | } |
852 | 0 | } |
853 | 0 | } |
854 | 74 | return true; |
855 | 74 | } |
856 | | |
857 | 37 | int get_concurrent_per_disk(int max_score, int thread_per_disk) { |
858 | 37 | if (!config::enable_compaction_priority_scheduling) { |
859 | 0 | return thread_per_disk; |
860 | 0 | } |
861 | | |
862 | 37 | double load_average = 0; |
863 | 37 | if (DorisMetrics::instance()->system_metrics() != nullptr) { |
864 | 0 | load_average = DorisMetrics::instance()->system_metrics()->get_load_average_1_min(); |
865 | 0 | } |
866 | 37 | int num_cores = doris::CpuInfo::num_cores(); |
867 | 37 | bool cpu_usage_high = load_average > num_cores * 0.8; |
868 | | |
869 | 37 | auto process_memory_usage = doris::GlobalMemoryArbitrator::process_memory_usage(); |
870 | 37 | bool memory_usage_high = process_memory_usage > MemInfo::soft_mem_limit() * 0.8; |
871 | | |
872 | 37 | if (max_score <= config::low_priority_compaction_score_threshold && |
873 | 37 | (cpu_usage_high || memory_usage_high)) { |
874 | 0 | return config::low_priority_compaction_task_num_per_disk; |
875 | 0 | } |
876 | | |
877 | 37 | return thread_per_disk; |
878 | 37 | } |
879 | | |
// Picks the tablets that should be compacted next for the given compaction type.
// Walks every data dir (in random order, for fairness), checks per-disk slot
// availability, and asks the tablet manager for the best candidates. When
// `check_score` is true the candidate search still runs even without a free
// slot, purely to refresh the max-compaction-score metrics.
// Returns the tablets for which a compaction task should actually be submitted.
std::vector<TabletSharedPtr> StorageEngine::_generate_compaction_tasks(
        CompactionType compaction_type, std::vector<DataDir*>& data_dirs, bool check_score) {
    _update_cumulative_compaction_policy();
    std::vector<TabletSharedPtr> tablets_compaction;
    uint32_t max_compaction_score = 0;

    // Shuffle so no data dir is systematically favored across rounds.
    std::random_device rd;
    std::mt19937 g(rd());
    std::shuffle(data_dirs.begin(), data_dirs.end(), g);

    // Copy _tablet_submitted_xxx_compaction map so that we don't need to hold _tablet_submitted_compaction_mutex
    // when traversing the data dir
    std::map<DataDir*, std::unordered_set<TabletSharedPtr>> copied_cumu_map;
    std::map<DataDir*, std::unordered_set<TabletSharedPtr>> copied_base_map;
    {
        std::unique_lock<std::mutex> lock(_tablet_submitted_compaction_mutex);
        copied_cumu_map = _tablet_submitted_cumu_compaction;
        copied_base_map = _tablet_submitted_base_compaction;
    }
    for (auto* data_dir : data_dirs) {
        bool need_pick_tablet = true;
        // We need to reserve at least one Slot for cumulative compaction.
        // So when there is only one Slot, we have to judge whether there is a cumulative compaction
        // in the current submitted tasks.
        // If so, the last Slot can be assigned to Base compaction,
        // otherwise, this Slot needs to be reserved for cumulative compaction.
        int count = _get_executing_compaction_num(copied_cumu_map[data_dir]) +
                    _get_executing_compaction_num(copied_base_map[data_dir]);
        int thread_per_disk = data_dir->is_ssd_disk() ? config::compaction_task_num_per_fast_disk
                                                      : config::compaction_task_num_per_disk;

        need_pick_tablet = need_generate_compaction_tasks(count, thread_per_disk, compaction_type,
                                                          copied_cumu_map[data_dir].empty());
        if (!need_pick_tablet && !check_score) {
            continue;
        }

        // Even if need_pick_tablet is false, we still need to call find_best_tablets_to_compaction(),
        // So that we can update the max_compaction_score metric.
        if (!data_dir->reach_capacity_limit(0)) {
            uint32_t disk_max_score = 0;
            std::vector<TabletSharedPtr> tablets = _tablet_manager->find_best_tablets_to_compaction(
                    compaction_type, data_dir,
                    compaction_type == CompactionType::CUMULATIVE_COMPACTION
                            ? copied_cumu_map[data_dir]
                            : copied_base_map[data_dir],
                    &disk_max_score, _cumulative_compaction_policies);
            // Re-evaluate with the (possibly throttled) concurrency derived from
            // the disk's score and current system pressure.
            int concurrent_num = get_concurrent_per_disk(disk_max_score, thread_per_disk);
            need_pick_tablet = need_generate_compaction_tasks(
                    count, concurrent_num, compaction_type, copied_cumu_map[data_dir].empty());
            for (const auto& tablet : tablets) {
                if (tablet != nullptr) {
                    if (need_pick_tablet) {
                        tablets_compaction.emplace_back(tablet);
                    }
                    max_compaction_score = std::max(max_compaction_score, disk_max_score);
                }
            }
        }
    }

    // Publish the highest score seen across all disks for this compaction type.
    if (max_compaction_score > 0) {
        if (compaction_type == CompactionType::BASE_COMPACTION) {
            DorisMetrics::instance()->tablet_base_max_compaction_score->set_value(
                    max_compaction_score);
        } else {
            DorisMetrics::instance()->tablet_cumulative_max_compaction_score->set_value(
                    max_compaction_score);
        }
    }
    return tablets_compaction;
}
952 | | |
953 | 36 | void StorageEngine::_update_cumulative_compaction_policy() { |
954 | 36 | if (_cumulative_compaction_policies.empty()) { |
955 | 17 | _cumulative_compaction_policies[CUMULATIVE_SIZE_BASED_POLICY] = |
956 | 17 | CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy( |
957 | 17 | CUMULATIVE_SIZE_BASED_POLICY); |
958 | 17 | _cumulative_compaction_policies[CUMULATIVE_TIME_SERIES_POLICY] = |
959 | 17 | CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy( |
960 | 17 | CUMULATIVE_TIME_SERIES_POLICY); |
961 | 17 | } |
962 | 36 | } |
963 | | |
964 | | bool StorageEngine::_push_tablet_into_submitted_compaction(TabletSharedPtr tablet, |
965 | 10 | CompactionType compaction_type) { |
966 | 10 | std::unique_lock<std::mutex> lock(_tablet_submitted_compaction_mutex); |
967 | 10 | bool already_existed = false; |
968 | 10 | switch (compaction_type) { |
969 | 10 | case CompactionType::CUMULATIVE_COMPACTION: |
970 | 10 | already_existed = |
971 | 10 | !(_tablet_submitted_cumu_compaction[tablet->data_dir()].insert(tablet).second); |
972 | 10 | break; |
973 | 0 | case CompactionType::BASE_COMPACTION: |
974 | 0 | already_existed = |
975 | 0 | !(_tablet_submitted_base_compaction[tablet->data_dir()].insert(tablet).second); |
976 | 0 | break; |
977 | 0 | case CompactionType::FULL_COMPACTION: |
978 | 0 | already_existed = |
979 | 0 | !(_tablet_submitted_full_compaction[tablet->data_dir()].insert(tablet).second); |
980 | 0 | break; |
981 | 10 | } |
982 | 10 | return already_existed; |
983 | 10 | } |
984 | | |
985 | | void StorageEngine::_pop_tablet_from_submitted_compaction(TabletSharedPtr tablet, |
986 | 2 | CompactionType compaction_type) { |
987 | 2 | std::unique_lock<std::mutex> lock(_tablet_submitted_compaction_mutex); |
988 | 2 | int removed = 0; |
989 | 2 | switch (compaction_type) { |
990 | 2 | case CompactionType::CUMULATIVE_COMPACTION: |
991 | 2 | removed = _tablet_submitted_cumu_compaction[tablet->data_dir()].erase(tablet); |
992 | 2 | break; |
993 | 0 | case CompactionType::BASE_COMPACTION: |
994 | 0 | removed = _tablet_submitted_base_compaction[tablet->data_dir()].erase(tablet); |
995 | 0 | break; |
996 | 0 | case CompactionType::FULL_COMPACTION: |
997 | 0 | removed = _tablet_submitted_full_compaction[tablet->data_dir()].erase(tablet); |
998 | 0 | break; |
999 | 2 | } |
1000 | | |
1001 | 2 | if (removed == 1) { |
1002 | 2 | std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex); |
1003 | 2 | _wakeup_producer_flag = 1; |
1004 | 2 | _compaction_producer_sleep_cv.notify_one(); |
1005 | 2 | } |
1006 | 2 | } |
1007 | | |
1008 | | Status StorageEngine::_submit_compaction_task(TabletSharedPtr tablet, |
1009 | 10 | CompactionType compaction_type, bool force) { |
1010 | 10 | if (tablet->tablet_meta()->tablet_schema()->enable_single_replica_compaction() && |
1011 | 10 | should_fetch_from_peer(tablet->tablet_id())) { |
1012 | 0 | VLOG_CRITICAL << "start to submit single replica compaction task for tablet: " |
1013 | 0 | << tablet->tablet_id(); |
1014 | 0 | Status st = _submit_single_replica_compaction_task(tablet, compaction_type); |
1015 | 0 | if (!st.ok()) { |
1016 | 0 | LOG(WARNING) << "failed to submit single replica compaction task for tablet: " |
1017 | 0 | << tablet->tablet_id() << ", err: " << st; |
1018 | 0 | } |
1019 | |
|
1020 | 0 | return Status::OK(); |
1021 | 0 | } |
1022 | 10 | bool already_exist = _push_tablet_into_submitted_compaction(tablet, compaction_type); |
1023 | 10 | if (already_exist) { |
1024 | 0 | return Status::AlreadyExist<false>( |
1025 | 0 | "compaction task has already been submitted, tablet_id={}, compaction_type={}.", |
1026 | 0 | tablet->tablet_id(), compaction_type); |
1027 | 0 | } |
1028 | 10 | std::shared_ptr<Compaction> compaction; |
1029 | 10 | tablet->compaction_stage = CompactionStage::PENDING; |
1030 | 10 | int64_t permits = 0; |
1031 | 10 | Status st = Tablet::prepare_compaction_and_calculate_permits(compaction_type, tablet, |
1032 | 10 | compaction, permits); |
1033 | 10 | if (st.ok() && permits > 0) { |
1034 | 10 | if (!force) { |
1035 | 10 | _permit_limiter.request(permits); |
1036 | 10 | } |
1037 | 10 | std::unique_ptr<ThreadPool>& thread_pool = |
1038 | 10 | (compaction_type == CompactionType::CUMULATIVE_COMPACTION) |
1039 | 10 | ? _cumu_compaction_thread_pool |
1040 | 10 | : _base_compaction_thread_pool; |
1041 | 10 | VLOG_CRITICAL << "compaction thread pool. type: " |
1042 | 0 | << (compaction_type == CompactionType::CUMULATIVE_COMPACTION ? "CUMU" |
1043 | 0 | : "BASE") |
1044 | 0 | << ", num_threads: " << thread_pool->num_threads() |
1045 | 0 | << ", num_threads_pending_start: " << thread_pool->num_threads_pending_start() |
1046 | 0 | << ", num_active_threads: " << thread_pool->num_active_threads() |
1047 | 0 | << ", max_threads: " << thread_pool->max_threads() |
1048 | 0 | << ", min_threads: " << thread_pool->min_threads() |
1049 | 0 | << ", num_total_queued_tasks: " << thread_pool->get_queue_size(); |
1050 | 10 | auto st = thread_pool->submit_func([tablet, compaction = std::move(compaction), |
1051 | 10 | compaction_type, permits, force, this]() { |
1052 | 2 | Defer defer {[&]() { |
1053 | 2 | if (!force) { |
1054 | 2 | _permit_limiter.release(permits); |
1055 | 2 | } |
1056 | 2 | _pop_tablet_from_submitted_compaction(tablet, compaction_type); |
1057 | 2 | tablet->compaction_stage = CompactionStage::NOT_SCHEDULED; |
1058 | 2 | }}; |
1059 | 2 | if (!tablet->can_do_compaction(tablet->data_dir()->path_hash(), compaction_type)) { |
1060 | 0 | LOG(INFO) << "Tablet state has been changed, no need to begin this compaction " |
1061 | 0 | "task, tablet_id=" |
1062 | 0 | << tablet->tablet_id() << ", tablet_state=" << tablet->tablet_state(); |
1063 | 0 | return; |
1064 | 0 | } |
1065 | 2 | tablet->compaction_stage = CompactionStage::EXECUTING; |
1066 | 2 | TEST_SYNC_POINT_RETURN_WITH_VOID("olap_server::execute_compaction"); |
1067 | 0 | tablet->execute_compaction(*compaction); |
1068 | 0 | }); |
1069 | 10 | if (!st.ok()) { |
1070 | 0 | if (!force) { |
1071 | 0 | _permit_limiter.release(permits); |
1072 | 0 | } |
1073 | 0 | _pop_tablet_from_submitted_compaction(tablet, compaction_type); |
1074 | 0 | tablet->compaction_stage = CompactionStage::NOT_SCHEDULED; |
1075 | 0 | return Status::InternalError( |
1076 | 0 | "failed to submit compaction task to thread pool, " |
1077 | 0 | "tablet_id={}, compaction_type={}.", |
1078 | 0 | tablet->tablet_id(), compaction_type); |
1079 | 0 | } |
1080 | 10 | return Status::OK(); |
1081 | 10 | } else { |
1082 | 0 | _pop_tablet_from_submitted_compaction(tablet, compaction_type); |
1083 | 0 | tablet->compaction_stage = CompactionStage::NOT_SCHEDULED; |
1084 | 0 | if (!st.ok()) { |
1085 | 0 | return Status::InternalError( |
1086 | 0 | "failed to prepare compaction task and calculate permits, " |
1087 | 0 | "tablet_id={}, compaction_type={}, " |
1088 | 0 | "permit={}, current_permit={}, status={}", |
1089 | 0 | tablet->tablet_id(), compaction_type, permits, _permit_limiter.usage(), |
1090 | 0 | st.to_string()); |
1091 | 0 | } |
1092 | 0 | return st; |
1093 | 0 | } |
1094 | 10 | } |
1095 | | |
1096 | | Status StorageEngine::submit_compaction_task(TabletSharedPtr tablet, CompactionType compaction_type, |
1097 | 0 | bool force, bool eager) { |
1098 | 0 | if (!eager) { |
1099 | 0 | DCHECK(compaction_type == CompactionType::BASE_COMPACTION || |
1100 | 0 | compaction_type == CompactionType::CUMULATIVE_COMPACTION); |
1101 | 0 | std::map<DataDir*, std::unordered_set<TabletSharedPtr>> copied_cumu_map; |
1102 | 0 | std::map<DataDir*, std::unordered_set<TabletSharedPtr>> copied_base_map; |
1103 | 0 | { |
1104 | 0 | std::unique_lock<std::mutex> lock(_tablet_submitted_compaction_mutex); |
1105 | 0 | copied_cumu_map = _tablet_submitted_cumu_compaction; |
1106 | 0 | copied_base_map = _tablet_submitted_base_compaction; |
1107 | 0 | } |
1108 | 0 | auto stores = get_stores(); |
1109 | |
|
1110 | 0 | auto busy_pred = [&copied_cumu_map, &copied_base_map, compaction_type, |
1111 | 0 | this](auto* data_dir) { |
1112 | 0 | int count = _get_executing_compaction_num(copied_base_map[data_dir]) + |
1113 | 0 | _get_executing_compaction_num(copied_cumu_map[data_dir]); |
1114 | 0 | int paral = data_dir->is_ssd_disk() ? config::compaction_task_num_per_fast_disk |
1115 | 0 | : config::compaction_task_num_per_disk; |
1116 | 0 | bool all_base = copied_cumu_map[data_dir].empty(); |
1117 | 0 | return need_generate_compaction_tasks(count, paral, compaction_type, all_base); |
1118 | 0 | }; |
1119 | |
|
1120 | 0 | bool is_busy = std::none_of(stores.begin(), stores.end(), busy_pred); |
1121 | 0 | if (is_busy) { |
1122 | 0 | LOG_EVERY_N(WARNING, 100) |
1123 | 0 | << "Too busy to submit a compaction task, tablet=" << tablet->get_table_id(); |
1124 | 0 | return Status::OK(); |
1125 | 0 | } |
1126 | 0 | } |
1127 | 0 | _update_cumulative_compaction_policy(); |
1128 | | // alter table tableName set ("compaction_policy"="time_series") |
1129 | | // if atler table's compaction policy, we need to modify tablet compaction policy shared ptr |
1130 | 0 | if (tablet->get_cumulative_compaction_policy() == nullptr || |
1131 | 0 | tablet->get_cumulative_compaction_policy()->name() != |
1132 | 0 | tablet->tablet_meta()->compaction_policy()) { |
1133 | 0 | tablet->set_cumulative_compaction_policy( |
1134 | 0 | _cumulative_compaction_policies.at(tablet->tablet_meta()->compaction_policy())); |
1135 | 0 | } |
1136 | 0 | tablet->set_skip_compaction(false); |
1137 | 0 | return _submit_compaction_task(tablet, compaction_type, force); |
1138 | 0 | } |
1139 | | |
1140 | | Status StorageEngine::_handle_seg_compaction(std::shared_ptr<SegcompactionWorker> worker, |
1141 | | SegCompactionCandidatesSharedPtr segments, |
1142 | 11 | uint64_t submission_time) { |
1143 | | // note: be aware that worker->_writer maybe released when the task is cancelled |
1144 | 11 | uint64_t exec_queue_time = GetCurrentTimeMicros() - submission_time; |
1145 | 11 | LOG(INFO) << "segcompaction thread pool queue time(ms): " << exec_queue_time / 1000; |
1146 | 11 | worker->compact_segments(segments); |
1147 | | // return OK here. error will be reported via BetaRowsetWriter::_segcompaction_status |
1148 | 11 | return Status::OK(); |
1149 | 11 | } |
1150 | | |
1151 | | Status StorageEngine::submit_seg_compaction_task(std::shared_ptr<SegcompactionWorker> worker, |
1152 | 11 | SegCompactionCandidatesSharedPtr segments) { |
1153 | 11 | uint64_t submission_time = GetCurrentTimeMicros(); |
1154 | 11 | return _seg_compaction_thread_pool->submit_func(std::bind<void>( |
1155 | 11 | &StorageEngine::_handle_seg_compaction, this, worker, segments, submission_time)); |
1156 | 11 | } |
1157 | | |
1158 | 0 | Status StorageEngine::process_index_change_task(const TAlterInvertedIndexReq& request) { |
1159 | 0 | auto tablet_id = request.tablet_id; |
1160 | 0 | TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id); |
1161 | 0 | if (tablet == nullptr) { |
1162 | 0 | LOG(WARNING) << "tablet: " << tablet_id << " not exist"; |
1163 | 0 | return Status::InternalError("tablet not exist, tablet_id={}.", tablet_id); |
1164 | 0 | } |
1165 | | |
1166 | 0 | IndexBuilderSharedPtr index_builder = std::make_shared<IndexBuilder>( |
1167 | 0 | tablet, request.columns, request.alter_inverted_indexes, request.is_drop_op); |
1168 | 0 | RETURN_IF_ERROR(_handle_index_change(index_builder)); |
1169 | 0 | return Status::OK(); |
1170 | 0 | } |
1171 | | |
1172 | 0 | Status StorageEngine::_handle_index_change(IndexBuilderSharedPtr index_builder) { |
1173 | 0 | RETURN_IF_ERROR(index_builder->init()); |
1174 | 0 | RETURN_IF_ERROR(index_builder->do_build_inverted_index()); |
1175 | 0 | return Status::OK(); |
1176 | 0 | } |
1177 | | |
// Background thread: periodically collects tablets eligible for cooldown
// (moving data to remote storage) and submits one task per tablet/rowset pair
// to the cooldown thread pool, higher-priority tablets first. Tablets already
// being cooled down, not RUNNING, or that recently failed a follow-cooldown are
// skipped. Runs until _stop_background_threads_latch fires.
void StorageEngine::_cooldown_tasks_producer_callback() {
    int64_t interval = config::generate_cooldown_task_interval_sec;
    // the cooldown replica may be slow to upload it's meta file, so we should wait
    // until it has done uploaded
    int64_t skip_failed_interval = interval * 10;
    do {
        // these tables are ordered by priority desc
        std::vector<TabletSharedPtr> tablets;
        std::vector<RowsetSharedPtr> rowsets;
        // TODO(luwei) : a more efficient way to get cooldown tablets
        auto cur_time = time(nullptr);
        // we should skip all the tablets which are not running and those pending to do cooldown
        // also tablets once failed to do follow cooldown
        auto skip_tablet = [this, skip_failed_interval,
                            cur_time](const TabletSharedPtr& tablet) -> bool {
            bool is_skip =
                    cur_time - tablet->last_failed_follow_cooldown_time() < skip_failed_interval ||
                    TABLET_RUNNING != tablet->tablet_state();
            if (is_skip) {
                return is_skip;
            }
            // Also skip tablets that already have a cooldown task in flight.
            std::lock_guard<std::mutex> lock(_running_cooldown_mutex);
            return _running_cooldown_tablets.find(tablet->tablet_id()) !=
                   _running_cooldown_tablets.end();
        };
        _tablet_manager->get_cooldown_tablets(&tablets, &rowsets, std::move(skip_tablet));
        LOG(INFO) << "cooldown producer get tablet num: " << tablets.size();
        // tablets[0] is highest priority, so it gets the largest priority value.
        int max_priority = tablets.size();
        int index = 0;
        for (const auto& tablet : tablets) {
            {
                // Mark as in-flight BEFORE submitting, so the next producer round
                // does not pick the same tablet again.
                std::lock_guard<std::mutex> lock(_running_cooldown_mutex);
                _running_cooldown_tablets.insert(tablet->tablet_id());
            }
            PriorityThreadPool::Task task;
            // rowsets[i] pairs with tablets[i] (same order from get_cooldown_tablets).
            RowsetSharedPtr rowset = std::move(rowsets[index++]);
            task.work_function = [tablet, rowset, task_size = tablets.size(), this]() {
                Status st = tablet->cooldown(rowset);
                {
                    std::lock_guard<std::mutex> lock(_running_cooldown_mutex);
                    _running_cooldown_tablets.erase(tablet->tablet_id());
                }
                if (!st.ok()) {
                    LOG(WARNING) << "failed to cooldown, tablet: " << tablet->tablet_id()
                                 << " err: " << st;
                } else {
                    LOG(INFO) << "succeed to cooldown, tablet: " << tablet->tablet_id()
                              << " cooldown progress ("
                              << task_size - _cooldown_thread_pool->get_queue_size() << "/"
                              << task_size << ")";
                }
            };
            task.priority = max_priority--;
            bool submited = _cooldown_thread_pool->offer(std::move(task));

            if (!submited) {
                // NOTE(review): on failed offer the tablet stays in
                // _running_cooldown_tablets until a later successful run erases it —
                // confirm this cannot permanently block a tablet's cooldown.
                LOG(INFO) << "failed to submit cooldown task";
            }
        }
    } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval)));
}
1239 | | |
1240 | 17 | void StorageEngine::_remove_unused_remote_files_callback() { |
1241 | 17 | while (!_stop_background_threads_latch.wait_for( |
1242 | 17 | std::chrono::seconds(config::remove_unused_remote_files_interval_sec))) { |
1243 | 0 | LOG(INFO) << "begin to remove unused remote files"; |
1244 | 0 | do_remove_unused_remote_files(); |
1245 | 0 | } |
1246 | 17 | } |
1247 | | |
1248 | 0 | void StorageEngine::do_remove_unused_remote_files() { |
1249 | 0 | auto tablets = tablet_manager()->get_all_tablet([](Tablet* t) { |
1250 | 0 | return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() && |
1251 | 0 | t->tablet_state() == TABLET_RUNNING && |
1252 | 0 | t->cooldown_conf_unlocked().cooldown_replica_id == t->replica_id(); |
1253 | 0 | }); |
1254 | 0 | TConfirmUnusedRemoteFilesRequest req; |
1255 | 0 | req.__isset.confirm_list = true; |
1256 | | // tablet_id -> [fs, unused_remote_files] |
1257 | 0 | using unused_remote_files_buffer_t = std::unordered_map< |
1258 | 0 | int64_t, std::pair<std::shared_ptr<io::RemoteFileSystem>, std::vector<io::FileInfo>>>; |
1259 | 0 | unused_remote_files_buffer_t buffer; |
1260 | 0 | int64_t num_files_in_buffer = 0; |
1261 | | // assume a filename is 0.1KB, buffer size should not larger than 100MB |
1262 | 0 | constexpr int64_t max_files_in_buffer = 1000000; |
1263 | |
|
1264 | 0 | auto calc_unused_remote_files = [&req, &buffer, &num_files_in_buffer, this](Tablet* t) { |
1265 | 0 | auto storage_policy = get_storage_policy(t->storage_policy_id()); |
1266 | 0 | if (storage_policy == nullptr) { |
1267 | 0 | LOG(WARNING) << "could not find storage_policy, storage_policy_id=" |
1268 | 0 | << t->storage_policy_id(); |
1269 | 0 | return; |
1270 | 0 | } |
1271 | 0 | auto resource = get_storage_resource(storage_policy->resource_id); |
1272 | 0 | auto dest_fs = std::static_pointer_cast<io::RemoteFileSystem>(resource.fs); |
1273 | 0 | if (dest_fs == nullptr) { |
1274 | 0 | LOG(WARNING) << "could not find resource, resouce_id=" << storage_policy->resource_id; |
1275 | 0 | return; |
1276 | 0 | } |
1277 | 0 | DCHECK(atol(dest_fs->id().c_str()) == storage_policy->resource_id); |
1278 | 0 | DCHECK(dest_fs->type() != io::FileSystemType::LOCAL); |
1279 | |
|
1280 | 0 | std::shared_ptr<io::RemoteFileSystem> fs; |
1281 | 0 | auto st = get_remote_file_system(t->storage_policy_id(), &fs); |
1282 | 0 | if (!st.ok()) { |
1283 | 0 | LOG(WARNING) << "encounter error when remove unused remote files, tablet_id=" |
1284 | 0 | << t->tablet_id() << " : " << st; |
1285 | 0 | return; |
1286 | 0 | } |
1287 | | |
1288 | 0 | std::vector<io::FileInfo> files; |
1289 | | // FIXME(plat1ko): What if user reset resource in storage policy to another resource? |
1290 | | // Maybe we should also list files in previously uploaded resources. |
1291 | 0 | bool exists = true; |
1292 | 0 | st = dest_fs->list(io::Path(remote_tablet_path(t->tablet_id())), true, &files, &exists); |
1293 | 0 | if (!st.ok()) { |
1294 | 0 | LOG(WARNING) << "encounter error when remove unused remote files, tablet_id=" |
1295 | 0 | << t->tablet_id() << " : " << st; |
1296 | 0 | return; |
1297 | 0 | } |
1298 | 0 | if (!exists || files.empty()) { |
1299 | 0 | return; |
1300 | 0 | } |
1301 | | // get all cooldowned rowsets |
1302 | 0 | RowsetIdUnorderedSet cooldowned_rowsets; |
1303 | 0 | UniqueId cooldown_meta_id; |
1304 | 0 | { |
1305 | 0 | std::shared_lock rlock(t->get_header_lock()); |
1306 | 0 | for (auto&& rs_meta : t->tablet_meta()->all_rs_metas()) { |
1307 | 0 | if (!rs_meta->is_local()) { |
1308 | 0 | cooldowned_rowsets.insert(rs_meta->rowset_id()); |
1309 | 0 | } |
1310 | 0 | } |
1311 | 0 | if (cooldowned_rowsets.empty()) { |
1312 | 0 | return; |
1313 | 0 | } |
1314 | 0 | cooldown_meta_id = t->tablet_meta()->cooldown_meta_id(); |
1315 | 0 | } |
1316 | 0 | auto [cooldown_term, cooldown_replica_id] = t->cooldown_conf(); |
1317 | 0 | if (cooldown_replica_id != t->replica_id()) { |
1318 | 0 | return; |
1319 | 0 | } |
1320 | | // {cooldown_replica_id}.{cooldown_term}.meta |
1321 | 0 | std::string remote_meta_path = |
1322 | 0 | fmt::format("{}.{}.meta", cooldown_replica_id, cooldown_term); |
1323 | | // filter out the paths that should be reserved |
1324 | 0 | auto filter = [&, this](io::FileInfo& info) { |
1325 | 0 | std::string_view filename = info.file_name; |
1326 | 0 | if (filename.ends_with(".meta")) { |
1327 | 0 | return filename == remote_meta_path; |
1328 | 0 | } |
1329 | 0 | auto rowset_id = extract_rowset_id(filename); |
1330 | 0 | if (rowset_id.hi == 0) { |
1331 | 0 | return false; |
1332 | 0 | } |
1333 | 0 | return cooldowned_rowsets.contains(rowset_id) || |
1334 | 0 | pending_remote_rowsets().contains(rowset_id); |
1335 | 0 | }; |
1336 | 0 | files.erase(std::remove_if(files.begin(), files.end(), std::move(filter)), files.end()); |
1337 | 0 | if (files.empty()) { |
1338 | 0 | return; |
1339 | 0 | } |
1340 | 0 | files.shrink_to_fit(); |
1341 | 0 | num_files_in_buffer += files.size(); |
1342 | 0 | buffer.insert({t->tablet_id(), {std::move(dest_fs), std::move(files)}}); |
1343 | 0 | auto& info = req.confirm_list.emplace_back(); |
1344 | 0 | info.__set_tablet_id(t->tablet_id()); |
1345 | 0 | info.__set_cooldown_replica_id(cooldown_replica_id); |
1346 | 0 | info.__set_cooldown_meta_id(cooldown_meta_id.to_thrift()); |
1347 | 0 | }; |
1348 | |
|
1349 | 0 | auto confirm_and_remove_files = [&buffer, &req, &num_files_in_buffer]() { |
1350 | 0 | TConfirmUnusedRemoteFilesResult result; |
1351 | 0 | LOG(INFO) << "begin to confirm unused remote files. num_tablets=" << buffer.size() |
1352 | 0 | << " num_files=" << num_files_in_buffer; |
1353 | 0 | auto st = MasterServerClient::instance()->confirm_unused_remote_files(req, &result); |
1354 | 0 | if (!st.ok()) { |
1355 | 0 | LOG(WARNING) << st; |
1356 | 0 | return; |
1357 | 0 | } |
1358 | 0 | for (auto id : result.confirmed_tablets) { |
1359 | 0 | if (auto it = buffer.find(id); LIKELY(it != buffer.end())) { |
1360 | 0 | auto& fs = it->second.first; |
1361 | 0 | auto& files = it->second.second; |
1362 | 0 | std::vector<io::Path> paths; |
1363 | 0 | paths.reserve(files.size()); |
1364 | | // delete unused files |
1365 | 0 | LOG(INFO) << "delete unused files. root_path=" << fs->root_path() |
1366 | 0 | << " tablet_id=" << id; |
1367 | 0 | io::Path dir = remote_tablet_path(id); |
1368 | 0 | for (auto& file : files) { |
1369 | 0 | auto file_path = dir / file.file_name; |
1370 | 0 | LOG(INFO) << "delete unused file: " << file_path.native(); |
1371 | 0 | paths.push_back(std::move(file_path)); |
1372 | 0 | } |
1373 | 0 | st = fs->batch_delete(paths); |
1374 | 0 | if (!st.ok()) { |
1375 | 0 | LOG(WARNING) << "failed to delete unused files, tablet_id=" << id << " : " |
1376 | 0 | << st; |
1377 | 0 | } |
1378 | 0 | buffer.erase(it); |
1379 | 0 | } |
1380 | 0 | } |
1381 | 0 | }; |
1382 | | |
1383 | | // batch confirm to reduce FE's overhead |
1384 | 0 | auto next_confirm_time = std::chrono::steady_clock::now() + |
1385 | 0 | std::chrono::seconds(config::confirm_unused_remote_files_interval_sec); |
1386 | 0 | for (auto& t : tablets) { |
1387 | 0 | if (t.use_count() <= 1 // this means tablet has been dropped |
1388 | 0 | || t->cooldown_conf_unlocked().cooldown_replica_id != t->replica_id() || |
1389 | 0 | t->tablet_state() != TABLET_RUNNING) { |
1390 | 0 | continue; |
1391 | 0 | } |
1392 | 0 | calc_unused_remote_files(t.get()); |
1393 | 0 | if (num_files_in_buffer > 0 && (num_files_in_buffer > max_files_in_buffer || |
1394 | 0 | std::chrono::steady_clock::now() > next_confirm_time)) { |
1395 | 0 | confirm_and_remove_files(); |
1396 | 0 | buffer.clear(); |
1397 | 0 | req.confirm_list.clear(); |
1398 | 0 | num_files_in_buffer = 0; |
1399 | 0 | next_confirm_time = |
1400 | 0 | std::chrono::steady_clock::now() + |
1401 | 0 | std::chrono::seconds(config::confirm_unused_remote_files_interval_sec); |
1402 | 0 | } |
1403 | 0 | } |
1404 | 0 | if (num_files_in_buffer > 0) { |
1405 | 0 | confirm_and_remove_files(); |
1406 | 0 | } |
1407 | 0 | } |
1408 | | |
// Background producer loop for cold-data work. Each tick it picks up to
// `config::cold_data_compaction_thread_num` tablets and, per tablet, either
// (a) runs cold data compaction (when this replica owns the cooldown job) or
// (b) follows the cooldown meta produced by another replica.
// Runs until `_stop_background_threads_latch` is counted down.
void StorageEngine::_cold_data_compaction_producer_callback() {
    // Tablet ids with an in-flight task on the cold-data pool; guarded by
    // tablet_submitted_mtx because pool workers mutate it concurrently.
    // NOTE(review): the submitted lambdas capture these locals by reference;
    // this assumes the thread pool is drained before this function returns
    // during shutdown — confirm against the pool's shutdown order.
    std::unordered_set<int64_t> tablet_submitted;
    std::mutex tablet_submitted_mtx;

    while (!_stop_background_threads_latch.wait_for(
            std::chrono::seconds(config::cold_data_compaction_interval_sec))) {
        // Skip this round entirely under memory pressure or when compaction
        // is globally disabled.
        if (config::disable_auto_compaction ||
            GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE)) {
            continue;
        }

        // Snapshot the in-flight set so the tablet filter below does not hold
        // the mutex while scanning all tablets.
        std::unordered_set<int64_t> copied_tablet_submitted;
        {
            std::lock_guard lock(tablet_submitted_mtx);
            copied_tablet_submitted = tablet_submitted;
        }
        // Remaining capacity on the cold-data thread pool for this round.
        int n = config::cold_data_compaction_thread_num - copied_tablet_submitted.size();
        if (n <= 0) {
            continue;
        }
        // Candidates: cooldowned (meta id initialized), usable, running, not
        // already submitted, and not opted out of auto compaction.
        auto tablets = _tablet_manager->get_all_tablet([&copied_tablet_submitted](Tablet* t) {
            return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() &&
                   t->tablet_state() == TABLET_RUNNING &&
                   !copied_tablet_submitted.count(t->tablet_id()) &&
                   !t->tablet_meta()->tablet_schema()->disable_auto_compaction() ;
        });
        // Top-n selections by score, kept small via sort-and-pop below.
        std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_compact;
        tablet_to_compact.reserve(n + 1);
        std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_follow;
        tablet_to_follow.reserve(n + 1);

        for (auto& t : tablets) {
            // This replica owns the cooldown job -> candidate for compaction.
            if (t->replica_id() == t->cooldown_conf_unlocked().cooldown_replica_id) {
                auto score = t->calc_cold_data_compaction_score();
                // Ignore tablets with too little cold data to be worth compacting.
                if (score < 4) {
                    continue;
                }
                tablet_to_compact.emplace_back(t, score);
                // Keep only the n highest-scoring candidates (n is > 0 here, so
                // the signed/unsigned comparison is safe).
                if (tablet_to_compact.size() > n) {
                    std::sort(tablet_to_compact.begin(), tablet_to_compact.end(),
                              [](auto& a, auto& b) { return a.second > b.second; });
                    tablet_to_compact.pop_back();
                }
                continue;
            }
            // else, need to follow
            {
                std::lock_guard lock(_running_cooldown_mutex);
                // NOTE(review): this checks t->table_id() while the rest of this
                // function keys on tablet_id(); verify what the writers of
                // _running_cooldown_tablets actually insert — this looks like it
                // may be a tablet-id set being probed with a table id.
                if (_running_cooldown_tablets.count(t->table_id())) {
                    // already in cooldown queue
                    continue;
                }
            }
            // TODO(plat1ko): some avoidance strategy if failed to follow
            auto score = t->calc_cold_data_compaction_score();
            tablet_to_follow.emplace_back(t, score);

            // Same top-n pruning as the compact list.
            if (tablet_to_follow.size() > n) {
                std::sort(tablet_to_follow.begin(), tablet_to_follow.end(),
                          [](auto& a, auto& b) { return a.second > b.second; });
                tablet_to_follow.pop_back();
            }
        }

        for (auto& [tablet, score] : tablet_to_compact) {
            LOG(INFO) << "submit cold data compaction. tablet_id=" << tablet->tablet_id()
                      << " score=" << score;
            // submit_func's status is deliberately ignored; the task marks
            // itself in tablet_submitted for the duration of the compaction.
            static_cast<void>(
                    _cold_data_compaction_thread_pool->submit_func([&, t = std::move(tablet)]() {
                        auto compaction = std::make_shared<ColdDataCompaction>(t);
                        {
                            std::lock_guard lock(tablet_submitted_mtx);
                            tablet_submitted.insert(t->tablet_id());
                        }
                        // Best-effort exclusivity; on contention we log and
                        // proceed anyway (lock is held if acquired).
                        std::unique_lock cold_compaction_lock(t->get_cold_compaction_lock(),
                                                              std::try_to_lock);
                        if (!cold_compaction_lock.owns_lock()) {
                            LOG(WARNING) << "try cold_compaction_lock failed, tablet_id="
                                         << t->tablet_id();
                        }
                        // Lazily (re)bind the cumulative compaction policy if the
                        // tablet's configured policy changed or was never set.
                        if (t->get_cumulative_compaction_policy() == nullptr ||
                            t->get_cumulative_compaction_policy()->name() !=
                                    t->tablet_meta()->compaction_policy()) {
                            t->set_cumulative_compaction_policy(_cumulative_compaction_policies.at(
                                    t->tablet_meta()->compaction_policy()));
                        }
                        auto st = compaction->compact();
                        {
                            std::lock_guard lock(tablet_submitted_mtx);
                            tablet_submitted.erase(t->tablet_id());
                        }
                        if (!st.ok()) {
                            LOG(WARNING) << "failed to do cold data compaction. tablet_id="
                                         << t->tablet_id() << " err=" << st;
                        }
                    }));
        }

        for (auto& [tablet, score] : tablet_to_follow) {
            LOG(INFO) << "submit to follow cooldown meta. tablet_id=" << tablet->tablet_id()
                      << " score=" << score;
            // Follower path: pull the cooldown meta written by the owning replica.
            static_cast<void>(
                    _cold_data_compaction_thread_pool->submit_func([&, t = std::move(tablet)]() {
                        {
                            std::lock_guard lock(tablet_submitted_mtx);
                            tablet_submitted.insert(t->tablet_id());
                        }
                        auto st = t->cooldown();
                        {
                            std::lock_guard lock(tablet_submitted_mtx);
                            tablet_submitted.erase(t->tablet_id());
                        }
                        if (!st.ok()) {
                            LOG(WARNING) << "failed to cooldown. tablet_id=" << t->tablet_id()
                                         << " err=" << st;
                        }
                    }));
        }
    }
}
1529 | | |
1530 | | void StorageEngine::add_async_publish_task(int64_t partition_id, int64_t tablet_id, |
1531 | | int64_t publish_version, int64_t transaction_id, |
1532 | 2.05k | bool is_recovery) { |
1533 | 2.05k | if (!is_recovery) { |
1534 | 2.05k | bool exists = false; |
1535 | 2.05k | { |
1536 | 2.05k | std::shared_lock<std::shared_mutex> rlock(_async_publish_lock); |
1537 | 2.05k | if (auto tablet_iter = _async_publish_tasks.find(tablet_id); |
1538 | 2.05k | tablet_iter != _async_publish_tasks.end()) { |
1539 | 2.05k | if (auto iter = tablet_iter->second.find(publish_version); |
1540 | 2.05k | iter != tablet_iter->second.end()) { |
1541 | 20 | exists = true; |
1542 | 20 | } |
1543 | 2.05k | } |
1544 | 2.05k | } |
1545 | 2.05k | if (exists) { |
1546 | 20 | return; |
1547 | 20 | } |
1548 | 2.03k | TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id); |
1549 | 2.03k | if (tablet == nullptr) { |
1550 | 0 | LOG(INFO) << "tablet may be dropped when add async publish task, tablet_id: " |
1551 | 0 | << tablet_id; |
1552 | 0 | return; |
1553 | 0 | } |
1554 | 2.03k | PendingPublishInfoPB pending_publish_info_pb; |
1555 | 2.03k | pending_publish_info_pb.set_partition_id(partition_id); |
1556 | 2.03k | pending_publish_info_pb.set_transaction_id(transaction_id); |
1557 | 2.03k | static_cast<void>(TabletMetaManager::save_pending_publish_info( |
1558 | 2.03k | tablet->data_dir(), tablet->tablet_id(), publish_version, |
1559 | 2.03k | pending_publish_info_pb.SerializeAsString())); |
1560 | 2.03k | } |
1561 | 2.03k | LOG(INFO) << "add pending publish task, tablet_id: " << tablet_id |
1562 | 2.03k | << " version: " << publish_version << " txn_id:" << transaction_id |
1563 | 2.03k | << " is_recovery: " << is_recovery; |
1564 | 2.03k | std::unique_lock<std::shared_mutex> wlock(_async_publish_lock); |
1565 | 2.03k | _async_publish_tasks[tablet_id][publish_version] = {transaction_id, partition_id}; |
1566 | 2.03k | } |
1567 | | |
1568 | 3 | int64_t StorageEngine::get_pending_publish_min_version(int64_t tablet_id) { |
1569 | 3 | std::shared_lock<std::shared_mutex> rlock(_async_publish_lock); |
1570 | 3 | auto iter = _async_publish_tasks.find(tablet_id); |
1571 | 3 | if (iter == _async_publish_tasks.end()) { |
1572 | 0 | return INT64_MAX; |
1573 | 0 | } |
1574 | 3 | if (iter->second.empty()) { |
1575 | 0 | return INT64_MAX; |
1576 | 0 | } |
1577 | 3 | return iter->second.begin()->first; |
1578 | 3 | } |
1579 | | |
1580 | 1.49k | void StorageEngine::_process_async_publish() { |
1581 | | // tablet, publish_version |
1582 | 1.49k | std::vector<std::pair<TabletSharedPtr, int64_t>> need_removed_tasks; |
1583 | 1.49k | { |
1584 | 1.49k | std::unique_lock<std::shared_mutex> wlock(_async_publish_lock); |
1585 | 1.49k | for (auto tablet_iter = _async_publish_tasks.begin(); |
1586 | 1.50k | tablet_iter != _async_publish_tasks.end();) { |
1587 | 10 | if (tablet_iter->second.empty()) { |
1588 | 1 | tablet_iter = _async_publish_tasks.erase(tablet_iter); |
1589 | 1 | continue; |
1590 | 1 | } |
1591 | 9 | int64_t tablet_id = tablet_iter->first; |
1592 | 9 | TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id); |
1593 | 9 | if (!tablet) { |
1594 | 1 | LOG(WARNING) << "tablet does not exist when async publush, tablet_id: " |
1595 | 1 | << tablet_id; |
1596 | 1 | tablet_iter = _async_publish_tasks.erase(tablet_iter); |
1597 | 1 | continue; |
1598 | 1 | } |
1599 | | |
1600 | 8 | auto task_iter = tablet_iter->second.begin(); |
1601 | 8 | int64_t version = task_iter->first; |
1602 | 8 | int64_t transaction_id = task_iter->second.first; |
1603 | 8 | int64_t partition_id = task_iter->second.second; |
1604 | 8 | int64_t max_version = tablet->max_version().second; |
1605 | | |
1606 | 8 | if (version <= max_version) { |
1607 | 6 | need_removed_tasks.emplace_back(tablet, version); |
1608 | 6 | tablet_iter->second.erase(task_iter); |
1609 | 6 | tablet_iter++; |
1610 | 6 | continue; |
1611 | 6 | } |
1612 | 2 | if (version != max_version + 1) { |
1613 | | // Keep only the most recent versions |
1614 | 31 | while (tablet_iter->second.size() > config::max_tablet_version_num) { |
1615 | 30 | need_removed_tasks.emplace_back(tablet, version); |
1616 | 30 | task_iter = tablet_iter->second.erase(task_iter); |
1617 | 30 | version = task_iter->first; |
1618 | 30 | } |
1619 | 1 | tablet_iter++; |
1620 | 1 | continue; |
1621 | 1 | } |
1622 | | |
1623 | 1 | auto async_publish_task = std::make_shared<AsyncTabletPublishTask>( |
1624 | 1 | tablet, partition_id, transaction_id, version); |
1625 | 1 | static_cast<void>(_tablet_publish_txn_thread_pool->submit_func( |
1626 | 1 | [=]() { async_publish_task->handle(); })); |
1627 | 1 | tablet_iter->second.erase(task_iter); |
1628 | 1 | need_removed_tasks.emplace_back(tablet, version); |
1629 | 1 | tablet_iter++; |
1630 | 1 | } |
1631 | 1.49k | } |
1632 | 1.49k | for (auto& [tablet, publish_version] : need_removed_tasks) { |
1633 | 37 | static_cast<void>(TabletMetaManager::remove_pending_publish_info( |
1634 | 37 | tablet->data_dir(), tablet->tablet_id(), publish_version)); |
1635 | 37 | } |
1636 | 1.49k | } |
1637 | | |
1638 | 17 | void StorageEngine::_async_publish_callback() { |
1639 | 1.50k | while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(30))) { |
1640 | 1.48k | _process_async_publish(); |
1641 | 1.48k | } |
1642 | 17 | } |
1643 | | |
1644 | | } // namespace doris |