/root/doris/be/src/olap/olap_server.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include <gen_cpp/Types_types.h> |
19 | | #include <gen_cpp/olap_file.pb.h> |
20 | | #include <glog/logging.h> |
21 | | #include <rapidjson/prettywriter.h> |
22 | | #include <rapidjson/stringbuffer.h> |
23 | | #include <stdint.h> |
24 | | #include <sys/types.h> |
25 | | |
26 | | #include <algorithm> |
27 | | #include <atomic> |
28 | | // IWYU pragma: no_include <bits/chrono.h> |
29 | | #include <chrono> // IWYU pragma: keep |
30 | | #include <cmath> |
31 | | #include <condition_variable> |
32 | | #include <cstdint> |
33 | | #include <ctime> |
34 | | #include <functional> |
35 | | #include <map> |
36 | | #include <memory> |
37 | | #include <mutex> |
38 | | #include <ostream> |
39 | | #include <random> |
40 | | #include <shared_mutex> |
41 | | #include <string> |
42 | | #include <thread> |
43 | | #include <unordered_set> |
44 | | #include <utility> |
45 | | #include <vector> |
46 | | |
47 | | #include "agent/utils.h" |
48 | | #include "common/config.h" |
49 | | #include "common/logging.h" |
50 | | #include "common/status.h" |
51 | | #include "cpp/sync_point.h" |
52 | | #include "gen_cpp/FrontendService.h" |
53 | | #include "gen_cpp/internal_service.pb.h" |
54 | | #include "gutil/ref_counted.h" |
55 | | #include "io/fs/file_writer.h" // IWYU pragma: keep |
56 | | #include "io/fs/path.h" |
57 | | #include "olap/base_tablet.h" |
58 | | #include "olap/cold_data_compaction.h" |
59 | | #include "olap/compaction_permit_limiter.h" |
60 | | #include "olap/cumulative_compaction_policy.h" |
61 | | #include "olap/cumulative_compaction_time_series_policy.h" |
62 | | #include "olap/data_dir.h" |
63 | | #include "olap/olap_common.h" |
64 | | #include "olap/olap_define.h" |
65 | | #include "olap/rowset/segcompaction.h" |
66 | | #include "olap/schema_change.h" |
67 | | #include "olap/single_replica_compaction.h" |
68 | | #include "olap/storage_engine.h" |
69 | | #include "olap/storage_policy.h" |
70 | | #include "olap/tablet.h" |
71 | | #include "olap/tablet_manager.h" |
72 | | #include "olap/tablet_meta.h" |
73 | | #include "olap/tablet_meta_manager.h" |
74 | | #include "olap/tablet_schema.h" |
75 | | #include "olap/task/engine_publish_version_task.h" |
76 | | #include "olap/task/index_builder.h" |
77 | | #include "runtime/client_cache.h" |
78 | | #include "runtime/memory/cache_manager.h" |
79 | | #include "runtime/memory/global_memory_arbitrator.h" |
80 | | #include "util/countdown_latch.h" |
81 | | #include "util/debug_points.h" |
82 | | #include "util/doris_metrics.h" |
83 | | #include "util/mem_info.h" |
84 | | #include "util/thread.h" |
85 | | #include "util/threadpool.h" |
86 | | #include "util/thrift_rpc_helper.h" |
87 | | #include "util/time.h" |
88 | | #include "util/uid_util.h" |
89 | | #include "util/work_thread_pool.hpp" |
90 | | |
91 | | using std::string; |
92 | | |
93 | | namespace doris { |
94 | | |
95 | | using io::Path; |
96 | | |
97 | | // number of running SCHEMA-CHANGE threads |
98 | | volatile uint32_t g_schema_change_active_threads = 0; |
99 | | |
100 | | static const uint64_t DEFAULT_SEED = 104729; |
101 | | static const uint64_t MOD_PRIME = 7652413; |
102 | | |
103 | 0 | CompactionSubmitRegistry::CompactionSubmitRegistry(CompactionSubmitRegistry&& r) { |
104 | 0 | std::swap(_tablet_submitted_cumu_compaction, r._tablet_submitted_cumu_compaction); |
105 | 0 | std::swap(_tablet_submitted_base_compaction, r._tablet_submitted_base_compaction); |
106 | 0 | std::swap(_tablet_submitted_full_compaction, r._tablet_submitted_full_compaction); |
107 | 0 | } |
108 | | |
109 | 0 | CompactionSubmitRegistry CompactionSubmitRegistry::create_snapshot() { |
110 | | // full compaction is not engaged in this method |
111 | 0 | std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex); |
112 | 0 | CompactionSubmitRegistry registry; |
113 | 0 | registry._tablet_submitted_base_compaction = _tablet_submitted_base_compaction; |
114 | 0 | registry._tablet_submitted_cumu_compaction = _tablet_submitted_cumu_compaction; |
115 | 0 | return registry; |
116 | 0 | } |
117 | | |
118 | 0 | void CompactionSubmitRegistry::reset(const std::vector<DataDir*>& stores) { |
119 | | // full compaction is not engaged in this method |
120 | 0 | for (const auto& store : stores) { |
121 | 0 | _tablet_submitted_cumu_compaction[store] = {}; |
122 | 0 | _tablet_submitted_base_compaction[store] = {}; |
123 | 0 | } |
124 | 0 | } |
125 | | |
126 | | uint32_t CompactionSubmitRegistry::count_executing_compaction(DataDir* dir, |
127 | 2 | CompactionType compaction_type) { |
128 | | // non-lock, used in snapshot |
129 | 2 | const auto& compaction_tasks = _get_tablet_set(dir, compaction_type); |
130 | 10 | return std::count_if(compaction_tasks.begin(), compaction_tasks.end(), [](const auto& task) { |
131 | 10 | return task->compaction_stage == CompactionStage::EXECUTING; |
132 | 10 | }); |
133 | 2 | } |
134 | | |
135 | 1 | uint32_t CompactionSubmitRegistry::count_executing_cumu_and_base(DataDir* dir) { |
136 | | // non-lock, used in snapshot |
137 | 1 | return count_executing_compaction(dir, CompactionType::BASE_COMPACTION) + |
138 | 1 | count_executing_compaction(dir, CompactionType::CUMULATIVE_COMPACTION); |
139 | 1 | } |
140 | | |
141 | 0 | bool CompactionSubmitRegistry::has_compaction_task(DataDir* dir, CompactionType compaction_type) { |
142 | | // non-lock, used in snapshot |
143 | 0 | return !_get_tablet_set(dir, compaction_type).empty(); |
144 | 0 | } |
145 | | |
146 | | std::vector<TabletSharedPtr> CompactionSubmitRegistry::pick_topn_tablets_for_compaction( |
147 | | TabletManager* tablet_mgr, DataDir* data_dir, CompactionType compaction_type, |
148 | 0 | const CumuCompactionPolicyTable& cumu_compaction_policies, uint32_t* disk_max_score) { |
149 | | // non-lock, used in snapshot |
150 | 0 | return tablet_mgr->find_best_tablets_to_compaction(compaction_type, data_dir, |
151 | 0 | _get_tablet_set(data_dir, compaction_type), |
152 | 0 | disk_max_score, cumu_compaction_policies); |
153 | 0 | } |
154 | | |
155 | 10 | bool CompactionSubmitRegistry::insert(TabletSharedPtr tablet, CompactionType compaction_type) { |
156 | 10 | std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex); |
157 | 10 | auto& tablet_set = _get_tablet_set(tablet->data_dir(), compaction_type); |
158 | 10 | bool already_exist = !(tablet_set.insert(tablet).second); |
159 | 10 | return already_exist; |
160 | 10 | } |
161 | | |
162 | | void CompactionSubmitRegistry::remove(TabletSharedPtr tablet, CompactionType compaction_type, |
163 | 0 | std::function<void()> wakeup_cb) { |
164 | 0 | std::unique_lock<std::mutex> l(_tablet_submitted_compaction_mutex); |
165 | 0 | auto& tablet_set = _get_tablet_set(tablet->data_dir(), compaction_type); |
166 | 0 | size_t removed = tablet_set.erase(tablet); |
167 | 0 | if (removed == 1) { |
168 | 0 | wakeup_cb(); |
169 | 0 | } |
170 | 0 | } |
171 | | |
172 | | CompactionSubmitRegistry::TabletSet& CompactionSubmitRegistry::_get_tablet_set( |
173 | 12 | DataDir* dir, CompactionType compaction_type) { |
174 | 12 | switch (compaction_type) { |
175 | 1 | case CompactionType::BASE_COMPACTION: |
176 | 1 | return _tablet_submitted_base_compaction[dir]; |
177 | 11 | case CompactionType::CUMULATIVE_COMPACTION: |
178 | 11 | return _tablet_submitted_cumu_compaction[dir]; |
179 | 0 | case CompactionType::FULL_COMPACTION: |
180 | 0 | return _tablet_submitted_full_compaction[dir]; |
181 | 0 | default: |
182 | 0 | CHECK(false) << "invalid compaction type"; |
183 | 12 | } |
184 | 12 | } |
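The registry above exists to keep a tablet from being submitted to the compaction thread pools twice: insert() reports whether the tablet was already registered for that compaction type on its data dir, and remove() fires the wakeup callback only when an entry was actually erased. Below is a minimal standalone sketch of that guard pattern, with plain strings and int64_t ids standing in for DataDir* and TabletSharedPtr (a hypothetical simplification, not the types used in this file):

    #include <cstdint>
    #include <functional>
    #include <map>
    #include <mutex>
    #include <set>
    #include <string>
    #include <utility>

    enum class CompactionKind { kBase, kCumulative, kFull };

    class SubmitGuard {
    public:
        // Returns true if the tablet was already registered for this kind on this dir.
        bool insert(const std::string& dir, CompactionKind kind, int64_t tablet_id) {
            std::lock_guard<std::mutex> l(_mtx);
            return !_sets[{dir, kind}].insert(tablet_id).second;
        }
        // Erases the tablet and fires wakeup_cb only if an entry was actually removed,
        // mirroring CompactionSubmitRegistry::remove above.
        void remove(const std::string& dir, CompactionKind kind, int64_t tablet_id,
                    const std::function<void()>& wakeup_cb) {
            std::lock_guard<std::mutex> l(_mtx);
            if (_sets[{dir, kind}].erase(tablet_id) == 1) {
                wakeup_cb();
            }
        }

    private:
        std::mutex _mtx;
        std::map<std::pair<std::string, CompactionKind>, std::set<int64_t>> _sets;
    };

The count_executing_* helpers above can then walk these sets without taking the mutex because they only ever run against the copy returned by create_snapshot().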
185 | | |
186 | 0 | static int32_t get_cumu_compaction_threads_num(size_t data_dirs_num) { |
187 | 0 | int32_t threads_num = config::max_cumu_compaction_threads; |
188 | 0 | if (threads_num == -1) { |
189 | 0 | threads_num = data_dirs_num; |
190 | 0 | } |
191 | 0 | threads_num = threads_num <= 0 ? 1 : threads_num; |
192 | 0 | return threads_num; |
193 | 0 | } |
194 | | |
195 | 0 | static int32_t get_base_compaction_threads_num(size_t data_dirs_num) { |
196 | 0 | int32_t threads_num = config::max_base_compaction_threads; |
197 | 0 | if (threads_num == -1) { |
198 | 0 | threads_num = data_dirs_num; |
199 | 0 | } |
200 | 0 | threads_num = threads_num <= 0 ? 1 : threads_num; |
201 | 0 | return threads_num; |
202 | 0 | } |
203 | | |
204 | 0 | static int32_t get_single_replica_compaction_threads_num(size_t data_dirs_num) { |
205 | 0 | int32_t threads_num = config::max_single_replica_compaction_threads; |
206 | 0 | if (threads_num == -1) { |
207 | 0 | threads_num = data_dirs_num; |
208 | 0 | } |
209 | 0 | threads_num = threads_num <= 0 ? 1 : threads_num; |
210 | 0 | return threads_num; |
211 | 0 | } |
212 | | |
213 | 0 | Status StorageEngine::start_bg_threads(std::shared_ptr<WorkloadGroup> wg_sptr) { |
214 | 0 | RETURN_IF_ERROR(Thread::create( |
215 | 0 | "StorageEngine", "unused_rowset_monitor_thread", |
216 | 0 | [this]() { this->_unused_rowset_monitor_thread_callback(); }, |
217 | 0 | &_unused_rowset_monitor_thread)); |
218 | 0 | LOG(INFO) << "unused rowset monitor thread started"; |
219 | |
220 | 0 | RETURN_IF_ERROR(Thread::create( |
221 | 0 | "StorageEngine", "evict_querying_rowset_thread", |
222 | 0 | [this]() { this->_evict_quring_rowset_thread_callback(); }, |
223 | 0 | &_evict_quering_rowset_thread)); |
224 | 0 | LOG(INFO) << "evict querying thread started"; |
225 | | |
226 | | // start thread for monitoring the snapshot and trash folder |
227 | 0 | RETURN_IF_ERROR(Thread::create( |
228 | 0 | "StorageEngine", "garbage_sweeper_thread", |
229 | 0 | [this]() { this->_garbage_sweeper_thread_callback(); }, &_garbage_sweeper_thread)); |
230 | 0 | LOG(INFO) << "garbage sweeper thread started"; |
231 | | |
232 | | // start thread for monitoring the tablet with io error |
233 | 0 | RETURN_IF_ERROR(Thread::create( |
234 | 0 | "StorageEngine", "disk_stat_monitor_thread", |
235 | 0 | [this]() { this->_disk_stat_monitor_thread_callback(); }, &_disk_stat_monitor_thread)); |
236 | 0 | LOG(INFO) << "disk stat monitor thread started"; |
237 | | |
238 | | // convert store map to vector |
239 | 0 | std::vector<DataDir*> data_dirs = get_stores(); |
240 | |
241 | 0 | auto base_compaction_threads = get_base_compaction_threads_num(data_dirs.size()); |
242 | 0 | auto cumu_compaction_threads = get_cumu_compaction_threads_num(data_dirs.size()); |
243 | 0 | auto single_replica_compaction_threads = |
244 | 0 | get_single_replica_compaction_threads_num(data_dirs.size()); |
245 | |
246 | 0 | if (wg_sptr->get_cgroup_cpu_ctl_wptr().lock()) { |
247 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("gBaseCompactionTaskThreadPool") |
248 | 0 | .set_min_threads(base_compaction_threads) |
249 | 0 | .set_max_threads(base_compaction_threads) |
250 | 0 | .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr()) |
251 | 0 | .build(&_base_compaction_thread_pool)); |
252 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("gCumuCompactionTaskThreadPool") |
253 | 0 | .set_min_threads(cumu_compaction_threads) |
254 | 0 | .set_max_threads(cumu_compaction_threads) |
255 | 0 | .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr()) |
256 | 0 | .build(&_cumu_compaction_thread_pool)); |
257 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("gSingleReplicaCompactionTaskThreadPool") |
258 | 0 | .set_min_threads(single_replica_compaction_threads) |
259 | 0 | .set_max_threads(single_replica_compaction_threads) |
260 | 0 | .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr()) |
261 | 0 | .build(&_single_replica_compaction_thread_pool)); |
262 | | |
263 | 0 | if (config::enable_segcompaction) { |
264 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("gSegCompactionTaskThreadPool") |
265 | 0 | .set_min_threads(config::segcompaction_num_threads) |
266 | 0 | .set_max_threads(config::segcompaction_num_threads) |
267 | 0 | .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr()) |
268 | 0 | .build(&_seg_compaction_thread_pool)); |
269 | 0 | } |
270 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("gColdDataCompactionTaskThreadPool") |
271 | 0 | .set_min_threads(config::cold_data_compaction_thread_num) |
272 | 0 | .set_max_threads(config::cold_data_compaction_thread_num) |
273 | 0 | .set_cgroup_cpu_ctl(wg_sptr->get_cgroup_cpu_ctl_wptr()) |
274 | 0 | .build(&_cold_data_compaction_thread_pool)); |
275 | 0 | } else { |
276 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("BaseCompactionTaskThreadPool") |
277 | 0 | .set_min_threads(base_compaction_threads) |
278 | 0 | .set_max_threads(base_compaction_threads) |
279 | 0 | .build(&_base_compaction_thread_pool)); |
280 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("CumuCompactionTaskThreadPool") |
281 | 0 | .set_min_threads(cumu_compaction_threads) |
282 | 0 | .set_max_threads(cumu_compaction_threads) |
283 | 0 | .build(&_cumu_compaction_thread_pool)); |
284 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("SingleReplicaCompactionTaskThreadPool") |
285 | 0 | .set_min_threads(single_replica_compaction_threads) |
286 | 0 | .set_max_threads(single_replica_compaction_threads) |
287 | 0 | .build(&_single_replica_compaction_thread_pool)); |
288 | | |
289 | 0 | if (config::enable_segcompaction) { |
290 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("SegCompactionTaskThreadPool") |
291 | 0 | .set_min_threads(config::segcompaction_num_threads) |
292 | 0 | .set_max_threads(config::segcompaction_num_threads) |
293 | 0 | .build(&_seg_compaction_thread_pool)); |
294 | 0 | } |
295 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("ColdDataCompactionTaskThreadPool") |
296 | 0 | .set_min_threads(config::cold_data_compaction_thread_num) |
297 | 0 | .set_max_threads(config::cold_data_compaction_thread_num) |
298 | 0 | .build(&_cold_data_compaction_thread_pool)); |
299 | 0 | } |
300 | | |
301 | | // compaction tasks producer thread |
302 | 0 | RETURN_IF_ERROR(Thread::create( |
303 | 0 | "StorageEngine", "compaction_tasks_producer_thread", |
304 | 0 | [this]() { this->_compaction_tasks_producer_callback(); }, |
305 | 0 | &_compaction_tasks_producer_thread)); |
306 | 0 | LOG(INFO) << "compaction tasks producer thread started"; |
307 | |
308 | 0 | RETURN_IF_ERROR(Thread::create( |
309 | 0 | "StorageEngine", "_update_replica_infos_thread", |
310 | 0 | [this]() { this->_update_replica_infos_callback(); }, &_update_replica_infos_thread)); |
311 | 0 | LOG(INFO) << "tablet replicas info update thread started"; |
312 | |
313 | 0 | int32_t max_checkpoint_thread_num = config::max_meta_checkpoint_threads; |
314 | 0 | if (max_checkpoint_thread_num < 0) { |
315 | 0 | max_checkpoint_thread_num = data_dirs.size(); |
316 | 0 | } |
317 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("TabletMetaCheckpointTaskThreadPool") |
318 | 0 | .set_max_threads(max_checkpoint_thread_num) |
319 | 0 | .build(&_tablet_meta_checkpoint_thread_pool)); |
320 | | |
321 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("MultiGetTaskThreadPool") |
322 | 0 | .set_min_threads(config::multi_get_max_threads) |
323 | 0 | .set_max_threads(config::multi_get_max_threads) |
324 | 0 | .build(&_bg_multi_get_thread_pool)); |
325 | 0 | RETURN_IF_ERROR(Thread::create( |
326 | 0 | "StorageEngine", "tablet_checkpoint_tasks_producer_thread", |
327 | 0 | [this, data_dirs]() { this->_tablet_checkpoint_callback(data_dirs); }, |
328 | 0 | &_tablet_checkpoint_tasks_producer_thread)); |
329 | 0 | LOG(INFO) << "tablet checkpoint tasks producer thread started"; |
330 | |
331 | 0 | RETURN_IF_ERROR(Thread::create( |
332 | 0 | "StorageEngine", "tablet_path_check_thread", |
333 | 0 | [this]() { this->_tablet_path_check_callback(); }, &_tablet_path_check_thread)); |
334 | 0 | LOG(INFO) << "tablet path check thread started"; |
335 | | |
336 | | // path scan and gc thread |
337 | 0 | if (config::path_gc_check) { |
338 | 0 | for (auto data_dir : get_stores()) { |
339 | 0 | scoped_refptr<Thread> path_gc_thread; |
340 | 0 | RETURN_IF_ERROR(Thread::create( |
341 | 0 | "StorageEngine", "path_gc_thread", |
342 | 0 | [this, data_dir]() { this->_path_gc_thread_callback(data_dir); }, |
343 | 0 | &path_gc_thread)); |
344 | 0 | _path_gc_threads.emplace_back(path_gc_thread); |
345 | 0 | } |
346 | 0 | LOG(INFO) << "path gc threads started. number:" << get_stores().size(); |
347 | 0 | } |
348 | | |
349 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("CooldownTaskThreadPool") |
350 | 0 | .set_min_threads(config::cooldown_thread_num) |
351 | 0 | .set_max_threads(config::cooldown_thread_num) |
352 | 0 | .build(&_cooldown_thread_pool)); |
353 | 0 | LOG(INFO) << "cooldown thread pool started"; |
354 | |
355 | 0 | RETURN_IF_ERROR(Thread::create( |
356 | 0 | "StorageEngine", "cooldown_tasks_producer_thread", |
357 | 0 | [this]() { this->_cooldown_tasks_producer_callback(); }, |
358 | 0 | &_cooldown_tasks_producer_thread)); |
359 | 0 | LOG(INFO) << "cooldown tasks producer thread started"; |
360 | |
361 | 0 | RETURN_IF_ERROR(Thread::create( |
362 | 0 | "StorageEngine", "remove_unused_remote_files_thread", |
363 | 0 | [this]() { this->_remove_unused_remote_files_callback(); }, |
364 | 0 | &_remove_unused_remote_files_thread)); |
365 | 0 | LOG(INFO) << "remove unused remote files thread started"; |
366 | |
367 | 0 | RETURN_IF_ERROR(Thread::create( |
368 | 0 | "StorageEngine", "cold_data_compaction_producer_thread", |
369 | 0 | [this]() { this->_cold_data_compaction_producer_callback(); }, |
370 | 0 | &_cold_data_compaction_producer_thread)); |
371 | 0 | LOG(INFO) << "cold data compaction producer thread started"; |
372 | | |
373 | | // add tablet publish version thread pool |
374 | 0 | RETURN_IF_ERROR(ThreadPoolBuilder("TabletPublishTxnThreadPool") |
375 | 0 | .set_min_threads(config::tablet_publish_txn_max_thread) |
376 | 0 | .set_max_threads(config::tablet_publish_txn_max_thread) |
377 | 0 | .build(&_tablet_publish_txn_thread_pool)); |
378 | | |
379 | 0 | RETURN_IF_ERROR(Thread::create( |
380 | 0 | "StorageEngine", "async_publish_version_thread", |
381 | 0 | [this]() { this->_async_publish_callback(); }, &_async_publish_thread)); |
382 | 0 | LOG(INFO) << "async publish thread started"; |
383 | |
384 | 0 | LOG(INFO) << "all storage engine's background threads are started."; |
385 | 0 | return Status::OK(); |
386 | 0 | } |
387 | | |
388 | 0 | void StorageEngine::_garbage_sweeper_thread_callback() { |
389 | 0 | uint32_t max_interval = config::max_garbage_sweep_interval; |
390 | 0 | uint32_t min_interval = config::min_garbage_sweep_interval; |
391 | |
392 | 0 | if (max_interval < min_interval || min_interval <= 0) { |
393 | 0 | LOG(WARNING) << "garbage sweep interval config is illegal: [max=" << max_interval |
394 | 0 | << " min=" << min_interval << "]."; |
395 | 0 | min_interval = 1; |
396 | 0 | max_interval = max_interval >= min_interval ? max_interval : min_interval; |
397 | 0 | LOG(INFO) << "force reset garbage sweep interval. " |
398 | 0 | << "max_interval=" << max_interval << ", min_interval=" << min_interval; |
399 | 0 | } |
400 | |
401 | 0 | const double pi = M_PI; |
402 | 0 | double usage = 1.0; |
403 | | // After the program starts, the first round of cleaning starts after min_interval. |
404 | 0 | uint32_t curr_interval = min_interval; |
405 | 0 | do { |
406 | | // Function properties: |
407 | | // when usage < 0.6, ratio close to 1.(interval close to max_interval) |
408 | | // when usage is in [0.6, 0.75], ratio decreases rapidly from 0.87 to 0.27. |
409 | | // when usage > 0.75, ratio is slowly decreasing. |
410 | | // when usage > 0.8, ratio close to min_interval. |
411 | | // when usage = 0.88, ratio is approximately 0.0057. |
412 | 0 | double ratio = (1.1 * (pi / 2 - std::atan(usage * 100 / 5 - 14)) - 0.28) / pi; |
413 | 0 | ratio = ratio > 0 ? ratio : 0; |
414 | 0 | curr_interval = uint32_t(max_interval * ratio); |
415 | 0 | curr_interval = std::max(curr_interval, min_interval); |
416 | 0 | curr_interval = std::min(curr_interval, max_interval); |
417 | | |
418 | | // start clean trash and update usage. |
419 | 0 | Status res = start_trash_sweep(&usage); |
420 | 0 | if (res.ok() && _need_clean_trash.exchange(false, std::memory_order_relaxed)) { |
421 | 0 | res = start_trash_sweep(&usage, true); |
422 | 0 | } |
423 | |
424 | 0 | if (!res.ok()) { |
425 | 0 | LOG(WARNING) << "one or more errors occurred when sweeping trash. " |
426 | 0 | << "See previous messages for details. err code=" << res; |
427 | | // do nothing. continue next loop. |
428 | 0 | } |
429 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(curr_interval))); |
430 | 0 | } |
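The arctan curve described in the comments above can be checked in isolation. The sketch below evaluates the same expression at a few usage points and maps the ratio into a [min_interval, max_interval] window; the 180s/3600s bounds are made-up values for illustration, not the config defaults:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const double pi = M_PI;
        const uint32_t min_interval = 180, max_interval = 3600; // made-up bounds, not config values
        for (double usage : {0.5, 0.6, 0.75, 0.8, 0.88}) {
            double ratio = (1.1 * (pi / 2 - std::atan(usage * 100 / 5 - 14)) - 0.28) / pi;
            ratio = ratio > 0 ? ratio : 0; // same clamp as the loop above
            uint32_t interval = uint32_t(max_interval * ratio);
            interval = std::clamp(interval, min_interval, max_interval);
            std::printf("usage=%.2f ratio=%.4f interval=%us\n", usage, ratio, interval);
        }
        return 0;
    }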
431 | | |
432 | 0 | void StorageEngine::_disk_stat_monitor_thread_callback() { |
433 | 0 | int32_t interval = config::disk_stat_monitor_interval; |
434 | 0 | do { |
435 | 0 | _start_disk_stat_monitor(); |
436 | |
437 | 0 | interval = config::disk_stat_monitor_interval; |
438 | 0 | if (interval <= 0) { |
439 | 0 | LOG(WARNING) << "disk_stat_monitor_interval config is illegal: " << interval |
440 | 0 | << ", force set to 1"; |
441 | 0 | interval = 1; |
442 | 0 | } |
443 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
444 | 0 | } |
445 | | |
446 | 0 | void StorageEngine::_unused_rowset_monitor_thread_callback() { |
447 | 0 | int32_t interval = config::unused_rowset_monitor_interval; |
448 | 0 | do { |
449 | 0 | start_delete_unused_rowset(); |
450 | |
451 | 0 | interval = config::unused_rowset_monitor_interval; |
452 | 0 | if (interval <= 0) { |
453 | 0 | LOG(WARNING) << "unused_rowset_monitor_interval config is illegal: " << interval |
454 | 0 | << ", force set to 1"; |
455 | 0 | interval = 1; |
456 | 0 | } |
457 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
458 | 0 | } |
459 | | |
460 | 0 | int32_t StorageEngine::_auto_get_interval_by_disk_capacity(DataDir* data_dir) { |
461 | 0 | double disk_used = data_dir->get_usage(0); |
462 | 0 | double remain_used = 1 - disk_used; |
463 | 0 | DCHECK(remain_used >= 0 && remain_used <= 1); |
464 | 0 | DCHECK(config::path_gc_check_interval_second >= 0); |
465 | 0 | int32_t ret = 0; |
466 | 0 | if (remain_used > 0.9) { |
467 | | // if config::path_gc_check_interval_second == 24h |
468 | 0 | ret = config::path_gc_check_interval_second; |
469 | 0 | } else if (remain_used > 0.7) { |
470 | | // 12h |
471 | 0 | ret = config::path_gc_check_interval_second / 2; |
472 | 0 | } else if (remain_used > 0.5) { |
473 | | // 6h |
474 | 0 | ret = config::path_gc_check_interval_second / 4; |
475 | 0 | } else if (remain_used > 0.3) { |
476 | | // 4h |
477 | 0 | ret = config::path_gc_check_interval_second / 6; |
478 | 0 | } else { |
479 | | // 3h |
480 | 0 | ret = config::path_gc_check_interval_second / 8; |
481 | 0 | } |
482 | 0 | return ret; |
483 | 0 | } |
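Assuming the 24-hour base interval that the inline comments refer to, the tiers above work out to 24h/12h/6h/4h/3h of path-gc interval depending on how much disk capacity remains. A quick standalone check of that arithmetic (the 24h base is an assumption taken from the comments, not necessarily the config default):

    #include <cstdint>
    #include <cstdio>

    // Same tiering as _auto_get_interval_by_disk_capacity, parameterized on the base interval.
    int32_t interval_for_remaining(double remain, int32_t base_secs) {
        if (remain > 0.9) return base_secs;      // 24h with a 24h base
        if (remain > 0.7) return base_secs / 2;  // 12h
        if (remain > 0.5) return base_secs / 4;  // 6h
        if (remain > 0.3) return base_secs / 6;  // 4h
        return base_secs / 8;                    // 3h
    }

    int main() {
        const int32_t base = 24 * 3600; // assumed 24h base, as in the inline comments
        for (double remain : {0.95, 0.8, 0.6, 0.4, 0.1}) {
            std::printf("remaining=%.2f -> interval=%ds\n", remain,
                        interval_for_remaining(remain, base));
        }
        return 0;
    }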
484 | | |
485 | 0 | void StorageEngine::_path_gc_thread_callback(DataDir* data_dir) { |
486 | 0 | LOG(INFO) << "try to start path gc thread!"; |
487 | 0 | int32_t last_exec_time = 0; |
488 | 0 | do { |
489 | 0 | int32_t current_time = time(nullptr); |
490 | |
491 | 0 | int32_t interval = _auto_get_interval_by_disk_capacity(data_dir); |
492 | 0 | if (interval <= 0) { |
493 | 0 | LOG(WARNING) << "path gc thread check interval config is illegal: " << interval |
494 | 0 | << ", will be forced to half an hour"; |
495 | 0 | interval = 1800; // 0.5 hour |
496 | 0 | } |
497 | 0 | if (current_time - last_exec_time >= interval) { |
498 | 0 | LOG(INFO) << "try to perform path gc! disk remain [" << 1 - data_dir->get_usage(0) |
499 | 0 | << "] interval [" << interval << "]"; |
500 | 0 | data_dir->perform_path_gc(); |
501 | 0 | last_exec_time = time(nullptr); |
502 | 0 | } |
503 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(5))); |
504 | 0 | LOG(INFO) << "stop path gc thread!"; |
505 | 0 | } |
506 | | |
507 | 0 | void StorageEngine::_tablet_checkpoint_callback(const std::vector<DataDir*>& data_dirs) { |
508 | 0 | int64_t interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs; |
509 | 0 | do { |
510 | 0 | LOG(INFO) << "begin to produce tablet meta checkpoint tasks."; |
511 | 0 | for (auto data_dir : data_dirs) { |
512 | 0 | auto st = _tablet_meta_checkpoint_thread_pool->submit_func( |
513 | 0 | [data_dir, this]() { _tablet_manager->do_tablet_meta_checkpoint(data_dir); }); |
514 | 0 | if (!st.ok()) { |
515 | 0 | LOG(WARNING) << "submit tablet checkpoint tasks failed."; |
516 | 0 | } |
517 | 0 | } |
518 | 0 | interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs; |
519 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
520 | 0 | } |
521 | | |
522 | 0 | void StorageEngine::_tablet_path_check_callback() { |
523 | 0 | struct TabletIdComparator { |
524 | 0 | bool operator()(Tablet* a, Tablet* b) { return a->tablet_id() < b->tablet_id(); } |
525 | 0 | }; |
526 | |
527 | 0 | using TabletQueue = std::priority_queue<Tablet*, std::vector<Tablet*>, TabletIdComparator>; |
528 | |
529 | 0 | int64_t interval = config::tablet_path_check_interval_seconds; |
530 | 0 | if (interval <= 0) { |
531 | 0 | return; |
532 | 0 | } |
533 | | |
534 | 0 | int64_t last_tablet_id = 0; |
535 | 0 | do { |
536 | 0 | int32_t batch_size = config::tablet_path_check_batch_size; |
537 | 0 | if (batch_size <= 0) { |
538 | 0 | if (_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))) { |
539 | 0 | break; |
540 | 0 | } |
541 | 0 | continue; |
542 | 0 | } |
543 | | |
544 | 0 | LOG(INFO) << "start to check tablet path"; |
545 | |
546 | 0 | auto all_tablets = _tablet_manager->get_all_tablet( |
547 | 0 | [](Tablet* t) { return t->is_used() && t->tablet_state() == TABLET_RUNNING; }); |
548 | |
549 | 0 | TabletQueue big_id_tablets; |
550 | 0 | TabletQueue small_id_tablets; |
551 | 0 | for (auto tablet : all_tablets) { |
552 | 0 | auto tablet_id = tablet->tablet_id(); |
553 | 0 | TabletQueue* belong_tablets = nullptr; |
554 | 0 | if (tablet_id > last_tablet_id) { |
555 | 0 | if (big_id_tablets.size() < batch_size || |
556 | 0 | big_id_tablets.top()->tablet_id() > tablet_id) { |
557 | 0 | belong_tablets = &big_id_tablets; |
558 | 0 | } |
559 | 0 | } else if (big_id_tablets.size() < batch_size) { |
560 | 0 | if (small_id_tablets.size() < batch_size || |
561 | 0 | small_id_tablets.top()->tablet_id() > tablet_id) { |
562 | 0 | belong_tablets = &small_id_tablets; |
563 | 0 | } |
564 | 0 | } |
565 | 0 | if (belong_tablets != nullptr) { |
566 | 0 | belong_tablets->push(tablet.get()); |
567 | 0 | if (belong_tablets->size() > batch_size) { |
568 | 0 | belong_tablets->pop(); |
569 | 0 | } |
570 | 0 | } |
571 | 0 | } |
572 | |
573 | 0 | int32_t need_small_id_tablet_size = |
574 | 0 | batch_size - static_cast<int32_t>(big_id_tablets.size()); |
575 | |
576 | 0 | if (!big_id_tablets.empty()) { |
577 | 0 | last_tablet_id = big_id_tablets.top()->tablet_id(); |
578 | 0 | } |
579 | 0 | while (!big_id_tablets.empty()) { |
580 | 0 | big_id_tablets.top()->check_tablet_path_exists(); |
581 | 0 | big_id_tablets.pop(); |
582 | 0 | } |
583 | |
584 | 0 | if (!small_id_tablets.empty() && need_small_id_tablet_size > 0) { |
585 | 0 | while (static_cast<int32_t>(small_id_tablets.size()) > need_small_id_tablet_size) { |
586 | 0 | small_id_tablets.pop(); |
587 | 0 | } |
588 | |
589 | 0 | last_tablet_id = small_id_tablets.top()->tablet_id(); |
590 | 0 | while (!small_id_tablets.empty()) { |
591 | 0 | small_id_tablets.top()->check_tablet_path_exists(); |
592 | 0 | small_id_tablets.pop(); |
593 | 0 | } |
594 | 0 | } |
595 | |
596 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
597 | 0 | } |
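The loop above scans tablets in tablet-id order across rounds by keeping, per round, the batch_size smallest ids greater than last_tablet_id in a size-capped max-heap (plus a second heap of the smallest ids overall for wrap-around). A minimal sketch of that bounded top-N selection, with plain int64_t ids and made-up data:

    #include <cstdint>
    #include <cstdio>
    #include <queue>
    #include <vector>

    // Keep the batch_size smallest ids that are strictly greater than cursor.
    std::vector<int64_t> smallest_above(const std::vector<int64_t>& ids, int64_t cursor,
                                        size_t batch_size) {
        std::priority_queue<int64_t> heap; // max-heap: the largest kept id is evicted first
        for (int64_t id : ids) {
            if (id <= cursor) continue;
            if (heap.size() < batch_size || heap.top() > id) {
                heap.push(id);
                if (heap.size() > batch_size) heap.pop();
            }
        }
        std::vector<int64_t> out;
        while (!heap.empty()) {
            out.push_back(heap.top()); // descending; the first element is also the next cursor
            heap.pop();
        }
        return out;
    }

    int main() {
        std::vector<int64_t> ids = {7, 3, 42, 15, 9, 27, 31};
        for (int64_t id : smallest_above(ids, /*cursor=*/8, /*batch_size=*/3)) {
            std::printf("%lld\n", static_cast<long long>(id)); // prints 27, 15, 9
        }
        return 0;
    }

Because the heap is capped at batch_size, its top element is both the eviction candidate while scanning and the natural cursor for the next round.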
598 | | |
599 | 0 | void StorageEngine::_adjust_compaction_thread_num() { |
600 | 0 | auto base_compaction_threads_num = get_base_compaction_threads_num(_store_map.size()); |
601 | 0 | if (_base_compaction_thread_pool->max_threads() != base_compaction_threads_num) { |
602 | 0 | int old_max_threads = _base_compaction_thread_pool->max_threads(); |
603 | 0 | Status status = _base_compaction_thread_pool->set_max_threads(base_compaction_threads_num); |
604 | 0 | if (status.ok()) { |
605 | 0 | VLOG_NOTICE << "update base compaction thread pool max_threads from " << old_max_threads |
606 | 0 | << " to " << base_compaction_threads_num; |
607 | 0 | } |
608 | 0 | } |
609 | 0 | if (_base_compaction_thread_pool->min_threads() != base_compaction_threads_num) { |
610 | 0 | int old_min_threads = _base_compaction_thread_pool->min_threads(); |
611 | 0 | Status status = _base_compaction_thread_pool->set_min_threads(base_compaction_threads_num); |
612 | 0 | if (status.ok()) { |
613 | 0 | VLOG_NOTICE << "update base compaction thread pool min_threads from " << old_min_threads |
614 | 0 | << " to " << base_compaction_threads_num; |
615 | 0 | } |
616 | 0 | } |
617 | |
618 | 0 | auto cumu_compaction_threads_num = get_cumu_compaction_threads_num(_store_map.size()); |
619 | 0 | if (_cumu_compaction_thread_pool->max_threads() != cumu_compaction_threads_num) { |
620 | 0 | int old_max_threads = _cumu_compaction_thread_pool->max_threads(); |
621 | 0 | Status status = _cumu_compaction_thread_pool->set_max_threads(cumu_compaction_threads_num); |
622 | 0 | if (status.ok()) { |
623 | 0 | VLOG_NOTICE << "update cumu compaction thread pool max_threads from " << old_max_threads |
624 | 0 | << " to " << cumu_compaction_threads_num; |
625 | 0 | } |
626 | 0 | } |
627 | 0 | if (_cumu_compaction_thread_pool->min_threads() != cumu_compaction_threads_num) { |
628 | 0 | int old_min_threads = _cumu_compaction_thread_pool->min_threads(); |
629 | 0 | Status status = _cumu_compaction_thread_pool->set_min_threads(cumu_compaction_threads_num); |
630 | 0 | if (status.ok()) { |
631 | 0 | VLOG_NOTICE << "update cumu compaction thread pool min_threads from " << old_min_threads |
632 | 0 | << " to " << cumu_compaction_threads_num; |
633 | 0 | } |
634 | 0 | } |
635 | |
636 | 0 | auto single_replica_compaction_threads_num = |
637 | 0 | get_single_replica_compaction_threads_num(_store_map.size()); |
638 | 0 | if (_single_replica_compaction_thread_pool->max_threads() != |
639 | 0 | single_replica_compaction_threads_num) { |
640 | 0 | int old_max_threads = _single_replica_compaction_thread_pool->max_threads(); |
641 | 0 | Status status = _single_replica_compaction_thread_pool->set_max_threads( |
642 | 0 | single_replica_compaction_threads_num); |
643 | 0 | if (status.ok()) { |
644 | 0 | VLOG_NOTICE << "update single replica compaction thread pool max_threads from " |
645 | 0 | << old_max_threads << " to " << single_replica_compaction_threads_num; |
646 | 0 | } |
647 | 0 | } |
648 | 0 | if (_single_replica_compaction_thread_pool->min_threads() != |
649 | 0 | single_replica_compaction_threads_num) { |
650 | 0 | int old_min_threads = _single_replica_compaction_thread_pool->min_threads(); |
651 | 0 | Status status = _single_replica_compaction_thread_pool->set_min_threads( |
652 | 0 | single_replica_compaction_threads_num); |
653 | 0 | if (status.ok()) { |
654 | 0 | VLOG_NOTICE << "update single replica compaction thread pool min_threads from " |
655 | 0 | << old_min_threads << " to " << single_replica_compaction_threads_num; |
656 | 0 | } |
657 | 0 | } |
658 | 0 | } |
659 | | |
660 | 0 | void StorageEngine::_compaction_tasks_producer_callback() { |
661 | 0 | LOG(INFO) << "try to start compaction producer process!"; |
662 | |
663 | 0 | std::vector<DataDir*> data_dirs = get_stores(); |
664 | 0 | _compaction_submit_registry.reset(data_dirs); |
665 | |
666 | 0 | int round = 0; |
667 | 0 | CompactionType compaction_type; |
668 | | |
669 | | // Used to record the time when the score metric was last updated. |
670 | | // The update of the score metric is accompanied by the logic of selecting the tablet. |
671 | | // If there is no slot available, the logic of selecting the tablet will be terminated, |
672 | | // which causes the score metric update to be terminated. |
673 | | // In order to avoid this situation, we need to update the score regularly. |
674 | 0 | int64_t last_cumulative_score_update_time = 0; |
675 | 0 | int64_t last_base_score_update_time = 0; |
676 | 0 | static const int64_t check_score_interval_ms = 5000; // 5 secs |
677 | |
678 | 0 | int64_t interval = config::generate_compaction_tasks_interval_ms; |
679 | 0 | do { |
680 | 0 | if (!config::disable_auto_compaction && |
681 | 0 | !GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE)) { |
682 | 0 | _adjust_compaction_thread_num(); |
683 | |
684 | 0 | bool check_score = false; |
685 | 0 | int64_t cur_time = UnixMillis(); |
686 | 0 | if (round < config::cumulative_compaction_rounds_for_each_base_compaction_round) { |
687 | 0 | compaction_type = CompactionType::CUMULATIVE_COMPACTION; |
688 | 0 | round++; |
689 | 0 | if (cur_time - last_cumulative_score_update_time >= check_score_interval_ms) { |
690 | 0 | check_score = true; |
691 | 0 | last_cumulative_score_update_time = cur_time; |
692 | 0 | } |
693 | 0 | } else { |
694 | 0 | compaction_type = CompactionType::BASE_COMPACTION; |
695 | 0 | round = 0; |
696 | 0 | if (cur_time - last_base_score_update_time >= check_score_interval_ms) { |
697 | 0 | check_score = true; |
698 | 0 | last_base_score_update_time = cur_time; |
699 | 0 | } |
700 | 0 | } |
701 | 0 | std::unique_ptr<ThreadPool>& thread_pool = |
702 | 0 | (compaction_type == CompactionType::CUMULATIVE_COMPACTION) |
703 | 0 | ? _cumu_compaction_thread_pool |
704 | 0 | : _base_compaction_thread_pool; |
705 | 0 | VLOG_CRITICAL << "compaction thread pool. type: " |
706 | 0 | << (compaction_type == CompactionType::CUMULATIVE_COMPACTION ? "CUMU" |
707 | 0 | : "BASE") |
708 | 0 | << ", num_threads: " << thread_pool->num_threads() |
709 | 0 | << ", num_threads_pending_start: " |
710 | 0 | << thread_pool->num_threads_pending_start() |
711 | 0 | << ", num_active_threads: " << thread_pool->num_active_threads() |
712 | 0 | << ", max_threads: " << thread_pool->max_threads() |
713 | 0 | << ", min_threads: " << thread_pool->min_threads() |
714 | 0 | << ", num_total_queued_tasks: " << thread_pool->get_queue_size(); |
715 | 0 | std::vector<TabletSharedPtr> tablets_compaction = |
716 | 0 | _generate_compaction_tasks(compaction_type, data_dirs, check_score); |
717 | 0 | if (tablets_compaction.size() == 0) { |
718 | 0 | std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex); |
719 | 0 | _wakeup_producer_flag = 0; |
720 | | // It is necessary to wake up the thread on timeout to prevent deadlock |
721 | | // in case of no running compaction task. |
722 | 0 | _compaction_producer_sleep_cv.wait_for( |
723 | 0 | lock, std::chrono::milliseconds(2000), |
724 | 0 | [this] { return _wakeup_producer_flag == 1; }); |
725 | 0 | continue; |
726 | 0 | } |
727 | | |
728 | 0 | for (const auto& tablet : tablets_compaction) { |
729 | 0 | if (compaction_type == CompactionType::BASE_COMPACTION) { |
730 | 0 | tablet->set_last_base_compaction_schedule_time(UnixMillis()); |
731 | 0 | } |
732 | 0 | Status st = _submit_compaction_task(tablet, compaction_type, false); |
733 | 0 | if (!st.ok()) { |
734 | 0 | LOG(WARNING) << "failed to submit compaction task for tablet: " |
735 | 0 | << tablet->tablet_id() << ", err: " << st; |
736 | 0 | } |
737 | 0 | } |
738 | 0 | interval = config::generate_compaction_tasks_interval_ms; |
739 | 0 | } else { |
740 | 0 | interval = 5000; // 5s to check disable_auto_compaction |
741 | 0 | } |
742 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(interval))); |
743 | 0 | } |
744 | | |
745 | 0 | void StorageEngine::_update_replica_infos_callback() { |
746 | | #ifdef GOOGLE_PROFILER |
747 | | ProfilerRegisterThread(); |
748 | | #endif |
749 | 0 | LOG(INFO) << "start to update replica infos!"; |
750 | |
|
751 | 0 | int64_t interval = config::update_replica_infos_interval_seconds; |
752 | 0 | do { |
753 | 0 | auto all_tablets = _tablet_manager->get_all_tablet([](Tablet* t) { |
754 | 0 | return t->is_used() && t->tablet_state() == TABLET_RUNNING && |
755 | 0 | !t->tablet_meta()->tablet_schema()->disable_auto_compaction() && |
756 | 0 | t->tablet_meta()->tablet_schema()->enable_single_replica_compaction(); |
757 | 0 | }); |
758 | 0 | ClusterInfo* cluster_info = ExecEnv::GetInstance()->cluster_info(); |
759 | 0 | if (cluster_info == nullptr) { |
760 | 0 | LOG(WARNING) << "Have not get FE Master heartbeat yet"; |
761 | 0 | std::this_thread::sleep_for(std::chrono::seconds(2)); |
762 | 0 | continue; |
763 | 0 | } |
764 | 0 | TNetworkAddress master_addr = cluster_info->master_fe_addr; |
765 | 0 | if (master_addr.hostname == "" || master_addr.port == 0) { |
766 | 0 | LOG(WARNING) << "Have not get FE Master heartbeat yet"; |
767 | 0 | std::this_thread::sleep_for(std::chrono::seconds(2)); |
768 | 0 | continue; |
769 | 0 | } |
770 | | |
771 | 0 | int start = 0; |
772 | 0 | int tablet_size = all_tablets.size(); |
773 | | // The while loop may take a long time, so we should break out of it when the engine is stopping |
774 | 0 | while (start < tablet_size && _stop_background_threads_latch.count() > 0) { |
775 | 0 | int batch_size = std::min(100, tablet_size - start); |
776 | 0 | int end = start + batch_size; |
777 | 0 | TGetTabletReplicaInfosRequest request; |
778 | 0 | TGetTabletReplicaInfosResult result; |
779 | 0 | for (int i = start; i < end; i++) { |
780 | 0 | request.tablet_ids.emplace_back(all_tablets[i]->tablet_id()); |
781 | 0 | } |
782 | 0 | Status rpc_st = ThriftRpcHelper::rpc<FrontendServiceClient>( |
783 | 0 | master_addr.hostname, master_addr.port, |
784 | 0 | [&request, &result](FrontendServiceConnection& client) { |
785 | 0 | client->getTabletReplicaInfos(result, request); |
786 | 0 | }); |
787 | |
788 | 0 | if (!rpc_st.ok()) { |
789 | 0 | LOG(WARNING) << "Failed to get tablet replica infos, encounter rpc failure, " |
790 | 0 | "tablet start: " |
791 | 0 | << start << " end: " << end; |
792 | 0 | continue; |
793 | 0 | } |
794 | | |
795 | 0 | std::unique_lock<std::mutex> lock(_peer_replica_infos_mutex); |
796 | 0 | for (const auto& it : result.tablet_replica_infos) { |
797 | 0 | auto tablet_id = it.first; |
798 | 0 | auto tablet = _tablet_manager->get_tablet(tablet_id); |
799 | 0 | if (tablet == nullptr) { |
800 | 0 | VLOG_CRITICAL << "tablet ptr is nullptr"; |
801 | 0 | continue; |
802 | 0 | } |
803 | | |
804 | 0 | VLOG_NOTICE << tablet_id << " tablet has " << it.second.size() << " replicas"; |
805 | 0 | uint64_t min_modulo = MOD_PRIME; |
806 | 0 | TReplicaInfo peer_replica; |
807 | 0 | for (const auto& replica : it.second) { |
808 | 0 | int64_t peer_replica_id = replica.replica_id; |
809 | 0 | uint64_t modulo = HashUtil::hash64(&peer_replica_id, sizeof(peer_replica_id), |
810 | 0 | DEFAULT_SEED) % |
811 | 0 | MOD_PRIME; |
812 | 0 | if (modulo < min_modulo) { |
813 | 0 | peer_replica = replica; |
814 | 0 | min_modulo = modulo; |
815 | 0 | } |
816 | 0 | } |
817 | 0 | VLOG_NOTICE << "tablet " << tablet_id << ", peer replica host is " |
818 | 0 | << peer_replica.host; |
819 | 0 | _peer_replica_infos[tablet_id] = peer_replica; |
820 | 0 | } |
821 | 0 | _token = result.token; |
822 | 0 | VLOG_NOTICE << "get tablet replica infos from fe, size is " << end - start |
823 | 0 | << " token = " << result.token; |
824 | 0 | start = end; |
825 | 0 | } |
826 | 0 | interval = config::update_replica_infos_interval_seconds; |
827 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
828 | 0 | } |
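The peer-replica selection above is deterministic: each replica id is hashed and the replica with the smallest value modulo MOD_PRIME wins, so every BE derives the same peer without coordination. A sketch of the same idea with std::hash standing in for HashUtil::hash64 (a hypothetical substitution; the real code seeds a 64-bit hash with DEFAULT_SEED):

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <string>
    #include <vector>

    struct ReplicaInfo {
        int64_t replica_id;
        std::string host;
    };

    // Every node computes the same winner: smallest hash(replica_id) % mod_prime.
    ReplicaInfo pick_peer(const std::vector<ReplicaInfo>& replicas, uint64_t mod_prime = 7652413) {
        ReplicaInfo best {};
        uint64_t min_modulo = mod_prime;
        for (const auto& r : replicas) {
            uint64_t modulo = std::hash<int64_t> {}(r.replica_id) % mod_prime;
            if (modulo < min_modulo) {
                min_modulo = modulo;
                best = r;
            }
        }
        return best;
    }

    int main() {
        std::vector<ReplicaInfo> replicas = {{1001, "be-a"}, {1002, "be-b"}, {1003, "be-c"}};
        std::printf("chosen peer: %s\n", pick_peer(replicas).host.c_str());
        return 0;
    }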
829 | | |
830 | | Status StorageEngine::_submit_single_replica_compaction_task(TabletSharedPtr tablet, |
831 | 0 | CompactionType compaction_type) { |
832 | | // For single replica compaction, the local version to be merged is determined based on the version fetched from the peer replica. |
833 | | // Therefore, it is currently not possible to determine whether it should be a base compaction or cumulative compaction. |
834 | | // As a result, the tablet needs to be pushed to both the _tablet_submitted_cumu_compaction and the _tablet_submitted_base_compaction simultaneously. |
835 | 0 | bool already_exist = |
836 | 0 | _compaction_submit_registry.insert(tablet, CompactionType::CUMULATIVE_COMPACTION); |
837 | 0 | if (already_exist) { |
838 | 0 | return Status::AlreadyExist<false>( |
839 | 0 | "compaction task has already been submitted, tablet_id={}", tablet->tablet_id()); |
840 | 0 | } |
841 | | |
842 | 0 | already_exist = _compaction_submit_registry.insert(tablet, CompactionType::BASE_COMPACTION); |
843 | 0 | if (already_exist) { |
844 | 0 | _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION); |
845 | 0 | return Status::AlreadyExist<false>( |
846 | 0 | "compaction task has already been submitted, tablet_id={}", tablet->tablet_id()); |
847 | 0 | } |
848 | | |
849 | 0 | auto compaction = std::make_shared<SingleReplicaCompaction>(*this, tablet, compaction_type); |
850 | 0 | DorisMetrics::instance()->single_compaction_request_total->increment(1); |
851 | 0 | auto st = compaction->prepare_compact(); |
852 | |
853 | 0 | auto clean_single_replica_compaction = [tablet, this]() { |
854 | 0 | _pop_tablet_from_submitted_compaction(tablet, CompactionType::CUMULATIVE_COMPACTION); |
855 | 0 | _pop_tablet_from_submitted_compaction(tablet, CompactionType::BASE_COMPACTION); |
856 | 0 | }; |
857 | |
858 | 0 | if (!st.ok()) { |
859 | 0 | clean_single_replica_compaction(); |
860 | 0 | if (!st.is<ErrorCode::CUMULATIVE_NO_SUITABLE_VERSION>()) { |
861 | 0 | LOG(WARNING) << "failed to prepare single replica compaction, tablet_id=" |
862 | 0 | << tablet->tablet_id() << " : " << st; |
863 | 0 | return st; |
864 | 0 | } |
865 | 0 | return Status::OK(); // No suitable version, regard as OK |
866 | 0 | } |
867 | | |
868 | 0 | auto submit_st = _single_replica_compaction_thread_pool->submit_func( |
869 | 0 | [tablet, compaction = std::move(compaction), |
870 | 0 | clean_single_replica_compaction]() mutable { |
871 | 0 | tablet->execute_single_replica_compaction(*compaction); |
872 | 0 | clean_single_replica_compaction(); |
873 | 0 | }); |
874 | 0 | if (!submit_st.ok()) { |
875 | 0 | clean_single_replica_compaction(); |
876 | 0 | return Status::InternalError( |
877 | 0 | "failed to submit single replica compaction task to thread pool, " |
878 | 0 | "tablet_id={}", |
879 | 0 | tablet->tablet_id()); |
880 | 0 | } |
881 | 0 | return Status::OK(); |
882 | 0 | } |
883 | | |
884 | | void StorageEngine::get_tablet_rowset_versions(const PGetTabletVersionsRequest* request, |
885 | 0 | PGetTabletVersionsResponse* response) { |
886 | 0 | TabletSharedPtr tablet = _tablet_manager->get_tablet(request->tablet_id()); |
887 | 0 | if (tablet == nullptr) { |
888 | 0 | response->mutable_status()->set_status_code(TStatusCode::CANCELLED); |
889 | 0 | return; |
890 | 0 | } |
891 | 0 | std::vector<Version> local_versions = tablet->get_all_local_versions(); |
892 | 0 | for (const auto& local_version : local_versions) { |
893 | 0 | auto version = response->add_versions(); |
894 | 0 | version->set_first(local_version.first); |
895 | 0 | version->set_second(local_version.second); |
896 | 0 | } |
897 | 0 | response->mutable_status()->set_status_code(0); |
898 | 0 | } |
899 | | |
900 | | bool need_generate_compaction_tasks(int task_cnt_per_disk, int thread_per_disk, |
901 | 0 | CompactionType compaction_type, bool all_base) { |
902 | | // We need to reserve at least one Slot for cumulative compaction. |
903 | | // So when there is only one Slot, we have to judge whether there is a cumulative compaction |
904 | | // in the current submitted tasks. |
905 | | // If so, the last Slot can be assigned to Base compaction, |
906 | | // otherwise, this Slot needs to be reserved for cumulative compaction. |
907 | 0 | if (task_cnt_per_disk >= thread_per_disk) { |
908 | | // Return if no available slot |
909 | 0 | return false; |
910 | 0 | } else if (task_cnt_per_disk >= thread_per_disk - 1) { |
911 | | // Only one slot left, check if it can be assigned to base compaction task. |
912 | 0 | if (compaction_type == CompactionType::BASE_COMPACTION) { |
913 | 0 | if (all_base) { |
914 | 0 | return false; |
915 | 0 | } |
916 | 0 | } |
917 | 0 | } |
918 | 0 | return true; |
919 | 0 | } |
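The rule above reserves the last free compaction slot on a disk for cumulative compaction; a base compaction may only take that last slot when a cumulative task has already been submitted for the disk. A standalone copy of the predicate with a few illustrative inputs (the slot counts are made up):

    #include <cstdio>

    enum class Kind { kBase, kCumulative };

    // Same decision as need_generate_compaction_tasks: all_base means no cumulative
    // compaction has been submitted for this disk yet.
    bool can_submit(int running, int slots, Kind kind, bool all_base) {
        if (running >= slots) return false;                // no free slot at all
        if (running >= slots - 1 && kind == Kind::kBase) { // exactly one slot left
            return !all_base; // keep the last slot for cumulative unless one is already submitted
        }
        return true;
    }

    int main() {
        // 4 slots per disk (made-up), 3 tasks already submitted:
        std::printf("base, cumu already submitted -> %d\n", can_submit(3, 4, Kind::kBase, false));      // 1
        std::printf("base, only base submitted    -> %d\n", can_submit(3, 4, Kind::kBase, true));       // 0
        std::printf("cumulative                   -> %d\n", can_submit(3, 4, Kind::kCumulative, true)); // 1
        std::printf("disk already full            -> %d\n", can_submit(4, 4, Kind::kCumulative, true)); // 0
        return 0;
    }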
920 | | |
921 | 0 | int get_concurrent_per_disk(int max_score, int thread_per_disk) { |
922 | 0 | if (!config::enable_compaction_priority_scheduling) { |
923 | 0 | return thread_per_disk; |
924 | 0 | } |
925 | | |
926 | 0 | double load_average = 0; |
927 | 0 | if (DorisMetrics::instance()->system_metrics() != nullptr) { |
928 | 0 | load_average = DorisMetrics::instance()->system_metrics()->get_load_average_1_min(); |
929 | 0 | } |
930 | 0 | int num_cores = doris::CpuInfo::num_cores(); |
931 | 0 | bool cpu_usage_high = load_average > num_cores * 0.8; |
932 | |
|
933 | 0 | auto process_memory_usage = doris::GlobalMemoryArbitrator::process_memory_usage(); |
934 | 0 | bool memory_usage_high = process_memory_usage > MemInfo::soft_mem_limit() * 0.8; |
935 | |
|
936 | 0 | if (max_score <= config::low_priority_compaction_score_threshold && |
937 | 0 | (cpu_usage_high || memory_usage_high)) { |
938 | 0 | return config::low_priority_compaction_task_num_per_disk; |
939 | 0 | } |
940 | | |
941 | 0 | return thread_per_disk; |
942 | 0 | } |
943 | | |
944 | 0 | int32_t disk_compaction_slot_num(const DataDir& data_dir) { |
945 | 0 | return data_dir.is_ssd_disk() ? config::compaction_task_num_per_fast_disk |
946 | 0 | : config::compaction_task_num_per_disk; |
947 | 0 | } |
948 | | |
949 | | bool has_free_compaction_slot(CompactionSubmitRegistry* registry, DataDir* dir, |
950 | 0 | CompactionType compaction_type, uint32_t executing_cnt) { |
951 | 0 | int32_t thread_per_disk = disk_compaction_slot_num(*dir); |
952 | 0 | return need_generate_compaction_tasks( |
953 | 0 | executing_cnt, thread_per_disk, compaction_type, |
954 | 0 | !registry->has_compaction_task(dir, CompactionType::CUMULATIVE_COMPACTION)); |
955 | 0 | } |
956 | | |
957 | | std::vector<TabletSharedPtr> StorageEngine::_generate_compaction_tasks( |
958 | 0 | CompactionType compaction_type, std::vector<DataDir*>& data_dirs, bool check_score) { |
959 | 0 | _update_cumulative_compaction_policy(); |
960 | 0 | std::vector<TabletSharedPtr> tablets_compaction; |
961 | 0 | uint32_t max_compaction_score = 0; |
962 | |
963 | 0 | std::random_device rd; |
964 | 0 | std::mt19937 g(rd()); |
965 | 0 | std::shuffle(data_dirs.begin(), data_dirs.end(), g); |
966 | | |
967 | | // Copy _tablet_submitted_xxx_compaction map so that we don't need to hold _tablet_submitted_compaction_mutex |
968 | | // when traversing the data dir |
969 | 0 | auto compaction_registry_snapshot = _compaction_submit_registry.create_snapshot(); |
970 | 0 | for (auto* data_dir : data_dirs) { |
971 | 0 | bool need_pick_tablet = true; |
972 | 0 | uint32_t executing_task_num = |
973 | 0 | compaction_registry_snapshot.count_executing_cumu_and_base(data_dir); |
974 | 0 | need_pick_tablet = has_free_compaction_slot(&compaction_registry_snapshot, data_dir, |
975 | 0 | compaction_type, executing_task_num); |
976 | 0 | if (!need_pick_tablet && !check_score) { |
977 | 0 | continue; |
978 | 0 | } |
979 | | |
980 | | // Even if need_pick_tablet is false, we still need to call find_best_tablet_to_compaction(), |
981 | | // so that we can update the max_compaction_score metric. |
982 | 0 | if (!data_dir->reach_capacity_limit(0)) { |
983 | 0 | uint32_t disk_max_score = 0; |
984 | 0 | auto tablets = compaction_registry_snapshot.pick_topn_tablets_for_compaction( |
985 | 0 | _tablet_manager.get(), data_dir, compaction_type, |
986 | 0 | _cumulative_compaction_policies, &disk_max_score); |
987 | 0 | int concurrent_num = |
988 | 0 | get_concurrent_per_disk(disk_max_score, disk_compaction_slot_num(*data_dir)); |
989 | 0 | need_pick_tablet = need_generate_compaction_tasks( |
990 | 0 | executing_task_num, concurrent_num, compaction_type, |
991 | 0 | !compaction_registry_snapshot.has_compaction_task( |
992 | 0 | data_dir, CompactionType::CUMULATIVE_COMPACTION)); |
993 | 0 | for (const auto& tablet : tablets) { |
994 | 0 | if (tablet != nullptr) { |
995 | 0 | if (need_pick_tablet) { |
996 | 0 | tablets_compaction.emplace_back(tablet); |
997 | 0 | } |
998 | 0 | max_compaction_score = std::max(max_compaction_score, disk_max_score); |
999 | 0 | } |
1000 | 0 | } |
1001 | 0 | } |
1002 | 0 | } |
1003 | |
1004 | 0 | if (max_compaction_score > 0) { |
1005 | 0 | if (compaction_type == CompactionType::BASE_COMPACTION) { |
1006 | 0 | DorisMetrics::instance()->tablet_base_max_compaction_score->set_value( |
1007 | 0 | max_compaction_score); |
1008 | 0 | } else { |
1009 | 0 | DorisMetrics::instance()->tablet_cumulative_max_compaction_score->set_value( |
1010 | 0 | max_compaction_score); |
1011 | 0 | } |
1012 | 0 | } |
1013 | 0 | return tablets_compaction; |
1014 | 0 | } |
1015 | | |
1016 | 0 | void StorageEngine::_update_cumulative_compaction_policy() { |
1017 | 0 | if (_cumulative_compaction_policies.empty()) { |
1018 | 0 | _cumulative_compaction_policies[CUMULATIVE_SIZE_BASED_POLICY] = |
1019 | 0 | CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy( |
1020 | 0 | CUMULATIVE_SIZE_BASED_POLICY); |
1021 | 0 | _cumulative_compaction_policies[CUMULATIVE_TIME_SERIES_POLICY] = |
1022 | 0 | CumulativeCompactionPolicyFactory::create_cumulative_compaction_policy( |
1023 | 0 | CUMULATIVE_TIME_SERIES_POLICY); |
1024 | 0 | } |
1025 | 0 | } |
1026 | | |
1027 | | void StorageEngine::_pop_tablet_from_submitted_compaction(TabletSharedPtr tablet, |
1028 | 0 | CompactionType compaction_type) { |
1029 | 0 | _compaction_submit_registry.remove(tablet, compaction_type, [this]() { |
1030 | 0 | std::unique_lock<std::mutex> lock(_compaction_producer_sleep_mutex); |
1031 | 0 | _wakeup_producer_flag = 1; |
1032 | 0 | _compaction_producer_sleep_cv.notify_one(); |
1033 | 0 | }); |
1034 | 0 | } |
1035 | | |
1036 | | Status StorageEngine::_submit_compaction_task(TabletSharedPtr tablet, |
1037 | 10 | CompactionType compaction_type, bool force) { |
1038 | 10 | if (tablet->tablet_meta()->tablet_schema()->enable_single_replica_compaction() && |
1039 | 10 | should_fetch_from_peer(tablet->tablet_id())) { |
1040 | 0 | VLOG_CRITICAL << "start to submit single replica compaction task for tablet: " |
1041 | 0 | << tablet->tablet_id(); |
1042 | 0 | Status st = _submit_single_replica_compaction_task(tablet, compaction_type); |
1043 | 0 | if (!st.ok()) { |
1044 | 0 | LOG(WARNING) << "failed to submit single replica compaction task for tablet: " |
1045 | 0 | << tablet->tablet_id() << ", err: " << st; |
1046 | 0 | } |
1047 | |
1048 | 0 | return Status::OK(); |
1049 | 0 | } |
1050 | 10 | bool already_exist = _compaction_submit_registry.insert(tablet, compaction_type); |
1051 | 10 | if (already_exist) { |
1052 | 0 | return Status::AlreadyExist<false>( |
1053 | 0 | "compaction task has already been submitted, tablet_id={}, compaction_type={}.", |
1054 | 0 | tablet->tablet_id(), compaction_type); |
1055 | 0 | } |
1056 | 10 | tablet->compaction_stage = CompactionStage::PENDING; |
1057 | 10 | std::shared_ptr<CompactionMixin> compaction; |
1058 | 10 | int64_t permits = 0; |
1059 | 10 | Status st = Tablet::prepare_compaction_and_calculate_permits(compaction_type, tablet, |
1060 | 10 | compaction, permits); |
1061 | 10 | if (st.ok() && permits > 0) { |
1062 | 10 | if (!force) { |
1063 | 10 | _permit_limiter.request(permits); |
1064 | 10 | } |
1065 | 10 | std::unique_ptr<ThreadPool>& thread_pool = |
1066 | 10 | (compaction_type == CompactionType::CUMULATIVE_COMPACTION) |
1067 | 10 | ? _cumu_compaction_thread_pool |
1068 | 10 | : _base_compaction_thread_pool; |
1069 | 10 | auto st = thread_pool->submit_func([tablet, compaction = std::move(compaction), |
1070 | 10 | compaction_type, permits, force, this]() { |
1071 | 2 | if (!tablet->can_do_compaction(tablet->data_dir()->path_hash(), compaction_type)) { |
1072 | 0 | LOG(INFO) << "Tablet state has been changed, no need to begin this compaction " |
1073 | 0 | "task, tablet_id=" |
1074 | 0 | << tablet->tablet_id() << ", tablet_state=" << tablet->tablet_state(); |
1075 | 0 | return; |
1076 | 0 | } |
1077 | 2 | tablet->compaction_stage = CompactionStage::EXECUTING; |
1078 | 2 | TEST_SYNC_POINT_RETURN_WITH_VOID("olap_server::execute_compaction"); |
1079 | 0 | tablet->execute_compaction(*compaction); |
1080 | 0 | if (!force) { |
1081 | 0 | _permit_limiter.release(permits); |
1082 | 0 | } |
1083 | 0 | _pop_tablet_from_submitted_compaction(tablet, compaction_type); |
1084 | 0 | tablet->compaction_stage = CompactionStage::NOT_SCHEDULED; |
1085 | 0 | }); |
1086 | 10 | if (!st.ok()) { |
1087 | 0 | if (!force) { |
1088 | 0 | _permit_limiter.release(permits); |
1089 | 0 | } |
1090 | 0 | _pop_tablet_from_submitted_compaction(tablet, compaction_type); |
1091 | 0 | tablet->compaction_stage = CompactionStage::NOT_SCHEDULED; |
1092 | 0 | return Status::InternalError( |
1093 | 0 | "failed to submit compaction task to thread pool, " |
1094 | 0 | "tablet_id={}, compaction_type={}.", |
1095 | 0 | tablet->tablet_id(), compaction_type); |
1096 | 0 | } |
1097 | 10 | return Status::OK(); |
1098 | 10 | } else { |
1099 | 0 | _pop_tablet_from_submitted_compaction(tablet, compaction_type); |
1100 | 0 | tablet->compaction_stage = CompactionStage::NOT_SCHEDULED; |
1101 | 0 | if (!st.ok()) { |
1102 | 0 | return Status::InternalError( |
1103 | 0 | "failed to prepare compaction task and calculate permits, " |
1104 | 0 | "tablet_id={}, compaction_type={}, " |
1105 | 0 | "permit={}, current_permit={}, status={}", |
1106 | 0 | tablet->tablet_id(), compaction_type, permits, _permit_limiter.usage(), |
1107 | 0 | st.to_string()); |
1108 | 0 | } |
1109 | 0 | return st; |
1110 | 0 | } |
1111 | 10 | } |
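The submit path above brackets the compaction with permit accounting: permits are requested before the task is queued and released either by the worker after the compaction runs or immediately if queueing fails. A condensed sketch of that acquire / hand-off / release shape with simplified, hypothetical types (the real _permit_limiter may block or throttle inside request()):

    #include <cstdint>
    #include <cstdio>
    #include <functional>

    class PermitLimiter {
    public:
        void request(int64_t permits) { _usage += permits; } // the real limiter may throttle here
        void release(int64_t permits) { _usage -= permits; }
        int64_t usage() const { return _usage; }

    private:
        int64_t _usage = 0;
    };

    // submit_fn stands in for ThreadPool::submit_func and returns false when queueing fails.
    bool submit_compaction(PermitLimiter& limiter, int64_t permits,
                           const std::function<bool(std::function<void()>)>& submit_fn,
                           const std::function<void()>& run_compaction) {
        limiter.request(permits);
        bool queued = submit_fn([&limiter, permits, run_compaction]() {
            run_compaction();
            limiter.release(permits); // released by the worker once the task finishes
        });
        if (!queued) {
            limiter.release(permits); // undo immediately if the task never reached the pool
        }
        return queued;
    }

    int main() {
        PermitLimiter limiter;
        auto inline_pool = [](std::function<void()> task) { task(); return true; };
        submit_compaction(limiter, 10, inline_pool, [] { std::printf("compacting...\n"); });
        std::printf("permit usage after completion: %lld\n", static_cast<long long>(limiter.usage()));
        return 0;
    }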
1112 | | |
1113 | | Status StorageEngine::submit_compaction_task(TabletSharedPtr tablet, CompactionType compaction_type, |
1114 | 0 | bool force, bool eager) { |
1115 | 0 | if (!eager) { |
1116 | 0 | DCHECK(compaction_type == CompactionType::BASE_COMPACTION || |
1117 | 0 | compaction_type == CompactionType::CUMULATIVE_COMPACTION); |
1118 | 0 | auto compaction_registry_snapshot = _compaction_submit_registry.create_snapshot(); |
1119 | 0 | auto stores = get_stores(); |
1120 | |
|
1121 | 0 | bool is_busy = std::none_of( |
1122 | 0 | stores.begin(), stores.end(), |
1123 | 0 | [&compaction_registry_snapshot, compaction_type](auto* data_dir) { |
1124 | 0 | return has_free_compaction_slot( |
1125 | 0 | &compaction_registry_snapshot, data_dir, compaction_type, |
1126 | 0 | compaction_registry_snapshot.count_executing_cumu_and_base(data_dir)); |
1127 | 0 | }); |
1128 | 0 | if (is_busy) { |
1129 | 0 | LOG_EVERY_N(WARNING, 100) |
1130 | 0 | << "Too busy to submit a compaction task, table_id=" << tablet->get_table_id(); |
1131 | 0 | return Status::OK(); |
1132 | 0 | } |
1133 | 0 | } |
1134 | 0 | _update_cumulative_compaction_policy(); |
1135 | | // alter table tableName set ("compaction_policy"="time_series") |
1136 | | // if the table's compaction policy has been altered, we need to update the tablet's compaction policy shared ptr |
1137 | 0 | if (tablet->get_cumulative_compaction_policy() == nullptr || |
1138 | 0 | tablet->get_cumulative_compaction_policy()->name() != |
1139 | 0 | tablet->tablet_meta()->compaction_policy()) { |
1140 | 0 | tablet->set_cumulative_compaction_policy( |
1141 | 0 | _cumulative_compaction_policies.at(tablet->tablet_meta()->compaction_policy())); |
1142 | 0 | } |
1143 | 0 | tablet->set_skip_compaction(false); |
1144 | 0 | return _submit_compaction_task(tablet, compaction_type, force); |
1145 | 0 | } |
1146 | | |
1147 | | Status StorageEngine::_handle_seg_compaction(std::shared_ptr<SegcompactionWorker> worker, |
1148 | | SegCompactionCandidatesSharedPtr segments, |
1149 | 11 | uint64_t submission_time) { |
1150 | | // note: be aware that worker->_writer may be released when the task is cancelled |
1151 | 11 | uint64_t exec_queue_time = GetCurrentTimeMicros() - submission_time; |
1152 | 11 | LOG(INFO) << "segcompaction thread pool queue time(ms): " << exec_queue_time / 1000; |
1153 | 11 | worker->compact_segments(segments); |
1154 | | // return OK here. error will be reported via BetaRowsetWriter::_segcompaction_status |
1155 | 11 | return Status::OK(); |
1156 | 11 | } |
1157 | | |
1158 | | Status StorageEngine::submit_seg_compaction_task(std::shared_ptr<SegcompactionWorker> worker, |
1159 | 11 | SegCompactionCandidatesSharedPtr segments) { |
1160 | 11 | uint64_t submission_time = GetCurrentTimeMicros(); |
1161 | 11 | return _seg_compaction_thread_pool->submit_func([this, worker, segments, submission_time] { |
1162 | 11 | static_cast<void>(_handle_seg_compaction(worker, segments, submission_time)); |
1163 | 11 | }); |
1164 | 11 | } |
1165 | | |
1166 | 0 | Status StorageEngine::process_index_change_task(const TAlterInvertedIndexReq& request) { |
1167 | 0 | auto tablet_id = request.tablet_id; |
1168 | 0 | TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id); |
1169 | 0 | DBUG_EXECUTE_IF("StorageEngine::process_index_change_task_tablet_nullptr", |
1170 | 0 | { tablet = nullptr; }) |
1171 | 0 | if (tablet == nullptr) { |
1172 | 0 | LOG(WARNING) << "tablet: " << tablet_id << " does not exist"; |
1173 | 0 | return Status::InternalError("tablet does not exist, tablet_id={}.", tablet_id); |
1174 | 0 | } |
1175 | | |
1176 | 0 | IndexBuilderSharedPtr index_builder = std::make_shared<IndexBuilder>( |
1177 | 0 | *this, tablet, request.columns, request.alter_inverted_indexes, request.is_drop_op); |
1178 | 0 | RETURN_IF_ERROR(_handle_index_change(index_builder)); |
1179 | 0 | return Status::OK(); |
1180 | 0 | } |
1181 | | |
1182 | 0 | Status StorageEngine::_handle_index_change(IndexBuilderSharedPtr index_builder) { |
1183 | 0 | RETURN_IF_ERROR(index_builder->init()); |
1184 | 0 | RETURN_IF_ERROR(index_builder->do_build_inverted_index()); |
1185 | 0 | return Status::OK(); |
1186 | 0 | } |
1187 | | |
1188 | 0 | void StorageEngine::_cooldown_tasks_producer_callback() { |
1189 | 0 | int64_t interval = config::generate_cooldown_task_interval_sec; |
1190 | | // the cooldown replica may be slow to upload its meta file, so we should wait |
1191 | | // until the upload has finished |
1192 | 0 | int64_t skip_failed_interval = interval * 10; |
1193 | 0 | do { |
1194 | | // these tablets are ordered by priority desc |
1195 | 0 | std::vector<TabletSharedPtr> tablets; |
1196 | 0 | std::vector<RowsetSharedPtr> rowsets; |
1197 | | // TODO(luwei) : a more efficient way to get cooldown tablets |
1198 | 0 | auto cur_time = time(nullptr); |
1199 | | // we should skip all the tablets which are not running and those pending to do cooldown, |
1200 | | // as well as tablets that recently failed to follow cooldown |
1201 | 0 | auto skip_tablet = [this, skip_failed_interval, |
1202 | 0 | cur_time](const TabletSharedPtr& tablet) -> bool { |
1203 | 0 | bool is_skip = |
1204 | 0 | cur_time - tablet->last_failed_follow_cooldown_time() < skip_failed_interval || |
1205 | 0 | TABLET_RUNNING != tablet->tablet_state(); |
1206 | 0 | if (is_skip) { |
1207 | 0 | return is_skip; |
1208 | 0 | } |
1209 | 0 | std::lock_guard<std::mutex> lock(_running_cooldown_mutex); |
1210 | 0 | return _running_cooldown_tablets.find(tablet->tablet_id()) != |
1211 | 0 | _running_cooldown_tablets.end(); |
1212 | 0 | }; |
1213 | 0 | _tablet_manager->get_cooldown_tablets(&tablets, &rowsets, std::move(skip_tablet)); |
1214 | 0 | LOG(INFO) << "cooldown producer get tablet num: " << tablets.size(); |
1215 | 0 | int max_priority = tablets.size(); |
1216 | 0 | int index = 0; |
1217 | 0 | for (const auto& tablet : tablets) { |
1218 | 0 | { |
1219 | 0 | std::lock_guard<std::mutex> lock(_running_cooldown_mutex); |
1220 | 0 | _running_cooldown_tablets.insert(tablet->tablet_id()); |
1221 | 0 | } |
1222 | 0 | PriorityThreadPool::Task task; |
1223 | 0 | RowsetSharedPtr rowset = std::move(rowsets[index++]); |
1224 | 0 | task.work_function = [tablet, rowset, task_size = tablets.size(), this]() { |
1225 | 0 | Status st = tablet->cooldown(rowset); |
1226 | 0 | { |
1227 | 0 | std::lock_guard<std::mutex> lock(_running_cooldown_mutex); |
1228 | 0 | _running_cooldown_tablets.erase(tablet->tablet_id()); |
1229 | 0 | } |
1230 | 0 | if (!st.ok()) { |
1231 | 0 | LOG(WARNING) << "failed to cooldown, tablet: " << tablet->tablet_id() |
1232 | 0 | << " err: " << st; |
1233 | 0 | } else { |
1234 | 0 | LOG(INFO) << "succeed to cooldown, tablet: " << tablet->tablet_id() |
1235 | 0 | << " cooldown progress (" |
1236 | 0 | << task_size - _cooldown_thread_pool->get_queue_size() << "/" |
1237 | 0 | << task_size << ")"; |
1238 | 0 | } |
1239 | 0 | }; |
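| | // tablets are ordered by priority desc (see above), so earlier tablets get a larger priority value |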
1240 | 0 | task.priority = max_priority--; |
1241 | 0 | bool submitted = _cooldown_thread_pool->offer(std::move(task)); |
1242 | |
1243 | 0 | if (!submitted) { |
1244 | 0 | LOG(INFO) << "failed to submit cooldown task"; |
1245 | 0 | } |
1246 | 0 | } |
1247 | 0 | } while (!_stop_background_threads_latch.wait_for(std::chrono::seconds(interval))); |
1248 | 0 | } |
1249 | | |
1250 | 0 | void StorageEngine::_remove_unused_remote_files_callback() { |
1251 | 0 | while (!_stop_background_threads_latch.wait_for( |
1252 | 0 | std::chrono::seconds(config::remove_unused_remote_files_interval_sec))) { |
1253 | 0 | LOG(INFO) << "begin to remove unused remote files"; |
1254 | 0 | do_remove_unused_remote_files(); |
1255 | 0 | } |
1256 | 0 | } |
1257 | | |
1258 | 0 | void StorageEngine::do_remove_unused_remote_files() { |
1259 | 0 | auto tablets = tablet_manager()->get_all_tablet([](Tablet* t) { |
1260 | 0 | return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() && |
1261 | 0 | t->tablet_state() == TABLET_RUNNING && |
1262 | 0 | t->cooldown_conf_unlocked().cooldown_replica_id == t->replica_id(); |
1263 | 0 | }); |
1264 | 0 | TConfirmUnusedRemoteFilesRequest req; |
1265 | 0 | req.__isset.confirm_list = true; |
1266 | | // tablet_id -> [storage_resource, unused_remote_files] |
1267 | 0 | using unused_remote_files_buffer_t = |
1268 | 0 | std::unordered_map<int64_t, std::pair<StorageResource, std::vector<io::FileInfo>>>; |
1269 | 0 | unused_remote_files_buffer_t buffer; |
1270 | 0 | int64_t num_files_in_buffer = 0; |
1271 | | // assume a filename is ~0.1KB; with at most 1,000,000 files the buffer size should not be larger than 1,000,000 * 0.1KB = 100MB |
1272 | 0 | constexpr int64_t max_files_in_buffer = 1000000; |
1273 | |
1274 | 0 | auto calc_unused_remote_files = [&req, &buffer, &num_files_in_buffer, this](Tablet* t) { |
1275 | 0 | auto storage_resource = get_resource_by_storage_policy_id(t->storage_policy_id()); |
1276 | 0 | if (!storage_resource) { |
1277 | 0 | LOG(WARNING) << "encounter error when remove unused remote files, tablet_id=" |
1278 | 0 | << t->tablet_id() << " : " << storage_resource.error(); |
1279 | 0 | return; |
1280 | 0 | } |
1281 | | |
1282 | | // TODO(plat1ko): Support path v1 |
1283 | 0 | if (storage_resource->path_version > 0) { |
1284 | 0 | return; |
1285 | 0 | } |
1286 | | |
1287 | 0 | std::vector<io::FileInfo> files; |
1288 | | // FIXME(plat1ko): What if user reset resource in storage policy to another resource? |
1289 | | // Maybe we should also list files in previously uploaded resources. |
1290 | 0 | bool exists = true; |
1291 | 0 | auto st = storage_resource->fs->list(storage_resource->remote_tablet_path(t->tablet_id()), |
1292 | 0 | true, &files, &exists); |
1293 | 0 | if (!st.ok()) { |
1294 | 0 | LOG(WARNING) << "encounter error when remove unused remote files, tablet_id=" |
1295 | 0 | << t->tablet_id() << " : " << st; |
1296 | 0 | return; |
1297 | 0 | } |
1298 | 0 | if (!exists || files.empty()) { |
1299 | 0 | return; |
1300 | 0 | } |
1301 | | // get all cooldowned rowsets |
1302 | 0 | RowsetIdUnorderedSet cooldowned_rowsets; |
1303 | 0 | UniqueId cooldown_meta_id; |
1304 | 0 | { |
1305 | 0 | std::shared_lock rlock(t->get_header_lock()); |
1306 | 0 | for (auto&& rs_meta : t->tablet_meta()->all_rs_metas()) { |
1307 | 0 | if (!rs_meta->is_local()) { |
1308 | 0 | cooldowned_rowsets.insert(rs_meta->rowset_id()); |
1309 | 0 | } |
1310 | 0 | } |
1311 | 0 | if (cooldowned_rowsets.empty()) { |
1312 | 0 | return; |
1313 | 0 | } |
1314 | 0 | cooldown_meta_id = t->tablet_meta()->cooldown_meta_id(); |
1315 | 0 | } |
1316 | 0 | auto [cooldown_replica_id, cooldown_term] = t->cooldown_conf(); |
1317 | 0 | if (cooldown_replica_id != t->replica_id()) { |
1318 | 0 | return; |
1319 | 0 | } |
1320 | | // {cooldown_replica_id}.{cooldown_term}.meta |
1321 | 0 | std::string remote_meta_path = |
1322 | 0 | cooldown_tablet_meta_filename(cooldown_replica_id, cooldown_term); |
1323 | | // filter out the paths that should be reserved |
1324 | 0 | auto filter = [&, this](io::FileInfo& info) { |
1325 | 0 | std::string_view filename = info.file_name; |
1326 | 0 | if (filename.ends_with(".meta")) { |
1327 | 0 | return filename == remote_meta_path; |
1328 | 0 | } |
1329 | 0 | auto rowset_id = extract_rowset_id(filename); |
1330 | 0 | if (rowset_id.hi == 0) { |
1331 | 0 | return false; |
1332 | 0 | } |
1333 | 0 | return cooldowned_rowsets.contains(rowset_id) || |
1334 | 0 | pending_remote_rowsets().contains(rowset_id); |
1335 | 0 | }; |
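| | // the filter returns true for files that must be kept, so remove_if leaves only the unused files behind |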
1336 | 0 | files.erase(std::remove_if(files.begin(), files.end(), std::move(filter)), files.end()); |
1337 | 0 | if (files.empty()) { |
1338 | 0 | return; |
1339 | 0 | } |
1340 | 0 | files.shrink_to_fit(); |
1341 | 0 | num_files_in_buffer += files.size(); |
1342 | 0 | buffer.insert({t->tablet_id(), {*storage_resource, std::move(files)}}); |
1343 | 0 | auto& info = req.confirm_list.emplace_back(); |
1344 | 0 | info.__set_tablet_id(t->tablet_id()); |
1345 | 0 | info.__set_cooldown_replica_id(cooldown_replica_id); |
1346 | 0 | info.__set_cooldown_meta_id(cooldown_meta_id.to_thrift()); |
1347 | 0 | }; |
1348 | |
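| | // ask FE to confirm which tablets' file lists are safe to delete, then batch-delete the confirmed files |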
1349 | 0 | auto confirm_and_remove_files = [&buffer, &req, &num_files_in_buffer]() { |
1350 | 0 | TConfirmUnusedRemoteFilesResult result; |
1351 | 0 | LOG(INFO) << "begin to confirm unused remote files. num_tablets=" << buffer.size() |
1352 | 0 | << " num_files=" << num_files_in_buffer; |
1353 | 0 | auto st = MasterServerClient::instance()->confirm_unused_remote_files(req, &result); |
1354 | 0 | if (!st.ok()) { |
1355 | 0 | LOG(WARNING) << st; |
1356 | 0 | return; |
1357 | 0 | } |
1358 | 0 | for (auto id : result.confirmed_tablets) { |
1359 | 0 | if (auto it = buffer.find(id); LIKELY(it != buffer.end())) { |
1360 | 0 | auto& storage_resource = it->second.first; |
1361 | 0 | auto& files = it->second.second; |
1362 | 0 | std::vector<io::Path> paths; |
1363 | 0 | paths.reserve(files.size()); |
1364 | | // delete unused files |
1365 | 0 | LOG(INFO) << "delete unused files. root_path=" << storage_resource.fs->root_path() |
1366 | 0 | << " tablet_id=" << id; |
1367 | 0 | io::Path dir = storage_resource.remote_tablet_path(id); |
1368 | 0 | for (auto& file : files) { |
1369 | 0 | auto file_path = dir / file.file_name; |
1370 | 0 | LOG(INFO) << "delete unused file: " << file_path.native(); |
1371 | 0 | paths.push_back(std::move(file_path)); |
1372 | 0 | } |
1373 | 0 | st = storage_resource.fs->batch_delete(paths); |
1374 | 0 | if (!st.ok()) { |
1375 | 0 | LOG(WARNING) << "failed to delete unused files, tablet_id=" << id << " : " |
1376 | 0 | << st; |
1377 | 0 | } |
1378 | 0 | buffer.erase(it); |
1379 | 0 | } |
1380 | 0 | } |
1381 | 0 | }; |
1382 | | |
1383 | | // batch confirm to reduce FE's overhead |
1384 | 0 | auto next_confirm_time = std::chrono::steady_clock::now() + |
1385 | 0 | std::chrono::seconds(config::confirm_unused_remote_files_interval_sec); |
1386 | 0 | for (auto& t : tablets) { |
1387 | 0 | if (t.use_count() <= 1 // this means tablet has been dropped |
1388 | 0 | || t->cooldown_conf_unlocked().cooldown_replica_id != t->replica_id() || |
1389 | 0 | t->tablet_state() != TABLET_RUNNING) { |
1390 | 0 | continue; |
1391 | 0 | } |
1392 | 0 | calc_unused_remote_files(t.get()); |
1393 | 0 | if (num_files_in_buffer > 0 && (num_files_in_buffer > max_files_in_buffer || |
1394 | 0 | std::chrono::steady_clock::now() > next_confirm_time)) { |
1395 | 0 | confirm_and_remove_files(); |
1396 | 0 | buffer.clear(); |
1397 | 0 | req.confirm_list.clear(); |
1398 | 0 | num_files_in_buffer = 0; |
1399 | 0 | next_confirm_time = |
1400 | 0 | std::chrono::steady_clock::now() + |
1401 | 0 | std::chrono::seconds(config::confirm_unused_remote_files_interval_sec); |
1402 | 0 | } |
1403 | 0 | } |
1404 | 0 | if (num_files_in_buffer > 0) { |
1405 | 0 | confirm_and_remove_files(); |
1406 | 0 | } |
1407 | 0 | } |
1408 | | |
1409 | 0 | void StorageEngine::_cold_data_compaction_producer_callback() { |
1410 | 0 | while (!_stop_background_threads_latch.wait_for( |
1411 | 0 | std::chrono::seconds(config::cold_data_compaction_interval_sec))) { |
1412 | 0 | if (config::disable_auto_compaction || |
1413 | 0 | GlobalMemoryArbitrator::is_exceed_soft_mem_limit(GB_EXCHANGE_BYTE)) { |
1414 | 0 | continue; |
1415 | 0 | } |
1416 | | |
1417 | 0 | std::unordered_set<int64_t> copied_tablet_submitted; |
1418 | 0 | { |
1419 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1420 | 0 | copied_tablet_submitted = _cold_compaction_tablet_submitted; |
1421 | 0 | } |
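| | // n is the number of cold data compaction threads not already occupied by submitted tablets |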
1422 | 0 | int n = config::cold_data_compaction_thread_num - copied_tablet_submitted.size(); |
1423 | 0 | if (n <= 0) { |
1424 | 0 | continue; |
1425 | 0 | } |
1426 | 0 | auto tablets = _tablet_manager->get_all_tablet([&copied_tablet_submitted](Tablet* t) { |
1427 | 0 | return t->tablet_meta()->cooldown_meta_id().initialized() && t->is_used() && |
1428 | 0 | t->tablet_state() == TABLET_RUNNING && |
1429 | 0 | !copied_tablet_submitted.contains(t->tablet_id()) && |
1430 | 0 | !t->tablet_meta()->tablet_schema()->disable_auto_compaction(); |
1431 | 0 | }); |
1432 | 0 | std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_compact; |
1433 | 0 | tablet_to_compact.reserve(n + 1); |
1434 | 0 | std::vector<std::pair<TabletSharedPtr, int64_t>> tablet_to_follow; |
1435 | 0 | tablet_to_follow.reserve(n + 1); |
1436 | |
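| | // keep at most n candidates with the highest scores in each list (sort desc, then drop the smallest) |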
1437 | 0 | for (auto& t : tablets) { |
1438 | 0 | if (t->replica_id() == t->cooldown_conf_unlocked().cooldown_replica_id) { |
1439 | 0 | auto score = t->calc_cold_data_compaction_score(); |
1440 | 0 | if (score < 4) { |
1441 | 0 | continue; |
1442 | 0 | } |
1443 | 0 | tablet_to_compact.emplace_back(t, score); |
1444 | 0 | if (tablet_to_compact.size() > n) { |
1445 | 0 | std::sort(tablet_to_compact.begin(), tablet_to_compact.end(), |
1446 | 0 | [](auto& a, auto& b) { return a.second > b.second; }); |
1447 | 0 | tablet_to_compact.pop_back(); |
1448 | 0 | } |
1449 | 0 | continue; |
1450 | 0 | } |
1451 | | // else, need to follow |
1452 | 0 | { |
1453 | 0 | std::lock_guard lock(_running_cooldown_mutex); |
1454 | 0 | if (_running_cooldown_tablets.contains(t->tablet_id())) { |
1455 | | // already in cooldown queue |
1456 | 0 | continue; |
1457 | 0 | } |
1458 | 0 | } |
1459 | | // TODO(plat1ko): some avoidance strategy if failed to follow |
1460 | 0 | auto score = t->calc_cold_data_compaction_score(); |
1461 | 0 | tablet_to_follow.emplace_back(t, score); |
1462 | |
1463 | 0 | if (tablet_to_follow.size() > n) { |
1464 | 0 | std::sort(tablet_to_follow.begin(), tablet_to_follow.end(), |
1465 | 0 | [](auto& a, auto& b) { return a.second > b.second; }); |
1466 | 0 | tablet_to_follow.pop_back(); |
1467 | 0 | } |
1468 | 0 | } |
1469 | |
1470 | 0 | for (auto& [tablet, score] : tablet_to_compact) { |
1471 | 0 | LOG(INFO) << "submit cold data compaction. tablet_id=" << tablet->tablet_id() |
1472 | 0 | << " score=" << score; |
1473 | 0 | static_cast<void>(_cold_data_compaction_thread_pool->submit_func( |
1474 | 0 | [&, t = std::move(tablet), this]() { |
1475 | 0 | auto compaction = std::make_shared<ColdDataCompaction>(*this, t); |
1476 | 0 | { |
1477 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1478 | 0 | _cold_compaction_tablet_submitted.insert(t->tablet_id()); |
1479 | 0 | } |
1480 | 0 | Defer defer {[&] { |
1481 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1482 | 0 | _cold_compaction_tablet_submitted.erase(t->tablet_id()); |
1483 | 0 | }}; |
1484 | 0 | std::unique_lock cold_compaction_lock(t->get_cold_compaction_lock(), |
1485 | 0 | std::try_to_lock); |
1486 | 0 | if (!cold_compaction_lock.owns_lock()) { |
1487 | 0 | LOG(WARNING) << "try cold_compaction_lock failed, tablet_id=" |
1488 | 0 | << t->tablet_id(); |
1489 | 0 | return; |
1490 | 0 | } |
1491 | | |
1492 | 0 | auto st = compaction->prepare_compact(); |
1493 | 0 | if (!st.ok()) { |
1494 | 0 | LOG(WARNING) << "failed to prepare cold data compaction. tablet_id=" |
1495 | 0 | << t->tablet_id() << " err=" << st; |
1496 | 0 | return; |
1497 | 0 | } |
1498 | | |
1499 | 0 | st = compaction->execute_compact(); |
1500 | 0 | if (!st.ok()) { |
1501 | 0 | LOG(WARNING) << "failed to execute cold data compaction. tablet_id=" |
1502 | 0 | << t->tablet_id() << " err=" << st; |
1503 | 0 | return; |
1504 | 0 | } |
1505 | 0 | })); |
1506 | 0 | } |
1507 | |
1508 | 0 | for (auto& [tablet, score] : tablet_to_follow) { |
1509 | 0 | LOG(INFO) << "submit to follow cooldown meta. tablet_id=" << tablet->tablet_id() |
1510 | 0 | << " score=" << score; |
1511 | 0 | static_cast<void>(_cold_data_compaction_thread_pool->submit_func([&, |
1512 | 0 | t = std::move( |
1513 | 0 | tablet)]() { |
1514 | 0 | { |
1515 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1516 | 0 | _cold_compaction_tablet_submitted.insert(t->tablet_id()); |
1517 | 0 | } |
1518 | 0 | auto st = t->cooldown(); |
1519 | 0 | { |
1520 | 0 | std::lock_guard lock(_cold_compaction_tablet_submitted_mtx); |
1521 | 0 | _cold_compaction_tablet_submitted.erase(t->tablet_id()); |
1522 | 0 | } |
1523 | 0 | if (!st.ok()) { |
1524 | | // The cooldown replica may be relatively slow, |
1525 | | // resulting in a short period during which following cannot succeed |
1526 | 0 | LOG_EVERY_N(WARNING, 5) |
1527 | 0 | << "failed to cooldown. tablet_id=" << t->tablet_id() << " err=" << st; |
1528 | 0 | } |
1529 | 0 | })); |
1530 | 0 | } |
1531 | 0 | } |
1532 | 0 | } |
1533 | | |
1534 | | void StorageEngine::add_async_publish_task(int64_t partition_id, int64_t tablet_id, |
1535 | | int64_t publish_version, int64_t transaction_id, |
1536 | 2.05k | bool is_recovery) { |
1537 | 2.05k | if (!is_recovery) { |
1538 | 2.05k | bool exists = false; |
1539 | 2.05k | { |
1540 | 2.05k | std::shared_lock<std::shared_mutex> rlock(_async_publish_lock); |
1541 | 2.05k | if (auto tablet_iter = _async_publish_tasks.find(tablet_id); |
1542 | 2.05k | tablet_iter != _async_publish_tasks.end()) { |
1543 | 2.05k | if (auto iter = tablet_iter->second.find(publish_version); |
1544 | 2.05k | iter != tablet_iter->second.end()) { |
1545 | 20 | exists = true; |
1546 | 20 | } |
1547 | 2.05k | } |
1548 | 2.05k | } |
1549 | 2.05k | if (exists) { |
1550 | 20 | return; |
1551 | 20 | } |
1552 | 2.03k | TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id); |
1553 | 2.03k | if (tablet == nullptr) { |
1554 | 0 | LOG(INFO) << "tablet may be dropped when adding async publish task, tablet_id: " |
1555 | 0 | << tablet_id; |
1556 | 0 | return; |
1557 | 0 | } |
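| | // persist the pending publish info; recovery tasks (is_recovery == true) skip this step and only go into the in-memory map below |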
1558 | 2.03k | PendingPublishInfoPB pending_publish_info_pb; |
1559 | 2.03k | pending_publish_info_pb.set_partition_id(partition_id); |
1560 | 2.03k | pending_publish_info_pb.set_transaction_id(transaction_id); |
1561 | 2.03k | static_cast<void>(TabletMetaManager::save_pending_publish_info( |
1562 | 2.03k | tablet->data_dir(), tablet->tablet_id(), publish_version, |
1563 | 2.03k | pending_publish_info_pb.SerializeAsString())); |
1564 | 2.03k | } |
1565 | 2.03k | LOG(INFO) << "add pending publish task, tablet_id: " << tablet_id |
1566 | 2.03k | << " version: " << publish_version << " txn_id:" << transaction_id |
1567 | 2.03k | << " is_recovery: " << is_recovery; |
1568 | 2.03k | std::unique_lock<std::shared_mutex> wlock(_async_publish_lock); |
1569 | 2.03k | _async_publish_tasks[tablet_id][publish_version] = {transaction_id, partition_id}; |
1570 | 2.03k | } |
1571 | | |
1572 | 3 | int64_t StorageEngine::get_pending_publish_min_version(int64_t tablet_id) { |
1573 | 3 | std::shared_lock<std::shared_mutex> rlock(_async_publish_lock); |
1574 | 3 | auto iter = _async_publish_tasks.find(tablet_id); |
1575 | 3 | if (iter == _async_publish_tasks.end()) { |
1576 | 0 | return INT64_MAX; |
1577 | 0 | } |
1578 | 3 | if (iter->second.empty()) { |
1579 | 0 | return INT64_MAX; |
1580 | 0 | } |
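| | // tasks are keyed by version in ascending order, so begin() holds the minimum pending version |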
1581 | 3 | return iter->second.begin()->first; |
1582 | 3 | } |
1583 | | |
1584 | 10 | void StorageEngine::_process_async_publish() { |
1585 | | // tablet, publish_version |
1586 | 10 | std::vector<std::pair<TabletSharedPtr, int64_t>> need_removed_tasks; |
1587 | 10 | { |
1588 | 10 | std::unique_lock<std::shared_mutex> wlock(_async_publish_lock); |
1589 | 10 | for (auto tablet_iter = _async_publish_tasks.begin(); |
1590 | 20 | tablet_iter != _async_publish_tasks.end();) { |
1591 | 10 | if (tablet_iter->second.empty()) { |
1592 | 1 | tablet_iter = _async_publish_tasks.erase(tablet_iter); |
1593 | 1 | continue; |
1594 | 1 | } |
1595 | 9 | int64_t tablet_id = tablet_iter->first; |
1596 | 9 | TabletSharedPtr tablet = tablet_manager()->get_tablet(tablet_id); |
1597 | 9 | if (!tablet) { |
1598 | 1 | LOG(WARNING) << "tablet does not exist during async publish, tablet_id: " |
1599 | 1 | << tablet_id; |
1600 | 1 | tablet_iter = _async_publish_tasks.erase(tablet_iter); |
1601 | 1 | continue; |
1602 | 1 | } |
1603 | | |
1604 | 8 | auto task_iter = tablet_iter->second.begin(); |
1605 | 8 | int64_t version = task_iter->first; |
1606 | 8 | int64_t transaction_id = task_iter->second.first; |
1607 | 8 | int64_t partition_id = task_iter->second.second; |
1608 | 8 | int64_t max_version = tablet->max_version().second; |
1609 | | |
1610 | 8 | if (version <= max_version) { |
1611 | 6 | need_removed_tasks.emplace_back(tablet, version); |
1612 | 6 | tablet_iter->second.erase(task_iter); |
1613 | 6 | tablet_iter++; |
1614 | 6 | continue; |
1615 | 6 | } |
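| | // version > max_version + 1 means there is a gap, so wait for the missing version instead of publishing, |
| | // but cap the number of queued tasks at max_tablet_version_num by dropping the oldest ones |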
1616 | 2 | if (version != max_version + 1) { |
1617 | | // Keep only the most recent versions |
1618 | 31 | while (tablet_iter->second.size() > config::max_tablet_version_num) { |
1619 | 30 | need_removed_tasks.emplace_back(tablet, version); |
1620 | 30 | task_iter = tablet_iter->second.erase(task_iter); |
1621 | 30 | version = task_iter->first; |
1622 | 30 | } |
1623 | 1 | tablet_iter++; |
1624 | 1 | continue; |
1625 | 1 | } |
1626 | | |
1627 | 1 | auto async_publish_task = std::make_shared<AsyncTabletPublishTask>( |
1628 | 1 | *this, tablet, partition_id, transaction_id, version); |
1629 | 1 | static_cast<void>(_tablet_publish_txn_thread_pool->submit_func( |
1630 | 1 | [=]() { async_publish_task->handle(); })); |
1631 | 1 | tablet_iter->second.erase(task_iter); |
1632 | 1 | need_removed_tasks.emplace_back(tablet, version); |
1633 | 1 | tablet_iter++; |
1634 | 1 | } |
1635 | 10 | } |
1636 | 37 | for (auto& [tablet, publish_version] : need_removed_tasks) { |
1637 | 37 | static_cast<void>(TabletMetaManager::remove_pending_publish_info( |
1638 | 37 | tablet->data_dir(), tablet->tablet_id(), publish_version)); |
1639 | 37 | } |
1640 | 10 | } |
1641 | | |
1642 | 0 | void StorageEngine::_async_publish_callback() { |
1643 | 0 | while (!_stop_background_threads_latch.wait_for(std::chrono::milliseconds(30))) { |
1644 | 0 | _process_async_publish(); |
1645 | 0 | } |
1646 | 0 | } |
1647 | | |
1648 | | } // namespace doris |