/root/doris/be/src/cloud/cloud_tablet.h
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <atomic>
#include <chrono>
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "olap/base_tablet.h"
#include "olap/partial_update_info.h"
#include "olap/rowset/rowset.h"

namespace doris {

class CloudStorageEngine;
enum class WarmUpState : int;

struct SyncRowsetStats {
    int64_t get_remote_rowsets_num {0};
    int64_t get_remote_rowsets_rpc_ns {0};

    int64_t get_local_delete_bitmap_rowsets_num {0};
    int64_t get_remote_delete_bitmap_rowsets_num {0};
    int64_t get_remote_delete_bitmap_key_count {0};
    int64_t get_remote_delete_bitmap_bytes {0};
    int64_t get_remote_delete_bitmap_rpc_ns {0};

    int64_t get_remote_tablet_meta_rpc_ns {0};
    int64_t tablet_meta_cache_hit {0};
    int64_t tablet_meta_cache_miss {0};
};

struct SyncOptions {
    bool warmup_delta_data = false;
    bool sync_delete_bitmap = true;
    bool full_sync = false;
    bool merge_schema = false;
    int64_t query_version = -1;
};

struct RecycledRowsets {
    RowsetId rowset_id;
    int64_t num_segments;
    std::vector<std::string> index_file_names;
};

class CloudTablet final : public BaseTablet {
public:
    CloudTablet(CloudStorageEngine& engine, TabletMetaSharedPtr tablet_meta);

    ~CloudTablet() override;

    bool exceed_version_limit(int32_t limit) override;

    Result<std::unique_ptr<RowsetWriter>> create_rowset_writer(RowsetWriterContext& context,
                                                               bool vertical) override;

    Status capture_rs_readers(const Version& spec_version, std::vector<RowSetSplits>* rs_splits,
                              const CaptureRsReaderOptions& opts) override;
    Status capture_rs_readers_internal(const Version& spec_version,
                                       std::vector<RowSetSplits>* rs_splits);

    // Capture rowset readers with cache preference optimization.
    // This method prioritizes using cached/warmed-up rowsets when building version paths,
    // avoiding cold data reads when possible. It uses capture_consistent_versions_prefer_cache
    // to find a consistent version path that prefers already warmed-up rowsets.
    Status capture_rs_readers_prefer_cache(const Version& spec_version,
                                           std::vector<RowSetSplits>* rs_splits);

    // Capture rowset readers with query freshness tolerance.
    // This method finds a consistent version path where all rowsets are warmed up,
    // but allows falling back to normal capture if there are newer rowsets that should be
    // visible (based on freshness tolerance) but haven't been warmed up yet.
    // For merge-on-write tables, it uses special validation to ensure data correctness.
    //
    // IMPORTANT: The returned version may be smaller than the requested version if newer
    // data hasn't been warmed up yet. This can cause different tablets in the same query
    // to read from different versions, potentially leading to inconsistent query results.
    //
    // @param query_freshness_tolerance_ms: Time tolerance in milliseconds. Rowsets that
    // became visible within this time range (after current_time - query_freshness_tolerance_ms)
    // can be skipped if not warmed up. However, if older rowsets (before this time point)
    // are not warmed up, the method will fall back to normal capture.
    Status capture_rs_readers_with_freshness_tolerance(const Version& spec_version,
                                                       std::vector<RowSetSplits>* rs_splits,
                                                       int64_t query_freshness_tolerance_ms);
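
    // A minimal usage sketch (hypothetical call site, not code from this repo): it shows how a
    // reader could pick one of the capture variants above. The variables `prefer_cached_data`,
    // `freshness_tolerance_ms`, and `tablet` (a CloudTabletSPtr) are assumptions for
    // illustration only.
    //
    //   std::vector<RowSetSplits> splits;
    //   Version spec_version {0, tablet->max_version_unlocked()};
    //   Status st;
    //   if (freshness_tolerance_ms > 0) {
    //       st = tablet->capture_rs_readers_with_freshness_tolerance(spec_version, &splits,
    //                                                                freshness_tolerance_ms);
    //   } else if (prefer_cached_data) {
    //       st = tablet->capture_rs_readers_prefer_cache(spec_version, &splits);
    //   } else {
    //       st = tablet->capture_rs_readers_internal(spec_version, &splits);
    //   }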

    Status capture_consistent_rowsets_unlocked(
            const Version& spec_version, std::vector<RowsetSharedPtr>* rowsets) const override;

    size_t tablet_footprint() override {
        return _approximate_data_size.load(std::memory_order_relaxed);
    }

    std::string tablet_path() const override;

    // clang-format off
    int64_t fetch_add_approximate_num_rowsets (int64_t x) { return _approximate_num_rowsets .fetch_add(x, std::memory_order_relaxed); }
    int64_t fetch_add_approximate_num_segments(int64_t x) { return _approximate_num_segments.fetch_add(x, std::memory_order_relaxed); }
    int64_t fetch_add_approximate_num_rows    (int64_t x) { return _approximate_num_rows    .fetch_add(x, std::memory_order_relaxed); }
    int64_t fetch_add_approximate_data_size   (int64_t x) { return _approximate_data_size   .fetch_add(x, std::memory_order_relaxed); }
    int64_t fetch_add_approximate_cumu_num_rowsets (int64_t x) { return _approximate_cumu_num_rowsets.fetch_add(x, std::memory_order_relaxed); }
    int64_t fetch_add_approximate_cumu_num_deltas  (int64_t x) { return _approximate_cumu_num_deltas.fetch_add(x, std::memory_order_relaxed); }
    // clang-format on

    // meta lock must be held when calling this function
    void reset_approximate_stats(int64_t num_rowsets, int64_t num_segments, int64_t num_rows,
                                 int64_t data_size);

    // Writes a JSON string describing the compaction status of this tablet to `json_result`.
    void get_compaction_status(std::string* json_result);

    // Synchronize the rowsets from the meta service.
    // If the tablet state is not `TABLET_RUNNING`, sync the tablet meta and all visible rowsets.
    // If `options.query_version` > 0 and the local max_version of the tablet >= `query_version`,
    // do nothing.
    // If `options.warmup_delta_data` is true, the data of newly added rowsets is downloaded
    // asynchronously in the background.
    Status sync_rowsets(const SyncOptions& options = {}, SyncRowsetStats* stats = nullptr);
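
    // A minimal usage sketch (hypothetical, for illustration only): a full sync that also
    // requests asynchronous warm-up of newly fetched rowset data and collects stats. The
    // `tablet` variable (a CloudTabletSPtr) is assumed.
    //
    //   SyncRowsetStats stats;
    //   SyncOptions opts;
    //   opts.full_sync = true;
    //   opts.warmup_delta_data = true;
    //   Status st = tablet->sync_rowsets(opts, &stats);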

    // Synchronize the tablet meta from the meta service.
    Status sync_meta();

    // If `version_overlap` is true, rowsets in this tablet whose versions overlap with the
    // rowsets in `to_add` are deleted.
    // If `warmup_delta_data` is true, the data of the newly added rowsets is downloaded
    // asynchronously in the background.
    // MUST hold EXCLUSIVE `_meta_lock` (passed in as `meta_lock`).
    void add_rowsets(std::vector<RowsetSharedPtr> to_add, bool version_overlap,
                     std::unique_lock<std::shared_mutex>& meta_lock,
                     bool warmup_delta_data = false);
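
    // A minimal calling sketch (hypothetical; not code from this repo), assuming the base class
    // exposes the meta lock through an accessor such as `get_header_lock()`:
    //
    //   {
    //       std::unique_lock<std::shared_mutex> wlock(tablet->get_header_lock());
    //       tablet->add_rowsets(std::move(to_add), /*version_overlap=*/true, wlock,
    //                           /*warmup_delta_data=*/false);
    //   }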

    // MUST hold EXCLUSIVE `_meta_lock`.
    void delete_rowsets(const std::vector<RowsetSharedPtr>& to_delete,
                        std::unique_lock<std::shared_mutex>& meta_lock);

    // When the tablet is dropped, we need to recycle cached data:
    // 1. The data in file cache
    // 2. The memory in tablet cache
    void clear_cache() override;

    // Return number of deleted stale rowsets
    uint64_t delete_expired_stale_rowsets();

    bool has_stale_rowsets() const { return !_stale_rs_version_map.empty(); }

    int64_t get_cloud_base_compaction_score() const;
    int64_t get_cloud_cumu_compaction_score() const;

    int64_t max_version_unlocked() const override { return _max_version; }
    int64_t base_compaction_cnt() const { return _base_compaction_cnt; }
    int64_t cumulative_compaction_cnt() const { return _cumulative_compaction_cnt; }
    int64_t full_compaction_cnt() const { return _full_compaction_cnt; }
    int64_t cumulative_layer_point() const {
        return _cumulative_point.load(std::memory_order_relaxed);
    }

    void set_base_compaction_cnt(int64_t cnt) { _base_compaction_cnt = cnt; }
    void set_cumulative_compaction_cnt(int64_t cnt) { _cumulative_compaction_cnt = cnt; }
    void set_full_compaction_cnt(int64_t cnt) { _full_compaction_cnt = cnt; }
    void set_cumulative_layer_point(int64_t new_point);

    int64_t last_cumu_compaction_failure_time() { return _last_cumu_compaction_failure_millis; }
    void set_last_cumu_compaction_failure_time(int64_t millis) {
        _last_cumu_compaction_failure_millis = millis;
    }

    int64_t last_base_compaction_failure_time() { return _last_base_compaction_failure_millis; }
    void set_last_base_compaction_failure_time(int64_t millis) {
        _last_base_compaction_failure_millis = millis;
    }

    int64_t last_full_compaction_failure_time() { return _last_full_compaction_failure_millis; }
    void set_last_full_compaction_failure_time(int64_t millis) {
        _last_full_compaction_failure_millis = millis;
    }

    int64_t last_cumu_compaction_success_time() { return _last_cumu_compaction_success_millis; }
    void set_last_cumu_compaction_success_time(int64_t millis) {
        _last_cumu_compaction_success_millis = millis;
    }

    int64_t last_base_compaction_success_time() { return _last_base_compaction_success_millis; }
    void set_last_base_compaction_success_time(int64_t millis) {
        _last_base_compaction_success_millis = millis;
    }

    int64_t last_full_compaction_success_time() { return _last_full_compaction_success_millis; }
    void set_last_full_compaction_success_time(int64_t millis) {
        _last_full_compaction_success_millis = millis;
    }

    int64_t last_cumu_compaction_schedule_time() { return _last_cumu_compaction_schedule_millis; }
    void set_last_cumu_compaction_schedule_time(int64_t millis) {
        _last_cumu_compaction_schedule_millis = millis;
    }

    int64_t last_base_compaction_schedule_time() { return _last_base_compaction_schedule_millis; }
    void set_last_base_compaction_schedule_time(int64_t millis) {
        _last_base_compaction_schedule_millis = millis;
    }

    int64_t last_full_compaction_schedule_time() { return _last_full_compaction_schedule_millis; }
    void set_last_full_compaction_schedule_time(int64_t millis) {
        _last_full_compaction_schedule_millis = millis;
    }

    void set_last_cumu_compaction_status(std::string status) {
        _last_cumu_compaction_status = std::move(status);
    }

    std::string get_last_cumu_compaction_status() { return _last_cumu_compaction_status; }

    void set_last_base_compaction_status(std::string status) {
        _last_base_compaction_status = std::move(status);
    }

    std::string get_last_base_compaction_status() { return _last_base_compaction_status; }

    void set_last_full_compaction_status(std::string status) {
        _last_full_compaction_status = std::move(status);
    }

    std::string get_last_full_compaction_status() { return _last_full_compaction_status; }

    int64_t alter_version() const { return _alter_version; }
    void set_alter_version(int64_t alter_version) { _alter_version = alter_version; }

    std::vector<RowsetSharedPtr> pick_candidate_rowsets_to_base_compaction();

    inline Version max_version() const {
        std::shared_lock rdlock(_meta_lock);
        return _tablet_meta->max_version();
    }

    int64_t base_size() const { return _base_size; }

    std::vector<RowsetSharedPtr> pick_candidate_rowsets_to_full_compaction();
    Result<RowsetSharedPtr> pick_a_rowset_for_index_change(int schema_version,
                                                           bool& is_base_rowset);
    Status check_rowset_schema_for_build_index(std::vector<TColumn>& columns, int schema_version);

    std::mutex& get_base_compaction_lock() { return _base_compaction_lock; }
    std::mutex& get_cumulative_compaction_lock() { return _cumulative_compaction_lock; }

    Result<std::unique_ptr<RowsetWriter>> create_transient_rowset_writer(
            const Rowset& rowset, std::shared_ptr<PartialUpdateInfo> partial_update_info,
            int64_t txn_expiration = 0) override;

    CalcDeleteBitmapExecutor* calc_delete_bitmap_executor() override;

    Status save_delete_bitmap(const TabletTxnInfo* txn_info, int64_t txn_id,
                              DeleteBitmapPtr delete_bitmap, RowsetWriter* rowset_writer,
                              const RowsetIdUnorderedSet& cur_rowset_ids, int64_t lock_id = -1,
                              int64_t next_visible_version = -1) override;

    Status save_delete_bitmap_to_ms(int64_t cur_version, int64_t txn_id,
                                    DeleteBitmapPtr delete_bitmap, int64_t lock_id,
                                    int64_t next_visible_version, RowsetSharedPtr rowset);

    Status calc_delete_bitmap_for_compaction(const std::vector<RowsetSharedPtr>& input_rowsets,
                                             const RowsetSharedPtr& output_rowset,
                                             const RowIdConversion& rowid_conversion,
                                             ReaderType compaction_type, int64_t merged_rows,
                                             int64_t filtered_rows, int64_t initiator,
                                             DeleteBitmapPtr& output_rowset_delete_bitmap,
                                             bool allow_delete_in_cumu_compaction,
                                             int64_t& get_delete_bitmap_lock_start_time);

    // Find the missing versions up to `spec_version`.
    //
    // For example:
    //   existing versions: [0-4][5-5][8-8][9-9][14-14]
    //   if spec_version = 12, it will return [6-7],[10-12]
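    //
    // A worked illustration (hypothetical call, mirroring the example above):
    //
    //   Versions existing = {{0, 4}, {5, 5}, {8, 8}, {9, 9}, {14, 14}};
    //   Versions missed = tablet->calc_missed_versions(12, std::move(existing));
    //   // missed == {[6-7], [10-12]}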
    Versions calc_missed_versions(int64_t spec_version, Versions existing_versions) const override;

    std::mutex& get_rowset_update_lock() { return _rowset_update_lock; }

    bthread::Mutex& get_sync_meta_lock() { return _sync_meta_lock; }

    const auto& rowset_map() const { return _rs_version_map; }

    int64_t last_sync_time_s = 0;
    int64_t last_load_time_ms = 0;
    int64_t last_base_compaction_success_time_ms = 0;
    int64_t last_cumu_compaction_success_time_ms = 0;
    int64_t last_cumu_no_suitable_version_ms = 0;
    int64_t last_access_time_ms = 0;

    std::atomic<int64_t> local_read_time_us = 0;
    std::atomic<int64_t> remote_read_time_us = 0;
    std::atomic<int64_t> exec_compaction_time_us = 0;

    void build_tablet_report_info(TTabletInfo* tablet_info);

    // Check whether the delete bitmap in the delete bitmap cache has the same cardinality as
    // `expected_delete_bitmap`.
    Status check_delete_bitmap_cache(int64_t txn_id, DeleteBitmap* expected_delete_bitmap) override;

    void agg_delete_bitmap_for_compaction(int64_t start_version, int64_t end_version,
                                          const std::vector<RowsetSharedPtr>& pre_rowsets,
                                          DeleteBitmapPtr& new_delete_bitmap,
                                          std::map<std::string, int64_t>& pre_rowset_to_versions);

    bool need_remove_unused_rowsets();

    void add_unused_rowsets(const std::vector<RowsetSharedPtr>& rowsets);
    void remove_unused_rowsets();

    // For each given rowset not in active use, clears its file cache and returns its
    // ID, segment count, and index file names as RecycledRowsets entries.
    static std::vector<RecycledRowsets> recycle_cached_data(
            const std::vector<RowsetSharedPtr>& rowsets);

    // Warm-up state management
    WarmUpState get_rowset_warmup_state(RowsetId rowset_id);
    bool add_rowset_warmup_state(
            const RowsetMeta& rowset, WarmUpState state,
            std::chrono::steady_clock::time_point start_tp = std::chrono::steady_clock::now());
    void update_rowset_warmup_state_inverted_idx_num(RowsetId rowset_id, int64_t delta);
    void update_rowset_warmup_state_inverted_idx_num_unlocked(RowsetId rowset_id, int64_t delta);
    WarmUpState complete_rowset_segment_warmup(RowsetId rowset_id, Status status,
                                               int64_t segment_num, int64_t inverted_idx_num);

    bool is_rowset_warmed_up(const RowsetId& rowset_id) const;

    void add_warmed_up_rowset(const RowsetId& rowset_id);
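
    // A minimal lifecycle sketch (hypothetical; the concrete WarmUpState enumerator and the
    // scheduling side are assumed and represented by the placeholder `triggered_state`):
    // register the warm-up state when triggering a warm-up, then report each finished segment.
    //
    //   if (tablet->add_rowset_warmup_state(*rowset->rowset_meta(), triggered_state)) {
    //       // ... submit segment download tasks; each completed segment then reports back:
    //       tablet->complete_rowset_segment_warmup(rowset->rowset_id(), Status::OK(),
    //                                              /*segment_num=*/1, /*inverted_idx_num=*/0);
    //   }
    //   // Once the whole rowset is warmed up, record it so the cache-preferring capture
    //   // paths can see it:
    //   tablet->add_warmed_up_rowset(rowset->rowset_id());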

    std::string rowset_warmup_digest() {
        std::string res;
        auto add_log = [&](const RowsetSharedPtr& rs) {
            auto tmp = fmt::format("{}{}", rs->rowset_id().to_string(), rs->version().to_string());
            if (_rowset_warm_up_states.contains(rs->rowset_id())) {
                tmp += fmt::format(
                        ", state={}, segments_warmed_up={}/{}, inverted_idx_warmed_up={}/{}",
                        _rowset_warm_up_states.at(rs->rowset_id()).state,
                        _rowset_warm_up_states.at(rs->rowset_id()).num_segments_warmed_up,
                        _rowset_warm_up_states.at(rs->rowset_id()).num_segments,
                        _rowset_warm_up_states.at(rs->rowset_id()).num_inverted_idx_warmed_up,
                        _rowset_warm_up_states.at(rs->rowset_id()).num_inverted_idx);
            }
            res += fmt::format("[{}],", tmp);
        };
        traverse_rowsets_unlocked(add_log, true);
        return res;
    }

private:
    // FIXME(plat1ko): No need to record base size if rowsets are ordered by version
    void update_base_size(const Rowset& rs);

    Status sync_if_not_running(SyncRowsetStats* stats = nullptr);

    bool add_rowset_warmup_state_unlocked(
            const RowsetMeta& rowset, WarmUpState state,
            std::chrono::steady_clock::time_point start_tp = std::chrono::steady_clock::now());

    // Used by the capture_rs_reader_xxx functions.
    bool rowset_is_warmed_up_unlocked(int64_t start_version, int64_t end_version);

    CloudStorageEngine& _engine;

    // This mutex MUST ONLY be used when syncing meta.
    bthread::Mutex _sync_meta_lock;
    // ATTENTION: lock order should be: _sync_meta_lock -> _meta_lock
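    //
    // A minimal ordering sketch (illustrative only, from inside a member function):
    //
    //   std::unique_lock<bthread::Mutex> sync_lock(_sync_meta_lock);
    //   std::unique_lock<std::shared_mutex> meta_wlock(_meta_lock);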

    std::atomic<int64_t> _cumulative_point {-1};
    std::atomic<int64_t> _approximate_num_rowsets {-1};
    std::atomic<int64_t> _approximate_num_segments {-1};
    std::atomic<int64_t> _approximate_num_rows {-1};
    std::atomic<int64_t> _approximate_data_size {-1};
    std::atomic<int64_t> _approximate_cumu_num_rowsets {-1};
    // Number of sorted arrays after the cumulative point: a rowset with N segments contributes
    // N deltas if it is overlapping, otherwise 1.
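    // For example (illustrative numbers only): three rowsets after the cumulative point with
    // 5 overlapping segments, 3 non-overlapping segments, and 1 segment respectively contribute
    // 5 + 1 + 1 = 7 deltas.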
    std::atomic<int64_t> _approximate_cumu_num_deltas {-1};

    // timestamp of last cumu compaction failure
    std::atomic<int64_t> _last_cumu_compaction_failure_millis;
    // timestamp of last base compaction failure
    std::atomic<int64_t> _last_base_compaction_failure_millis;
    // timestamp of last full compaction failure
    std::atomic<int64_t> _last_full_compaction_failure_millis;
    // timestamp of last cumu compaction success
    std::atomic<int64_t> _last_cumu_compaction_success_millis;
    // timestamp of last base compaction success
    std::atomic<int64_t> _last_base_compaction_success_millis;
    // timestamp of last full compaction success
    std::atomic<int64_t> _last_full_compaction_success_millis;
    // timestamp of the last cumu compaction schedule
    std::atomic<int64_t> _last_cumu_compaction_schedule_millis;
    // timestamp of the last base compaction schedule
    std::atomic<int64_t> _last_base_compaction_schedule_millis;
    // timestamp of the last full compaction schedule
    std::atomic<int64_t> _last_full_compaction_schedule_millis;

    std::string _last_cumu_compaction_status;
    std::string _last_base_compaction_status;
    std::string _last_full_compaction_status;

    int64_t _base_compaction_cnt = 0;
    int64_t _cumulative_compaction_cnt = 0;
    int64_t _full_compaction_cnt = 0;
    int64_t _max_version = -1;
    int64_t _base_size = 0;
    int64_t _alter_version = -1;

    std::mutex _base_compaction_lock;
    std::mutex _cumulative_compaction_lock;

    // To avoid multiple calc delete bitmap tasks on the same (txn_id, tablet_id) with different
    // signatures being executed concurrently, we use _rowset_update_lock to serialize them.
    mutable std::mutex _rowset_update_lock;

    // unused_rowsets, [start_version, end_version]
    std::mutex _gc_mutex;
    std::unordered_map<RowsetId, RowsetSharedPtr> _unused_rowsets;
    std::vector<std::pair<std::vector<RowsetId>, DeleteBitmapKeyRanges>> _unused_delete_bitmap;

    // Warm-up state management
    struct RowsetWarmUpInfo {
        WarmUpState state;
        int64_t num_segments = 0;
        int64_t num_inverted_idx = 0;
        int64_t num_segments_warmed_up = 0;
        int64_t num_inverted_idx_warmed_up = 0;
        std::chrono::steady_clock::time_point start_tp;

        void done(int64_t num_segments, int64_t num_inverted_idx) {
            num_segments_warmed_up += num_segments;
            num_inverted_idx_warmed_up += num_inverted_idx;
        }

        bool has_finished() const {
            return (num_segments_warmed_up >= num_segments) &&
                   (num_inverted_idx_warmed_up >= num_inverted_idx);
        }
    };
    std::unordered_map<RowsetId, RowsetWarmUpInfo> _rowset_warm_up_states;

    mutable std::shared_mutex _warmed_up_rowsets_mutex;
    std::unordered_set<RowsetId> _warmed_up_rowsets;
};

using CloudTabletSPtr = std::shared_ptr<CloudTablet>;

} // namespace doris