/root/doris/be/src/runtime/tablets_channel.cpp
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include "runtime/tablets_channel.h"

#include <bvar/bvar.h>
#include <fmt/format.h>
#include <gen_cpp/internal_service.pb.h>
#include <gen_cpp/types.pb.h>

#include <ctime>

#include "common/compiler_util.h" // IWYU pragma: keep
#include "common/status.h"
// IWYU pragma: no_include <bits/chrono.h>
#include <chrono> // IWYU pragma: keep
#include <initializer_list>
#include <set>
#include <thread>
#include <utility>

#ifdef DEBUG
#include <unordered_set>
#endif

#include "common/logging.h"
#include "exec/tablet_info.h"
#include "olap/delta_writer.h"
#include "olap/storage_engine.h"
#include "olap/txn_manager.h"
#include "runtime/load_channel.h"
#include "util/defer_op.h"
#include "util/doris_metrics.h"
#include "util/metrics.h"
#include "vec/core/block.h"

namespace doris {
class SlotDescriptor;

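// Tracks, across all tablets channels, the bytes currently held by blocks
// deserialized in _write_block_data(); the counter is decremented by a Defer
// once the block has been written out.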
bvar::Adder<int64_t> g_tablets_channel_send_data_allocated_size(
        "tablets_channel_send_data_allocated_size");

DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(tablet_writer_count, MetricUnit::NOUNIT);

std::atomic<uint64_t> BaseTabletsChannel::_s_tablet_writer_count;

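// The tablet_writer_count hook metric is registered once per process via
// std::call_once, no matter how many channels are constructed; it reports
// the global tablet writer count kept in _s_tablet_writer_count.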
BaseTabletsChannel::BaseTabletsChannel(const TabletsChannelKey& key, const UniqueId& load_id,
                                       bool is_high_priority, RuntimeProfile* profile)
        : _key(key),
          _state(kInitialized),
          _load_id(load_id),
          _closed_senders(64),
          _is_high_priority(is_high_priority) {
    static std::once_flag once_flag;
    _init_profile(profile);
    std::call_once(once_flag, [] {
        REGISTER_HOOK_METRIC(tablet_writer_count, [&]() { return _s_tablet_writer_count.load(); });
    });
}

TabletsChannel::TabletsChannel(StorageEngine& engine, const TabletsChannelKey& key,
                               const UniqueId& load_id, bool is_high_priority,
                               RuntimeProfile* profile)
        : BaseTabletsChannel(key, load_id, is_high_priority, profile), _engine(engine) {}

BaseTabletsChannel::~BaseTabletsChannel() {
    _s_tablet_writer_count -= _tablet_writers.size();
}

TabletsChannel::~TabletsChannel() = default;

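// Returns, via cur_seq, the next packet sequence number expected from the
// request's sender and checks that the channel is still open. A packet_seq
// greater than cur_seq means an earlier packet was lost.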
Status BaseTabletsChannel::_get_current_seq(int64_t& cur_seq,
                                            const PTabletWriterAddBlockRequest& request) {
    std::lock_guard<std::mutex> l(_lock);
    if (_state != kOpened) {
        return _state == kFinished ? _close_status
                                   : Status::InternalError("TabletsChannel {} state: {}",
                                                           _key.to_string(), _state);
    }
    cur_seq = _next_seqs[request.sender_id()];
    // check packet
    if (request.packet_seq() > cur_seq) {
        LOG(WARNING) << "lost data packet, expect_seq=" << cur_seq
                     << ", recept_seq=" << request.packet_seq();
        return Status::InternalError("lost data packet");
    }
    return Status::OK();
}

void BaseTabletsChannel::_init_profile(RuntimeProfile* profile) {
    _profile =
            profile->create_child(fmt::format("TabletsChannel {}", _key.to_string()), true, true);
    _add_batch_number_counter = ADD_COUNTER(_profile, "NumberBatchAdded", TUnit::UNIT);

    auto* memory_usage = _profile->create_child("PeakMemoryUsage", true, true);
    _add_batch_timer = ADD_TIMER(_profile, "AddBatchTime");
    _write_block_timer = ADD_TIMER(_profile, "WriteBlockTime");
    _incremental_open_timer = ADD_TIMER(_profile, "IncrementalOpenTabletTime");
    _memory_usage_counter = memory_usage->AddHighWaterMarkCounter("Total", TUnit::BYTES);
    _write_memory_usage_counter = memory_usage->AddHighWaterMarkCounter("Write", TUnit::BYTES);
    _flush_memory_usage_counter = memory_usage->AddHighWaterMarkCounter("Flush", TUnit::BYTES);
    _max_tablet_memory_usage_counter =
            memory_usage->AddHighWaterMarkCounter("MaxTablet", TUnit::BYTES);
    _max_tablet_write_memory_usage_counter =
            memory_usage->AddHighWaterMarkCounter("MaxTabletWrite", TUnit::BYTES);
    _max_tablet_flush_memory_usage_counter =
            memory_usage->AddHighWaterMarkCounter("MaxTabletFlush", TUnit::BYTES);
}

void TabletsChannel::_init_profile(RuntimeProfile* profile) {
    BaseTabletsChannel::_init_profile(profile);
    _slave_replica_timer = ADD_TIMER(_profile, "SlaveReplicaTime");
}

Status BaseTabletsChannel::open(const PTabletWriterOpenRequest& request) {
    std::lock_guard<std::mutex> l(_lock);
    // if _state is kOpened, it's a normal case: already opened by another sender.
    // if _state is kFinished, already cancelled by another sender.
    if (_state == kOpened || _state == kFinished) {
        return Status::OK();
    }
    _txn_id = request.txn_id();
    _index_id = request.index_id();
    _schema = std::make_shared<OlapTableSchemaParam>();
    RETURN_IF_ERROR(_schema->init(request.schema()));
    _tuple_desc = _schema->tuple_desc();

    int max_sender = request.num_senders();
    /*
     * A tablets channel on the receiver is related to a group of VNodeChannels on the senders,
     * one (or none) per instance. There are two possibilities:
     * 1. The partitions were originally broadcast by the FE, so every sender (instance) knows
     * them at start, and open() is called directly, not via incremental_open(). After _state
     * changes to kOpened, _open_by_incremental will never become true. In this case
     * _num_remaining_senders stays equal to the number of senders, and the tablets channel
     * closes once every sender has sent its close RPC. For auto-partition tables, the closing
     * of these channels will hang on the receiver and return together, to avoid the
     * close-then-incremental-open problem.
     * 2. This tablets channel is opened by incremental_open() from a sender's sink node, so at
     * that moment only that sender knows this partition (this TabletsChannel), and how many
     * senders will eventually know it depends on the data distribution. In this situation
     * open() is first called by incremental_open(), so _open_by_incremental is true. Then
     * _num_remaining_senders is not set here but is incremented on every incremental_open()
     * call. It is therefore dynamic and still needs the same number of senders' close RPCs to
     * close, but it will not hang.
     */
    if (_open_by_incremental) {
        DCHECK(_num_remaining_senders == 0) << _num_remaining_senders;
    } else {
        _num_remaining_senders = max_sender;
    }
    LOG(INFO) << fmt::format(
            "open tablets channel {}, tablets num: {} timeout(s): {}, init senders {} with "
            "incremental {}",
            _key.to_string(), request.tablets().size(), request.load_channel_timeout_s(),
            _num_remaining_senders, _open_by_incremental ? "on" : "off");
    // Size these by max_sender whether incremental or not, because we don't know
    // how many senders will actually open.
    _next_seqs.resize(max_sender, 0);
    _closed_senders.Reset(max_sender);

    RETURN_IF_ERROR(_open_all_writers(request));

    _state = kOpened;
    return Status::OK();
}

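// Handles every incremental PTabletWriterOpenRequest. The first call moves the
// channel from kInitialized to kOpened through open(); subsequent calls only
// create delta writers for tablets this channel has not seen yet, and each
// sender carrying a sender_id is counted into _num_remaining_senders once.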
Status BaseTabletsChannel::incremental_open(const PTabletWriterOpenRequest& params) {
    SCOPED_TIMER(_incremental_open_timer);

    // current node first opened by incremental open
    if (_state == kInitialized) {
        _open_by_incremental = true;
        RETURN_IF_ERROR(open(params));
    }

    std::lock_guard<std::mutex> l(_lock);

    // A sender may call incremental_open() many times but closes only once,
    // so don't count the same sender twice.
    if (_open_by_incremental) {
        if (params.has_sender_id() && !_recieved_senders.contains(params.sender_id())) {
            _recieved_senders.insert(params.sender_id());
            _num_remaining_senders++;
        } else if (!params.has_sender_id()) { // for compatibility
            _num_remaining_senders++;
        }
        VLOG_DEBUG << fmt::format("txn {}: TabletsChannel {} inc senders to {}", _txn_id, _index_id,
                                  _num_remaining_senders);
    }

    std::vector<SlotDescriptor*>* index_slots = nullptr;
    int32_t schema_hash = 0;
    for (const auto& index : _schema->indexes()) {
        if (index->index_id == _index_id) {
            index_slots = &index->slots;
            schema_hash = index->schema_hash;
            break;
        }
    }
    if (index_slots == nullptr) {
        return Status::InternalError("unknown index id, key={}", _key.to_string());
    }
    // update tablets
    size_t incremental_tablet_num = 0;
    std::stringstream ss;
    ss << "LocalTabletsChannel txn_id: " << _txn_id << " load_id: " << print_id(params.id())
       << " incremental open delta writer: ";

    // Every modification holds _lock, and this lookup runs under _lock too,
    // so _tablet_writers_lock is not needed for the find.
    for (const auto& tablet : params.tablets()) {
        if (_tablet_writers.find(tablet.tablet_id()) != _tablet_writers.end()) {
            continue;
        }
        incremental_tablet_num++;

        WriteRequest wrequest;
        wrequest.index_id = params.index_id();
        wrequest.tablet_id = tablet.tablet_id();
        wrequest.schema_hash = schema_hash;
        wrequest.txn_id = _txn_id;
        wrequest.partition_id = tablet.partition_id();
        wrequest.load_id = params.id();
        wrequest.tuple_desc = _tuple_desc;
        wrequest.slots = index_slots;
        wrequest.is_high_priority = _is_high_priority;
        wrequest.table_schema_param = _schema;
        wrequest.txn_expiration = params.txn_expiration(); // Required by CLOUD.
        wrequest.storage_vault_id = params.storage_vault_id();

        auto delta_writer = create_delta_writer(wrequest);
        {
            // _tablet_writers is modified here, so the lock is needed.
            std::lock_guard<std::mutex> l(_tablet_writers_lock);
            _tablet_writers.emplace(tablet.tablet_id(), std::move(delta_writer));
        }

        ss << "[" << tablet.tablet_id() << "]";
    }

    _s_tablet_writer_count += incremental_tablet_num;
    LOG(INFO) << ss.str();

    _state = kOpened;
    return Status::OK();
}

std::unique_ptr<BaseDeltaWriter> TabletsChannel::create_delta_writer(const WriteRequest& request) {
    return std::make_unique<DeltaWriter>(_engine, request, _profile, _load_id);
}

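// Invoked for each sender's close RPC. Only the last remaining sender
// (*finished == true) drives the real shutdown: close or cancel every delta
// writer, wait for flushes, build rowsets, compute MoW delete bitmaps, commit
// the transactions, and, for single-replica load, wait on slave replicas.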
Status TabletsChannel::close(LoadChannel* parent, const PTabletWriterAddBlockRequest& req,
                             PTabletWriterAddBlockResult* res, bool* finished) {
    int sender_id = req.sender_id();
    int64_t backend_id = req.backend_id();
    const auto& partition_ids = req.partition_ids();
    auto* tablet_errors = res->mutable_tablet_errors();
    std::lock_guard<std::mutex> l(_lock);
    if (_state == kFinished) {
        return _close_status;
    }
    if (_closed_senders.Get(sender_id)) {
        // Double close from one sender, just return OK
        *finished = (_num_remaining_senders == 0);
        return _close_status;
    }

    for (auto pid : partition_ids) {
        _partition_ids.emplace(pid);
    }
    _closed_senders.Set(sender_id, true);
    _num_remaining_senders--;
    *finished = (_num_remaining_senders == 0);

    LOG(INFO) << fmt::format(
            "txn {}: close tablets channel of index {} , sender id: {}, backend {}, remain "
            "senders: {}",
            _txn_id, _index_id, sender_id, backend_id, _num_remaining_senders);

    if (!*finished) {
        return Status::OK();
    }

    _state = kFinished;
    // All senders are closed
    // 1. close all delta writers
    std::set<DeltaWriter*> need_wait_writers;
    // under _lock; no need for _tablet_writers_lock again.
    for (auto&& [tablet_id, writer] : _tablet_writers) {
        if (_partition_ids.contains(writer->partition_id())) {
            auto st = writer->close();
            if (!st.ok()) {
                auto err_msg = fmt::format(
                        "close tablet writer failed, tablet_id={}, "
                        "transaction_id={}, err={}",
                        tablet_id, _txn_id, st.to_string());
                LOG(WARNING) << err_msg;
                PTabletError* tablet_error = tablet_errors->Add();
                tablet_error->set_tablet_id(tablet_id);
                tablet_error->set_msg(st.to_string());
                // just skip this tablet (writer) and continue to close the others
                continue;
            }
            // A tablet writer in `_broken_tablets` must not call `build_rowset` and
            // `commit_txn`; otherwise the publish-version task would succeed,
            // which could cause replica inconsistency.
            if (_is_broken_tablet(writer->tablet_id())) {
                LOG(WARNING) << "SHOULD NOT HAPPEN, tablet writer is broken but not cancelled"
                             << ", tablet_id=" << tablet_id << ", transaction_id=" << _txn_id;
                continue;
            }
            need_wait_writers.insert(static_cast<DeltaWriter*>(writer.get()));
        } else {
            auto st = writer->cancel();
            if (!st.ok()) {
                LOG(WARNING) << "cancel tablet writer failed, tablet_id=" << tablet_id
                             << ", transaction_id=" << _txn_id;
                // just skip this tablet (writer) and continue to close the others
                continue;
            }
            VLOG_PROGRESS << "cancel tablet writer successfully, tablet_id=" << tablet_id
                          << ", transaction_id=" << _txn_id;
        }
    }

    _write_single_replica = req.write_single_replica();

    // 2. wait for all writers to finish flushing.
    for (auto* writer : need_wait_writers) {
        RETURN_IF_ERROR((writer->wait_flush()));
    }

    // 3. build rowsets
    for (auto it = need_wait_writers.begin(); it != need_wait_writers.end();) {
        Status st = (*it)->build_rowset();
        if (!st.ok()) {
            _add_error_tablet(tablet_errors, (*it)->tablet_id(), st);
            it = need_wait_writers.erase(it);
            continue;
        }
        // 3.1 calculate delete bitmaps for Unique Key MoW tables
        st = (*it)->submit_calc_delete_bitmap_task();
        if (!st.ok()) {
            _add_error_tablet(tablet_errors, (*it)->tablet_id(), st);
            it = need_wait_writers.erase(it);
            continue;
        }
        it++;
    }

    // 4. wait for delete bitmap calculation to complete if necessary
    for (auto it = need_wait_writers.begin(); it != need_wait_writers.end();) {
        Status st = (*it)->wait_calc_delete_bitmap();
        if (!st.ok()) {
            _add_error_tablet(tablet_errors, (*it)->tablet_id(), st);
            it = need_wait_writers.erase(it);
            continue;
        }
        it++;
    }

    // 5. commit all writers

    for (auto* writer : need_wait_writers) {
        PSlaveTabletNodes slave_nodes;

        // _commit_txn may fail, but there is no need to handle it here:
        // tablet_vec will only contain successful tablets, and the FE judges the rest.
        _commit_txn(writer, req, res);
    }

    if (_write_single_replica) {
        auto* success_slave_tablet_node_ids = res->mutable_success_slave_tablet_node_ids();
        // The wait for all slave replicas to complete must end before the timeout,
        // so that there is enough time to collect the completed replicas. Otherwise,
        // the task may time out and fail even though most of the replicas have
        // completed. Here we set 0.9 times the timeout as the maximum waiting time.
        SCOPED_TIMER(_slave_replica_timer);
        while (!need_wait_writers.empty() &&
               (time(nullptr) - parent->last_updated_time()) < (parent->timeout() * 0.9)) {
            std::set<DeltaWriter*>::iterator it;
            for (it = need_wait_writers.begin(); it != need_wait_writers.end();) {
                bool is_done = (*it)->check_slave_replicas_done(success_slave_tablet_node_ids);
                if (is_done) {
                    need_wait_writers.erase(it++);
                } else {
                    it++;
                }
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
        for (auto* writer : need_wait_writers) {
            writer->add_finished_slave_replicas(success_slave_tablet_node_ids);
        }
        _engine.txn_manager()->clear_txn_tablet_delta_writer(_txn_id);
    }

    return Status::OK();
}

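// Commits a single writer's transaction. On success the tablet is appended to
// tablet_vec in the result (schema_hash is a required-but-unused field and is
// set to 0); on failure the tablet is reported through tablet_errors instead.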
void TabletsChannel::_commit_txn(DeltaWriter* writer, const PTabletWriterAddBlockRequest& req,
                                 PTabletWriterAddBlockResult* res) {
    Status st = writer->commit_txn(_write_single_replica
                                           ? req.slave_tablet_nodes().at(writer->tablet_id())
                                           : PSlaveTabletNodes {});
    if (st.ok()) [[likely]] {
        auto* tablet_vec = res->mutable_tablet_vec();
        PTabletInfo* tablet_info = tablet_vec->Add();
        tablet_info->set_tablet_id(writer->tablet_id());
        // unused required field.
        tablet_info->set_schema_hash(0);
        tablet_info->set_received_rows(writer->total_received_rows());
        tablet_info->set_num_rows_filtered(writer->num_rows_filtered());
        _total_received_rows += writer->total_received_rows();
        _num_rows_filtered += writer->num_rows_filtered();
    } else {
        _add_error_tablet(res->mutable_tablet_errors(), writer->tablet_id(), st);
    }
}

void BaseTabletsChannel::_add_error_tablet(
        google::protobuf::RepeatedPtrField<PTabletError>* tablet_errors, int64_t tablet_id,
        Status error) const {
    PTabletError* tablet_error = tablet_errors->Add();
    tablet_error->set_tablet_id(tablet_id);
    tablet_error->set_msg(error.to_string());
    VLOG_PROGRESS << "close wait failed tablet " << tablet_id << " transaction_id " << _txn_id
                  << "err msg " << error;
}

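// Aggregates per-tablet writer memory consumption (write vs. flush) under
// _tablet_writers_lock and publishes the totals and the per-tablet maxima to
// the profile's high-water-mark counters.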
void BaseTabletsChannel::refresh_profile() {
    int64_t write_mem_usage = 0;
    int64_t flush_mem_usage = 0;
    int64_t max_tablet_mem_usage = 0;
    int64_t max_tablet_write_mem_usage = 0;
    int64_t max_tablet_flush_mem_usage = 0;
    {
        std::lock_guard<std::mutex> l(_tablet_writers_lock);
        for (auto&& [tablet_id, writer] : _tablet_writers) {
            int64_t write_mem = writer->mem_consumption(MemType::WRITE_FINISHED);
            write_mem_usage += write_mem;
            int64_t flush_mem = writer->mem_consumption(MemType::FLUSH);
            flush_mem_usage += flush_mem;
            if (write_mem > max_tablet_write_mem_usage) {
                max_tablet_write_mem_usage = write_mem;
            }
            if (flush_mem > max_tablet_flush_mem_usage) {
                max_tablet_flush_mem_usage = flush_mem;
            }
            if (write_mem + flush_mem > max_tablet_mem_usage) {
                max_tablet_mem_usage = write_mem + flush_mem;
            }
        }
    }
    COUNTER_SET(_memory_usage_counter, write_mem_usage + flush_mem_usage);
    COUNTER_SET(_write_memory_usage_counter, write_mem_usage);
    COUNTER_SET(_flush_memory_usage_counter, flush_mem_usage);
    COUNTER_SET(_max_tablet_memory_usage_counter, max_tablet_mem_usage);
    COUNTER_SET(_max_tablet_write_memory_usage_counter, max_tablet_write_mem_usage);
    COUNTER_SET(_max_tablet_flush_memory_usage_counter, max_tablet_flush_mem_usage);
}

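// Creates a delta writer for every tablet in the open request that does not
// already have one. Resolves the slot descriptors and schema hash of the index
// this channel serves; an unknown index id is an internal error.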
Status BaseTabletsChannel::_open_all_writers(const PTabletWriterOpenRequest& request) {
    std::vector<SlotDescriptor*>* index_slots = nullptr;
    int32_t schema_hash = 0;
    for (const auto& index : _schema->indexes()) {
        if (index->index_id == _index_id) {
            index_slots = &index->slots;
            schema_hash = index->schema_hash;
            break;
        }
    }
    if (index_slots == nullptr) {
        return Status::InternalError("unknown index id, key={}", _key.to_string());
    }

#ifdef DEBUG
    // check: tablet ids should be unique
    {
        std::unordered_set<int64_t> tablet_ids;
        for (const auto& tablet : request.tablets()) {
            CHECK(tablet_ids.count(tablet.tablet_id()) == 0)
                    << "found duplicate tablet id: " << tablet.tablet_id();
            tablet_ids.insert(tablet.tablet_id());
        }
    }
#endif

    int tablet_cnt = 0;
    // under _lock; no need for _tablet_writers_lock again.
    for (const auto& tablet : request.tablets()) {
        if (_tablet_writers.find(tablet.tablet_id()) != _tablet_writers.end()) {
            continue;
        }
        tablet_cnt++;
        WriteRequest wrequest {
                .tablet_id = tablet.tablet_id(),
                .schema_hash = schema_hash,
                .txn_id = _txn_id,
                .txn_expiration = request.txn_expiration(), // Required by CLOUD.
                .index_id = request.index_id(),
                .partition_id = tablet.partition_id(),
                .load_id = request.id(),
                .tuple_desc = _tuple_desc,
                .slots = index_slots,
                .table_schema_param = _schema,
                .is_high_priority = _is_high_priority,
                .write_file_cache = request.write_file_cache(),
                .storage_vault_id = request.storage_vault_id(),
        };

        auto delta_writer = create_delta_writer(wrequest);
        {
            std::lock_guard<std::mutex> l(_tablet_writers_lock);
            _tablet_writers.emplace(tablet.tablet_id(), std::move(delta_writer));
        }
    }
    _s_tablet_writer_count += _tablet_writers.size();
    DCHECK_EQ(_tablet_writers.size(), tablet_cnt);
    return Status::OK();
}

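// Cancels every tablet writer and moves the channel to kFinished. Calling
// cancel() on an already finished channel just returns its close status.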
Status BaseTabletsChannel::cancel() {
    std::lock_guard<std::mutex> l(_lock);
    if (_state == kFinished) {
        return _close_status;
    }
    for (auto& it : _tablet_writers) {
        static_cast<void>(it.second->cancel());
    }
    _state = kFinished;

    return Status::OK();
}

Status TabletsChannel::cancel() {
    RETURN_IF_ERROR(BaseTabletsChannel::cancel());
    if (_write_single_replica) {
        _engine.txn_manager()->clear_txn_tablet_delta_writer(_txn_id);
    }
    return Status::OK();
}

std::string TabletsChannelKey::to_string() const {
    std::stringstream ss;
    ss << *this;
    return ss.str();
}

std::ostream& operator<<(std::ostream& os, const TabletsChannelKey& key) {
    os << "(load_id=" << key.id << ", index_id=" << key.index_id << ")";
    return os;
}

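// Deserializes the block carried by the request and appends each tablet's rows
// to its delta writer. A failed write cancels only that tablet's writer and
// marks the tablet broken; the remaining tablets are still written, and the
// error is reported back to the sender through tablet_errors.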
Status BaseTabletsChannel::_write_block_data(
        const PTabletWriterAddBlockRequest& request, int64_t cur_seq,
        std::unordered_map<int64_t, std::vector<uint32_t>>& tablet_to_rowidxs,
        PTabletWriterAddBlockResult* response) {
    vectorized::Block send_data;
    RETURN_IF_ERROR(send_data.deserialize(request.block()));
    CHECK(send_data.rows() == request.tablet_ids_size())
            << "block rows: " << send_data.rows()
            << ", tablet_ids_size: " << request.tablet_ids_size();

    g_tablets_channel_send_data_allocated_size << send_data.allocated_bytes();
    Defer defer {
            [&]() { g_tablets_channel_send_data_allocated_size << -send_data.allocated_bytes(); }};

    auto write_tablet_data = [&](int64_t tablet_id,
                                 std::function<Status(BaseDeltaWriter * writer)> write_func) {
        google::protobuf::RepeatedPtrField<PTabletError>* tablet_errors =
                response->mutable_tablet_errors();

        // add_batch may run concurrently with incremental_open, and it is not
        // under _lock, so protect the lookup with _tablet_writers_lock.
        decltype(_tablet_writers.find(tablet_id)) tablet_writer_it;
        {
            std::lock_guard<std::mutex> l(_tablet_writers_lock);
            tablet_writer_it = _tablet_writers.find(tablet_id);
            if (tablet_writer_it == _tablet_writers.end()) {
                return Status::InternalError("unknown tablet to append data, tablet={}", tablet_id);
            }
        }

        Status st = write_func(tablet_writer_it->second.get());
        if (!st.ok()) {
            auto err_msg =
                    fmt::format("tablet writer write failed, tablet_id={}, txn_id={}, err={}",
                                tablet_id, _txn_id, st.to_string());
            LOG(WARNING) << err_msg;
            PTabletError* error = tablet_errors->Add();
            error->set_tablet_id(tablet_id);
            error->set_msg(err_msg);
            static_cast<void>(tablet_writer_it->second->cancel_with_status(st));
            _add_broken_tablet(tablet_id);
            // continue writing to the other tablets;
            // the error is returned to the sender.
        }
        return Status::OK();
    };

    SCOPED_TIMER(_write_block_timer);
    for (const auto& tablet_to_rowidxs_it : tablet_to_rowidxs) {
        RETURN_IF_ERROR(write_tablet_data(tablet_to_rowidxs_it.first, [&](BaseDeltaWriter* writer) {
            return writer->write(&send_data, tablet_to_rowidxs_it.second);
        }));
    }

    {
        std::lock_guard<std::mutex> l(_lock);
        _next_seqs[request.sender_id()] = cur_seq + 1;
    }
    return Status::OK();
}

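// Entry point for a PTabletWriterAddBlockRequest. Packets are deduplicated by
// per-sender sequence numbers: a packet_seq below the expected one has already
// been applied and is acknowledged with OK without rewriting the data.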
Status TabletsChannel::add_batch(const PTabletWriterAddBlockRequest& request,
                                 PTabletWriterAddBlockResult* response) {
    SCOPED_TIMER(_add_batch_timer);
    int64_t cur_seq = 0;
    _add_batch_number_counter->update(1);

    auto status = _get_current_seq(cur_seq, request);
    if (UNLIKELY(!status.ok())) {
        return status;
    }

    if (request.packet_seq() < cur_seq) {
        LOG(INFO) << "packet has already recept before, expect_seq=" << cur_seq
                  << ", recept_seq=" << request.packet_seq();
        return Status::OK();
    }

    std::unordered_map<int64_t /* tablet_id */, std::vector<uint32_t> /* row index */>
            tablet_to_rowidxs;
    _build_tablet_to_rowidxs(request, &tablet_to_rowidxs);

    return _write_block_data(request, cur_seq, tablet_to_rowidxs, response);
}

void BaseTabletsChannel::_add_broken_tablet(int64_t tablet_id) {
    std::unique_lock<std::shared_mutex> wlock(_broken_tablets_lock);
    _broken_tablets.insert(tablet_id);
}

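// Note: this helper reads _broken_tablets without taking _broken_tablets_lock
// itself; callers such as _build_tablet_to_rowidxs() are expected to hold the
// coarse-grained read lock (see the comment there).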
bool BaseTabletsChannel::_is_broken_tablet(int64_t tablet_id) const {
    return _broken_tablets.find(tablet_id) != _broken_tablets.end();
}

void BaseTabletsChannel::_build_tablet_to_rowidxs(
        const PTabletWriterAddBlockRequest& request,
        std::unordered_map<int64_t, std::vector<uint32_t>>* tablet_to_rowidxs) {
    // Take one coarse-grained read lock here rather than locking each time
    // _broken_tablets is visited; tests show that a relatively coarse-grained
    // read lock performs better in multicore scenarios.
    // see: https://github.com/apache/doris/pull/28552
    std::shared_lock<std::shared_mutex> rlock(_broken_tablets_lock);
    if (request.is_single_tablet_block()) {
        // Cloud mode needs the tablet ids to prepare rowsets.
        int64_t tablet_id = request.tablet_ids(0);
        tablet_to_rowidxs->emplace(tablet_id, std::initializer_list<uint32_t> {0});
        return;
    }
    for (uint32_t i = 0; i < request.tablet_ids_size(); ++i) {
        int64_t tablet_id = request.tablet_ids(i);
        if (_is_broken_tablet(tablet_id)) {
            // skip broken tablets
            VLOG_PROGRESS << "skip broken tablet tablet=" << tablet_id;
            continue;
        }
        auto it = tablet_to_rowidxs->find(tablet_id);
        if (it == tablet_to_rowidxs->end()) {
            tablet_to_rowidxs->emplace(tablet_id, std::initializer_list<uint32_t> {i});
        } else {
            it->second.emplace_back(i);
        }
    }
}

} // namespace doris