Coverage Report

Created: 2026-03-16 08:10

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/exec/sink/writer/vtablet_writer.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "exec/sink/writer/vtablet_writer.h"
19
20
#include <brpc/http_method.h>
21
#include <bthread/bthread.h>
22
#include <fmt/format.h>
23
#include <gen_cpp/DataSinks_types.h>
24
#include <gen_cpp/Descriptors_types.h>
25
#include <gen_cpp/Exprs_types.h>
26
#include <gen_cpp/FrontendService.h>
27
#include <gen_cpp/FrontendService_types.h>
28
#include <gen_cpp/HeartbeatService_types.h>
29
#include <gen_cpp/Metrics_types.h>
30
#include <gen_cpp/Types_types.h>
31
#include <gen_cpp/data.pb.h>
32
#include <gen_cpp/internal_service.pb.h>
33
#include <glog/logging.h>
34
#include <google/protobuf/stubs/common.h>
35
#include <sys/param.h>
36
37
#include <algorithm>
38
#include <initializer_list>
39
#include <memory>
40
#include <mutex>
41
#include <sstream>
42
#include <string>
43
#include <unordered_map>
44
#include <utility>
45
#include <vector>
46
47
#include "cloud/config.h"
48
#include "common/config.h"
49
#include "core/data_type/data_type.h"
50
#include "cpp/sync_point.h"
51
#include "exec/sink/vrow_distribution.h"
52
#include "exprs/vexpr_fwd.h"
53
#include "runtime/runtime_profile.h"
54
55
#ifdef DEBUG
56
#include <unordered_set>
57
#endif
58
59
#include "common/compiler_util.h" // IWYU pragma: keep
60
#include "common/logging.h"
61
#include "common/metrics/doris_metrics.h"
62
#include "common/object_pool.h"
63
#include "common/signal_handler.h"
64
#include "common/status.h"
65
#include "core/block/block.h"
66
#include "core/column/column.h"
67
#include "core/column/column_const.h"
68
#include "core/data_type/data_type_nullable.h"
69
#include "exec/sink/vtablet_block_convertor.h"
70
#include "exec/sink/vtablet_finder.h"
71
#include "exprs/vexpr.h"
72
#include "runtime/descriptors.h"
73
#include "runtime/exec_env.h"
74
#include "runtime/memory/memory_reclamation.h"
75
#include "runtime/query_context.h"
76
#include "runtime/runtime_state.h"
77
#include "runtime/thread_context.h"
78
#include "service/backend_options.h"
79
#include "storage/tablet_info.h"
80
#include "util/brpc_closure.h"
81
#include "util/debug_points.h"
82
#include "util/defer_op.h"
83
#include "util/mem_info.h"
84
#include "util/network_util.h"
85
#include "util/proto_util.h"
86
#include "util/threadpool.h"
87
#include "util/thrift_rpc_helper.h"
88
#include "util/thrift_util.h"
89
#include "util/time.h"
90
#include "util/uid_util.h"
91
92
namespace doris {
93
class TExpr;
94
95
#include "common/compile_check_begin.h"
96
97
bvar::Adder<int64_t> g_sink_write_bytes;
98
bvar::PerSecond<bvar::Adder<int64_t>> g_sink_write_bytes_per_second("sink_throughput_byte",
99
                                                                    &g_sink_write_bytes, 60);
100
bvar::Adder<int64_t> g_sink_write_rows;
101
bvar::PerSecond<bvar::Adder<int64_t>> g_sink_write_rows_per_second("sink_throughput_row",
102
                                                                   &g_sink_write_rows, 60);
103
bvar::Adder<int64_t> g_sink_load_back_pressure_version_time_ms(
104
        "load_back_pressure_version_time_ms");
105
106
Status IndexChannel::init(RuntimeState* state, const std::vector<TTabletWithPartition>& tablets,
107
67.6k
                          bool incremental) {
108
67.6k
    SCOPED_CONSUME_MEM_TRACKER(_index_channel_tracker.get());
109
414k
    for (const auto& tablet : tablets) {
110
        // First find the location BEs of this tablet
111
414k
        auto* tablet_locations = _parent->_location->find_tablet(tablet.tablet_id);
112
414k
        if (tablet_locations == nullptr) {
113
0
            return Status::InternalError("unknown tablet, tablet_id={}", tablet.tablet_id);
114
0
        }
115
414k
        std::vector<std::shared_ptr<VNodeChannel>> channels;
116
        // For tablet, deal with its' all replica (in some node).
117
415k
        for (auto& replica_node_id : tablet_locations->node_ids) {
118
415k
            std::shared_ptr<VNodeChannel> channel;
119
415k
            auto it = _node_channels.find(replica_node_id);
120
            // when we prepare for TableSink or incremental open tablet, we need init
121
415k
            if (it == _node_channels.end()) {
122
                // NodeChannel is not added to the _parent->_pool.
123
                // Because the deconstruction of NodeChannel may take a long time to wait rpc finish.
124
                // but the ObjectPool will hold a spin lock to delete objects.
125
68.2k
                channel =
126
68.2k
                        std::make_shared<VNodeChannel>(_parent, this, replica_node_id, incremental);
127
68.2k
                _node_channels.emplace(replica_node_id, channel);
128
                // incremental opened new node. when close we have use two-stage close.
129
68.2k
                if (incremental) {
130
0
                    _has_inc_node = true;
131
0
                }
132
68.2k
                VLOG_CRITICAL << "init new node for instance " << _parent->_sender_id
133
660
                              << ", node id:" << replica_node_id << ", incremantal:" << incremental;
134
347k
            } else {
135
347k
                channel = it->second;
136
347k
            }
137
415k
            channel->add_tablet(tablet);
138
415k
            if (_parent->_write_single_replica) {
139
0
                auto* slave_location = _parent->_slave_location->find_tablet(tablet.tablet_id);
140
0
                if (slave_location != nullptr) {
141
0
                    channel->add_slave_tablet_nodes(tablet.tablet_id, slave_location->node_ids);
142
0
                }
143
0
            }
144
415k
            channels.push_back(channel);
145
415k
            _tablets_by_channel[replica_node_id].insert(tablet.tablet_id);
146
415k
        }
147
414k
        _channels_by_tablet.emplace(tablet.tablet_id, std::move(channels));
148
414k
    }
149
68.3k
    for (auto& it : _node_channels) {
150
68.3k
        RETURN_IF_ERROR(it.second->init(state));
151
68.3k
    }
152
67.6k
    if (_where_clause != nullptr) {
153
33
        RETURN_IF_ERROR(_where_clause->prepare(state, *_parent->_output_row_desc));
154
33
        RETURN_IF_ERROR(_where_clause->open(state));
155
33
    }
156
157
67.6k
    return Status::OK();
158
67.6k
}
159
160
void IndexChannel::mark_as_failed(const VNodeChannel* node_channel, const std::string& err,
161
48
                                  int64_t tablet_id) {
162
48
    DCHECK(node_channel != nullptr);
163
48
    LOG(INFO) << "mark node_id:" << node_channel->channel_info() << " tablet_id: " << tablet_id
164
48
              << " as failed, err: " << err;
165
48
    auto node_id = node_channel->node_id();
166
48
    const auto& it = _tablets_by_channel.find(node_id);
167
48
    if (it == _tablets_by_channel.end()) {
168
0
        return;
169
0
    }
170
171
48
    {
172
48
        std::lock_guard<std::mutex> l(_fail_lock);
173
48
        if (tablet_id == -1) {
174
141
            for (const auto the_tablet_id : it->second) {
175
141
                _failed_channels[the_tablet_id].insert(node_id);
176
141
                _failed_channels_msgs.emplace(the_tablet_id,
177
141
                                              err + ", host: " + node_channel->host());
178
141
                if (_failed_channels[the_tablet_id].size() > _max_failed_replicas(the_tablet_id)) {
179
141
                    _intolerable_failure_status = Status::Error<ErrorCode::INTERNAL_ERROR, false>(
180
141
                            _failed_channels_msgs[the_tablet_id]);
181
141
                }
182
141
            }
183
48
        } else {
184
0
            _failed_channels[tablet_id].insert(node_id);
185
0
            _failed_channels_msgs.emplace(tablet_id, err + ", host: " + node_channel->host());
186
0
            if (_failed_channels[tablet_id].size() > _max_failed_replicas(tablet_id)) {
187
0
                _intolerable_failure_status = Status::Error<ErrorCode::INTERNAL_ERROR, false>(
188
0
                        _failed_channels_msgs[tablet_id]);
189
0
            }
190
0
        }
191
48
    }
192
48
}
193
194
141
int IndexChannel::_max_failed_replicas(int64_t tablet_id) {
195
141
    auto [total_replicas_num, load_required_replicas_num] =
196
141
            _parent->_tablet_replica_info[tablet_id];
197
141
    int max_failed_replicas = total_replicas_num == 0
198
141
                                      ? (_parent->_num_replicas - 1) / 2
199
141
                                      : total_replicas_num - load_required_replicas_num;
200
141
    return max_failed_replicas;
201
141
}
202
203
1.63M
int IndexChannel::_load_required_replicas_num(int64_t tablet_id) {
204
1.63M
    auto [total_replicas_num, load_required_replicas_num] =
205
1.63M
            _parent->_tablet_replica_info[tablet_id];
206
1.63M
    if (total_replicas_num == 0) {
207
0
        return (_parent->_num_replicas + 1) / 2;
208
0
    }
209
1.63M
    return load_required_replicas_num;
210
1.63M
}
211
212
176k
Status IndexChannel::check_intolerable_failure() {
213
176k
    std::lock_guard<std::mutex> l(_fail_lock);
214
176k
    return _intolerable_failure_status;
215
176k
}
216
217
68.1k
void IndexChannel::set_error_tablet_in_state(RuntimeState* state) {
218
68.1k
    std::vector<TErrorTabletInfo> error_tablet_infos;
219
220
68.1k
    {
221
68.1k
        std::lock_guard<std::mutex> l(_fail_lock);
222
68.1k
        for (const auto& it : _failed_channels_msgs) {
223
0
            TErrorTabletInfo error_info;
224
0
            error_info.__set_tabletId(it.first);
225
0
            error_info.__set_msg(it.second);
226
0
            error_tablet_infos.emplace_back(error_info);
227
0
        }
228
68.1k
    }
229
68.1k
    state->add_error_tablet_infos(error_tablet_infos);
230
68.1k
}
231
232
void IndexChannel::set_tablets_received_rows(
233
68.1k
        const std::vector<std::pair<int64_t, int64_t>>& tablets_received_rows, int64_t node_id) {
234
182k
    for (const auto& [tablet_id, rows_num] : tablets_received_rows) {
235
182k
        _tablets_received_rows[tablet_id].emplace_back(node_id, rows_num);
236
182k
    }
237
68.1k
}
238
239
void IndexChannel::set_tablets_filtered_rows(
240
68.1k
        const std::vector<std::pair<int64_t, int64_t>>& tablets_filtered_rows, int64_t node_id) {
241
182k
    for (const auto& [tablet_id, rows_num] : tablets_filtered_rows) {
242
182k
        _tablets_filtered_rows[tablet_id].emplace_back(node_id, rows_num);
243
182k
    }
244
68.1k
}
245
246
0
Status IndexChannel::check_tablet_received_rows_consistency() {
247
0
    for (auto& tablet : _tablets_received_rows) {
248
0
        for (size_t i = 0; i < tablet.second.size(); i++) {
249
0
            VLOG_NOTICE << "check_tablet_received_rows_consistency, load_id: " << _parent->_load_id
250
0
                        << ", txn_id: " << std::to_string(_parent->_txn_id)
251
0
                        << ", tablet_id: " << tablet.first
252
0
                        << ", node_id: " << tablet.second[i].first
253
0
                        << ", rows_num: " << tablet.second[i].second;
254
0
            if (i == 0) {
255
0
                continue;
256
0
            }
257
0
            if (tablet.second[i].second != tablet.second[0].second) {
258
0
                return Status::InternalError(
259
0
                        "rows num written by multi replicas doest't match, load_id={}, txn_id={}, "
260
0
                        "tablt_id={}, node_id={}, rows_num={}, node_id={}, rows_num={}",
261
0
                        print_id(_parent->_load_id), _parent->_txn_id, tablet.first,
262
0
                        tablet.second[i].first, tablet.second[i].second, tablet.second[0].first,
263
0
                        tablet.second[0].second);
264
0
            }
265
0
        }
266
0
    }
267
0
    return Status::OK();
268
0
}
269
270
59
Status IndexChannel::check_tablet_filtered_rows_consistency() {
271
95
    for (auto& tablet : _tablets_filtered_rows) {
272
190
        for (size_t i = 0; i < tablet.second.size(); i++) {
273
95
            VLOG_NOTICE << "check_tablet_filtered_rows_consistency, load_id: " << _parent->_load_id
274
0
                        << ", txn_id: " << std::to_string(_parent->_txn_id)
275
0
                        << ", tablet_id: " << tablet.first
276
0
                        << ", node_id: " << tablet.second[i].first
277
0
                        << ", rows_num: " << tablet.second[i].second;
278
95
            if (i == 0) {
279
95
                continue;
280
95
            }
281
0
            if (tablet.second[i].second != tablet.second[0].second) {
282
0
                return Status::InternalError(
283
0
                        "rows num filtered by multi replicas doest't match, load_id={}, txn_id={}, "
284
0
                        "tablt_id={}, node_id={}, rows_num={}, node_id={}, rows_num={}",
285
0
                        print_id(_parent->_load_id), _parent->_txn_id, tablet.first,
286
0
                        tablet.second[i].first, tablet.second[i].second, tablet.second[0].first,
287
0
                        tablet.second[0].second);
288
0
            }
289
0
        }
290
95
    }
291
59
    return Status::OK();
292
59
}
293
294
static Status cancel_channel_and_check_intolerable_failure(Status status,
295
                                                           const std::string& err_msg,
296
48
                                                           IndexChannel& ich, VNodeChannel& nch) {
297
48
    LOG(WARNING) << nch.channel_info() << ", close channel failed, err: " << err_msg;
298
48
    ich.mark_as_failed(&nch, err_msg, -1);
299
    // cancel the node channel in best effort
300
48
    nch.cancel(err_msg);
301
302
    // check if index has intolerable failure
303
48
    if (Status index_st = ich.check_intolerable_failure(); !index_st.ok()) {
304
48
        status = std::move(index_st);
305
48
    } else if (Status receive_st = ich.check_tablet_received_rows_consistency(); !receive_st.ok()) {
306
0
        status = std::move(receive_st);
307
0
    } else if (Status filter_st = ich.check_tablet_filtered_rows_consistency(); !filter_st.ok()) {
308
0
        status = std::move(filter_st);
309
0
    }
310
48
    return status;
311
48
}
312
313
Status IndexChannel::close_wait(
314
        RuntimeState* state, WriterStats* writer_stats,
315
        std::unordered_map<int64_t, AddBatchCounter>* node_add_batch_counter_map,
316
        std::unordered_set<int64_t> unfinished_node_channel_ids,
317
68.2k
        bool need_wait_after_quorum_success) {
318
68.2k
    DBUG_EXECUTE_IF("IndexChannel.close_wait.timeout",
319
68.2k
                    { return Status::TimedOut("injected timeout"); });
320
68.2k
    Status status = Status::OK();
321
    // 1. wait quorum success
322
68.2k
    std::unordered_set<int64_t> need_finish_tablets;
323
68.2k
    auto partition_ids = _parent->_tablet_finder->partition_ids();
324
82.4k
    for (const auto& part : _parent->_vpartition->get_partitions()) {
325
82.4k
        if (partition_ids.contains(part->id)) {
326
49.5k
            for (const auto& index : part->indexes) {
327
262k
                for (const auto& tablet_id : index.tablets) {
328
262k
                    need_finish_tablets.insert(tablet_id);
329
262k
                }
330
49.5k
            }
331
32.9k
        }
332
82.4k
    }
333
1.56M
    while (true) {
334
1.56M
        RETURN_IF_ERROR(check_each_node_channel_close(
335
1.56M
                &unfinished_node_channel_ids, node_add_batch_counter_map, writer_stats, status));
336
1.56M
        bool quorum_success = _quorum_success(unfinished_node_channel_ids, need_finish_tablets);
337
1.56M
        if (unfinished_node_channel_ids.empty() || quorum_success) {
338
68.1k
            LOG(INFO) << "quorum_success: " << quorum_success
339
68.1k
                      << ", is all finished: " << unfinished_node_channel_ids.empty()
340
68.1k
                      << ", txn_id: " << _parent->_txn_id
341
68.1k
                      << ", load_id: " << print_id(_parent->_load_id);
342
68.1k
            break;
343
68.1k
        }
344
1.49M
        bthread_usleep(1000 * 10);
345
1.49M
    }
346
347
    // 2. wait for all node channel to complete as much as possible
348
68.2k
    if (!unfinished_node_channel_ids.empty() && need_wait_after_quorum_success) {
349
0
        int64_t arrival_quorum_success_time = UnixMillis();
350
0
        int64_t max_wait_time_ms = _calc_max_wait_time_ms(unfinished_node_channel_ids);
351
0
        while (true) {
352
0
            RETURN_IF_ERROR(check_each_node_channel_close(&unfinished_node_channel_ids,
353
0
                                                          node_add_batch_counter_map, writer_stats,
354
0
                                                          status));
355
0
            if (unfinished_node_channel_ids.empty()) {
356
0
                break;
357
0
            }
358
0
            int64_t elapsed_ms = UnixMillis() - arrival_quorum_success_time;
359
0
            if (elapsed_ms > max_wait_time_ms ||
360
0
                _parent->_load_channel_timeout_s - elapsed_ms / 1000 <
361
0
                        config::quorum_success_remaining_timeout_seconds) {
362
                // cancel unfinished node channel
363
0
                std::stringstream unfinished_node_channel_host_str;
364
0
                for (auto& it : unfinished_node_channel_ids) {
365
0
                    unfinished_node_channel_host_str << _node_channels[it]->host() << ",";
366
0
                    _node_channels[it]->cancel("timeout");
367
0
                }
368
0
                LOG(WARNING) << "reach max wait time, max_wait_time_ms: " << max_wait_time_ms
369
0
                             << ", cancel unfinished node channel and finish close"
370
0
                             << ", load id: " << print_id(_parent->_load_id)
371
0
                             << ", txn_id: " << _parent->_txn_id << ", unfinished node channel: "
372
0
                             << unfinished_node_channel_host_str.str();
373
0
                break;
374
0
            }
375
0
            bthread_usleep(1000 * 10);
376
0
        }
377
0
    }
378
68.2k
    return status;
379
68.2k
}
380
381
Status IndexChannel::check_each_node_channel_close(
382
        std::unordered_set<int64_t>* unfinished_node_channel_ids,
383
        std::unordered_map<int64_t, AddBatchCounter>* node_add_batch_counter_map,
384
1.56M
        WriterStats* writer_stats, Status status) {
385
1.56M
    Status final_status = Status::OK();
386
1.56M
    for (auto& it : _node_channels) {
387
1.56M
        std::shared_ptr<VNodeChannel> node_channel = it.second;
388
        // If the node channel is not in the unfinished_node_channel_ids,
389
        // it means the node channel is already closed.
390
1.56M
        if (!unfinished_node_channel_ids->contains(it.first)) {
391
0
            continue;
392
0
        }
393
1.56M
        bool node_channel_closed = false;
394
1.56M
        auto close_status = it.second->close_wait(_parent->_state, &node_channel_closed);
395
1.56M
        if (node_channel_closed) {
396
68.2k
            close_status = it.second->after_close_handle(_parent->_state, writer_stats,
397
68.2k
                                                         node_add_batch_counter_map);
398
68.2k
            unfinished_node_channel_ids->erase(it.first);
399
68.2k
        }
400
1.56M
        DBUG_EXECUTE_IF("IndexChannel.check_each_node_channel_close.close_status_not_ok",
401
1.56M
                        { close_status = Status::InternalError("injected close status not ok"); });
402
1.56M
        if (!close_status.ok()) {
403
48
            final_status = cancel_channel_and_check_intolerable_failure(
404
48
                    std::move(final_status), close_status.to_string(), *this, *it.second);
405
48
        }
406
1.56M
    }
407
408
1.56M
    return final_status;
409
1.56M
}
410
411
bool IndexChannel::_quorum_success(const std::unordered_set<int64_t>& unfinished_node_channel_ids,
412
1.56M
                                   const std::unordered_set<int64_t>& need_finish_tablets) {
413
1.56M
    if (!config::enable_quorum_success_write) {
414
0
        return false;
415
0
    }
416
1.56M
    if (need_finish_tablets.empty()) [[unlikely]] {
417
87.0k
        return false;
418
87.0k
    }
419
420
    // 1. collect all write tablets and finished tablets
421
1.47M
    std::unordered_map<int64_t, int64_t> finished_tablets_replica;
422
1.47M
    for (const auto& [node_id, node_channel] : _node_channels) {
423
1.47M
        if (unfinished_node_channel_ids.contains(node_id) || !node_channel->check_status().ok()) {
424
1.44M
            continue;
425
1.44M
        }
426
271k
        for (const auto& tablet_id : _tablets_by_channel[node_id]) {
427
            // Only count non-gap backends for quorum success.
428
            // Gap backends' success doesn't count toward majority write.
429
271k
            auto gap_it = _parent->_tablet_version_gap_backends.find(tablet_id);
430
271k
            if (gap_it == _parent->_tablet_version_gap_backends.end() ||
431
271k
                gap_it->second.find(node_id) == gap_it->second.end()) {
432
271k
                finished_tablets_replica[tablet_id]++;
433
271k
            }
434
271k
        }
435
30.6k
    }
436
437
    // 2. check if quorum success
438
1.63M
    for (const auto& tablet_id : need_finish_tablets) {
439
1.63M
        if (finished_tablets_replica[tablet_id] < _load_required_replicas_num(tablet_id)) {
440
1.44M
            return false;
441
1.44M
        }
442
1.63M
    }
443
444
26.8k
    return true;
445
1.47M
}
446
447
int64_t IndexChannel::_calc_max_wait_time_ms(
448
0
        const std::unordered_set<int64_t>& unfinished_node_channel_ids) {
449
    // 1. calculate avg speed of all unfinished node channel
450
0
    int64_t elapsed_ms = UnixMillis() - _start_time;
451
0
    int64_t total_bytes = 0;
452
0
    int finished_count = 0;
453
0
    for (const auto& [node_id, node_channel] : _node_channels) {
454
0
        if (unfinished_node_channel_ids.contains(node_id)) {
455
0
            continue;
456
0
        }
457
0
        total_bytes += node_channel->write_bytes();
458
0
        finished_count++;
459
0
    }
460
    // no data loaded in index channel, return 0
461
0
    if (total_bytes == 0 || finished_count == 0) {
462
0
        return 0;
463
0
    }
464
    // if elapsed_ms is equal to 0, explain the loaded data is too small
465
0
    if (elapsed_ms <= 0) {
466
0
        return config::quorum_success_min_wait_seconds * 1000;
467
0
    }
468
0
    double avg_speed =
469
0
            static_cast<double>(total_bytes) / (static_cast<double>(elapsed_ms) * finished_count);
470
471
    // 2. calculate max wait time of each unfinished node channel and return the max value
472
0
    int64_t max_wait_time_ms = 0;
473
0
    for (int64_t id : unfinished_node_channel_ids) {
474
0
        int64_t bytes = _node_channels[id]->write_bytes();
475
0
        int64_t wait =
476
0
                avg_speed > 0 ? static_cast<int64_t>(static_cast<double>(bytes) / avg_speed) : 0;
477
0
        max_wait_time_ms = std::max(max_wait_time_ms, wait);
478
0
    }
479
480
    // 3. calculate max wait time
481
    // introduce quorum_success_min_wait_seconds to avoid jitter of small load
482
0
    max_wait_time_ms -= UnixMillis() - _start_time;
483
0
    max_wait_time_ms =
484
0
            std::max(static_cast<int64_t>(static_cast<double>(max_wait_time_ms) *
485
0
                                          (1.0 + config::quorum_success_max_wait_multiplier)),
486
0
                     config::quorum_success_min_wait_seconds * 1000);
487
488
0
    return max_wait_time_ms;
489
0
}
490
491
1.69M
static Status none_of(std::initializer_list<bool> vars) {
492
3.39M
    bool none = std::none_of(vars.begin(), vars.end(), [](bool var) { return var; });
493
1.69M
    Status st = Status::OK();
494
1.69M
    if (!none) {
495
48
        std::string vars_str;
496
48
        std::for_each(vars.begin(), vars.end(),
497
96
                      [&vars_str](bool var) -> void { vars_str += (var ? "1/" : "0/"); });
498
48
        if (!vars_str.empty()) {
499
48
            vars_str.pop_back(); // 0/1/0/ -> 0/1/0
500
48
        }
501
48
        st = Status::Uninitialized(vars_str);
502
48
    }
503
504
1.69M
    return st;
505
1.69M
}
506
507
VNodeChannel::VNodeChannel(VTabletWriter* parent, IndexChannel* index_channel, int64_t node_id,
508
                           bool is_incremental)
509
68.0k
        : _parent(parent),
510
68.0k
          _index_channel(index_channel),
511
68.0k
          _node_id(node_id),
512
68.0k
          _is_incremental(is_incremental) {
513
68.0k
    _cur_add_block_request = std::make_shared<PTabletWriterAddBlockRequest>();
514
68.0k
    _node_channel_tracker = std::make_shared<MemTracker>(
515
68.0k
            fmt::format("NodeChannel:indexID={}:threadId={}",
516
68.0k
                        std::to_string(_index_channel->_index_id), ThreadContext::get_thread_id()));
517
68.0k
    _load_mem_limit = MemInfo::mem_limit() * config::load_process_max_memory_limit_percent / 100;
518
68.0k
}
519
520
68.3k
VNodeChannel::~VNodeChannel() = default;
521
522
68.3k
void VNodeChannel::clear_all_blocks() {
523
68.3k
    std::lock_guard<std::mutex> lg(_pending_batches_lock);
524
68.3k
    std::queue<AddBlockReq> empty;
525
68.3k
    std::swap(_pending_blocks, empty);
526
68.3k
    _cur_mutable_block.reset();
527
68.3k
}
528
529
// we don't need to send tablet_writer_cancel rpc request when
530
// init failed, so set _is_closed to true.
531
// if "_cancelled" is set to true,
532
// no need to set _cancel_msg because the error will be
533
// returned directly via "TabletSink::prepare()" method.
534
68.3k
Status VNodeChannel::init(RuntimeState* state) {
535
68.3k
    if (_inited) {
536
178
        return Status::OK();
537
178
    }
538
539
68.1k
    SCOPED_CONSUME_MEM_TRACKER(_node_channel_tracker.get());
540
68.1k
    _task_exec_ctx = state->get_task_execution_context();
541
68.1k
    _tuple_desc = _parent->_output_tuple_desc;
542
68.1k
    _state = state;
543
    // get corresponding BE node.
544
68.1k
    const auto* node = _parent->_nodes_info->find_node(_node_id);
545
68.1k
    if (node == nullptr) {
546
0
        _cancelled = true;
547
0
        _is_closed = true;
548
0
        return Status::InternalError("unknown node id, id={}", _node_id);
549
0
    }
550
68.1k
    _node_info = *node;
551
552
68.1k
    _load_info = "load_id=" + print_id(_parent->_load_id) +
553
68.1k
                 ", txn_id=" + std::to_string(_parent->_txn_id);
554
555
68.1k
    _row_desc = std::make_unique<RowDescriptor>(_tuple_desc);
556
68.1k
    _batch_size = state->batch_size();
557
558
68.1k
    _stub = state->exec_env()->brpc_internal_client_cache()->get_client(_node_info.host,
559
68.1k
                                                                        _node_info.brpc_port);
560
68.1k
    if (_stub == nullptr) {
561
0
        _cancelled = true;
562
0
        _is_closed = true;
563
0
        return Status::InternalError("Get rpc stub failed, host={}, port={}, info={}",
564
0
                                     _node_info.host, _node_info.brpc_port, channel_info());
565
0
    }
566
567
68.1k
    _rpc_timeout_ms = state->execution_timeout() * 1000;
568
68.1k
    _timeout_watch.start();
569
570
    // Initialize _cur_add_block_request
571
68.3k
    if (!_cur_add_block_request->has_id()) {
572
68.3k
        *(_cur_add_block_request->mutable_id()) = _parent->_load_id;
573
68.3k
    }
574
68.1k
    _cur_add_block_request->set_index_id(_index_channel->_index_id);
575
68.1k
    _cur_add_block_request->set_sender_id(_parent->_sender_id);
576
68.1k
    _cur_add_block_request->set_backend_id(_node_id);
577
68.1k
    _cur_add_block_request->set_eos(false);
578
579
    // add block closure
580
    // Has to using value to capture _task_exec_ctx because tablet writer may destroyed during callback.
581
68.1k
    _send_block_callback = WriteBlockCallback<PTabletWriterAddBlockResult>::create_shared();
582
68.1k
    _send_block_callback->addFailedHandler(
583
68.1k
            [&, task_exec_ctx = _task_exec_ctx](const WriteBlockCallbackContext& ctx) {
584
0
                std::shared_ptr<TaskExecutionContext> ctx_lock = task_exec_ctx.lock();
585
0
                if (ctx_lock == nullptr) {
586
0
                    return;
587
0
                }
588
0
                _add_block_failed_callback(ctx);
589
0
            });
590
591
68.1k
    _send_block_callback->addSuccessHandler(
592
68.1k
            [&, task_exec_ctx = _task_exec_ctx](const PTabletWriterAddBlockResult& result,
593
70.6k
                                                const WriteBlockCallbackContext& ctx) {
594
70.6k
                std::shared_ptr<TaskExecutionContext> ctx_lock = task_exec_ctx.lock();
595
70.6k
                if (ctx_lock == nullptr) {
596
0
                    return;
597
0
                }
598
70.6k
                _add_block_success_callback(result, ctx);
599
70.6k
            });
600
601
68.1k
    _name = fmt::format("VNodeChannel[{}-{}]", _index_channel->_index_id, _node_id);
602
    // The node channel will send _batch_size rows of data each rpc. When the
603
    // number of tablets is large, the number of data rows received by each
604
    // tablet is small, TabletsChannel need to traverse each tablet for import.
605
    // so the import performance is poor. Therefore, we set _batch_size to
606
    // a relatively large value to improve the import performance.
607
68.1k
    _batch_size = std::max(_batch_size, 8192);
608
609
68.1k
    if (_state) {
610
68.1k
        QueryContext* query_ctx = _state->get_query_ctx();
611
68.2k
        if (query_ctx) {
612
68.2k
            auto wg_ptr = query_ctx->workload_group();
613
68.3k
            if (wg_ptr) {
614
68.3k
                _wg_id = wg_ptr->id();
615
68.3k
            }
616
68.2k
        }
617
68.1k
    }
618
619
68.1k
    _inited = true;
620
68.1k
    return Status::OK();
621
68.1k
}
622
623
68.4k
void VNodeChannel::_open_internal(bool is_incremental) {
624
68.4k
    if (_tablets_wait_open.empty()) {
625
0
        return;
626
0
    }
627
68.4k
    SCOPED_CONSUME_MEM_TRACKER(_node_channel_tracker.get());
628
68.4k
    auto request = std::make_shared<PTabletWriterOpenRequest>();
629
68.4k
    request->set_allocated_id(&_parent->_load_id);
630
68.4k
    request->set_index_id(_index_channel->_index_id);
631
68.4k
    request->set_txn_id(_parent->_txn_id);
632
68.4k
    request->set_sender_id(_parent->_sender_id);
633
68.4k
    request->set_allocated_schema(_parent->_schema->to_protobuf());
634
68.4k
    if (_parent->_t_sink.olap_table_sink.__isset.storage_vault_id) {
635
0
        request->set_storage_vault_id(_parent->_t_sink.olap_table_sink.storage_vault_id);
636
0
    }
637
68.4k
    std::set<int64_t> deduper;
638
415k
    for (auto& tablet : _tablets_wait_open) {
639
415k
        if (deduper.contains(tablet.tablet_id)) {
640
0
            continue;
641
0
        }
642
415k
        auto* ptablet = request->add_tablets();
643
415k
        ptablet->set_partition_id(tablet.partition_id);
644
415k
        ptablet->set_tablet_id(tablet.tablet_id);
645
415k
        deduper.insert(tablet.tablet_id);
646
415k
        _all_tablets.push_back(std::move(tablet));
647
415k
    }
648
68.4k
    _tablets_wait_open.clear();
649
650
68.4k
    request->set_num_senders(_parent->_num_senders);
651
68.4k
    request->set_need_gen_rollup(false); // Useless but it is a required field in pb
652
68.4k
    request->set_load_channel_timeout_s(_parent->_load_channel_timeout_s);
653
68.4k
    request->set_is_high_priority(_parent->_is_high_priority);
654
68.4k
    request->set_sender_ip(BackendOptions::get_localhost());
655
68.4k
    request->set_is_vectorized(true);
656
68.4k
    request->set_backend_id(_node_id);
657
68.4k
    request->set_enable_profile(_state->enable_profile());
658
68.4k
    request->set_is_incremental(is_incremental);
659
68.4k
    request->set_txn_expiration(_parent->_txn_expiration);
660
68.4k
    request->set_write_file_cache(_parent->_write_file_cache);
661
662
68.4k
    if (_wg_id > 0) {
663
68.1k
        request->set_workload_group_id(_wg_id);
664
68.1k
    }
665
666
68.4k
    auto open_callback = DummyBrpcCallback<PTabletWriterOpenResult>::create_shared();
667
68.4k
    auto open_closure = AutoReleaseClosure<
668
68.4k
            PTabletWriterOpenRequest,
669
68.4k
            DummyBrpcCallback<PTabletWriterOpenResult>>::create_unique(request, open_callback);
670
68.4k
    open_callback->cntl_->set_timeout_ms(config::tablet_writer_open_rpc_timeout_sec * 1000);
671
68.4k
    if (config::tablet_writer_ignore_eovercrowded) {
672
68.0k
        open_callback->cntl_->ignore_eovercrowded();
673
68.0k
    }
674
68.4k
    VLOG_DEBUG << fmt::format("txn {}: open NodeChannel to {}, incremental: {}, senders: {}",
675
428
                              _parent->_txn_id, _node_id, is_incremental, _parent->_num_senders);
676
    // the real transmission here. the corresponding BE's load mgr will open load channel for it.
677
68.4k
    _stub->tablet_writer_open(open_closure->cntl_.get(), open_closure->request_.get(),
678
68.4k
                              open_closure->response_.get(), open_closure.get());
679
68.4k
    open_closure.release();
680
68.4k
    _open_callbacks.push_back(open_callback);
681
682
68.4k
    static_cast<void>(request->release_id());
683
68.4k
    static_cast<void>(request->release_schema());
684
68.4k
}
685
686
68.2k
void VNodeChannel::open() {
687
68.2k
    _open_internal(false);
688
68.2k
}
689
690
178
void VNodeChannel::incremental_open() {
691
178
    VLOG_DEBUG << "incremental opening node channel" << _node_id;
692
178
    _open_internal(true);
693
178
}
694
695
68.4k
Status VNodeChannel::open_wait() {
696
68.4k
    Status status;
697
68.6k
    for (auto& open_callback : _open_callbacks) {
698
        // because of incremental open, we will wait multi times. so skip the closures which have been checked and set to nullptr in previous rounds
699
68.6k
        if (open_callback == nullptr) {
700
0
            continue;
701
0
        }
702
703
68.6k
        open_callback->join();
704
68.6k
        SCOPED_CONSUME_MEM_TRACKER(_node_channel_tracker.get());
705
68.6k
        if (open_callback->cntl_->Failed()) {
706
0
            if (!ExecEnv::GetInstance()->brpc_internal_client_cache()->available(
707
0
                        _stub, _node_info.host, _node_info.brpc_port)) {
708
0
                ExecEnv::GetInstance()->brpc_internal_client_cache()->erase(
709
0
                        open_callback->cntl_->remote_side());
710
0
            }
711
0
            _cancelled = true;
712
0
            auto error_code = open_callback->cntl_->ErrorCode();
713
0
            auto error_text = open_callback->cntl_->ErrorText();
714
0
            if (error_text.find("Reached timeout") != std::string::npos) {
715
0
                LOG(WARNING) << "failed to open tablet writer may caused by timeout. increase BE "
716
0
                                "config `tablet_writer_open_rpc_timeout_sec` if you are sure that "
717
0
                                "your table building and data are reasonable.";
718
0
            }
719
0
            return Status::Error<ErrorCode::INTERNAL_ERROR, false>(
720
0
                    "failed to open tablet writer, error={}, error_text={}, info={}",
721
0
                    berror(error_code), error_text, channel_info());
722
0
        }
723
68.6k
        status = Status::create(open_callback->response_->status());
724
725
68.6k
        if (!status.ok()) {
726
0
            _cancelled = true;
727
0
            return status;
728
0
        }
729
68.6k
    }
730
731
68.4k
    return status;
732
68.4k
}
733
734
37.2k
Status VNodeChannel::add_block(Block* block, const Payload* payload) {
735
37.2k
    SCOPED_CONSUME_MEM_TRACKER(_node_channel_tracker.get());
736
37.2k
    if (payload->second.empty()) {
737
0
        return Status::OK();
738
0
    }
739
    // If add_block() when _eos_is_produced==true, there must be sth wrong, we can only mark this channel as failed.
740
37.2k
    auto st = none_of({_cancelled, _eos_is_produced});
741
37.2k
    if (!st.ok()) {
742
0
        if (_cancelled) {
743
0
            std::lock_guard<std::mutex> l(_cancel_msg_lock);
744
0
            return Status::Error<ErrorCode::INTERNAL_ERROR, false>("add row failed. {}",
745
0
                                                                   _cancel_msg);
746
0
        } else {
747
0
            return std::move(st.prepend("already stopped, can't add row. cancelled/eos: "));
748
0
        }
749
0
    }
750
751
    // We use OlapTableSink mem_tracker which has the same ancestor of _plan node,
752
    // so in the ideal case, mem limit is a matter for _plan node.
753
    // But there is still some unfinished things, we do mem limit here temporarily.
754
    // _cancelled may be set by rpc callback, and it's possible that _cancelled might be set in any of the steps below.
755
    // It's fine to do a fake add_block() and return OK, because we will check _cancelled in next add_block() or mark_close().
756
37.2k
    constexpr int64_t kBackPressureSleepMs = 10;
757
37.2k
    auto* memtable_limiter = ExecEnv::GetInstance()->memtable_memory_limiter();
758
37.2k
    while (true) {
759
37.2k
        bool is_exceed_soft_mem_limit = GlobalMemoryArbitrator::is_exceed_soft_mem_limit();
760
37.2k
        int64_t memtable_mem =
761
37.2k
                (memtable_limiter != nullptr && memtable_limiter->mem_tracker() != nullptr)
762
37.2k
                        ? memtable_limiter->mem_tracker()->consumption()
763
37.2k
                        : 0;
764
        // Note: Memtable memory is not included in load memory statistics (MemoryProfile::load_current_usage())
765
        // for performance and memory control complexity reasons. Therefore, we explicitly add memtable memory
766
        // consumption here to ensure accurate back pressure decisions and prevent OOM during heavy loads.
767
37.2k
        auto current_load_mem_value = MemoryProfile::load_current_usage() + memtable_mem;
768
37.2k
        bool mem_limit_exceeded = is_exceed_soft_mem_limit ||
769
37.2k
                                  current_load_mem_value > _load_mem_limit ||
770
37.2k
                                  _pending_batches_bytes > _max_pending_batches_bytes;
771
37.2k
        bool need_back_pressure = !_cancelled && !_state->is_cancelled() &&
772
37.2k
                                  _pending_batches_num > 0 && mem_limit_exceeded;
773
37.2k
        if (!need_back_pressure) {
774
37.2k
            break;
775
37.2k
        }
776
0
        SCOPED_RAW_TIMER(&_stat.mem_exceeded_block_ns);
777
0
        std::this_thread::sleep_for(std::chrono::milliseconds(kBackPressureSleepMs));
778
0
    }
779
780
37.2k
    if (UNLIKELY(!_cur_mutable_block)) {
781
30.2k
        _cur_mutable_block = MutableBlock::create_unique(block->clone_empty());
782
30.2k
    }
783
784
37.2k
    SCOPED_RAW_TIMER(&_stat.append_node_channel_ns);
785
37.2k
    st = block->append_to_block_by_selector(_cur_mutable_block.get(), *(payload->first));
786
37.2k
    if (!st.ok()) {
787
0
        _cancel_with_msg(fmt::format("{}, err: {}", channel_info(), st.to_string()));
788
0
        return st;
789
0
    }
790
35.9M
    for (auto tablet_id : payload->second) {
791
35.9M
        _cur_add_block_request->add_tablet_ids(tablet_id);
792
35.9M
    }
793
37.2k
    _write_bytes.fetch_add(_cur_mutable_block->bytes());
794
795
37.2k
    if (_cur_mutable_block->rows() >= _batch_size ||
796
37.2k
        _cur_mutable_block->bytes() > config::doris_scanner_row_bytes) {
797
2.36k
        {
798
2.36k
            SCOPED_ATOMIC_TIMER(&_queue_push_lock_ns);
799
2.36k
            std::lock_guard<std::mutex> l(_pending_batches_lock);
800
            // To simplify the add_row logic, postpone adding block into req until the time of sending req
801
2.36k
            _pending_batches_bytes += _cur_mutable_block->allocated_bytes();
802
2.36k
            _cur_add_block_request->set_eos(
803
2.36k
                    false); // for multi-add, only when marking close we set it eos.
804
            // Copy the request to tmp request to add to pend block queue
805
2.36k
            auto tmp_add_block_request = std::make_shared<PTabletWriterAddBlockRequest>();
806
2.36k
            *tmp_add_block_request = *_cur_add_block_request;
807
2.36k
            _pending_blocks.emplace(std::move(_cur_mutable_block), tmp_add_block_request);
808
2.36k
            _pending_batches_num++;
809
2.36k
            VLOG_DEBUG << "VTabletWriter:" << _parent << " VNodeChannel:" << this
810
0
                       << " pending_batches_bytes:" << _pending_batches_bytes
811
0
                       << " jobid:" << std::to_string(_state->load_job_id())
812
0
                       << " loadinfo:" << _load_info;
813
2.36k
        }
814
2.36k
        _cur_mutable_block = MutableBlock::create_unique(block->clone_empty());
815
2.36k
        _cur_add_block_request->clear_tablet_ids();
816
2.36k
    }
817
818
37.2k
    return Status::OK();
819
37.2k
}
820
821
0
static void injection_full_gc_fn() {
822
0
    MemoryReclamation::revoke_process_memory("injection_full_gc_fn");
823
0
}
824
825
int VNodeChannel::try_send_and_fetch_status(RuntimeState* state,
826
23.6M
                                            std::unique_ptr<ThreadPoolToken>& thread_pool_token) {
827
23.6M
    DBUG_EXECUTE_IF("VNodeChannel.try_send_and_fetch_status_full_gc", {
828
23.6M
        std::thread t(injection_full_gc_fn);
829
23.6M
        t.join();
830
23.6M
    });
831
832
23.9M
    if (_cancelled || _send_finished) { // not run
833
951k
        return 0;
834
951k
    }
835
836
22.7M
    auto load_back_pressure_version_wait_time_ms = _load_back_pressure_version_wait_time_ms.load();
837
22.7M
    if (UNLIKELY(load_back_pressure_version_wait_time_ms > 0)) {
838
0
        std::this_thread::sleep_for(
839
0
                std::chrono::milliseconds(load_back_pressure_version_wait_time_ms));
840
0
        _load_back_pressure_version_block_ms.fetch_add(
841
0
                load_back_pressure_version_wait_time_ms); // already in milliseconds
842
0
        _load_back_pressure_version_wait_time_ms = 0;
843
0
    }
844
845
    // set closure for sending block.
846
22.7M
    if (!_send_block_callback->try_set_in_flight()) {
847
        // There is packet in flight, skip.
848
1.23M
        return _send_finished ? 0 : 1;
849
1.23M
    }
850
851
    // We are sure that try_send_batch is not running
852
21.4M
    if (_pending_batches_num > 0) {
853
70.6k
        auto s = thread_pool_token->submit_func([this, state] { try_send_pending_block(state); });
854
70.6k
        if (!s.ok()) {
855
0
            _cancel_with_msg("submit send_batch task to send_batch_thread_pool failed");
856
            // sending finished. clear in flight
857
0
            _send_block_callback->clear_in_flight();
858
0
        }
859
        // in_flight is cleared in closure::Run
860
21.4M
    } else {
861
        // sending finished. clear in flight
862
21.4M
        _send_block_callback->clear_in_flight();
863
21.4M
    }
864
21.4M
    return _send_finished ? 0 : 1;
865
22.7M
}
866
867
192
void VNodeChannel::_cancel_with_msg(const std::string& msg) {
868
192
    LOG(WARNING) << "cancel node channel " << channel_info() << ", error message: " << msg;
869
192
    {
870
192
        std::lock_guard<std::mutex> l(_cancel_msg_lock);
871
192
        if (_cancel_msg.empty()) {
872
144
            _cancel_msg = msg;
873
144
        }
874
192
    }
875
192
    _cancelled = true;
876
192
}
877
878
void VNodeChannel::_refresh_back_pressure_version_wait_time(
879
        const ::google::protobuf::RepeatedPtrField<::doris::PTabletLoadRowsetInfo>&
880
70.4k
                tablet_load_infos) {
881
70.4k
    int64_t max_rowset_num_gap = 0;
882
    // if any one tablet is under high load pressure, we would make the whole procedure
883
    // sleep to prevent the corresponding BE return -235
884
70.4k
    std::for_each(
885
70.4k
            tablet_load_infos.begin(), tablet_load_infos.end(),
886
70.4k
            [&max_rowset_num_gap](auto& load_info) {
887
0
                int64_t cur_rowset_num = load_info.current_rowset_nums();
888
0
                int64_t high_load_point = load_info.max_config_rowset_nums() *
889
0
                                          (config::load_back_pressure_version_threshold / 100);
890
0
                DCHECK(cur_rowset_num > high_load_point);
891
0
                max_rowset_num_gap = std::max(max_rowset_num_gap, cur_rowset_num - high_load_point);
892
0
            });
893
    // to slow down the high load pressure
894
    // we would use the rowset num gap to calculate one sleep time
895
    // for example:
896
    // if the max tablet version is 2000, there are 3 BE
897
    // A: ====================  1800
898
    // B: ===================   1700
899
    // C: ==================    1600
900
    //    ==================    1600
901
    //                      ^
902
    //                      the high load point
903
    // then then max gap is 1800 - (max tablet version * config::load_back_pressure_version_threshold / 100) = 200,
904
    // we would make the whole send procesure sleep
905
    // 1200ms for compaction to be done toe reduce the high pressure
906
70.4k
    auto max_time = config::max_load_back_pressure_version_wait_time_ms;
907
70.4k
    if (UNLIKELY(max_rowset_num_gap > 0)) {
908
0
        _load_back_pressure_version_wait_time_ms.store(
909
0
                std::min(max_rowset_num_gap + 1000, max_time));
910
0
        LOG(INFO) << "try to back pressure version, wait time(ms): "
911
0
                  << _load_back_pressure_version_wait_time_ms
912
0
                  << ", load id: " << print_id(_parent->_load_id)
913
0
                  << ", max_rowset_num_gap: " << max_rowset_num_gap;
914
0
    }
915
70.4k
}
916
917
70.6k
void VNodeChannel::try_send_pending_block(RuntimeState* state) {
918
70.6k
    SCOPED_ATTACH_TASK(state);
919
70.6k
    SCOPED_CONSUME_MEM_TRACKER(_node_channel_tracker);
920
70.6k
    SCOPED_ATOMIC_TIMER(&_actual_consume_ns);
921
70.6k
    signal::set_signal_task_id(_parent->_load_id);
922
70.6k
    AddBlockReq send_block;
923
70.6k
    {
924
70.6k
        std::lock_guard<std::mutex> l(_pending_batches_lock);
925
70.6k
        DCHECK(!_pending_blocks.empty());
926
70.6k
        send_block = std::move(_pending_blocks.front());
927
70.6k
        _pending_blocks.pop();
928
70.6k
        _pending_batches_num--;
929
70.6k
        _pending_batches_bytes -= send_block.first->allocated_bytes();
930
70.6k
    }
931
932
70.6k
    auto mutable_block = std::move(send_block.first);
933
70.6k
    auto request = std::move(send_block.second); // doesn't need to be saved in heap
934
935
    // tablet_ids has already set when add row
936
70.6k
    request->set_packet_seq(_next_packet_seq);
937
70.6k
    auto block = mutable_block->to_block();
938
70.6k
    CHECK(block.rows() == request->tablet_ids_size())
939
12
            << "block rows: " << block.rows()
940
12
            << ", tablet_ids_size: " << request->tablet_ids_size();
941
70.6k
    if (block.rows() > 0) {
942
32.4k
        SCOPED_ATOMIC_TIMER(&_serialize_batch_ns);
943
32.4k
        size_t uncompressed_bytes = 0, compressed_bytes = 0;
944
32.4k
        int64_t compressed_time = 0;
945
32.4k
        Status st = block.serialize(state->be_exec_version(), request->mutable_block(),
946
32.4k
                                    &uncompressed_bytes, &compressed_bytes, &compressed_time,
947
32.4k
                                    state->fragement_transmission_compression_type(),
948
32.4k
                                    _parent->_transfer_large_data_by_brpc);
949
32.4k
        TEST_INJECTION_POINT_CALLBACK("VNodeChannel::try_send_block", &st);
950
32.4k
        if (!st.ok()) {
951
0
            cancel(fmt::format("{}, err: {}", channel_info(), st.to_string()));
952
0
            _send_block_callback->clear_in_flight();
953
0
            return;
954
0
        }
955
32.4k
        if (double(compressed_bytes) >= double(config::brpc_max_body_size) * 0.95F) {
956
0
            LOG(WARNING) << "send block too large, this rpc may failed. send size: "
957
0
                         << compressed_bytes << ", threshold: " << config::brpc_max_body_size
958
0
                         << ", " << channel_info();
959
0
        }
960
32.4k
    }
961
962
70.6k
    auto remain_ms = _rpc_timeout_ms - _timeout_watch.elapsed_time() / NANOS_PER_MILLIS;
963
70.6k
    if (UNLIKELY(remain_ms < config::min_load_rpc_timeout_ms)) {
964
4
        if (remain_ms <= 0 && !request->eos()) {
965
0
            cancel(fmt::format("{}, err: load timeout after {} ms", channel_info(),
966
0
                               _rpc_timeout_ms));
967
0
            _send_block_callback->clear_in_flight();
968
0
            return;
969
4
        } else {
970
4
            remain_ms = config::min_load_rpc_timeout_ms;
971
4
        }
972
4
    }
973
974
70.6k
    _send_block_callback->reset();
975
70.6k
    _send_block_callback->cntl_->set_timeout_ms(remain_ms);
976
70.6k
    if (config::tablet_writer_ignore_eovercrowded) {
977
70.6k
        _send_block_callback->cntl_->ignore_eovercrowded();
978
70.6k
    }
979
980
70.6k
    if (request->eos()) {
981
68.2k
        for (auto pid : _parent->_tablet_finder->partition_ids()) {
982
33.0k
            request->add_partition_ids(pid);
983
33.0k
        }
984
985
68.2k
        request->set_write_single_replica(_parent->_write_single_replica);
986
68.2k
        if (_parent->_write_single_replica) {
987
0
            for (auto& _slave_tablet_node : _slave_tablet_nodes) {
988
0
                PSlaveTabletNodes slave_tablet_nodes;
989
0
                for (auto node_id : _slave_tablet_node.second) {
990
0
                    const auto* node = _parent->_nodes_info->find_node(node_id);
991
0
                    DBUG_EXECUTE_IF("VNodeChannel.try_send_pending_block.slave_node_not_found", {
992
0
                        LOG(WARNING) << "trigger "
993
0
                                        "VNodeChannel.try_send_pending_block.slave_node_not_found "
994
0
                                        "debug point will set node to nullptr";
995
0
                        node = nullptr;
996
0
                    });
997
0
                    if (node == nullptr) {
998
0
                        LOG(WARNING) << "slave node not found, node_id=" << node_id;
999
0
                        cancel(fmt::format("slave node not found, node_id={}", node_id));
1000
0
                        _send_block_callback->clear_in_flight();
1001
0
                        return;
1002
0
                    }
1003
0
                    PNodeInfo* pnode = slave_tablet_nodes.add_slave_nodes();
1004
0
                    pnode->set_id(node->id);
1005
0
                    pnode->set_option(node->option);
1006
0
                    pnode->set_host(node->host);
1007
0
                    pnode->set_async_internal_port(node->brpc_port);
1008
0
                }
1009
0
                request->mutable_slave_tablet_nodes()->insert(
1010
0
                        {_slave_tablet_node.first, slave_tablet_nodes});
1011
0
            }
1012
0
        }
1013
1014
        // eos request must be the last request-> it's a signal makeing callback function to set _add_batch_finished true.
1015
        // end_mark makes is_last_rpc true when rpc finished and call callbacks.
1016
68.2k
        _send_block_callback->end_mark();
1017
68.2k
        _send_finished = true;
1018
18.4E
        CHECK(_pending_batches_num == 0) << _pending_batches_num;
1019
68.2k
    }
1020
1021
70.6k
    auto send_block_closure = AutoReleaseClosure<
1022
70.6k
            PTabletWriterAddBlockRequest,
1023
70.6k
            WriteBlockCallback<PTabletWriterAddBlockResult>>::create_unique(request,
1024
70.6k
                                                                            _send_block_callback);
1025
70.6k
    if (_parent->_transfer_large_data_by_brpc && request->has_block() &&
1026
70.6k
        request->block().has_column_values() && request->ByteSizeLong() > MIN_HTTP_BRPC_SIZE) {
1027
0
        Status st = request_embed_attachment_contain_blockv2(send_block_closure->request_.get(),
1028
0
                                                             send_block_closure);
1029
0
        if (!st.ok()) {
1030
0
            cancel(fmt::format("{}, err: {}", channel_info(), st.to_string()));
1031
0
            _send_block_callback->clear_in_flight();
1032
0
            return;
1033
0
        }
1034
1035
0
        std::string host = _node_info.host;
1036
0
        auto dns_cache = ExecEnv::GetInstance()->dns_cache();
1037
0
        if (dns_cache == nullptr) {
1038
0
            LOG(WARNING) << "DNS cache is not initialized, skipping hostname resolve";
1039
0
        } else if (!is_valid_ip(_node_info.host)) {
1040
0
            Status status = dns_cache->get(_node_info.host, &host);
1041
0
            if (!status.ok()) {
1042
0
                LOG(WARNING) << "failed to get ip from host " << _node_info.host << ": "
1043
0
                             << status.to_string();
1044
0
                cancel(fmt::format("failed to get ip from host {}", _node_info.host));
1045
0
                _send_block_callback->clear_in_flight();
1046
0
                return;
1047
0
            }
1048
0
        }
1049
        //format an ipv6 address
1050
0
        std::string brpc_url = get_brpc_http_url(host, _node_info.brpc_port);
1051
0
        std::shared_ptr<PBackendService_Stub> _brpc_http_stub =
1052
0
                _state->exec_env()->brpc_internal_client_cache()->get_new_client_no_cache(brpc_url,
1053
0
                                                                                          "http");
1054
0
        if (_brpc_http_stub == nullptr) {
1055
0
            cancel(fmt::format("{}, failed to open brpc http client to {}", channel_info(),
1056
0
                               brpc_url));
1057
0
            _send_block_callback->clear_in_flight();
1058
0
            return;
1059
0
        }
1060
0
        _send_block_callback->cntl_->http_request().uri() =
1061
0
                brpc_url + "/PInternalServiceImpl/tablet_writer_add_block_by_http";
1062
0
        _send_block_callback->cntl_->http_request().set_method(brpc::HTTP_METHOD_POST);
1063
0
        _send_block_callback->cntl_->http_request().set_content_type("application/json");
1064
1065
0
        {
1066
0
            _brpc_http_stub->tablet_writer_add_block_by_http(
1067
0
                    send_block_closure->cntl_.get(), nullptr, send_block_closure->response_.get(),
1068
0
                    send_block_closure.get());
1069
0
            send_block_closure.release();
1070
0
        }
1071
70.6k
    } else {
1072
70.6k
        _send_block_callback->cntl_->http_request().Clear();
1073
70.6k
        {
1074
70.6k
            _stub->tablet_writer_add_block(
1075
70.6k
                    send_block_closure->cntl_.get(), send_block_closure->request_.get(),
1076
70.6k
                    send_block_closure->response_.get(), send_block_closure.get());
1077
70.6k
            send_block_closure.release();
1078
70.6k
        }
1079
70.6k
    }
1080
1081
70.6k
    _next_packet_seq++;
1082
70.6k
}
1083
1084
void VNodeChannel::_add_block_success_callback(const PTabletWriterAddBlockResult& result,
1085
70.6k
                                               const WriteBlockCallbackContext& ctx) {
1086
70.6k
    std::lock_guard<std::mutex> l(this->_closed_lock);
1087
70.6k
    if (this->_is_closed) {
1088
        // if the node channel is closed, no need to call the following logic,
1089
        // and notice that _index_channel may already be destroyed.
1090
0
        return;
1091
0
    }
1092
70.6k
    SCOPED_ATTACH_TASK(_state);
1093
70.6k
    Status status(Status::create(result.status()));
1094
70.6k
    if (status.ok()) {
1095
70.4k
        _refresh_back_pressure_version_wait_time(result.tablet_load_rowset_num_infos());
1096
        // if has error tablet, handle them first
1097
70.4k
        for (const auto& error : result.tablet_errors()) {
1098
0
            _index_channel->mark_as_failed(this, "tablet error: " + error.msg(), error.tablet_id());
1099
0
        }
1100
1101
70.4k
        Status st = _index_channel->check_intolerable_failure();
1102
70.4k
        if (!st.ok()) {
1103
0
            _cancel_with_msg(st.to_string());
1104
70.4k
        } else if (ctx._is_last_rpc) {
1105
68.2k
            bool skip_tablet_info = false;
1106
68.2k
            DBUG_EXECUTE_IF("VNodeChannel.add_block_success_callback.incomplete_commit_info",
1107
68.2k
                            { skip_tablet_info = true; });
1108
182k
            for (const auto& tablet : result.tablet_vec()) {
1109
182k
                DBUG_EXECUTE_IF("VNodeChannel.add_block_success_callback.incomplete_commit_info", {
1110
182k
                    if (skip_tablet_info) {
1111
182k
                        LOG(INFO) << "skip tablet info: " << tablet.tablet_id();
1112
182k
                        skip_tablet_info = false;
1113
182k
                        continue;
1114
182k
                    }
1115
182k
                });
1116
182k
                TTabletCommitInfo commit_info;
1117
182k
                commit_info.tabletId = tablet.tablet_id();
1118
182k
                commit_info.backendId = _node_id;
1119
182k
                _tablet_commit_infos.emplace_back(std::move(commit_info));
1120
182k
                if (tablet.has_received_rows()) {
1121
182k
                    _tablets_received_rows.emplace_back(tablet.tablet_id(), tablet.received_rows());
1122
182k
                }
1123
182k
                if (tablet.has_num_rows_filtered()) {
1124
182k
                    _tablets_filtered_rows.emplace_back(tablet.tablet_id(),
1125
182k
                                                        tablet.num_rows_filtered());
1126
182k
                }
1127
18.4E
                VLOG_CRITICAL << "master replica commit info: tabletId=" << tablet.tablet_id()
1128
18.4E
                              << ", backendId=" << _node_id
1129
18.4E
                              << ", master node id: " << this->node_id()
1130
18.4E
                              << ", host: " << this->host() << ", txn_id=" << _parent->_txn_id;
1131
182k
            }
1132
68.2k
            if (_parent->_write_single_replica) {
1133
0
                for (const auto& tablet_slave_node_ids : result.success_slave_tablet_node_ids()) {
1134
0
                    for (auto slave_node_id : tablet_slave_node_ids.second.slave_node_ids()) {
1135
0
                        TTabletCommitInfo commit_info;
1136
0
                        commit_info.tabletId = tablet_slave_node_ids.first;
1137
0
                        commit_info.backendId = slave_node_id;
1138
0
                        _tablet_commit_infos.emplace_back(std::move(commit_info));
1139
0
                        VLOG_CRITICAL
1140
0
                                << "slave replica commit info: tabletId="
1141
0
                                << tablet_slave_node_ids.first << ", backendId=" << slave_node_id
1142
0
                                << ", master node id: " << this->node_id()
1143
0
                                << ", host: " << this->host() << ", txn_id=" << _parent->_txn_id;
1144
0
                    }
1145
0
                }
1146
0
            }
1147
68.2k
            _add_batches_finished = true;
1148
68.2k
        }
1149
70.4k
    } else {
1150
144
        _cancel_with_msg(fmt::format("{}, add batch req success but status isn't ok, err: {}",
1151
144
                                     channel_info(), status.to_string()));
1152
144
    }
1153
1154
70.6k
    if (result.has_execution_time_us()) {
1155
70.6k
        _add_batch_counter.add_batch_execution_time_us += result.execution_time_us();
1156
70.6k
        _add_batch_counter.add_batch_wait_execution_time_us += result.wait_execution_time_us();
1157
70.6k
        _add_batch_counter.add_batch_num++;
1158
70.6k
    }
1159
70.6k
    if (result.has_load_channel_profile()) {
1160
20
        TRuntimeProfileTree tprofile;
1161
20
        const auto* buf = (const uint8_t*)result.load_channel_profile().data();
1162
20
        auto len = cast_set<uint32_t>(result.load_channel_profile().size());
1163
20
        auto st = deserialize_thrift_msg(buf, &len, false, &tprofile);
1164
21
        if (st.ok()) {
1165
21
            _state->load_channel_profile()->update(tprofile);
1166
18.4E
        } else {
1167
18.4E
            LOG(WARNING) << "load channel TRuntimeProfileTree deserialize failed, errmsg=" << st;
1168
18.4E
        }
1169
20
    }
1170
70.6k
}
1171
1172
0
void VNodeChannel::_add_block_failed_callback(const WriteBlockCallbackContext& ctx) {
1173
0
    std::lock_guard<std::mutex> l(this->_closed_lock);
1174
0
    if (this->_is_closed) {
1175
        // if the node channel is closed, no need to call `mark_as_failed`,
1176
        // and notice that _index_channel may already be destroyed.
1177
0
        return;
1178
0
    }
1179
0
    SCOPED_ATTACH_TASK(_state);
1180
    // If rpc failed, mark all tablets on this node channel as failed
1181
0
    _index_channel->mark_as_failed(this,
1182
0
                                   fmt::format("rpc failed, error code:{}, error text:{}",
1183
0
                                               _send_block_callback->cntl_->ErrorCode(),
1184
0
                                               _send_block_callback->cntl_->ErrorText()),
1185
0
                                   -1);
1186
0
    if (_send_block_callback->cntl_->ErrorText().find("Reached timeout") != std::string::npos) {
1187
0
        LOG(WARNING) << "rpc failed may caused by timeout. increase BE config "
1188
0
                        "`min_load_rpc_timeout_ms` of to avoid this if you are sure that your "
1189
0
                        "table building and data are reasonable.";
1190
0
    }
1191
0
    Status st = _index_channel->check_intolerable_failure();
1192
0
    if (!st.ok()) {
1193
0
        _cancel_with_msg(fmt::format("{}, err: {}", channel_info(), st.to_string()));
1194
0
    } else if (ctx._is_last_rpc) {
1195
        // if this is last rpc, will must set _add_batches_finished. otherwise, node channel's close_wait
1196
        // will be blocked.
1197
0
        _add_batches_finished = true;
1198
0
    }
1199
0
}
1200
1201
// When _cancelled is true, we still need to send a tablet_writer_cancel
1202
// rpc request to truly release the load channel
1203
192
void VNodeChannel::cancel(const std::string& cancel_msg) {
1204
192
    if (_is_closed) {
1205
        // skip the channels that have been canceled or close_wait.
1206
48
        return;
1207
48
    }
1208
144
    SCOPED_CONSUME_MEM_TRACKER(_node_channel_tracker.get());
1209
    // set _is_closed to true finally
1210
144
    Defer set_closed {[&]() {
1211
144
        std::lock_guard<std::mutex> l(_closed_lock);
1212
144
        _is_closed = true;
1213
144
    }};
1214
    // we don't need to wait last rpc finished, cause closure's release/reset will join.
1215
    // But do we need brpc::StartCancel(call_id)?
1216
144
    _cancel_with_msg(cancel_msg);
1217
    // if not inited, _stub will be nullptr, skip sending cancel rpc
1218
144
    if (!_inited) {
1219
0
        return;
1220
0
    }
1221
1222
144
    auto request = std::make_shared<PTabletWriterCancelRequest>();
1223
144
    request->set_allocated_id(&_parent->_load_id);
1224
144
    request->set_index_id(_index_channel->_index_id);
1225
144
    request->set_sender_id(_parent->_sender_id);
1226
144
    request->set_cancel_reason(cancel_msg);
1227
1228
144
    auto cancel_callback = DummyBrpcCallback<PTabletWriterCancelResult>::create_shared();
1229
144
    auto closure = AutoReleaseClosure<
1230
144
            PTabletWriterCancelRequest,
1231
144
            DummyBrpcCallback<PTabletWriterCancelResult>>::create_unique(request, cancel_callback);
1232
1233
144
    auto remain_ms = _rpc_timeout_ms - _timeout_watch.elapsed_time() / NANOS_PER_MILLIS;
1234
144
    if (UNLIKELY(remain_ms < config::min_load_rpc_timeout_ms)) {
1235
0
        remain_ms = config::min_load_rpc_timeout_ms;
1236
0
    }
1237
144
    cancel_callback->cntl_->set_timeout_ms(remain_ms);
1238
144
    if (config::tablet_writer_ignore_eovercrowded) {
1239
144
        closure->cntl_->ignore_eovercrowded();
1240
144
    }
1241
144
    _stub->tablet_writer_cancel(closure->cntl_.get(), closure->request_.get(),
1242
144
                                closure->response_.get(), closure.get());
1243
144
    closure.release();
1244
144
    static_cast<void>(request->release_id());
1245
144
}
1246
1247
1.56M
Status VNodeChannel::close_wait(RuntimeState* state, bool* is_closed) {
1248
1.56M
    DBUG_EXECUTE_IF("VNodeChannel.close_wait_full_gc", {
1249
1.56M
        std::thread t(injection_full_gc_fn);
1250
1.56M
        t.join();
1251
1.56M
    });
1252
1.56M
    SCOPED_CONSUME_MEM_TRACKER(_node_channel_tracker.get());
1253
1254
1.56M
    *is_closed = true;
1255
1256
1.56M
    auto st = none_of({_cancelled, !_eos_is_produced});
1257
1.56M
    if (!st.ok()) {
1258
48
        if (_cancelled) {
1259
48
            std::lock_guard<std::mutex> l(_cancel_msg_lock);
1260
48
            return Status::Error<ErrorCode::INTERNAL_ERROR, false>("wait close failed. {}",
1261
48
                                                                   _cancel_msg);
1262
48
        } else {
1263
0
            return std::move(
1264
0
                    st.prepend("already stopped, skip waiting for close. cancelled/!eos: "));
1265
0
        }
1266
48
    }
1267
1268
1.56M
    DBUG_EXECUTE_IF("VNodeChannel.close_wait.cancelled", {
1269
1.56M
        _cancelled = true;
1270
1.56M
        _cancel_msg = "injected cancel";
1271
1.56M
    });
1272
1273
1.56M
    if (state->is_cancelled()) {
1274
0
        _cancel_with_msg(state->cancel_reason().to_string());
1275
0
    }
1276
1277
    // Waiting for finished until _add_batches_finished changed by rpc's finished callback.
1278
    // it may take a long time, so we couldn't set a timeout
1279
    // For pipeline engine, the close is called in async writer's process block method,
1280
    // so that it will not block pipeline thread.
1281
1.56M
    if (!_add_batches_finished && !_cancelled && !state->is_cancelled()) {
1282
1.49M
        *is_closed = false;
1283
1.49M
        return Status::OK();
1284
1.49M
    }
1285
68.7k
    VLOG_CRITICAL << _parent->_sender_id << " close wait finished";
1286
68.7k
    return Status::OK();
1287
1.56M
}
1288
1289
Status VNodeChannel::after_close_handle(
1290
        RuntimeState* state, WriterStats* writer_stats,
1291
68.2k
        std::unordered_map<int64_t, AddBatchCounter>* node_add_batch_counter_map) {
1292
68.2k
    Status st = Status::Error<ErrorCode::INTERNAL_ERROR, false>(get_cancel_msg());
1293
68.2k
    _close_time_ms = UnixMillis() - _close_time_ms;
1294
1295
68.2k
    if (_add_batches_finished) {
1296
68.1k
        _close_check();
1297
68.1k
        _state->add_tablet_commit_infos(_tablet_commit_infos);
1298
1299
68.1k
        _index_channel->set_error_tablet_in_state(state);
1300
68.1k
        _index_channel->set_tablets_received_rows(_tablets_received_rows, _node_id);
1301
68.1k
        _index_channel->set_tablets_filtered_rows(_tablets_filtered_rows, _node_id);
1302
1303
68.1k
        std::lock_guard<std::mutex> l(_closed_lock);
1304
        // only when normal close, we set _is_closed to true.
1305
        // otherwise, we will set it to true in cancel().
1306
68.1k
        _is_closed = true;
1307
68.1k
        st = Status::OK();
1308
68.1k
    }
1309
1310
68.2k
    time_report(node_add_batch_counter_map, writer_stats);
1311
68.2k
    return st;
1312
68.2k
}
1313
1314
30.2k
Status VNodeChannel::check_status() {
1315
30.2k
    return none_of({_cancelled, !_eos_is_produced});
1316
30.2k
}
1317
1318
68.0k
void VNodeChannel::_close_check() {
1319
68.0k
    std::lock_guard<std::mutex> lg(_pending_batches_lock);
1320
18.4E
    CHECK(_pending_blocks.empty()) << name();
1321
18.4E
    CHECK(_cur_mutable_block == nullptr) << name();
1322
68.0k
}
1323
1324
68.2k
void VNodeChannel::mark_close(bool hang_wait) {
1325
68.2k
    auto st = none_of({_cancelled, _eos_is_produced});
1326
68.2k
    if (!st.ok()) {
1327
0
        return;
1328
0
    }
1329
1330
68.2k
    _cur_add_block_request->set_eos(true);
1331
68.2k
    _cur_add_block_request->set_hang_wait(hang_wait);
1332
68.2k
    {
1333
68.2k
        std::lock_guard<std::mutex> l(_pending_batches_lock);
1334
68.2k
        if (!_cur_mutable_block) [[unlikely]] {
1335
            // never had a block arrived. add a dummy block
1336
38.0k
            _cur_mutable_block = MutableBlock::create_unique();
1337
38.0k
        }
1338
68.2k
        auto tmp_add_block_request =
1339
68.2k
                std::make_shared<PTabletWriterAddBlockRequest>(*_cur_add_block_request);
1340
        // when prepare to close, add block to queue so that try_send_pending_block thread will send it.
1341
68.2k
        _pending_blocks.emplace(std::move(_cur_mutable_block), tmp_add_block_request);
1342
68.2k
        _pending_batches_num++;
1343
68.2k
        DCHECK(_pending_blocks.back().second->eos());
1344
68.2k
        _close_time_ms = UnixMillis();
1345
68.2k
        LOG(INFO) << channel_info()
1346
68.2k
                  << " mark closed, left pending batch size: " << _pending_blocks.size()
1347
68.2k
                  << " hang_wait: " << hang_wait;
1348
68.2k
    }
1349
1350
68.2k
    _eos_is_produced = true;
1351
68.2k
}
1352
1353
VTabletWriter::VTabletWriter(const TDataSink& t_sink, const VExprContextSPtrs& output_exprs,
1354
                             std::shared_ptr<Dependency> dep, std::shared_ptr<Dependency> fin_dep)
1355
66.9k
        : AsyncResultWriter(output_exprs, dep, fin_dep), _t_sink(t_sink) {
1356
66.9k
    _transfer_large_data_by_brpc = config::transfer_large_data_by_brpc;
1357
66.9k
}
1358
1359
67.3k
void VTabletWriter::_send_batch_process() {
1360
67.3k
    SCOPED_TIMER(_non_blocking_send_timer);
1361
67.3k
    SCOPED_ATTACH_TASK(_state);
1362
67.3k
    SCOPED_CONSUME_MEM_TRACKER(_mem_tracker);
1363
1364
67.3k
    int sleep_time = int(config::olap_table_sink_send_interval_microseconds *
1365
67.3k
                         (_vpartition->is_auto_partition()
1366
67.3k
                                  ? config::olap_table_sink_send_interval_auto_partition_factor
1367
67.3k
                                  : 1));
1368
1369
19.6M
    while (true) {
1370
        // incremental open will temporarily make channels into abnormal state. stop checking when this.
1371
19.6M
        std::unique_lock<bthread::Mutex> l(_stop_check_channel);
1372
1373
19.6M
        int running_channels_num = 0;
1374
19.6M
        int opened_nodes = 0;
1375
23.9M
        for (const auto& index_channel : _channels) {
1376
23.9M
            index_channel->for_each_node_channel([&running_channels_num,
1377
23.9M
                                                  this](const std::shared_ptr<VNodeChannel>& ch) {
1378
                // if this channel all completed(cancelled), got 0. else 1.
1379
23.8M
                running_channels_num +=
1380
23.8M
                        ch->try_send_and_fetch_status(_state, this->_send_batch_thread_pool_token);
1381
23.8M
            });
1382
23.9M
            opened_nodes += index_channel->num_node_channels();
1383
23.9M
        }
1384
1385
        // auto partition table may have no node channel temporarily. wait to open.
1386
19.6M
        if (opened_nodes != 0 && running_channels_num == 0) {
1387
67.3k
            LOG(INFO) << "All node channels are stopped(maybe finished/offending/cancelled), "
1388
67.3k
                         "sender thread exit. "
1389
67.3k
                      << print_id(_load_id);
1390
67.3k
            return;
1391
67.3k
        }
1392
1393
        // for auto partition tables, there's a situation: we haven't open any node channel but decide to cancel the task.
1394
        // then the judge in front will never be true because opened_nodes won't increase. so we have to specially check wether we called close.
1395
        // we must RECHECK opened_nodes below, after got closed signal, because it may changed. Think of this:
1396
        //      checked opened_nodes = 0 ---> new block arrived ---> task finished, close() was called ---> we got _try_close here
1397
        // if we don't check again, we may lose the last package.
1398
19.6M
        if (_try_close) {
1399
196k
            opened_nodes = 0;
1400
196k
            std::ranges::for_each(_channels,
1401
1.98M
                                  [&opened_nodes](const std::shared_ptr<IndexChannel>& ich) {
1402
1.98M
                                      opened_nodes += ich->num_node_channels();
1403
1.98M
                                  });
1404
196k
            if (opened_nodes == 0) {
1405
0
                LOG(INFO) << "No node channel have ever opened but now we have to close. sender "
1406
0
                             "thread exit. "
1407
0
                          << print_id(_load_id);
1408
0
                return;
1409
0
            }
1410
196k
        }
1411
19.6M
        bthread_usleep(sleep_time);
1412
19.6M
    }
1413
67.3k
}
1414
1415
67.3k
static void* periodic_send_batch(void* writer) {
1416
67.3k
    auto* tablet_writer = (VTabletWriter*)(writer);
1417
67.3k
    tablet_writer->_send_batch_process();
1418
67.3k
    return nullptr;
1419
67.3k
}
1420
1421
67.3k
Status VTabletWriter::open(doris::RuntimeState* state, doris::RuntimeProfile* profile) {
1422
67.3k
    RETURN_IF_ERROR(_init(state, profile));
1423
67.3k
    signal::set_signal_task_id(_load_id);
1424
67.3k
    SCOPED_TIMER(profile->total_time_counter());
1425
67.3k
    SCOPED_TIMER(_open_timer);
1426
67.3k
    SCOPED_CONSUME_MEM_TRACKER(_mem_tracker.get());
1427
1428
67.3k
    fmt::memory_buffer buf;
1429
68.3k
    for (const auto& index_channel : _channels) {
1430
68.3k
        fmt::format_to(buf, "index id:{}", index_channel->_index_id);
1431
68.3k
        index_channel->for_each_node_channel(
1432
68.3k
                [](const std::shared_ptr<VNodeChannel>& ch) { ch->open(); });
1433
68.3k
    }
1434
67.3k
    VLOG_DEBUG << "list of open index id = " << fmt::to_string(buf);
1435
1436
68.2k
    for (const auto& index_channel : _channels) {
1437
68.2k
        index_channel->set_start_time(UnixMillis());
1438
68.2k
        index_channel->for_each_node_channel([&index_channel](
1439
68.3k
                                                     const std::shared_ptr<VNodeChannel>& ch) {
1440
68.3k
            auto st = ch->open_wait();
1441
68.3k
            if (!st.ok()) {
1442
                // The open() phase is mainly to generate DeltaWriter instances on the nodes corresponding to each node channel.
1443
                // This phase will not fail due to a single tablet.
1444
                // Therefore, if the open() phase fails, all tablets corresponding to the node need to be marked as failed.
1445
0
                index_channel->mark_as_failed(
1446
0
                        ch.get(),
1447
0
                        fmt::format("{}, open failed, err: {}", ch->channel_info(), st.to_string()),
1448
0
                        -1);
1449
0
            }
1450
68.3k
        });
1451
1452
68.2k
        RETURN_IF_ERROR(index_channel->check_intolerable_failure());
1453
68.2k
    }
1454
67.3k
    _send_batch_thread_pool_token = state->exec_env()->send_batch_thread_pool()->new_token(
1455
67.3k
            ThreadPool::ExecutionMode::CONCURRENT, _send_batch_parallelism);
1456
1457
    // start to send batch continually. this must be called after _init
1458
67.3k
    if (bthread_start_background(&_sender_thread, nullptr, periodic_send_batch, (void*)this) != 0) {
1459
0
        return Status::Error<ErrorCode::INTERNAL_ERROR>("bthread_start_backgroud failed");
1460
0
    }
1461
67.3k
    return Status::OK();
1462
67.3k
}
1463
1464
178
Status VTabletWriter::on_partitions_created(TCreatePartitionResult* result) {
1465
    // add new tablet locations. it will use by address. so add to pool
1466
178
    auto* new_locations = _pool->add(new std::vector<TTabletLocation>(result->tablets));
1467
178
    _location->add_locations(*new_locations);
1468
178
    if (_write_single_replica) {
1469
0
        auto* slave_locations = _pool->add(new std::vector<TTabletLocation>(result->slave_tablets));
1470
0
        _slave_location->add_locations(*slave_locations);
1471
0
    }
1472
1473
    // update new node info
1474
178
    _nodes_info->add_nodes(result->nodes);
1475
1476
    // incremental open node channel
1477
178
    RETURN_IF_ERROR(_incremental_open_node_channel(result->partitions));
1478
1479
178
    return Status::OK();
1480
178
}
1481
1482
178
static Status on_partitions_created(void* writer, TCreatePartitionResult* result) {
1483
178
    return static_cast<VTabletWriter*>(writer)->on_partitions_created(result);
1484
178
}
1485
1486
67.1k
Status VTabletWriter::_init_row_distribution() {
1487
67.1k
    _row_distribution.init({.state = _state,
1488
67.1k
                            .block_convertor = _block_convertor.get(),
1489
67.1k
                            .tablet_finder = _tablet_finder.get(),
1490
67.1k
                            .vpartition = _vpartition,
1491
67.1k
                            .add_partition_request_timer = _add_partition_request_timer,
1492
67.1k
                            .txn_id = _txn_id,
1493
67.1k
                            .pool = _pool,
1494
67.1k
                            .location = _location,
1495
67.1k
                            .vec_output_expr_ctxs = &_vec_output_expr_ctxs,
1496
67.1k
                            .schema = _schema,
1497
67.1k
                            .caller = this,
1498
67.1k
                            .write_single_replica = _write_single_replica,
1499
67.1k
                            .create_partition_callback = &::doris::on_partitions_created});
1500
1501
67.1k
    return _row_distribution.open(_output_row_desc);
1502
67.1k
}
1503
1504
67.3k
Status VTabletWriter::_init(RuntimeState* state, RuntimeProfile* profile) {
1505
67.3k
    DCHECK(_t_sink.__isset.olap_table_sink);
1506
67.3k
    _pool = state->obj_pool();
1507
67.3k
    auto& table_sink = _t_sink.olap_table_sink;
1508
67.3k
    _load_id.set_hi(table_sink.load_id.hi);
1509
67.3k
    _load_id.set_lo(table_sink.load_id.lo);
1510
67.3k
    _txn_id = table_sink.txn_id;
1511
67.3k
    _num_replicas = table_sink.num_replicas;
1512
67.3k
    _tuple_desc_id = table_sink.tuple_id;
1513
67.3k
    _write_file_cache = table_sink.write_file_cache;
1514
67.3k
    _schema.reset(new OlapTableSchemaParam());
1515
67.3k
    RETURN_IF_ERROR(_schema->init(table_sink.schema));
1516
67.3k
    _schema->set_timestamp_ms(state->timestamp_ms());
1517
67.3k
    _schema->set_nano_seconds(state->nano_seconds());
1518
67.3k
    _schema->set_timezone(state->timezone());
1519
67.3k
    _location = _pool->add(new OlapTableLocationParam(table_sink.location));
1520
67.3k
    _nodes_info = _pool->add(new DorisNodesInfo(table_sink.nodes_info));
1521
67.3k
    if (table_sink.__isset.write_single_replica && table_sink.write_single_replica) {
1522
0
        _write_single_replica = true;
1523
0
        _slave_location = _pool->add(new OlapTableLocationParam(table_sink.slave_location));
1524
0
        if (!config::enable_single_replica_load) {
1525
0
            return Status::InternalError("single replica load is disabled on BE.");
1526
0
        }
1527
0
    }
1528
1529
67.3k
    if (config::is_cloud_mode() &&
1530
67.3k
        (!table_sink.__isset.txn_timeout_s || table_sink.txn_timeout_s <= 0)) {
1531
0
        return Status::InternalError("The txn_timeout_s of TDataSink is invalid");
1532
0
    }
1533
67.3k
    _txn_expiration = ::time(nullptr) + table_sink.txn_timeout_s;
1534
1535
67.3k
    if (table_sink.__isset.load_channel_timeout_s) {
1536
67.0k
        _load_channel_timeout_s = table_sink.load_channel_timeout_s;
1537
67.0k
    } else {
1538
264
        _load_channel_timeout_s = config::streaming_load_rpc_max_alive_time_sec;
1539
264
    }
1540
67.3k
    if (table_sink.__isset.send_batch_parallelism && table_sink.send_batch_parallelism > 1) {
1541
1
        _send_batch_parallelism = table_sink.send_batch_parallelism;
1542
1
    }
1543
    // if distributed column list is empty, we can ensure that tablet is with random distribution info
1544
    // and if load_to_single_tablet is set and set to true, we should find only one tablet in one partition
1545
    // for the whole olap table sink
1546
67.3k
    auto find_tablet_mode = OlapTabletFinder::FindTabletMode::FIND_TABLET_EVERY_ROW;
1547
67.3k
    if (table_sink.partition.distributed_columns.empty()) {
1548
35.4k
        if (table_sink.__isset.load_to_single_tablet && table_sink.load_to_single_tablet) {
1549
8
            find_tablet_mode = OlapTabletFinder::FindTabletMode::FIND_TABLET_EVERY_SINK;
1550
35.4k
        } else {
1551
35.4k
            find_tablet_mode = OlapTabletFinder::FindTabletMode::FIND_TABLET_EVERY_BATCH;
1552
35.4k
        }
1553
35.4k
    }
1554
67.3k
    _vpartition = _pool->add(new doris::VOlapTablePartitionParam(_schema, table_sink.partition));
1555
67.3k
    _tablet_finder = std::make_unique<OlapTabletFinder>(_vpartition, find_tablet_mode);
1556
67.3k
    RETURN_IF_ERROR(_vpartition->init());
1557
1558
67.3k
    _state = state;
1559
67.3k
    _operator_profile = profile;
1560
1561
67.3k
    _sender_id = state->per_fragment_instance_idx();
1562
67.3k
    _num_senders = state->num_per_fragment_instances();
1563
67.3k
    _is_high_priority =
1564
67.3k
            (state->execution_timeout() <= config::load_task_high_priority_threshold_second);
1565
67.3k
    DBUG_EXECUTE_IF("VTabletWriter._init.is_high_priority", { _is_high_priority = true; });
1566
    // profile must add to state's object pool
1567
67.3k
    _mem_tracker =
1568
67.3k
            std::make_shared<MemTracker>("OlapTableSink:" + std::to_string(state->load_job_id()));
1569
67.3k
    SCOPED_TIMER(profile->total_time_counter());
1570
67.3k
    SCOPED_CONSUME_MEM_TRACKER(_mem_tracker.get());
1571
1572
    // get table's tuple descriptor
1573
67.3k
    _output_tuple_desc = state->desc_tbl().get_tuple_descriptor(_tuple_desc_id);
1574
67.3k
    if (_output_tuple_desc == nullptr) {
1575
0
        LOG(WARNING) << "unknown destination tuple descriptor, id=" << _tuple_desc_id;
1576
0
        return Status::InternalError("unknown destination tuple descriptor");
1577
0
    }
1578
1579
67.3k
    if (!_vec_output_expr_ctxs.empty() &&
1580
67.3k
        _output_tuple_desc->slots().size() != _vec_output_expr_ctxs.size()) {
1581
0
        LOG(WARNING) << "output tuple slot num should be equal to num of output exprs, "
1582
0
                     << "output_tuple_slot_num " << _output_tuple_desc->slots().size()
1583
0
                     << " output_expr_num " << _vec_output_expr_ctxs.size();
1584
0
        return Status::InvalidArgument(
1585
0
                "output_tuple_slot_num {} should be equal to output_expr_num {}",
1586
0
                _output_tuple_desc->slots().size(), _vec_output_expr_ctxs.size());
1587
0
    }
1588
1589
67.3k
    _block_convertor = std::make_unique<OlapTableBlockConvertor>(_output_tuple_desc);
1590
    // if partition_type is OLAP_TABLE_SINK_HASH_PARTITIONED, we handle the processing of auto_increment column
1591
    // on exchange node rather than on TabletWriter
1592
67.3k
    _block_convertor->init_autoinc_info(
1593
67.3k
            _schema->db_id(), _schema->table_id(), _state->batch_size(),
1594
67.3k
            _schema->is_fixed_partial_update() && !_schema->auto_increment_coulumn().empty(),
1595
67.3k
            _schema->auto_increment_column_unique_id());
1596
67.3k
    _output_row_desc = _pool->add(new RowDescriptor(_output_tuple_desc));
1597
1598
    // add all counter
1599
67.3k
    _input_rows_counter = ADD_COUNTER(profile, "RowsRead", TUnit::UNIT);
1600
67.3k
    _output_rows_counter = ADD_COUNTER(profile, "RowsProduced", TUnit::UNIT);
1601
67.3k
    _filtered_rows_counter = ADD_COUNTER(profile, "RowsFiltered", TUnit::UNIT);
1602
67.3k
    _send_data_timer = ADD_TIMER(profile, "SendDataTime");
1603
67.3k
    _wait_mem_limit_timer = ADD_CHILD_TIMER(profile, "WaitMemLimitTime", "SendDataTime");
1604
67.3k
    _row_distribution_timer = ADD_CHILD_TIMER(profile, "RowDistributionTime", "SendDataTime");
1605
67.3k
    _filter_timer = ADD_CHILD_TIMER(profile, "FilterTime", "SendDataTime");
1606
67.3k
    _where_clause_timer = ADD_CHILD_TIMER(profile, "WhereClauseTime", "SendDataTime");
1607
67.3k
    _append_node_channel_timer = ADD_CHILD_TIMER(profile, "AppendNodeChannelTime", "SendDataTime");
1608
67.3k
    _add_partition_request_timer =
1609
67.3k
            ADD_CHILD_TIMER(profile, "AddPartitionRequestTime", "SendDataTime");
1610
67.3k
    _validate_data_timer = ADD_TIMER(profile, "ValidateDataTime");
1611
67.3k
    _open_timer = ADD_TIMER(profile, "OpenTime");
1612
67.3k
    _close_timer = ADD_TIMER(profile, "CloseWaitTime");
1613
67.3k
    _non_blocking_send_timer = ADD_TIMER(profile, "NonBlockingSendTime");
1614
67.3k
    _non_blocking_send_work_timer =
1615
67.3k
            ADD_CHILD_TIMER(profile, "NonBlockingSendWorkTime", "NonBlockingSendTime");
1616
67.3k
    _serialize_batch_timer =
1617
67.3k
            ADD_CHILD_TIMER(profile, "SerializeBatchTime", "NonBlockingSendWorkTime");
1618
67.3k
    _total_add_batch_exec_timer = ADD_TIMER(profile, "TotalAddBatchExecTime");
1619
67.3k
    _max_add_batch_exec_timer = ADD_TIMER(profile, "MaxAddBatchExecTime");
1620
67.3k
    _total_wait_exec_timer = ADD_TIMER(profile, "TotalWaitExecTime");
1621
67.3k
    _max_wait_exec_timer = ADD_TIMER(profile, "MaxWaitExecTime");
1622
67.3k
    _add_batch_number = ADD_COUNTER(profile, "NumberBatchAdded", TUnit::UNIT);
1623
67.3k
    _num_node_channels = ADD_COUNTER(profile, "NumberNodeChannels", TUnit::UNIT);
1624
67.3k
    _load_back_pressure_version_time_ms = ADD_TIMER(profile, "LoadBackPressureVersionTimeMs");
1625
1626
#ifdef DEBUG
1627
    // check: tablet ids should be unique
1628
    {
1629
        std::unordered_set<int64_t> tablet_ids;
1630
        const auto& partitions = _vpartition->get_partitions();
1631
        for (int i = 0; i < _schema->indexes().size(); ++i) {
1632
            for (const auto& partition : partitions) {
1633
                for (const auto& tablet : partition->indexes[i].tablets) {
1634
                    CHECK(tablet_ids.count(tablet) == 0) << "found duplicate tablet id: " << tablet;
1635
                    tablet_ids.insert(tablet);
1636
                }
1637
            }
1638
        }
1639
    }
1640
#endif
1641
1642
    // open all channels
1643
67.3k
    const auto& partitions = _vpartition->get_partitions();
1644
135k
    for (int i = 0; i < _schema->indexes().size(); ++i) {
1645
        // collect all tablets belong to this rollup
1646
67.7k
        std::vector<TTabletWithPartition> tablets;
1647
67.7k
        auto* index = _schema->indexes()[i];
1648
83.7k
        for (const auto& part : partitions) {
1649
405k
            for (const auto& tablet : part->indexes[i].tablets) {
1650
405k
                TTabletWithPartition tablet_with_partition;
1651
405k
                tablet_with_partition.partition_id = part->id;
1652
405k
                tablet_with_partition.tablet_id = tablet;
1653
405k
                tablets.emplace_back(std::move(tablet_with_partition));
1654
405k
                _build_tablet_replica_info(tablet, part);
1655
405k
            }
1656
83.7k
        }
1657
67.7k
        if (tablets.empty() && !_vpartition->is_auto_partition()) {
1658
0
            LOG(WARNING) << "load job:" << state->load_job_id() << " index: " << index->index_id
1659
0
                         << " would open 0 tablet";
1660
0
        }
1661
67.7k
        _channels.emplace_back(new IndexChannel(this, index->index_id, index->where_clause));
1662
67.7k
        _index_id_to_channel[index->index_id] = _channels.back();
1663
67.7k
        RETURN_IF_ERROR(_channels.back()->init(state, tablets));
1664
67.7k
    }
1665
1666
67.3k
    RETURN_IF_ERROR(_init_row_distribution());
1667
1668
67.3k
    _inited = true;
1669
67.3k
    return Status::OK();
1670
67.3k
}
1671
1672
Status VTabletWriter::_incremental_open_node_channel(
1673
178
        const std::vector<TOlapTablePartition>& partitions) {
1674
    // do what we did in prepare() for partitions. indexes which don't change when we create new partition is orthogonal to partitions.
1675
178
    std::unique_lock<bthread::Mutex> _l(_stop_check_channel);
1676
356
    for (int i = 0; i < _schema->indexes().size(); ++i) {
1677
178
        const OlapTableIndexSchema* index = _schema->indexes()[i];
1678
178
        std::vector<TTabletWithPartition> tablets;
1679
357
        for (const auto& t_part : partitions) {
1680
357
            VOlapTablePartition* part = nullptr;
1681
357
            RETURN_IF_ERROR(_vpartition->generate_partition_from(t_part, part));
1682
8.82k
            for (const auto& tablet : part->indexes[i].tablets) {
1683
8.82k
                TTabletWithPartition tablet_with_partition;
1684
8.82k
                tablet_with_partition.partition_id = part->id;
1685
8.82k
                tablet_with_partition.tablet_id = tablet;
1686
8.82k
                tablets.emplace_back(std::move(tablet_with_partition));
1687
8.82k
                _build_tablet_replica_info(tablet, part);
1688
8.82k
            }
1689
357
            DCHECK(!tablets.empty()) << "incremental open got nothing!";
1690
357
        }
1691
        // update and reinit for existing channels.
1692
178
        std::shared_ptr<IndexChannel> channel = _index_id_to_channel[index->index_id];
1693
178
        DCHECK(channel != nullptr);
1694
178
        RETURN_IF_ERROR(channel->init(_state, tablets, true)); // add tablets into it
1695
178
    }
1696
1697
178
    fmt::memory_buffer buf;
1698
178
    for (auto& channel : _channels) {
1699
        // incremental open new partition's tablet on storage side
1700
178
        channel->for_each_node_channel(
1701
178
                [](const std::shared_ptr<VNodeChannel>& ch) { ch->incremental_open(); });
1702
178
        fmt::format_to(buf, "index id:{}", channel->_index_id);
1703
178
        VLOG_DEBUG << "list of open index id = " << fmt::to_string(buf);
1704
1705
178
        channel->for_each_node_channel([&channel](const std::shared_ptr<VNodeChannel>& ch) {
1706
178
            auto st = ch->open_wait();
1707
178
            if (!st.ok()) {
1708
                // The open() phase is mainly to generate DeltaWriter instances on the nodes corresponding to each node channel.
1709
                // This phase will not fail due to a single tablet.
1710
                // Therefore, if the open() phase fails, all tablets corresponding to the node need to be marked as failed.
1711
0
                channel->mark_as_failed(
1712
0
                        ch.get(),
1713
0
                        fmt::format("{}, open failed, err: {}", ch->channel_info(), st.to_string()),
1714
0
                        -1);
1715
0
            }
1716
178
        });
1717
1718
178
        RETURN_IF_ERROR(channel->check_intolerable_failure());
1719
178
    }
1720
1721
178
    return Status::OK();
1722
178
}
1723
1724
void VTabletWriter::_build_tablet_replica_info(const int64_t tablet_id,
1725
413k
                                               VOlapTablePartition* partition) {
1726
414k
    if (partition != nullptr) {
1727
414k
        int total_replicas_num =
1728
414k
                partition->total_replica_num == 0 ? _num_replicas : partition->total_replica_num;
1729
414k
        int load_required_replicas_num = partition->load_required_replica_num == 0
1730
414k
                                                 ? (_num_replicas + 1) / 2
1731
414k
                                                 : partition->load_required_replica_num;
1732
414k
        _tablet_replica_info.emplace(
1733
414k
                tablet_id, std::make_pair(total_replicas_num, load_required_replicas_num));
1734
        // Copy version gap backends info for this tablet
1735
414k
        if (auto it = partition->tablet_version_gap_backends.find(tablet_id);
1736
414k
            it != partition->tablet_version_gap_backends.end()) {
1737
0
            _tablet_version_gap_backends[tablet_id] = it->second;
1738
0
        }
1739
18.4E
    } else {
1740
18.4E
        _tablet_replica_info.emplace(tablet_id,
1741
18.4E
                                     std::make_pair(_num_replicas, (_num_replicas + 1) / 2));
1742
18.4E
    }
1743
413k
}
1744
1745
144
void VTabletWriter::_cancel_all_channel(Status status) {
1746
144
    for (const auto& index_channel : _channels) {
1747
144
        index_channel->for_each_node_channel([&status](const std::shared_ptr<VNodeChannel>& ch) {
1748
144
            ch->cancel(status.to_string());
1749
144
        });
1750
144
    }
1751
144
    LOG(INFO) << fmt::format(
1752
144
            "close olap table sink. load_id={}, txn_id={}, canceled all node channels due to "
1753
144
            "error: {}",
1754
144
            print_id(_load_id), _txn_id, status);
1755
144
}
1756
1757
103k
Status VTabletWriter::_send_new_partition_batch() {
1758
103k
    if (_row_distribution.need_deal_batching()) { // maybe try_close more than 1 time
1759
159
        RETURN_IF_ERROR(_row_distribution.automatic_create_partition());
1760
1761
159
        Block tmp_block = _row_distribution._batching_block->to_block(); // Borrow out, for lval ref
1762
1763
        // these order is unique.
1764
        //  1. clear batching stats(and flag goes true) so that we won't make a new batching process in dealing batched block.
1765
        //  2. deal batched block
1766
        //  3. now reuse the column of lval block. cuz write doesn't real adjust it. it generate a new block from that.
1767
159
        _row_distribution.clear_batching_stats();
1768
159
        RETURN_IF_ERROR(this->write(_state, tmp_block));
1769
159
        _row_distribution._batching_block->set_mutable_columns(
1770
159
                tmp_block.mutate_columns()); // Recovery back
1771
159
        _row_distribution._batching_block->clear_column_data();
1772
159
        _row_distribution._deal_batched = false;
1773
159
    }
1774
103k
    return Status::OK();
1775
103k
}
1776
1777
67.3k
void VTabletWriter::_do_try_close(RuntimeState* state, const Status& exec_status) {
1778
67.3k
    SCOPED_TIMER(_close_timer);
1779
67.3k
    Status status = exec_status;
1780
1781
    // must before set _try_close
1782
67.3k
    if (status.ok()) {
1783
67.2k
        SCOPED_TIMER(_operator_profile->total_time_counter());
1784
67.2k
        _row_distribution._deal_batched = true;
1785
67.2k
        status = _send_new_partition_batch();
1786
67.2k
    }
1787
1788
67.3k
    _try_close = true; // will stop periodic thread
1789
67.3k
    if (status.ok()) {
1790
        // BE id -> add_batch method counter
1791
67.2k
        std::unordered_map<int64_t, AddBatchCounter> node_add_batch_counter_map;
1792
1793
        // only if status is ok can we call this _profile->total_time_counter().
1794
        // if status is not ok, this sink may not be prepared, so that _profile is null
1795
67.2k
        SCOPED_TIMER(_operator_profile->total_time_counter());
1796
68.2k
        for (const auto& index_channel : _channels) {
1797
            // two-step mark close. first we send close_origin to recievers to close all originly exist TabletsChannel.
1798
            // when they all closed, we are sure all Writer of instances called _do_try_close. that means no new channel
1799
            // will be opened. the refcount of recievers will be monotonically decreasing. then we are safe to close all
1800
            // our channels.
1801
68.2k
            if (index_channel->has_incremental_node_channel()) {
1802
0
                if (!status.ok()) {
1803
0
                    break;
1804
0
                }
1805
0
                VLOG_TRACE << _sender_id << " first stage close start " << _txn_id;
1806
0
                index_channel->for_init_node_channel(
1807
0
                        [&index_channel, &status, this](const std::shared_ptr<VNodeChannel>& ch) {
1808
0
                            if (!status.ok() || ch->is_closed()) {
1809
0
                                return;
1810
0
                            }
1811
0
                            VLOG_DEBUG << index_channel->_parent->_sender_id << "'s " << ch->host()
1812
0
                                       << "mark close1 for inits " << _txn_id;
1813
0
                            ch->mark_close(true);
1814
0
                            if (ch->is_cancelled()) {
1815
0
                                status = cancel_channel_and_check_intolerable_failure(
1816
0
                                        std::move(status), ch->get_cancel_msg(), *index_channel,
1817
0
                                        *ch);
1818
0
                            }
1819
0
                        });
1820
0
                if (!status.ok()) {
1821
0
                    break;
1822
0
                }
1823
                // Do not need to wait after quorum success,
1824
                // for first-stage close_wait only ensure incremental node channels load has been completed,
1825
                // unified waiting in the second-stage close_wait.
1826
0
                status = index_channel->close_wait(_state, nullptr, nullptr,
1827
0
                                                   index_channel->init_node_channel_ids(), false);
1828
0
                if (!status.ok()) {
1829
0
                    break;
1830
0
                }
1831
0
                VLOG_DEBUG << _sender_id << " first stage finished. closeing inc nodes " << _txn_id;
1832
0
                index_channel->for_inc_node_channel(
1833
0
                        [&index_channel, &status, this](const std::shared_ptr<VNodeChannel>& ch) {
1834
0
                            if (!status.ok() || ch->is_closed()) {
1835
0
                                return;
1836
0
                            }
1837
                            // only first try close, all node channels will mark_close()
1838
0
                            VLOG_DEBUG << index_channel->_parent->_sender_id << "'s " << ch->host()
1839
0
                                       << "mark close2 for inc " << _txn_id;
1840
0
                            ch->mark_close();
1841
0
                            if (ch->is_cancelled()) {
1842
0
                                status = cancel_channel_and_check_intolerable_failure(
1843
0
                                        std::move(status), ch->get_cancel_msg(), *index_channel,
1844
0
                                        *ch);
1845
0
                            }
1846
0
                        });
1847
68.2k
            } else { // not has_incremental_node_channel
1848
68.2k
                VLOG_TRACE << _sender_id << " has no incremental channels " << _txn_id;
1849
68.2k
                index_channel->for_each_node_channel(
1850
68.2k
                        [&index_channel, &status](const std::shared_ptr<VNodeChannel>& ch) {
1851
68.2k
                            if (!status.ok() || ch->is_closed()) {
1852
0
                                return;
1853
0
                            }
1854
                            // only first try close, all node channels will mark_close()
1855
68.2k
                            ch->mark_close();
1856
68.2k
                            if (ch->is_cancelled()) {
1857
0
                                status = cancel_channel_and_check_intolerable_failure(
1858
0
                                        std::move(status), ch->get_cancel_msg(), *index_channel,
1859
0
                                        *ch);
1860
0
                            }
1861
68.2k
                        });
1862
68.2k
            }
1863
68.2k
        } // end for index channels
1864
67.2k
    }
1865
1866
67.3k
    if (!status.ok()) {
1867
96
        _cancel_all_channel(status);
1868
96
        _close_status = status;
1869
96
        _close_wait = true;
1870
96
    }
1871
67.3k
}
1872
1873
67.3k
Status VTabletWriter::close(Status exec_status) {
1874
67.3k
    if (!_inited) {
1875
0
        DCHECK(!exec_status.ok());
1876
0
        _cancel_all_channel(exec_status);
1877
0
        _close_status = exec_status;
1878
0
        return _close_status;
1879
0
    }
1880
1881
67.3k
    SCOPED_TIMER(_close_timer);
1882
67.3k
    SCOPED_TIMER(_operator_profile->total_time_counter());
1883
1884
    // will make the last batch of request-> close_wait will wait this finished.
1885
67.3k
    _do_try_close(_state, exec_status);
1886
67.3k
    TEST_INJECTION_POINT("VOlapTableSink::close");
1887
1888
67.3k
    DBUG_EXECUTE_IF("VTabletWriter.close.sleep", {
1889
67.3k
        auto sleep_sec = DebugPoints::instance()->get_debug_param_or_default<int32_t>(
1890
67.3k
                "VTabletWriter.close.sleep", "sleep_sec", 1);
1891
67.3k
        std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
1892
67.3k
    });
1893
67.3k
    DBUG_EXECUTE_IF("VTabletWriter.close.close_status_not_ok",
1894
67.3k
                    { _close_status = Status::InternalError("injected close status not ok"); });
1895
1896
    // If _close_status is not ok, all nodes have been canceled in try_close.
1897
67.3k
    if (_close_status.ok()) {
1898
67.2k
        auto status = Status::OK();
1899
        // BE id -> add_batch method counter
1900
67.2k
        std::unordered_map<int64_t, AddBatchCounter> node_add_batch_counter_map;
1901
67.2k
        WriterStats writer_stats;
1902
1903
68.2k
        for (const auto& index_channel : _channels) {
1904
68.2k
            if (!status.ok()) {
1905
0
                break;
1906
0
            }
1907
68.2k
            int64_t add_batch_exec_time = 0;
1908
68.2k
            int64_t wait_exec_time = 0;
1909
68.2k
            status = index_channel->close_wait(_state, &writer_stats, &node_add_batch_counter_map,
1910
68.2k
                                               index_channel->each_node_channel_ids(), true);
1911
1912
            // Due to the non-determinism of compaction, the rowsets of each replica may be different from each other on different
1913
            // BE nodes. The number of rows filtered in SegmentWriter depends on the historical rowsets located in the correspoding
1914
            // BE node. So we check the number of rows filtered on each succeccful BE to ensure the consistency of the current load
1915
68.2k
            if (status.ok() && !_write_single_replica && _schema->is_strict_mode() &&
1916
68.2k
                _schema->is_partial_update()) {
1917
59
                if (Status st = index_channel->check_tablet_filtered_rows_consistency(); !st.ok()) {
1918
0
                    status = st;
1919
59
                } else {
1920
59
                    _state->set_num_rows_filtered_in_strict_mode_partial_update(
1921
59
                            index_channel->num_rows_filtered());
1922
59
                }
1923
59
            }
1924
1925
68.2k
            writer_stats.num_node_channels += index_channel->num_node_channels();
1926
68.2k
            writer_stats.max_add_batch_exec_time_ns =
1927
68.2k
                    std::max(add_batch_exec_time, writer_stats.max_add_batch_exec_time_ns);
1928
68.2k
            writer_stats.max_wait_exec_time_ns =
1929
68.2k
                    std::max(wait_exec_time, writer_stats.max_wait_exec_time_ns);
1930
68.2k
        } // end for index channels
1931
1932
67.2k
        if (status.ok()) {
1933
            // TODO need to be improved
1934
67.1k
            LOG(INFO) << "total mem_exceeded_block_ns="
1935
67.1k
                      << writer_stats.channel_stat.mem_exceeded_block_ns
1936
67.1k
                      << ", total queue_push_lock_ns=" << writer_stats.queue_push_lock_ns
1937
67.1k
                      << ", total actual_consume_ns=" << writer_stats.actual_consume_ns
1938
67.1k
                      << ", load id=" << print_id(_load_id) << ", txn_id=" << _txn_id;
1939
1940
67.1k
            COUNTER_SET(_input_rows_counter, _number_input_rows);
1941
67.1k
            COUNTER_SET(_output_rows_counter, _number_output_rows);
1942
67.1k
            COUNTER_SET(_filtered_rows_counter,
1943
67.1k
                        _block_convertor->num_filtered_rows() +
1944
67.1k
                                _tablet_finder->num_filtered_rows() +
1945
67.1k
                                _state->num_rows_filtered_in_strict_mode_partial_update());
1946
67.1k
            COUNTER_SET(_send_data_timer, _send_data_ns);
1947
67.1k
            COUNTER_SET(_row_distribution_timer, (int64_t)_row_distribution_watch.elapsed_time());
1948
67.1k
            COUNTER_SET(_filter_timer, _filter_ns);
1949
67.1k
            COUNTER_SET(_append_node_channel_timer,
1950
67.1k
                        writer_stats.channel_stat.append_node_channel_ns);
1951
67.1k
            COUNTER_SET(_where_clause_timer, writer_stats.channel_stat.where_clause_ns);
1952
67.1k
            COUNTER_SET(_wait_mem_limit_timer, writer_stats.channel_stat.mem_exceeded_block_ns);
1953
67.1k
            COUNTER_SET(_validate_data_timer, _block_convertor->validate_data_ns());
1954
67.1k
            COUNTER_SET(_serialize_batch_timer, writer_stats.serialize_batch_ns);
1955
67.1k
            COUNTER_SET(_non_blocking_send_work_timer, writer_stats.actual_consume_ns);
1956
67.1k
            COUNTER_SET(_total_add_batch_exec_timer, writer_stats.total_add_batch_exec_time_ns);
1957
67.1k
            COUNTER_SET(_max_add_batch_exec_timer, writer_stats.max_add_batch_exec_time_ns);
1958
67.1k
            COUNTER_SET(_total_wait_exec_timer, writer_stats.total_wait_exec_time_ns);
1959
67.1k
            COUNTER_SET(_max_wait_exec_timer, writer_stats.max_wait_exec_time_ns);
1960
67.1k
            COUNTER_SET(_add_batch_number, writer_stats.total_add_batch_num);
1961
67.1k
            COUNTER_SET(_num_node_channels, writer_stats.num_node_channels);
1962
67.1k
            COUNTER_SET(_load_back_pressure_version_time_ms,
1963
67.1k
                        writer_stats.load_back_pressure_version_time_ms);
1964
67.1k
            g_sink_load_back_pressure_version_time_ms
1965
67.1k
                    << writer_stats.load_back_pressure_version_time_ms;
1966
1967
            // _number_input_rows don't contain num_rows_load_filtered and num_rows_load_unselected in scan node
1968
67.1k
            int64_t num_rows_load_total = _number_input_rows + _state->num_rows_load_filtered() +
1969
67.1k
                                          _state->num_rows_load_unselected();
1970
67.1k
            _state->set_num_rows_load_total(num_rows_load_total);
1971
67.1k
            _state->update_num_rows_load_filtered(
1972
67.1k
                    _block_convertor->num_filtered_rows() + _tablet_finder->num_filtered_rows() +
1973
67.1k
                    _state->num_rows_filtered_in_strict_mode_partial_update());
1974
67.1k
            _state->update_num_rows_load_unselected(
1975
67.1k
                    _tablet_finder->num_immutable_partition_filtered_rows());
1976
1977
67.1k
            if (_state->enable_profile() && _state->profile_level() >= 2) {
1978
                // Output detailed profiling info for auto-partition requests
1979
7
                _row_distribution.output_profile_info(_operator_profile);
1980
7
            }
1981
1982
            // print log of add batch time of all node, for tracing load performance easily
1983
67.1k
            std::stringstream ss;
1984
67.1k
            ss << "finished to close olap table sink. load_id=" << print_id(_load_id)
1985
67.1k
               << ", txn_id=" << _txn_id
1986
67.1k
               << ", node add batch time(ms)/wait execution time(ms)/close time(ms)/num: ";
1987
67.1k
            for (auto const& pair : node_add_batch_counter_map) {
1988
67.1k
                ss << "{" << pair.first << ":(" << (pair.second.add_batch_execution_time_us / 1000)
1989
67.1k
                   << ")(" << (pair.second.add_batch_wait_execution_time_us / 1000) << ")("
1990
67.1k
                   << pair.second.close_wait_time_ms << ")(" << pair.second.add_batch_num << ")} ";
1991
67.1k
            }
1992
67.1k
            LOG(INFO) << ss.str();
1993
67.1k
        } else {
1994
39
            _cancel_all_channel(status);
1995
39
        }
1996
67.2k
        _close_status = status;
1997
67.2k
    }
1998
1999
    // Sender join() must put after node channels mark_close/cancel.
2000
    // But there is no specific sequence required between sender join() & close_wait().
2001
67.3k
    if (_sender_thread) {
2002
67.3k
        bthread_join(_sender_thread, nullptr);
2003
        // We have to wait all task in _send_batch_thread_pool_token finished,
2004
        // because it is difficult to handle concurrent problem if we just
2005
        // shutdown it.
2006
67.3k
        _send_batch_thread_pool_token->wait();
2007
67.3k
    }
2008
2009
    // We clear NodeChannels' batches here, cuz NodeChannels' batches destruction will use
2010
    // OlapTableSink::_mem_tracker and its parents.
2011
    // But their destructions are after OlapTableSink's.
2012
68.4k
    for (const auto& index_channel : _channels) {
2013
68.4k
        index_channel->for_each_node_channel(
2014
68.4k
                [](const std::shared_ptr<VNodeChannel>& ch) { ch->clear_all_blocks(); });
2015
68.4k
    }
2016
67.3k
    return _close_status;
2017
67.3k
}
2018
2019
void VTabletWriter::_generate_one_index_channel_payload(
2020
        RowPartTabletIds& row_part_tablet_id, int32_t index_idx,
2021
37.5k
        ChannelDistributionPayload& channel_payload) {
2022
37.5k
    auto& row_ids = row_part_tablet_id.row_ids;
2023
37.5k
    auto& tablet_ids = row_part_tablet_id.tablet_ids;
2024
2025
37.5k
    size_t row_cnt = row_ids.size();
2026
2027
35.7M
    for (size_t i = 0; i < row_ids.size(); i++) {
2028
        // (tablet_id, VNodeChannel) where this tablet locate
2029
35.7M
        auto it = _channels[index_idx]->_channels_by_tablet.find(tablet_ids[i]);
2030
18.4E
        DCHECK(it != _channels[index_idx]->_channels_by_tablet.end())
2031
18.4E
                << "unknown tablet, tablet_id=" << tablet_ids[i];
2032
2033
35.7M
        std::vector<std::shared_ptr<VNodeChannel>>& tablet_locations = it->second;
2034
35.8M
        for (const auto& locate_node : tablet_locations) {
2035
35.8M
            auto payload_it = channel_payload.find(locate_node.get()); // <VNodeChannel*, Payload>
2036
35.8M
            if (payload_it == channel_payload.end()) {
2037
37.2k
                auto [tmp_it, _] = channel_payload.emplace(
2038
37.2k
                        locate_node.get(),
2039
37.2k
                        Payload {std::make_unique<IColumn::Selector>(), std::vector<int64_t>()});
2040
37.2k
                payload_it = tmp_it;
2041
37.2k
                payload_it->second.first->reserve(row_cnt);
2042
37.2k
                payload_it->second.second.reserve(row_cnt);
2043
37.2k
            }
2044
35.8M
            payload_it->second.first->push_back(row_ids[i]);
2045
35.8M
            payload_it->second.second.push_back(tablet_ids[i]);
2046
35.8M
        }
2047
35.7M
    }
2048
37.5k
}
2049
2050
void VTabletWriter::_generate_index_channels_payloads(
2051
        std::vector<RowPartTabletIds>& row_part_tablet_ids,
2052
36.5k
        ChannelDistributionPayloadVec& payload) {
2053
74.1k
    for (int i = 0; i < _schema->indexes().size(); i++) {
2054
37.5k
        _generate_one_index_channel_payload(row_part_tablet_ids[i], i, payload[i]);
2055
37.5k
    }
2056
36.5k
}
2057
2058
36.5k
Status VTabletWriter::write(RuntimeState* state, doris::Block& input_block) {
2059
36.5k
    SCOPED_CONSUME_MEM_TRACKER(_mem_tracker.get());
2060
36.5k
    Status status = Status::OK();
2061
2062
36.5k
    DCHECK(_state);
2063
36.5k
    DCHECK(_state->query_options().__isset.dry_run_query);
2064
36.5k
    if (_state->query_options().dry_run_query) {
2065
1
        return status;
2066
1
    }
2067
2068
    // check out of limit
2069
36.5k
    RETURN_IF_ERROR(_send_new_partition_batch());
2070
2071
36.5k
    auto rows = input_block.rows();
2072
36.5k
    auto bytes = input_block.bytes();
2073
36.5k
    if (UNLIKELY(rows == 0)) {
2074
0
        return status;
2075
0
    }
2076
36.5k
    SCOPED_TIMER(_operator_profile->total_time_counter());
2077
36.5k
    SCOPED_RAW_TIMER(&_send_data_ns);
2078
2079
36.5k
    std::shared_ptr<Block> block;
2080
36.5k
    _number_input_rows += rows;
2081
    // update incrementally so that FE can get the progress.
2082
    // the real 'num_rows_load_total' will be set when sink being closed.
2083
36.5k
    _state->update_num_rows_load_total(rows);
2084
36.5k
    _state->update_num_bytes_load_total(bytes);
2085
36.5k
    DorisMetrics::instance()->load_rows->increment(rows);
2086
36.5k
    DorisMetrics::instance()->load_bytes->increment(bytes);
2087
2088
36.5k
    _row_distribution_watch.start();
2089
36.5k
    RETURN_IF_ERROR(_row_distribution.generate_rows_distribution(
2090
36.5k
            input_block, block, _row_part_tablet_ids, _number_input_rows));
2091
2092
36.5k
    ChannelDistributionPayloadVec channel_to_payload;
2093
2094
36.5k
    channel_to_payload.resize(_channels.size());
2095
36.5k
    _generate_index_channels_payloads(_row_part_tablet_ids, channel_to_payload);
2096
36.5k
    _row_distribution_watch.stop();
2097
2098
    // Add block to node channel
2099
74.1k
    for (size_t i = 0; i < _channels.size(); i++) {
2100
37.5k
        for (const auto& entry : channel_to_payload[i]) {
2101
            // if this node channel is already failed, this add_row will be skipped
2102
            // entry.second is a [row -> tablet] mapping
2103
37.2k
            auto st = entry.first->add_block(block.get(), &entry.second);
2104
37.2k
            if (!st.ok()) {
2105
0
                _channels[i]->mark_as_failed(entry.first, st.to_string());
2106
0
            }
2107
37.2k
        }
2108
37.5k
    }
2109
2110
    // check intolerable failure
2111
37.5k
    for (const auto& index_channel : _channels) {
2112
37.5k
        RETURN_IF_ERROR(index_channel->check_intolerable_failure());
2113
37.5k
    }
2114
2115
36.5k
    g_sink_write_bytes << bytes;
2116
36.5k
    g_sink_write_rows << rows;
2117
36.5k
    return Status::OK();
2118
36.5k
}
2119
2120
} // namespace doris