Coverage Report

Created: 2026-03-13 07:13

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/exec/sink/load_stream_stub.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "exec/sink/load_stream_stub.h"
19
20
#include <sstream>
21
22
#include "common/cast_set.h"
23
#include "runtime/query_context.h"
24
#include "storage/rowset/rowset_writer.h"
25
#include "util/brpc_client_cache.h"
26
#include "util/debug_points.h"
27
#include "util/network_util.h"
28
#include "util/thrift_util.h"
29
#include "util/uid_util.h"
30
31
namespace doris {
32
#include "common/compile_check_begin.h"
33
34
int LoadStreamReplyHandler::on_received_messages(brpc::StreamId id, butil::IOBuf* const messages[],
35
28
                                                 size_t size) {
36
28
    auto stub = _stub.lock();
37
28
    if (!stub) {
38
0
        LOG(WARNING) << "stub is not exist when on_received_messages, " << *this
39
0
                     << ", stream_id=" << id;
40
0
        return 0;
41
0
    }
42
56
    for (size_t i = 0; i < size; i++) {
43
28
        butil::IOBufAsZeroCopyInputStream wrapper(*messages[i]);
44
28
        PLoadStreamResponse response;
45
28
        response.ParseFromZeroCopyStream(&wrapper);
46
47
28
        if (response.eos()) {
48
28
            stub->_is_eos.store(true);
49
28
        }
50
51
28
        Status st = Status::create<false>(response.status());
52
53
28
        std::stringstream ss;
54
28
        ss << "on_received_messages, " << *this << ", stream_id=" << id;
55
28
        if (response.success_tablet_ids_size() > 0) {
56
14
            ss << ", success tablet ids:";
57
98
            for (auto tablet_id : response.success_tablet_ids()) {
58
98
                ss << " " << tablet_id;
59
98
            }
60
14
            std::lock_guard<bthread::Mutex> lock(stub->_success_tablets_mutex);
61
98
            for (auto tablet_id : response.success_tablet_ids()) {
62
98
                stub->_success_tablets.push_back(tablet_id);
63
98
            }
64
14
        }
65
28
        if (response.failed_tablets_size() > 0) {
66
0
            ss << ", failed tablet ids:";
67
0
            for (auto pb : response.failed_tablets()) {
68
0
                ss << " " << pb.id() << ":" << Status::create(pb.status());
69
0
            }
70
0
            std::lock_guard<bthread::Mutex> lock(stub->_failed_tablets_mutex);
71
0
            for (auto pb : response.failed_tablets()) {
72
0
                stub->_failed_tablets.emplace(pb.id(), Status::create(pb.status()));
73
0
            }
74
0
        }
75
28
        if (response.tablet_schemas_size() > 0) {
76
0
            ss << ", tablet schema num: " << response.tablet_schemas_size();
77
0
            std::lock_guard<bthread::Mutex> lock(stub->_schema_mutex);
78
0
            for (const auto& schema : response.tablet_schemas()) {
79
0
                auto tablet_schema = std::make_unique<TabletSchema>();
80
0
                tablet_schema->init_from_pb(schema.tablet_schema());
81
0
                stub->_tablet_schema_for_index->emplace(schema.index_id(),
82
0
                                                        std::move(tablet_schema));
83
0
                stub->_enable_unique_mow_for_index->emplace(
84
0
                        schema.index_id(), schema.enable_unique_key_merge_on_write());
85
0
            }
86
0
            stub->_schema_cv.notify_all();
87
0
        }
88
28
        ss << ", status: " << st;
89
28
        LOG(INFO) << ss.str();
90
91
28
        if (response.tablet_load_rowset_num_infos_size() > 0) {
92
0
            stub->_refresh_back_pressure_version_wait_time(response.tablet_load_rowset_num_infos());
93
0
        }
94
95
28
        if (response.has_load_stream_profile()) {
96
0
            TRuntimeProfileTree tprofile;
97
0
            const uint8_t* buf =
98
0
                    reinterpret_cast<const uint8_t*>(response.load_stream_profile().data());
99
0
            uint32_t len = cast_set<uint32_t>(response.load_stream_profile().size());
100
0
            auto status = deserialize_thrift_msg(buf, &len, false, &tprofile);
101
0
            if (status.ok()) {
102
                // TODO
103
                //_sink->_state->load_channel_profile()->update(tprofile);
104
0
            } else {
105
0
                LOG(WARNING) << "load stream TRuntimeProfileTree deserialize failed, errmsg="
106
0
                             << status;
107
0
            }
108
0
        }
109
28
    }
110
28
    return 0;
111
28
}
112
113
28
void LoadStreamReplyHandler::on_closed(brpc::StreamId id) {
114
28
    Defer defer {[this]() { delete this; }};
115
28
    LOG(INFO) << "on_closed, " << *this << ", stream_id=" << id;
116
28
    auto stub = _stub.lock();
117
28
    if (!stub) {
118
0
        LOG(WARNING) << "stub is not exist when on_closed, " << *this;
119
0
        return;
120
0
    }
121
28
    stub->_is_closed.store(true);
122
28
}
123
124
56
inline std::ostream& operator<<(std::ostream& ostr, const LoadStreamReplyHandler& handler) {
125
56
    ostr << "LoadStreamReplyHandler load_id=" << UniqueId(handler._load_id)
126
56
         << ", dst_id=" << handler._dst_id;
127
56
    return ostr;
128
56
}
129
130
LoadStreamStub::LoadStreamStub(PUniqueId load_id, int64_t src_id,
131
                               std::shared_ptr<IndexToTabletSchema> schema_map,
132
                               std::shared_ptr<IndexToEnableMoW> mow_map, bool incremental)
133
85
        : _load_id(load_id),
134
85
          _src_id(src_id),
135
85
          _tablet_schema_for_index(schema_map),
136
85
          _enable_unique_mow_for_index(mow_map),
137
85
          _is_incremental(incremental) {};
138
139
85
LoadStreamStub::~LoadStreamStub() {
140
85
    if (_is_open.load() && !_is_closed.load()) {
141
0
        auto ret = brpc::StreamClose(_stream_id);
142
0
        LOG(INFO) << *this << " is deconstructed, close " << (ret == 0 ? "success" : "failed");
143
0
    }
144
85
}
145
146
// open_load_stream
147
Status LoadStreamStub::open(BrpcClientCache<PBackendService_Stub>* client_cache,
148
                            const NodeInfo& node_info, int64_t txn_id,
149
                            const OlapTableSchemaParam& schema,
150
                            const std::vector<PTabletID>& tablets_for_schema, int total_streams,
151
28
                            int64_t idle_timeout_ms, bool enable_profile) {
152
28
    std::unique_lock<bthread::Mutex> lock(_open_mutex);
153
28
    if (_is_init.load()) {
154
0
        return _status;
155
0
    }
156
28
    _is_init.store(true);
157
28
    _dst_id = node_info.id;
158
28
    brpc::StreamOptions opt;
159
28
    opt.max_buf_size = cast_set<int>(config::load_stream_max_buf_size);
160
28
    opt.idle_timeout_ms = idle_timeout_ms;
161
28
    opt.messages_in_batch = config::load_stream_messages_in_batch;
162
28
    opt.handler = new LoadStreamReplyHandler(_load_id, _dst_id, shared_from_this());
163
28
    brpc::Controller cntl;
164
28
    if (int ret = brpc::StreamCreate(&_stream_id, cntl, &opt)) {
165
0
        delete opt.handler;
166
0
        _status = Status::Error<true>(ret, "Failed to create stream");
167
0
        return _status;
168
0
    }
169
28
    cntl.set_timeout_ms(config::open_load_stream_timeout_ms);
170
28
    POpenLoadStreamRequest request;
171
28
    *request.mutable_load_id() = _load_id;
172
28
    request.set_src_id(_src_id);
173
28
    request.set_txn_id(txn_id);
174
28
    request.set_enable_profile(enable_profile);
175
28
    if (_is_incremental) {
176
0
        request.set_total_streams(0);
177
28
    } else if (total_streams > 0) {
178
28
        request.set_total_streams(total_streams);
179
28
    } else {
180
0
        _status = Status::InternalError("total_streams should be greator than 0");
181
0
        return _status;
182
0
    }
183
28
    request.set_idle_timeout_ms(idle_timeout_ms);
184
28
    schema.to_protobuf(request.mutable_schema());
185
28
    for (auto& tablet : tablets_for_schema) {
186
14
        *request.add_tablets() = tablet;
187
14
    }
188
28
    POpenLoadStreamResponse response;
189
    // set connection_group "streaming" to distinguish with non-streaming connections
190
28
    const auto& stub = client_cache->get_client(node_info.host, node_info.brpc_port);
191
28
    if (stub == nullptr) {
192
0
        return Status::InternalError("failed to init brpc client to {}:{}", node_info.host,
193
0
                                     node_info.brpc_port);
194
0
    }
195
28
    stub->open_load_stream(&cntl, &request, &response, nullptr);
196
28
    for (const auto& resp : response.tablet_schemas()) {
197
14
        auto tablet_schema = std::make_unique<TabletSchema>();
198
14
        tablet_schema->init_from_pb(resp.tablet_schema());
199
14
        _tablet_schema_for_index->emplace(resp.index_id(), std::move(tablet_schema));
200
14
        _enable_unique_mow_for_index->emplace(resp.index_id(),
201
14
                                              resp.enable_unique_key_merge_on_write());
202
14
    }
203
28
    if (response.tablet_load_rowset_num_infos_size() > 0) {
204
0
        _refresh_back_pressure_version_wait_time(response.tablet_load_rowset_num_infos());
205
0
    }
206
28
    if (cntl.Failed()) {
207
0
        brpc::StreamClose(_stream_id);
208
0
        _status = Status::InternalError("Failed to connect to backend {}: {}", _dst_id,
209
0
                                        cntl.ErrorText());
210
0
        return _status;
211
0
    }
212
28
    LOG(INFO) << "open load stream to host=" << node_info.host << ", port=" << node_info.brpc_port
213
28
              << ", " << *this;
214
28
    _is_open.store(true);
215
28
    _status = Status::OK();
216
28
    return _status;
217
28
}
218
219
// APPEND_DATA
220
Status LoadStreamStub::append_data(int64_t partition_id, int64_t index_id, int64_t tablet_id,
221
                                   int32_t segment_id, uint64_t offset, std::span<const Slice> data,
222
3.06k
                                   bool segment_eos, FileType file_type) {
223
3.06k
    if (!_is_open.load()) {
224
0
        add_failed_tablet(tablet_id, _status);
225
0
        return _status;
226
0
    }
227
3.06k
    DBUG_EXECUTE_IF("LoadStreamStub.skip_send_segment", { return Status::OK(); });
228
3.06k
    PStreamHeader header;
229
3.06k
    header.set_src_id(_src_id);
230
3.06k
    *header.mutable_load_id() = _load_id;
231
3.06k
    header.set_partition_id(partition_id);
232
3.06k
    header.set_index_id(index_id);
233
3.06k
    header.set_tablet_id(tablet_id);
234
3.06k
    header.set_segment_id(segment_id);
235
3.06k
    header.set_segment_eos(segment_eos);
236
3.06k
    header.set_offset(offset);
237
3.06k
    header.set_opcode(doris::PStreamHeader::APPEND_DATA);
238
3.06k
    header.set_file_type(file_type);
239
3.06k
    return _encode_and_send(header, data);
240
3.06k
}
241
242
// ADD_SEGMENT
243
Status LoadStreamStub::add_segment(int64_t partition_id, int64_t index_id, int64_t tablet_id,
244
64
                                   int32_t segment_id, const SegmentStatistics& segment_stat) {
245
64
    if (!_is_open.load()) {
246
0
        add_failed_tablet(tablet_id, _status);
247
0
        return _status;
248
0
    }
249
64
    DBUG_EXECUTE_IF("LoadStreamStub.skip_send_segment", { return Status::OK(); });
250
64
    PStreamHeader header;
251
64
    header.set_src_id(_src_id);
252
64
    *header.mutable_load_id() = _load_id;
253
64
    header.set_partition_id(partition_id);
254
64
    header.set_index_id(index_id);
255
64
    header.set_tablet_id(tablet_id);
256
64
    header.set_segment_id(segment_id);
257
64
    header.set_opcode(doris::PStreamHeader::ADD_SEGMENT);
258
64
    segment_stat.to_pb(header.mutable_segment_statistics());
259
64
    return _encode_and_send(header);
260
64
}
261
262
// CLOSE_LOAD
263
Status LoadStreamStub::close_load(const std::vector<PTabletID>& tablets_to_commit,
264
28
                                  int num_incremental_streams) {
265
28
    if (!_is_open.load()) {
266
0
        return _status;
267
0
    }
268
28
    PStreamHeader header;
269
28
    *header.mutable_load_id() = _load_id;
270
28
    header.set_src_id(_src_id);
271
28
    header.set_opcode(doris::PStreamHeader::CLOSE_LOAD);
272
98
    for (const auto& tablet : tablets_to_commit) {
273
98
        *header.add_tablets() = tablet;
274
98
    }
275
28
    header.set_num_incremental_streams(num_incremental_streams);
276
28
    _status = _encode_and_send(header);
277
28
    if (!_status.ok()) {
278
0
        LOG(WARNING) << "stream " << _stream_id << " close failed: " << _status;
279
0
        return _status;
280
0
    }
281
28
    _is_closing.store(true);
282
28
    return Status::OK();
283
28
}
284
285
// GET_SCHEMA
286
0
Status LoadStreamStub::get_schema(const std::vector<PTabletID>& tablets) {
287
0
    if (!_is_open.load()) {
288
0
        return _status;
289
0
    }
290
0
    PStreamHeader header;
291
0
    *header.mutable_load_id() = _load_id;
292
0
    header.set_src_id(_src_id);
293
0
    header.set_opcode(doris::PStreamHeader::GET_SCHEMA);
294
0
    std::ostringstream oss;
295
0
    oss << "fetching tablet schema from stream " << _stream_id
296
0
        << ", load id: " << print_id(_load_id) << ", tablet id:";
297
0
    for (const auto& tablet : tablets) {
298
0
        *header.add_tablets() = tablet;
299
0
        oss << " " << tablet.tablet_id();
300
0
    }
301
0
    if (tablets.size() == 0) {
302
0
        oss << " none";
303
0
    }
304
0
    LOG(INFO) << oss.str();
305
0
    return _encode_and_send(header);
306
0
}
307
308
Status LoadStreamStub::wait_for_schema(int64_t partition_id, int64_t index_id, int64_t tablet_id,
309
64
                                       int64_t timeout_ms) {
310
64
    if (!_is_open.load()) {
311
0
        return _status;
312
0
    }
313
64
    if (_tablet_schema_for_index->contains(index_id)) {
314
64
        return Status::OK();
315
64
    }
316
0
    PTabletID tablet;
317
0
    tablet.set_partition_id(partition_id);
318
0
    tablet.set_index_id(index_id);
319
0
    tablet.set_tablet_id(tablet_id);
320
0
    RETURN_IF_ERROR(get_schema({tablet}));
321
322
0
    MonotonicStopWatch watch;
323
0
    watch.start();
324
0
    while (!_tablet_schema_for_index->contains(index_id) &&
325
0
           watch.elapsed_time() / 1000 / 1000 < timeout_ms) {
326
0
        RETURN_IF_ERROR(check_cancel());
327
0
        static_cast<void>(wait_for_new_schema(100));
328
0
    }
329
330
0
    if (!_tablet_schema_for_index->contains(index_id)) {
331
0
        return Status::TimedOut("timeout to get tablet schema for index {}", index_id);
332
0
    }
333
0
    return Status::OK();
334
0
}
335
336
110
Status LoadStreamStub::close_finish_check(RuntimeState* state, bool* is_closed) {
337
110
    DBUG_EXECUTE_IF("LoadStreamStub::close_wait.long_wait", DBUG_BLOCK);
338
110
    DBUG_EXECUTE_IF("LoadStreamStub::close_finish_check.close_failed",
339
110
                    { return Status::InternalError("close failed"); });
340
110
    *is_closed = true;
341
110
    if (!_is_open.load()) {
342
        // we don't need to close wait on non-open streams
343
0
        return Status::OK();
344
0
    }
345
    // If stream is cancelled (e.g., due to connection failure), treat it as closed
346
    // to avoid waiting indefinitely for a stream that will never respond.
347
110
    if (_is_cancelled.load()) {
348
0
        return check_cancel();
349
0
    }
350
110
    if (state->get_query_ctx()->is_cancelled()) {
351
0
        return state->get_query_ctx()->exec_status();
352
0
    }
353
110
    if (!_is_closing.load()) {
354
0
        *is_closed = false;
355
0
        return _status;
356
0
    }
357
110
    if (_is_closed.load()) {
358
56
        RETURN_IF_ERROR(check_cancel());
359
56
        if (!_is_eos.load()) {
360
0
            return Status::InternalError("Stream closed without EOS, {}", to_string());
361
0
        }
362
56
        return Status::OK();
363
56
    }
364
54
    *is_closed = false;
365
54
    return Status::OK();
366
110
}
367
368
0
void LoadStreamStub::cancel(Status reason) {
369
0
    LOG(WARNING) << *this << " is cancelled because of " << reason;
370
0
    if (_is_open.load()) {
371
0
        brpc::StreamClose(_stream_id);
372
0
    }
373
0
    {
374
0
        std::lock_guard<bthread::Mutex> lock(_cancel_mutex);
375
0
        _cancel_st = reason;
376
0
        _is_cancelled.store(true);
377
0
    }
378
0
    _is_closed.store(true);
379
0
}
380
381
3.16k
Status LoadStreamStub::_encode_and_send(PStreamHeader& header, std::span<const Slice> data) {
382
3.16k
    butil::IOBuf buf;
383
3.16k
    size_t header_len = header.ByteSizeLong();
384
3.16k
    buf.append(reinterpret_cast<uint8_t*>(&header_len), sizeof(header_len));
385
3.16k
    buf.append(header.SerializeAsString());
386
3.16k
    size_t data_len = std::transform_reduce(data.begin(), data.end(), 0, std::plus(),
387
10.0k
                                            [](const Slice& s) { return s.get_size(); });
388
3.16k
    buf.append(reinterpret_cast<uint8_t*>(&data_len), sizeof(data_len));
389
10.0k
    for (const auto& slice : data) {
390
10.0k
        buf.append(slice.get_data(), slice.get_size());
391
10.0k
    }
392
3.16k
    bool eos = header.opcode() == doris::PStreamHeader::CLOSE_LOAD;
393
3.16k
    bool get_schema = header.opcode() == doris::PStreamHeader::GET_SCHEMA;
394
3.16k
    add_bytes_written(buf.size());
395
3.16k
    return _send_with_buffer(buf, eos || get_schema);
396
3.16k
}
397
398
3.16k
Status LoadStreamStub::_send_with_buffer(butil::IOBuf& buf, bool sync) {
399
3.16k
    butil::IOBuf output;
400
3.16k
    std::unique_lock<decltype(_buffer_mutex)> buffer_lock(_buffer_mutex);
401
3.16k
    _buffer.append(buf);
402
3.16k
    if (!sync && _buffer.size() < config::brpc_streaming_client_batch_bytes) {
403
3.13k
        return Status::OK();
404
3.13k
    }
405
26
    output.swap(_buffer);
406
    // acquire send lock while holding buffer lock, to ensure the message order
407
26
    std::lock_guard<decltype(_send_mutex)> send_lock(_send_mutex);
408
26
    buffer_lock.unlock();
409
18.4E
    VLOG_DEBUG << "send buf size : " << output.size() << ", sync: " << sync;
410
26
    auto st = _send_with_retry(output);
411
26
    if (!st.ok()) {
412
0
        _handle_failure(output, st);
413
0
    }
414
26
    return st;
415
3.16k
}
416
417
0
void LoadStreamStub::_handle_failure(butil::IOBuf& buf, Status st) {
418
0
    while (buf.size() > 0) {
419
        // step 1: parse header
420
0
        size_t hdr_len = 0;
421
0
        buf.cutn((void*)&hdr_len, sizeof(size_t));
422
0
        butil::IOBuf hdr_buf;
423
0
        PStreamHeader hdr;
424
0
        buf.cutn(&hdr_buf, hdr_len);
425
0
        butil::IOBufAsZeroCopyInputStream wrapper(hdr_buf);
426
0
        hdr.ParseFromZeroCopyStream(&wrapper);
427
428
        // step 2: cut data
429
0
        size_t data_len = 0;
430
0
        buf.cutn((void*)&data_len, sizeof(size_t));
431
0
        butil::IOBuf data_buf;
432
0
        buf.cutn(&data_buf, data_len);
433
434
        // step 3: handle failure
435
0
        switch (hdr.opcode()) {
436
0
        case PStreamHeader::ADD_SEGMENT:
437
0
        case PStreamHeader::APPEND_DATA: {
438
0
            DBUG_EXECUTE_IF("LoadStreamStub._handle_failure.append_data_failed", {
439
0
                add_failed_tablet(hdr.tablet_id(), st);
440
0
                return;
441
0
            });
442
0
            DBUG_EXECUTE_IF("LoadStreamStub._handle_failure.add_segment_failed", {
443
0
                add_failed_tablet(hdr.tablet_id(), st);
444
0
                return;
445
0
            });
446
0
            add_failed_tablet(hdr.tablet_id(), st);
447
0
        } break;
448
0
        case PStreamHeader::CLOSE_LOAD: {
449
0
            DBUG_EXECUTE_IF("LoadStreamStub._handle_failure.close_load_failed", {
450
0
                brpc::StreamClose(_stream_id);
451
0
                return;
452
0
            });
453
0
            brpc::StreamClose(_stream_id);
454
0
        } break;
455
0
        case PStreamHeader::GET_SCHEMA: {
456
0
            DBUG_EXECUTE_IF("LoadStreamStub._handle_failure.get_schema_failed", {
457
                // Just log and let wait_for_schema timeout
458
0
                std::ostringstream oss;
459
0
                for (const auto& tablet : hdr.tablets()) {
460
0
                    oss << " " << tablet.tablet_id();
461
0
                }
462
0
                LOG(WARNING) << "failed to send GET_SCHEMA request, tablet_id:" << oss.str() << ", "
463
0
                             << *this;
464
0
                return;
465
0
            });
466
            // Just log and let wait_for_schema timeout
467
0
            std::ostringstream oss;
468
0
            for (const auto& tablet : hdr.tablets()) {
469
0
                oss << " " << tablet.tablet_id();
470
0
            }
471
0
            LOG(WARNING) << "failed to send GET_SCHEMA request, tablet_id:" << oss.str() << ", "
472
0
                         << *this;
473
0
        } break;
474
0
        default:
475
0
            LOG(WARNING) << "unexpected stream message " << hdr.opcode() << ", " << *this;
476
0
            DCHECK(false);
477
0
        }
478
0
    }
479
0
}
480
481
28
Status LoadStreamStub::_send_with_retry(butil::IOBuf& buf) {
482
28
    for (;;) {
483
28
        RETURN_IF_ERROR(check_cancel());
484
28
        int ret;
485
28
        {
486
28
            DBUG_EXECUTE_IF("LoadStreamStub._send_with_retry.delay_before_send", {
487
28
                int64_t delay_ms = dp->param<int64_t>("delay_ms", 1000);
488
28
                bthread_usleep(delay_ms * 1000);
489
28
            });
490
28
            brpc::StreamWriteOptions options;
491
28
            options.write_in_background = config::enable_brpc_stream_write_background;
492
28
            ret = brpc::StreamWrite(_stream_id, buf, &options);
493
28
        }
494
28
        DBUG_EXECUTE_IF("LoadStreamStub._send_with_retry.stream_write_failed", { ret = EPIPE; });
495
28
        switch (ret) {
496
28
        case 0:
497
28
            return Status::OK();
498
0
        case EAGAIN: {
499
0
            const timespec time = butil::seconds_from_now(config::load_stream_eagain_wait_seconds);
500
0
            int wait_ret = brpc::StreamWait(_stream_id, &time);
501
0
            if (wait_ret != 0) {
502
0
                return Status::InternalError("StreamWait failed, err={}, {}", wait_ret,
503
0
                                             to_string());
504
0
            }
505
0
            break;
506
0
        }
507
0
        default:
508
0
            return Status::InternalError("StreamWrite failed, err={}, {}", ret, to_string());
509
28
        }
510
28
    }
511
28
}
512
513
void LoadStreamStub::_refresh_back_pressure_version_wait_time(
514
        const ::google::protobuf::RepeatedPtrField<::doris::PTabletLoadRowsetInfo>&
515
0
                tablet_load_infos) {
516
0
    int64_t max_rowset_num_gap = 0;
517
    // if any one tablet is under high load pressure, we would make the whole procedure
518
    // sleep to prevent the corresponding BE return -235
519
0
    std::for_each(
520
0
            tablet_load_infos.begin(), tablet_load_infos.end(),
521
0
            [&max_rowset_num_gap](auto& load_info) {
522
0
                int64_t cur_rowset_num = load_info.current_rowset_nums();
523
0
                int64_t high_load_point = load_info.max_config_rowset_nums() *
524
0
                                          (config::load_back_pressure_version_threshold / 100);
525
0
                DCHECK(cur_rowset_num > high_load_point);
526
0
                max_rowset_num_gap = std::max(max_rowset_num_gap, cur_rowset_num - high_load_point);
527
0
            });
528
    // to slow down the high load pressure
529
    // we would use the rowset num gap to calculate one sleep time
530
    // for example:
531
    // if the max tablet version is 2000, there are 3 BE
532
    // A: ====================  1800
533
    // B: ===================   1700
534
    // C: ==================    1600
535
    //    ==================    1600
536
    //                      ^
537
    //                      the high load point
538
// then the max gap is 1800 - (max tablet version * config::load_back_pressure_version_threshold / 100) = 200,
539
// we would make the whole send procedure sleep
540
// 1200ms for compaction to be done to reduce the high pressure
541
0
    auto max_time = config::max_load_back_pressure_version_wait_time_ms;
542
0
    if (UNLIKELY(max_rowset_num_gap > 0)) {
543
0
        _load_back_pressure_version_wait_time_ms.store(
544
0
                std::min(max_rowset_num_gap + 1000, max_time));
545
0
        LOG(INFO) << "try to back pressure version, wait time(ms): "
546
0
                  << _load_back_pressure_version_wait_time_ms << ", load id: " << print_id(_load_id)
547
0
                  << ", max_rowset_num_gap: " << max_rowset_num_gap;
548
0
    }
549
0
}
550
551
0
std::string LoadStreamStub::to_string() {
552
0
    std::ostringstream ss;
553
0
    ss << *this;
554
0
    return ss.str();
555
0
}
556
557
28
inline std::ostream& operator<<(std::ostream& ostr, const LoadStreamStub& stub) {
558
28
    ostr << "LoadStreamStub load_id=" << print_id(stub._load_id) << ", src_id=" << stub._src_id
559
28
         << ", dst_id=" << stub._dst_id << ", stream_id=" << stub._stream_id;
560
28
    return ostr;
561
28
}
562
563
Status LoadStreamStubs::open(BrpcClientCache<PBackendService_Stub>* client_cache,
564
                             const NodeInfo& node_info, int64_t txn_id,
565
                             const OlapTableSchemaParam& schema,
566
                             const std::vector<PTabletID>& tablets_for_schema, int total_streams,
567
14
                             int64_t idle_timeout_ms, bool enable_profile) {
568
14
    bool get_schema = true;
569
14
    auto status = Status::OK();
570
14
    bool first_stream = true;
571
28
    for (auto& stream : _streams) {
572
28
        Status st;
573
28
        if (get_schema) {
574
14
            st = stream->open(client_cache, node_info, txn_id, schema, tablets_for_schema,
575
14
                              total_streams, idle_timeout_ms, enable_profile);
576
14
        } else {
577
14
            st = stream->open(client_cache, node_info, txn_id, schema, {}, total_streams,
578
14
                              idle_timeout_ms, enable_profile);
579
14
        }
580
        // Simulate one stream open failure within LoadStreamStubs.
581
        // This causes the successfully opened streams to be cancelled,
582
        // reproducing the bug where cancelled streams cause close_wait timeout.
583
28
        DBUG_EXECUTE_IF("LoadStreamStubs.open.fail_one_stream", {
584
28
            if (st.ok() && !first_stream) {
585
28
                st = Status::InternalError("Injected stream open failure");
586
28
            }
587
28
        });
588
28
        if (st.ok()) {
589
28
            get_schema = false;
590
28
        } else {
591
0
            LOG(WARNING) << "open stream failed: " << st << "; stream: " << *stream;
592
0
            status = st;
593
// no break here, so we can try to get the schema from the remaining streams
594
0
        }
595
28
        first_stream = false;
596
28
    }
597
    // only mark open when all streams open success
598
14
    _open_success.store(status.ok());
599
    // cancel all streams if open failed
600
14
    if (!status.ok()) {
601
0
        cancel(status);
602
0
    }
603
14
    return status;
604
14
}
605
606
Status LoadStreamStubs::close_load(const std::vector<PTabletID>& tablets_to_commit,
607
14
                                   int num_incremental_streams) {
608
14
    if (!_open_success.load()) {
609
0
        return Status::InternalError("streams not open");
610
0
    }
611
14
    bool first = true;
612
14
    auto status = Status::OK();
613
28
    for (auto& stream : _streams) {
614
28
        Status st;
615
28
        if (first) {
616
14
            st = stream->close_load(tablets_to_commit, num_incremental_streams);
617
14
            first = false;
618
14
        } else {
619
14
            st = stream->close_load({}, num_incremental_streams);
620
14
        }
621
28
        if (!st.ok()) {
622
            LOG(WARNING) << "close_load failed: " << st << "; stream: " << *stream;
623
0
        }
624
28
    }
625
14
    return status;
626
14
}
627
628
} // namespace doris