Coverage Report

Created: 2026-04-22 09:47

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/load/channel/load_stream.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "load/channel/load_stream.h"
19
20
#include <brpc/stream.h>
21
#include <bthread/bthread.h>
22
#include <bthread/condition_variable.h>
23
#include <bthread/mutex.h>
24
25
#include <memory>
26
#include <sstream>
27
28
#include "bvar/bvar.h"
29
#include "cloud/config.h"
30
#include "common/signal_handler.h"
31
#include "load/channel/load_channel.h"
32
#include "load/channel/load_stream_mgr.h"
33
#include "load/channel/load_stream_writer.h"
34
#include "load/delta_writer/delta_writer.h"
35
#include "runtime/exec_env.h"
36
#include "runtime/fragment_mgr.h"
37
#include "runtime/runtime_profile.h"
38
#include "runtime/workload_group/workload_group_manager.h"
39
#include "storage/rowset/rowset_factory.h"
40
#include "storage/rowset/rowset_meta.h"
41
#include "storage/storage_engine.h"
42
#include "storage/tablet/tablet.h"
43
#include "storage/tablet/tablet_fwd.h"
44
#include "storage/tablet/tablet_manager.h"
45
#include "storage/tablet/tablet_schema.h"
46
#include "storage/tablet_info.h"
47
#include "util/debug_points.h"
48
#include "util/thrift_util.h"
49
#include "util/uid_util.h"
50
51
#define UNKNOWN_ID_FOR_TEST 0x7c00
52
53
namespace doris {
54
55
bvar::Adder<int64_t> g_load_stream_cnt("load_stream_count");
56
bvar::LatencyRecorder g_load_stream_flush_wait_ms("load_stream_flush_wait_ms");
57
bvar::Adder<int> g_load_stream_flush_running_threads("load_stream_flush_wait_threads");
58
59
TabletStream::TabletStream(const PUniqueId& load_id, int64_t id, int64_t txn_id,
60
                           LoadStreamMgr* load_stream_mgr, RuntimeProfile* profile)
61
154
        : _id(id),
62
154
          _next_segid(0),
63
154
          _load_id(load_id),
64
154
          _txn_id(txn_id),
65
154
          _load_stream_mgr(load_stream_mgr) {
66
154
    load_stream_mgr->create_token(_flush_token);
67
154
    _profile = profile->create_child(fmt::format("TabletStream {}", id), true, true);
68
154
    _append_data_timer = ADD_TIMER(_profile, "AppendDataTime");
69
154
    _add_segment_timer = ADD_TIMER(_profile, "AddSegmentTime");
70
154
    _close_wait_timer = ADD_TIMER(_profile, "CloseWaitTime");
71
154
}
72
73
10
inline std::ostream& operator<<(std::ostream& ostr, const TabletStream& tablet_stream) {
74
10
    ostr << "load_id=" << print_id(tablet_stream._load_id) << ", txn_id=" << tablet_stream._txn_id
75
10
         << ", tablet_id=" << tablet_stream._id << ", status=" << tablet_stream._status.status();
76
10
    return ostr;
77
10
}
78
79
Status TabletStream::init(std::shared_ptr<OlapTableSchemaParam> schema, int64_t index_id,
80
154
                          int64_t partition_id) {
81
154
    WriteRequest req {
82
154
            .tablet_id = _id,
83
154
            .txn_id = _txn_id,
84
154
            .index_id = index_id,
85
154
            .partition_id = partition_id,
86
154
            .load_id = _load_id,
87
154
            .table_schema_param = schema,
88
            // TODO(plat1ko): write_file_cache
89
154
            .storage_vault_id {},
90
154
    };
91
92
154
    _load_stream_writer = std::make_shared<LoadStreamWriter>(&req, _profile);
93
154
    DBUG_EXECUTE_IF("TabletStream.init.uninited_writer", {
94
154
        _status.update(Status::Uninitialized("fault injection"));
95
154
        return _status.status();
96
154
    });
97
154
    _status.update(_load_stream_writer->init());
98
154
    if (!_status.ok()) {
99
1
        LOG(INFO) << "failed to init rowset builder due to " << *this;
100
1
    }
101
154
    return _status.status();
102
154
}
103
104
2.26k
Status TabletStream::append_data(const PStreamHeader& header, butil::IOBuf* data) {
105
2.26k
    if (!_status.ok()) {
106
1
        return _status.status();
107
1
    }
108
109
    // dispatch add_segment request
110
2.26k
    if (header.opcode() == PStreamHeader::ADD_SEGMENT) {
111
34
        return add_segment(header, data);
112
34
    }
113
114
2.23k
    SCOPED_TIMER(_append_data_timer);
115
116
2.23k
    int64_t src_id = header.src_id();
117
2.23k
    uint32_t segid = header.segment_id();
118
    // Ensure there are enough space and mapping are built.
119
2.23k
    SegIdMapping* mapping = nullptr;
120
2.23k
    {
121
2.23k
        std::lock_guard lock_guard(_lock);
122
2.23k
        if (!_segids_mapping.contains(src_id)) {
123
48
            _segids_mapping[src_id] = std::make_unique<SegIdMapping>();
124
48
        }
125
2.23k
        mapping = _segids_mapping[src_id].get();
126
2.23k
    }
127
2.23k
    if (segid + 1 > mapping->size()) {
128
        // TODO: Each sender lock is enough.
129
49
        std::lock_guard lock_guard(_lock);
130
49
        ssize_t origin_size = mapping->size();
131
49
        if (segid + 1 > origin_size) {
132
49
            mapping->resize(segid + 1, std::numeric_limits<uint32_t>::max());
133
107
            for (size_t index = origin_size; index <= segid; index++) {
134
58
                mapping->at(index) = _next_segid;
135
58
                _next_segid++;
136
58
                VLOG_DEBUG << "src_id=" << src_id << ", segid=" << index << " to "
137
0
                           << " segid=" << _next_segid - 1 << ", " << *this;
138
58
            }
139
49
        }
140
49
    }
141
142
    // Each sender sends data in one segment sequential, so we also do not
143
    // need a lock here.
144
2.23k
    bool eos = header.segment_eos();
145
2.23k
    FileType file_type = header.file_type();
146
2.23k
    uint32_t new_segid = mapping->at(segid);
147
2.23k
    DCHECK(new_segid != std::numeric_limits<uint32_t>::max());
148
2.23k
    butil::IOBuf buf = data->movable();
149
2.23k
    auto flush_func = [this, new_segid, eos, buf, header, file_type]() mutable {
150
2.23k
        signal::set_signal_task_id(_load_id);
151
2.23k
        g_load_stream_flush_running_threads << -1;
152
2.23k
        auto st = _load_stream_writer->append_data(new_segid, header.offset(), buf, file_type);
153
2.23k
        if (!st.ok() && !config::is_cloud_mode()) {
154
1
            auto res = ExecEnv::get_tablet(_id);
155
1
            TabletSharedPtr tablet =
156
1
                    res.has_value() ? std::dynamic_pointer_cast<Tablet>(res.value()) : nullptr;
157
1
            if (tablet) {
158
1
                tablet->report_error(st);
159
1
            }
160
1
        }
161
2.23k
        if (eos && st.ok()) {
162
55
            DBUG_EXECUTE_IF("TabletStream.append_data.unknown_file_type",
163
55
                            { file_type = static_cast<FileType>(-1); });
164
55
            if (file_type == FileType::SEGMENT_FILE || file_type == FileType::INVERTED_INDEX_FILE) {
165
55
                st = _load_stream_writer->close_writer(new_segid, file_type);
166
55
            } else {
167
0
                st = Status::InternalError(
168
0
                        "appent data failed, file type error, file type = {}, "
169
0
                        "segment_id={}",
170
0
                        file_type, new_segid);
171
0
            }
172
55
        }
173
2.23k
        DBUG_EXECUTE_IF("TabletStream.append_data.append_failed",
174
2.23k
                        { st = Status::InternalError("fault injection"); });
175
2.23k
        if (!st.ok()) {
176
2
            _status.update(st);
177
2
            LOG(WARNING) << "write data failed " << st << ", " << *this;
178
2
        }
179
2.23k
    };
180
2.23k
    auto load_stream_flush_token_max_tasks = config::load_stream_flush_token_max_tasks;
181
2.23k
    auto load_stream_max_wait_flush_token_time_ms =
182
2.23k
            config::load_stream_max_wait_flush_token_time_ms;
183
2.23k
    DBUG_EXECUTE_IF("TabletStream.append_data.long_wait", {
184
2.23k
        load_stream_flush_token_max_tasks = 0;
185
2.23k
        load_stream_max_wait_flush_token_time_ms = 1000;
186
2.23k
    });
187
2.23k
    MonotonicStopWatch timer;
188
2.23k
    timer.start();
189
2.23k
    while (_flush_token->num_tasks() >= load_stream_flush_token_max_tasks) {
190
2
        if (timer.elapsed_time() / 1000 / 1000 >= load_stream_max_wait_flush_token_time_ms) {
191
0
            _status.update(
192
0
                    Status::Error<true>("wait flush token back pressure time is more than "
193
0
                                        "load_stream_max_wait_flush_token_time {}",
194
0
                                        load_stream_max_wait_flush_token_time_ms));
195
0
            return _status.status();
196
0
        }
197
2
        bthread_usleep(2 * 1000); // 2ms
198
2
    }
199
2.23k
    timer.stop();
200
2.23k
    int64_t time_ms = timer.elapsed_time() / 1000 / 1000;
201
2.23k
    g_load_stream_flush_wait_ms << time_ms;
202
2.23k
    g_load_stream_flush_running_threads << 1;
203
2.23k
    Status st = Status::OK();
204
2.23k
    DBUG_EXECUTE_IF("TabletStream.append_data.submit_func_failed",
205
2.23k
                    { st = Status::InternalError("fault injection"); });
206
2.23k
    if (st.ok()) {
207
2.23k
        st = _flush_token->submit_func(flush_func);
208
2.23k
    }
209
2.23k
    if (!st.ok()) {
210
0
        _status.update(st);
211
0
    }
212
2.23k
    return _status.status();
213
2.23k
}
214
215
34
Status TabletStream::add_segment(const PStreamHeader& header, butil::IOBuf* data) {
216
34
    if (!_status.ok()) {
217
0
        return _status.status();
218
0
    }
219
220
34
    SCOPED_TIMER(_add_segment_timer);
221
34
    DCHECK(header.has_segment_statistics());
222
34
    SegmentStatistics stat(header.segment_statistics());
223
224
34
    int64_t src_id = header.src_id();
225
34
    uint32_t segid = header.segment_id();
226
34
    uint32_t new_segid;
227
34
    DBUG_EXECUTE_IF("TabletStream.add_segment.unknown_segid", { segid = UNKNOWN_ID_FOR_TEST; });
228
34
    {
229
34
        std::lock_guard lock_guard(_lock);
230
34
        if (!_segids_mapping.contains(src_id)) {
231
0
            _status.update(Status::InternalError(
232
0
                    "add segment failed, no segment written by this src be yet, src_id={}, "
233
0
                    "segment_id={}",
234
0
                    src_id, segid));
235
0
            return _status.status();
236
0
        }
237
34
        DBUG_EXECUTE_IF("TabletStream.add_segment.segid_never_written",
238
34
                        { segid = static_cast<uint32_t>(_segids_mapping[src_id]->size()); });
239
34
        if (segid >= _segids_mapping[src_id]->size()) {
240
0
            _status.update(Status::InternalError(
241
0
                    "add segment failed, segment is never written, src_id={}, segment_id={}",
242
0
                    src_id, segid));
243
0
            return _status.status();
244
0
        }
245
34
        new_segid = _segids_mapping[src_id]->at(segid);
246
34
    }
247
34
    DCHECK(new_segid != std::numeric_limits<uint32_t>::max());
248
249
34
    auto add_segment_func = [this, new_segid, stat]() {
250
34
        signal::set_signal_task_id(_load_id);
251
34
        auto st = _load_stream_writer->add_segment(new_segid, stat);
252
34
        DBUG_EXECUTE_IF("TabletStream.add_segment.add_segment_failed",
253
34
                        { st = Status::InternalError("fault injection"); });
254
34
        if (!st.ok()) {
255
0
            _status.update(st);
256
0
            LOG(INFO) << "add segment failed " << *this;
257
0
        }
258
34
    };
259
34
    Status st = Status::OK();
260
34
    DBUG_EXECUTE_IF("TabletStream.add_segment.submit_func_failed",
261
34
                    { st = Status::InternalError("fault injection"); });
262
34
    if (st.ok()) {
263
34
        st = _flush_token->submit_func(add_segment_func);
264
34
    }
265
34
    if (!st.ok()) {
266
0
        _status.update(st);
267
0
    }
268
34
    return _status.status();
269
34
}
270
271
449
Status TabletStream::_run_in_heavy_work_pool(std::function<Status()> fn) {
272
449
    bthread::Mutex mu;
273
449
    std::unique_lock<bthread::Mutex> lock(mu);
274
449
    bthread::ConditionVariable cv;
275
449
    auto st = Status::OK();
276
449
    auto func = [this, &mu, &cv, &st, &fn] {
277
449
        signal::set_signal_task_id(_load_id);
278
449
        st = fn();
279
449
        std::lock_guard<bthread::Mutex> lock(mu);
280
449
        cv.notify_one();
281
449
    };
282
449
    bool ret = _load_stream_mgr->heavy_work_pool()->try_offer(func);
283
449
    if (!ret) {
284
0
        return Status::Error<ErrorCode::INTERNAL_ERROR>(
285
0
                "there is not enough thread resource for close load");
286
0
    }
287
449
    cv.wait(lock);
288
449
    return st;
289
449
}
290
291
308
void TabletStream::wait_for_flush_tasks() {
292
308
    {
293
308
        std::lock_guard lock_guard(_lock);
294
308
        if (_flush_tasks_done) {
295
154
            return;
296
154
        }
297
154
        _flush_tasks_done = true;
298
154
    }
299
300
154
    if (!_status.ok()) {
301
2
        _flush_token->shutdown();
302
2
        return;
303
2
    }
304
305
    // Use heavy_work_pool to avoid blocking bthread
306
152
    auto st = _run_in_heavy_work_pool([this]() {
307
152
        _flush_token->wait();
308
152
        return Status::OK();
309
152
    });
310
152
    if (!st.ok()) {
311
        // If heavy_work_pool is unavailable, fall back to shutdown
312
        // which will cancel pending tasks and wait for running tasks
313
0
        _flush_token->shutdown();
314
0
        _status.update(st);
315
0
    }
316
152
}
317
318
154
void TabletStream::pre_close() {
319
154
    SCOPED_TIMER(_close_wait_timer);
320
154
    wait_for_flush_tasks();
321
322
154
    if (!_status.ok()) {
323
3
        return;
324
3
    }
325
326
151
    DBUG_EXECUTE_IF("TabletStream.close.segment_num_mismatch", { _num_segments++; });
327
151
    if (_check_num_segments && (_next_segid.load() != _num_segments)) {
328
2
        _status.update(Status::Corruption(
329
2
                "segment num mismatch in tablet {}, expected: {}, actual: {}, load_id: {}", _id,
330
2
                _num_segments, _next_segid.load(), print_id(_load_id)));
331
2
        return;
332
2
    }
333
334
149
    _status.update(_run_in_heavy_work_pool([this]() { return _load_stream_writer->pre_close(); }));
335
149
}
336
337
154
Status TabletStream::close() {
338
154
    if (!_status.ok()) {
339
6
        return _status.status();
340
6
    }
341
342
148
    SCOPED_TIMER(_close_wait_timer);
343
148
    _status.update(_run_in_heavy_work_pool([this]() { return _load_stream_writer->close(); }));
344
148
    return _status.status();
345
154
}
346
347
IndexStream::IndexStream(const PUniqueId& load_id, int64_t id, int64_t txn_id,
348
                         std::shared_ptr<OlapTableSchemaParam> schema,
349
                         LoadStreamMgr* load_stream_mgr, RuntimeProfile* profile)
350
48
        : _id(id),
351
48
          _load_id(load_id),
352
48
          _txn_id(txn_id),
353
48
          _schema(schema),
354
48
          _load_stream_mgr(load_stream_mgr) {
355
48
    _profile = profile->create_child(fmt::format("IndexStream {}", id), true, true);
356
48
    _append_data_timer = ADD_TIMER(_profile, "AppendDataTime");
357
48
    _close_wait_timer = ADD_TIMER(_profile, "CloseWaitTime");
358
48
}
359
360
48
IndexStream::~IndexStream() {
361
    // Ensure all TabletStreams have their flush tokens properly handled before destruction.
362
    // In normal flow, close() should have called pre_close() on all tablet streams.
363
    // But if IndexStream is destroyed without close() being called (e.g., on_idle_timeout),
364
    // we need to wait for flush tasks here to ensure flush tokens are properly shut down.
365
154
    for (auto& [_, tablet_stream] : _tablet_streams_map) {
366
154
        tablet_stream->wait_for_flush_tasks();
367
154
    }
368
48
}
369
370
2.27k
Status IndexStream::append_data(const PStreamHeader& header, butil::IOBuf* data) {
371
2.27k
    SCOPED_TIMER(_append_data_timer);
372
2.27k
    int64_t tablet_id = header.tablet_id();
373
2.27k
    TabletStreamSharedPtr tablet_stream;
374
2.27k
    {
375
2.27k
        std::lock_guard lock_guard(_lock);
376
2.27k
        auto it = _tablet_streams_map.find(tablet_id);
377
2.27k
        if (it == _tablet_streams_map.end()) {
378
47
            _init_tablet_stream(tablet_stream, tablet_id, header.partition_id());
379
2.22k
        } else {
380
2.22k
            tablet_stream = it->second;
381
2.22k
        }
382
2.27k
    }
383
384
2.27k
    return tablet_stream->append_data(header, data);
385
2.27k
}
386
387
void IndexStream::_init_tablet_stream(TabletStreamSharedPtr& tablet_stream, int64_t tablet_id,
388
154
                                      int64_t partition_id) {
389
154
    tablet_stream = std::make_shared<TabletStream>(_load_id, tablet_id, _txn_id, _load_stream_mgr,
390
154
                                                   _profile);
391
154
    _tablet_streams_map[tablet_id] = tablet_stream;
392
154
    auto st = tablet_stream->init(_schema, _id, partition_id);
393
154
    if (!st.ok()) {
394
1
        LOG(WARNING) << "tablet stream init failed " << *tablet_stream;
395
1
    }
396
154
}
397
398
34
void IndexStream::get_all_write_tablet_ids(std::vector<int64_t>* tablet_ids) {
399
34
    std::lock_guard lock_guard(_lock);
400
78
    for (const auto& [tablet_id, _] : _tablet_streams_map) {
401
78
        tablet_ids->push_back(tablet_id);
402
78
    }
403
34
}
404
405
void IndexStream::close(const std::vector<PTabletID>& tablets_to_commit,
406
48
                        std::vector<int64_t>* success_tablet_ids, FailedTablets* failed_tablets) {
407
48
    std::lock_guard lock_guard(_lock);
408
48
    SCOPED_TIMER(_close_wait_timer);
409
    // open all need commit tablets
410
174
    for (const auto& tablet : tablets_to_commit) {
411
174
        if (_id != tablet.index_id()) {
412
18
            continue;
413
18
        }
414
156
        TabletStreamSharedPtr tablet_stream;
415
156
        auto it = _tablet_streams_map.find(tablet.tablet_id());
416
156
        if (it == _tablet_streams_map.end()) {
417
107
            _init_tablet_stream(tablet_stream, tablet.tablet_id(), tablet.partition_id());
418
107
        } else {
419
49
            tablet_stream = it->second;
420
49
        }
421
156
        if (tablet.has_num_segments()) {
422
156
            tablet_stream->add_num_segments(tablet.num_segments());
423
156
        } else {
424
            // for compatibility reasons (sink from old version BE)
425
0
            tablet_stream->disable_num_segments_check();
426
0
        }
427
156
    }
428
429
154
    for (auto& [_, tablet_stream] : _tablet_streams_map) {
430
154
        tablet_stream->pre_close();
431
154
    }
432
433
154
    for (auto& [_, tablet_stream] : _tablet_streams_map) {
434
154
        auto st = tablet_stream->close();
435
154
        if (st.ok()) {
436
148
            success_tablet_ids->push_back(tablet_stream->id());
437
148
        } else {
438
6
            LOG(INFO) << "close tablet stream " << *tablet_stream << ", status=" << st;
439
6
            failed_tablets->emplace_back(tablet_stream->id(), st);
440
6
        }
441
154
    }
442
48
}
443
444
// TODO: Profile is temporary disabled, because:
445
// 1. It's not being processed by the upstream for now
446
// 2. There are some problems in _profile->to_thrift()
447
LoadStream::LoadStream(const PUniqueId& load_id, LoadStreamMgr* load_stream_mgr,
448
                       bool enable_profile)
449
20
        : _load_id(load_id), _enable_profile(false), _load_stream_mgr(load_stream_mgr) {
450
20
    g_load_stream_cnt << 1;
451
20
    _profile = std::make_unique<RuntimeProfile>("LoadStream");
452
20
    _append_data_timer = ADD_TIMER(_profile, "AppendDataTime");
453
20
    _close_wait_timer = ADD_TIMER(_profile, "CloseWaitTime");
454
20
    TUniqueId load_tid = ((UniqueId)load_id).to_thrift();
455
20
#ifndef BE_TEST
456
20
    std::shared_ptr<QueryContext> query_context =
457
20
            ExecEnv::GetInstance()->fragment_mgr()->get_query_ctx(load_tid);
458
20
    if (query_context != nullptr) {
459
20
        _resource_ctx = query_context->resource_ctx();
460
20
    } else {
461
0
        _resource_ctx = ResourceContext::create_shared();
462
0
        _resource_ctx->task_controller()->set_task_id(load_tid);
463
0
        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
464
0
                MemTrackerLimiter::Type::LOAD,
465
0
                fmt::format("(FromLoadStream)Load#Id={}", ((UniqueId)load_id).to_string()));
466
0
        _resource_ctx->memory_context()->set_mem_tracker(mem_tracker);
467
0
    }
468
#else
469
    _resource_ctx = ResourceContext::create_shared();
470
    _resource_ctx->task_controller()->set_task_id(load_tid);
471
    std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
472
            MemTrackerLimiter::Type::LOAD,
473
            fmt::format("(FromLoadStream)Load#Id={}", ((UniqueId)load_id).to_string()));
474
    _resource_ctx->memory_context()->set_mem_tracker(mem_tracker);
475
#endif
476
20
}
477
478
34
LoadStream::~LoadStream() {
479
34
    g_load_stream_cnt << -1;
480
34
    LOG(INFO) << "load stream is deconstructed " << *this;
481
34
}
482
483
34
Status LoadStream::init(const POpenLoadStreamRequest* request) {
484
34
    _txn_id = request->txn_id();
485
34
    _total_streams = static_cast<int32_t>(request->total_streams());
486
34
    _is_incremental = (_total_streams == 0);
487
488
34
    _schema = std::make_shared<OlapTableSchemaParam>();
489
34
    RETURN_IF_ERROR(_schema->init(request->schema()));
490
48
    for (auto& index : request->schema().indexes()) {
491
48
        _index_streams_map[index.id()] = std::make_shared<IndexStream>(
492
48
                _load_id, index.id(), _txn_id, _schema, _load_stream_mgr, _profile.get());
493
48
    }
494
34
    LOG(INFO) << "succeed to init load stream " << *this;
495
34
    return Status::OK();
496
34
}
497
498
bool LoadStream::close(int64_t src_id, const std::vector<PTabletID>& tablets_to_commit,
499
56
                       std::vector<int64_t>* success_tablet_ids, FailedTablets* failed_tablets) {
500
56
    std::lock_guard<bthread::Mutex> lock_guard(_lock);
501
56
    SCOPED_TIMER(_close_wait_timer);
502
503
    // we do nothing until recv CLOSE_LOAD from all stream to ensure all data are handled before ack
504
56
    _open_streams[src_id]--;
505
56
    if (_open_streams[src_id] == 0) {
506
36
        _open_streams.erase(src_id);
507
36
    }
508
56
    _close_load_cnt++;
509
56
    LOG(INFO) << "received CLOSE_LOAD from sender " << src_id << ", remaining "
510
56
              << _total_streams - _close_load_cnt << " senders, " << *this;
511
512
56
    _tablets_to_commit.insert(_tablets_to_commit.end(), tablets_to_commit.begin(),
513
56
                              tablets_to_commit.end());
514
515
56
    if (_close_load_cnt < _total_streams) {
516
        // do not return commit info if there is remaining streams.
517
22
        return false;
518
22
    }
519
520
48
    for (auto& [_, index_stream] : _index_streams_map) {
521
48
        index_stream->close(_tablets_to_commit, success_tablet_ids, failed_tablets);
522
48
    }
523
34
    LOG(INFO) << "close load " << *this << ", success_tablet_num=" << success_tablet_ids->size()
524
34
              << ", failed_tablet_num=" << failed_tablets->size();
525
34
    return true;
526
56
}
527
528
void LoadStream::_report_result(StreamId stream, const Status& status,
529
                                const std::vector<int64_t>& success_tablet_ids,
530
76
                                const FailedTablets& failed_tablets, bool eos) {
531
76
    LOG(INFO) << "report result " << *this << ", success tablet num " << success_tablet_ids.size()
532
76
              << ", failed tablet num " << failed_tablets.size();
533
76
    butil::IOBuf buf;
534
76
    PLoadStreamResponse response;
535
76
    response.set_eos(eos);
536
76
    status.to_protobuf(response.mutable_status());
537
148
    for (auto& id : success_tablet_ids) {
538
148
        response.add_success_tablet_ids(id);
539
148
    }
540
76
    for (auto& [id, st] : failed_tablets) {
541
10
        auto pb = response.add_failed_tablets();
542
10
        pb->set_id(id);
543
10
        st.to_protobuf(pb->mutable_status());
544
10
    }
545
546
76
    if (_enable_profile && _close_load_cnt == _total_streams) {
547
0
        TRuntimeProfileTree tprofile;
548
0
        ThriftSerializer ser(false, 4096);
549
0
        uint8_t* profile_buf = nullptr;
550
0
        uint32_t len = 0;
551
0
        std::unique_lock<bthread::Mutex> l(_lock);
552
553
0
        _profile->to_thrift(&tprofile);
554
0
        auto st = ser.serialize(&tprofile, &len, &profile_buf);
555
0
        if (st.ok()) {
556
0
            response.set_load_stream_profile(profile_buf, len);
557
0
        } else {
558
0
            LOG(WARNING) << "TRuntimeProfileTree serialize failed, errmsg=" << st << ", " << *this;
559
0
        }
560
0
    }
561
562
76
    buf.append(response.SerializeAsString());
563
76
    auto wst = _write_stream(stream, buf);
564
76
    if (!wst.ok()) {
565
0
        LOG(WARNING) << " report result failed with " << wst << ", " << *this;
566
0
    }
567
76
}
568
569
0
void LoadStream::_report_schema(StreamId stream, const PStreamHeader& hdr) {
570
0
    butil::IOBuf buf;
571
0
    PLoadStreamResponse response;
572
0
    Status st = Status::OK();
573
0
    for (const auto& req : hdr.tablets()) {
574
0
        BaseTabletSPtr tablet;
575
0
        if (auto res = ExecEnv::get_tablet(req.tablet_id()); res.has_value()) {
576
0
            tablet = std::move(res).value();
577
0
        } else {
578
0
            st = std::move(res).error();
579
0
            break;
580
0
        }
581
0
        auto* resp = response.add_tablet_schemas();
582
0
        resp->set_index_id(req.index_id());
583
0
        resp->set_enable_unique_key_merge_on_write(tablet->enable_unique_key_merge_on_write());
584
0
        tablet->tablet_schema()->to_schema_pb(resp->mutable_tablet_schema());
585
0
    }
586
0
    st.to_protobuf(response.mutable_status());
587
588
0
    buf.append(response.SerializeAsString());
589
0
    auto wst = _write_stream(stream, buf);
590
0
    if (!wst.ok()) {
591
0
        LOG(WARNING) << " report result failed with " << wst << ", " << *this;
592
0
    }
593
0
}
594
595
34
void LoadStream::_report_tablet_load_info(StreamId stream, int64_t index_id) {
596
34
    std::vector<int64_t> write_tablet_ids;
597
34
    auto it = _index_streams_map.find(index_id);
598
34
    if (it != _index_streams_map.end()) {
599
34
        it->second->get_all_write_tablet_ids(&write_tablet_ids);
600
34
    }
601
602
34
    if (!write_tablet_ids.empty()) {
603
34
        butil::IOBuf buf;
604
34
        PLoadStreamResponse response;
605
34
        auto* tablet_load_infos = response.mutable_tablet_load_rowset_num_infos();
606
34
        _collect_tablet_load_info_from_tablets(write_tablet_ids, tablet_load_infos);
607
34
        if (tablet_load_infos->empty()) {
608
34
            return;
609
34
        }
610
0
        buf.append(response.SerializeAsString());
611
0
        auto wst = _write_stream(stream, buf);
612
0
        if (!wst.ok()) {
613
0
            LOG(WARNING) << "report tablet load info failed with " << wst << ", " << *this;
614
0
        }
615
0
    }
616
34
}
617
618
void LoadStream::_collect_tablet_load_info_from_tablets(
619
        const std::vector<int64_t>& tablet_ids,
620
34
        google::protobuf::RepeatedPtrField<PTabletLoadRowsetInfo>* tablet_load_infos) {
621
78
    for (auto tablet_id : tablet_ids) {
622
78
        BaseTabletSPtr tablet;
623
78
        if (auto res = ExecEnv::get_tablet(tablet_id); res.has_value()) {
624
78
            tablet = std::move(res).value();
625
78
        } else {
626
0
            continue;
627
0
        }
628
78
        BaseDeltaWriter::collect_tablet_load_rowset_num_info(tablet.get(), tablet_load_infos);
629
78
    }
630
34
}
631
632
76
Status LoadStream::_write_stream(StreamId stream, butil::IOBuf& buf) {
633
76
    for (;;) {
634
76
        int ret = 0;
635
76
        DBUG_EXECUTE_IF("LoadStream._write_stream.EAGAIN", { ret = EAGAIN; });
636
76
        if (ret == 0) {
637
76
            ret = brpc::StreamWrite(stream, buf);
638
76
        }
639
76
        switch (ret) {
640
76
        case 0:
641
76
            return Status::OK();
642
0
        case EAGAIN: {
643
0
            const timespec time = butil::seconds_from_now(config::load_stream_eagain_wait_seconds);
644
0
            int wait_ret = brpc::StreamWait(stream, &time);
645
0
            if (wait_ret != 0) {
646
0
                return Status::InternalError("StreamWait failed, err={}", wait_ret);
647
0
            }
648
0
            break;
649
0
        }
650
0
        default:
651
0
            return Status::InternalError("StreamWrite failed, err={}", ret);
652
76
        }
653
76
    }
654
0
    return Status::OK();
655
76
}
656
657
2.34k
void LoadStream::_parse_header(butil::IOBuf* const message, PStreamHeader& hdr) {
658
2.34k
    butil::IOBufAsZeroCopyInputStream wrapper(*message);
659
2.34k
    hdr.ParseFromZeroCopyStream(&wrapper);
660
18.4E
    VLOG_DEBUG << "header parse result: " << hdr.DebugString();
661
2.34k
}
662
663
2.27k
Status LoadStream::_append_data(const PStreamHeader& header, butil::IOBuf* data) {
664
2.27k
    SCOPED_TIMER(_append_data_timer);
665
2.27k
    IndexStreamSharedPtr index_stream;
666
667
2.27k
    int64_t index_id = header.index_id();
668
2.27k
    DBUG_EXECUTE_IF("TabletStream._append_data.unknown_indexid",
669
2.27k
                    { index_id = UNKNOWN_ID_FOR_TEST; });
670
2.27k
    auto it = _index_streams_map.find(index_id);
671
2.27k
    if (it == _index_streams_map.end()) {
672
1
        return Status::Error<ErrorCode::INVALID_ARGUMENT>("unknown index_id {}", index_id);
673
2.27k
    } else {
674
2.27k
        index_stream = it->second;
675
2.27k
    }
676
677
2.27k
    return index_stream->append_data(header, data);
678
2.27k
}
679
680
89
int LoadStream::on_received_messages(StreamId id, butil::IOBuf* const messages[], size_t size) {
681
89
    VLOG_DEBUG << "on_received_messages " << id << " " << size;
682
191
    for (size_t i = 0; i < size; ++i) {
683
2.44k
        while (messages[i]->size() > 0) {
684
            // step 1: parse header
685
2.34k
            size_t hdr_len = 0;
686
2.34k
            messages[i]->cutn((void*)&hdr_len, sizeof(size_t));
687
2.34k
            butil::IOBuf hdr_buf;
688
2.34k
            PStreamHeader hdr;
689
2.34k
            messages[i]->cutn(&hdr_buf, hdr_len);
690
2.34k
            _parse_header(&hdr_buf, hdr);
691
692
            // step 2: cut data
693
2.34k
            size_t data_len = 0;
694
2.34k
            messages[i]->cutn((void*)&data_len, sizeof(size_t));
695
2.34k
            butil::IOBuf data_buf;
696
2.34k
            PStreamHeader data;
697
2.34k
            messages[i]->cutn(&data_buf, data_len);
698
699
            // step 3: dispatch
700
2.34k
            _dispatch(id, hdr, &data_buf);
701
2.34k
        }
702
102
    }
703
89
    return 0;
704
89
}
705
706
2.34k
// Central message router for one decoded frame: validates load id and source
// id, then acts on the opcode (append data, close the load, or report schema).
// Errors are reported back to the sender via _report_failure rather than
// returned. NOTE(review): statement order matters here — the _lock scopes and
// the StreamClose sequencing below are deliberate; do not reorder.
void LoadStream::_dispatch(StreamId id, const PStreamHeader& hdr, butil::IOBuf* data) {
    VLOG_DEBUG << PStreamHeader_Opcode_Name(hdr.opcode()) << " from " << hdr.src_id()
               << " with tablet " << hdr.tablet_id();
    SCOPED_ATTACH_TASK(_resource_ctx);
    // CLOSE_LOAD message should not be fault injected,
    // otherwise the message will be ignored and causing close wait timeout
    if (hdr.opcode() != PStreamHeader::CLOSE_LOAD) {
        // Fault injection: corrupt the load id so the check below rejects it.
        // const_cast is test-only plumbing; hdr is never mutated in production.
        DBUG_EXECUTE_IF("LoadStream._dispatch.unknown_loadid", {
            PStreamHeader& t_hdr = const_cast<PStreamHeader&>(hdr);
            PUniqueId* load_id = t_hdr.mutable_load_id();
            load_id->set_hi(UNKNOWN_ID_FOR_TEST);
            load_id->set_lo(UNKNOWN_ID_FOR_TEST);
        });
        // Fault injection: corrupt the sender id to exercise the
        // "no open stream" rejection path.
        DBUG_EXECUTE_IF("LoadStream._dispatch.unknown_srcid", {
            PStreamHeader& t_hdr = const_cast<PStreamHeader&>(hdr);
            t_hdr.set_src_id(UNKNOWN_ID_FOR_TEST);
        });
    }
    // Reject frames addressed to a different load than the one this stream
    // was opened for.
    if (UniqueId(hdr.load_id()) != UniqueId(_load_id)) {
        Status st = Status::Error<ErrorCode::INVALID_ARGUMENT>(
                "invalid load id {}, expected {}", print_id(hdr.load_id()), print_id(_load_id));
        _report_failure(id, st, hdr);
        return;
    }

    // The sender must have registered via an open stream; check under _lock,
    // then release it before doing any heavy per-opcode work.
    {
        std::lock_guard lock_guard(_lock);
        if (!_open_streams.contains(hdr.src_id())) {
            Status st = Status::Error<ErrorCode::INVALID_ARGUMENT>("no open stream from source {}",
                                                                   hdr.src_id());
            _report_failure(id, st, hdr);
            return;
        }
    }

    switch (hdr.opcode()) {
    case PStreamHeader::ADD_SEGMENT: {
        auto st = _append_data(hdr, data);
        if (!st.ok()) {
            _report_failure(id, st, hdr);
        } else {
            // Report tablet load info only on ADD_SEGMENT to reduce frequency.
            // ADD_SEGMENT is sent once per segment, while APPEND_DATA is sent
            // for every data batch. This reduces unnecessary writes and avoids
            // potential stream write failures when the sender is closing.
            _report_tablet_load_info(id, hdr.index_id());
        }
    } break;
    case PStreamHeader::APPEND_DATA: {
        auto st = _append_data(hdr, data);
        if (!st.ok()) {
            _report_failure(id, st, hdr);
        }
    } break;
    case PStreamHeader::CLOSE_LOAD: {
        DBUG_EXECUTE_IF("LoadStream.close_load.block", DBUG_BLOCK);
        // close() flushes/commits this sender's tablets; `all_closed` is true
        // once every sender has sent its CLOSE_LOAD.
        std::vector<int64_t> success_tablet_ids;
        FailedTablets failed_tablets;
        std::vector<PTabletID> tablets_to_commit(hdr.tablets().begin(), hdr.tablets().end());
        bool all_closed =
                close(hdr.src_id(), tablets_to_commit, &success_tablet_ids, &failed_tablets);
        // Send the per-tablet result back before touching stream lifetimes.
        _report_result(id, Status::OK(), success_tablet_ids, failed_tablets, true);
        std::lock_guard<bthread::Mutex> lock_guard(_lock);
        // if incremental stream, we need to wait for all non-incremental streams to be closed
        // before closing incremental streams. We need a fencing mechanism to avoid use after closing
        // across different be.
        if (hdr.has_num_incremental_streams() && hdr.num_incremental_streams() > 0) {
            _closing_stream_ids.push_back(id);
        } else {
            brpc::StreamClose(id);
        }

        // Once the whole load is closed, drain the deferred (incremental)
        // streams queued above.
        if (all_closed) {
            for (auto& closing_id : _closing_stream_ids) {
                brpc::StreamClose(closing_id);
            }
            _closing_stream_ids.clear();
        }
    } break;
    case PStreamHeader::GET_SCHEMA: {
        _report_schema(id, hdr);
    } break;
    default:
        // Unknown opcode: log and assert in debug builds; silently dropped in
        // release builds (sender will eventually time out on its own).
        LOG(WARNING) << "unexpected stream message " << hdr.opcode() << ", " << *this;
        DCHECK(false);
    }
}
793
794
0
// brpc idle-timeout callback: the peer sent nothing for the configured idle
// window, so log a warning and tear the stream down.
void LoadStream::on_idle_timeout(StreamId id) {
    LOG(WARNING) << "closing load stream on idle timeout, " << *this;
    brpc::StreamClose(id);
}
798
799
56
// brpc callback fired when one of this load's streams has fully closed.
// The last stream to close triggers clear_load(), which may destroy `this`
// from another thread — hence the careful ordering below.
void LoadStream::on_closed(StreamId id) {
    // `this` may be freed by other threads after increasing `_close_rpc_cnt`,
    // format string first to prevent use-after-free
    std::stringstream ss;
    ss << *this;
    // fetch_add returns the PRE-increment count, so subtract one more to get
    // the number of streams still open after this close.
    auto remaining_streams = _total_streams - _close_rpc_cnt.fetch_add(1) - 1;
    LOG(INFO) << "stream " << id << " on_closed, remaining streams = " << remaining_streams << ", "
              << ss.str();
    // Exactly one caller observes 0 here; it alone releases the load. Note:
    // `_load_stream_mgr` / `_load_id` are read after fetch_add — presumably
    // safe because only the observer of 0 can free `this`; verify against
    // LoadStreamMgr::clear_load.
    if (remaining_streams == 0) {
        _load_stream_mgr->clear_load(_load_id);
    }
}
811
812
290
inline std::ostream& operator<<(std::ostream& ostr, const LoadStream& load_stream) {
813
290
    ostr << "load_id=" << print_id(load_stream._load_id) << ", txn_id=" << load_stream._txn_id;
814
290
    return ostr;
815
290
}
816
817
} // namespace doris