Coverage Report

Created: 2026-05-08 18:22

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/service/internal_service.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "service/internal_service.h"
19
20
#include <assert.h>
21
#include <brpc/closure_guard.h>
22
#include <brpc/controller.h>
23
#include <bthread/bthread.h>
24
#include <bthread/types.h>
25
#include <butil/errno.h>
26
#include <butil/iobuf.h>
27
#include <fcntl.h>
28
#include <fmt/core.h>
29
#include <gen_cpp/DataSinks_types.h>
30
#include <gen_cpp/MasterService_types.h>
31
#include <gen_cpp/PaloInternalService_types.h>
32
#include <gen_cpp/PlanNodes_types.h>
33
#include <gen_cpp/Status_types.h>
34
#include <gen_cpp/Types_types.h>
35
#include <gen_cpp/internal_service.pb.h>
36
#include <gen_cpp/olap_file.pb.h>
37
#include <gen_cpp/segment_v2.pb.h>
38
#include <gen_cpp/types.pb.h>
39
#include <google/protobuf/stubs/callback.h>
40
#include <stddef.h>
41
#include <stdint.h>
42
#include <sys/stat.h>
43
44
#include <algorithm>
45
#include <exception>
46
#include <filesystem>
47
#include <memory>
48
#include <set>
49
#include <sstream>
50
#include <string>
51
#include <utility>
52
#include <vector>
53
54
#include "cloud/cloud_storage_engine.h"
55
#include "cloud/cloud_tablet_mgr.h"
56
#include "cloud/config.h"
57
#include "common/config.h"
58
#include "common/exception.h"
59
#include "common/logging.h"
60
#include "common/metrics/doris_metrics.h"
61
#include "common/metrics/metrics.h"
62
#include "common/signal_handler.h"
63
#include "common/status.h"
64
#include "core/block/block.h"
65
#include "core/data_type/data_type.h"
66
#include "exec/common/variant_util.h"
67
#include "exec/exchange/vdata_stream_mgr.h"
68
#include "exec/rowid_fetcher.h"
69
#include "exec/sink/writer/varrow_flight_result_writer.h"
70
#include "exec/sink/writer/vmysql_result_writer.h"
71
#include "exprs/function/dictionary_factory.h"
72
#include "format/arrow/arrow_row_batch.h"
73
#include "format/csv/csv_reader.h"
74
#include "format/generic_reader.h"
75
#include "format/jni/jni_reader.h"
76
#include "format/json/new_json_reader.h"
77
#include "format/native/native_reader.h"
78
#include "format/orc/vorc_reader.h"
79
#include "format/parquet/vparquet_reader.h"
80
#include "format/text/text_reader.h"
81
#ifdef BUILD_RUST_READERS
82
#include "format/lance/lance_rust_reader.h"
83
#endif
84
#include "io/fs/local_file_system.h"
85
#include "io/fs/stream_load_pipe.h"
86
#include "io/io_common.h"
87
#include "load/channel/load_channel_mgr.h"
88
#include "load/channel/load_stream_mgr.h"
89
#include "load/delta_writer/delta_writer.h"
90
#include "load/group_commit/wal/wal_manager.h"
91
#include "load/routine_load/routine_load_task_executor.h"
92
#include "load/stream_load/new_load_stream_mgr.h"
93
#include "load/stream_load/stream_load_context.h"
94
#include "runtime/cache/result_cache.h"
95
#include "runtime/cdc_client_mgr.h"
96
#include "runtime/descriptors.h"
97
#include "runtime/exec_env.h"
98
#include "runtime/fold_constant_executor.h"
99
#include "runtime/fragment_mgr.h"
100
#include "runtime/result_block_buffer.h"
101
#include "runtime/result_buffer_mgr.h"
102
#include "runtime/runtime_profile.h"
103
#include "runtime/thread_context.h"
104
#include "runtime/workload_group/workload_group.h"
105
#include "runtime/workload_group/workload_group_manager.h"
106
#include "service/backend_options.h"
107
#include "service/http/http_client.h"
108
#include "service/point_query_executor.h"
109
#include "storage/data_dir.h"
110
#include "storage/index/inverted/inverted_index_desc.h"
111
#include "storage/olap_common.h"
112
#include "storage/olap_define.h"
113
#include "storage/rowset/beta_rowset.h"
114
#include "storage/rowset/rowset.h"
115
#include "storage/rowset/rowset_factory.h"
116
#include "storage/rowset/rowset_meta.h"
117
#include "storage/segment/column_reader.h"
118
#include "storage/storage_engine.h"
119
#include "storage/tablet/tablet_fwd.h"
120
#include "storage/tablet/tablet_manager.h"
121
#include "storage/tablet/tablet_schema.h"
122
#include "storage/txn/txn_manager.h"
123
#include "util/async_io.h"
124
#include "util/brpc_client_cache.h"
125
#include "util/brpc_closure.h"
126
#include "util/jdbc_utils.h"
127
#include "util/jsonb/serialize.h"
128
#include "util/md5.h"
129
#include "util/network_util.h"
130
#include "util/proto_util.h"
131
#include "util/stopwatch.hpp"
132
#include "util/string_util.h"
133
#include "util/thrift_util.h"
134
#include "util/time.h"
135
#include "util/uid_util.h"
136
137
namespace google {
138
namespace protobuf {
139
class RpcController;
140
} // namespace protobuf
141
} // namespace google
142
143
namespace doris {
144
#include "common/compile_check_avoid_begin.h"
145
using namespace ErrorCode;
146
147
const uint32_t DOWNLOAD_FILE_MAX_RETRY = 3;
148
149
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_pool_queue_size, MetricUnit::NOUNIT);
150
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_pool_queue_size, MetricUnit::NOUNIT);
151
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_active_threads, MetricUnit::NOUNIT);
152
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_active_threads, MetricUnit::NOUNIT);
153
154
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_pool_max_queue_size, MetricUnit::NOUNIT);
155
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_pool_max_queue_size, MetricUnit::NOUNIT);
156
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_max_threads, MetricUnit::NOUNIT);
157
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_max_threads, MetricUnit::NOUNIT);
158
159
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_pool_queue_size, MetricUnit::NOUNIT);
160
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_active_threads, MetricUnit::NOUNIT);
161
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_pool_max_queue_size, MetricUnit::NOUNIT);
162
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_max_threads, MetricUnit::NOUNIT);
163
164
static bvar::LatencyRecorder g_process_remote_fetch_rowsets_latency("process_remote_fetch_rowsets");
165
166
bthread_key_t btls_key;
167
168
2.75M
static void thread_context_deleter(void* d) {
169
2.75M
    delete static_cast<ThreadContext*>(d);
170
2.75M
}
171
172
template <typename T>
173
concept CanCancel = requires(T* response) { response->mutable_status(); };
174
175
template <typename T>
176
0
void offer_failed(T* response, google::protobuf::Closure* done, const FifoThreadPool& pool) {
177
0
    brpc::ClosureGuard closure_guard(done);
178
0
    LOG(WARNING) << "fail to offer request to the work pool, pool=" << pool.get_info();
179
0
}
Unexecuted instantiation: _ZN5doris12offer_failedINS_25PTabletWriterCancelResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedINS_14PCacheResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedINS_17PFetchCacheResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
180
181
template <CanCancel T>
182
0
void offer_failed(T* response, google::protobuf::Closure* done, const FifoThreadPool& pool) {
183
0
    brpc::ClosureGuard closure_guard(done);
184
    // Should use status to generate protobuf message, because it will encoding Backend Info
185
    // into the error message and then we could know which backend's pool is full.
186
0
    Status st = Status::Error<TStatusCode::CANCELLED>(
187
0
            "fail to offer request to the work pool, pool={}", pool.get_info());
188
0
    st.to_protobuf(response->mutable_status());
189
0
    LOG(WARNING) << "cancelled due to fail to offer request to the work pool, pool="
190
0
                 << pool.get_info();
191
0
}
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PTabletWriterOpenResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PExecPlanFragmentResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23POpenLoadStreamResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_27PTabletWriterAddBlockResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_25PCancelPlanFragmentResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_21PFetchArrowDataResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_26POutfileWriteSuccessResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PFetchTableSchemaResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_29PFetchArrowFlightSchemaResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_24PTabletKeyLookupResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_25PJdbcTestConnectionResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_20PFetchColIdsResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_26PFetchRemoteSchemaResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_12PProxyResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_20PMergeFilterResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PSendFilterSizeResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PSyncFilterSizeResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_22PPublishFilterResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_15PSendDataResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_13PCommitResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_15PRollbackResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_19PConstantExprResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_26PTransmitRecCTEBlockResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_20PRerunFragmentResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_20PResetGlobalRfResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_19PTransmitDataResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_24PCheckRPCChannelResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_24PResetRPCChannelResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PTabletWriteSlaveResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_27PTabletWriteSlaveDoneResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_17PMultiGetResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_13PGlobResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_26PGroupCommitInsertResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_24PGetWalQueueSizeResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_22PGetBeResourceResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PRequestCdcClientResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
192
193
template <typename T>
194
class NewHttpClosure : public ::google::protobuf::Closure {
195
public:
196
    NewHttpClosure(google::protobuf::Closure* done) : _done(done) {}
197
0
    NewHttpClosure(T* request, google::protobuf::Closure* done) : _request(request), _done(done) {}
Unexecuted instantiation: _ZN5doris14NewHttpClosureINS_28PTabletWriterAddBlockRequestEEC2EPS1_PN6google8protobuf7ClosureE
Unexecuted instantiation: _ZN5doris14NewHttpClosureINS_19PTransmitDataParamsEEC2EPS1_PN6google8protobuf7ClosureE
198
199
0
    void Run() override {
200
0
        if (_request != nullptr) {
201
0
            delete _request;
202
0
            _request = nullptr;
203
0
        }
204
0
        if (_done != nullptr) {
205
0
            _done->Run();
206
0
        }
207
0
        delete this;
208
0
    }
Unexecuted instantiation: _ZN5doris14NewHttpClosureINS_28PTabletWriterAddBlockRequestEE3RunEv
Unexecuted instantiation: _ZN5doris14NewHttpClosureINS_19PTransmitDataParamsEE3RunEv
209
210
private:
211
    T* _request = nullptr;
212
    google::protobuf::Closure* _done = nullptr;
213
};
214
215
PInternalService::PInternalService(ExecEnv* exec_env)
216
7
        : _exec_env(exec_env),
217
          // heavy threadpool is used for load process and other process that will read disk or access network.
218
7
          _heavy_work_pool(config::brpc_heavy_work_pool_threads != -1
219
7
                                   ? config::brpc_heavy_work_pool_threads
220
7
                                   : std::max(128, CpuInfo::num_cores() * 4),
221
7
                           config::brpc_heavy_work_pool_max_queue_size != -1
222
7
                                   ? config::brpc_heavy_work_pool_max_queue_size
223
7
                                   : std::max(10240, CpuInfo::num_cores() * 320),
224
7
                           "brpc_heavy"),
225
226
          // light threadpool should be only used in query processing logic. All hanlers should be very light, not locked, not access disk.
227
7
          _light_work_pool(config::brpc_light_work_pool_threads != -1
228
7
                                   ? config::brpc_light_work_pool_threads
229
7
                                   : std::max(128, CpuInfo::num_cores() * 4),
230
7
                           config::brpc_light_work_pool_max_queue_size != -1
231
7
                                   ? config::brpc_light_work_pool_max_queue_size
232
7
                                   : std::max(10240, CpuInfo::num_cores() * 320),
233
7
                           "brpc_light"),
234
7
          _arrow_flight_work_pool(config::brpc_arrow_flight_work_pool_threads != -1
235
7
                                          ? config::brpc_arrow_flight_work_pool_threads
236
7
                                          : std::max(512, CpuInfo::num_cores() * 2),
237
7
                                  config::brpc_arrow_flight_work_pool_max_queue_size != -1
238
7
                                          ? config::brpc_arrow_flight_work_pool_max_queue_size
239
7
                                          : std::max(20480, CpuInfo::num_cores() * 640),
240
7
                                  "brpc_arrow_flight") {
241
7
    REGISTER_HOOK_METRIC(heavy_work_pool_queue_size,
242
7
                         [this]() { return _heavy_work_pool.get_queue_size(); });
243
7
    REGISTER_HOOK_METRIC(light_work_pool_queue_size,
244
7
                         [this]() { return _light_work_pool.get_queue_size(); });
245
7
    REGISTER_HOOK_METRIC(heavy_work_active_threads,
246
7
                         [this]() { return _heavy_work_pool.get_active_threads(); });
247
7
    REGISTER_HOOK_METRIC(light_work_active_threads,
248
7
                         [this]() { return _light_work_pool.get_active_threads(); });
249
250
7
    REGISTER_HOOK_METRIC(heavy_work_pool_max_queue_size,
251
7
                         []() { return config::brpc_heavy_work_pool_max_queue_size; });
252
7
    REGISTER_HOOK_METRIC(light_work_pool_max_queue_size,
253
7
                         []() { return config::brpc_light_work_pool_max_queue_size; });
254
7
    REGISTER_HOOK_METRIC(heavy_work_max_threads,
255
7
                         []() { return config::brpc_heavy_work_pool_threads; });
256
7
    REGISTER_HOOK_METRIC(light_work_max_threads,
257
7
                         []() { return config::brpc_light_work_pool_threads; });
258
259
7
    REGISTER_HOOK_METRIC(arrow_flight_work_pool_queue_size,
260
7
                         [this]() { return _arrow_flight_work_pool.get_queue_size(); });
261
7
    REGISTER_HOOK_METRIC(arrow_flight_work_active_threads,
262
7
                         [this]() { return _arrow_flight_work_pool.get_active_threads(); });
263
7
    REGISTER_HOOK_METRIC(arrow_flight_work_pool_max_queue_size,
264
7
                         []() { return config::brpc_arrow_flight_work_pool_max_queue_size; });
265
7
    REGISTER_HOOK_METRIC(arrow_flight_work_max_threads,
266
7
                         []() { return config::brpc_arrow_flight_work_pool_threads; });
267
268
7
    _exec_env->load_stream_mgr()->set_heavy_work_pool(&_heavy_work_pool);
269
270
7
    CHECK_EQ(0, bthread_key_create(&btls_key, thread_context_deleter));
271
7
    CHECK_EQ(0, bthread_key_create(&AsyncIO::btls_io_ctx_key, AsyncIO::io_ctx_key_deleter));
272
7
}
273
274
PInternalServiceImpl::PInternalServiceImpl(StorageEngine& engine, ExecEnv* exec_env)
275
6
        : PInternalService(exec_env), _engine(engine) {}
276
277
3
PInternalServiceImpl::~PInternalServiceImpl() = default;
278
279
3
PInternalService::~PInternalService() {
280
3
    DEREGISTER_HOOK_METRIC(heavy_work_pool_queue_size);
281
3
    DEREGISTER_HOOK_METRIC(light_work_pool_queue_size);
282
3
    DEREGISTER_HOOK_METRIC(heavy_work_active_threads);
283
3
    DEREGISTER_HOOK_METRIC(light_work_active_threads);
284
285
3
    DEREGISTER_HOOK_METRIC(heavy_work_pool_max_queue_size);
286
3
    DEREGISTER_HOOK_METRIC(light_work_pool_max_queue_size);
287
3
    DEREGISTER_HOOK_METRIC(heavy_work_max_threads);
288
3
    DEREGISTER_HOOK_METRIC(light_work_max_threads);
289
290
3
    DEREGISTER_HOOK_METRIC(arrow_flight_work_pool_queue_size);
291
3
    DEREGISTER_HOOK_METRIC(arrow_flight_work_active_threads);
292
3
    DEREGISTER_HOOK_METRIC(arrow_flight_work_pool_max_queue_size);
293
3
    DEREGISTER_HOOK_METRIC(arrow_flight_work_max_threads);
294
295
3
    CHECK_EQ(0, bthread_key_delete(btls_key));
296
3
    CHECK_EQ(0, bthread_key_delete(AsyncIO::btls_io_ctx_key));
297
3
}
298
299
void PInternalService::tablet_writer_open(google::protobuf::RpcController* controller,
300
                                          const PTabletWriterOpenRequest* request,
301
                                          PTabletWriterOpenResult* response,
302
54.9k
                                          google::protobuf::Closure* done) {
303
55.1k
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
304
18.4E
        VLOG_RPC << "tablet writer open, id=" << request->id()
305
18.4E
                 << ", index_id=" << request->index_id() << ", txn_id=" << request->txn_id();
306
55.1k
        signal::SignalTaskIdKeeper keeper(request->id());
307
55.1k
        brpc::ClosureGuard closure_guard(done);
308
55.1k
        auto st = _exec_env->load_channel_mgr()->open(*request);
309
55.1k
        if (!st.ok()) {
310
0
            LOG(WARNING) << "load channel open failed, message=" << st << ", id=" << request->id()
311
0
                         << ", index_id=" << request->index_id()
312
0
                         << ", txn_id=" << request->txn_id();
313
0
        }
314
55.1k
        st.to_protobuf(response->mutable_status());
315
55.1k
    });
316
54.9k
    if (!ret) {
317
0
        offer_failed(response, done, _heavy_work_pool);
318
0
        return;
319
0
    }
320
54.9k
}
321
322
void PInternalService::exec_plan_fragment(google::protobuf::RpcController* controller,
323
                                          const PExecPlanFragmentRequest* request,
324
                                          PExecPlanFragmentResult* response,
325
167k
                                          google::protobuf::Closure* done) {
326
167k
    timeval tv {};
327
167k
    gettimeofday(&tv, nullptr);
328
167k
    response->set_received_time(tv.tv_sec * 1000LL + tv.tv_usec / 1000);
329
167k
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
330
167k
        _exec_plan_fragment_in_pthread(controller, request, response, done);
331
167k
    });
332
167k
    if (!ret) {
333
0
        offer_failed(response, done, _light_work_pool);
334
0
        return;
335
0
    }
336
167k
}
337
338
void PInternalService::_exec_plan_fragment_in_pthread(google::protobuf::RpcController* controller,
339
                                                      const PExecPlanFragmentRequest* request,
340
                                                      PExecPlanFragmentResult* response,
341
285k
                                                      google::protobuf::Closure* done) {
342
285k
    timeval tv1 {};
343
285k
    gettimeofday(&tv1, nullptr);
344
285k
    response->set_execution_time(tv1.tv_sec * 1000LL + tv1.tv_usec / 1000);
345
285k
    brpc::ClosureGuard closure_guard(done);
346
285k
    auto st = Status::OK();
347
18.4E
    bool compact = request->has_compact() ? request->compact() : false;
348
285k
    PFragmentRequestVersion version =
349
18.4E
            request->has_version() ? request->version() : PFragmentRequestVersion::VERSION_1;
350
285k
    try {
351
285k
        st = _exec_plan_fragment_impl(request->request(), version, compact);
352
285k
    } catch (const Exception& e) {
353
0
        st = e.to_status();
354
0
    } catch (const std::exception& e) {
355
0
        st = Status::Error(ErrorCode::INTERNAL_ERROR, e.what());
356
0
    } catch (...) {
357
0
        st = Status::Error(ErrorCode::INTERNAL_ERROR,
358
0
                           "_exec_plan_fragment_impl meet unknown error");
359
0
    }
360
285k
    if (!st.ok()) {
361
1.19k
        LOG(WARNING) << "exec plan fragment failed, errmsg=" << st;
362
1.19k
    }
363
285k
    st.to_protobuf(response->mutable_status());
364
285k
    timeval tv2 {};
365
285k
    gettimeofday(&tv2, nullptr);
366
285k
    response->set_execution_done_time(tv2.tv_sec * 1000LL + tv2.tv_usec / 1000);
367
285k
}
368
369
void PInternalService::exec_plan_fragment_prepare(google::protobuf::RpcController* controller,
370
                                                  const PExecPlanFragmentRequest* request,
371
                                                  PExecPlanFragmentResult* response,
372
117k
                                                  google::protobuf::Closure* done) {
373
117k
    timeval tv {};
374
117k
    gettimeofday(&tv, nullptr);
375
117k
    response->set_received_time(tv.tv_sec * 1000LL + tv.tv_usec / 1000);
376
117k
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
377
117k
        _exec_plan_fragment_in_pthread(controller, request, response, done);
378
117k
    });
379
117k
    if (!ret) {
380
0
        offer_failed(response, done, _light_work_pool);
381
0
        return;
382
0
    }
383
117k
}
384
385
void PInternalService::exec_plan_fragment_start(google::protobuf::RpcController* /*controller*/,
386
                                                const PExecPlanFragmentStartRequest* request,
387
                                                PExecPlanFragmentResult* result,
388
117k
                                                google::protobuf::Closure* done) {
389
117k
    timeval tv {};
390
117k
    gettimeofday(&tv, nullptr);
391
117k
    result->set_received_time(tv.tv_sec * 1000LL + tv.tv_usec / 1000);
392
117k
    bool ret = _light_work_pool.try_offer([this, request, result, done]() {
393
117k
        timeval tv1 {};
394
117k
        gettimeofday(&tv1, nullptr);
395
117k
        result->set_execution_time(tv1.tv_sec * 1000LL + tv1.tv_usec / 1000);
396
117k
        brpc::ClosureGuard closure_guard(done);
397
117k
        auto st = _exec_env->fragment_mgr()->start_query_execution(request);
398
117k
        st.to_protobuf(result->mutable_status());
399
117k
        timeval tv2 {};
400
117k
        gettimeofday(&tv2, nullptr);
401
117k
        result->set_execution_done_time(tv2.tv_sec * 1000LL + tv2.tv_usec / 1000);
402
117k
    });
403
117k
    if (!ret) {
404
0
        offer_failed(result, done, _light_work_pool);
405
0
        return;
406
0
    }
407
117k
}
408
409
void PInternalService::open_load_stream(google::protobuf::RpcController* controller,
410
                                        const POpenLoadStreamRequest* request,
411
                                        POpenLoadStreamResponse* response,
412
4.17k
                                        google::protobuf::Closure* done) {
413
4.17k
    bool ret = _heavy_work_pool.try_offer([this, controller, request, response, done]() {
414
4.17k
        signal::SignalTaskIdKeeper keeper(request->load_id());
415
4.17k
        brpc::ClosureGuard done_guard(done);
416
4.17k
        brpc::Controller* cntl = static_cast<brpc::Controller*>(controller);
417
4.17k
        brpc::StreamOptions stream_options;
418
419
4.17k
        LOG(INFO) << "open load stream, load_id=" << request->load_id()
420
4.17k
                  << ", src_id=" << request->src_id();
421
422
4.17k
        std::vector<BaseTabletSPtr> tablets;
423
4.17k
        for (const auto& req : request->tablets()) {
424
2.14k
            BaseTabletSPtr tablet;
425
2.14k
            if (auto res = ExecEnv::get_tablet(req.tablet_id()); !res.has_value()) [[unlikely]] {
426
0
                auto st = std::move(res).error();
427
0
                st.to_protobuf(response->mutable_status());
428
0
                cntl->SetFailed(st.to_string());
429
0
                return;
430
2.14k
            } else {
431
2.14k
                tablet = std::move(res).value();
432
2.14k
            }
433
2.14k
            auto resp = response->add_tablet_schemas();
434
2.14k
            resp->set_index_id(req.index_id());
435
2.14k
            resp->set_enable_unique_key_merge_on_write(tablet->enable_unique_key_merge_on_write());
436
2.14k
            tablet->tablet_schema()->to_schema_pb(resp->mutable_tablet_schema());
437
2.14k
            tablets.push_back(tablet);
438
2.14k
        }
439
4.17k
        if (!tablets.empty()) {
440
2.14k
            auto* tablet_load_infos = response->mutable_tablet_load_rowset_num_infos();
441
2.14k
            for (const auto& tablet : tablets) {
442
2.14k
                BaseDeltaWriter::collect_tablet_load_rowset_num_info(tablet.get(),
443
2.14k
                                                                     tablet_load_infos);
444
2.14k
            }
445
2.14k
        }
446
447
4.17k
        LoadStream* load_stream = nullptr;
448
4.17k
        auto st = _exec_env->load_stream_mgr()->open_load_stream(request, load_stream);
449
4.17k
        if (!st.ok()) {
450
0
            st.to_protobuf(response->mutable_status());
451
0
            return;
452
0
        }
453
454
4.17k
        stream_options.handler = load_stream;
455
4.17k
        stream_options.idle_timeout_ms = request->idle_timeout_ms();
456
4.17k
        DBUG_EXECUTE_IF("PInternalServiceImpl.open_load_stream.set_idle_timeout",
457
4.17k
                        { stream_options.idle_timeout_ms = 1; });
458
459
4.17k
        StreamId streamid;
460
4.17k
        if (brpc::StreamAccept(&streamid, *cntl, &stream_options) != 0) {
461
0
            st = Status::Cancelled("Fail to accept stream {}", streamid);
462
0
            st.to_protobuf(response->mutable_status());
463
0
            cntl->SetFailed(st.to_string());
464
0
            return;
465
0
        }
466
467
4.17k
        VLOG_DEBUG << "get streamid =" << streamid;
468
4.17k
        st.to_protobuf(response->mutable_status());
469
4.17k
    });
470
4.17k
    if (!ret) {
471
0
        offer_failed(response, done, _heavy_work_pool);
472
0
    }
473
4.17k
}
474
475
void PInternalService::tablet_writer_add_block_by_http(google::protobuf::RpcController* controller,
476
                                                       const ::doris::PEmptyRequest* request,
477
                                                       PTabletWriterAddBlockResult* response,
478
0
                                                       google::protobuf::Closure* done) {
479
0
    PTabletWriterAddBlockRequest* new_request = new PTabletWriterAddBlockRequest();
480
0
    google::protobuf::Closure* new_done =
481
0
            new NewHttpClosure<PTabletWriterAddBlockRequest>(new_request, done);
482
0
    brpc::Controller* cntl = static_cast<brpc::Controller*>(controller);
483
0
    Status st = attachment_extract_request_contain_block<PTabletWriterAddBlockRequest>(new_request,
484
0
                                                                                       cntl);
485
0
    if (st.ok()) {
486
0
        tablet_writer_add_block(controller, new_request, response, new_done);
487
0
    } else {
488
0
        st.to_protobuf(response->mutable_status());
489
0
    }
490
0
}
491
492
// Appends a block of rows to an in-flight load channel. The work is offloaded
// to the heavy work pool; the response carries both the queueing delay and the
// actual execution time so the sender can account for backpressure.
void PInternalService::tablet_writer_add_block(google::protobuf::RpcController* controller,
                                               const PTabletWriterAddBlockRequest* request,
                                               PTabletWriterAddBlockResult* response,
                                               google::protobuf::Closure* done) {
    // Timestamp taken before queueing so the lambda can measure wait time.
    int64_t submit_task_time_ns = MonotonicNanos();
    bool ret = _heavy_work_pool.try_offer([request, response, done, submit_task_time_ns, this]() {
        // Time spent sitting in the work pool queue.
        int64_t wait_execution_time_ns = MonotonicNanos() - submit_task_time_ns;
        // Guarantees done->Run() fires on every exit path of this task.
        brpc::ClosureGuard closure_guard(done);
        int64_t execution_time_ns = 0;
        {
            SCOPED_RAW_TIMER(&execution_time_ns);
            // Associates the load id with this thread for signal/crash diagnostics.
            signal::SignalTaskIdKeeper keeper(request->id());
            auto st = _exec_env->load_channel_mgr()->add_batch(*request, response);
            if (!st.ok()) {
                LOG(WARNING) << "tablet writer add block failed, message=" << st
                             << ", id=" << request->id() << ", index_id=" << request->index_id()
                             << ", sender_id=" << request->sender_id()
                             << ", backend id=" << request->backend_id();
            }
            st.to_protobuf(response->mutable_status());
        }
        // Both timings reported in microseconds; set after the timer scope closes.
        response->set_execution_time_us(execution_time_ns / NANOS_PER_MICRO);
        response->set_wait_execution_time_us(wait_execution_time_ns / NANOS_PER_MICRO);
    });
    if (!ret) {
        // Pool saturated: reply with an error status and run done immediately.
        offer_failed(response, done, _heavy_work_pool);
        return;
    }
}
521
522
void PInternalService::tablet_writer_cancel(google::protobuf::RpcController* controller,
523
                                            const PTabletWriterCancelRequest* request,
524
                                            PTabletWriterCancelResult* response,
525
122
                                            google::protobuf::Closure* done) {
526
122
    bool ret = _heavy_work_pool.try_offer([this, request, done]() {
527
122
        VLOG_RPC << "tablet writer cancel, id=" << request->id()
528
0
                 << ", index_id=" << request->index_id() << ", sender_id=" << request->sender_id();
529
122
        signal::SignalTaskIdKeeper keeper(request->id());
530
122
        brpc::ClosureGuard closure_guard(done);
531
122
        auto st = _exec_env->load_channel_mgr()->cancel(*request);
532
122
        if (!st.ok()) {
533
0
            LOG(WARNING) << "tablet writer cancel failed, id=" << request->id()
534
0
                         << ", index_id=" << request->index_id()
535
0
                         << ", sender_id=" << request->sender_id();
536
0
        }
537
122
    });
538
122
    if (!ret) {
539
0
        offer_failed(response, done, _heavy_work_pool);
540
0
        return;
541
0
    }
542
122
}
543
544
// Deserializes a thrift-encoded TPipelineFragmentParamsList and submits every
// fragment in it to the fragment manager. `cb` (optional) is a per-fragment
// completion callback; `compact` selects the thrift compact protocol.
// Returns the first error encountered, otherwise OK.
Status PInternalService::_exec_plan_fragment_impl(
        const std::string& ser_request, PFragmentRequestVersion version, bool compact,
        const std::function<void(RuntimeState*, Status*)>& cb) {
    // Sometimes the BE do not receive the first heartbeat message and it receives request from FE
    // If BE execute this fragment, it will core when it wants to get some property from master info.
    if (ExecEnv::GetInstance()->cluster_info() == nullptr) {
        return Status::InternalError(
                "Have not receive the first heartbeat message from master, not ready to provide "
                "service");
    }
    // NOTE(review): this CHECK aborts on any version other than VERSION_3,
    // which makes the `else` branch below unreachable (yet coverage shows the
    // "invalid version" branch was hit — confirm whether CHECK is fatal in
    // release builds before relying on either behavior).
    CHECK(version == PFragmentRequestVersion::VERSION_3)
            << "only support version 3, received " << version;
    if (version == PFragmentRequestVersion::VERSION_3) {
        TPipelineFragmentParamsList t_request;
        {
            // Deserialize the thrift message; `len` is in/out (bytes consumed).
            const uint8_t* buf = (const uint8_t*)ser_request.data();
            uint32_t len = ser_request.size();
            RETURN_IF_ERROR(deserialize_thrift_msg(buf, &len, compact, &t_request));
        }

        const auto& fragment_list = t_request.params_list;
        if (fragment_list.empty()) {
            return Status::InternalError("Invalid TPipelineFragmentParamsList!");
        }
        MonotonicStopWatch timer;
        timer.start();

        // work for old version frontend: older FEs put the runtime-filter info
        // on the first fragment's first local param instead of the top-level
        // field, so lift it up to keep downstream code uniform.
        if (!t_request.__isset.runtime_filter_info) {
            TRuntimeFilterInfo runtime_filter_info;
            auto local_param = fragment_list[0].local_params[0];
            if (local_param.__isset.runtime_filter_params) {
                runtime_filter_info.__set_runtime_filter_params(local_param.runtime_filter_params);
            }
            if (local_param.__isset.topn_filter_descs) {
                runtime_filter_info.__set_topn_filter_descs(local_param.topn_filter_descs);
            }
            t_request.__set_runtime_filter_info(runtime_filter_info);
        }

        // Submit each fragment; stop at the first failure.
        for (const TPipelineFragmentParams& fragment : fragment_list) {
            if (cb) {
                RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment(
                        fragment, QuerySource::INTERNAL_FRONTEND, cb, t_request));
            } else {
                RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment(
                        fragment, QuerySource::INTERNAL_FRONTEND, t_request));
            }
        }
        timer.stop();
        // Elapsed time is in nanoseconds; warn if prepare took more than 5s.
        double cost_secs = static_cast<double>(timer.elapsed_time()) / 1000000000ULL;
        if (cost_secs > 5) {
            LOG_WARNING("Prepare {} fragments of query {} costs {} seconds, it costs too much",
                        fragment_list.size(), print_id(fragment_list.front().query_id), cost_secs);
        }

        return Status::OK();
    } else {
        return Status::InternalError("invalid version");
    }
}
605
606
// Cancels a running query. Supports both new FEs (which send a full cancel
// Status) and old FEs (which only send a PPlanFragmentCancelReason enum);
// the reason is normalized into a Status before cancelling.
void PInternalService::cancel_plan_fragment(google::protobuf::RpcController* /*controller*/,
                                            const PCancelPlanFragmentRequest* request,
                                            PCancelPlanFragmentResult* result,
                                            google::protobuf::Closure* done) {
    bool ret = _light_work_pool.try_offer([this, request, result, done]() {
        // Ensure done->Run() fires on every exit path of this task.
        brpc::ClosureGuard closure_guard(done);
        signal::SignalTaskIdKeeper keeper(request->finst_id());
        Status st = Status::OK();

        const bool has_cancel_reason = request->has_cancel_reason();
        const bool has_cancel_status = request->has_cancel_status();
        // During upgrade only LIMIT_REACH is used, other reason is changed to internal error
        Status actual_cancel_status = Status::OK();
        // Convert PPlanFragmentCancelReason to Status
        if (has_cancel_status) {
            // If fe set cancel status, then it is new FE now, should use cancel status.
            actual_cancel_status = Status::create<false>(request->cancel_status());
        } else if (has_cancel_reason) {
            // If fe not set cancel status, but set cancel reason, should convert cancel reason
            // to cancel status here.
            if (request->cancel_reason() == PPlanFragmentCancelReason::LIMIT_REACH) {
                actual_cancel_status = Status::Error<ErrorCode::LIMIT_REACH>("limit reach");
            } else {
                // Use cancel reason as error message
                actual_cancel_status = Status::InternalError(
                        PPlanFragmentCancelReason_Name(request->cancel_reason()));
            }
        } else {
            // Neither field set: fall back to a generic error.
            actual_cancel_status = Status::InternalError("unknown error");
        }

        TUniqueId query_id;
        query_id.__set_hi(request->query_id().hi());
        query_id.__set_lo(request->query_id().lo());
        LOG(INFO) << fmt::format("Cancel query {}, reason: {}", print_id(query_id),
                                 actual_cancel_status.to_string());
        _exec_env->fragment_mgr()->cancel_query(query_id, actual_cancel_status);

        // TODO: the logic seems useless, cancel only return Status::OK. remove it
        st.to_protobuf(result->mutable_status());
    });
    if (!ret) {
        // Pool saturated: reply with an error status and run done immediately.
        offer_failed(result, done, _light_work_pool);
        return;
    }
}
652
653
// Fetches a batch of MySQL-protocol result data for a query/instance.
void PInternalService::fetch_data(google::protobuf::RpcController* controller,
                                  const PFetchDataRequest* request, PFetchDataResult* result,
                                  google::protobuf::Closure* done) {
    // fetch_data is a light operation which will put a request rather than wait inplace when there's no data ready.
    // when there's data ready, use brpc to send. there's queue in brpc service. won't take it too long.
    // `ctx` takes custody of result/done; presumably it completes the RPC
    // whether data is delivered or the buffer is torn down — TODO confirm the
    // early-return path below still answers the RPC (no status is written here).
    auto ctx = GetResultBatchCtx::create_shared(result, done);
    TUniqueId unique_id = UniqueId(request->finst_id()).to_thrift(); // query_id or instance_id
    std::shared_ptr<MySQLResultBlockBuffer> buffer;
    Status st = ExecEnv::GetInstance()->result_mgr()->find_buffer(unique_id, buffer);
    if (!st.ok()) {
        LOG(WARNING) << "Result buffer not found! finst ID: " << print_id(unique_id);
        return;
    }
    // May queue ctx inside the buffer if no data is ready yet (C++17 if-init).
    if (st = buffer->get_batch(ctx); !st.ok()) {
        LOG(WARNING) << "fetch_data failed: " << st.to_string();
    }
}
670
671
// Arrow Flight counterpart of fetch_data: fetches a batch of Arrow result
// data, executed on the dedicated arrow-flight work pool.
void PInternalService::fetch_arrow_data(google::protobuf::RpcController* controller,
                                        const PFetchArrowDataRequest* request,
                                        PFetchArrowDataResult* result,
                                        google::protobuf::Closure* done) {
    bool ret = _arrow_flight_work_pool.try_offer([request, result, done]() {
        // ctx takes custody of result/done for asynchronous completion.
        auto ctx = GetArrowResultBatchCtx::create_shared(result, done);
        TUniqueId unique_id = UniqueId(request->finst_id()).to_thrift(); // query_id or instance_id
        std::shared_ptr<ArrowFlightResultBlockBuffer> arrow_buffer;
        auto st = ExecEnv::GetInstance()->result_mgr()->find_buffer(unique_id, arrow_buffer);
        if (!st.ok()) {
            // NOTE(review): returns without writing a status into `result`;
            // presumably ctx's teardown answers the RPC — confirm.
            LOG(WARNING) << "Result buffer not found! Query ID: " << print_id(unique_id);
            return;
        }
        // May queue ctx inside the buffer if no data is ready yet.
        if (st = arrow_buffer->get_batch(ctx); !st.ok()) {
            LOG(WARNING) << "fetch_arrow_data failed: " << st.to_string();
        }
    });
    if (!ret) {
        // Pool saturated: reply with an error status and run done immediately.
        offer_failed(result, done, _arrow_flight_work_pool);
        return;
    }
}
693
694
void PInternalService::outfile_write_success(google::protobuf::RpcController* controller,
695
                                             const POutfileWriteSuccessRequest* request,
696
                                             POutfileWriteSuccessResult* result,
697
4
                                             google::protobuf::Closure* done) {
698
4
    bool ret = _heavy_work_pool.try_offer([request, result, done]() {
699
4
        VLOG_RPC << "outfile write success file";
700
4
        brpc::ClosureGuard closure_guard(done);
701
4
        TResultFileSink result_file_sink;
702
4
        Status st = Status::OK();
703
4
        {
704
4
            const uint8_t* buf = (const uint8_t*)(request->result_file_sink().data());
705
4
            uint32_t len = request->result_file_sink().size();
706
4
            st = deserialize_thrift_msg(buf, &len, false, &result_file_sink);
707
4
            if (!st.ok()) {
708
0
                LOG(WARNING) << "outfile write success file failed, errmsg = " << st;
709
0
                st.to_protobuf(result->mutable_status());
710
0
                return;
711
0
            }
712
4
        }
713
714
4
        TResultFileSinkOptions file_options = result_file_sink.file_options;
715
4
        std::stringstream ss;
716
4
        ss << file_options.file_path << file_options.success_file_name;
717
4
        std::string file_name = ss.str();
718
4
        if (result_file_sink.storage_backend_type == TStorageBackendType::LOCAL) {
719
            // For local file writer, the file_path is a local dir.
720
            // Here we do a simple security verification by checking whether the file exists.
721
            // Because the file path is currently arbitrarily specified by the user,
722
            // Doris is not responsible for ensuring the correctness of the path.
723
            // This is just to prevent overwriting the existing file.
724
4
            bool exists = true;
725
4
            st = io::global_local_filesystem()->exists(file_name, &exists);
726
4
            if (!st.ok()) {
727
0
                LOG(WARNING) << "outfile write success filefailed, errmsg = " << st;
728
0
                st.to_protobuf(result->mutable_status());
729
0
                return;
730
0
            }
731
4
            if (exists) {
732
0
                st = Status::InternalError("File already exists: {}", file_name);
733
0
            }
734
4
            if (!st.ok()) {
735
0
                LOG(WARNING) << "outfile write success file failed, errmsg = " << st;
736
0
                st.to_protobuf(result->mutable_status());
737
0
                return;
738
0
            }
739
4
        }
740
741
4
        auto file_type_res =
742
4
                FileFactory::convert_storage_type(result_file_sink.storage_backend_type);
743
4
        if (!file_type_res.has_value()) [[unlikely]] {
744
0
            st = std::move(file_type_res).error();
745
0
            st.to_protobuf(result->mutable_status());
746
0
            LOG(WARNING) << "encounter unkonw type=" << result_file_sink.storage_backend_type
747
0
                         << ", st=" << st;
748
0
            return;
749
0
        }
750
751
4
        auto&& res = FileFactory::create_file_writer(file_type_res.value(), ExecEnv::GetInstance(),
752
4
                                                     file_options.broker_addresses,
753
4
                                                     file_options.broker_properties, file_name,
754
4
                                                     {
755
4
                                                             .write_file_cache = false,
756
4
                                                             .sync_file_data = false,
757
4
                                                     });
758
4
        using T = std::decay_t<decltype(res)>;
759
4
        if (!res.has_value()) [[unlikely]] {
760
0
            st = std::forward<T>(res).error();
761
0
            st.to_protobuf(result->mutable_status());
762
0
            return;
763
0
        }
764
765
4
        std::unique_ptr<doris::io::FileWriter> _file_writer_impl = std::forward<T>(res).value();
766
        // must write somthing because s3 file writer can not writer empty file
767
4
        st = _file_writer_impl->append({"success"});
768
4
        if (!st.ok()) {
769
0
            LOG(WARNING) << "outfile write success filefailed, errmsg=" << st;
770
0
            st.to_protobuf(result->mutable_status());
771
0
            return;
772
0
        }
773
4
        st = _file_writer_impl->close();
774
4
        if (!st.ok()) {
775
0
            LOG(WARNING) << "outfile write success filefailed, errmsg=" << st;
776
0
            st.to_protobuf(result->mutable_status());
777
0
            return;
778
0
        }
779
4
    });
780
4
    if (!ret) {
781
0
        offer_failed(result, done, _heavy_work_pool);
782
0
        return;
783
0
    }
784
4
}
785
786
// Infers the column names/types of an external file (CSV, Parquet, ORC, JSON,
// ...) by constructing the matching reader and asking it for its parsed
// schema. Used by FE for schema discovery (e.g. table-valued functions).
void PInternalService::fetch_table_schema(google::protobuf::RpcController* controller,
                                          const PFetchTableSchemaRequest* request,
                                          PFetchTableSchemaResult* result,
                                          google::protobuf::Closure* done) {
    bool ret = _heavy_work_pool.try_offer([request, result, done]() {
        VLOG_RPC << "fetch table schema";
        // Ensure done->Run() fires on every exit path of this task.
        brpc::ClosureGuard closure_guard(done);
        TFileScanRange file_scan_range;
        Status st = Status::OK();
        {
            // Deserialize the thrift scan range; `len` is in/out (bytes consumed).
            const uint8_t* buf = (const uint8_t*)(request->file_scan_range().data());
            uint32_t len = request->file_scan_range().size();
            st = deserialize_thrift_msg(buf, &len, false, &file_scan_range);
            if (!st.ok()) {
                LOG(WARNING) << "fetch table schema failed, errmsg=" << st;
                st.to_protobuf(result->mutable_status());
                return;
            }
        }
        if (file_scan_range.__isset.ranges == false) {
            st = Status::InternalError("can not get TFileRangeDesc.");
            st.to_protobuf(result->mutable_status());
            return;
        }
        if (file_scan_range.__isset.params == false) {
            st = Status::InternalError("can not get TFileScanRangeParams.");
            st.to_protobuf(result->mutable_status());
            return;
        }
        // Schema is inferred from the first range only.
        const TFileRangeDesc& range = file_scan_range.ranges.at(0);
        const TFileScanRangeParams& params = file_scan_range.params;

        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
                MemTrackerLimiter::Type::OTHER,
                fmt::format("InternalService::fetch_table_schema:{}#{}", params.format_type,
                            params.file_type));
        SCOPED_ATTACH_TASK(mem_tracker);

        // make sure profile is destructed after reader cause PrefetchBufferedReader
        // might asynchronously access the profile
        std::unique_ptr<RuntimeProfile> profile =
                std::make_unique<RuntimeProfile>("FetchTableSchema");
        std::unique_ptr<GenericReader> reader(nullptr);
        auto io_ctx = std::make_shared<io::IOContext>();
        auto file_cache_statis = std::make_shared<io::FileCacheStatistics>();
        auto file_reader_stats = std::make_shared<io::FileReaderStats>();
        io_ctx->file_cache_stats = file_cache_statis.get();
        io_ctx->file_reader_stats = file_reader_stats.get();
        constexpr size_t fetch_schema_batch_size = 4064;
        // file_slots is no use, but the lifetime should be longer than reader
        std::vector<SlotDescriptor*> file_slots;
        // Instantiate the reader that matches the file format.
        switch (params.format_type) {
        case TFileFormatType::FORMAT_CSV_PLAIN:
        case TFileFormatType::FORMAT_CSV_GZ:
        case TFileFormatType::FORMAT_CSV_BZ2:
        case TFileFormatType::FORMAT_CSV_LZ4FRAME:
        case TFileFormatType::FORMAT_CSV_LZ4BLOCK:
        case TFileFormatType::FORMAT_CSV_SNAPPYBLOCK:
        case TFileFormatType::FORMAT_CSV_LZOP:
        case TFileFormatType::FORMAT_CSV_DEFLATE: {
            reader = CsvReader::create_unique(nullptr, profile.get(), nullptr, params, range,
                                              file_slots, fetch_schema_batch_size, io_ctx.get(),
                                              io_ctx);
            break;
        }
        case TFileFormatType::FORMAT_TEXT: {
            reader = TextReader::create_unique(nullptr, profile.get(), nullptr, params, range,
                                               file_slots, fetch_schema_batch_size, io_ctx.get());
            break;
        }
        case TFileFormatType::FORMAT_PARQUET: {
            reader = ParquetReader::create_unique(params, range, io_ctx, nullptr);
            break;
        }
        case TFileFormatType::FORMAT_ORC: {
            reader = OrcReader::create_unique(params, range, fetch_schema_batch_size, "", io_ctx);
            break;
        }
        case TFileFormatType::FORMAT_NATIVE: {
            reader = NativeReader::create_unique(profile.get(), params, range, io_ctx.get(),
                                                 nullptr);
            break;
        }
        case TFileFormatType::FORMAT_JSON: {
            reader = NewJsonReader::create_unique(profile.get(), params, range, file_slots,
                                                  fetch_schema_batch_size, io_ctx.get(), io_ctx);
            break;
        }
#ifdef BUILD_RUST_READERS
        case TFileFormatType::FORMAT_LANCE: {
            reader = LanceRustReader::create_unique(params, range, io_ctx.get());
            break;
        }
#endif
        default:
            st = Status::InternalError("Not supported file format in fetch table schema: {}",
                                       params.format_type);
            st.to_protobuf(result->mutable_status());
            return;
        }
        if (!st.ok()) {
            LOG(WARNING) << "failed to create reader, errmsg=" << st;
            st.to_protobuf(result->mutable_status());
            return;
        }
        st = reader->init_schema_reader();
        if (!st.ok()) {
            LOG(WARNING) << "failed to init reader, errmsg=" << st;
            st.to_protobuf(result->mutable_status());
            return;
        }
        std::vector<std::string> col_names;
        std::vector<DataTypePtr> col_types;
        st = reader->get_parsed_schema(&col_names, &col_types);
        if (!st.ok()) {
            LOG(WARNING) << "fetch table schema failed, errmsg=" << st;
            st.to_protobuf(result->mutable_status());
            return;
        }
        // Copy the parsed schema (names + protobuf-encoded types) into the response.
        result->set_column_nums(col_names.size());
        for (size_t idx = 0; idx < col_names.size(); ++idx) {
            result->add_column_names(col_names[idx]);
        }
        for (size_t idx = 0; idx < col_types.size(); ++idx) {
            PTypeDesc* type_desc = result->add_column_types();
            col_types[idx]->to_protobuf(type_desc);
        }
        st.to_protobuf(result->mutable_status());
    });
    if (!ret) {
        // Pool saturated: reply with an error status and run done immediately.
        offer_failed(result, done, _heavy_work_pool);
        return;
    }
}
920
921
void PInternalService::fetch_arrow_flight_schema(google::protobuf::RpcController* controller,
922
                                                 const PFetchArrowFlightSchemaRequest* request,
923
                                                 PFetchArrowFlightSchemaResult* result,
924
101
                                                 google::protobuf::Closure* done) {
925
101
    bool ret = _arrow_flight_work_pool.try_offer([request, result, done]() {
926
101
        brpc::ClosureGuard closure_guard(done);
927
101
        std::shared_ptr<arrow::Schema> schema;
928
101
        std::shared_ptr<ArrowFlightResultBlockBuffer> buffer;
929
101
        auto st = ExecEnv::GetInstance()->result_mgr()->find_buffer(
930
101
                UniqueId(request->finst_id()).to_thrift(), buffer);
931
101
        if (!st.ok()) {
932
0
            LOG(WARNING) << "fetch arrow flight schema failed, errmsg=" << st;
933
0
            st.to_protobuf(result->mutable_status());
934
0
            return;
935
0
        }
936
101
        st = buffer->get_schema(&schema);
937
101
        if (!st.ok()) {
938
0
            LOG(WARNING) << "fetch arrow flight schema failed, errmsg=" << st;
939
0
            st.to_protobuf(result->mutable_status());
940
0
            return;
941
0
        }
942
943
101
        std::string schema_str;
944
101
        st = serialize_arrow_schema(&schema, &schema_str);
945
101
        if (st.ok()) {
946
101
            result->set_schema(std::move(schema_str));
947
101
            if (!config::public_host.empty()) {
948
0
                result->set_be_arrow_flight_ip(config::public_host);
949
0
            }
950
101
            if (config::arrow_flight_sql_proxy_port != -1) {
951
0
                result->set_be_arrow_flight_port(config::arrow_flight_sql_proxy_port);
952
0
            }
953
101
        }
954
101
        st.to_protobuf(result->mutable_status());
955
101
    });
956
101
    if (!ret) {
957
0
        offer_failed(result, done, _arrow_flight_work_pool);
958
0
        return;
959
0
    }
960
101
}
961
962
// Executes a point (primary-key) lookup against a tablet and fills `response`.
// Returns OK early when the flag says the caller must resend the query
// context; otherwise performs the lookup and dumps the executor profile.
Status PInternalService::_tablet_fetch_data(const PTabletKeyLookupRequest* request,
                                            PTabletKeyLookupResponse* response) {
    PointQueryExecutor lookup_executor;
    RETURN_IF_ERROR(lookup_executor.init(request, response));
    const bool need_resend = response->has_need_resend_query_context() &&
                             response->need_resend_query_context();
    if (need_resend) {
        // Per the flag name, FE is expected to resend the query context;
        // no lookup is performed this round.
        return Status::OK();
    }
    RETURN_IF_ERROR(lookup_executor.lookup_up());
    lookup_executor.print_profile();
    return Status::OK();
}
973
974
void PInternalService::tablet_fetch_data(google::protobuf::RpcController* controller,
975
                                         const PTabletKeyLookupRequest* request,
976
                                         PTabletKeyLookupResponse* response,
977
270
                                         google::protobuf::Closure* done) {
978
270
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
979
270
        [[maybe_unused]] auto* cntl = static_cast<brpc::Controller*>(controller);
980
270
        brpc::ClosureGuard guard(done);
981
270
        Status st = _tablet_fetch_data(request, response);
982
270
        st.to_protobuf(response->mutable_status());
983
270
    });
984
270
    if (!ret) {
985
0
        offer_failed(response, done, _light_work_pool);
986
0
        return;
987
0
    }
988
270
}
989
990
// Tests a JDBC connection on behalf of FE ("test connection" in catalog
// creation). Deserializes the thrift TTableDescriptor, builds the parameter
// map for the Java-side JdbcConnectionTester, and opens it via JniReader —
// the open() call is what actually exercises the connection.
void PInternalService::test_jdbc_connection(google::protobuf::RpcController* controller,
                                            const PJdbcTestConnectionRequest* request,
                                            PJdbcTestConnectionResult* result,
                                            google::protobuf::Closure* done) {
    bool ret = _heavy_work_pool.try_offer([request, result, done]() {
        VLOG_RPC << "test jdbc connection";
        // Ensure done->Run() fires on every exit path of this task.
        brpc::ClosureGuard closure_guard(done);
        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
                MemTrackerLimiter::Type::OTHER,
                fmt::format("InternalService::test_jdbc_connection"));
        SCOPED_ATTACH_TASK(mem_tracker);
        TTableDescriptor table_desc;
        Status st = Status::OK();
        {
            // Deserialize the thrift table descriptor; `len` is in/out.
            const uint8_t* buf = (const uint8_t*)request->jdbc_table().data();
            uint32_t len = request->jdbc_table().size();
            st = deserialize_thrift_msg(buf, &len, false, &table_desc);
            if (!st.ok()) {
                LOG(WARNING) << "test jdbc connection failed, errmsg=" << st;
                st.to_protobuf(result->mutable_status());
                return;
            }
        }
        TJdbcTable jdbc_table = (table_desc.jdbcTable);

        // Resolve driver URL to absolute file:// path
        std::string driver_url;
        st = JdbcUtils::resolve_driver_url(jdbc_table.jdbc_driver_url, &driver_url);
        if (!st.ok()) {
            st.to_protobuf(result->mutable_status());
            return;
        }

        // Build params for JdbcConnectionTester
        std::map<std::string, std::string> params;
        params["jdbc_url"] = jdbc_table.jdbc_url;
        params["jdbc_user"] = jdbc_table.jdbc_user;
        params["jdbc_password"] = jdbc_table.jdbc_password;
        params["jdbc_driver_class"] = jdbc_table.jdbc_driver_class;
        params["jdbc_driver_url"] = driver_url;
        params["query_sql"] = request->query_str();
        params["catalog_id"] = std::to_string(jdbc_table.catalog_id);
        params["connection_pool_min_size"] = std::to_string(jdbc_table.connection_pool_min_size);
        params["connection_pool_max_size"] = std::to_string(jdbc_table.connection_pool_max_size);
        params["connection_pool_max_wait_time"] =
                std::to_string(jdbc_table.connection_pool_max_wait_time);
        params["connection_pool_max_life_time"] =
                std::to_string(jdbc_table.connection_pool_max_life_time);
        params["connection_pool_keep_alive"] =
                jdbc_table.connection_pool_keep_alive ? "true" : "false";
        params["clean_datasource"] = "true";
        // Map jdbc_table_type (TOdbcTableType enum value) to string name
        // for JdbcTypeHandlerFactory to select the correct type handler.
        // This ensures the right validation query is used (e.g. Oracle: "SELECT 1 FROM dual").
        if (request->has_jdbc_table_type()) {
            std::string type_name;
            // Numeric values mirror the TOdbcTableType enum; unknown values
            // fall through with an empty name and the param is omitted.
            switch (request->jdbc_table_type()) {
            case 0:
                type_name = "MYSQL";
                break;
            case 1:
                type_name = "ORACLE";
                break;
            case 2:
                type_name = "POSTGRESQL";
                break;
            case 3:
                type_name = "SQLSERVER";
                break;
            case 6:
                type_name = "CLICKHOUSE";
                break;
            case 7:
                type_name = "SAP_HANA";
                break;
            case 8:
                type_name = "TRINO";
                break;
            case 9:
                type_name = "PRESTO";
                break;
            case 10:
                type_name = "OCEANBASE";
                break;
            case 11:
                type_name = "OCEANBASE_ORACLE";
                break;
            case 13:
                type_name = "DB2";
                break;
            case 14:
                type_name = "GBASE";
                break;
            default:
                break;
            }
            if (!type_name.empty()) {
                params["table_type"] = type_name;
            }
        }
        // required_fields and columns_types are required by JniReader
        params["required_fields"] = "result";
        params["columns_types"] = "int";

        // Use JniReader to create JdbcConnectionTester, which tests
        // the connection in its open() method.
        auto jni_reader =
                std::make_unique<JniReader>("org/apache/doris/jdbc/JdbcConnectionTester", params);
        st = jni_reader->open(nullptr, nullptr);
        st.to_protobuf(result->mutable_status());

        // Close regardless of open() outcome; a close failure is only logged.
        Status close_st = jni_reader->close();
        if (!close_st.ok()) {
            LOG(WARNING) << "Failed to close JDBC connection tester: " << close_st.msg();
        }
    });

    if (!ret) {
        // Pool saturated: reply with an error status and run done immediately.
        offer_failed(result, done, _heavy_work_pool);
        return;
    }
}
1112
1113
void PInternalServiceImpl::get_column_ids_by_tablet_ids(google::protobuf::RpcController* controller,
1114
                                                        const PFetchColIdsRequest* request,
1115
                                                        PFetchColIdsResponse* response,
1116
0
                                                        google::protobuf::Closure* done) {
1117
0
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
1118
0
        _get_column_ids_by_tablet_ids(controller, request, response, done);
1119
0
    });
1120
0
    if (!ret) {
1121
0
        offer_failed(response, done, _light_work_pool);
1122
0
        return;
1123
0
    }
1124
0
}
1125
1126
void PInternalServiceImpl::_get_column_ids_by_tablet_ids(
1127
        google::protobuf::RpcController* controller, const PFetchColIdsRequest* request,
1128
0
        PFetchColIdsResponse* response, google::protobuf::Closure* done) {
1129
0
    brpc::ClosureGuard guard(done);
1130
0
    [[maybe_unused]] auto* cntl = static_cast<brpc::Controller*>(controller);
1131
0
    TabletManager* tablet_mgr = _engine.tablet_manager();
1132
0
    const auto& params = request->params();
1133
0
    for (const auto& param : params) {
1134
0
        int64_t index_id = param.indexid();
1135
0
        const auto& tablet_ids = param.tablet_ids();
1136
0
        std::set<std::set<int32_t>> filter_set;
1137
0
        std::map<int32_t, const TabletColumn*> id_to_column;
1138
0
        for (const int64_t tablet_id : tablet_ids) {
1139
0
            TabletSharedPtr tablet = tablet_mgr->get_tablet(tablet_id);
1140
0
            if (tablet == nullptr) {
1141
0
                std::stringstream ss;
1142
0
                ss << "cannot get tablet by id:" << tablet_id;
1143
0
                LOG(WARNING) << ss.str();
1144
0
                response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE);
1145
0
                response->mutable_status()->add_error_msgs(ss.str());
1146
0
                return;
1147
0
            }
1148
            // check schema consistency, column ids should be the same
1149
0
            const auto& columns = tablet->tablet_schema()->columns();
1150
1151
0
            std::set<int32_t> column_ids;
1152
0
            for (const auto& col : columns) {
1153
0
                column_ids.insert(col->unique_id());
1154
0
            }
1155
0
            filter_set.insert(std::move(column_ids));
1156
1157
0
            if (id_to_column.empty()) {
1158
0
                for (const auto& col : columns) {
1159
0
                    id_to_column.insert(std::pair {col->unique_id(), col.get()});
1160
0
                }
1161
0
            } else {
1162
0
                for (const auto& col : columns) {
1163
0
                    auto it = id_to_column.find(col->unique_id());
1164
0
                    if (it == id_to_column.end() || *(it->second) != *col) {
1165
0
                        ColumnPB prev_col_pb;
1166
0
                        ColumnPB curr_col_pb;
1167
0
                        if (it != id_to_column.end()) {
1168
0
                            it->second->to_schema_pb(&prev_col_pb);
1169
0
                        }
1170
0
                        col->to_schema_pb(&curr_col_pb);
1171
0
                        std::stringstream ss;
1172
0
                        ss << "consistency check failed: index{ " << index_id << " }"
1173
0
                           << " got inconsistent schema, prev column: " << prev_col_pb.DebugString()
1174
0
                           << " current column: " << curr_col_pb.DebugString();
1175
0
                        LOG(WARNING) << ss.str();
1176
0
                        response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE);
1177
0
                        response->mutable_status()->add_error_msgs(ss.str());
1178
0
                        return;
1179
0
                    }
1180
0
                }
1181
0
            }
1182
0
        }
1183
1184
0
        if (filter_set.size() > 1) {
1185
            // consistecy check failed
1186
0
            std::stringstream ss;
1187
0
            ss << "consistency check failed: index{" << index_id << "}"
1188
0
               << "got inconsistent schema";
1189
0
            LOG(WARNING) << ss.str();
1190
0
            response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE);
1191
0
            response->mutable_status()->add_error_msgs(ss.str());
1192
0
            return;
1193
0
        }
1194
        // consistency check passed, use the first tablet to be the representative
1195
0
        TabletSharedPtr tablet = tablet_mgr->get_tablet(tablet_ids[0]);
1196
0
        const auto& columns = tablet->tablet_schema()->columns();
1197
0
        auto entry = response->add_entries();
1198
0
        entry->set_index_id(index_id);
1199
0
        auto col_name_to_id = entry->mutable_col_name_to_id();
1200
0
        for (const auto& column : columns) {
1201
0
            (*col_name_to_id)[column->name()] = column->unique_id();
1202
0
        }
1203
0
    }
1204
0
    response->mutable_status()->set_status_code(TStatusCode::OK);
1205
0
}
1206
1207
// Bookkeeping for one in-flight asynchronous brpc call: the response slot the
// framework fills in, the per-call controller, and the call id saved so the
// caller can brpc::Join() the call later.
template <class RPCResponse>
struct AsyncRPCContext {
    RPCResponse response;  // filled by the rpc framework on completion
    brpc::Controller cntl; // per-call controller (timeout, error state, peer)
    brpc::CallId cid;      // saved before issuing the call; used for Join()
};
1213
1214
void PInternalService::fetch_remote_tablet_schema(google::protobuf::RpcController* controller,
1215
                                                  const PFetchRemoteSchemaRequest* request,
1216
                                                  PFetchRemoteSchemaResponse* response,
1217
212
                                                  google::protobuf::Closure* done) {
1218
212
    bool ret = _heavy_work_pool.try_offer([request, response, done]() {
1219
212
        brpc::ClosureGuard closure_guard(done);
1220
212
        Status st = Status::OK();
1221
212
        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
1222
212
                MemTrackerLimiter::Type::OTHER,
1223
212
                fmt::format("InternalService::fetch_remote_tablet_schema"));
1224
212
        SCOPED_ATTACH_TASK(mem_tracker);
1225
212
        if (request->is_coordinator()) {
1226
            // Spawn rpc request to none coordinator nodes, and finally merge them all
1227
106
            PFetchRemoteSchemaRequest remote_request(*request);
1228
            // set it none coordinator to get merged schema
1229
106
            remote_request.set_is_coordinator(false);
1230
106
            using PFetchRemoteTabletSchemaRpcContext = AsyncRPCContext<PFetchRemoteSchemaResponse>;
1231
106
            std::vector<PFetchRemoteTabletSchemaRpcContext> rpc_contexts(
1232
106
                    request->tablet_location_size());
1233
212
            for (int i = 0; i < request->tablet_location_size(); ++i) {
1234
106
                std::string host = request->tablet_location(i).host();
1235
106
                int32_t brpc_port = request->tablet_location(i).brpc_port();
1236
106
                std::shared_ptr<PBackendService_Stub> stub(
1237
106
                        ExecEnv::GetInstance()->brpc_internal_client_cache()->get_client(
1238
106
                                host, brpc_port));
1239
106
                if (stub == nullptr) {
1240
0
                    LOG(WARNING) << "Failed to init rpc to " << host << ":" << brpc_port;
1241
0
                    st = Status::InternalError("Failed to init rpc to {}:{}", host, brpc_port);
1242
0
                    continue;
1243
0
                }
1244
106
                rpc_contexts[i].cid = rpc_contexts[i].cntl.call_id();
1245
106
                rpc_contexts[i].cntl.set_timeout_ms(config::fetch_remote_schema_rpc_timeout_ms);
1246
106
                stub->fetch_remote_tablet_schema(&rpc_contexts[i].cntl, &remote_request,
1247
106
                                                 &rpc_contexts[i].response, brpc::DoNothing());
1248
106
            }
1249
106
            std::vector<TabletSchemaSPtr> schemas;
1250
106
            for (auto& rpc_context : rpc_contexts) {
1251
106
                brpc::Join(rpc_context.cid);
1252
106
                if (!st.ok()) {
1253
                    // make sure all flying rpc request is joined
1254
0
                    continue;
1255
0
                }
1256
106
                if (rpc_context.cntl.Failed()) {
1257
0
                    LOG(WARNING) << "fetch_remote_tablet_schema rpc err:"
1258
0
                                 << rpc_context.cntl.ErrorText();
1259
0
                    ExecEnv::GetInstance()->brpc_internal_client_cache()->erase(
1260
0
                            rpc_context.cntl.remote_side());
1261
0
                    st = Status::InternalError("fetch_remote_tablet_schema rpc err: {}",
1262
0
                                               rpc_context.cntl.ErrorText());
1263
0
                }
1264
106
                if (rpc_context.response.status().status_code() != 0) {
1265
0
                    st = Status::create(rpc_context.response.status());
1266
0
                }
1267
106
                if (rpc_context.response.has_merged_schema()) {
1268
106
                    TabletSchemaSPtr schema = std::make_shared<TabletSchema>();
1269
106
                    schema->init_from_pb(rpc_context.response.merged_schema());
1270
106
                    schemas.push_back(schema);
1271
106
                }
1272
106
            }
1273
106
            if (!schemas.empty() && st.ok()) {
1274
                // merge all
1275
106
                TabletSchemaSPtr merged_schema;
1276
106
                st = variant_util::get_least_common_schema(schemas, nullptr, merged_schema);
1277
106
                if (!st.ok()) {
1278
0
                    LOG(WARNING) << "Failed to get least common schema: " << st.to_string();
1279
0
                    st = Status::InternalError("Failed to get least common schema: {}",
1280
0
                                               st.to_string());
1281
0
                }
1282
106
                VLOG_DEBUG << "dump schema:" << merged_schema->dump_structure();
1283
106
                merged_schema->reserve_extracted_columns();
1284
106
                merged_schema->to_schema_pb(response->mutable_merged_schema());
1285
106
            }
1286
106
            st.to_protobuf(response->mutable_status());
1287
106
            return;
1288
106
        } else {
1289
            // This is not a coordinator, get it's tablet and merge schema
1290
106
            std::vector<int64_t> target_tablets;
1291
106
            for (int i = 0; i < request->tablet_location_size(); ++i) {
1292
106
                const auto& location = request->tablet_location(i);
1293
106
                auto backend = BackendOptions::get_local_backend();
1294
                // If this is the target backend
1295
106
                if (backend.host == location.host() && config::brpc_port == location.brpc_port()) {
1296
106
                    target_tablets.assign(location.tablet_id().begin(), location.tablet_id().end());
1297
106
                    break;
1298
106
                }
1299
106
            }
1300
106
            if (!target_tablets.empty()) {
1301
106
                std::vector<TabletSchemaSPtr> tablet_schemas;
1302
1.41k
                for (int64_t tablet_id : target_tablets) {
1303
1.41k
                    auto res = ExecEnv::get_tablet(tablet_id);
1304
1.41k
                    if (!res.has_value()) {
1305
                        // just ignore
1306
0
                        LOG(WARNING) << "tablet does not exist, tablet id is " << tablet_id;
1307
0
                        continue;
1308
0
                    }
1309
1.41k
                    auto tablet = res.value();
1310
1.41k
                    auto rowsets = tablet->get_snapshot_rowset();
1311
1.41k
                    auto schema =
1312
1.41k
                            variant_util::VariantCompactionUtil::calculate_variant_extended_schema(
1313
1.41k
                                    rowsets, tablet->tablet_schema());
1314
1.41k
                    tablet_schemas.push_back(schema);
1315
1.41k
                }
1316
106
                if (!tablet_schemas.empty()) {
1317
                    // merge all
1318
106
                    TabletSchemaSPtr merged_schema;
1319
106
                    st = variant_util::get_least_common_schema(tablet_schemas, nullptr,
1320
106
                                                               merged_schema);
1321
106
                    if (!st.ok()) {
1322
0
                        LOG(WARNING) << "Failed to get least common schema: " << st.to_string();
1323
0
                        st = Status::InternalError("Failed to get least common schema: {}",
1324
0
                                                   st.to_string());
1325
0
                    }
1326
106
                    merged_schema->to_schema_pb(response->mutable_merged_schema());
1327
106
                    VLOG_DEBUG << "dump schema:" << merged_schema->dump_structure();
1328
106
                }
1329
106
            }
1330
106
            st.to_protobuf(response->mutable_status());
1331
106
        }
1332
212
    });
1333
212
    if (!ret) {
1334
0
        offer_failed(response, done, _heavy_work_pool);
1335
0
    }
1336
212
}
1337
1338
void PInternalService::report_stream_load_status(google::protobuf::RpcController* controller,
1339
                                                 const PReportStreamLoadStatusRequest* request,
1340
                                                 PReportStreamLoadStatusResponse* response,
1341
0
                                                 google::protobuf::Closure* done) {
1342
0
    TUniqueId load_id;
1343
0
    load_id.__set_hi(request->load_id().hi());
1344
0
    load_id.__set_lo(request->load_id().lo());
1345
0
    Status st = Status::OK();
1346
0
    auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id);
1347
0
    if (!stream_load_ctx) {
1348
0
        st = Status::InternalError("unknown stream load id: {}", UniqueId(load_id).to_string());
1349
0
    }
1350
0
    stream_load_ctx->load_status_promise.set_value(st);
1351
0
    st.to_protobuf(response->mutable_status());
1352
0
}
1353
1354
void PInternalService::get_info(google::protobuf::RpcController* controller,
1355
                                const PProxyRequest* request, PProxyResult* response,
1356
426
                                google::protobuf::Closure* done) {
1357
426
    bool ret = _exec_env->routine_load_task_executor()->get_thread_pool().submit_func([this,
1358
426
                                                                                       request,
1359
426
                                                                                       response,
1360
426
                                                                                       done]() {
1361
426
        brpc::ClosureGuard closure_guard(done);
1362
        // PProxyRequest is defined in gensrc/proto/internal_service.proto
1363
        // Currently it supports 2 kinds of requests:
1364
        // 1. get all kafka partition ids for given topic
1365
        // 2. get all kafka partition offsets for given topic and timestamp.
1366
426
        int timeout_ms = request->has_timeout_secs() ? request->timeout_secs() * 1000 : 60 * 1000;
1367
426
        if (request->has_kafka_meta_request()) {
1368
426
            const PKafkaMetaProxyRequest& kafka_request = request->kafka_meta_request();
1369
426
            if (!kafka_request.offset_flags().empty()) {
1370
65
                std::vector<PIntegerPair> partition_offsets;
1371
65
                Status st = _exec_env->routine_load_task_executor()
1372
65
                                    ->get_kafka_real_offsets_for_partitions(
1373
65
                                            request->kafka_meta_request(), &partition_offsets,
1374
65
                                            timeout_ms);
1375
65
                if (st.ok()) {
1376
65
                    PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets();
1377
65
                    for (const auto& entry : partition_offsets) {
1378
65
                        PIntegerPair* res = part_offsets->add_offset_times();
1379
65
                        res->set_key(entry.key());
1380
65
                        res->set_val(entry.val());
1381
65
                    }
1382
65
                }
1383
65
                st.to_protobuf(response->mutable_status());
1384
65
                return;
1385
361
            } else if (!kafka_request.partition_id_for_latest_offsets().empty()) {
1386
                // get latest offsets for specified partition ids
1387
286
                std::vector<PIntegerPair> partition_offsets;
1388
286
                Status st = _exec_env->routine_load_task_executor()
1389
286
                                    ->get_kafka_latest_offsets_for_partitions(
1390
286
                                            request->kafka_meta_request(), &partition_offsets,
1391
286
                                            timeout_ms);
1392
286
                if (st.ok()) {
1393
286
                    PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets();
1394
286
                    for (const auto& entry : partition_offsets) {
1395
286
                        PIntegerPair* res = part_offsets->add_offset_times();
1396
286
                        res->set_key(entry.key());
1397
286
                        res->set_val(entry.val());
1398
286
                    }
1399
286
                }
1400
286
                st.to_protobuf(response->mutable_status());
1401
286
                return;
1402
286
            } else if (!kafka_request.offset_times().empty()) {
1403
                // if offset_times() has elements, which means this request is to get offset by timestamp.
1404
1
                std::vector<PIntegerPair> partition_offsets;
1405
1
                Status st = _exec_env->routine_load_task_executor()
1406
1
                                    ->get_kafka_partition_offsets_for_times(
1407
1
                                            request->kafka_meta_request(), &partition_offsets,
1408
1
                                            timeout_ms);
1409
1
                if (st.ok()) {
1410
1
                    PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets();
1411
1
                    for (const auto& entry : partition_offsets) {
1412
1
                        PIntegerPair* res = part_offsets->add_offset_times();
1413
1
                        res->set_key(entry.key());
1414
1
                        res->set_val(entry.val());
1415
1
                    }
1416
1
                }
1417
1
                st.to_protobuf(response->mutable_status());
1418
1
                return;
1419
74
            } else {
1420
                // get partition ids of topic
1421
74
                std::vector<int32_t> partition_ids;
1422
74
                Status st = _exec_env->routine_load_task_executor()->get_kafka_partition_meta(
1423
74
                        request->kafka_meta_request(), &partition_ids);
1424
74
                if (st.ok()) {
1425
71
                    PKafkaMetaProxyResult* kafka_result = response->mutable_kafka_meta_result();
1426
71
                    for (int32_t id : partition_ids) {
1427
71
                        kafka_result->add_partition_ids(id);
1428
71
                    }
1429
71
                }
1430
74
                st.to_protobuf(response->mutable_status());
1431
74
                return;
1432
74
            }
1433
426
        }
1434
0
        Status::OK().to_protobuf(response->mutable_status());
1435
0
    });
1436
426
    if (!ret) {
1437
0
        offer_failed(response, done, _heavy_work_pool);
1438
0
        return;
1439
0
    }
1440
426
}
1441
1442
void PInternalService::update_cache(google::protobuf::RpcController* controller,
1443
                                    const PUpdateCacheRequest* request, PCacheResponse* response,
1444
64.7k
                                    google::protobuf::Closure* done) {
1445
64.7k
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1446
64.7k
        brpc::ClosureGuard closure_guard(done);
1447
64.7k
        _exec_env->result_cache()->update(request, response);
1448
64.7k
    });
1449
64.7k
    if (!ret) {
1450
0
        offer_failed(response, done, _light_work_pool);
1451
0
        return;
1452
0
    }
1453
64.7k
}
1454
1455
void PInternalService::fetch_cache(google::protobuf::RpcController* controller,
1456
                                   const PFetchCacheRequest* request, PFetchCacheResult* result,
1457
3.77k
                                   google::protobuf::Closure* done) {
1458
3.77k
    bool ret = _light_work_pool.try_offer([this, request, result, done]() {
1459
3.77k
        brpc::ClosureGuard closure_guard(done);
1460
3.77k
        _exec_env->result_cache()->fetch(request, result);
1461
3.77k
    });
1462
3.77k
    if (!ret) {
1463
0
        offer_failed(result, done, _light_work_pool);
1464
0
        return;
1465
0
    }
1466
3.77k
}
1467
1468
void PInternalService::clear_cache(google::protobuf::RpcController* controller,
1469
                                   const PClearCacheRequest* request, PCacheResponse* response,
1470
0
                                   google::protobuf::Closure* done) {
1471
0
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1472
0
        brpc::ClosureGuard closure_guard(done);
1473
0
        _exec_env->result_cache()->clear(request, response);
1474
0
    });
1475
0
    if (!ret) {
1476
0
        offer_failed(response, done, _light_work_pool);
1477
0
        return;
1478
0
    }
1479
0
}
1480
1481
void PInternalService::merge_filter(::google::protobuf::RpcController* controller,
1482
                                    const ::doris::PMergeFilterRequest* request,
1483
                                    ::doris::PMergeFilterResponse* response,
1484
2.49k
                                    ::google::protobuf::Closure* done) {
1485
2.50k
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
1486
2.50k
        signal::SignalTaskIdKeeper keeper(request->query_id());
1487
2.50k
        brpc::ClosureGuard closure_guard(done);
1488
2.50k
        auto attachment = static_cast<brpc::Controller*>(controller)->request_attachment();
1489
2.50k
        butil::IOBufAsZeroCopyInputStream zero_copy_input_stream(attachment);
1490
2.50k
        Status st;
1491
2.50k
        try {
1492
2.50k
            st = _exec_env->fragment_mgr()->merge_filter(request, &zero_copy_input_stream);
1493
2.50k
        } catch (Exception& e) {
1494
0
            st = e.to_status();
1495
0
        }
1496
2.50k
        st.to_protobuf(response->mutable_status());
1497
2.50k
    });
1498
2.49k
    if (!ret) {
1499
0
        offer_failed(response, done, _light_work_pool);
1500
0
        return;
1501
0
    }
1502
2.49k
}
1503
1504
void PInternalService::send_filter_size(::google::protobuf::RpcController* controller,
1505
                                        const ::doris::PSendFilterSizeRequest* request,
1506
                                        ::doris::PSendFilterSizeResponse* response,
1507
71
                                        ::google::protobuf::Closure* done) {
1508
71
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1509
71
        signal::SignalTaskIdKeeper keeper(request->query_id());
1510
71
        brpc::ClosureGuard closure_guard(done);
1511
71
        Status st;
1512
71
        try {
1513
71
            st = _exec_env->fragment_mgr()->send_filter_size(request);
1514
71
        } catch (Exception& e) {
1515
0
            st = e.to_status();
1516
0
        }
1517
71
        st.to_protobuf(response->mutable_status());
1518
71
    });
1519
71
    if (!ret) {
1520
0
        offer_failed(response, done, _light_work_pool);
1521
0
        return;
1522
0
    }
1523
71
}
1524
1525
void PInternalService::sync_filter_size(::google::protobuf::RpcController* controller,
1526
                                        const ::doris::PSyncFilterSizeRequest* request,
1527
                                        ::doris::PSyncFilterSizeResponse* response,
1528
71
                                        ::google::protobuf::Closure* done) {
1529
71
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1530
71
        signal::SignalTaskIdKeeper keeper(request->query_id());
1531
71
        brpc::ClosureGuard closure_guard(done);
1532
71
        Status st;
1533
71
        try {
1534
71
            st = _exec_env->fragment_mgr()->sync_filter_size(request);
1535
71
        } catch (Exception& e) {
1536
0
            st = e.to_status();
1537
0
        }
1538
71
        st.to_protobuf(response->mutable_status());
1539
71
    });
1540
71
    if (!ret) {
1541
0
        offer_failed(response, done, _light_work_pool);
1542
0
        return;
1543
0
    }
1544
71
}
1545
1546
void PInternalService::apply_filterv2(::google::protobuf::RpcController* controller,
1547
                                      const ::doris::PPublishFilterRequestV2* request,
1548
                                      ::doris::PPublishFilterResponse* response,
1549
1.60k
                                      ::google::protobuf::Closure* done) {
1550
1.60k
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
1551
1.60k
        signal::SignalTaskIdKeeper keeper(request->query_id());
1552
1.60k
        brpc::ClosureGuard closure_guard(done);
1553
1.60k
        auto attachment = static_cast<brpc::Controller*>(controller)->request_attachment();
1554
1.60k
        butil::IOBufAsZeroCopyInputStream zero_copy_input_stream(attachment);
1555
1.60k
        VLOG_NOTICE << "rpc apply_filterv2 recv";
1556
1.60k
        Status st;
1557
1.60k
        try {
1558
1.60k
            st = _exec_env->fragment_mgr()->apply_filterv2(request, &zero_copy_input_stream);
1559
1.60k
        } catch (Exception& e) {
1560
0
            st = e.to_status();
1561
0
        }
1562
1.60k
        if (!st.ok()) {
1563
0
            LOG(WARNING) << "apply filter meet error: " << st.to_string();
1564
0
        }
1565
1.59k
        st.to_protobuf(response->mutable_status());
1566
1.59k
    });
1567
1.60k
    if (!ret) {
1568
0
        offer_failed(response, done, _light_work_pool);
1569
0
        return;
1570
0
    }
1571
1.60k
}
1572
1573
void PInternalService::send_data(google::protobuf::RpcController* controller,
1574
                                 const PSendDataRequest* request, PSendDataResult* response,
1575
43
                                 google::protobuf::Closure* done) {
1576
43
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
1577
43
        brpc::ClosureGuard closure_guard(done);
1578
43
        TUniqueId load_id;
1579
43
        load_id.hi = request->load_id().hi();
1580
43
        load_id.lo = request->load_id().lo();
1581
        // On 1.2.3 we add load id to send data request and using load id to get pipe
1582
43
        auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id);
1583
43
        if (stream_load_ctx == nullptr) {
1584
0
            response->mutable_status()->set_status_code(1);
1585
0
            response->mutable_status()->add_error_msgs("could not find stream load context");
1586
43
        } else {
1587
43
            auto pipe = stream_load_ctx->pipe;
1588
154
            for (int i = 0; i < request->data_size(); ++i) {
1589
111
                std::unique_ptr<PDataRow> row(new PDataRow());
1590
111
                row->CopyFrom(request->data(i));
1591
111
                Status s = pipe->append(std::move(row));
1592
111
                if (!s.ok()) {
1593
0
                    response->mutable_status()->set_status_code(1);
1594
0
                    response->mutable_status()->add_error_msgs(s.to_string());
1595
0
                    return;
1596
0
                }
1597
111
            }
1598
43
            response->mutable_status()->set_status_code(0);
1599
43
        }
1600
43
    });
1601
43
    if (!ret) {
1602
0
        offer_failed(response, done, _heavy_work_pool);
1603
0
        return;
1604
0
    }
1605
43
}
1606
1607
void PInternalService::commit(google::protobuf::RpcController* controller,
1608
                              const PCommitRequest* request, PCommitResult* response,
1609
43
                              google::protobuf::Closure* done) {
1610
43
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
1611
43
        brpc::ClosureGuard closure_guard(done);
1612
43
        TUniqueId load_id;
1613
43
        load_id.hi = request->load_id().hi();
1614
43
        load_id.lo = request->load_id().lo();
1615
1616
43
        auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id);
1617
43
        if (stream_load_ctx == nullptr) {
1618
0
            response->mutable_status()->set_status_code(1);
1619
0
            response->mutable_status()->add_error_msgs("could not find stream load context");
1620
43
        } else {
1621
43
            static_cast<void>(stream_load_ctx->pipe->finish());
1622
43
            response->mutable_status()->set_status_code(0);
1623
43
        }
1624
43
    });
1625
43
    if (!ret) {
1626
0
        offer_failed(response, done, _heavy_work_pool);
1627
0
        return;
1628
0
    }
1629
43
}
1630
1631
void PInternalService::rollback(google::protobuf::RpcController* controller,
1632
                                const PRollbackRequest* request, PRollbackResult* response,
1633
5
                                google::protobuf::Closure* done) {
1634
5
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
1635
5
        brpc::ClosureGuard closure_guard(done);
1636
5
        TUniqueId load_id;
1637
5
        load_id.hi = request->load_id().hi();
1638
5
        load_id.lo = request->load_id().lo();
1639
5
        auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id);
1640
5
        if (stream_load_ctx == nullptr) {
1641
0
            response->mutable_status()->set_status_code(1);
1642
0
            response->mutable_status()->add_error_msgs("could not find stream load context");
1643
5
        } else {
1644
5
            stream_load_ctx->pipe->cancel("rollback");
1645
5
            response->mutable_status()->set_status_code(0);
1646
5
        }
1647
5
    });
1648
5
    if (!ret) {
1649
0
        offer_failed(response, done, _heavy_work_pool);
1650
0
        return;
1651
0
    }
1652
5
}
1653
1654
void PInternalService::fold_constant_expr(google::protobuf::RpcController* controller,
1655
                                          const PConstantExprRequest* request,
1656
                                          PConstantExprResult* response,
1657
570
                                          google::protobuf::Closure* done) {
1658
570
    bool ret = _light_work_pool.try_offer([request, response, done]() {
1659
570
        brpc::ClosureGuard closure_guard(done);
1660
570
        TFoldConstantParams t_request;
1661
570
        Status st = Status::OK();
1662
570
        {
1663
570
            const uint8_t* buf = (const uint8_t*)request->request().data();
1664
570
            uint32_t len = request->request().size();
1665
570
            st = deserialize_thrift_msg(buf, &len, false, &t_request);
1666
570
        }
1667
570
        if (!st.ok()) {
1668
0
            LOG(WARNING) << "exec fold constant expr failed, errmsg=" << st
1669
0
                         << " .and query_id_is: " << t_request.query_id;
1670
0
            st.to_protobuf(response->mutable_status());
1671
0
            return;
1672
0
        }
1673
570
        auto fold_func = [&]() -> Status {
1674
570
            std::unique_ptr<FoldConstantExecutor> fold_executor =
1675
570
                    std::make_unique<FoldConstantExecutor>();
1676
570
            RETURN_IF_ERROR_OR_CATCH_EXCEPTION(
1677
570
                    fold_executor->fold_constant_vexpr(t_request, response));
1678
532
            return Status::OK();
1679
570
        };
1680
570
        st = fold_func();
1681
570
        if (!st.ok()) {
1682
38
            LOG(WARNING) << "exec fold constant expr failed, errmsg=" << st
1683
38
                         << " .and query_id_is: " << t_request.query_id;
1684
38
        }
1685
570
        st.to_protobuf(response->mutable_status());
1686
570
    });
1687
570
    if (!ret) {
1688
0
        offer_failed(response, done, _light_work_pool);
1689
0
        return;
1690
0
    }
1691
570
}
1692
1693
void PInternalService::transmit_rec_cte_block(google::protobuf::RpcController* controller,
1694
                                              const PTransmitRecCTEBlockParams* request,
1695
                                              PTransmitRecCTEBlockResult* response,
1696
3.59k
                                              google::protobuf::Closure* done) {
1697
3.59k
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1698
3.59k
        brpc::ClosureGuard closure_guard(done);
1699
3.59k
        auto st = _exec_env->fragment_mgr()->transmit_rec_cte_block(
1700
3.59k
                UniqueId(request->query_id()).to_thrift(),
1701
3.59k
                UniqueId(request->fragment_instance_id()).to_thrift(), request->node_id(),
1702
3.59k
                request->blocks(), request->eos());
1703
3.59k
        st.to_protobuf(response->mutable_status());
1704
3.59k
    });
1705
3.59k
    if (!ret) {
1706
0
        offer_failed(response, done, _light_work_pool);
1707
0
        return;
1708
0
    }
1709
3.59k
}
1710
1711
void PInternalService::rerun_fragment(google::protobuf::RpcController* controller,
1712
                                      const PRerunFragmentParams* request,
1713
                                      PRerunFragmentResult* response,
1714
9.72k
                                      google::protobuf::Closure* done) {
1715
9.72k
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1716
        // Use shared_ptr<ClosureGuard> so we can transfer ownership to the PFC.
1717
        // For wait_for_destroy/final_close, the guard is stored in the PFC and the RPC
1718
        // response is deferred until the PFC is fully destroyed. For rebuild/submit,
1719
        // the guard fires immediately when this lambda returns.
1720
9.72k
        std::shared_ptr<brpc::ClosureGuard> closure_guard =
1721
9.72k
                std::make_shared<brpc::ClosureGuard>(done);
1722
9.72k
        auto st = _exec_env->fragment_mgr()->rerun_fragment(
1723
9.72k
                closure_guard, UniqueId(request->query_id()).to_thrift(), request->fragment_id(),
1724
9.72k
                request->stage());
1725
9.72k
        st.to_protobuf(response->mutable_status());
1726
9.72k
    });
1727
9.72k
    if (!ret) {
1728
0
        offer_failed(response, done, _light_work_pool);
1729
0
        return;
1730
0
    }
1731
9.72k
}
1732
1733
void PInternalService::reset_global_rf(google::protobuf::RpcController* controller,
1734
                                       const PResetGlobalRfParams* request,
1735
                                       PResetGlobalRfResult* response,
1736
1.74k
                                       google::protobuf::Closure* done) {
1737
1.74k
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1738
1.74k
        brpc::ClosureGuard closure_guard(done);
1739
1.74k
        auto st = _exec_env->fragment_mgr()->reset_global_rf(
1740
1.74k
                UniqueId(request->query_id()).to_thrift(), request->filter_ids());
1741
1.74k
        st.to_protobuf(response->mutable_status());
1742
1.74k
    });
1743
1.74k
    if (!ret) {
1744
0
        offer_failed(response, done, _light_work_pool);
1745
0
        return;
1746
0
    }
1747
1.74k
}
1748
1749
void PInternalService::transmit_block(google::protobuf::RpcController* controller,
1750
                                      const PTransmitDataParams* request,
1751
                                      PTransmitDataResult* response,
1752
2.45M
                                      google::protobuf::Closure* done) {
1753
2.45M
    int64_t receive_time = GetCurrentTimeNanos();
1754
2.47M
    if (config::enable_bthread_transmit_block) {
1755
2.47M
        response->set_receive_time(receive_time);
1756
        // under high concurrency, thread pool will have a lot of lock contention.
1757
        // May offer failed to the thread pool, so that we should avoid using thread
1758
        // pool here.
1759
2.47M
        _transmit_block(controller, request, response, done, Status::OK(), 0);
1760
18.4E
    } else {
1761
18.4E
        bool ret = _light_work_pool.try_offer([this, controller, request, response, done,
1762
18.4E
                                               receive_time]() {
1763
0
            response->set_receive_time(receive_time);
1764
            // Sometimes transmit block function is the last owner of PlanFragmentExecutor
1765
            // It will release the object. And the object maybe a JNIContext.
1766
            // JNIContext will hold some TLS object. It could not work correctly under bthread
1767
            // Context. So that put the logic into pthread.
1768
            // But this is rarely happens, so this config is disabled by default.
1769
0
            _transmit_block(controller, request, response, done, Status::OK(),
1770
0
                            GetCurrentTimeNanos() - receive_time);
1771
0
        });
1772
18.4E
        if (!ret) {
1773
0
            offer_failed(response, done, _light_work_pool);
1774
0
            return;
1775
0
        }
1776
18.4E
    }
1777
2.45M
}
1778
1779
void PInternalService::transmit_block_by_http(google::protobuf::RpcController* controller,
1780
                                              const PEmptyRequest* request,
1781
                                              PTransmitDataResult* response,
1782
0
                                              google::protobuf::Closure* done) {
1783
0
    int64_t receive_time = GetCurrentTimeNanos();
1784
0
    bool ret = _heavy_work_pool.try_offer([this, controller, response, done, receive_time]() {
1785
0
        PTransmitDataParams* new_request = new PTransmitDataParams();
1786
0
        google::protobuf::Closure* new_done =
1787
0
                new NewHttpClosure<PTransmitDataParams>(new_request, done);
1788
0
        brpc::Controller* cntl = static_cast<brpc::Controller*>(controller);
1789
0
        Status st =
1790
0
                attachment_extract_request_contain_block<PTransmitDataParams>(new_request, cntl);
1791
0
        _transmit_block(controller, new_request, response, new_done, st,
1792
0
                        GetCurrentTimeNanos() - receive_time);
1793
0
    });
1794
0
    if (!ret) {
1795
0
        offer_failed(response, done, _heavy_work_pool);
1796
0
        return;
1797
0
    }
1798
0
}
1799
1800
// Shared implementation behind transmit_block / transmit_block_by_http.
// `extract_st` is the status of attachment extraction (OK for the non-http path);
// `wait_for_worker` is how long the task waited in a pool before running (ns).
// NOTE: vstream_mgr()->transmit_block takes `&done` and may null it out when it
// takes ownership of the callback (deferred response), so `done` must be
// re-checked before running it here.
void PInternalService::_transmit_block(google::protobuf::RpcController* controller,
                                       const PTransmitDataParams* request,
                                       PTransmitDataResult* response,
                                       google::protobuf::Closure* done, const Status& extract_st,
                                       const int64_t wait_for_worker) {
    if (request->has_query_id()) {
        VLOG_ROW << "transmit block: fragment_instance_id=" << print_id(request->finst_id())
                 << " query_id=" << print_id(request->query_id()) << " node=" << request->node_id();
    }

    // The response is accessed when done->Run is called in transmit_block(),
    // give response a default value to avoid null pointers in high concurrency.
    Status st;
    if (extract_st.ok()) {
        st = _exec_env->vstream_mgr()->transmit_block(request, &done, wait_for_worker);
        // END_OF_FILE is the normal stream-finished signal, not an error worth logging.
        if (!st.ok() && !st.is<END_OF_FILE>()) {
            LOG(WARNING) << "transmit_block failed, message=" << st
                         << ", fragment_instance_id=" << print_id(request->finst_id())
                         << ", node=" << request->node_id()
                         << ", from sender_id: " << request->sender_id()
                         << ", be_number: " << request->be_number()
                         << ", packet_seq: " << request->packet_seq();
        }
    } else {
        // Attachment extraction failed upstream; report that status to the sender.
        st = extract_st;
    }
    // Only answer the RPC if transmit_block did not take ownership of `done`.
    if (done != nullptr) {
        st.to_protobuf(response->mutable_status());
        done->Run();
    }
}
1831
1832
void PInternalService::check_rpc_channel(google::protobuf::RpcController* controller,
1833
                                         const PCheckRPCChannelRequest* request,
1834
                                         PCheckRPCChannelResponse* response,
1835
0
                                         google::protobuf::Closure* done) {
1836
0
    bool ret = _light_work_pool.try_offer([request, response, done]() {
1837
0
        brpc::ClosureGuard closure_guard(done);
1838
0
        response->mutable_status()->set_status_code(0);
1839
0
        if (request->data().size() != request->size()) {
1840
0
            std::stringstream ss;
1841
0
            ss << "data size not same, expected: " << request->size()
1842
0
               << ", actual: " << request->data().size();
1843
0
            response->mutable_status()->add_error_msgs(ss.str());
1844
0
            response->mutable_status()->set_status_code(1);
1845
1846
0
        } else {
1847
0
            Md5Digest digest;
1848
0
            digest.update(static_cast<const void*>(request->data().c_str()),
1849
0
                          request->data().size());
1850
0
            digest.digest();
1851
0
            if (!iequal(digest.hex(), request->md5())) {
1852
0
                std::stringstream ss;
1853
0
                ss << "md5 not same, expected: " << request->md5() << ", actual: " << digest.hex();
1854
0
                response->mutable_status()->add_error_msgs(ss.str());
1855
0
                response->mutable_status()->set_status_code(1);
1856
0
            }
1857
0
        }
1858
0
    });
1859
0
    if (!ret) {
1860
0
        offer_failed(response, done, _light_work_pool);
1861
0
        return;
1862
0
    }
1863
0
}
1864
1865
void PInternalService::reset_rpc_channel(google::protobuf::RpcController* controller,
1866
                                         const PResetRPCChannelRequest* request,
1867
                                         PResetRPCChannelResponse* response,
1868
0
                                         google::protobuf::Closure* done) {
1869
0
    bool ret = _light_work_pool.try_offer([request, response, done]() {
1870
0
        brpc::ClosureGuard closure_guard(done);
1871
0
        response->mutable_status()->set_status_code(0);
1872
0
        if (request->all()) {
1873
0
            int size = ExecEnv::GetInstance()->brpc_internal_client_cache()->size();
1874
0
            if (size > 0) {
1875
0
                std::vector<std::string> endpoints;
1876
0
                ExecEnv::GetInstance()->brpc_internal_client_cache()->get_all(&endpoints);
1877
0
                ExecEnv::GetInstance()->brpc_internal_client_cache()->clear();
1878
0
                *response->mutable_channels() = {endpoints.begin(), endpoints.end()};
1879
0
            }
1880
0
        } else {
1881
0
            for (const std::string& endpoint : request->endpoints()) {
1882
0
                if (!ExecEnv::GetInstance()->brpc_internal_client_cache()->exist(endpoint)) {
1883
0
                    response->mutable_status()->add_error_msgs(endpoint + ": not found.");
1884
0
                    continue;
1885
0
                }
1886
1887
0
                if (ExecEnv::GetInstance()->brpc_internal_client_cache()->erase(endpoint)) {
1888
0
                    response->add_channels(endpoint);
1889
0
                } else {
1890
0
                    response->mutable_status()->add_error_msgs(endpoint + ": reset failed.");
1891
0
                }
1892
0
            }
1893
0
            if (request->endpoints_size() != response->channels_size()) {
1894
0
                response->mutable_status()->set_status_code(1);
1895
0
            }
1896
0
        }
1897
0
    });
1898
0
    if (!ret) {
1899
0
        offer_failed(response, done, _light_work_pool);
1900
0
        return;
1901
0
    }
1902
0
}
1903
1904
void PInternalService::hand_shake(google::protobuf::RpcController* controller,
1905
                                  const PHandShakeRequest* request, PHandShakeResponse* response,
1906
2.61k
                                  google::protobuf::Closure* done) {
1907
    // The light pool may be full. Handshake is used to check the connection state of brpc.
1908
    // Should not be interfered by the thread pool logic.
1909
2.61k
    brpc::ClosureGuard closure_guard(done);
1910
2.61k
    if (request->has_hello()) {
1911
2.61k
        response->set_hello(request->hello());
1912
2.61k
    }
1913
2.61k
    response->mutable_status()->set_status_code(0);
1914
2.61k
}
1915
1916
constexpr char HttpProtocol[] = "http://";
constexpr char DownloadApiPath[] = "/api/_tablet/_download?token=";
constexpr char FileParam[] = "&file=";

// Builds the URL a slave replica uses to download a tablet file from its peer:
//   http://<host_port>/api/_tablet/_download?token=<token>&file=<path>
static std::string construct_url(const std::string& host_port, const std::string& token,
                                 const std::string& path) {
    std::string url;
    // sizeof includes the NUL terminators, so this slightly over-reserves; fine.
    url.reserve(sizeof(HttpProtocol) + sizeof(DownloadApiPath) + sizeof(FileParam) +
                host_port.size() + token.size() + path.size());
    url.append(HttpProtocol).append(host_port).append(DownloadApiPath);
    url.append(token).append(FileParam).append(path);
    return url;
}
1925
1926
// Downloads `remote_file_url` to `local_file_path` with up to
// DOWNLOAD_FILE_MAX_RETRY attempts. If `file_size` is non-zero, the downloaded
// length is verified against it. On success the file permissions are tightened
// to owner read/write. `estimate_timeout` is in seconds.
static Status download_file_action(std::string& remote_file_url, std::string& local_file_path,
                                   uint64_t estimate_timeout, uint64_t file_size) {
    // Capture by value: the callback may be re-invoked by the retry loop.
    auto download_cb = [remote_file_url, estimate_timeout, local_file_path,
                        file_size](HttpClient* client) {
        RETURN_IF_ERROR(client->init(remote_file_url));
        client->set_timeout_ms(estimate_timeout * 1000);
        RETURN_IF_ERROR(client->download(local_file_path));

        if (file_size > 0) {
            // Check file length. Use the non-throwing std::error_code overload:
            // a missing/unstatable file must surface as a Status so the retry
            // machinery can handle it, not as a filesystem_error exception.
            std::error_code ec;
            uint64_t local_file_size = std::filesystem::file_size(local_file_path, ec);
            if (ec) {
                LOG(WARNING) << "failed to pull rowset for slave replica. stat downloaded "
                                "file failed"
                             << ", remote_path=" << remote_file_url
                             << ", local_path=" << local_file_path
                             << ", error=" << ec.message();
                return Status::InternalError("get downloaded file size failed");
            }
            if (local_file_size != file_size) {
                LOG(WARNING) << "failed to pull rowset for slave replica. download file "
                                "length error"
                             << ", remote_path=" << remote_file_url << ", file_size=" << file_size
                             << ", local_file_size=" << local_file_size;
                return Status::InternalError("downloaded file size is not equal");
            }
        }

        // Restrict the pulled file to owner read/write.
        return io::global_local_filesystem()->permission(local_file_path,
                                                         io::LocalFileSystem::PERMS_OWNER_RW);
    };
    return HttpClient::execute_with_retry(DOWNLOAD_FILE_MAX_RETRY, 1, download_cb);
}
1951
1952
// Slave-replica side of single-replica load: pull a committed rowset (segments
// plus any inverted index files) from the master replica over HTTP, then commit
// the transaction locally. Every exit path reports success/failure back to the
// master via _response_pull_slave_rowset. The RPC itself is answered
// immediately (status OK); the pull runs asynchronously on the heavy pool.
void PInternalServiceImpl::request_slave_tablet_pull_rowset(
        google::protobuf::RpcController* controller, const PTabletWriteSlaveRequest* request,
        PTabletWriteSlaveResult* response, google::protobuf::Closure* done) {
    brpc::ClosureGuard closure_guard(done);
    const RowsetMetaPB& rowset_meta_pb = request->rowset_meta();
    const std::string& rowset_path = request->rowset_path();
    google::protobuf::Map<int64_t, int64_t> segments_size = request->segments_size();
    google::protobuf::Map<int64_t, PTabletWriteSlaveRequest_IndexSizeMap> indices_size =
            request->inverted_indices_size();
    std::string host = request->host();
    int64_t http_port = request->http_port();
    int64_t brpc_port = request->brpc_port();
    std::string token = request->token();
    int64_t node_id = request->node_id();
    // Copy everything needed into the lambda by value: `request` is owned by the
    // RPC and dies when `done` runs, before this background task may execute.
    bool ret = _heavy_work_pool.try_offer([rowset_meta_pb, host, brpc_port, node_id, segments_size,
                                           indices_size, http_port, token, rowset_path, this]() {
        TabletSharedPtr tablet = _engine.tablet_manager()->get_tablet(
                rowset_meta_pb.tablet_id(), rowset_meta_pb.tablet_schema_hash());
        if (tablet == nullptr) {
            LOG(WARNING) << "failed to pull rowset for slave replica. tablet ["
                         << rowset_meta_pb.tablet_id()
                         << "] is not exist. txn_id=" << rowset_meta_pb.txn_id();
            _response_pull_slave_rowset(host, brpc_port, rowset_meta_pb.txn_id(),
                                        rowset_meta_pb.tablet_id(), node_id, false);
            return;
        }

        // Round-trip the protobuf meta through a string to build the local
        // RowsetMeta object.
        RowsetMetaSharedPtr rowset_meta(new RowsetMeta());
        std::string rowset_meta_str;
        bool ret = rowset_meta_pb.SerializeToString(&rowset_meta_str);
        if (!ret) {
            LOG(WARNING) << "failed to pull rowset for slave replica. serialize rowset meta "
                            "failed. rowset_id="
                         << rowset_meta_pb.rowset_id()
                         << ", tablet_id=" << rowset_meta_pb.tablet_id()
                         << ", txn_id=" << rowset_meta_pb.txn_id();
            _response_pull_slave_rowset(host, brpc_port, rowset_meta_pb.txn_id(),
                                        rowset_meta_pb.tablet_id(), node_id, false);
            return;
        }
        bool parsed = rowset_meta->init(rowset_meta_str);
        if (!parsed) {
            LOG(WARNING) << "failed to pull rowset for slave replica. parse rowset meta string "
                            "failed. rowset_id="
                         << rowset_meta_pb.rowset_id()
                         << ", tablet_id=" << rowset_meta_pb.tablet_id()
                         << ", txn_id=" << rowset_meta_pb.txn_id();
            // return false will break meta iterator, return true to skip this error
            _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
                                        rowset_meta->tablet_id(), node_id, false);
            return;
        }
        RowsetId remote_rowset_id = rowset_meta->rowset_id();
        // change rowset id because it maybe same as other local rowset
        RowsetId new_rowset_id = _engine.next_rowset_id();
        // Register the new id as pending so it is not garbage-collected mid-pull.
        auto pending_rs_guard = _engine.pending_local_rowsets().add(new_rowset_id);
        rowset_meta->set_rowset_id(new_rowset_id);
        rowset_meta->set_tablet_uid(tablet->tablet_uid());
        VLOG_CRITICAL << "succeed to init rowset meta for slave replica. rowset_id="
                      << rowset_meta->rowset_id() << ", tablet_id=" << rowset_meta->tablet_id()
                      << ", txn_id=" << rowset_meta->txn_id();

        auto tablet_scheme = rowset_meta->tablet_schema();
        // Download each segment file (and its inverted index files, if any).
        for (const auto& segment : segments_size) {
            uint64_t file_size = segment.second;
            // Scale the HTTP timeout to the file size at the configured minimum
            // download speed, with a configured floor.
            uint64_t estimate_timeout = file_size / config::download_low_speed_limit_kbps / 1024;
            if (estimate_timeout < config::download_low_speed_time) {
                estimate_timeout = config::download_low_speed_time;
            }

            // Remote path uses the original rowset id; local path uses the new one.
            std::string remote_file_path =
                    local_segment_path(rowset_path, remote_rowset_id.to_string(), segment.first);
            std::string remote_file_url =
                    construct_url(get_host_port(host, http_port), token, remote_file_path);

            std::string local_file_path = local_segment_path(
                    tablet->tablet_path(), rowset_meta->rowset_id().to_string(), segment.first);

            auto st = download_file_action(remote_file_url, local_file_path, estimate_timeout,
                                           file_size);
            if (!st.ok()) {
                LOG(WARNING) << "failed to pull rowset for slave replica. failed to download "
                                "file. url="
                             << remote_file_url << ", local_path=" << local_file_path
                             << ", txn_id=" << rowset_meta->txn_id();
                _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
                                            rowset_meta->tablet_id(), node_id, false);
                return;
            }
            VLOG_CRITICAL << "succeed to download file for slave replica. url=" << remote_file_url
                          << ", local_path=" << local_file_path
                          << ", txn_id=" << rowset_meta->txn_id();
            if (indices_size.find(segment.first) != indices_size.end()) {
                PTabletWriteSlaveRequest_IndexSizeMap segment_indices_size =
                        indices_size.at(segment.first);

                for (auto index_size : segment_indices_size.index_sizes()) {
                    auto index_id = index_size.indexid();
                    auto size = index_size.size();
                    auto suffix_path = index_size.suffix_path();
                    std::string remote_inverted_index_file;
                    std::string local_inverted_index_file;
                    std::string remote_inverted_index_file_url;
                    // V1 stores one index file per (index_id, suffix); V2 stores a
                    // single combined index file per segment.
                    if (tablet_scheme->get_inverted_index_storage_format() ==
                        InvertedIndexStorageFormatPB::V1) {
                        remote_inverted_index_file =
                                InvertedIndexDescriptor::get_index_file_path_v1(
                                        InvertedIndexDescriptor::get_index_file_path_prefix(
                                                remote_file_path),
                                        index_id, suffix_path);
                        remote_inverted_index_file_url = construct_url(
                                get_host_port(host, http_port), token, remote_inverted_index_file);

                        local_inverted_index_file = InvertedIndexDescriptor::get_index_file_path_v1(
                                InvertedIndexDescriptor::get_index_file_path_prefix(
                                        local_file_path),
                                index_id, suffix_path);
                    } else {
                        remote_inverted_index_file =
                                InvertedIndexDescriptor::get_index_file_path_v2(
                                        InvertedIndexDescriptor::get_index_file_path_prefix(
                                                remote_file_path));
                        remote_inverted_index_file_url = construct_url(
                                get_host_port(host, http_port), token, remote_inverted_index_file);

                        local_inverted_index_file = InvertedIndexDescriptor::get_index_file_path_v2(
                                InvertedIndexDescriptor::get_index_file_path_prefix(
                                        local_file_path));
                    }
                    st = download_file_action(remote_inverted_index_file_url,
                                              local_inverted_index_file, estimate_timeout, size);
                    if (!st.ok()) {
                        LOG(WARNING) << "failed to pull rowset for slave replica. failed to "
                                        "download "
                                        "file. url="
                                     << remote_inverted_index_file_url
                                     << ", local_path=" << local_inverted_index_file
                                     << ", txn_id=" << rowset_meta->txn_id();
                        _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
                                                    rowset_meta->tablet_id(), node_id, false);
                        return;
                    }

                    VLOG_CRITICAL
                            << "succeed to download inverted index file for slave replica. url="
                            << remote_inverted_index_file_url
                            << ", local_path=" << local_inverted_index_file
                            << ", txn_id=" << rowset_meta->txn_id();
                }
            }
        }

        // All files are on disk; materialize the rowset and commit the txn locally.
        RowsetSharedPtr rowset;
        Status create_status = RowsetFactory::create_rowset(
                tablet->tablet_schema(), tablet->tablet_path(), rowset_meta, &rowset);
        if (!create_status) {
            LOG(WARNING) << "failed to create rowset from rowset meta for slave replica"
                         << ". rowset_id: " << rowset_meta->rowset_id()
                         << ", rowset_type: " << rowset_meta->rowset_type()
                         << ", rowset_state: " << rowset_meta->rowset_state()
                         << ", tablet_id=" << rowset_meta->tablet_id()
                         << ", txn_id=" << rowset_meta->txn_id();
            _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
                                        rowset_meta->tablet_id(), node_id, false);
            return;
        }
        if (rowset_meta->rowset_state() != RowsetStatePB::COMMITTED) {
            LOG(WARNING) << "could not commit txn for slave replica because master rowset state is "
                            "not committed, rowset_state="
                         << rowset_meta->rowset_state()
                         << ", tablet_id=" << rowset_meta->tablet_id()
                         << ", txn_id=" << rowset_meta->txn_id();
            _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
                                        rowset_meta->tablet_id(), node_id, false);
            return;
        }
        Status commit_txn_status = _engine.txn_manager()->commit_txn(
                tablet->data_dir()->get_meta(), rowset_meta->partition_id(), rowset_meta->txn_id(),
                rowset_meta->tablet_id(), tablet->tablet_uid(), rowset_meta->load_id(), rowset,
                std::move(pending_rs_guard), false);
        // An already-committed txn counts as success (idempotent retry).
        if (!commit_txn_status && !commit_txn_status.is<PUSH_TRANSACTION_ALREADY_EXIST>()) {
            LOG(WARNING) << "failed to add committed rowset for slave replica. rowset_id="
                         << rowset_meta->rowset_id() << ", tablet_id=" << rowset_meta->tablet_id()
                         << ", txn_id=" << rowset_meta->txn_id();
            _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
                                        rowset_meta->tablet_id(), node_id, false);
            return;
        }
        VLOG_CRITICAL << "succeed to pull rowset for slave replica. successfully to add committed "
                         "rowset: "
                      << rowset_meta->rowset_id()
                      << " to tablet, tablet_id=" << rowset_meta->tablet_id()
                      << ", schema_hash=" << rowset_meta->tablet_schema_hash()
                      << ", txn_id=" << rowset_meta->txn_id();
        _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
                                    rowset_meta->tablet_id(), node_id, true);
    });
    if (!ret) {
        // release() keeps `done` alive so offer_failed can run it with an error.
        offer_failed(response, closure_guard.release(), _heavy_work_pool);
        return;
    }
    Status::OK().to_protobuf(response->mutable_status());
}
2155
2156
// Reports the outcome of a rowset pull back to the master replica via the
// response_slave_tablet_pull_rowset RPC. Blocks until the RPC completes (join);
// a failure to reach the master is only logged — there is no retry here.
void PInternalServiceImpl::_response_pull_slave_rowset(const std::string& remote_host,
                                                       int64_t brpc_port, int64_t txn_id,
                                                       int64_t tablet_id, int64_t node_id,
                                                       bool is_succeed) {
    std::shared_ptr<PBackendService_Stub> stub =
            ExecEnv::GetInstance()->brpc_internal_client_cache()->get_client(remote_host,
                                                                             brpc_port);
    if (stub == nullptr) {
        LOG(WARNING) << "failed to response result of slave replica to master replica. get rpc "
                        "stub failed, master host="
                     << remote_host << ", port=" << brpc_port << ", tablet_id=" << tablet_id
                     << ", txn_id=" << txn_id;
        return;
    }

    auto request = std::make_shared<PTabletWriteSlaveDoneRequest>();
    request->set_txn_id(txn_id);
    request->set_tablet_id(tablet_id);
    request->set_node_id(node_id);
    request->set_is_succeed(is_succeed);
    auto pull_rowset_callback = DummyBrpcCallback<PTabletWriteSlaveDoneResult>::create_shared();
    auto closure = AutoReleaseClosure<
            PTabletWriteSlaveDoneRequest,
            DummyBrpcCallback<PTabletWriteSlaveDoneResult>>::create_unique(request,
                                                                           pull_rowset_callback);
    closure->cntl_->set_timeout_ms(config::slave_replica_writer_rpc_timeout_sec * 1000);
    // Ignore brpc's overcrowded-channel error; this notification must go out.
    closure->cntl_->ignore_eovercrowded();
    stub->response_slave_tablet_pull_rowset(closure->cntl_.get(), closure->request_.get(),
                                            closure->response_.get(), closure.get());
    // The closure deletes itself when brpc invokes it; release unique_ptr ownership.
    closure.release();

    // Wait for the RPC to finish so the result can be checked and logged below.
    pull_rowset_callback->join();
    if (pull_rowset_callback->cntl_->Failed()) {
        LOG(WARNING) << "failed to response result of slave replica to master replica, error="
                     << berror(pull_rowset_callback->cntl_->ErrorCode())
                     << ", error_text=" << pull_rowset_callback->cntl_->ErrorText()
                     << ", master host: " << remote_host << ", tablet_id=" << tablet_id
                     << ", txn_id=" << txn_id;
    }
    // NOTE(review): this is logged even on RPC failure — presumably intentional
    // trace logging, but the wording suggests success; confirm.
    VLOG_CRITICAL << "succeed to response the result of slave replica pull rowset to master "
                     "replica. master host: "
                  << remote_host << ". is_succeed=" << is_succeed << ", tablet_id=" << tablet_id
                  << ", slave server=" << node_id << ", txn_id=" << txn_id;
}
2200
2201
void PInternalServiceImpl::response_slave_tablet_pull_rowset(
2202
        google::protobuf::RpcController* controller, const PTabletWriteSlaveDoneRequest* request,
2203
0
        PTabletWriteSlaveDoneResult* response, google::protobuf::Closure* done) {
2204
0
    bool ret = _heavy_work_pool.try_offer([txn_mgr = _engine.txn_manager(), request, response,
2205
0
                                           done]() {
2206
0
        brpc::ClosureGuard closure_guard(done);
2207
0
        VLOG_CRITICAL << "receive the result of slave replica pull rowset from slave replica. "
2208
0
                         "slave server="
2209
0
                      << request->node_id() << ", is_succeed=" << request->is_succeed()
2210
0
                      << ", tablet_id=" << request->tablet_id() << ", txn_id=" << request->txn_id();
2211
0
        txn_mgr->finish_slave_tablet_pull_rowset(request->txn_id(), request->tablet_id(),
2212
0
                                                 request->node_id(), request->is_succeed());
2213
0
        Status::OK().to_protobuf(response->mutable_status());
2214
0
    });
2215
0
    if (!ret) {
2216
0
        offer_failed(response, done, _heavy_work_pool);
2217
0
        return;
2218
0
    }
2219
0
}
2220
2221
void PInternalService::multiget_data(google::protobuf::RpcController* controller,
2222
                                     const PMultiGetRequest* request, PMultiGetResponse* response,
2223
11
                                     google::protobuf::Closure* done) {
2224
11
    bool ret = _heavy_work_pool.try_offer([request, response, done]() {
2225
11
        signal::SignalTaskIdKeeper keeper(request->query_id());
2226
        // multi get data by rowid
2227
11
        MonotonicStopWatch watch;
2228
11
        watch.start();
2229
11
        brpc::ClosureGuard closure_guard(done);
2230
11
        response->mutable_status()->set_status_code(0);
2231
11
        SCOPED_ATTACH_TASK(ExecEnv::GetInstance()->rowid_storage_reader_tracker());
2232
11
        Status st = RowIdStorageReader::read_by_rowids(*request, response);
2233
11
        st.to_protobuf(response->mutable_status());
2234
11
        LOG(INFO) << "multiget_data finished, cost(us):" << watch.elapsed_time() / 1000;
2235
11
    });
2236
11
    if (!ret) {
2237
0
        offer_failed(response, done, _heavy_work_pool);
2238
0
        return;
2239
0
    }
2240
11
}
2241
2242
// V2 of the multi-get-by-rowid RPC: unlike multiget_data (heavy pool), the
// work is routed through the remote-scan scheduler of the workload group named
// in the request, so it is throttled by that group's resources.
// Ownership of `done`: exactly one of three paths completes it — the wg-not-
// found early return, the submitted scan task, or the submit-failure branch.
void PInternalService::multiget_data_v2(google::protobuf::RpcController* controller,
                                        const PMultiGetRequestV2* request,
                                        PMultiGetResponseV2* response,
                                        google::protobuf::Closure* done) {
    // get_group takes a list of candidate ids; only the request's wg_id is used.
    std::vector<uint64_t> id_set;
    id_set.push_back(request->wg_id());
    auto wg = ExecEnv::GetInstance()->workload_group_mgr()->get_group(id_set);
    Status st = Status::OK();

    if (!wg) [[unlikely]] {
        // Unknown workload group: answer CANCELLED immediately on this thread.
        brpc::ClosureGuard closure_guard(done);
        st = Status::Error<TStatusCode::CANCELLED>("fail to find wg: wg id:" +
                                                   std::to_string(request->wg_id()));
        st.to_protobuf(response->mutable_status());
        return;
    }

    doris::TaskScheduler* exec_sched = nullptr;
    ScannerScheduler* scan_sched = nullptr;
    ScannerScheduler* remote_scan_sched = nullptr;
    // Only the remote-scan scheduler is used below; the other two are fetched
    // because get_query_scheduler fills all three out-params.
    wg->get_query_scheduler(&exec_sched, &scan_sched, &remote_scan_sched);
    DCHECK(remote_scan_sched);

    st = remote_scan_sched->submit_scan_task(
            SimplifiedScanTask(
                    [request, response, done]() {
                        SCOPED_ATTACH_TASK(ExecEnv::GetInstance()->rowid_storage_reader_tracker());
                        // NOTE(review): v1 (multiget_data) uses SignalTaskIdKeeper
                        // (scoped reset) while this sets the id directly —
                        // presumably the scan-task thread resets it elsewhere;
                        // confirm before relying on it.
                        signal::set_signal_task_id(request->query_id());
                        // multi get data by rowid
                        MonotonicStopWatch watch;
                        watch.start();
                        // This guard completes the RPC once the task ran.
                        brpc::ClosureGuard closure_guard(done);
                        response->mutable_status()->set_status_code(0);
                        Status st = RowIdStorageReader::read_by_rowids(*request, response);
                        st.to_protobuf(response->mutable_status());
                        LOG(INFO) << "multiget_data finished, cost(us):"
                                  << watch.elapsed_time() / 1000;
                        return true;
                    },
                    nullptr, nullptr),
            fmt::format("{}-multiget_data_v2", print_id(request->query_id())));

    if (!st.ok()) {
        // Submission failed, so the task (and its guard) never ran: finish the
        // RPC here with the scheduler's error status.
        brpc::ClosureGuard closure_guard(done);
        st.to_protobuf(response->mutable_status());
    }
}
2289
2290
void PInternalServiceImpl::get_tablet_rowset_versions(google::protobuf::RpcController* cntl_base,
2291
                                                      const PGetTabletVersionsRequest* request,
2292
                                                      PGetTabletVersionsResponse* response,
2293
0
                                                      google::protobuf::Closure* done) {
2294
0
    brpc::ClosureGuard closure_guard(done);
2295
0
    VLOG_DEBUG << "receive get tablet versions request: " << request->DebugString();
2296
0
    _engine.get_tablet_rowset_versions(request, response);
2297
0
}
2298
2299
void PInternalService::glob(google::protobuf::RpcController* controller,
2300
                            const PGlobRequest* request, PGlobResponse* response,
2301
447
                            google::protobuf::Closure* done) {
2302
447
    bool ret = _heavy_work_pool.try_offer([request, response, done]() {
2303
447
        brpc::ClosureGuard closure_guard(done);
2304
447
        std::vector<io::FileInfo> files;
2305
447
        Status st = io::global_local_filesystem()->safe_glob(request->pattern(), &files);
2306
447
        if (st.ok()) {
2307
452
            for (auto& file : files) {
2308
452
                PGlobResponse_PFileInfo* pfile = response->add_files();
2309
452
                pfile->set_file(file.file_name);
2310
452
                pfile->set_size(file.file_size);
2311
452
            }
2312
430
        }
2313
447
        st.to_protobuf(response->mutable_status());
2314
447
    });
2315
447
    if (!ret) {
2316
0
        offer_failed(response, done, _heavy_work_pool);
2317
0
        return;
2318
0
    }
2319
447
}
2320
2321
void PInternalService::group_commit_insert(google::protobuf::RpcController* controller,
2322
                                           const PGroupCommitInsertRequest* request,
2323
                                           PGroupCommitInsertResponse* response,
2324
29
                                           google::protobuf::Closure* done) {
2325
29
    TUniqueId load_id;
2326
29
    load_id.__set_hi(request->load_id().hi());
2327
29
    load_id.__set_lo(request->load_id().lo());
2328
29
    std::shared_ptr<std::mutex> lock = std::make_shared<std::mutex>();
2329
29
    std::shared_ptr<bool> is_done = std::make_shared<bool>(false);
2330
29
    bool ret = _heavy_work_pool.try_offer([this, request, response, done, load_id, lock,
2331
29
                                           is_done]() {
2332
29
        brpc::ClosureGuard closure_guard(done);
2333
29
        std::shared_ptr<StreamLoadContext> ctx = std::make_shared<StreamLoadContext>(_exec_env);
2334
29
        auto pipe = std::make_shared<io::StreamLoadPipe>(
2335
29
                io::kMaxPipeBufferedBytes /* max_buffered_bytes */, 64 * 1024 /* min_chunk_size */,
2336
29
                -1 /* total_length */, true /* use_proto */);
2337
29
        ctx->pipe = pipe;
2338
29
        Status st = _exec_env->new_load_stream_mgr()->put(load_id, ctx);
2339
29
        if (st.ok()) {
2340
29
            try {
2341
29
                st = _exec_plan_fragment_impl(
2342
29
                        request->exec_plan_fragment_request().request(),
2343
29
                        request->exec_plan_fragment_request().version(),
2344
29
                        request->exec_plan_fragment_request().compact(),
2345
29
                        [&, response, done, load_id, lock, is_done](RuntimeState* state,
2346
29
                                                                    Status* status) {
2347
29
                            std::lock_guard<std::mutex> lock1(*lock);
2348
29
                            if (*is_done) {
2349
0
                                return;
2350
0
                            }
2351
29
                            *is_done = true;
2352
29
                            brpc::ClosureGuard cb_closure_guard(done);
2353
29
                            response->set_label(state->import_label());
2354
29
                            response->set_txn_id(state->wal_id());
2355
29
                            response->set_loaded_rows(state->num_rows_load_success());
2356
29
                            response->set_filtered_rows(state->num_rows_load_filtered());
2357
29
                            status->to_protobuf(response->mutable_status());
2358
29
                            if (!state->get_error_log_file_path().empty()) {
2359
0
                                response->set_error_url(
2360
0
                                        to_load_error_http_path(state->get_error_log_file_path()));
2361
0
                            }
2362
29
                            if (!state->get_first_error_msg().empty()) {
2363
0
                                response->set_first_error_msg(state->get_first_error_msg());
2364
0
                            }
2365
29
                            _exec_env->new_load_stream_mgr()->remove(load_id);
2366
29
                        });
2367
29
            } catch (const Exception& e) {
2368
0
                st = e.to_status();
2369
0
            } catch (const std::exception& e) {
2370
0
                st = Status::Error(ErrorCode::INTERNAL_ERROR, e.what());
2371
0
            } catch (...) {
2372
0
                st = Status::Error(ErrorCode::INTERNAL_ERROR,
2373
0
                                   "_exec_plan_fragment_impl meet unknown error");
2374
0
            }
2375
29
            if (!st.ok()) {
2376
0
                LOG(WARNING) << "exec plan fragment failed, load_id=" << print_id(load_id)
2377
0
                             << ", errmsg=" << st;
2378
0
                std::lock_guard<std::mutex> lock1(*lock);
2379
0
                if (*is_done) {
2380
0
                    closure_guard.release();
2381
0
                } else {
2382
0
                    *is_done = true;
2383
0
                    st.to_protobuf(response->mutable_status());
2384
0
                    _exec_env->new_load_stream_mgr()->remove(load_id);
2385
0
                }
2386
29
            } else {
2387
29
                closure_guard.release();
2388
66
                for (int i = 0; i < request->data().size(); ++i) {
2389
37
                    std::unique_ptr<PDataRow> row(new PDataRow());
2390
37
                    row->CopyFrom(request->data(i));
2391
37
                    st = pipe->append(std::move(row));
2392
37
                    if (!st.ok()) {
2393
0
                        break;
2394
0
                    }
2395
37
                }
2396
29
                if (st.ok()) {
2397
29
                    static_cast<void>(pipe->finish());
2398
29
                }
2399
29
            }
2400
29
        }
2401
29
    });
2402
29
    if (!ret) {
2403
0
        _exec_env->new_load_stream_mgr()->remove(load_id);
2404
0
        offer_failed(response, done, _heavy_work_pool);
2405
0
        return;
2406
0
    }
2407
29
};
2408
2409
void PInternalService::get_wal_queue_size(google::protobuf::RpcController* controller,
2410
                                          const PGetWalQueueSizeRequest* request,
2411
                                          PGetWalQueueSizeResponse* response,
2412
1.15k
                                          google::protobuf::Closure* done) {
2413
1.15k
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
2414
1.15k
        brpc::ClosureGuard closure_guard(done);
2415
1.15k
        Status st = Status::OK();
2416
1.15k
        auto table_id = request->table_id();
2417
1.15k
        auto count = _exec_env->wal_mgr()->get_wal_queue_size(table_id);
2418
1.15k
        response->set_size(count);
2419
1.15k
        response->mutable_status()->set_status_code(st.code());
2420
1.15k
    });
2421
1.15k
    if (!ret) {
2422
0
        offer_failed(response, done, _heavy_work_pool);
2423
0
    }
2424
1.15k
}
2425
2426
void PInternalService::get_be_resource(google::protobuf::RpcController* controller,
2427
                                       const PGetBeResourceRequest* request,
2428
                                       PGetBeResourceResponse* response,
2429
0
                                       google::protobuf::Closure* done) {
2430
0
    bool ret = _heavy_work_pool.try_offer([response, done]() {
2431
0
        brpc::ClosureGuard closure_guard(done);
2432
0
        int64_t mem_limit = MemInfo::mem_limit();
2433
0
        int64_t mem_usage = PerfCounters::get_vm_rss();
2434
2435
0
        PGlobalResourceUsage* global_resource_usage = response->mutable_global_be_resource_usage();
2436
0
        global_resource_usage->set_mem_limit(mem_limit);
2437
0
        global_resource_usage->set_mem_usage(mem_usage);
2438
2439
0
        Status st = Status::OK();
2440
0
        response->mutable_status()->set_status_code(st.code());
2441
0
    });
2442
0
    if (!ret) {
2443
0
        offer_failed(response, done, _heavy_work_pool);
2444
0
    }
2445
0
}
2446
2447
void PInternalService::delete_dictionary(google::protobuf::RpcController* controller,
2448
                                         const PDeleteDictionaryRequest* request,
2449
                                         PDeleteDictionaryResponse* response,
2450
3
                                         google::protobuf::Closure* done) {
2451
3
    brpc::ClosureGuard closure_guard(done);
2452
3
    Status st = ExecEnv::GetInstance()->dict_factory()->delete_dict(request->dictionary_id());
2453
3
    st.to_protobuf(response->mutable_status());
2454
3
}
2455
2456
void PInternalService::commit_refresh_dictionary(google::protobuf::RpcController* controller,
2457
                                                 const PCommitRefreshDictionaryRequest* request,
2458
                                                 PCommitRefreshDictionaryResponse* response,
2459
85
                                                 google::protobuf::Closure* done) {
2460
85
    brpc::ClosureGuard closure_guard(done);
2461
85
    Status st = ExecEnv::GetInstance()->dict_factory()->commit_refresh_dict(
2462
85
            request->dictionary_id(), request->version_id());
2463
85
    st.to_protobuf(response->mutable_status());
2464
85
}
2465
2466
void PInternalService::abort_refresh_dictionary(google::protobuf::RpcController* controller,
2467
                                                const PAbortRefreshDictionaryRequest* request,
2468
                                                PAbortRefreshDictionaryResponse* response,
2469
4
                                                google::protobuf::Closure* done) {
2470
4
    brpc::ClosureGuard closure_guard(done);
2471
4
    Status st = ExecEnv::GetInstance()->dict_factory()->abort_refresh_dict(request->dictionary_id(),
2472
4
                                                                           request->version_id());
2473
4
    st.to_protobuf(response->mutable_status());
2474
4
}
2475
2476
// Cloud-mode RPC: return the rowset metas of a tablet for a version range,
// and optionally the delete-bitmap entries the caller is missing. Used by
// peers fetching rowsets remotely (latency tracked via the Defer below).
void PInternalService::get_tablet_rowsets(google::protobuf::RpcController* controller,
                                          const PGetTabletRowsetsRequest* request,
                                          PGetTabletRowsetsResponse* response,
                                          google::protobuf::Closure* done) {
    DCHECK(config::is_cloud_mode());
    auto start_time = GetMonoTimeMicros();
    // Record end-to-end handling latency regardless of which path returns.
    Defer defer {
            [&]() { g_process_remote_fetch_rowsets_latency << GetMonoTimeMicros() - start_time; }};
    brpc::ClosureGuard closure_guard(done);
    LOG(INFO) << "process get tablet rowsets, request=" << request->ShortDebugString();
    if (!request->has_tablet_id() || !request->has_version_start() || !request->has_version_end()) {
        Status::InvalidArgument("missing params tablet/version_start/version_end")
                .to_protobuf(response->mutable_status());
        return;
    }
    CloudStorageEngine& storage = ExecEnv::GetInstance()->storage_engine().to_cloud();

    // local_only=true: only serve tablets already cached on this node; no
    // warm-up or delete-bitmap sync is triggered by the lookup itself.
    auto maybe_tablet =
            storage.tablet_mgr().get_tablet(request->tablet_id(), /*warmup data*/ false,
                                            /*syn_delete_bitmap*/ false, /*delete_bitmap*/ nullptr,
                                            /*local_only*/ true);
    if (!maybe_tablet) {
        maybe_tablet.error().to_protobuf(response->mutable_status());
        return;
    }
    auto tablet = maybe_tablet.value();
    Result<CaptureRowsetResult> ret;
    {
        // Snapshot a consistent rowset set under the tablet header lock;
        // peer-fetching is disabled since we ARE the peer being asked.
        std::shared_lock l(tablet->get_header_lock());
        ret = tablet->capture_consistent_rowsets_unlocked(
                {request->version_start(), request->version_end()},
                CaptureRowsetOps {.enable_fetch_rowsets_from_peers = false});
    }
    if (!ret) {
        ret.error().to_protobuf(response->mutable_status());
        return;
    }
    auto rowsets = std::move(ret.value().rowsets);
    // Serialize each captured rowset's meta into the response.
    for (const auto& rs : rowsets) {
        RowsetMetaPB meta;
        rs->rowset_meta()->to_rowset_pb(&meta);
        response->mutable_rowsets()->Add(std::move(meta));
    }
    if (request->has_delete_bitmap_keys()) {
        DCHECK(tablet->enable_unique_key_merge_on_write());
        auto delete_bitmap = std::move(ret.value().delete_bitmap);
        // The caller sends the bitmap keys it already has as three parallel
        // arrays (rowset_id, segment_id, version); rebuild them as a key set.
        auto keys_pb = request->delete_bitmap_keys();
        size_t len = keys_pb.rowset_ids().size();
        DCHECK_EQ(len, keys_pb.segment_ids().size());
        DCHECK_EQ(len, keys_pb.versions().size());
        std::set<DeleteBitmap::BitmapKey> keys;
        for (size_t i = 0; i < len; ++i) {
            RowsetId rs_id;
            rs_id.init(keys_pb.rowset_ids(i));
            keys.emplace(rs_id, keys_pb.segment_ids(i), keys_pb.versions(i));
        }
        // Reply with only the bitmap entries the caller is missing.
        auto diffset = delete_bitmap->diffset(keys).to_pb();
        *response->mutable_delete_bitmap() = std::move(diffset);
    }
    Status::OK().to_protobuf(response->mutable_status());
}
2537
2538
void PInternalService::request_cdc_client(google::protobuf::RpcController* controller,
2539
                                          const PRequestCdcClientRequest* request,
2540
                                          PRequestCdcClientResult* result,
2541
1.01k
                                          google::protobuf::Closure* done) {
2542
1.01k
    bool ret = _heavy_work_pool.try_offer([this, request, result, done]() {
2543
1.01k
        _exec_env->cdc_client_mgr()->request_cdc_client_impl(request, result, done);
2544
1.01k
    });
2545
2546
1.01k
    if (!ret) {
2547
0
        offer_failed(result, done, _heavy_work_pool);
2548
0
        return;
2549
0
    }
2550
1.01k
}
2551
2552
#include "common/compile_check_avoid_end.h"
2553
} // namespace doris