Coverage Report

Created: 2025-11-26 19:40

/root/doris/be/src/service/internal_service.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "service/internal_service.h"
19
20
#include <assert.h>
21
#include <brpc/closure_guard.h>
22
#include <brpc/controller.h>
23
#include <bthread/bthread.h>
24
#include <bthread/types.h>
25
#include <butil/errno.h>
26
#include <butil/iobuf.h>
27
#include <fcntl.h>
28
#include <fmt/core.h>
29
#include <gen_cpp/DataSinks_types.h>
30
#include <gen_cpp/MasterService_types.h>
31
#include <gen_cpp/PaloInternalService_types.h>
32
#include <gen_cpp/PlanNodes_types.h>
33
#include <gen_cpp/Status_types.h>
34
#include <gen_cpp/Types_types.h>
35
#include <gen_cpp/internal_service.pb.h>
36
#include <gen_cpp/olap_file.pb.h>
37
#include <gen_cpp/segment_v2.pb.h>
38
#include <gen_cpp/types.pb.h>
39
#include <google/protobuf/stubs/callback.h>
40
#include <stddef.h>
41
#include <stdint.h>
42
#include <sys/stat.h>
43
#include <vec/data_types/data_type.h>
44
#include <vec/exec/vjdbc_connector.h>
45
#include <vec/sink/varrow_flight_result_writer.h>
46
47
#include <algorithm>
48
#include <exception>
49
#include <filesystem>
50
#include <memory>
51
#include <set>
52
#include <sstream>
53
#include <string>
54
#include <utility>
55
#include <vector>
56
57
#include "cloud/cloud_storage_engine.h"
58
#include "cloud/cloud_tablet_mgr.h"
59
#include "cloud/config.h"
60
#include "common/config.h"
61
#include "common/exception.h"
62
#include "common/logging.h"
63
#include "common/signal_handler.h"
64
#include "common/status.h"
65
#include "exec/rowid_fetcher.h"
66
#include "http/http_client.h"
67
#include "io/fs/local_file_system.h"
68
#include "io/fs/stream_load_pipe.h"
69
#include "io/io_common.h"
70
#include "olap/data_dir.h"
71
#include "olap/olap_common.h"
72
#include "olap/olap_define.h"
73
#include "olap/rowset/beta_rowset.h"
74
#include "olap/rowset/rowset.h"
75
#include "olap/rowset/rowset_factory.h"
76
#include "olap/rowset/rowset_meta.h"
77
#include "olap/rowset/segment_v2/column_reader.h"
78
#include "olap/rowset/segment_v2/inverted_index_desc.h"
79
#include "olap/storage_engine.h"
80
#include "olap/tablet_fwd.h"
81
#include "olap/tablet_manager.h"
82
#include "olap/tablet_schema.h"
83
#include "olap/txn_manager.h"
84
#include "olap/wal/wal_manager.h"
85
#include "runtime/cache/result_cache.h"
86
#include "runtime/descriptors.h"
87
#include "runtime/exec_env.h"
88
#include "runtime/fold_constant_executor.h"
89
#include "runtime/fragment_mgr.h"
90
#include "runtime/load_channel_mgr.h"
91
#include "runtime/load_stream_mgr.h"
92
#include "runtime/result_block_buffer.h"
93
#include "runtime/result_buffer_mgr.h"
94
#include "runtime/routine_load/routine_load_task_executor.h"
95
#include "runtime/stream_load/new_load_stream_mgr.h"
96
#include "runtime/stream_load/stream_load_context.h"
97
#include "runtime/thread_context.h"
98
#include "runtime/types.h"
99
#include "runtime/workload_group/workload_group.h"
100
#include "runtime/workload_group/workload_group_manager.h"
101
#include "service/backend_options.h"
102
#include "service/point_query_executor.h"
103
#include "util/arrow/row_batch.h"
104
#include "util/async_io.h"
105
#include "util/brpc_client_cache.h"
106
#include "util/brpc_closure.h"
107
#include "util/doris_metrics.h"
108
#include "util/md5.h"
109
#include "util/metrics.h"
110
#include "util/network_util.h"
111
#include "util/proto_util.h"
112
#include "util/runtime_profile.h"
113
#include "util/stopwatch.hpp"
114
#include "util/string_util.h"
115
#include "util/thrift_util.h"
116
#include "util/time.h"
117
#include "util/uid_util.h"
118
#include "vec/common/schema_util.h"
119
#include "vec/core/block.h"
120
#include "vec/exec/format/avro//avro_jni_reader.h"
121
#include "vec/exec/format/csv/csv_reader.h"
122
#include "vec/exec/format/generic_reader.h"
123
#include "vec/exec/format/json/new_json_reader.h"
124
#include "vec/exec/format/orc/vorc_reader.h"
125
#include "vec/exec/format/parquet/vparquet_reader.h"
126
#include "vec/exec/format/text/text_reader.h"
127
#include "vec/functions/dictionary_factory.h"
128
#include "vec/jsonb/serialize.h"
129
#include "vec/runtime/vdata_stream_mgr.h"
130
#include "vec/sink/vmysql_result_writer.h"
131
132
namespace google {
133
namespace protobuf {
134
class RpcController;
135
} // namespace protobuf
136
} // namespace google
137
138
namespace doris {
139
#include "common/compile_check_avoid_begin.h"
140
using namespace ErrorCode;
141
142
const uint32_t DOWNLOAD_FILE_MAX_RETRY = 3;
143
144
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_pool_queue_size, MetricUnit::NOUNIT);
145
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_pool_queue_size, MetricUnit::NOUNIT);
146
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_active_threads, MetricUnit::NOUNIT);
147
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_active_threads, MetricUnit::NOUNIT);
148
149
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_pool_max_queue_size, MetricUnit::NOUNIT);
150
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_pool_max_queue_size, MetricUnit::NOUNIT);
151
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_max_threads, MetricUnit::NOUNIT);
152
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_max_threads, MetricUnit::NOUNIT);
153
154
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_pool_queue_size, MetricUnit::NOUNIT);
155
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_active_threads, MetricUnit::NOUNIT);
156
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_pool_max_queue_size, MetricUnit::NOUNIT);
157
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_max_threads, MetricUnit::NOUNIT);
158
159
static bvar::LatencyRecorder g_process_remote_fetch_rowsets_latency("process_remote_fetch_rowsets");
160
161
bthread_key_t btls_key;
162
163
0
static void thread_context_deleter(void* d) {
164
0
    delete static_cast<ThreadContext*>(d);
165
0
}
166
167
template <typename T>
168
concept CanCancel = requires(T* response) { response->mutable_status(); };
169
170
template <typename T>
171
0
void offer_failed(T* response, google::protobuf::Closure* done, const FifoThreadPool& pool) {
172
0
    brpc::ClosureGuard closure_guard(done);
173
0
    LOG(WARNING) << "fail to offer request to the work pool, pool=" << pool.get_info();
174
0
}
Unexecuted instantiation: _ZN5doris12offer_failedINS_25PTabletWriterCancelResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedINS_14PCacheResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedINS_17PFetchCacheResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
175
176
template <CanCancel T>
177
0
void offer_failed(T* response, google::protobuf::Closure* done, const FifoThreadPool& pool) {
178
0
    brpc::ClosureGuard closure_guard(done);
179
    // Use Status to generate the protobuf message, because it encodes the backend info
180
    // into the error message, so we can tell which backend's pool is full.
181
0
    Status st = Status::Error<TStatusCode::CANCELLED>(
182
0
            "fail to offer request to the work pool, pool={}", pool.get_info());
183
0
    st.to_protobuf(response->mutable_status());
184
0
    LOG(WARNING) << "cancelled due to fail to offer request to the work pool, pool="
185
0
                 << pool.get_info();
186
0
}
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PTabletWriterOpenResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PExecPlanFragmentResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23POpenLoadStreamResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_27PTabletWriterAddBlockResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_25PCancelPlanFragmentResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_21PFetchArrowDataResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_26POutfileWriteSuccessResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PFetchTableSchemaResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_29PFetchArrowFlightSchemaResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_24PTabletKeyLookupResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_25PJdbcTestConnectionResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_20PFetchColIdsResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_26PFetchRemoteSchemaResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_12PProxyResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_20PMergeFilterResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PSendFilterSizeResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PSyncFilterSizeResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_22PPublishFilterResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_15PSendDataResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_13PCommitResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_15PRollbackResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_19PConstantExprResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_19PTransmitDataResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_24PCheckRPCChannelResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_24PResetRPCChannelResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_23PTabletWriteSlaveResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_27PTabletWriteSlaveDoneResultEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_17PMultiGetResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_13PGlobResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_26PGroupCommitInsertResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_24PGetWalQueueSizeResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
Unexecuted instantiation: _ZN5doris12offer_failedITkNS_9CanCancelENS_22PGetBeResourceResponseEEEvPT_PN6google8protobuf7ClosureERKNS_14WorkThreadPoolILb0EEE
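The two offer_failed overloads above are the fall-back path every RPC handler in this file takes when a work pool rejects a task: try_offer the handler body to the pool, and if that fails, cancel the RPC immediately. Below is a minimal, self-contained sketch of that submit-or-fail pattern; MiniPool, FakeResponse and FakeClosure are hypothetical stand-ins for FifoThreadPool, the protobuf response and the brpc closure, not Doris types.

#include <functional>
#include <iostream>
#include <queue>
#include <string>

struct MiniPool {
    std::queue<std::function<void()>> tasks;
    std::size_t capacity = 2;
    // Mirrors the role of FifoThreadPool::try_offer(): returns false when the queue is full.
    bool try_offer(std::function<void()> fn) {
        if (tasks.size() >= capacity) return false;
        tasks.push(std::move(fn));
        return true;
    }
    std::string get_info() const { return "mini_pool(capacity=2)"; }
};

struct FakeResponse {
    std::string status;
    std::string* mutable_status() { return &status; }
};

struct FakeClosure {
    void Run() { std::cout << "rpc finished\n"; }
};

// Same shape as the CanCancel overload above: record a CANCELLED status and finish the RPC.
void offer_failed(FakeResponse* response, FakeClosure* done, const MiniPool& pool) {
    *response->mutable_status() =
            "CANCELLED: fail to offer request to the work pool, pool=" + pool.get_info();
    done->Run();
}

int main() {
    MiniPool pool;
    FakeResponse response;
    FakeClosure done;
    // The handler body would normally run inside the lambda; here it is a no-op.
    bool ret = pool.try_offer([] { /* heavy work */ });
    if (!ret) {
        offer_failed(&response, &done, pool);
    }
    std::cout << "queued tasks: " << pool.tasks.size() << "\n";
    return 0;
}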
187
188
template <typename T>
189
class NewHttpClosure : public ::google::protobuf::Closure {
190
public:
191
    NewHttpClosure(google::protobuf::Closure* done) : _done(done) {}
192
0
    NewHttpClosure(T* request, google::protobuf::Closure* done) : _request(request), _done(done) {}
Unexecuted instantiation: _ZN5doris14NewHttpClosureINS_28PTabletWriterAddBlockRequestEEC2EPS1_PN6google8protobuf7ClosureE
Unexecuted instantiation: _ZN5doris14NewHttpClosureINS_19PTransmitDataParamsEEC2EPS1_PN6google8protobuf7ClosureE
193
194
0
    void Run() override {
195
0
        if (_request != nullptr) {
196
0
            delete _request;
197
0
            _request = nullptr;
198
0
        }
199
0
        if (_done != nullptr) {
200
0
            _done->Run();
201
0
        }
202
0
        delete this;
203
0
    }
Unexecuted instantiation: _ZN5doris14NewHttpClosureINS_28PTabletWriterAddBlockRequestEE3RunEv
Unexecuted instantiation: _ZN5doris14NewHttpClosureINS_19PTransmitDataParamsEE3RunEv
204
205
private:
206
    T* _request = nullptr;
207
    google::protobuf::Closure* _done = nullptr;
208
};
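NewHttpClosure implements a single-shot, self-deleting closure: it owns the request allocated for the HTTP attachment path, forwards Run() to the wrapped closure, then frees both the request and itself. A minimal, hypothetical illustration of that lifecycle follows; Callback, PrintDone and SelfDeletingClosure are stand-in names, not Doris classes.

#include <iostream>

struct Callback {
    virtual ~Callback() = default;
    virtual void Run() = 0;
};

struct PrintDone : Callback {
    void Run() override { std::cout << "inner closure ran\n"; }
};

template <typename T>
struct SelfDeletingClosure : Callback {
    T* request;
    Callback* done;
    SelfDeletingClosure(T* req, Callback* d) : request(req), done(d) {}
    void Run() override {
        delete request;              // free the request built for this call
        request = nullptr;
        if (done != nullptr) done->Run();
        delete this;                 // closure is heap-allocated and used exactly once
    }
};

int main() {
    PrintDone inner;
    auto* closure = new SelfDeletingClosure<int>(new int(42), &inner);
    closure->Run();  // frees the int, runs inner, then frees the closure itself
    return 0;
}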
209
210
PInternalService::PInternalService(ExecEnv* exec_env)
211
0
        : _exec_env(exec_env),
212
          // The heavy thread pool is used for load processing and other work that reads from disk or accesses the network.
213
0
          _heavy_work_pool(config::brpc_heavy_work_pool_threads != -1
214
0
                                   ? config::brpc_heavy_work_pool_threads
215
0
                                   : std::max(128, CpuInfo::num_cores() * 4),
216
0
                           config::brpc_heavy_work_pool_max_queue_size != -1
217
0
                                   ? config::brpc_heavy_work_pool_max_queue_size
218
0
                                   : std::max(10240, CpuInfo::num_cores() * 320),
219
0
                           "brpc_heavy"),
220
221
          // The light thread pool should only be used for query-processing logic. All handlers must be very lightweight: no locking, no disk access.
222
0
          _light_work_pool(config::brpc_light_work_pool_threads != -1
223
0
                                   ? config::brpc_light_work_pool_threads
224
0
                                   : std::max(128, CpuInfo::num_cores() * 4),
225
0
                           config::brpc_light_work_pool_max_queue_size != -1
226
0
                                   ? config::brpc_light_work_pool_max_queue_size
227
0
                                   : std::max(10240, CpuInfo::num_cores() * 320),
228
0
                           "brpc_light"),
229
0
          _arrow_flight_work_pool(config::brpc_arrow_flight_work_pool_threads != -1
230
0
                                          ? config::brpc_arrow_flight_work_pool_threads
231
0
                                          : std::max(512, CpuInfo::num_cores() * 2),
232
0
                                  config::brpc_arrow_flight_work_pool_max_queue_size != -1
233
0
                                          ? config::brpc_arrow_flight_work_pool_max_queue_size
234
0
                                          : std::max(20480, CpuInfo::num_cores() * 640),
235
0
                                  "brpc_arrow_flight") {
236
0
    REGISTER_HOOK_METRIC(heavy_work_pool_queue_size,
237
0
                         [this]() { return _heavy_work_pool.get_queue_size(); });
238
0
    REGISTER_HOOK_METRIC(light_work_pool_queue_size,
239
0
                         [this]() { return _light_work_pool.get_queue_size(); });
240
0
    REGISTER_HOOK_METRIC(heavy_work_active_threads,
241
0
                         [this]() { return _heavy_work_pool.get_active_threads(); });
242
0
    REGISTER_HOOK_METRIC(light_work_active_threads,
243
0
                         [this]() { return _light_work_pool.get_active_threads(); });
244
245
0
    REGISTER_HOOK_METRIC(heavy_work_pool_max_queue_size,
246
0
                         []() { return config::brpc_heavy_work_pool_max_queue_size; });
247
0
    REGISTER_HOOK_METRIC(light_work_pool_max_queue_size,
248
0
                         []() { return config::brpc_light_work_pool_max_queue_size; });
249
0
    REGISTER_HOOK_METRIC(heavy_work_max_threads,
250
0
                         []() { return config::brpc_heavy_work_pool_threads; });
251
0
    REGISTER_HOOK_METRIC(light_work_max_threads,
252
0
                         []() { return config::brpc_light_work_pool_threads; });
253
254
0
    REGISTER_HOOK_METRIC(arrow_flight_work_pool_queue_size,
255
0
                         [this]() { return _arrow_flight_work_pool.get_queue_size(); });
256
0
    REGISTER_HOOK_METRIC(arrow_flight_work_active_threads,
257
0
                         [this]() { return _arrow_flight_work_pool.get_active_threads(); });
258
0
    REGISTER_HOOK_METRIC(arrow_flight_work_pool_max_queue_size,
259
0
                         []() { return config::brpc_arrow_flight_work_pool_max_queue_size; });
260
0
    REGISTER_HOOK_METRIC(arrow_flight_work_max_threads,
261
0
                         []() { return config::brpc_arrow_flight_work_pool_threads; });
262
263
0
    _exec_env->load_stream_mgr()->set_heavy_work_pool(&_heavy_work_pool);
264
265
0
    CHECK_EQ(0, bthread_key_create(&btls_key, thread_context_deleter));
266
0
    CHECK_EQ(0, bthread_key_create(&AsyncIO::btls_io_ctx_key, AsyncIO::io_ctx_key_deleter));
267
0
}
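The constructor above sizes each pool with the same rule: take the configured value when it is set (!= -1), otherwise fall back to a minimum scaled by the core count. A small sketch of that rule, using the heavy/light-pool numbers from the initializer list; pool_threads and pool_queue_size are illustrative helpers, not Doris functions.

#include <algorithm>
#include <cstdio>

// -1 means "not configured"; otherwise the config value wins.
int pool_threads(int configured, int num_cores) {
    return configured != -1 ? configured : std::max(128, num_cores * 4);
}

int pool_queue_size(int configured, int num_cores) {
    return configured != -1 ? configured : std::max(10240, num_cores * 320);
}

int main() {
    // Defaults on a 16-core machine: 128 threads, queue of 10240.
    std::printf("threads=%d queue=%d\n", pool_threads(-1, 16), pool_queue_size(-1, 16));
    // An explicit config value overrides the formula.
    std::printf("threads=%d\n", pool_threads(64, 16));
    return 0;
}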
268
269
PInternalServiceImpl::PInternalServiceImpl(StorageEngine& engine, ExecEnv* exec_env)
270
0
        : PInternalService(exec_env), _engine(engine) {}
271
272
0
PInternalServiceImpl::~PInternalServiceImpl() = default;
273
274
0
PInternalService::~PInternalService() {
275
0
    DEREGISTER_HOOK_METRIC(heavy_work_pool_queue_size);
276
0
    DEREGISTER_HOOK_METRIC(light_work_pool_queue_size);
277
0
    DEREGISTER_HOOK_METRIC(heavy_work_active_threads);
278
0
    DEREGISTER_HOOK_METRIC(light_work_active_threads);
279
280
0
    DEREGISTER_HOOK_METRIC(heavy_work_pool_max_queue_size);
281
0
    DEREGISTER_HOOK_METRIC(light_work_pool_max_queue_size);
282
0
    DEREGISTER_HOOK_METRIC(heavy_work_max_threads);
283
0
    DEREGISTER_HOOK_METRIC(light_work_max_threads);
284
285
0
    DEREGISTER_HOOK_METRIC(arrow_flight_work_pool_queue_size);
286
0
    DEREGISTER_HOOK_METRIC(arrow_flight_work_active_threads);
287
0
    DEREGISTER_HOOK_METRIC(arrow_flight_work_pool_max_queue_size);
288
0
    DEREGISTER_HOOK_METRIC(arrow_flight_work_max_threads);
289
290
0
    CHECK_EQ(0, bthread_key_delete(btls_key));
291
0
    CHECK_EQ(0, bthread_key_delete(AsyncIO::btls_io_ctx_key));
292
0
}
293
294
void PInternalService::tablet_writer_open(google::protobuf::RpcController* controller,
295
                                          const PTabletWriterOpenRequest* request,
296
                                          PTabletWriterOpenResult* response,
297
0
                                          google::protobuf::Closure* done) {
298
0
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
299
0
        VLOG_RPC << "tablet writer open, id=" << request->id()
300
0
                 << ", index_id=" << request->index_id() << ", txn_id=" << request->txn_id();
301
0
        signal::SignalTaskIdKeeper keeper(request->id());
302
0
        brpc::ClosureGuard closure_guard(done);
303
0
        auto st = _exec_env->load_channel_mgr()->open(*request);
304
0
        if (!st.ok()) {
305
0
            LOG(WARNING) << "load channel open failed, message=" << st << ", id=" << request->id()
306
0
                         << ", index_id=" << request->index_id()
307
0
                         << ", txn_id=" << request->txn_id();
308
0
        }
309
0
        st.to_protobuf(response->mutable_status());
310
0
    });
311
0
    if (!ret) {
312
0
        offer_failed(response, done, _heavy_work_pool);
313
0
        return;
314
0
    }
315
0
}
316
317
void PInternalService::exec_plan_fragment(google::protobuf::RpcController* controller,
318
                                          const PExecPlanFragmentRequest* request,
319
                                          PExecPlanFragmentResult* response,
320
0
                                          google::protobuf::Closure* done) {
321
0
    timeval tv {};
322
0
    gettimeofday(&tv, nullptr);
323
0
    response->set_received_time(tv.tv_sec * 1000LL + tv.tv_usec / 1000);
324
0
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
325
0
        _exec_plan_fragment_in_pthread(controller, request, response, done);
326
0
    });
327
0
    if (!ret) {
328
0
        offer_failed(response, done, _light_work_pool);
329
0
        return;
330
0
    }
331
0
}
332
333
void PInternalService::_exec_plan_fragment_in_pthread(google::protobuf::RpcController* controller,
334
                                                      const PExecPlanFragmentRequest* request,
335
                                                      PExecPlanFragmentResult* response,
336
0
                                                      google::protobuf::Closure* done) {
337
0
    timeval tv1 {};
338
0
    gettimeofday(&tv1, nullptr);
339
0
    response->set_execution_time(tv1.tv_sec * 1000LL + tv1.tv_usec / 1000);
340
0
    brpc::ClosureGuard closure_guard(done);
341
0
    auto st = Status::OK();
342
0
    bool compact = request->has_compact() ? request->compact() : false;
343
0
    PFragmentRequestVersion version =
344
0
            request->has_version() ? request->version() : PFragmentRequestVersion::VERSION_1;
345
0
    try {
346
0
        st = _exec_plan_fragment_impl(request->request(), version, compact);
347
0
    } catch (const Exception& e) {
348
0
        st = e.to_status();
349
0
    } catch (const std::exception& e) {
350
0
        st = Status::Error(ErrorCode::INTERNAL_ERROR, e.what());
351
0
    } catch (...) {
352
0
        st = Status::Error(ErrorCode::INTERNAL_ERROR,
353
0
                           "_exec_plan_fragment_impl meet unknown error");
354
0
    }
355
0
    if (!st.ok()) {
356
0
        LOG(WARNING) << "exec plan fragment failed, errmsg=" << st;
357
0
    }
358
0
    st.to_protobuf(response->mutable_status());
359
0
    timeval tv2 {};
360
0
    gettimeofday(&tv2, nullptr);
361
0
    response->set_execution_done_time(tv2.tv_sec * 1000LL + tv2.tv_usec / 1000);
362
0
}
363
364
void PInternalService::exec_plan_fragment_prepare(google::protobuf::RpcController* controller,
365
                                                  const PExecPlanFragmentRequest* request,
366
                                                  PExecPlanFragmentResult* response,
367
0
                                                  google::protobuf::Closure* done) {
368
0
    timeval tv {};
369
0
    gettimeofday(&tv, nullptr);
370
0
    response->set_received_time(tv.tv_sec * 1000LL + tv.tv_usec / 1000);
371
0
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
372
0
        _exec_plan_fragment_in_pthread(controller, request, response, done);
373
0
    });
374
0
    if (!ret) {
375
0
        offer_failed(response, done, _light_work_pool);
376
0
        return;
377
0
    }
378
0
}
379
380
void PInternalService::exec_plan_fragment_start(google::protobuf::RpcController* /*controller*/,
381
                                                const PExecPlanFragmentStartRequest* request,
382
                                                PExecPlanFragmentResult* result,
383
0
                                                google::protobuf::Closure* done) {
384
0
    timeval tv {};
385
0
    gettimeofday(&tv, nullptr);
386
0
    result->set_received_time(tv.tv_sec * 1000LL + tv.tv_usec / 1000);
387
0
    bool ret = _light_work_pool.try_offer([this, request, result, done]() {
388
0
        timeval tv1 {};
389
0
        gettimeofday(&tv1, nullptr);
390
0
        result->set_execution_time(tv1.tv_sec * 1000LL + tv1.tv_usec / 1000);
391
0
        brpc::ClosureGuard closure_guard(done);
392
0
        auto st = _exec_env->fragment_mgr()->start_query_execution(request);
393
0
        st.to_protobuf(result->mutable_status());
394
0
        timeval tv2 {};
395
0
        gettimeofday(&tv2, nullptr);
396
0
        result->set_execution_done_time(tv2.tv_sec * 1000LL + tv2.tv_usec / 1000);
397
0
    });
398
0
    if (!ret) {
399
0
        offer_failed(result, done, _light_work_pool);
400
0
        return;
401
0
    }
402
0
}
403
404
void PInternalService::open_load_stream(google::protobuf::RpcController* controller,
405
                                        const POpenLoadStreamRequest* request,
406
                                        POpenLoadStreamResponse* response,
407
0
                                        google::protobuf::Closure* done) {
408
0
    bool ret = _heavy_work_pool.try_offer([this, controller, request, response, done]() {
409
0
        signal::SignalTaskIdKeeper keeper(request->load_id());
410
0
        brpc::ClosureGuard done_guard(done);
411
0
        brpc::Controller* cntl = static_cast<brpc::Controller*>(controller);
412
0
        brpc::StreamOptions stream_options;
413
414
0
        LOG(INFO) << "open load stream, load_id=" << request->load_id()
415
0
                  << ", src_id=" << request->src_id();
416
417
0
        for (const auto& req : request->tablets()) {
418
0
            BaseTabletSPtr tablet;
419
0
            if (auto res = ExecEnv::get_tablet(req.tablet_id()); !res.has_value()) [[unlikely]] {
420
0
                auto st = std::move(res).error();
421
0
                st.to_protobuf(response->mutable_status());
422
0
                cntl->SetFailed(st.to_string());
423
0
                return;
424
0
            } else {
425
0
                tablet = std::move(res).value();
426
0
            }
427
0
            auto resp = response->add_tablet_schemas();
428
0
            resp->set_index_id(req.index_id());
429
0
            resp->set_enable_unique_key_merge_on_write(tablet->enable_unique_key_merge_on_write());
430
0
            tablet->tablet_schema()->to_schema_pb(resp->mutable_tablet_schema());
431
0
        }
432
433
0
        LoadStream* load_stream = nullptr;
434
0
        auto st = _exec_env->load_stream_mgr()->open_load_stream(request, load_stream);
435
0
        if (!st.ok()) {
436
0
            st.to_protobuf(response->mutable_status());
437
0
            return;
438
0
        }
439
440
0
        stream_options.handler = load_stream;
441
0
        stream_options.idle_timeout_ms = request->idle_timeout_ms();
442
0
        DBUG_EXECUTE_IF("PInternalServiceImpl.open_load_stream.set_idle_timeout",
443
0
                        { stream_options.idle_timeout_ms = 1; });
444
445
0
        StreamId streamid;
446
0
        if (brpc::StreamAccept(&streamid, *cntl, &stream_options) != 0) {
447
0
            st = Status::Cancelled("Fail to accept stream {}", streamid);
448
0
            st.to_protobuf(response->mutable_status());
449
0
            cntl->SetFailed(st.to_string());
450
0
            return;
451
0
        }
452
453
0
        VLOG_DEBUG << "get streamid =" << streamid;
454
0
        st.to_protobuf(response->mutable_status());
455
0
    });
456
0
    if (!ret) {
457
0
        offer_failed(response, done, _heavy_work_pool);
458
0
    }
459
0
}
460
461
void PInternalService::tablet_writer_add_block_by_http(google::protobuf::RpcController* controller,
462
                                                       const ::doris::PEmptyRequest* request,
463
                                                       PTabletWriterAddBlockResult* response,
464
0
                                                       google::protobuf::Closure* done) {
465
0
    PTabletWriterAddBlockRequest* new_request = new PTabletWriterAddBlockRequest();
466
0
    google::protobuf::Closure* new_done =
467
0
            new NewHttpClosure<PTabletWriterAddBlockRequest>(new_request, done);
468
0
    brpc::Controller* cntl = static_cast<brpc::Controller*>(controller);
469
0
    Status st = attachment_extract_request_contain_block<PTabletWriterAddBlockRequest>(new_request,
470
0
                                                                                       cntl);
471
0
    if (st.ok()) {
472
0
        tablet_writer_add_block(controller, new_request, response, new_done);
473
0
    } else {
474
0
        st.to_protobuf(response->mutable_status());
475
0
    }
476
0
}
477
478
void PInternalService::tablet_writer_add_block(google::protobuf::RpcController* controller,
479
                                               const PTabletWriterAddBlockRequest* request,
480
                                               PTabletWriterAddBlockResult* response,
481
0
                                               google::protobuf::Closure* done) {
482
0
    int64_t submit_task_time_ns = MonotonicNanos();
483
0
    bool ret = _heavy_work_pool.try_offer([request, response, done, submit_task_time_ns, this]() {
484
0
        int64_t wait_execution_time_ns = MonotonicNanos() - submit_task_time_ns;
485
0
        brpc::ClosureGuard closure_guard(done);
486
0
        int64_t execution_time_ns = 0;
487
0
        {
488
0
            SCOPED_RAW_TIMER(&execution_time_ns);
489
0
            signal::SignalTaskIdKeeper keeper(request->id());
490
0
            auto st = _exec_env->load_channel_mgr()->add_batch(*request, response);
491
0
            if (!st.ok()) {
492
0
                LOG(WARNING) << "tablet writer add block failed, message=" << st
493
0
                             << ", id=" << request->id() << ", index_id=" << request->index_id()
494
0
                             << ", sender_id=" << request->sender_id()
495
0
                             << ", backend id=" << request->backend_id();
496
0
            }
497
0
            st.to_protobuf(response->mutable_status());
498
0
        }
499
0
        response->set_execution_time_us(execution_time_ns / NANOS_PER_MICRO);
500
0
        response->set_wait_execution_time_us(wait_execution_time_ns / NANOS_PER_MICRO);
501
0
    });
502
0
    if (!ret) {
503
0
        offer_failed(response, done, _heavy_work_pool);
504
0
        return;
505
0
    }
506
0
}
507
508
void PInternalService::tablet_writer_cancel(google::protobuf::RpcController* controller,
509
                                            const PTabletWriterCancelRequest* request,
510
                                            PTabletWriterCancelResult* response,
511
0
                                            google::protobuf::Closure* done) {
512
0
    bool ret = _heavy_work_pool.try_offer([this, request, done]() {
513
0
        VLOG_RPC << "tablet writer cancel, id=" << request->id()
514
0
                 << ", index_id=" << request->index_id() << ", sender_id=" << request->sender_id();
515
0
        signal::SignalTaskIdKeeper keeper(request->id());
516
0
        brpc::ClosureGuard closure_guard(done);
517
0
        auto st = _exec_env->load_channel_mgr()->cancel(*request);
518
0
        if (!st.ok()) {
519
0
            LOG(WARNING) << "tablet writer cancel failed, id=" << request->id()
520
0
                         << ", index_id=" << request->index_id()
521
0
                         << ", sender_id=" << request->sender_id();
522
0
        }
523
0
    });
524
0
    if (!ret) {
525
0
        offer_failed(response, done, _heavy_work_pool);
526
0
        return;
527
0
    }
528
0
}
529
530
Status PInternalService::_exec_plan_fragment_impl(
531
        const std::string& ser_request, PFragmentRequestVersion version, bool compact,
532
0
        const std::function<void(RuntimeState*, Status*)>& cb) {
533
    // Sometimes the BE has not yet received the first heartbeat message but already receives requests from the FE.
534
    // If the BE executed this fragment, it would crash (core dump) when reading properties from the master info.
535
0
    if (ExecEnv::GetInstance()->cluster_info() == nullptr) {
536
0
        return Status::InternalError(
537
0
                "Have not receive the first heartbeat message from master, not ready to provide "
538
0
                "service");
539
0
    }
540
0
    CHECK(version == PFragmentRequestVersion::VERSION_3)
541
0
            << "only support version 3, received " << version;
542
0
    if (version == PFragmentRequestVersion::VERSION_3) {
543
0
        TPipelineFragmentParamsList t_request;
544
0
        {
545
0
            const uint8_t* buf = (const uint8_t*)ser_request.data();
546
0
            uint32_t len = ser_request.size();
547
0
            RETURN_IF_ERROR(deserialize_thrift_msg(buf, &len, compact, &t_request));
548
0
        }
549
550
0
        const auto& fragment_list = t_request.params_list;
551
0
        if (fragment_list.empty()) {
552
0
            return Status::InternalError("Invalid TPipelineFragmentParamsList!");
553
0
        }
554
0
        MonotonicStopWatch timer;
555
0
        timer.start();
556
557
        // compatibility with old-version frontends
558
0
        if (!t_request.__isset.runtime_filter_info) {
559
0
            TRuntimeFilterInfo runtime_filter_info;
560
0
            auto local_param = fragment_list[0].local_params[0];
561
0
            if (local_param.__isset.runtime_filter_params) {
562
0
                runtime_filter_info.__set_runtime_filter_params(local_param.runtime_filter_params);
563
0
            }
564
0
            if (local_param.__isset.topn_filter_descs) {
565
0
                runtime_filter_info.__set_topn_filter_descs(local_param.topn_filter_descs);
566
0
            }
567
0
            t_request.__set_runtime_filter_info(runtime_filter_info);
568
0
        }
569
570
0
        for (const TPipelineFragmentParams& fragment : fragment_list) {
571
0
            if (cb) {
572
0
                RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment(
573
0
                        fragment, QuerySource::INTERNAL_FRONTEND, cb, t_request));
574
0
            } else {
575
0
                RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment(
576
0
                        fragment, QuerySource::INTERNAL_FRONTEND, t_request));
577
0
            }
578
0
        }
579
0
        timer.stop();
580
0
        double cost_secs = static_cast<double>(timer.elapsed_time()) / 1000000000ULL;
581
0
        if (cost_secs > 5) {
582
0
            LOG_WARNING("Prepare {} fragments of query {} costs {} seconds, it costs too much",
583
0
                        fragment_list.size(), print_id(fragment_list.front().query_id), cost_secs);
584
0
        }
585
586
0
        return Status::OK();
587
0
    } else {
588
0
        return Status::InternalError("invalid version");
589
0
    }
590
0
}
591
592
void PInternalService::cancel_plan_fragment(google::protobuf::RpcController* /*controller*/,
593
                                            const PCancelPlanFragmentRequest* request,
594
                                            PCancelPlanFragmentResult* result,
595
0
                                            google::protobuf::Closure* done) {
596
0
    bool ret = _light_work_pool.try_offer([this, request, result, done]() {
597
0
        brpc::ClosureGuard closure_guard(done);
598
0
        signal::SignalTaskIdKeeper keeper(request->finst_id());
599
0
        Status st = Status::OK();
600
601
0
        const bool has_cancel_reason = request->has_cancel_reason();
602
0
        const bool has_cancel_status = request->has_cancel_status();
603
        // During an upgrade only LIMIT_REACH is used; any other reason is converted to an internal error.
604
0
        Status actual_cancel_status = Status::OK();
605
        // Convert PPlanFragmentCancelReason to Status
606
0
        if (has_cancel_status) {
607
            // If the FE set a cancel status, it is a new FE, so use the cancel status directly.
608
0
            actual_cancel_status = Status::create<false>(request->cancel_status());
609
0
        } else if (has_cancel_reason) {
610
            // If the FE did not set a cancel status but did set a cancel reason, convert the cancel reason
611
            // to cancel status here.
612
0
            if (request->cancel_reason() == PPlanFragmentCancelReason::LIMIT_REACH) {
613
0
                actual_cancel_status = Status::Error<ErrorCode::LIMIT_REACH>("limit reach");
614
0
            } else {
615
                // Use cancel reason as error message
616
0
                actual_cancel_status = Status::InternalError(
617
0
                        PPlanFragmentCancelReason_Name(request->cancel_reason()));
618
0
            }
619
0
        } else {
620
0
            actual_cancel_status = Status::InternalError("unknown error");
621
0
        }
622
623
0
        TUniqueId query_id;
624
0
        query_id.__set_hi(request->query_id().hi());
625
0
        query_id.__set_lo(request->query_id().lo());
626
0
        LOG(INFO) << fmt::format("Cancel query {}, reason: {}", print_id(query_id),
627
0
                                 actual_cancel_status.to_string());
628
0
        _exec_env->fragment_mgr()->cancel_query(query_id, actual_cancel_status);
629
630
        // TODO: this logic seems useless since cancel only returns Status::OK; remove it.
631
0
        st.to_protobuf(result->mutable_status());
632
0
    });
633
0
    if (!ret) {
634
0
        offer_failed(result, done, _light_work_pool);
635
0
        return;
636
0
    }
637
0
}
638
639
void PInternalService::fetch_data(google::protobuf::RpcController* controller,
640
                                  const PFetchDataRequest* request, PFetchDataResult* result,
641
0
                                  google::protobuf::Closure* done) {
642
    // fetch_data is a light operation: when no data is ready it queues the request instead of waiting in place.
643
    // When data is ready it is sent via brpc; the brpc service has its own queue, so this won't take long.
644
0
    auto ctx = vectorized::GetResultBatchCtx::create_shared(result, done);
645
0
    TUniqueId unique_id = UniqueId(request->finst_id()).to_thrift(); // query_id or instance_id
646
0
    std::shared_ptr<vectorized::MySQLResultBlockBuffer> buffer;
647
0
    Status st = ExecEnv::GetInstance()->result_mgr()->find_buffer(unique_id, buffer);
648
0
    if (!st.ok()) {
649
0
        LOG(WARNING) << "Result buffer not found! finst ID: " << print_id(unique_id);
650
0
        return;
651
0
    }
652
0
    if (st = buffer->get_batch(ctx); !st.ok()) {
653
0
        LOG(WARNING) << "fetch_data failed: " << st.to_string();
654
0
    }
655
0
}
656
657
void PInternalService::fetch_arrow_data(google::protobuf::RpcController* controller,
658
                                        const PFetchArrowDataRequest* request,
659
                                        PFetchArrowDataResult* result,
660
0
                                        google::protobuf::Closure* done) {
661
0
    bool ret = _arrow_flight_work_pool.try_offer([request, result, done]() {
662
0
        brpc::ClosureGuard closure_guard(done);
663
0
        auto ctx = vectorized::GetArrowResultBatchCtx::create_shared(result);
664
0
        TUniqueId unique_id = UniqueId(request->finst_id()).to_thrift(); // query_id or instance_id
665
0
        std::shared_ptr<vectorized::ArrowFlightResultBlockBuffer> arrow_buffer;
666
0
        auto st = ExecEnv::GetInstance()->result_mgr()->find_buffer(unique_id, arrow_buffer);
667
0
        if (!st.ok()) {
668
0
            LOG(WARNING) << "Result buffer not found! Query ID: " << print_id(unique_id);
669
0
            return;
670
0
        }
671
0
        if (st = arrow_buffer->get_batch(ctx); !st.ok()) {
672
0
            LOG(WARNING) << "fetch_arrow_data failed: " << st.to_string();
673
0
        }
674
0
    });
675
0
    if (!ret) {
676
0
        offer_failed(result, done, _arrow_flight_work_pool);
677
0
        return;
678
0
    }
679
0
}
680
681
void PInternalService::outfile_write_success(google::protobuf::RpcController* controller,
682
                                             const POutfileWriteSuccessRequest* request,
683
                                             POutfileWriteSuccessResult* result,
684
0
                                             google::protobuf::Closure* done) {
685
0
    bool ret = _heavy_work_pool.try_offer([request, result, done]() {
686
0
        VLOG_RPC << "outfile write success file";
687
0
        brpc::ClosureGuard closure_guard(done);
688
0
        TResultFileSink result_file_sink;
689
0
        Status st = Status::OK();
690
0
        {
691
0
            const uint8_t* buf = (const uint8_t*)(request->result_file_sink().data());
692
0
            uint32_t len = request->result_file_sink().size();
693
0
            st = deserialize_thrift_msg(buf, &len, false, &result_file_sink);
694
0
            if (!st.ok()) {
695
0
                LOG(WARNING) << "outfile write success file failed, errmsg = " << st;
696
0
                st.to_protobuf(result->mutable_status());
697
0
                return;
698
0
            }
699
0
        }
700
701
0
        TResultFileSinkOptions file_options = result_file_sink.file_options;
702
0
        std::stringstream ss;
703
0
        ss << file_options.file_path << file_options.success_file_name;
704
0
        std::string file_name = ss.str();
705
0
        if (result_file_sink.storage_backend_type == TStorageBackendType::LOCAL) {
706
            // For local file writer, the file_path is a local dir.
707
            // Here we do a simple security verification by checking whether the file exists.
708
            // Because the file path is currently arbitrarily specified by the user,
709
            // Doris is not responsible for ensuring the correctness of the path.
710
            // This is just to prevent overwriting an existing file.
711
0
            bool exists = true;
712
0
            st = io::global_local_filesystem()->exists(file_name, &exists);
713
0
            if (!st.ok()) {
714
0
                LOG(WARNING) << "outfile write success file failed, errmsg = " << st;
715
0
                st.to_protobuf(result->mutable_status());
716
0
                return;
717
0
            }
718
0
            if (exists) {
719
0
                st = Status::InternalError("File already exists: {}", file_name);
720
0
            }
721
0
            if (!st.ok()) {
722
0
                LOG(WARNING) << "outfile write success file failed, errmsg = " << st;
723
0
                st.to_protobuf(result->mutable_status());
724
0
                return;
725
0
            }
726
0
        }
727
728
0
        auto file_type_res =
729
0
                FileFactory::convert_storage_type(result_file_sink.storage_backend_type);
730
0
        if (!file_type_res.has_value()) [[unlikely]] {
731
0
            st = std::move(file_type_res).error();
732
0
            st.to_protobuf(result->mutable_status());
733
0
            LOG(WARNING) << "encounter unknown type=" << result_file_sink.storage_backend_type
734
0
                         << ", st=" << st;
735
0
            return;
736
0
        }
737
738
0
        auto&& res = FileFactory::create_file_writer(file_type_res.value(), ExecEnv::GetInstance(),
739
0
                                                     file_options.broker_addresses,
740
0
                                                     file_options.broker_properties, file_name,
741
0
                                                     {
742
0
                                                             .write_file_cache = false,
743
0
                                                             .sync_file_data = false,
744
0
                                                     });
745
0
        using T = std::decay_t<decltype(res)>;
746
0
        if (!res.has_value()) [[unlikely]] {
747
0
            st = std::forward<T>(res).error();
748
0
            st.to_protobuf(result->mutable_status());
749
0
            return;
750
0
        }
751
752
0
        std::unique_ptr<doris::io::FileWriter> _file_writer_impl = std::forward<T>(res).value();
753
        // must write something because the S3 file writer cannot write an empty file
754
0
        st = _file_writer_impl->append({"success"});
755
0
        if (!st.ok()) {
756
0
            LOG(WARNING) << "outfile write success file failed, errmsg=" << st;
757
0
            st.to_protobuf(result->mutable_status());
758
0
            return;
759
0
        }
760
0
        st = _file_writer_impl->close();
761
0
        if (!st.ok()) {
762
0
            LOG(WARNING) << "outfile write success file failed, errmsg=" << st;
763
0
            st.to_protobuf(result->mutable_status());
764
0
            return;
765
0
        }
766
0
    });
767
0
    if (!ret) {
768
0
        offer_failed(result, done, _heavy_work_pool);
769
0
        return;
770
0
    }
771
0
}
772
773
void PInternalService::fetch_table_schema(google::protobuf::RpcController* controller,
774
                                          const PFetchTableSchemaRequest* request,
775
                                          PFetchTableSchemaResult* result,
776
0
                                          google::protobuf::Closure* done) {
777
0
    bool ret = _heavy_work_pool.try_offer([request, result, done]() {
778
0
        VLOG_RPC << "fetch table schema";
779
0
        brpc::ClosureGuard closure_guard(done);
780
0
        TFileScanRange file_scan_range;
781
0
        Status st = Status::OK();
782
0
        {
783
0
            const uint8_t* buf = (const uint8_t*)(request->file_scan_range().data());
784
0
            uint32_t len = request->file_scan_range().size();
785
0
            st = deserialize_thrift_msg(buf, &len, false, &file_scan_range);
786
0
            if (!st.ok()) {
787
0
                LOG(WARNING) << "fetch table schema failed, errmsg=" << st;
788
0
                st.to_protobuf(result->mutable_status());
789
0
                return;
790
0
            }
791
0
        }
792
0
        if (file_scan_range.__isset.ranges == false) {
793
0
            st = Status::InternalError("can not get TFileRangeDesc.");
794
0
            st.to_protobuf(result->mutable_status());
795
0
            return;
796
0
        }
797
0
        if (file_scan_range.__isset.params == false) {
798
0
            st = Status::InternalError("can not get TFileScanRangeParams.");
799
0
            st.to_protobuf(result->mutable_status());
800
0
            return;
801
0
        }
802
0
        const TFileRangeDesc& range = file_scan_range.ranges.at(0);
803
0
        const TFileScanRangeParams& params = file_scan_range.params;
804
805
0
        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
806
0
                MemTrackerLimiter::Type::OTHER,
807
0
                fmt::format("InternalService::fetch_table_schema:{}#{}", params.format_type,
808
0
                            params.file_type));
809
0
        SCOPED_ATTACH_TASK(mem_tracker);
810
811
        // make sure the profile is destructed after the reader because PrefetchBufferedReader
812
        // might access the profile asynchronously
813
0
        std::unique_ptr<RuntimeProfile> profile =
814
0
                std::make_unique<RuntimeProfile>("FetchTableSchema");
815
0
        std::unique_ptr<vectorized::GenericReader> reader(nullptr);
816
0
        io::IOContext io_ctx;
817
0
        io::FileCacheStatistics file_cache_statis;
818
0
        io_ctx.file_cache_stats = &file_cache_statis;
819
0
        io::FileReaderStats file_reader_stats;
820
0
        io_ctx.file_reader_stats = &file_reader_stats;
821
        // file_slots is unused, but its lifetime must be longer than the reader's
822
0
        std::vector<SlotDescriptor*> file_slots;
823
0
        switch (params.format_type) {
824
0
        case TFileFormatType::FORMAT_CSV_PLAIN:
825
0
        case TFileFormatType::FORMAT_CSV_GZ:
826
0
        case TFileFormatType::FORMAT_CSV_BZ2:
827
0
        case TFileFormatType::FORMAT_CSV_LZ4FRAME:
828
0
        case TFileFormatType::FORMAT_CSV_LZ4BLOCK:
829
0
        case TFileFormatType::FORMAT_CSV_SNAPPYBLOCK:
830
0
        case TFileFormatType::FORMAT_CSV_LZOP:
831
0
        case TFileFormatType::FORMAT_CSV_DEFLATE: {
832
0
            reader = vectorized::CsvReader::create_unique(nullptr, profile.get(), nullptr, params,
833
0
                                                          range, file_slots, &io_ctx);
834
0
            break;
835
0
        }
836
0
        case TFileFormatType::FORMAT_TEXT: {
837
0
            reader = vectorized::TextReader::create_unique(nullptr, profile.get(), nullptr, params,
838
0
                                                           range, file_slots, &io_ctx);
839
0
            break;
840
0
        }
841
0
        case TFileFormatType::FORMAT_PARQUET: {
842
0
            reader = vectorized::ParquetReader::create_unique(params, range, &io_ctx, nullptr);
843
0
            break;
844
0
        }
845
0
        case TFileFormatType::FORMAT_ORC: {
846
0
            reader = vectorized::OrcReader::create_unique(params, range, "", &io_ctx);
847
0
            break;
848
0
        }
849
0
        case TFileFormatType::FORMAT_JSON: {
850
0
            reader = vectorized::NewJsonReader::create_unique(profile.get(), params, range,
851
0
                                                              file_slots, &io_ctx);
852
0
            break;
853
0
        }
854
0
        case TFileFormatType::FORMAT_AVRO: {
855
0
            reader = vectorized::AvroJNIReader::create_unique(profile.get(), params, range,
856
0
                                                              file_slots);
857
0
            break;
858
0
        }
859
0
        default:
860
0
            st = Status::InternalError("Not supported file format in fetch table schema: {}",
861
0
                                       params.format_type);
862
0
            st.to_protobuf(result->mutable_status());
863
0
            return;
864
0
        }
865
0
        if (!st.ok()) {
866
0
            LOG(WARNING) << "failed to create reader, errmsg=" << st;
867
0
            st.to_protobuf(result->mutable_status());
868
0
            return;
869
0
        }
870
0
        st = reader->init_schema_reader();
871
0
        if (!st.ok()) {
872
0
            LOG(WARNING) << "failed to init reader, errmsg=" << st;
873
0
            st.to_protobuf(result->mutable_status());
874
0
            return;
875
0
        }
876
0
        std::vector<std::string> col_names;
877
0
        std::vector<vectorized::DataTypePtr> col_types;
878
0
        st = reader->get_parsed_schema(&col_names, &col_types);
879
0
        if (!st.ok()) {
880
0
            LOG(WARNING) << "fetch table schema failed, errmsg=" << st;
881
0
            st.to_protobuf(result->mutable_status());
882
0
            return;
883
0
        }
884
0
        result->set_column_nums(col_names.size());
885
0
        for (size_t idx = 0; idx < col_names.size(); ++idx) {
886
0
            result->add_column_names(col_names[idx]);
887
0
        }
888
0
        for (size_t idx = 0; idx < col_types.size(); ++idx) {
889
0
            PTypeDesc* type_desc = result->add_column_types();
890
0
            col_types[idx]->to_protobuf(type_desc);
891
0
        }
892
0
        st.to_protobuf(result->mutable_status());
893
0
    });
894
0
    if (!ret) {
895
0
        offer_failed(result, done, _heavy_work_pool);
896
0
        return;
897
0
    }
898
0
}
899
900
void PInternalService::fetch_arrow_flight_schema(google::protobuf::RpcController* controller,
901
                                                 const PFetchArrowFlightSchemaRequest* request,
902
                                                 PFetchArrowFlightSchemaResult* result,
903
0
                                                 google::protobuf::Closure* done) {
904
0
    bool ret = _arrow_flight_work_pool.try_offer([request, result, done]() {
905
0
        brpc::ClosureGuard closure_guard(done);
906
0
        std::shared_ptr<arrow::Schema> schema;
907
0
        std::shared_ptr<vectorized::ArrowFlightResultBlockBuffer> buffer;
908
0
        auto st = ExecEnv::GetInstance()->result_mgr()->find_buffer(
909
0
                UniqueId(request->finst_id()).to_thrift(), buffer);
910
0
        if (!st.ok()) {
911
0
            LOG(WARNING) << "fetch arrow flight schema failed, errmsg=" << st;
912
0
            st.to_protobuf(result->mutable_status());
913
0
            return;
914
0
        }
915
0
        st = buffer->get_schema(&schema);
916
0
        if (!st.ok()) {
917
0
            LOG(WARNING) << "fetch arrow flight schema failed, errmsg=" << st;
918
0
            st.to_protobuf(result->mutable_status());
919
0
            return;
920
0
        }
921
922
0
        std::string schema_str;
923
0
        st = serialize_arrow_schema(&schema, &schema_str);
924
0
        if (st.ok()) {
925
0
            result->set_schema(std::move(schema_str));
926
0
            if (!config::public_host.empty()) {
927
0
                result->set_be_arrow_flight_ip(config::public_host);
928
0
            }
929
0
            if (config::arrow_flight_sql_proxy_port != -1) {
930
0
                result->set_be_arrow_flight_port(config::arrow_flight_sql_proxy_port);
931
0
            }
932
0
        }
933
0
        st.to_protobuf(result->mutable_status());
934
0
    });
935
0
    if (!ret) {
936
0
        offer_failed(result, done, _arrow_flight_work_pool);
937
0
        return;
938
0
    }
939
0
}
940
941
Status PInternalService::_tablet_fetch_data(const PTabletKeyLookupRequest* request,
942
0
                                            PTabletKeyLookupResponse* response) {
943
0
    PointQueryExecutor executor;
944
0
    RETURN_IF_ERROR(executor.init(request, response));
945
0
    RETURN_IF_ERROR(executor.lookup_up());
946
0
    executor.print_profile();
947
0
    return Status::OK();
948
0
}
949
950
void PInternalService::tablet_fetch_data(google::protobuf::RpcController* controller,
951
                                         const PTabletKeyLookupRequest* request,
952
                                         PTabletKeyLookupResponse* response,
953
0
                                         google::protobuf::Closure* done) {
954
0
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
955
0
        [[maybe_unused]] auto* cntl = static_cast<brpc::Controller*>(controller);
956
0
        brpc::ClosureGuard guard(done);
957
0
        Status st = _tablet_fetch_data(request, response);
958
0
        st.to_protobuf(response->mutable_status());
959
0
    });
960
0
    if (!ret) {
961
0
        offer_failed(response, done, _light_work_pool);
962
0
        return;
963
0
    }
964
0
}
965
966
void PInternalService::test_jdbc_connection(google::protobuf::RpcController* controller,
967
                                            const PJdbcTestConnectionRequest* request,
968
                                            PJdbcTestConnectionResult* result,
969
0
                                            google::protobuf::Closure* done) {
970
0
    bool ret = _heavy_work_pool.try_offer([request, result, done]() {
971
0
        VLOG_RPC << "test jdbc connection";
972
0
        brpc::ClosureGuard closure_guard(done);
973
0
        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
974
0
                MemTrackerLimiter::Type::OTHER,
975
0
                fmt::format("InternalService::test_jdbc_connection"));
976
0
        SCOPED_ATTACH_TASK(mem_tracker);
977
0
        TTableDescriptor table_desc;
978
0
        vectorized::JdbcConnectorParam jdbc_param;
979
0
        Status st = Status::OK();
980
0
        {
981
0
            const uint8_t* buf = (const uint8_t*)request->jdbc_table().data();
982
0
            uint32_t len = request->jdbc_table().size();
983
0
            st = deserialize_thrift_msg(buf, &len, false, &table_desc);
984
0
            if (!st.ok()) {
985
0
                LOG(WARNING) << "test jdbc connection failed, errmsg=" << st;
986
0
                st.to_protobuf(result->mutable_status());
987
0
                return;
988
0
            }
989
0
        }
990
0
        TJdbcTable jdbc_table = (table_desc.jdbcTable);
991
0
        jdbc_param.catalog_id = jdbc_table.catalog_id;
992
0
        jdbc_param.driver_class = jdbc_table.jdbc_driver_class;
993
0
        jdbc_param.driver_path = jdbc_table.jdbc_driver_url;
994
0
        jdbc_param.driver_checksum = jdbc_table.jdbc_driver_checksum;
995
0
        jdbc_param.jdbc_url = jdbc_table.jdbc_url;
996
0
        jdbc_param.user = jdbc_table.jdbc_user;
997
0
        jdbc_param.passwd = jdbc_table.jdbc_password;
998
0
        jdbc_param.query_string = request->query_str();
999
0
        jdbc_param.table_type = static_cast<TOdbcTableType::type>(request->jdbc_table_type());
1000
0
        jdbc_param.use_transaction = false;
1001
0
        jdbc_param.connection_pool_min_size = jdbc_table.connection_pool_min_size;
1002
0
        jdbc_param.connection_pool_max_size = jdbc_table.connection_pool_max_size;
1003
0
        jdbc_param.connection_pool_max_life_time = jdbc_table.connection_pool_max_life_time;
1004
0
        jdbc_param.connection_pool_max_wait_time = jdbc_table.connection_pool_max_wait_time;
1005
0
        jdbc_param.connection_pool_keep_alive = jdbc_table.connection_pool_keep_alive;
1006
1007
0
        std::unique_ptr<vectorized::JdbcConnector> jdbc_connector;
1008
0
        jdbc_connector.reset(new (std::nothrow) vectorized::JdbcConnector(jdbc_param));
1009
1010
0
        st = jdbc_connector->test_connection();
1011
0
        st.to_protobuf(result->mutable_status());
1012
1013
0
        Status clean_st = jdbc_connector->clean_datasource();
1014
0
        if (!clean_st.ok()) {
1015
0
            LOG(WARNING) << "Failed to clean JDBC datasource: " << clean_st.msg();
1016
0
        }
1017
0
        Status close_st = jdbc_connector->close();
1018
0
        if (!close_st.ok()) {
1019
0
            LOG(WARNING) << "Failed to close JDBC connector: " << close_st.msg();
1020
0
        }
1021
0
    });
1022
1023
0
    if (!ret) {
1024
0
        offer_failed(result, done, _heavy_work_pool);
1025
0
        return;
1026
0
    }
1027
0
}
1028
1029
void PInternalServiceImpl::get_column_ids_by_tablet_ids(google::protobuf::RpcController* controller,
1030
                                                        const PFetchColIdsRequest* request,
1031
                                                        PFetchColIdsResponse* response,
1032
0
                                                        google::protobuf::Closure* done) {
1033
0
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
1034
0
        _get_column_ids_by_tablet_ids(controller, request, response, done);
1035
0
    });
1036
0
    if (!ret) {
1037
0
        offer_failed(response, done, _light_work_pool);
1038
0
        return;
1039
0
    }
1040
0
}
1041
1042
void PInternalServiceImpl::_get_column_ids_by_tablet_ids(
1043
        google::protobuf::RpcController* controller, const PFetchColIdsRequest* request,
1044
0
        PFetchColIdsResponse* response, google::protobuf::Closure* done) {
1045
0
    brpc::ClosureGuard guard(done);
1046
0
    [[maybe_unused]] auto* cntl = static_cast<brpc::Controller*>(controller);
1047
0
    TabletManager* tablet_mgr = _engine.tablet_manager();
1048
0
    const auto& params = request->params();
1049
0
    for (const auto& param : params) {
1050
0
        int64_t index_id = param.indexid();
1051
0
        const auto& tablet_ids = param.tablet_ids();
1052
0
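        // filter_set collects each tablet's set of column unique ids; more than one
        // distinct set after the loop means the tablets' schemas are inconsistent.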
        std::set<std::set<int32_t>> filter_set;
1053
0
        std::map<int32_t, const TabletColumn*> id_to_column;
1054
0
        for (const int64_t tablet_id : tablet_ids) {
1055
0
            TabletSharedPtr tablet = tablet_mgr->get_tablet(tablet_id);
1056
0
            if (tablet == nullptr) {
1057
0
                std::stringstream ss;
1058
0
                ss << "cannot get tablet by id:" << tablet_id;
1059
0
                LOG(WARNING) << ss.str();
1060
0
                response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE);
1061
0
                response->mutable_status()->add_error_msgs(ss.str());
1062
0
                return;
1063
0
            }
1064
            // check schema consistency, column ids should be the same
1065
0
            const auto& columns = tablet->tablet_schema()->columns();
1066
1067
0
            std::set<int32_t> column_ids;
1068
0
            for (const auto& col : columns) {
1069
0
                column_ids.insert(col->unique_id());
1070
0
            }
1071
0
            filter_set.insert(std::move(column_ids));
1072
1073
0
            if (id_to_column.empty()) {
1074
0
                for (const auto& col : columns) {
1075
0
                    id_to_column.insert(std::pair {col->unique_id(), col.get()});
1076
0
                }
1077
0
            } else {
1078
0
                for (const auto& col : columns) {
1079
0
                    auto it = id_to_column.find(col->unique_id());
1080
0
                    if (it == id_to_column.end() || *(it->second) != *col) {
1081
0
                        ColumnPB prev_col_pb;
1082
0
                        ColumnPB curr_col_pb;
1083
0
                        if (it != id_to_column.end()) {
1084
0
                            it->second->to_schema_pb(&prev_col_pb);
1085
0
                        }
1086
0
                        col->to_schema_pb(&curr_col_pb);
1087
0
                        std::stringstream ss;
1088
0
                        ss << "consistency check failed: index{ " << index_id << " }"
1089
0
                           << " got inconsistent schema, prev column: " << prev_col_pb.DebugString()
1090
0
                           << " current column: " << curr_col_pb.DebugString();
1091
0
                        LOG(WARNING) << ss.str();
1092
0
                        response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE);
1093
0
                        response->mutable_status()->add_error_msgs(ss.str());
1094
0
                        return;
1095
0
                    }
1096
0
                }
1097
0
            }
1098
0
        }
1099
1100
0
        if (filter_set.size() > 1) {
1101
            // consistency check failed
1102
0
            std::stringstream ss;
1103
0
            ss << "consistency check failed: index{" << index_id << "}"
1104
0
               << "got inconsistent schema";
1105
0
            LOG(WARNING) << ss.str();
1106
0
            response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE);
1107
0
            response->mutable_status()->add_error_msgs(ss.str());
1108
0
            return;
1109
0
        }
1110
        // consistency check passed, use the first tablet as the representative
1111
0
        TabletSharedPtr tablet = tablet_mgr->get_tablet(tablet_ids[0]);
1112
0
        const auto& columns = tablet->tablet_schema()->columns();
1113
0
        auto entry = response->add_entries();
1114
0
        entry->set_index_id(index_id);
1115
0
        auto col_name_to_id = entry->mutable_col_name_to_id();
1116
0
        for (const auto& column : columns) {
1117
0
            (*col_name_to_id)[column->name()] = column->unique_id();
1118
0
        }
1119
0
    }
1120
0
    response->mutable_status()->set_status_code(TStatusCode::OK);
1121
0
}
1122
1123
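// Holds the response, controller and call id of one asynchronous brpc call so the
// caller can join it later with brpc::Join().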
template <class RPCResponse>
1124
struct AsyncRPCContext {
1125
    RPCResponse response;
1126
    brpc::Controller cntl;
1127
    brpc::CallId cid;
1128
};
1129
1130
void PInternalService::fetch_remote_tablet_schema(google::protobuf::RpcController* controller,
1131
                                                  const PFetchRemoteSchemaRequest* request,
1132
                                                  PFetchRemoteSchemaResponse* response,
1133
0
                                                  google::protobuf::Closure* done) {
1134
0
    bool ret = _heavy_work_pool.try_offer([request, response, done]() {
1135
0
        brpc::ClosureGuard closure_guard(done);
1136
0
        Status st = Status::OK();
1137
0
        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
1138
0
                MemTrackerLimiter::Type::OTHER,
1139
0
                fmt::format("InternalService::fetch_remote_tablet_schema"));
1140
0
        SCOPED_ATTACH_TASK(mem_tracker);
1141
0
        if (request->is_coordinator()) {
1142
            // Spawn rpc requests to non-coordinator nodes, then merge all of their results
1143
0
            PFetchRemoteSchemaRequest remote_request(*request);
1144
            // mark it as non-coordinator so each node returns its merged schema
1145
0
            remote_request.set_is_coordinator(false);
1146
0
            using PFetchRemoteTabletSchemaRpcContext = AsyncRPCContext<PFetchRemoteSchemaResponse>;
1147
0
            std::vector<PFetchRemoteTabletSchemaRpcContext> rpc_contexts(
1148
0
                    request->tablet_location_size());
1149
0
            for (int i = 0; i < request->tablet_location_size(); ++i) {
1150
0
                std::string host = request->tablet_location(i).host();
1151
0
                int32_t brpc_port = request->tablet_location(i).brpc_port();
1152
0
                std::shared_ptr<PBackendService_Stub> stub(
1153
0
                        ExecEnv::GetInstance()->brpc_internal_client_cache()->get_client(
1154
0
                                host, brpc_port));
1155
0
                if (stub == nullptr) {
1156
0
                    LOG(WARNING) << "Failed to init rpc to " << host << ":" << brpc_port;
1157
0
                    st = Status::InternalError("Failed to init rpc to {}:{}", host, brpc_port);
1158
0
                    continue;
1159
0
                }
1160
0
                rpc_contexts[i].cid = rpc_contexts[i].cntl.call_id();
1161
0
                rpc_contexts[i].cntl.set_timeout_ms(config::fetch_remote_schema_rpc_timeout_ms);
1162
0
                stub->fetch_remote_tablet_schema(&rpc_contexts[i].cntl, &remote_request,
1163
0
                                                 &rpc_contexts[i].response, brpc::DoNothing());
1164
0
            }
1165
0
            std::vector<TabletSchemaSPtr> schemas;
1166
0
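            // Join every outstanding rpc, collect the merged schema from each response,
            // and keep the first error while still joining the remaining requests.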
            for (auto& rpc_context : rpc_contexts) {
1167
0
                brpc::Join(rpc_context.cid);
1168
0
                if (!st.ok()) {
1169
                    // make sure all in-flight rpc requests are joined
1170
0
                    continue;
1171
0
                }
1172
0
                if (rpc_context.cntl.Failed()) {
1173
0
                    LOG(WARNING) << "fetch_remote_tablet_schema rpc err:"
1174
0
                                 << rpc_context.cntl.ErrorText();
1175
0
                    ExecEnv::GetInstance()->brpc_internal_client_cache()->erase(
1176
0
                            rpc_context.cntl.remote_side());
1177
0
                    st = Status::InternalError("fetch_remote_tablet_schema rpc err: {}",
1178
0
                                               rpc_context.cntl.ErrorText());
1179
0
                }
1180
0
                if (rpc_context.response.status().status_code() != 0) {
1181
0
                    st = Status::create(rpc_context.response.status());
1182
0
                }
1183
0
                if (rpc_context.response.has_merged_schema()) {
1184
0
                    TabletSchemaSPtr schema = std::make_shared<TabletSchema>();
1185
0
                    schema->init_from_pb(rpc_context.response.merged_schema());
1186
0
                    schemas.push_back(schema);
1187
0
                }
1188
0
            }
1189
0
            if (!schemas.empty() && st.ok()) {
1190
                // merge all
1191
0
                TabletSchemaSPtr merged_schema;
1192
0
                st = vectorized::schema_util::get_least_common_schema(schemas, nullptr,
1193
0
                                                                      merged_schema);
1194
0
                if (!st.ok()) {
1195
0
                    LOG(WARNING) << "Failed to get least common schema: " << st.to_string();
1196
0
                    st = Status::InternalError("Failed to get least common schema: {}",
1197
0
                                               st.to_string());
1198
0
                }
1199
0
                VLOG_DEBUG << "dump schema:" << merged_schema->dump_structure();
1200
0
                merged_schema->reserve_extracted_columns();
1201
0
                merged_schema->to_schema_pb(response->mutable_merged_schema());
1202
0
            }
1203
0
            st.to_protobuf(response->mutable_status());
1204
0
            return;
1205
0
        } else {
1206
            // This is not a coordinator; fetch its local tablets and merge their schemas
1207
0
            std::vector<int64_t> target_tablets;
1208
0
            for (int i = 0; i < request->tablet_location_size(); ++i) {
1209
0
                const auto& location = request->tablet_location(i);
1210
0
                auto backend = BackendOptions::get_local_backend();
1211
                // If this is the target backend
1212
0
                if (backend.host == location.host() && config::brpc_port == location.brpc_port()) {
1213
0
                    target_tablets.assign(location.tablet_id().begin(), location.tablet_id().end());
1214
0
                    break;
1215
0
                }
1216
0
            }
1217
0
            if (!target_tablets.empty()) {
1218
0
                std::vector<TabletSchemaSPtr> tablet_schemas;
1219
0
                for (int64_t tablet_id : target_tablets) {
1220
0
                    auto res = ExecEnv::get_tablet(tablet_id);
1221
0
                    if (!res.has_value()) {
1222
                        // just ignore
1223
0
                        LOG(WARNING) << "tablet does not exist, tablet id is " << tablet_id;
1224
0
                        continue;
1225
0
                    }
1226
0
                    auto tablet = res.value();
1227
0
                    auto rowsets = tablet->get_snapshot_rowset();
1228
0
                    auto schema = vectorized::schema_util::VariantCompactionUtil::
1229
0
                            calculate_variant_extended_schema(rowsets, tablet->tablet_schema());
1230
0
                    tablet_schemas.push_back(schema);
1231
0
                }
1232
0
                if (!tablet_schemas.empty()) {
1233
                    // merge all
1234
0
                    TabletSchemaSPtr merged_schema;
1235
0
                    st = vectorized::schema_util::get_least_common_schema(tablet_schemas, nullptr,
1236
0
                                                                          merged_schema);
1237
0
                    if (!st.ok()) {
1238
0
                        LOG(WARNING) << "Failed to get least common schema: " << st.to_string();
1239
0
                        st = Status::InternalError("Failed to get least common schema: {}",
1240
0
                                                   st.to_string());
1241
0
                    }
1242
0
                    merged_schema->to_schema_pb(response->mutable_merged_schema());
1243
0
                    VLOG_DEBUG << "dump schema:" << merged_schema->dump_structure();
1244
0
                }
1245
0
            }
1246
0
            st.to_protobuf(response->mutable_status());
1247
0
        }
1248
0
    });
1249
0
    if (!ret) {
1250
0
        offer_failed(response, done, _heavy_work_pool);
1251
0
    }
1252
0
}
1253
1254
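// Deliver the stream load status reported by another node to the local stream load
// context that is waiting on its promise.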
void PInternalService::report_stream_load_status(google::protobuf::RpcController* controller,
1255
                                                 const PReportStreamLoadStatusRequest* request,
1256
                                                 PReportStreamLoadStatusResponse* response,
1257
0
                                                 google::protobuf::Closure* done) {
1258
0
    TUniqueId load_id;
1259
0
    load_id.__set_hi(request->load_id().hi());
1260
0
    load_id.__set_lo(request->load_id().lo());
1261
0
    Status st = Status::OK();
1262
0
    auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id);
1263
0
    if (!stream_load_ctx) {
1264
0
        st = Status::InternalError("unknown stream load id: {}", UniqueId(load_id).to_string());
1265
0
    } else {
1266
0
        stream_load_ctx->promise.set_value(st);
    }
1267
0
    st.to_protobuf(response->mutable_status());
1268
0
}
1269
1270
void PInternalService::get_info(google::protobuf::RpcController* controller,
1271
                                const PProxyRequest* request, PProxyResult* response,
1272
0
                                google::protobuf::Closure* done) {
1273
0
    bool ret = _exec_env->routine_load_task_executor()->get_thread_pool().submit_func([this,
1274
0
                                                                                       request,
1275
0
                                                                                       response,
1276
0
                                                                                       done]() {
1277
0
        brpc::ClosureGuard closure_guard(done);
1278
        // PProxyRequest is defined in gensrc/proto/internal_service.proto
1279
        // Currently it supports kafka metadata requests:
1280
        // 1. get all kafka partition ids for a given topic
1281
        // 2. get kafka partition offsets for a given topic (real, latest, or by timestamp).
1282
0
        int timeout_ms = request->has_timeout_secs() ? request->timeout_secs() * 1000 : 60 * 1000;
1283
0
        if (request->has_kafka_meta_request()) {
1284
0
            const PKafkaMetaProxyRequest& kafka_request = request->kafka_meta_request();
1285
0
            if (!kafka_request.offset_flags().empty()) {
1286
0
                std::vector<PIntegerPair> partition_offsets;
1287
0
                Status st = _exec_env->routine_load_task_executor()
1288
0
                                    ->get_kafka_real_offsets_for_partitions(
1289
0
                                            request->kafka_meta_request(), &partition_offsets,
1290
0
                                            timeout_ms);
1291
0
                if (st.ok()) {
1292
0
                    PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets();
1293
0
                    for (const auto& entry : partition_offsets) {
1294
0
                        PIntegerPair* res = part_offsets->add_offset_times();
1295
0
                        res->set_key(entry.key());
1296
0
                        res->set_val(entry.val());
1297
0
                    }
1298
0
                }
1299
0
                st.to_protobuf(response->mutable_status());
1300
0
                return;
1301
0
            } else if (!kafka_request.partition_id_for_latest_offsets().empty()) {
1302
                // get latest offsets for specified partition ids
1303
0
                std::vector<PIntegerPair> partition_offsets;
1304
0
                Status st = _exec_env->routine_load_task_executor()
1305
0
                                    ->get_kafka_latest_offsets_for_partitions(
1306
0
                                            request->kafka_meta_request(), &partition_offsets,
1307
0
                                            timeout_ms);
1308
0
                if (st.ok()) {
1309
0
                    PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets();
1310
0
                    for (const auto& entry : partition_offsets) {
1311
0
                        PIntegerPair* res = part_offsets->add_offset_times();
1312
0
                        res->set_key(entry.key());
1313
0
                        res->set_val(entry.val());
1314
0
                    }
1315
0
                }
1316
0
                st.to_protobuf(response->mutable_status());
1317
0
                return;
1318
0
            } else if (!kafka_request.offset_times().empty()) {
1319
                // if offset_times() has elements, this request is to get offsets by timestamp.
1320
0
                std::vector<PIntegerPair> partition_offsets;
1321
0
                Status st = _exec_env->routine_load_task_executor()
1322
0
                                    ->get_kafka_partition_offsets_for_times(
1323
0
                                            request->kafka_meta_request(), &partition_offsets,
1324
0
                                            timeout_ms);
1325
0
                if (st.ok()) {
1326
0
                    PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets();
1327
0
                    for (const auto& entry : partition_offsets) {
1328
0
                        PIntegerPair* res = part_offsets->add_offset_times();
1329
0
                        res->set_key(entry.key());
1330
0
                        res->set_val(entry.val());
1331
0
                    }
1332
0
                }
1333
0
                st.to_protobuf(response->mutable_status());
1334
0
                return;
1335
0
            } else {
1336
                // get partition ids of topic
1337
0
                std::vector<int32_t> partition_ids;
1338
0
                Status st = _exec_env->routine_load_task_executor()->get_kafka_partition_meta(
1339
0
                        request->kafka_meta_request(), &partition_ids);
1340
0
                if (st.ok()) {
1341
0
                    PKafkaMetaProxyResult* kafka_result = response->mutable_kafka_meta_result();
1342
0
                    for (int32_t id : partition_ids) {
1343
0
                        kafka_result->add_partition_ids(id);
1344
0
                    }
1345
0
                }
1346
0
                st.to_protobuf(response->mutable_status());
1347
0
                return;
1348
0
            }
1349
0
        }
1350
0
        Status::OK().to_protobuf(response->mutable_status());
1351
0
    });
1352
0
    if (!ret) {
1353
0
        offer_failed(response, done, _heavy_work_pool);
1354
0
        return;
1355
0
    }
1356
0
}
1357
1358
void PInternalService::update_cache(google::protobuf::RpcController* controller,
1359
                                    const PUpdateCacheRequest* request, PCacheResponse* response,
1360
0
                                    google::protobuf::Closure* done) {
1361
0
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1362
0
        brpc::ClosureGuard closure_guard(done);
1363
0
        _exec_env->result_cache()->update(request, response);
1364
0
    });
1365
0
    if (!ret) {
1366
0
        offer_failed(response, done, _light_work_pool);
1367
0
        return;
1368
0
    }
1369
0
}
1370
1371
void PInternalService::fetch_cache(google::protobuf::RpcController* controller,
1372
                                   const PFetchCacheRequest* request, PFetchCacheResult* result,
1373
0
                                   google::protobuf::Closure* done) {
1374
0
    bool ret = _light_work_pool.try_offer([this, request, result, done]() {
1375
0
        brpc::ClosureGuard closure_guard(done);
1376
0
        _exec_env->result_cache()->fetch(request, result);
1377
0
    });
1378
0
    if (!ret) {
1379
0
        offer_failed(result, done, _light_work_pool);
1380
0
        return;
1381
0
    }
1382
0
}
1383
1384
void PInternalService::clear_cache(google::protobuf::RpcController* controller,
1385
                                   const PClearCacheRequest* request, PCacheResponse* response,
1386
0
                                   google::protobuf::Closure* done) {
1387
0
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1388
0
        brpc::ClosureGuard closure_guard(done);
1389
0
        _exec_env->result_cache()->clear(request, response);
1390
0
    });
1391
0
    if (!ret) {
1392
0
        offer_failed(response, done, _light_work_pool);
1393
0
        return;
1394
0
    }
1395
0
}
1396
1397
void PInternalService::merge_filter(::google::protobuf::RpcController* controller,
1398
                                    const ::doris::PMergeFilterRequest* request,
1399
                                    ::doris::PMergeFilterResponse* response,
1400
0
                                    ::google::protobuf::Closure* done) {
1401
0
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
1402
0
        signal::SignalTaskIdKeeper keeper(request->query_id());
1403
0
        brpc::ClosureGuard closure_guard(done);
1404
0
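        // The serialized filter payload is carried in the rpc attachment; wrap it in a
        // zero-copy stream for fragment_mgr to deserialize.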
        auto attachment = static_cast<brpc::Controller*>(controller)->request_attachment();
1405
0
        butil::IOBufAsZeroCopyInputStream zero_copy_input_stream(attachment);
1406
0
        Status st;
1407
0
        try {
1408
0
            st = _exec_env->fragment_mgr()->merge_filter(request, &zero_copy_input_stream);
1409
0
        } catch (Exception& e) {
1410
0
            st = e.to_status();
1411
0
        }
1412
0
        st.to_protobuf(response->mutable_status());
1413
0
    });
1414
0
    if (!ret) {
1415
0
        offer_failed(response, done, _light_work_pool);
1416
0
        return;
1417
0
    }
1418
0
}
1419
1420
void PInternalService::send_filter_size(::google::protobuf::RpcController* controller,
1421
                                        const ::doris::PSendFilterSizeRequest* request,
1422
                                        ::doris::PSendFilterSizeResponse* response,
1423
0
                                        ::google::protobuf::Closure* done) {
1424
0
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1425
0
        signal::SignalTaskIdKeeper keeper(request->query_id());
1426
0
        brpc::ClosureGuard closure_guard(done);
1427
0
        Status st;
1428
0
        try {
1429
0
            st = _exec_env->fragment_mgr()->send_filter_size(request);
1430
0
        } catch (Exception& e) {
1431
0
            st = e.to_status();
1432
0
        }
1433
0
        st.to_protobuf(response->mutable_status());
1434
0
    });
1435
0
    if (!ret) {
1436
0
        offer_failed(response, done, _light_work_pool);
1437
0
        return;
1438
0
    }
1439
0
}
1440
1441
void PInternalService::sync_filter_size(::google::protobuf::RpcController* controller,
1442
                                        const ::doris::PSyncFilterSizeRequest* request,
1443
                                        ::doris::PSyncFilterSizeResponse* response,
1444
0
                                        ::google::protobuf::Closure* done) {
1445
0
    bool ret = _light_work_pool.try_offer([this, request, response, done]() {
1446
0
        signal::SignalTaskIdKeeper keeper(request->query_id());
1447
0
        brpc::ClosureGuard closure_guard(done);
1448
0
        Status st;
1449
0
        try {
1450
0
            st = _exec_env->fragment_mgr()->sync_filter_size(request);
1451
0
        } catch (Exception& e) {
1452
0
            st = e.to_status();
1453
0
        }
1454
0
        st.to_protobuf(response->mutable_status());
1455
0
    });
1456
0
    if (!ret) {
1457
0
        offer_failed(response, done, _light_work_pool);
1458
0
        return;
1459
0
    }
1460
0
}
1461
1462
void PInternalService::apply_filterv2(::google::protobuf::RpcController* controller,
1463
                                      const ::doris::PPublishFilterRequestV2* request,
1464
                                      ::doris::PPublishFilterResponse* response,
1465
0
                                      ::google::protobuf::Closure* done) {
1466
0
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
1467
0
        signal::SignalTaskIdKeeper keeper(request->query_id());
1468
0
        brpc::ClosureGuard closure_guard(done);
1469
0
        auto attachment = static_cast<brpc::Controller*>(controller)->request_attachment();
1470
0
        butil::IOBufAsZeroCopyInputStream zero_copy_input_stream(attachment);
1471
0
        VLOG_NOTICE << "rpc apply_filterv2 recv";
1472
0
        Status st;
1473
0
        try {
1474
0
            st = _exec_env->fragment_mgr()->apply_filterv2(request, &zero_copy_input_stream);
1475
0
        } catch (Exception& e) {
1476
0
            st = e.to_status();
1477
0
        }
1478
0
        if (!st.ok()) {
1479
0
            LOG(WARNING) << "apply filter meet error: " << st.to_string();
1480
0
        }
1481
0
        st.to_protobuf(response->mutable_status());
1482
0
    });
1483
0
    if (!ret) {
1484
0
        offer_failed(response, done, _light_work_pool);
1485
0
        return;
1486
0
    }
1487
0
}
1488
1489
void PInternalService::send_data(google::protobuf::RpcController* controller,
1490
                                 const PSendDataRequest* request, PSendDataResult* response,
1491
0
                                 google::protobuf::Closure* done) {
1492
0
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
1493
0
        brpc::ClosureGuard closure_guard(done);
1494
0
        TUniqueId load_id;
1495
0
        load_id.hi = request->load_id().hi();
1496
0
        load_id.lo = request->load_id().lo();
1497
        // Since 1.2.3 the load id is carried in the send data request and is used to look up the pipe
1498
0
        auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id);
1499
0
        if (stream_load_ctx == nullptr) {
1500
0
            response->mutable_status()->set_status_code(1);
1501
0
            response->mutable_status()->add_error_msgs("could not find stream load context");
1502
0
        } else {
1503
0
            auto pipe = stream_load_ctx->pipe;
1504
0
            for (int i = 0; i < request->data_size(); ++i) {
1505
0
                std::unique_ptr<PDataRow> row(new PDataRow());
1506
0
                row->CopyFrom(request->data(i));
1507
0
                Status s = pipe->append(std::move(row));
1508
0
                if (!s.ok()) {
1509
0
                    response->mutable_status()->set_status_code(1);
1510
0
                    response->mutable_status()->add_error_msgs(s.to_string());
1511
0
                    return;
1512
0
                }
1513
0
            }
1514
0
            response->mutable_status()->set_status_code(0);
1515
0
        }
1516
0
    });
1517
0
    if (!ret) {
1518
0
        offer_failed(response, done, _heavy_work_pool);
1519
0
        return;
1520
0
    }
1521
0
}
1522
1523
void PInternalService::commit(google::protobuf::RpcController* controller,
1524
                              const PCommitRequest* request, PCommitResult* response,
1525
0
                              google::protobuf::Closure* done) {
1526
0
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
1527
0
        brpc::ClosureGuard closure_guard(done);
1528
0
        TUniqueId load_id;
1529
0
        load_id.hi = request->load_id().hi();
1530
0
        load_id.lo = request->load_id().lo();
1531
1532
0
        auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id);
1533
0
        if (stream_load_ctx == nullptr) {
1534
0
            response->mutable_status()->set_status_code(1);
1535
0
            response->mutable_status()->add_error_msgs("could not find stream load context");
1536
0
        } else {
1537
0
            static_cast<void>(stream_load_ctx->pipe->finish());
1538
0
            response->mutable_status()->set_status_code(0);
1539
0
        }
1540
0
    });
1541
0
    if (!ret) {
1542
0
        offer_failed(response, done, _heavy_work_pool);
1543
0
        return;
1544
0
    }
1545
0
}
1546
1547
void PInternalService::rollback(google::protobuf::RpcController* controller,
1548
                                const PRollbackRequest* request, PRollbackResult* response,
1549
0
                                google::protobuf::Closure* done) {
1550
0
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
1551
0
        brpc::ClosureGuard closure_guard(done);
1552
0
        TUniqueId load_id;
1553
0
        load_id.hi = request->load_id().hi();
1554
0
        load_id.lo = request->load_id().lo();
1555
0
        auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id);
1556
0
        if (stream_load_ctx == nullptr) {
1557
0
            response->mutable_status()->set_status_code(1);
1558
0
            response->mutable_status()->add_error_msgs("could not find stream load context");
1559
0
        } else {
1560
0
            stream_load_ctx->pipe->cancel("rollback");
1561
0
            response->mutable_status()->set_status_code(0);
1562
0
        }
1563
0
    });
1564
0
    if (!ret) {
1565
0
        offer_failed(response, done, _heavy_work_pool);
1566
0
        return;
1567
0
    }
1568
0
}
1569
1570
void PInternalService::fold_constant_expr(google::protobuf::RpcController* controller,
1571
                                          const PConstantExprRequest* request,
1572
                                          PConstantExprResult* response,
1573
0
                                          google::protobuf::Closure* done) {
1574
0
    bool ret = _light_work_pool.try_offer([request, response, done]() {
1575
0
        brpc::ClosureGuard closure_guard(done);
1576
0
        TFoldConstantParams t_request;
1577
0
        Status st = Status::OK();
1578
0
        {
1579
0
            const uint8_t* buf = (const uint8_t*)request->request().data();
1580
0
            uint32_t len = request->request().size();
1581
0
            st = deserialize_thrift_msg(buf, &len, false, &t_request);
1582
0
        }
1583
0
        if (!st.ok()) {
1584
0
            LOG(WARNING) << "exec fold constant expr failed, errmsg=" << st
1585
0
                         << " .and query_id_is: " << t_request.query_id;
1586
0
            st.to_protobuf(response->mutable_status());
1587
0
            return;
1588
0
        }
1589
0
        auto fold_func = [&]() -> Status {
1590
0
            std::unique_ptr<FoldConstantExecutor> fold_executor =
1591
0
                    std::make_unique<FoldConstantExecutor>();
1592
0
            RETURN_IF_ERROR_OR_CATCH_EXCEPTION(
1593
0
                    fold_executor->fold_constant_vexpr(t_request, response));
1594
0
            return Status::OK();
1595
0
        };
1596
0
        st = fold_func();
1597
0
        if (!st.ok()) {
1598
0
            LOG(WARNING) << "exec fold constant expr failed, errmsg=" << st
1599
0
                         << " .and query_id_is: " << t_request.query_id;
1600
0
        }
1601
0
        st.to_protobuf(response->mutable_status());
1602
0
    });
1603
0
    if (!ret) {
1604
0
        offer_failed(response, done, _light_work_pool);
1605
0
        return;
1606
0
    }
1607
0
}
1608
1609
void PInternalService::transmit_block(google::protobuf::RpcController* controller,
1610
                                      const PTransmitDataParams* request,
1611
                                      PTransmitDataResult* response,
1612
0
                                      google::protobuf::Closure* done) {
1613
0
    int64_t receive_time = GetCurrentTimeNanos();
1614
0
    if (config::enable_bthread_transmit_block) {
1615
0
        response->set_receive_time(receive_time);
1616
        // Under high concurrency the thread pool suffers heavy lock contention
1617
        // and the offer may fail, so avoid going through the thread
1618
        // pool here.
1619
0
        _transmit_block(controller, request, response, done, Status::OK(), 0);
1620
0
    } else {
1621
0
        bool ret = _light_work_pool.try_offer([this, controller, request, response, done,
1622
0
                                               receive_time]() {
1623
0
            response->set_receive_time(receive_time);
1624
            // Sometimes the transmit block function is the last owner of the PlanFragmentExecutor
1625
            // and will release it. The released object may hold a JNIContext,
1626
            // which keeps TLS objects that do not work correctly under a bthread
1627
            // context, so the logic is put on a pthread instead.
1628
            // This rarely happens, which is why the config is disabled by default.
1629
0
            _transmit_block(controller, request, response, done, Status::OK(),
1630
0
                            GetCurrentTimeNanos() - receive_time);
1631
0
        });
1632
0
        if (!ret) {
1633
0
            offer_failed(response, done, _light_work_pool);
1634
0
            return;
1635
0
        }
1636
0
    }
1637
0
}
1638
1639
void PInternalService::transmit_block_by_http(google::protobuf::RpcController* controller,
1640
                                              const PEmptyRequest* request,
1641
                                              PTransmitDataResult* response,
1642
0
                                              google::protobuf::Closure* done) {
1643
0
    int64_t receive_time = GetCurrentTimeNanos();
1644
0
    bool ret = _heavy_work_pool.try_offer([this, controller, response, done, receive_time]() {
1645
0
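        // The block arrives via http; rebuild a PTransmitDataParams from the request
        // attachment and hand it to _transmit_block together with a new closure that
        // wraps the original one.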
        PTransmitDataParams* new_request = new PTransmitDataParams();
1646
0
        google::protobuf::Closure* new_done =
1647
0
                new NewHttpClosure<PTransmitDataParams>(new_request, done);
1648
0
        brpc::Controller* cntl = static_cast<brpc::Controller*>(controller);
1649
0
        Status st =
1650
0
                attachment_extract_request_contain_block<PTransmitDataParams>(new_request, cntl);
1651
0
        _transmit_block(controller, new_request, response, new_done, st,
1652
0
                        GetCurrentTimeNanos() - receive_time);
1653
0
    });
1654
0
    if (!ret) {
1655
0
        offer_failed(response, done, _heavy_work_pool);
1656
0
        return;
1657
0
    }
1658
0
}
1659
1660
void PInternalService::_transmit_block(google::protobuf::RpcController* controller,
1661
                                       const PTransmitDataParams* request,
1662
                                       PTransmitDataResult* response,
1663
                                       google::protobuf::Closure* done, const Status& extract_st,
1664
0
                                       const int64_t wait_for_worker) {
1665
0
    if (request->has_query_id()) {
1666
0
        VLOG_ROW << "transmit block: fragment_instance_id=" << print_id(request->finst_id())
1667
0
                 << " query_id=" << print_id(request->query_id()) << " node=" << request->node_id();
1668
0
    }
1669
1670
    // The response is accessed when done->Run is called in transmit_block(),
1671
    // so give it a default status to avoid null pointers under high concurrency.
1672
0
    Status st;
1673
0
    if (extract_st.ok()) {
1674
0
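        // transmit_block takes the closure by address and may consume it (resetting the
        // pointer to null); only run it below if it was not consumed.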
        st = _exec_env->vstream_mgr()->transmit_block(request, &done, wait_for_worker);
1675
0
        if (!st.ok() && !st.is<END_OF_FILE>()) {
1676
0
            LOG(WARNING) << "transmit_block failed, message=" << st
1677
0
                         << ", fragment_instance_id=" << print_id(request->finst_id())
1678
0
                         << ", node=" << request->node_id()
1679
0
                         << ", from sender_id: " << request->sender_id()
1680
0
                         << ", be_number: " << request->be_number()
1681
0
                         << ", packet_seq: " << request->packet_seq();
1682
0
        }
1683
0
    } else {
1684
0
        st = extract_st;
1685
0
    }
1686
0
    if (done != nullptr) {
1687
0
        st.to_protobuf(response->mutable_status());
1688
0
        done->Run();
1689
0
    }
1690
0
}
1691
1692
void PInternalService::check_rpc_channel(google::protobuf::RpcController* controller,
1693
                                         const PCheckRPCChannelRequest* request,
1694
                                         PCheckRPCChannelResponse* response,
1695
0
                                         google::protobuf::Closure* done) {
1696
0
    bool ret = _light_work_pool.try_offer([request, response, done]() {
1697
0
        brpc::ClosureGuard closure_guard(done);
1698
0
        response->mutable_status()->set_status_code(0);
1699
0
        if (request->data().size() != request->size()) {
1700
0
            std::stringstream ss;
1701
0
            ss << "data size not same, expected: " << request->size()
1702
0
               << ", actual: " << request->data().size();
1703
0
            response->mutable_status()->add_error_msgs(ss.str());
1704
0
            response->mutable_status()->set_status_code(1);
1705
1706
0
        } else {
1707
0
            Md5Digest digest;
1708
0
            digest.update(static_cast<const void*>(request->data().c_str()),
1709
0
                          request->data().size());
1710
0
            digest.digest();
1711
0
            if (!iequal(digest.hex(), request->md5())) {
1712
0
                std::stringstream ss;
1713
0
                ss << "md5 not same, expected: " << request->md5() << ", actual: " << digest.hex();
1714
0
                response->mutable_status()->add_error_msgs(ss.str());
1715
0
                response->mutable_status()->set_status_code(1);
1716
0
            }
1717
0
        }
1718
0
    });
1719
0
    if (!ret) {
1720
0
        offer_failed(response, done, _light_work_pool);
1721
0
        return;
1722
0
    }
1723
0
}
1724
1725
void PInternalService::reset_rpc_channel(google::protobuf::RpcController* controller,
1726
                                         const PResetRPCChannelRequest* request,
1727
                                         PResetRPCChannelResponse* response,
1728
0
                                         google::protobuf::Closure* done) {
1729
0
    bool ret = _light_work_pool.try_offer([request, response, done]() {
1730
0
        brpc::ClosureGuard closure_guard(done);
1731
0
        response->mutable_status()->set_status_code(0);
1732
0
        if (request->all()) {
1733
0
            int size = ExecEnv::GetInstance()->brpc_internal_client_cache()->size();
1734
0
            if (size > 0) {
1735
0
                std::vector<std::string> endpoints;
1736
0
                ExecEnv::GetInstance()->brpc_internal_client_cache()->get_all(&endpoints);
1737
0
                ExecEnv::GetInstance()->brpc_internal_client_cache()->clear();
1738
0
                *response->mutable_channels() = {endpoints.begin(), endpoints.end()};
1739
0
            }
1740
0
        } else {
1741
0
            for (const std::string& endpoint : request->endpoints()) {
1742
0
                if (!ExecEnv::GetInstance()->brpc_internal_client_cache()->exist(endpoint)) {
1743
0
                    response->mutable_status()->add_error_msgs(endpoint + ": not found.");
1744
0
                    continue;
1745
0
                }
1746
1747
0
                if (ExecEnv::GetInstance()->brpc_internal_client_cache()->erase(endpoint)) {
1748
0
                    response->add_channels(endpoint);
1749
0
                } else {
1750
0
                    response->mutable_status()->add_error_msgs(endpoint + ": reset failed.");
1751
0
                }
1752
0
            }
1753
0
            if (request->endpoints_size() != response->channels_size()) {
1754
0
                response->mutable_status()->set_status_code(1);
1755
0
            }
1756
0
        }
1757
0
    });
1758
0
    if (!ret) {
1759
0
        offer_failed(response, done, _light_work_pool);
1760
0
        return;
1761
0
    }
1762
0
}
1763
1764
void PInternalService::hand_shake(google::protobuf::RpcController* controller,
1765
                                  const PHandShakeRequest* request, PHandShakeResponse* response,
1766
0
                                  google::protobuf::Closure* done) {
1767
    // The light pool may be full. Handshake is used to check the connection state of brpc.
1768
    // It should not be affected by the thread pool logic.
1769
0
    brpc::ClosureGuard closure_guard(done);
1770
0
    if (request->has_hello()) {
1771
0
        response->set_hello(request->hello());
1772
0
    }
1773
0
    response->mutable_status()->set_status_code(0);
1774
0
}
1775
1776
constexpr char HttpProtocol[] = "http://";
1777
constexpr char DownloadApiPath[] = "/api/_tablet/_download?token=";
1778
constexpr char FileParam[] = "&file=";
1779
1780
static std::string construct_url(const std::string& host_port, const std::string& token,
1781
0
                                 const std::string& path) {
1782
0
    return fmt::format("{}{}{}{}{}{}", HttpProtocol, host_port, DownloadApiPath, token, FileParam,
1783
0
                       path);
1784
0
}
1785
1786
static Status download_file_action(std::string& remote_file_url, std::string& local_file_path,
1787
0
                                   uint64_t estimate_timeout, uint64_t file_size) {
1788
0
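    // Download the remote file over http with retries (up to DOWNLOAD_FILE_MAX_RETRY
    // attempts), verify its size when one is provided, and restrict the local copy to
    // owner read/write permissions.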
    auto download_cb = [remote_file_url, estimate_timeout, local_file_path,
1789
0
                        file_size](HttpClient* client) {
1790
0
        RETURN_IF_ERROR(client->init(remote_file_url));
1791
0
        client->set_timeout_ms(estimate_timeout * 1000);
1792
0
        RETURN_IF_ERROR(client->download(local_file_path));
1793
1794
0
        if (file_size > 0) {
1795
            // Check file length
1796
0
            uint64_t local_file_size = std::filesystem::file_size(local_file_path);
1797
0
            if (local_file_size != file_size) {
1798
0
                LOG(WARNING) << "failed to pull rowset for slave replica. download file "
1799
0
                                "length error"
1800
0
                             << ", remote_path=" << remote_file_url << ", file_size=" << file_size
1801
0
                             << ", local_file_size=" << local_file_size;
1802
0
                return Status::InternalError("downloaded file size is not equal");
1803
0
            }
1804
0
        }
1805
1806
0
        return io::global_local_filesystem()->permission(local_file_path,
1807
0
                                                         io::LocalFileSystem::PERMS_OWNER_RW);
1808
0
    };
1809
0
    return HttpClient::execute_with_retry(DOWNLOAD_FILE_MAX_RETRY, 1, download_cb);
1810
0
}
1811
1812
void PInternalServiceImpl::request_slave_tablet_pull_rowset(
1813
        google::protobuf::RpcController* controller, const PTabletWriteSlaveRequest* request,
1814
0
        PTabletWriteSlaveResult* response, google::protobuf::Closure* done) {
1815
0
    brpc::ClosureGuard closure_guard(done);
1816
0
    const RowsetMetaPB& rowset_meta_pb = request->rowset_meta();
1817
0
    const std::string& rowset_path = request->rowset_path();
1818
0
    google::protobuf::Map<int64_t, int64_t> segments_size = request->segments_size();
1819
0
    google::protobuf::Map<int64_t, PTabletWriteSlaveRequest_IndexSizeMap> indices_size =
1820
0
            request->inverted_indices_size();
1821
0
    std::string host = request->host();
1822
0
    int64_t http_port = request->http_port();
1823
0
    int64_t brpc_port = request->brpc_port();
1824
0
    std::string token = request->token();
1825
0
    int64_t node_id = request->node_id();
1826
0
    bool ret = _heavy_work_pool.try_offer([rowset_meta_pb, host, brpc_port, node_id, segments_size,
1827
0
                                           indices_size, http_port, token, rowset_path, this]() {
1828
0
        TabletSharedPtr tablet = _engine.tablet_manager()->get_tablet(
1829
0
                rowset_meta_pb.tablet_id(), rowset_meta_pb.tablet_schema_hash());
1830
0
        if (tablet == nullptr) {
1831
0
            LOG(WARNING) << "failed to pull rowset for slave replica. tablet ["
1832
0
                         << rowset_meta_pb.tablet_id()
1833
0
                         << "] is not exist. txn_id=" << rowset_meta_pb.txn_id();
1834
0
            _response_pull_slave_rowset(host, brpc_port, rowset_meta_pb.txn_id(),
1835
0
                                        rowset_meta_pb.tablet_id(), node_id, false);
1836
0
            return;
1837
0
        }
1838
1839
0
        RowsetMetaSharedPtr rowset_meta(new RowsetMeta());
1840
0
        std::string rowset_meta_str;
1841
0
        bool ret = rowset_meta_pb.SerializeToString(&rowset_meta_str);
1842
0
        if (!ret) {
1843
0
            LOG(WARNING) << "failed to pull rowset for slave replica. serialize rowset meta "
1844
0
                            "failed. rowset_id="
1845
0
                         << rowset_meta_pb.rowset_id()
1846
0
                         << ", tablet_id=" << rowset_meta_pb.tablet_id()
1847
0
                         << ", txn_id=" << rowset_meta_pb.txn_id();
1848
0
            _response_pull_slave_rowset(host, brpc_port, rowset_meta_pb.txn_id(),
1849
0
                                        rowset_meta_pb.tablet_id(), node_id, false);
1850
0
            return;
1851
0
        }
1852
0
        bool parsed = rowset_meta->init(rowset_meta_str);
1853
0
        if (!parsed) {
1854
0
            LOG(WARNING) << "failed to pull rowset for slave replica. parse rowset meta string "
1855
0
                            "failed. rowset_id="
1856
0
                         << rowset_meta_pb.rowset_id()
1857
0
                         << ", tablet_id=" << rowset_meta_pb.tablet_id()
1858
0
                         << ", txn_id=" << rowset_meta_pb.txn_id();
1859
            // parsing failed; report failure back to the master replica and skip this rowset
1860
0
            _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
1861
0
                                        rowset_meta->tablet_id(), node_id, false);
1862
0
            return;
1863
0
        }
1864
0
        RowsetId remote_rowset_id = rowset_meta->rowset_id();
1865
        // change the rowset id because it may be the same as another local rowset
1866
0
        RowsetId new_rowset_id = _engine.next_rowset_id();
1867
0
        auto pending_rs_guard = _engine.pending_local_rowsets().add(new_rowset_id);
1868
0
        rowset_meta->set_rowset_id(new_rowset_id);
1869
0
        rowset_meta->set_tablet_uid(tablet->tablet_uid());
1870
0
        VLOG_CRITICAL << "succeed to init rowset meta for slave replica. rowset_id="
1871
0
                      << rowset_meta->rowset_id() << ", tablet_id=" << rowset_meta->tablet_id()
1872
0
                      << ", txn_id=" << rowset_meta->txn_id();
1873
1874
0
        auto tablet_scheme = rowset_meta->tablet_schema();
1875
0
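        // Download every segment of the rowset from the master replica; the timeout is
        // estimated from the file size and the configured low-speed limit, with the
        // configured minimum as a floor.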
        for (const auto& segment : segments_size) {
1876
0
            uint64_t file_size = segment.second;
1877
0
            uint64_t estimate_timeout = file_size / config::download_low_speed_limit_kbps / 1024;
1878
0
            if (estimate_timeout < config::download_low_speed_time) {
1879
0
                estimate_timeout = config::download_low_speed_time;
1880
0
            }
1881
1882
0
            std::string remote_file_path =
1883
0
                    local_segment_path(rowset_path, remote_rowset_id.to_string(), segment.first);
1884
0
            std::string remote_file_url =
1885
0
                    construct_url(get_host_port(host, http_port), token, remote_file_path);
1886
1887
0
            std::string local_file_path = local_segment_path(
1888
0
                    tablet->tablet_path(), rowset_meta->rowset_id().to_string(), segment.first);
1889
1890
0
            auto st = download_file_action(remote_file_url, local_file_path, estimate_timeout,
1891
0
                                           file_size);
1892
0
            if (!st.ok()) {
1893
0
                LOG(WARNING) << "failed to pull rowset for slave replica. failed to download "
1894
0
                                "file. url="
1895
0
                             << remote_file_url << ", local_path=" << local_file_path
1896
0
                             << ", txn_id=" << rowset_meta->txn_id();
1897
0
                _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
1898
0
                                            rowset_meta->tablet_id(), node_id, false);
1899
0
                return;
1900
0
            }
1901
0
            VLOG_CRITICAL << "succeed to download file for slave replica. url=" << remote_file_url
1902
0
                          << ", local_path=" << local_file_path
1903
0
                          << ", txn_id=" << rowset_meta->txn_id();
1904
0
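            // Also download the segment's inverted index files, using the per-index V1
            // path or the single-file V2 path depending on the tablet schema's inverted
            // index storage format.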
            if (indices_size.find(segment.first) != indices_size.end()) {
1905
0
                PTabletWriteSlaveRequest_IndexSizeMap segment_indices_size =
1906
0
                        indices_size.at(segment.first);
1907
1908
0
                for (auto index_size : segment_indices_size.index_sizes()) {
1909
0
                    auto index_id = index_size.indexid();
1910
0
                    auto size = index_size.size();
1911
0
                    auto suffix_path = index_size.suffix_path();
1912
0
                    std::string remote_inverted_index_file;
1913
0
                    std::string local_inverted_index_file;
1914
0
                    std::string remote_inverted_index_file_url;
1915
0
                    if (tablet_scheme->get_inverted_index_storage_format() ==
1916
0
                        InvertedIndexStorageFormatPB::V1) {
1917
0
                        remote_inverted_index_file =
1918
0
                                InvertedIndexDescriptor::get_index_file_path_v1(
1919
0
                                        InvertedIndexDescriptor::get_index_file_path_prefix(
1920
0
                                                remote_file_path),
1921
0
                                        index_id, suffix_path);
1922
0
                        remote_inverted_index_file_url = construct_url(
1923
0
                                get_host_port(host, http_port), token, remote_inverted_index_file);
1924
1925
0
                        local_inverted_index_file = InvertedIndexDescriptor::get_index_file_path_v1(
1926
0
                                InvertedIndexDescriptor::get_index_file_path_prefix(
1927
0
                                        local_file_path),
1928
0
                                index_id, suffix_path);
1929
0
                    } else {
1930
0
                        remote_inverted_index_file =
1931
0
                                InvertedIndexDescriptor::get_index_file_path_v2(
1932
0
                                        InvertedIndexDescriptor::get_index_file_path_prefix(
1933
0
                                                remote_file_path));
1934
0
                        remote_inverted_index_file_url = construct_url(
1935
0
                                get_host_port(host, http_port), token, remote_inverted_index_file);
1936
1937
0
                        local_inverted_index_file = InvertedIndexDescriptor::get_index_file_path_v2(
1938
0
                                InvertedIndexDescriptor::get_index_file_path_prefix(
1939
0
                                        local_file_path));
1940
0
                    }
1941
0
                    st = download_file_action(remote_inverted_index_file_url,
1942
0
                                              local_inverted_index_file, estimate_timeout, size);
1943
0
                    if (!st.ok()) {
1944
0
                        LOG(WARNING) << "failed to pull rowset for slave replica. failed to "
1945
0
                                        "download "
1946
0
                                        "file. url="
1947
0
                                     << remote_inverted_index_file_url
1948
0
                                     << ", local_path=" << local_inverted_index_file
1949
0
                                     << ", txn_id=" << rowset_meta->txn_id();
1950
0
                        _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
1951
0
                                                    rowset_meta->tablet_id(), node_id, false);
1952
0
                        return;
1953
0
                    }
1954
1955
0
                    VLOG_CRITICAL
1956
0
                            << "succeed to download inverted index file for slave replica. url="
1957
0
                            << remote_inverted_index_file_url
1958
0
                            << ", local_path=" << local_inverted_index_file
1959
0
                            << ", txn_id=" << rowset_meta->txn_id();
1960
0
                }
1961
0
            }
1962
0
        }
1963
1964
0
        RowsetSharedPtr rowset;
1965
0
        Status create_status = RowsetFactory::create_rowset(
1966
0
                tablet->tablet_schema(), tablet->tablet_path(), rowset_meta, &rowset);
1967
0
        if (!create_status) {
1968
0
            LOG(WARNING) << "failed to create rowset from rowset meta for slave replica"
1969
0
                         << ". rowset_id: " << rowset_meta->rowset_id()
1970
0
                         << ", rowset_type: " << rowset_meta->rowset_type()
1971
0
                         << ", rowset_state: " << rowset_meta->rowset_state()
1972
0
                         << ", tablet_id=" << rowset_meta->tablet_id()
1973
0
                         << ", txn_id=" << rowset_meta->txn_id();
1974
0
            _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
1975
0
                                        rowset_meta->tablet_id(), node_id, false);
1976
0
            return;
1977
0
        }
1978
0
        if (rowset_meta->rowset_state() != RowsetStatePB::COMMITTED) {
1979
0
            LOG(WARNING) << "could not commit txn for slave replica because master rowset state is "
1980
0
                            "not committed, rowset_state="
1981
0
                         << rowset_meta->rowset_state()
1982
0
                         << ", tablet_id=" << rowset_meta->tablet_id()
1983
0
                         << ", txn_id=" << rowset_meta->txn_id();
1984
0
            _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
1985
0
                                        rowset_meta->tablet_id(), node_id, false);
1986
0
            return;
1987
0
        }
1988
0
        Status commit_txn_status = _engine.txn_manager()->commit_txn(
1989
0
                tablet->data_dir()->get_meta(), rowset_meta->partition_id(), rowset_meta->txn_id(),
1990
0
                rowset_meta->tablet_id(), tablet->tablet_uid(), rowset_meta->load_id(), rowset,
1991
0
                std::move(pending_rs_guard), false);
1992
0
        if (!commit_txn_status && !commit_txn_status.is<PUSH_TRANSACTION_ALREADY_EXIST>()) {
1993
0
            LOG(WARNING) << "failed to add committed rowset for slave replica. rowset_id="
1994
0
                         << rowset_meta->rowset_id() << ", tablet_id=" << rowset_meta->tablet_id()
1995
0
                         << ", txn_id=" << rowset_meta->txn_id();
1996
0
            _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
1997
0
                                        rowset_meta->tablet_id(), node_id, false);
1998
0
            return;
1999
0
        }
2000
0
        VLOG_CRITICAL << "succeed to pull rowset for slave replica. successfully to add committed "
2001
0
                         "rowset: "
2002
0
                      << rowset_meta->rowset_id()
2003
0
                      << " to tablet, tablet_id=" << rowset_meta->tablet_id()
2004
0
                      << ", schema_hash=" << rowset_meta->tablet_schema_hash()
2005
0
                      << ", txn_id=" << rowset_meta->txn_id();
2006
0
        _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(),
2007
0
                                    rowset_meta->tablet_id(), node_id, true);
2008
0
    });
2009
0
    if (!ret) {
2010
0
        offer_failed(response, closure_guard.release(), _heavy_work_pool);
2011
0
        return;
2012
0
    }
2013
0
    Status::OK().to_protobuf(response->mutable_status());
2014
0
}
2015
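
The commit path above deliberately tolerates PUSH_TRANSACTION_ALREADY_EXIST, so a retried slave pull does not fail a replica whose txn is already committed. A minimal self-contained sketch of that idempotent-commit check follows; Status, Code, and commit_txn here are simplified stand-ins, not the real Doris types:

#include <cstdint>
#include <iostream>
#include <string>

// Simplified stand-ins for the real doris::Status machinery.
enum class Code { OK, PUSH_TRANSACTION_ALREADY_EXIST, INTERNAL_ERROR };

struct Status {
    Code code = Code::OK;
    std::string msg;
    bool ok() const { return code == Code::OK; }
    template <Code C>
    bool is() const { return code == C; }
};

// Hypothetical commit: succeeds, or reports that the txn was already committed.
Status commit_txn(int64_t txn_id, bool already_committed) {
    if (already_committed) {
        return {Code::PUSH_TRANSACTION_ALREADY_EXIST,
                "txn " + std::to_string(txn_id) + " already exists"};
    }
    return {};
}

bool commit_rowset_idempotent(int64_t txn_id, bool already_committed) {
    Status st = commit_txn(txn_id, already_committed);
    // A duplicate commit is not an error for a retried slave pull;
    // only a genuinely failed commit is reported back as failure.
    if (!st.ok() && !st.is<Code::PUSH_TRANSACTION_ALREADY_EXIST>()) {
        std::cerr << "commit failed: " << st.msg << '\n';
        return false;
    }
    return true;
}

int main() {
    std::cout << commit_rowset_idempotent(42, /*already_committed=*/true) << '\n'; // prints 1
}
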
2016
void PInternalServiceImpl::_response_pull_slave_rowset(const std::string& remote_host,
2017
                                                       int64_t brpc_port, int64_t txn_id,
2018
                                                       int64_t tablet_id, int64_t node_id,
2019
0
                                                       bool is_succeed) {
2020
0
    std::shared_ptr<PBackendService_Stub> stub =
2021
0
            ExecEnv::GetInstance()->brpc_internal_client_cache()->get_client(remote_host,
2022
0
                                                                             brpc_port);
2023
0
    if (stub == nullptr) {
2024
0
        LOG(WARNING) << "failed to response result of slave replica to master replica. get rpc "
2025
0
                        "stub failed, master host="
2026
0
                     << remote_host << ", port=" << brpc_port << ", tablet_id=" << tablet_id
2027
0
                     << ", txn_id=" << txn_id;
2028
0
        return;
2029
0
    }
2030
2031
0
    auto request = std::make_shared<PTabletWriteSlaveDoneRequest>();
2032
0
    request->set_txn_id(txn_id);
2033
0
    request->set_tablet_id(tablet_id);
2034
0
    request->set_node_id(node_id);
2035
0
    request->set_is_succeed(is_succeed);
2036
0
    auto pull_rowset_callback = DummyBrpcCallback<PTabletWriteSlaveDoneResult>::create_shared();
2037
0
    auto closure = AutoReleaseClosure<
2038
0
            PTabletWriteSlaveDoneRequest,
2039
0
            DummyBrpcCallback<PTabletWriteSlaveDoneResult>>::create_unique(request,
2040
0
                                                                           pull_rowset_callback);
2041
0
    closure->cntl_->set_timeout_ms(config::slave_replica_writer_rpc_timeout_sec * 1000);
2042
0
    closure->cntl_->ignore_eovercrowded();
2043
0
    stub->response_slave_tablet_pull_rowset(closure->cntl_.get(), closure->request_.get(),
2044
0
                                            closure->response_.get(), closure.get());
2045
0
    closure.release();
2046
2047
0
    pull_rowset_callback->join();
2048
0
    if (pull_rowset_callback->cntl_->Failed()) {
2049
0
        LOG(WARNING) << "failed to response result of slave replica to master replica, error="
2050
0
                     << berror(pull_rowset_callback->cntl_->ErrorCode())
2051
0
                     << ", error_text=" << pull_rowset_callback->cntl_->ErrorText()
2052
0
                     << ", master host: " << remote_host << ", tablet_id=" << tablet_id
2053
0
                     << ", txn_id=" << txn_id;
2054
0
    }
2055
0
    VLOG_CRITICAL << "succeed to response the result of slave replica pull rowset to master "
2056
0
                     "replica. master host: "
2057
0
                  << remote_host << ". is_succeed=" << is_succeed << ", tablet_id=" << tablet_id
2058
0
                  << ", slave server=" << node_id << ", txn_id=" << txn_id;
2059
0
}
2060
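
_response_pull_slave_rowset above releases the closure (which frees itself once brpc invokes it) and then joins the callback before logging the outcome. A compact sketch of that detach-then-join shape, with std::thread standing in for the brpc stub call; every name below is illustrative, not the brpc API:

#include <future>
#include <iostream>
#include <thread>

// Illustrative callback holder; none of these names are the brpc API.
struct PullRowsetCallback {
    std::promise<bool> done;
    std::thread worker;
    void join() { worker.join(); }  // mirror of pull_rowset_callback->join()
};

// Stand-in for stub->response_slave_tablet_pull_rowset(): kick off the "RPC"
// on another thread and complete the callback when it finishes.
void async_rpc(PullRowsetCallback& cb, bool is_succeed) {
    cb.worker = std::thread([&cb, is_succeed] { cb.done.set_value(is_succeed); });
}

int main() {
    PullRowsetCallback cb;
    std::future<bool> fut = cb.done.get_future();
    async_rpc(cb, /*is_succeed=*/true);
    cb.join();  // block until the async call has reported back
    std::cout << "master acknowledged, is_succeed=" << fut.get() << '\n';
}
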
2061
void PInternalServiceImpl::response_slave_tablet_pull_rowset(
2062
        google::protobuf::RpcController* controller, const PTabletWriteSlaveDoneRequest* request,
2063
0
        PTabletWriteSlaveDoneResult* response, google::protobuf::Closure* done) {
2064
0
    bool ret = _heavy_work_pool.try_offer([txn_mgr = _engine.txn_manager(), request, response,
2065
0
                                           done]() {
2066
0
        brpc::ClosureGuard closure_guard(done);
2067
0
        VLOG_CRITICAL << "receive the result of slave replica pull rowset from slave replica. "
2068
0
                         "slave server="
2069
0
                      << request->node_id() << ", is_succeed=" << request->is_succeed()
2070
0
                      << ", tablet_id=" << request->tablet_id() << ", txn_id=" << request->txn_id();
2071
0
        txn_mgr->finish_slave_tablet_pull_rowset(request->txn_id(), request->tablet_id(),
2072
0
                                                 request->node_id(), request->is_succeed());
2073
0
        Status::OK().to_protobuf(response->mutable_status());
2074
0
    });
2075
0
    if (!ret) {
2076
0
        offer_failed(response, done, _heavy_work_pool);
2077
0
        return;
2078
0
    }
2079
0
}
2080
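
Almost every handler in this file follows the same submit-or-fail-inline contract: try_offer() onto a work pool, and offer_failed() to answer the RPC immediately if the pool rejects the task, so the client is never left hanging. A toy version under that assumption, with a bounded std::queue standing in for Doris's ThreadPool:

#include <cstddef>
#include <functional>
#include <iostream>
#include <queue>

// Toy bounded pool: try_offer() rejects work once the queue is full.
class BoundedPool {
public:
    explicit BoundedPool(std::size_t cap) : _cap(cap) {}
    bool try_offer(std::function<void()> task) {
        if (_tasks.size() >= _cap) {
            return false;
        }
        _tasks.push(std::move(task));
        return true;
    }
    void drain() {
        while (!_tasks.empty()) {
            _tasks.front()();
            _tasks.pop();
        }
    }

private:
    std::size_t _cap;
    std::queue<std::function<void()>> _tasks;
};

// Mirror of offer_failed(): answer the RPC inline with an error status
// so the client never waits on a task that was silently dropped.
void offer_failed() {
    std::cout << "rejected: pool is full\n";
}

int main() {
    BoundedPool pool(1);
    for (int i = 0; i < 2; ++i) {
        if (!pool.try_offer([i] { std::cout << "ran task " << i << '\n'; })) {
            offer_failed();
        }
    }
    pool.drain();  // prints "ran task 0"; task 1 was rejected up front
}
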
2081
void PInternalService::multiget_data(google::protobuf::RpcController* controller,
2082
                                     const PMultiGetRequest* request, PMultiGetResponse* response,
2083
0
                                     google::protobuf::Closure* done) {
2084
0
    bool ret = _heavy_work_pool.try_offer([request, response, done]() {
2085
0
        signal::SignalTaskIdKeeper keeper(request->query_id());
2086
        // multi get data by rowid
2087
0
        MonotonicStopWatch watch;
2088
0
        watch.start();
2089
0
        brpc::ClosureGuard closure_guard(done);
2090
0
        response->mutable_status()->set_status_code(0);
2091
0
        SCOPED_ATTACH_TASK(ExecEnv::GetInstance()->rowid_storage_reader_tracker());
2092
0
        Status st = RowIdStorageReader::read_by_rowids(*request, response);
2093
0
        st.to_protobuf(response->mutable_status());
2094
0
        LOG(INFO) << "multiget_data finished, cost(us):" << watch.elapsed_time() / 1000;
2095
0
    });
2096
0
    if (!ret) {
2097
0
        offer_failed(response, done, _heavy_work_pool);
2098
0
        return;
2099
0
    }
2100
0
}
2101
2102
void PInternalService::multiget_data_v2(google::protobuf::RpcController* controller,
2103
                                        const PMultiGetRequestV2* request,
2104
                                        PMultiGetResponseV2* response,
2105
0
                                        google::protobuf::Closure* done) {
2106
0
    std::vector<uint64_t> id_set;
2107
0
    id_set.push_back(request->wg_id());
2108
0
    auto wg = ExecEnv::GetInstance()->workload_group_mgr()->get_group(id_set);
2109
0
    Status st = Status::OK();
2110
2111
0
    if (!wg) [[unlikely]] {
2112
0
        brpc::ClosureGuard closure_guard(done);
2113
0
        st = Status::Error<TStatusCode::CANCELLED>("failed to find workload group: wg id: " +
2114
0
                                                   std::to_string(request->wg_id()));
2115
0
        st.to_protobuf(response->mutable_status());
2116
0
        return;
2117
0
    }
2118
2119
0
    doris::pipeline::TaskScheduler* exec_sched = nullptr;
2120
0
    vectorized::ScannerScheduler* scan_sched = nullptr;
2121
0
    vectorized::ScannerScheduler* remote_scan_sched = nullptr;
2122
0
    wg->get_query_scheduler(&exec_sched, &scan_sched, &remote_scan_sched);
2123
0
    DCHECK(remote_scan_sched);
2124
2125
0
    st = remote_scan_sched->submit_scan_task(
2126
0
            vectorized::SimplifiedScanTask(
2127
0
                    [request, response, done]() {
2128
0
                        SCOPED_ATTACH_TASK(ExecEnv::GetInstance()->rowid_storage_reader_tracker());
2129
0
                        signal::set_signal_task_id(request->query_id());
2130
                        // multi get data by rowid
2131
0
                        MonotonicStopWatch watch;
2132
0
                        watch.start();
2133
0
                        brpc::ClosureGuard closure_guard(done);
2134
0
                        response->mutable_status()->set_status_code(0);
2135
0
                        Status st = RowIdStorageReader::read_by_rowids(*request, response);
2136
0
                        st.to_protobuf(response->mutable_status());
2137
0
                        LOG(INFO) << "multiget_data finished, cost(us):"
2138
0
                                  << watch.elapsed_time() / 1000;
2139
0
                        return true;
2140
0
                    },
2141
0
                    nullptr, nullptr),
2142
0
            fmt::format("{}-multiget_data_v2", print_id(request->query_id())));
2143
2144
0
    if (!st.ok()) {
2145
0
        brpc::ClosureGuard closure_guard(done);
2146
0
        st.to_protobuf(response->mutable_status());
2147
0
    }
2148
0
}
2149
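
Unlike multiget_data, which uses the shared heavy pool, multiget_data_v2 resolves the request's workload group and submits the read to that group's scanner scheduler, failing fast when the group no longer exists. A compressed sketch of that lookup-then-submit flow; WorkloadGroup and the registry below are simplified stand-ins:

#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Simplified stand-in for a workload group owning its own scheduler.
struct WorkloadGroup {
    std::string name;
    // Stand-in for submit_scan_task(): the sketch just runs inline.
    bool submit(const std::function<void()>& task) const {
        task();
        return true;
    }
};

using Registry = std::unordered_map<uint64_t, std::shared_ptr<WorkloadGroup>>;

// Mirror of the handler: fail fast when the group is gone; otherwise hand
// the read to that group's scheduler instead of a global pool.
void handle_multiget(const Registry& reg, uint64_t wg_id) {
    auto it = reg.find(wg_id);
    if (it == reg.end()) {
        std::cout << "CANCELLED: failed to find workload group: wg id: " << wg_id << '\n';
        return;  // respond immediately; nothing was queued
    }
    it->second->submit([&] { std::cout << "read rows under wg " << it->second->name << '\n'; });
}

int main() {
    Registry reg;
    reg.emplace(1, std::make_shared<WorkloadGroup>(WorkloadGroup{"etl"}));
    handle_multiget(reg, 1);   // runs under group "etl"
    handle_multiget(reg, 99);  // cancelled: unknown group
}
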
2150
void PInternalServiceImpl::get_tablet_rowset_versions(google::protobuf::RpcController* cntl_base,
2151
                                                      const PGetTabletVersionsRequest* request,
2152
                                                      PGetTabletVersionsResponse* response,
2153
0
                                                      google::protobuf::Closure* done) {
2154
0
    brpc::ClosureGuard closure_guard(done);
2155
0
    VLOG_DEBUG << "receive get tablet versions request: " << request->DebugString();
2156
0
    _engine.get_tablet_rowset_versions(request, response);
2157
0
}
2158
2159
void PInternalService::glob(google::protobuf::RpcController* controller,
2160
                            const PGlobRequest* request, PGlobResponse* response,
2161
0
                            google::protobuf::Closure* done) {
2162
0
    bool ret = _heavy_work_pool.try_offer([request, response, done]() {
2163
0
        brpc::ClosureGuard closure_guard(done);
2164
0
        std::vector<io::FileInfo> files;
2165
0
        Status st = io::global_local_filesystem()->safe_glob(request->pattern(), &files);
2166
0
        if (st.ok()) {
2167
0
            for (auto& file : files) {
2168
0
                PGlobResponse_PFileInfo* pfile = response->add_files();
2169
0
                pfile->set_file(file.file_name);
2170
0
                pfile->set_size(file.file_size);
2171
0
            }
2172
0
        }
2173
0
        st.to_protobuf(response->mutable_status());
2174
0
    });
2175
0
    if (!ret) {
2176
0
        offer_failed(response, done, _heavy_work_pool);
2177
0
        return;
2178
0
    }
2179
0
}
2180
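
The glob handler answers with (file, size) pairs for every match of the requested pattern. A rough std::filesystem approximation restricted to a suffix match within a single directory; safe_glob itself performs more validation than this sketch:

#include <cstdint>
#include <filesystem>
#include <iostream>
#include <string>
#include <system_error>
#include <vector>

namespace fs = std::filesystem;

struct FileInfo {
    std::string file_name;
    uint64_t file_size;
};

// Very small subset of globbing: match "<dir>/*<suffix>" only.
std::vector<FileInfo> list_by_suffix(const fs::path& dir, const std::string& suffix) {
    std::vector<FileInfo> out;
    std::error_code ec;
    for (const auto& entry : fs::directory_iterator(dir, ec)) {
        std::error_code file_ec;
        if (!entry.is_regular_file(file_ec)) {
            continue;  // skip directories and anything we cannot stat
        }
        const std::string name = entry.path().filename().string();
        if (name.size() >= suffix.size() &&
            name.compare(name.size() - suffix.size(), suffix.size(), suffix) == 0) {
            out.push_back({entry.path().string(),
                           static_cast<uint64_t>(entry.file_size(file_ec))});
        }
    }
    return out;
}

int main() {
    // Analogue of filling PGlobResponse: one (file, size) pair per match.
    for (const auto& f : list_by_suffix(".", ".cpp")) {
        std::cout << f.file_name << " " << f.file_size << '\n';
    }
}
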
2181
void PInternalService::group_commit_insert(google::protobuf::RpcController* controller,
2182
                                           const PGroupCommitInsertRequest* request,
2183
                                           PGroupCommitInsertResponse* response,
2184
0
                                           google::protobuf::Closure* done) {
2185
0
    TUniqueId load_id;
2186
0
    load_id.__set_hi(request->load_id().hi());
2187
0
    load_id.__set_lo(request->load_id().lo());
2188
0
    std::shared_ptr<std::mutex> lock = std::make_shared<std::mutex>();
2189
0
    std::shared_ptr<bool> is_done = std::make_shared<bool>(false);
2190
0
    bool ret = _heavy_work_pool.try_offer([this, request, response, done, load_id, lock,
2191
0
                                           is_done]() {
2192
0
        brpc::ClosureGuard closure_guard(done);
2193
0
        std::shared_ptr<StreamLoadContext> ctx = std::make_shared<StreamLoadContext>(_exec_env);
2194
0
        auto pipe = std::make_shared<io::StreamLoadPipe>(
2195
0
                io::kMaxPipeBufferedBytes /* max_buffered_bytes */, 64 * 1024 /* min_chunk_size */,
2196
0
                -1 /* total_length */, true /* use_proto */);
2197
0
        ctx->pipe = pipe;
2198
0
        Status st = _exec_env->new_load_stream_mgr()->put(load_id, ctx);
2199
0
        if (st.ok()) {
2200
0
            try {
2201
0
                st = _exec_plan_fragment_impl(
2202
0
                        request->exec_plan_fragment_request().request(),
2203
0
                        request->exec_plan_fragment_request().version(),
2204
0
                        request->exec_plan_fragment_request().compact(),
2205
0
                        [&, response, done, load_id, lock, is_done](RuntimeState* state,
2206
0
                                                                    Status* status) {
2207
0
                            std::lock_guard<std::mutex> lock1(*lock);
2208
0
                            if (*is_done) {
2209
0
                                return;
2210
0
                            }
2211
0
                            *is_done = true;
2212
0
                            brpc::ClosureGuard cb_closure_guard(done);
2213
0
                            response->set_label(state->import_label());
2214
0
                            response->set_txn_id(state->wal_id());
2215
0
                            response->set_loaded_rows(state->num_rows_load_success());
2216
0
                            response->set_filtered_rows(state->num_rows_load_filtered());
2217
0
                            status->to_protobuf(response->mutable_status());
2218
0
                            if (!state->get_error_log_file_path().empty()) {
2219
0
                                response->set_error_url(
2220
0
                                        to_load_error_http_path(state->get_error_log_file_path()));
2221
0
                            }
2222
0
                            if (!state->get_first_error_msg().empty()) {
2223
0
                                response->set_first_error_msg(state->get_first_error_msg());
2224
0
                            }
2225
0
                            _exec_env->new_load_stream_mgr()->remove(load_id);
2226
0
                        });
2227
0
            } catch (const Exception& e) {
2228
0
                st = e.to_status();
2229
0
            } catch (const std::exception& e) {
2230
0
                st = Status::Error(ErrorCode::INTERNAL_ERROR, e.what());
2231
0
            } catch (...) {
2232
0
                st = Status::Error(ErrorCode::INTERNAL_ERROR,
2233
0
                                   "_exec_plan_fragment_impl meet unknown error");
2234
0
            }
2235
0
            if (!st.ok()) {
2236
0
                LOG(WARNING) << "exec plan fragment failed, load_id=" << print_id(load_id)
2237
0
                             << ", errmsg=" << st;
2238
0
                std::lock_guard<std::mutex> lock1(*lock);
2239
0
                if (*is_done) {
2240
0
                    closure_guard.release();
2241
0
                } else {
2242
0
                    *is_done = true;
2243
0
                    st.to_protobuf(response->mutable_status());
2244
0
                    _exec_env->new_load_stream_mgr()->remove(load_id);
2245
0
                }
2246
0
            } else {
2247
0
                closure_guard.release();
2248
0
                for (int i = 0; i < request->data().size(); ++i) {
2249
0
                    std::unique_ptr<PDataRow> row(new PDataRow());
2250
0
                    row->CopyFrom(request->data(i));
2251
0
                    st = pipe->append(std::move(row));
2252
0
                    if (!st.ok()) {
2253
0
                        break;
2254
0
                    }
2255
0
                }
2256
0
                if (st.ok()) {
2257
0
                    static_cast<void>(pipe->finish());
2258
0
                }
2259
0
            }
2260
0
        }
2261
0
    });
2262
0
    if (!ret) {
2263
0
        _exec_env->new_load_stream_mgr()->remove(load_id);
2264
0
        offer_failed(response, done, _heavy_work_pool);
2265
0
        return;
2266
0
    }
2267
0
}
2268
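
group_commit_insert can finish the RPC from either the fragment-completion callback or the submit-error path, so both are serialized behind a shared mutex plus an is_done flag that lets exactly one winner respond. A minimal reproduction of that run-exactly-once guard (std::call_once would also work; the explicit flag mirrors the handler above):

#include <iostream>
#include <memory>
#include <mutex>
#include <thread>

// One completion slot shared by two racing finishers, as in the handler:
// the fragment-finished callback and the submit-failure path.
struct Completion {
    std::mutex lock;
    bool is_done = false;
};

void finish(const std::shared_ptr<Completion>& c, const char* who) {
    std::lock_guard<std::mutex> guard(c->lock);
    if (c->is_done) {
        return;  // the other path already answered the RPC
    }
    c->is_done = true;
    std::cout << "response sent by: " << who << '\n';
}

int main() {
    auto c = std::make_shared<Completion>();
    std::thread cb([c] { finish(c, "fragment callback"); });
    std::thread err([c] { finish(c, "error path"); });
    cb.join();
    err.join();  // exactly one of the two lines is printed
}
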
2269
void PInternalService::get_wal_queue_size(google::protobuf::RpcController* controller,
2270
                                          const PGetWalQueueSizeRequest* request,
2271
                                          PGetWalQueueSizeResponse* response,
2272
0
                                          google::protobuf::Closure* done) {
2273
0
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
2274
0
        brpc::ClosureGuard closure_guard(done);
2275
0
        Status st = Status::OK();
2276
0
        auto table_id = request->table_id();
2277
0
        auto count = _exec_env->wal_mgr()->get_wal_queue_size(table_id);
2278
0
        response->set_size(count);
2279
0
        response->mutable_status()->set_status_code(st.code());
2280
0
    });
2281
0
    if (!ret) {
2282
0
        offer_failed(response, done, _heavy_work_pool);
2283
0
    }
2284
0
}
2285
2286
void PInternalService::get_be_resource(google::protobuf::RpcController* controller,
2287
                                       const PGetBeResourceRequest* request,
2288
                                       PGetBeResourceResponse* response,
2289
0
                                       google::protobuf::Closure* done) {
2290
0
    bool ret = _heavy_work_pool.try_offer([response, done]() {
2291
0
        brpc::ClosureGuard closure_guard(done);
2292
0
        int64_t mem_limit = MemInfo::mem_limit();
2293
0
        int64_t mem_usage = PerfCounters::get_vm_rss();
2294
2295
0
        PGlobalResourceUsage* global_resource_usage = response->mutable_global_be_resource_usage();
2296
0
        global_resource_usage->set_mem_limit(mem_limit);
2297
0
        global_resource_usage->set_mem_usage(mem_usage);
2298
2299
0
        Status st = Status::OK();
2300
0
        response->mutable_status()->set_status_code(st.code());
2301
0
    });
2302
0
    if (!ret) {
2303
0
        offer_failed(response, done, _heavy_work_pool);
2304
0
    }
2305
0
}
2306
2307
void PInternalService::delete_dictionary(google::protobuf::RpcController* controller,
2308
                                         const PDeleteDictionaryRequest* request,
2309
                                         PDeleteDictionaryResponse* response,
2310
0
                                         google::protobuf::Closure* done) {
2311
0
    brpc::ClosureGuard closure_guard(done);
2312
0
    Status st = ExecEnv::GetInstance()->dict_factory()->delete_dict(request->dictionary_id());
2313
0
    st.to_protobuf(response->mutable_status());
2314
0
}
2315
2316
void PInternalService::commit_refresh_dictionary(google::protobuf::RpcController* controller,
2317
                                                 const PCommitRefreshDictionaryRequest* request,
2318
                                                 PCommitRefreshDictionaryResponse* response,
2319
0
                                                 google::protobuf::Closure* done) {
2320
0
    brpc::ClosureGuard closure_guard(done);
2321
0
    Status st = ExecEnv::GetInstance()->dict_factory()->commit_refresh_dict(
2322
0
            request->dictionary_id(), request->version_id());
2323
0
    st.to_protobuf(response->mutable_status());
2324
0
}
2325
2326
void PInternalService::abort_refresh_dictionary(google::protobuf::RpcController* controller,
2327
                                                const PAbortRefreshDictionaryRequest* request,
2328
                                                PAbortRefreshDictionaryResponse* response,
2329
0
                                                google::protobuf::Closure* done) {
2330
0
    brpc::ClosureGuard closure_guard(done);
2331
0
    Status st = ExecEnv::GetInstance()->dict_factory()->abort_refresh_dict(request->dictionary_id(),
2332
0
                                                                           request->version_id());
2333
0
    st.to_protobuf(response->mutable_status());
2334
0
}
2335
2336
void PInternalService::get_tablet_rowsets(google::protobuf::RpcController* controller,
2337
                                          const PGetTabletRowsetsRequest* request,
2338
                                          PGetTabletRowsetsResponse* response,
2339
0
                                          google::protobuf::Closure* done) {
2340
0
    DCHECK(config::is_cloud_mode());
2341
0
    auto start_time = GetMonoTimeMicros();
2342
0
    Defer defer {
2343
0
            [&]() { g_process_remote_fetch_rowsets_latency << GetMonoTimeMicros() - start_time; }};
2344
0
    brpc::ClosureGuard closure_guard(done);
2345
0
    LOG(INFO) << "process get tablet rowsets, request=" << request->ShortDebugString();
2346
0
    if (!request->has_tablet_id() || !request->has_version_start() || !request->has_version_end()) {
2347
0
        Status::InvalidArgument("missing params tablet/version_start/version_end")
2348
0
                .to_protobuf(response->mutable_status());
2349
0
        return;
2350
0
    }
2351
0
    CloudStorageEngine& storage = ExecEnv::GetInstance()->storage_engine().to_cloud();
2352
2353
0
    auto maybe_tablet =
2354
0
            storage.tablet_mgr().get_tablet(request->tablet_id(), /*warmup data*/ false,
2355
0
                                            /*sync_delete_bitmap*/ false, /*delete_bitmap*/ nullptr,
2356
0
                                            /*local_only*/ true);
2357
0
    if (!maybe_tablet) {
2358
0
        maybe_tablet.error().to_protobuf(response->mutable_status());
2359
0
        return;
2360
0
    }
2361
0
    auto tablet = maybe_tablet.value();
2362
0
    Result<CaptureRowsetResult> ret;
2363
0
    {
2364
0
        std::shared_lock l(tablet->get_header_lock());
2365
0
        ret = tablet->capture_consistent_rowsets_unlocked(
2366
0
                {request->version_start(), request->version_end()},
2367
0
                CaptureRowsetOps {.enable_fetch_rowsets_from_peers = false});
2368
0
    }
2369
0
    if (!ret) {
2370
0
        ret.error().to_protobuf(response->mutable_status());
2371
0
        return;
2372
0
    }
2373
0
    auto rowsets = std::move(ret.value().rowsets);
2374
0
    for (const auto& rs : rowsets) {
2375
0
        RowsetMetaPB meta;
2376
0
        rs->rowset_meta()->to_rowset_pb(&meta);
2377
0
        response->mutable_rowsets()->Add(std::move(meta));
2378
0
    }
2379
0
    if (request->has_delete_bitmap_keys()) {
2380
0
        DCHECK(tablet->enable_unique_key_merge_on_write());
2381
0
        auto delete_bitmap = std::move(ret.value().delete_bitmap);
2382
0
        auto keys_pb = request->delete_bitmap_keys();
2383
0
        size_t len = keys_pb.rowset_ids().size();
2384
0
        DCHECK_EQ(len, keys_pb.segment_ids().size());
2385
0
        DCHECK_EQ(len, keys_pb.versions().size());
2386
0
        std::set<DeleteBitmap::BitmapKey> keys;
2387
0
        for (size_t i = 0; i < len; ++i) {
2388
0
            RowsetId rs_id;
2389
0
            rs_id.init(keys_pb.rowset_ids(i));
2390
0
            keys.emplace(rs_id, keys_pb.segment_ids(i), keys_pb.versions(i));
2391
0
        }
2392
0
        auto diffset = delete_bitmap->diffset(keys).to_pb();
2393
0
        *response->mutable_delete_bitmap() = std::move(diffset);
2394
0
    }
2395
0
    Status::OK().to_protobuf(response->mutable_status());
2396
0
}
2397
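
The delete-bitmap branch above ships only entries the caller lacks: the client sends the (rowset_id, segment_id, version) keys it already holds, and diffset() returns the complement. The set arithmetic reduces to the following sketch, where a std::map of key to bitmap bytes stands in for DeleteBitmap:

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <tuple>

// Stand-in for DeleteBitmap::BitmapKey: (rowset_id, segment_id, version).
using BitmapKey = std::tuple<std::string, uint32_t, int64_t>;

// diffset: entries the server has that are absent from the caller's key set.
std::map<BitmapKey, std::string> diffset(const std::map<BitmapKey, std::string>& server,
                                         const std::set<BitmapKey>& client_keys) {
    std::map<BitmapKey, std::string> out;
    for (const auto& [key, bitmap] : server) {
        if (client_keys.count(key) == 0) {
            out.emplace(key, bitmap);  // caller lacks this entry; ship it
        }
    }
    return out;
}

int main() {
    std::map<BitmapKey, std::string> server {
            {{"rs1", 0, 5}, "bits-a"},
            {{"rs2", 0, 6}, "bits-b"},
    };
    std::set<BitmapKey> client_keys {{"rs1", 0, 5}};  // already synced
    for (const auto& [key, bitmap] : diffset(server, client_keys)) {
        std::cout << std::get<0>(key) << " -> " << bitmap << '\n';  // rs2 -> bits-b
    }
}
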
2398
#include "common/compile_check_avoid_end.h"
2399
} // namespace doris