/root/doris/be/src/service/internal_service.cpp
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include "service/internal_service.h"

#include <assert.h>
#include <brpc/closure_guard.h>
#include <brpc/controller.h>
#include <bthread/bthread.h>
#include <bthread/types.h>
#include <butil/errno.h>
#include <butil/iobuf.h>
#include <fcntl.h>
#include <fmt/core.h>
#include <gen_cpp/DataSinks_types.h>
#include <gen_cpp/MasterService_types.h>
#include <gen_cpp/PaloInternalService_types.h>
#include <gen_cpp/PlanNodes_types.h>
#include <gen_cpp/Status_types.h>
#include <gen_cpp/Types_types.h>
#include <gen_cpp/internal_service.pb.h>
#include <gen_cpp/olap_file.pb.h>
#include <gen_cpp/segment_v2.pb.h>
#include <gen_cpp/types.pb.h>
#include <google/protobuf/stubs/callback.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/stat.h>
#include <vec/exec/vjdbc_connector.h>

#include <algorithm>
#include <exception>
#include <filesystem>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "cloud/cloud_storage_engine.h"
#include "cloud/cloud_tablet_mgr.h"
#include "cloud/config.h"
#include "common/config.h"
#include "common/consts.h"
#include "common/exception.h"
#include "common/logging.h"
#include "common/signal_handler.h"
#include "common/status.h"
#include "exec/rowid_fetcher.h"
#include "gen_cpp/BackendService.h"
#include "gen_cpp/PaloInternalService_types.h"
#include "gen_cpp/internal_service.pb.h"
#include "gutil/integral_types.h"
#include "http/http_client.h"
#include "io/fs/local_file_system.h"
#include "io/fs/stream_load_pipe.h"
#include "io/io_common.h"
#include "olap/data_dir.h"
#include "olap/olap_common.h"
#include "olap/olap_define.h"
#include "olap/rowset/beta_rowset.h"
#include "olap/rowset/rowset.h"
#include "olap/rowset/rowset_factory.h"
#include "olap/rowset/rowset_meta.h"
#include "olap/rowset/segment_v2/column_reader.h"
#include "olap/rowset/segment_v2/common.h"
#include "olap/rowset/segment_v2/inverted_index_desc.h"
#include "olap/rowset/segment_v2/segment.h"
#include "olap/rowset/segment_v2/segment_iterator.h"
#include "olap/segment_loader.h"
#include "olap/storage_engine.h"
#include "olap/tablet.h"
#include "olap/tablet_fwd.h"
#include "olap/tablet_manager.h"
#include "olap/tablet_schema.h"
#include "olap/txn_manager.h"
#include "olap/utils.h"
#include "olap/wal/wal_manager.h"
#include "runtime/buffer_control_block.h"
#include "runtime/cache/result_cache.h"
#include "runtime/define_primitive_type.h"
#include "runtime/descriptors.h"
#include "runtime/exec_env.h"
#include "runtime/fold_constant_executor.h"
#include "runtime/fragment_mgr.h"
#include "runtime/load_channel_mgr.h"
#include "runtime/load_stream_mgr.h"
#include "runtime/result_buffer_mgr.h"
#include "runtime/routine_load/routine_load_task_executor.h"
#include "runtime/stream_load/new_load_stream_mgr.h"
#include "runtime/stream_load/stream_load_context.h"
#include "runtime/thread_context.h"
#include "runtime/types.h"
#include "service/backend_options.h"
#include "service/point_query_executor.h"
#include "util/arrow/row_batch.h"
#include "util/async_io.h"
#include "util/brpc_client_cache.h"
#include "util/doris_metrics.h"
#include "util/md5.h"
#include "util/metrics.h"
#include "util/network_util.h"
#include "util/proto_util.h"
#include "util/ref_count_closure.h"
#include "util/runtime_profile.h"
#include "util/stopwatch.hpp"
#include "util/string_util.h"
#include "util/thrift_util.h"
#include "util/time.h"
#include "util/uid_util.h"
#include "vec/columns/column.h"
#include "vec/columns/column_string.h"
#include "vec/common/schema_util.h"
#include "vec/core/block.h"
#include "vec/core/column_with_type_and_name.h"
#include "vec/data_types/data_type.h"
#include "vec/exec/format/avro/avro_jni_reader.h"
#include "vec/exec/format/csv/csv_reader.h"
#include "vec/exec/format/generic_reader.h"
#include "vec/exec/format/json/new_json_reader.h"
#include "vec/exec/format/orc/vorc_reader.h"
#include "vec/exec/format/parquet/vparquet_reader.h"
#include "vec/exec/format/text/text_reader.h"
#include "vec/jsonb/serialize.h"
#include "vec/runtime/vdata_stream_mgr.h"

namespace google {
namespace protobuf {
class RpcController;
} // namespace protobuf
} // namespace google

namespace doris {
using namespace ErrorCode;

const uint32_t DOWNLOAD_FILE_MAX_RETRY = 3;

DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_pool_queue_size, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_pool_queue_size, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_active_threads, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_active_threads, MetricUnit::NOUNIT);

DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_pool_max_queue_size, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_pool_max_queue_size, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(heavy_work_max_threads, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(light_work_max_threads, MetricUnit::NOUNIT);

DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_pool_queue_size, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_active_threads, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_pool_max_queue_size, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(arrow_flight_work_max_threads, MetricUnit::NOUNIT);

static bvar::LatencyRecorder g_process_remote_fetch_rowsets_latency("process_remote_fetch_rowsets");

bthread_key_t btls_key;

static void thread_context_deleter(void* d) {
    delete static_cast<ThreadContext*>(d);
}

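// NewHttpClosure owns the request object that the *_by_http handlers
// reconstruct from the brpc attachment: when Run() fires it deletes that
// request, runs the wrapped `done` closure, and then deletes itself.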
template <typename T>
class NewHttpClosure : public ::google::protobuf::Closure {
public:
    NewHttpClosure(google::protobuf::Closure* done) : _done(done) {}
    NewHttpClosure(T* request, google::protobuf::Closure* done) : _request(request), _done(done) {}

    void Run() override {
        if (_request != nullptr) {
            delete _request;
            _request = nullptr;
        }
        if (_done != nullptr) {
            _done->Run();
        }
        delete this;
    }

private:
    T* _request = nullptr;
    google::protobuf::Closure* _done = nullptr;
};

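// The service owns three work pools: a heavy pool for load and other disk- or
// network-bound handlers, a light pool for cheap query-control handlers, and a
// dedicated pool for Arrow Flight result traffic. Each pool size falls back to
// a CPU-scaled default when the corresponding config value is -1.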
PInternalService::PInternalService(ExecEnv* exec_env)
        : _exec_env(exec_env),
          // The heavy thread pool is used for the load process and any other handler that reads disk or accesses the network.
          _heavy_work_pool(config::brpc_heavy_work_pool_threads != -1
                                   ? config::brpc_heavy_work_pool_threads
                                   : std::max(128, CpuInfo::num_cores() * 4),
                           config::brpc_heavy_work_pool_max_queue_size != -1
                                   ? config::brpc_heavy_work_pool_max_queue_size
                                   : std::max(10240, CpuInfo::num_cores() * 320),
                           "brpc_heavy"),

          // The light thread pool should only be used in query processing logic. All handlers must be very light: no locking, no disk access.
          _light_work_pool(config::brpc_light_work_pool_threads != -1
                                   ? config::brpc_light_work_pool_threads
                                   : std::max(128, CpuInfo::num_cores() * 4),
                           config::brpc_light_work_pool_max_queue_size != -1
                                   ? config::brpc_light_work_pool_max_queue_size
                                   : std::max(10240, CpuInfo::num_cores() * 320),
                           "brpc_light"),
          _arrow_flight_work_pool(config::brpc_arrow_flight_work_pool_threads != -1
                                          ? config::brpc_arrow_flight_work_pool_threads
                                          : std::max(512, CpuInfo::num_cores() * 2),
                                  config::brpc_arrow_flight_work_pool_max_queue_size != -1
                                          ? config::brpc_arrow_flight_work_pool_max_queue_size
                                          : std::max(20480, CpuInfo::num_cores() * 640),
                                  "brpc_arrow_flight") {
    REGISTER_HOOK_METRIC(heavy_work_pool_queue_size,
                         [this]() { return _heavy_work_pool.get_queue_size(); });
    REGISTER_HOOK_METRIC(light_work_pool_queue_size,
                         [this]() { return _light_work_pool.get_queue_size(); });
    REGISTER_HOOK_METRIC(heavy_work_active_threads,
                         [this]() { return _heavy_work_pool.get_active_threads(); });
    REGISTER_HOOK_METRIC(light_work_active_threads,
                         [this]() { return _light_work_pool.get_active_threads(); });

    REGISTER_HOOK_METRIC(heavy_work_pool_max_queue_size,
                         []() { return config::brpc_heavy_work_pool_max_queue_size; });
    REGISTER_HOOK_METRIC(light_work_pool_max_queue_size,
                         []() { return config::brpc_light_work_pool_max_queue_size; });
    REGISTER_HOOK_METRIC(heavy_work_max_threads,
                         []() { return config::brpc_heavy_work_pool_threads; });
    REGISTER_HOOK_METRIC(light_work_max_threads,
                         []() { return config::brpc_light_work_pool_threads; });

    REGISTER_HOOK_METRIC(arrow_flight_work_pool_queue_size,
                         [this]() { return _arrow_flight_work_pool.get_queue_size(); });
    REGISTER_HOOK_METRIC(arrow_flight_work_active_threads,
                         [this]() { return _arrow_flight_work_pool.get_active_threads(); });
    REGISTER_HOOK_METRIC(arrow_flight_work_pool_max_queue_size,
                         []() { return config::brpc_arrow_flight_work_pool_max_queue_size; });
    REGISTER_HOOK_METRIC(arrow_flight_work_max_threads,
                         []() { return config::brpc_arrow_flight_work_pool_threads; });

    _exec_env->load_stream_mgr()->set_heavy_work_pool(&_heavy_work_pool);

    CHECK_EQ(0, bthread_key_create(&btls_key, thread_context_deleter));
    CHECK_EQ(0, bthread_key_create(&AsyncIO::btls_io_ctx_key, AsyncIO::io_ctx_key_deleter));
}

PInternalServiceImpl::PInternalServiceImpl(StorageEngine& engine, ExecEnv* exec_env)
        : PInternalService(exec_env), _engine(engine) {}

PInternalServiceImpl::~PInternalServiceImpl() = default;

PInternalService::~PInternalService() {
    DEREGISTER_HOOK_METRIC(heavy_work_pool_queue_size);
    DEREGISTER_HOOK_METRIC(light_work_pool_queue_size);
    DEREGISTER_HOOK_METRIC(heavy_work_active_threads);
    DEREGISTER_HOOK_METRIC(light_work_active_threads);

    DEREGISTER_HOOK_METRIC(heavy_work_pool_max_queue_size);
    DEREGISTER_HOOK_METRIC(light_work_pool_max_queue_size);
    DEREGISTER_HOOK_METRIC(heavy_work_max_threads);
    DEREGISTER_HOOK_METRIC(light_work_max_threads);

    DEREGISTER_HOOK_METRIC(arrow_flight_work_pool_queue_size);
    DEREGISTER_HOOK_METRIC(arrow_flight_work_active_threads);
    DEREGISTER_HOOK_METRIC(arrow_flight_work_pool_max_queue_size);
    DEREGISTER_HOOK_METRIC(arrow_flight_work_max_threads);

    CHECK_EQ(0, bthread_key_delete(btls_key));
    CHECK_EQ(0, bthread_key_delete(AsyncIO::btls_io_ctx_key));
}

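// The legacy row-batch transmit entry points below are intentionally empty;
// they appear to remain only so the RPC interface stays complete while the
// vectorized engine transmits data through its block-based counterparts.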
void PInternalService::transmit_data(google::protobuf::RpcController* controller,
                                     const PTransmitDataParams* request,
                                     PTransmitDataResult* response,
                                     google::protobuf::Closure* done) {}

void PInternalService::transmit_data_by_http(google::protobuf::RpcController* controller,
                                             const PEmptyRequest* request,
                                             PTransmitDataResult* response,
                                             google::protobuf::Closure* done) {}

void PInternalService::_transmit_data(google::protobuf::RpcController* controller,
                                      const PTransmitDataParams* request,
                                      PTransmitDataResult* response,
                                      google::protobuf::Closure* done, const Status& extract_st) {}

void PInternalService::tablet_writer_open(google::protobuf::RpcController* controller,
                                          const PTabletWriterOpenRequest* request,
                                          PTabletWriterOpenResult* response,
                                          google::protobuf::Closure* done) {
    bool ret = _heavy_work_pool.try_offer([this, request, response, done]() {
        VLOG_RPC << "tablet writer open, id=" << request->id()
                 << ", index_id=" << request->index_id() << ", txn_id=" << request->txn_id();
        signal::set_signal_task_id(request->id());
        brpc::ClosureGuard closure_guard(done);
        auto st = _exec_env->load_channel_mgr()->open(*request);
        if (!st.ok()) {
            LOG(WARNING) << "load channel open failed, message=" << st << ", id=" << request->id()
                         << ", index_id=" << request->index_id()
                         << ", txn_id=" << request->txn_id();
        }
        st.to_protobuf(response->mutable_status());
    });
    if (!ret) {
        offer_failed(response, done, _heavy_work_pool);
        return;
    }
}

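// The exec_plan_fragment* handlers stamp wall-clock timestamps (received /
// execution / execution-done, in milliseconds) onto the response, presumably so
// the frontend can separate BE-side queueing delay from actual execution time.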
void PInternalService::exec_plan_fragment(google::protobuf::RpcController* controller,
                                          const PExecPlanFragmentRequest* request,
                                          PExecPlanFragmentResult* response,
                                          google::protobuf::Closure* done) {
    timeval tv {};
    gettimeofday(&tv, nullptr);
    response->set_received_time(tv.tv_sec * 1000LL + tv.tv_usec / 1000);
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
        _exec_plan_fragment_in_pthread(controller, request, response, done);
    });
    if (!ret) {
        offer_failed(response, done, _light_work_pool);
        return;
    }
}

void PInternalService::_exec_plan_fragment_in_pthread(google::protobuf::RpcController* controller,
                                                      const PExecPlanFragmentRequest* request,
                                                      PExecPlanFragmentResult* response,
                                                      google::protobuf::Closure* done) {
    timeval tv1 {};
    gettimeofday(&tv1, nullptr);
    response->set_execution_time(tv1.tv_sec * 1000LL + tv1.tv_usec / 1000);
    brpc::ClosureGuard closure_guard(done);
    auto st = Status::OK();
    bool compact = request->has_compact() ? request->compact() : false;
    PFragmentRequestVersion version =
            request->has_version() ? request->version() : PFragmentRequestVersion::VERSION_1;
    try {
        st = _exec_plan_fragment_impl(request->request(), version, compact);
    } catch (const Exception& e) {
        st = e.to_status();
    } catch (...) {
        st = Status::Error(ErrorCode::INTERNAL_ERROR,
                           "_exec_plan_fragment_impl meet unknown error");
    }
    if (!st.ok()) {
        LOG(WARNING) << "exec plan fragment failed, errmsg=" << st;
    }
    st.to_protobuf(response->mutable_status());
    timeval tv2 {};
    gettimeofday(&tv2, nullptr);
    response->set_execution_done_time(tv2.tv_sec * 1000LL + tv2.tv_usec / 1000);
}

void PInternalService::exec_plan_fragment_prepare(google::protobuf::RpcController* controller,
                                                  const PExecPlanFragmentRequest* request,
                                                  PExecPlanFragmentResult* response,
                                                  google::protobuf::Closure* done) {
    timeval tv {};
    gettimeofday(&tv, nullptr);
    response->set_received_time(tv.tv_sec * 1000LL + tv.tv_usec / 1000);
    bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() {
        _exec_plan_fragment_in_pthread(controller, request, response, done);
    });
    if (!ret) {
        offer_failed(response, done, _light_work_pool);
        return;
    }
}

void PInternalService::exec_plan_fragment_start(google::protobuf::RpcController* /*controller*/,
                                                const PExecPlanFragmentStartRequest* request,
                                                PExecPlanFragmentResult* result,
                                                google::protobuf::Closure* done) {
    timeval tv {};
    gettimeofday(&tv, nullptr);
    result->set_received_time(tv.tv_sec * 1000LL + tv.tv_usec / 1000);
    bool ret = _light_work_pool.try_offer([this, request, result, done]() {
        timeval tv1 {};
        gettimeofday(&tv1, nullptr);
        result->set_execution_time(tv1.tv_sec * 1000LL + tv1.tv_usec / 1000);
        brpc::ClosureGuard closure_guard(done);
        auto st = _exec_env->fragment_mgr()->start_query_execution(request);
        st.to_protobuf(result->mutable_status());
        timeval tv2 {};
        gettimeofday(&tv2, nullptr);
        result->set_execution_done_time(tv2.tv_sec * 1000LL + tv2.tv_usec / 1000);
    });
    if (!ret) {
        offer_failed(result, done, _light_work_pool);
        return;
    }
}

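// open_load_stream resolves every tablet named in the request, returns each
// tablet's schema to the sender, and then hands the connection over to
// LoadStreamMgr via brpc::StreamAccept so subsequent load data flows over a
// brpc stream instead of individual RPCs.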
void PInternalService::open_load_stream(google::protobuf::RpcController* controller,
                                        const POpenLoadStreamRequest* request,
                                        POpenLoadStreamResponse* response,
                                        google::protobuf::Closure* done) {
    bool ret = _heavy_work_pool.try_offer([this, controller, request, response, done]() {
        signal::set_signal_task_id(request->load_id());
        brpc::ClosureGuard done_guard(done);
        brpc::Controller* cntl = static_cast<brpc::Controller*>(controller);
        brpc::StreamOptions stream_options;

        LOG(INFO) << "open load stream, load_id=" << request->load_id()
                  << ", src_id=" << request->src_id();

        for (const auto& req : request->tablets()) {
            BaseTabletSPtr tablet;
            if (auto res = ExecEnv::get_tablet(req.tablet_id()); !res.has_value()) [[unlikely]] {
                auto st = std::move(res).error();
                st.to_protobuf(response->mutable_status());
                cntl->SetFailed(st.to_string());
                return;
            } else {
                tablet = std::move(res).value();
            }
            auto resp = response->add_tablet_schemas();
            resp->set_index_id(req.index_id());
            resp->set_enable_unique_key_merge_on_write(tablet->enable_unique_key_merge_on_write());
            tablet->tablet_schema()->to_schema_pb(resp->mutable_tablet_schema());
        }

        LoadStream* load_stream = nullptr;
        auto st = _exec_env->load_stream_mgr()->open_load_stream(request, load_stream);
        if (!st.ok()) {
            st.to_protobuf(response->mutable_status());
            return;
        }

        stream_options.handler = load_stream;
        stream_options.idle_timeout_ms = request->idle_timeout_ms();
        DBUG_EXECUTE_IF("PInternalServiceImpl.open_load_stream.set_idle_timeout",
                        { stream_options.idle_timeout_ms = 1; });

        StreamId streamid;
        if (brpc::StreamAccept(&streamid, *cntl, &stream_options) != 0) {
            st = Status::Cancelled("Fail to accept stream {}", streamid);
            st.to_protobuf(response->mutable_status());
            cntl->SetFailed(st.to_string());
            return;
        }

        VLOG_DEBUG << "get streamid =" << streamid;
        st.to_protobuf(response->mutable_status());
    });
    if (!ret) {
        offer_failed(response, done, _heavy_work_pool);
    }
}

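// tablet_writer_add_block_by_http reconstructs a PTabletWriterAddBlockRequest
// from the brpc attachment (used when the serialized block is too large to ride
// in a regular protobuf message) and delegates to tablet_writer_add_block;
// NewHttpClosure frees the reconstructed request afterwards.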
void PInternalService::tablet_writer_add_block_by_http(google::protobuf::RpcController* controller,
                                                       const ::doris::PEmptyRequest* request,
                                                       PTabletWriterAddBlockResult* response,
                                                       google::protobuf::Closure* done) {
    PTabletWriterAddBlockRequest* new_request = new PTabletWriterAddBlockRequest();
    google::protobuf::Closure* new_done =
            new NewHttpClosure<PTabletWriterAddBlockRequest>(new_request, done);
    brpc::Controller* cntl = static_cast<brpc::Controller*>(controller);
    Status st = attachment_extract_request_contain_block<PTabletWriterAddBlockRequest>(new_request,
                                                                                       cntl);
    if (st.ok()) {
        tablet_writer_add_block(controller, new_request, response, new_done);
    } else {
        st.to_protobuf(response->mutable_status());
    }
}

void PInternalService::tablet_writer_add_block(google::protobuf::RpcController* controller,
                                               const PTabletWriterAddBlockRequest* request,
                                               PTabletWriterAddBlockResult* response,
                                               google::protobuf::Closure* done) {
    int64_t submit_task_time_ns = MonotonicNanos();
    bool ret = _heavy_work_pool.try_offer([request, response, done, submit_task_time_ns, this]() {
        int64_t wait_execution_time_ns = MonotonicNanos() - submit_task_time_ns;
        brpc::ClosureGuard closure_guard(done);
        int64_t execution_time_ns = 0;
        {
            SCOPED_RAW_TIMER(&execution_time_ns);
            signal::set_signal_task_id(request->id());
            auto st = _exec_env->load_channel_mgr()->add_batch(*request, response);
            if (!st.ok()) {
                LOG(WARNING) << "tablet writer add block failed, message=" << st
                             << ", id=" << request->id() << ", index_id=" << request->index_id()
                             << ", sender_id=" << request->sender_id()
                             << ", backend id=" << request->backend_id();
            }
            st.to_protobuf(response->mutable_status());
        }
        response->set_execution_time_us(execution_time_ns / NANOS_PER_MICRO);
        response->set_wait_execution_time_us(wait_execution_time_ns / NANOS_PER_MICRO);
    });
    if (!ret) {
        offer_failed(response, done, _heavy_work_pool);
        return;
    }
}

void PInternalService::tablet_writer_cancel(google::protobuf::RpcController* controller,
                                            const PTabletWriterCancelRequest* request,
                                            PTabletWriterCancelResult* response,
                                            google::protobuf::Closure* done) {
    bool ret = _heavy_work_pool.try_offer([this, request, done]() {
        VLOG_RPC << "tablet writer cancel, id=" << request->id()
                 << ", index_id=" << request->index_id() << ", sender_id=" << request->sender_id();
        signal::set_signal_task_id(request->id());
        brpc::ClosureGuard closure_guard(done);
        auto st = _exec_env->load_channel_mgr()->cancel(*request);
        if (!st.ok()) {
            LOG(WARNING) << "tablet writer cancel failed, id=" << request->id()
                         << ", index_id=" << request->index_id()
                         << ", sender_id=" << request->sender_id();
        }
    });
    if (!ret) {
        offer_failed(response, done, _heavy_work_pool);
        return;
    }
}

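// _exec_plan_fragment_impl deserializes the thrift payload according to the
// request version: VERSION_1 carries a single TExecPlanFragmentParams,
// VERSION_2 a TExecPlanFragmentParamsList, and VERSION_3 a
// TPipelineFragmentParamsList for the pipeline engine.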
Status PInternalService::_exec_plan_fragment_impl(
        const std::string& ser_request, PFragmentRequestVersion version, bool compact,
        const std::function<void(RuntimeState*, Status*)>& cb) {
    // Sometimes the BE has not yet received the first heartbeat message when a request
    // arrives from the FE. If the BE executed the fragment anyway, it would crash when
    // reading properties from the master info.
    if (ExecEnv::GetInstance()->cluster_info() == nullptr) {
        return Status::InternalError(
                "Have not receive the first heartbeat message from master, not ready to provide "
                "service");
    }
    if (version == PFragmentRequestVersion::VERSION_1) {
        // VERSION_1 should be removed in v1.2
        TExecPlanFragmentParams t_request;
        {
            const uint8_t* buf = (const uint8_t*)ser_request.data();
            uint32_t len = ser_request.size();
            RETURN_IF_ERROR(deserialize_thrift_msg(buf, &len, compact, &t_request));
        }
        if (cb) {
            return _exec_env->fragment_mgr()->exec_plan_fragment(
                    t_request, QuerySource::INTERNAL_FRONTEND, cb);
        } else {
            return _exec_env->fragment_mgr()->exec_plan_fragment(t_request,
                                                                 QuerySource::INTERNAL_FRONTEND);
        }
    } else if (version == PFragmentRequestVersion::VERSION_2) {
        TExecPlanFragmentParamsList t_request;
        {
            const uint8_t* buf = (const uint8_t*)ser_request.data();
            uint32_t len = ser_request.size();
            RETURN_IF_ERROR(deserialize_thrift_msg(buf, &len, compact, &t_request));
        }
        const auto& fragment_list = t_request.paramsList;
        MonotonicStopWatch timer;
        timer.start();

        for (const TExecPlanFragmentParams& params : t_request.paramsList) {
            if (cb) {
                RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment(
                        params, QuerySource::INTERNAL_FRONTEND, cb));
            } else {
                RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment(
                        params, QuerySource::INTERNAL_FRONTEND));
            }
        }

        timer.stop();
        double cost_secs = static_cast<double>(timer.elapsed_time()) / 1000000000ULL;
        if (cost_secs > 5) {
            LOG_WARNING("Prepare {} fragments of query {} costs {} seconds, it costs too much",
                        fragment_list.size(), print_id(fragment_list.front().params.query_id),
                        cost_secs);
        }

        return Status::OK();
    } else if (version == PFragmentRequestVersion::VERSION_3) {
        TPipelineFragmentParamsList t_request;
        {
            const uint8_t* buf = (const uint8_t*)ser_request.data();
            uint32_t len = ser_request.size();
            RETURN_IF_ERROR(deserialize_thrift_msg(buf, &len, compact, &t_request));
        }

        const auto& fragment_list = t_request.params_list;
        if (fragment_list.empty()) {
            return Status::InternalError("Invalid TPipelineFragmentParamsList!");
        }
        MonotonicStopWatch timer;
        timer.start();
        for (const TPipelineFragmentParams& fragment : fragment_list) {
            if (cb) {
                RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment(
                        fragment, QuerySource::INTERNAL_FRONTEND, cb));
            } else {
                RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment(
                        fragment, QuerySource::INTERNAL_FRONTEND));
            }
        }
        timer.stop();
        double cost_secs = static_cast<double>(timer.elapsed_time()) / 1000000000ULL;
        if (cost_secs > 5) {
            LOG_WARNING("Prepare {} fragments of query {} costs {} seconds, it costs too much",
                        fragment_list.size(), print_id(fragment_list.front().query_id), cost_secs);
        }

        return Status::OK();
    } else {
        return Status::InternalError("invalid version");
    }
}

void PInternalService::cancel_plan_fragment(google::protobuf::RpcController* /*controller*/,
                                            const PCancelPlanFragmentRequest* request,
                                            PCancelPlanFragmentResult* result,
                                            google::protobuf::Closure* done) {
    bool ret = _light_work_pool.try_offer([this, request, result, done]() {
        brpc::ClosureGuard closure_guard(done);
        TUniqueId tid;
        tid.__set_hi(request->finst_id().hi());
        tid.__set_lo(request->finst_id().lo());
        signal::set_signal_task_id(tid);
        Status st = Status::OK();

        const bool has_cancel_reason = request->has_cancel_reason();
        const bool has_cancel_status = request->has_cancel_status();
        // During an upgrade only LIMIT_REACH is preserved; any other reason is converted
        // to an internal error.
        Status actual_cancel_status = Status::OK();
        // Convert PPlanFragmentCancelReason to Status
        if (has_cancel_status) {
            // If the FE set a cancel status, it is a new FE, so use the cancel status.
            actual_cancel_status = Status::create(request->cancel_status());
        } else if (has_cancel_reason) {
            // If the FE did not set a cancel status but did set a cancel reason,
            // convert the reason to a cancel status here.
            if (request->cancel_reason() == PPlanFragmentCancelReason::LIMIT_REACH) {
                actual_cancel_status = Status::Error<ErrorCode::LIMIT_REACH>("limit reach");
            } else {
                // Use the cancel reason as the error message
                actual_cancel_status = Status::InternalError(
                        PPlanFragmentCancelReason_Name(request->cancel_reason()));
            }
        } else {
            actual_cancel_status = Status::InternalError("unknown error");
        }

        TUniqueId query_id;
        query_id.__set_hi(request->query_id().hi());
        query_id.__set_lo(request->query_id().lo());
        LOG(INFO) << fmt::format("Cancel query {}, reason: {}", print_id(query_id),
                                 actual_cancel_status.to_string());
        _exec_env->fragment_mgr()->cancel_query(query_id, actual_cancel_status);

        // TODO: this logic seems useless since cancel only returns Status::OK; remove it.
        st.to_protobuf(result->mutable_status());
    });
    if (!ret) {
        offer_failed(result, done, _light_work_pool);
        return;
    }
}

void PInternalService::fetch_data(google::protobuf::RpcController* controller,
                                  const PFetchDataRequest* request, PFetchDataResult* result,
                                  google::protobuf::Closure* done) {
    // fetch_data is a light operation: when no data is ready it enqueues the request rather
    // than waiting in place. Once data is ready it is sent via brpc, whose service queue
    // keeps the send from taking too long.
    auto* cntl = static_cast<brpc::Controller*>(controller);
    auto* ctx = new GetResultBatchCtx(cntl, result, done);
    _exec_env->result_mgr()->fetch_data(request->finst_id(), ctx);
}

void PInternalService::fetch_arrow_data(google::protobuf::RpcController* controller,
                                        const PFetchArrowDataRequest* request,
                                        PFetchArrowDataResult* result,
                                        google::protobuf::Closure* done) {
    bool ret = _arrow_flight_work_pool.try_offer([this, controller, request, result, done]() {
        brpc::ClosureGuard closure_guard(done);
        auto* cntl = static_cast<brpc::Controller*>(controller);
        auto* ctx = new GetArrowResultBatchCtx(cntl, result, done);
        _exec_env->result_mgr()->fetch_arrow_data(request->finst_id(), ctx);
    });
    if (!ret) {
        offer_failed(result, done, _arrow_flight_work_pool);
        return;
    }
}

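// outfile_write_success writes a small marker file next to the files produced
// by SELECT ... INTO OUTFILE so that downstream consumers can tell the export
// finished completely.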
void PInternalService::outfile_write_success(google::protobuf::RpcController* controller,
                                             const POutfileWriteSuccessRequest* request,
                                             POutfileWriteSuccessResult* result,
                                             google::protobuf::Closure* done) {
    bool ret = _heavy_work_pool.try_offer([request, result, done]() {
        VLOG_RPC << "outfile write success file";
        brpc::ClosureGuard closure_guard(done);
        TResultFileSink result_file_sink;
        Status st = Status::OK();
        {
            const uint8_t* buf = (const uint8_t*)(request->result_file_sink().data());
            uint32_t len = request->result_file_sink().size();
            st = deserialize_thrift_msg(buf, &len, false, &result_file_sink);
            if (!st.ok()) {
                LOG(WARNING) << "outfile write success file failed, errmsg = " << st;
                st.to_protobuf(result->mutable_status());
                return;
            }
        }

        TResultFileSinkOptions file_options = result_file_sink.file_options;
        std::stringstream ss;
        ss << file_options.file_path << file_options.success_file_name;
        std::string file_name = ss.str();
        if (result_file_sink.storage_backend_type == TStorageBackendType::LOCAL) {
            // For the local file writer, file_path is a local directory.
            // Do a simple safety check here by testing whether the file already exists.
            // Because the file path is arbitrarily specified by the user, Doris is not
            // responsible for ensuring the path is correct; the check only prevents
            // overwriting an existing file.
            bool exists = true;
            st = io::global_local_filesystem()->exists(file_name, &exists);
            if (!st.ok()) {
                LOG(WARNING) << "outfile write success file failed, errmsg = " << st;
                st.to_protobuf(result->mutable_status());
                return;
            }
            if (exists) {
                st = Status::InternalError("File already exists: {}", file_name);
            }
            if (!st.ok()) {
                LOG(WARNING) << "outfile write success file failed, errmsg = " << st;
                st.to_protobuf(result->mutable_status());
                return;
            }
        }

        auto&& res = FileFactory::create_file_writer(
                FileFactory::convert_storage_type(result_file_sink.storage_backend_type),
                ExecEnv::GetInstance(), file_options.broker_addresses,
                file_options.broker_properties, file_name,
                {
                        .write_file_cache = false,
                        .sync_file_data = false,
                });
        using T = std::decay_t<decltype(res)>;
        if (!res.has_value()) [[unlikely]] {
            st = std::forward<T>(res).error();
            st.to_protobuf(result->mutable_status());
            return;
        }

        std::unique_ptr<doris::io::FileWriter> _file_writer_impl = std::forward<T>(res).value();
        // Must write something because the S3 file writer cannot write an empty file.
        st = _file_writer_impl->append({"success"});
        if (!st.ok()) {
            LOG(WARNING) << "outfile write success file failed, errmsg=" << st;
            st.to_protobuf(result->mutable_status());
            return;
        }
        st = _file_writer_impl->close();
        if (!st.ok()) {
            LOG(WARNING) << "outfile write success file failed, errmsg=" << st;
            st.to_protobuf(result->mutable_status());
            return;
        }
    });
    if (!ret) {
        offer_failed(result, done, _heavy_work_pool);
        return;
    }
}

void PInternalService::fetch_table_schema(google::protobuf::RpcController* controller,
                                          const PFetchTableSchemaRequest* request,
                                          PFetchTableSchemaResult* result,
                                          google::protobuf::Closure* done) {
    bool ret = _heavy_work_pool.try_offer([request, result, done]() {
        VLOG_RPC << "fetch table schema";
        brpc::ClosureGuard closure_guard(done);
        TFileScanRange file_scan_range;
        Status st = Status::OK();
        {
            const uint8_t* buf = (const uint8_t*)(request->file_scan_range().data());
            uint32_t len = request->file_scan_range().size();
            st = deserialize_thrift_msg(buf, &len, false, &file_scan_range);
            if (!st.ok()) {
                LOG(WARNING) << "fetch table schema failed, errmsg=" << st;
                st.to_protobuf(result->mutable_status());
                return;
            }
        }
        if (file_scan_range.__isset.ranges == false) {
            st = Status::InternalError("can not get TFileRangeDesc.");
            st.to_protobuf(result->mutable_status());
            return;
        }
        if (file_scan_range.__isset.params == false) {
            st = Status::InternalError("can not get TFileScanRangeParams.");
            st.to_protobuf(result->mutable_status());
            return;
        }
        const TFileRangeDesc& range = file_scan_range.ranges.at(0);
        const TFileScanRangeParams& params = file_scan_range.params;

        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
                MemTrackerLimiter::Type::OTHER,
                fmt::format("InternalService::fetch_table_schema:{}#{}", params.format_type,
                            params.file_type));
        SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(mem_tracker);

        // Make sure the profile is destructed after the reader, because
        // PrefetchBufferedReader may access the profile asynchronously.
        std::unique_ptr<RuntimeProfile> profile =
                std::make_unique<RuntimeProfile>("FetchTableSchema");
        std::unique_ptr<vectorized::GenericReader> reader(nullptr);
        io::IOContext io_ctx;
        io::FileCacheStatistics file_cache_statis;
        io_ctx.file_cache_stats = &file_cache_statis;
        // file_slots is unused, but its lifetime must outlast the reader.
        std::vector<SlotDescriptor*> file_slots;
        switch (params.format_type) {
        case TFileFormatType::FORMAT_CSV_PLAIN:
        case TFileFormatType::FORMAT_CSV_GZ:
        case TFileFormatType::FORMAT_CSV_BZ2:
        case TFileFormatType::FORMAT_CSV_LZ4FRAME:
        case TFileFormatType::FORMAT_CSV_LZ4BLOCK:
        case TFileFormatType::FORMAT_CSV_SNAPPYBLOCK:
        case TFileFormatType::FORMAT_CSV_LZOP:
        case TFileFormatType::FORMAT_CSV_DEFLATE: {
            reader = vectorized::CsvReader::create_unique(nullptr, profile.get(), nullptr, params,
                                                          range, file_slots, &io_ctx);
            break;
        }
        case TFileFormatType::FORMAT_TEXT: {
            reader = vectorized::TextReader::create_unique(nullptr, profile.get(), nullptr, params,
                                                           range, file_slots, &io_ctx);
            break;
        }
        case TFileFormatType::FORMAT_PARQUET: {
            reader = vectorized::ParquetReader::create_unique(params, range, &io_ctx, nullptr);
            break;
        }
        case TFileFormatType::FORMAT_ORC: {
            reader = vectorized::OrcReader::create_unique(params, range, "", &io_ctx);
            break;
        }
        case TFileFormatType::FORMAT_JSON: {
            reader = vectorized::NewJsonReader::create_unique(profile.get(), params, range,
                                                              file_slots, &io_ctx);
            break;
        }
        case TFileFormatType::FORMAT_AVRO: {
            reader = vectorized::AvroJNIReader::create_unique(profile.get(), params, range,
                                                              file_slots);
            break;
        }
        default:
            st = Status::InternalError("Not supported file format in fetch table schema: {}",
                                       params.format_type);
            st.to_protobuf(result->mutable_status());
            return;
        }
        if (!st.ok()) {
            LOG(WARNING) << "failed to create reader, errmsg=" << st;
            st.to_protobuf(result->mutable_status());
            return;
        }
        st = reader->init_schema_reader();
        if (!st.ok()) {
            LOG(WARNING) << "failed to init reader, errmsg=" << st;
            st.to_protobuf(result->mutable_status());
            return;
        }
        std::vector<std::string> col_names;
        std::vector<TypeDescriptor> col_types;
        st = reader->get_parsed_schema(&col_names, &col_types);
        if (!st.ok()) {
            LOG(WARNING) << "fetch table schema failed, errmsg=" << st;
            st.to_protobuf(result->mutable_status());
            return;
        }
        result->set_column_nums(col_names.size());
        for (size_t idx = 0; idx < col_names.size(); ++idx) {
            result->add_column_names(col_names[idx]);
        }
        for (size_t idx = 0; idx < col_types.size(); ++idx) {
            PTypeDesc* type_desc = result->add_column_types();
            col_types[idx].to_protobuf(type_desc);
        }
        st.to_protobuf(result->mutable_status());
    });
    if (!ret) {
        offer_failed(result, done, _heavy_work_pool);
        return;
    }
}

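// fetch_arrow_flight_schema returns the serialized Arrow schema for a query
// instance's result set; when a public host or Arrow Flight proxy port is
// configured, it also tells the client where to pull the result from.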
void PInternalService::fetch_arrow_flight_schema(google::protobuf::RpcController* controller,
                                                 const PFetchArrowFlightSchemaRequest* request,
                                                 PFetchArrowFlightSchemaResult* result,
                                                 google::protobuf::Closure* done) {
    bool ret = _arrow_flight_work_pool.try_offer([request, result, done]() {
        brpc::ClosureGuard closure_guard(done);
        std::shared_ptr<arrow::Schema> schema;
        auto st = ExecEnv::GetInstance()->result_mgr()->find_arrow_schema(
                UniqueId(request->finst_id()).to_thrift(), &schema);
        if (!st.ok()) {
            LOG(WARNING) << "fetch arrow flight schema failed, errmsg=" << st;
            st.to_protobuf(result->mutable_status());
            return;
        }

        std::string schema_str;
        st = serialize_arrow_schema(&schema, &schema_str);
        if (st.ok()) {
            result->set_schema(std::move(schema_str));
            if (!config::public_host.empty()) {
                result->set_be_arrow_flight_ip(config::public_host);
            }
            if (config::arrow_flight_sql_proxy_port != -1) {
                result->set_be_arrow_flight_port(config::arrow_flight_sql_proxy_port);
            }
        }
        st.to_protobuf(result->mutable_status());
    });
    if (!ret) {
        offer_failed(result, done, _arrow_flight_work_pool);
        return;
    }
}

Status PInternalService::_tablet_fetch_data(const PTabletKeyLookupRequest* request,
                                            PTabletKeyLookupResponse* response) {
    PointQueryExecutor executor;
    RETURN_IF_ERROR(executor.init(request, response));
    RETURN_IF_ERROR(executor.lookup_up());
    executor.print_profile();
    return Status::OK();
}

950 | | void PInternalService::tablet_fetch_data(google::protobuf::RpcController* controller, |
951 | | const PTabletKeyLookupRequest* request, |
952 | | PTabletKeyLookupResponse* response, |
953 | 0 | google::protobuf::Closure* done) { |
954 | 0 | bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() { |
955 | 0 | [[maybe_unused]] auto* cntl = static_cast<brpc::Controller*>(controller); |
956 | 0 | brpc::ClosureGuard guard(done); |
957 | 0 | Status st = _tablet_fetch_data(request, response); |
958 | 0 | st.to_protobuf(response->mutable_status()); |
959 | 0 | }); |
960 | 0 | if (!ret) { Branch (960:9): [True: 0, False: 0]
|
961 | 0 | offer_failed(response, done, _light_work_pool); |
962 | 0 | return; |
963 | 0 | } |
964 | 0 | } |
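Nearly every handler in this file repeats the offload shape visible above: try to hand the body to a work pool, complete the RPC from inside the lambda through a ClosureGuard, and answer immediately with an error when the pool rejects the task. A distilled sketch of the pattern (run_on_pool is hypothetical; try_offer and offer_failed mirror the helpers used throughout this file):

    // Generic handler skeleton. Pool is anything exposing
    // bool try_offer(std::function<void()>); Response is any protobuf
    // message with mutable_status().
    template <typename Pool, typename Response, typename Fn>
    void run_on_pool(Pool& pool, Response* response,
                     google::protobuf::Closure* done, Fn fn) {
        bool accepted = pool.try_offer([response, done, fn]() {
            brpc::ClosureGuard guard(done); // guarantees the RPC is answered
            Status st = fn();
            st.to_protobuf(response->mutable_status());
        });
        if (!accepted) {
            // Pool saturated: fail fast rather than blocking the brpc worker.
            offer_failed(response, done, pool);
        }
    }

Keeping the guard inside the lambda ensures done->Run() fires exactly once, whether the body succeeds or returns early.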
965 | | |
966 | | void PInternalService::test_jdbc_connection(google::protobuf::RpcController* controller, |
967 | | const PJdbcTestConnectionRequest* request, |
968 | | PJdbcTestConnectionResult* result, |
969 | 0 | google::protobuf::Closure* done) { |
970 | 0 | bool ret = _heavy_work_pool.try_offer([request, result, done]() { |
971 | 0 | VLOG_RPC << "test jdbc connection"; Line | Count | Source | 35 | 0 | #define VLOG_RPC VLOG(8) |
|
972 | 0 | brpc::ClosureGuard closure_guard(done); |
973 | 0 | TTableDescriptor table_desc; |
974 | 0 | vectorized::JdbcConnectorParam jdbc_param; |
975 | 0 | Status st = Status::OK(); |
976 | 0 | { |
977 | 0 | const uint8_t* buf = (const uint8_t*)request->jdbc_table().data(); |
978 | 0 | uint32_t len = request->jdbc_table().size(); |
979 | 0 | st = deserialize_thrift_msg(buf, &len, false, &table_desc); |
980 | 0 | if (!st.ok()) { Branch (980:17): [True: 0, False: 0]
|
981 | 0 | LOG(WARNING) << "test jdbc connection failed, errmsg=" << st; |
982 | 0 | st.to_protobuf(result->mutable_status()); |
983 | 0 | return; |
984 | 0 | } |
985 | 0 | } |
986 | 0 | TJdbcTable jdbc_table = (table_desc.jdbcTable); |
987 | 0 | jdbc_param.catalog_id = jdbc_table.catalog_id; |
988 | 0 | jdbc_param.driver_class = jdbc_table.jdbc_driver_class; |
989 | 0 | jdbc_param.driver_path = jdbc_table.jdbc_driver_url; |
990 | 0 | jdbc_param.driver_checksum = jdbc_table.jdbc_driver_checksum; |
991 | 0 | jdbc_param.jdbc_url = jdbc_table.jdbc_url; |
992 | 0 | jdbc_param.user = jdbc_table.jdbc_user; |
993 | 0 | jdbc_param.passwd = jdbc_table.jdbc_password; |
994 | 0 | jdbc_param.query_string = request->query_str(); |
995 | 0 | jdbc_param.table_type = static_cast<TOdbcTableType::type>(request->jdbc_table_type()); |
996 | 0 | jdbc_param.use_transaction = false; |
997 | 0 | jdbc_param.connection_pool_min_size = jdbc_table.connection_pool_min_size; |
998 | 0 | jdbc_param.connection_pool_max_size = jdbc_table.connection_pool_max_size; |
999 | 0 | jdbc_param.connection_pool_max_life_time = jdbc_table.connection_pool_max_life_time; |
1000 | 0 | jdbc_param.connection_pool_max_wait_time = jdbc_table.connection_pool_max_wait_time; |
1001 | 0 | jdbc_param.connection_pool_keep_alive = jdbc_table.connection_pool_keep_alive; |
1002 | |
|
1003 | 0 | std::unique_ptr<vectorized::JdbcConnector> jdbc_connector; |
1004 | 0 | jdbc_connector.reset(new (std::nothrow) vectorized::JdbcConnector(jdbc_param)); |
1005 | |
|
1006 | 0 | st = jdbc_connector->test_connection(); |
1007 | 0 | st.to_protobuf(result->mutable_status()); |
1008 | |
|
1009 | 0 | Status clean_st = jdbc_connector->clean_datasource(); |
1010 | 0 | if (!clean_st.ok()) { Branch (1010:13): [True: 0, False: 0]
|
1011 | 0 | LOG(WARNING) << "Failed to clean JDBC datasource: " << clean_st.msg(); |
1012 | 0 | } |
1013 | 0 | Status close_st = jdbc_connector->close(); |
1014 | 0 | if (!close_st.ok()) { Branch (1014:13): [True: 0, False: 0]
|
1015 | 0 | LOG(WARNING) << "Failed to close JDBC connector: " << close_st.msg(); |
1016 | 0 | } |
1017 | 0 | }); |
1018 | |
|
1019 | 0 | if (!ret) { Branch (1019:9): [True: 0, False: 0]
|
1020 | 0 | offer_failed(result, done, _heavy_work_pool); |
1021 | 0 | return; |
1022 | 0 | } |
1023 | 0 | } |
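deserialize_thrift_msg above is a Doris utility, but its mechanics are plain Thrift: wrap the raw bytes in a memory transport and let the generated struct read itself. A minimal sketch under that assumption (deserialize_binary is a hypothetical name; the false argument above selects the binary rather than compact protocol):

    #include <thrift/protocol/TBinaryProtocol.h>
    #include <thrift/transport/TBufferTransports.h>

    // Deserialize a Thrift-generated struct (e.g. TTableDescriptor) from a
    // binary-protocol byte buffer.
    template <typename TMsg>
    Status deserialize_binary(const uint8_t* buf, uint32_t len, TMsg* msg) {
        using namespace apache::thrift;
        auto transport = std::make_shared<transport::TMemoryBuffer>(
                const_cast<uint8_t*>(buf), len);
        protocol::TBinaryProtocol proto(transport);
        try {
            msg->read(&proto);
        } catch (const std::exception& e) {
            return Status::InternalError("deserialize thrift msg failed: {}", e.what());
        }
        return Status::OK();
    }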
1024 | | |
1025 | | void PInternalServiceImpl::get_column_ids_by_tablet_ids(google::protobuf::RpcController* controller, |
1026 | | const PFetchColIdsRequest* request, |
1027 | | PFetchColIdsResponse* response, |
1028 | 0 | google::protobuf::Closure* done) { |
1029 | 0 | bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() { |
1030 | 0 | _get_column_ids_by_tablet_ids(controller, request, response, done); |
1031 | 0 | }); |
1032 | 0 | if (!ret) { Branch (1032:9): [True: 0, False: 0]
|
1033 | 0 | offer_failed(response, done, _light_work_pool); |
1034 | 0 | return; |
1035 | 0 | } |
1036 | 0 | } |
1037 | | |
1038 | | void PInternalServiceImpl::_get_column_ids_by_tablet_ids( |
1039 | | google::protobuf::RpcController* controller, const PFetchColIdsRequest* request, |
1040 | 0 | PFetchColIdsResponse* response, google::protobuf::Closure* done) { |
1041 | 0 | brpc::ClosureGuard guard(done); |
1042 | 0 | [[maybe_unused]] auto* cntl = static_cast<brpc::Controller*>(controller); |
1043 | 0 | TabletManager* tablet_mgr = _engine.tablet_manager(); |
1044 | 0 | const auto& params = request->params(); |
1045 | 0 | for (const auto& param : params) { Branch (1045:28): [True: 0, False: 0]
|
1046 | 0 | int64_t index_id = param.indexid(); |
1047 | 0 | const auto& tablet_ids = param.tablet_ids(); |
1048 | 0 | std::set<std::set<int32_t>> filter_set; |
1049 | 0 | std::map<int32_t, const TabletColumn*> id_to_column; |
1050 | 0 | for (const int64_t tablet_id : tablet_ids) { Branch (1050:38): [True: 0, False: 0]
|
1051 | 0 | TabletSharedPtr tablet = tablet_mgr->get_tablet(tablet_id); |
1052 | 0 | if (tablet == nullptr) { Branch (1052:17): [True: 0, False: 0]
|
1053 | 0 | std::stringstream ss; |
1054 | 0 | ss << "cannot get tablet by id:" << tablet_id; |
1055 | 0 | LOG(WARNING) << ss.str(); |
1056 | 0 | response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE); |
1057 | 0 | response->mutable_status()->add_error_msgs(ss.str()); |
1058 | 0 | return; |
1059 | 0 | } |
1060 | | // check schema consistency, column ids should be the same |
1061 | 0 | const auto& columns = tablet->tablet_schema()->columns(); |
1062 | |
|
1063 | 0 | std::set<int32_t> column_ids; |
1064 | 0 | for (const auto& col : columns) { Branch (1064:34): [True: 0, False: 0]
|
1065 | 0 | column_ids.insert(col->unique_id()); |
1066 | 0 | } |
1067 | 0 | filter_set.insert(std::move(column_ids)); |
1068 | |
|
1069 | 0 | if (id_to_column.empty()) { Branch (1069:17): [True: 0, False: 0]
|
1070 | 0 | for (const auto& col : columns) { Branch (1070:38): [True: 0, False: 0]
|
1071 | 0 | id_to_column.insert(std::pair {col->unique_id(), col.get()}); |
1072 | 0 | } |
1073 | 0 | } else { |
1074 | 0 | for (const auto& col : columns) { Branch (1074:38): [True: 0, False: 0]
|
1075 | 0 | auto it = id_to_column.find(col->unique_id()); |
1076 | 0 | if (it == id_to_column.end() || *(it->second) != *col) { Branch (1076:25): [True: 0, False: 0]
Branch (1076:53): [True: 0, False: 0]
|
1077 | 0 | ColumnPB prev_col_pb; |
1078 | 0 | ColumnPB curr_col_pb; |
1079 | 0 | if (it != id_to_column.end()) { Branch (1079:29): [True: 0, False: 0]
|
1080 | 0 | it->second->to_schema_pb(&prev_col_pb); |
1081 | 0 | } |
1082 | 0 | col->to_schema_pb(&curr_col_pb); |
1083 | 0 | std::stringstream ss; |
1084 | 0 | ss << "consistency check failed: index{ " << index_id << " }" |
1085 | 0 | << " got inconsistent schema, prev column: " << prev_col_pb.DebugString() |
1086 | 0 | << " current column: " << curr_col_pb.DebugString(); |
1087 | 0 | LOG(WARNING) << ss.str(); |
1088 | 0 | response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE); |
1089 | 0 | response->mutable_status()->add_error_msgs(ss.str()); |
1090 | 0 | return; |
1091 | 0 | } |
1092 | 0 | } |
1093 | 0 | } |
1094 | 0 | } |
1095 | | |
1096 | 0 | if (filter_set.size() > 1) { Branch (1096:13): [True: 0, False: 0]
|
1097 | | // consistency check failed |
1098 | 0 | std::stringstream ss; |
1099 | 0 | ss << "consistency check failed: index{" << index_id << "}" |
1100 | 0 | << " got inconsistent schema"; |
1101 | 0 | LOG(WARNING) << ss.str(); |
1102 | 0 | response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE); |
1103 | 0 | response->mutable_status()->add_error_msgs(ss.str()); |
1104 | 0 | return; |
1105 | 0 | } |
1106 | | // consistency check passed, use the first tablet as the representative |
1107 | 0 | TabletSharedPtr tablet = tablet_mgr->get_tablet(tablet_ids[0]); |
1108 | 0 | const auto& columns = tablet->tablet_schema()->columns(); |
1109 | 0 | auto entry = response->add_entries(); |
1110 | 0 | entry->set_index_id(index_id); |
1111 | 0 | auto col_name_to_id = entry->mutable_col_name_to_id(); |
1112 | 0 | for (const auto& column : columns) { Branch (1112:33): [True: 0, False: 0]
|
1113 | 0 | (*col_name_to_id)[column->name()] = column->unique_id(); |
1114 | 0 | } |
1115 | 0 | } |
1116 | 0 | response->mutable_status()->set_status_code(TStatusCode::OK); |
1117 | 0 | } |
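The consistency check above rests on a small trick: every tablet's column ids are collected into a std::set<int32_t> and inserted into a std::set of such sets, so after the loop filter_set.size() > 1 exactly when at least two tablets disagree on their column-id sets. A self-contained illustration:

    #include <cassert>
    #include <cstdint>
    #include <set>

    int main() {
        std::set<std::set<int32_t>> filter_set;
        // Two tablets with the same column ids collapse into one entry
        // (std::set ignores insertion order and duplicates).
        filter_set.insert({1, 2, 3});
        filter_set.insert({3, 2, 1});
        assert(filter_set.size() == 1); // consistent schemas

        // A tablet missing a column produces a second entry, the condition
        // the handler above rejects with ILLEGAL_STATE.
        filter_set.insert({1, 2});
        assert(filter_set.size() > 1);  // inconsistent schemas
        return 0;
    }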
1118 | | |
1119 | | template <class RPCResponse> |
1120 | | struct AsyncRPCContext { |
1121 | | RPCResponse response; |
1122 | | brpc::Controller cntl; |
1123 | | brpc::CallId cid; |
1124 | | }; |
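AsyncRPCContext just keeps a response, a controller, and the controller's CallId alive together for the lifetime of one asynchronous call. The fan-out pattern built on it in fetch_remote_tablet_schema below reduces to: record the CallId before starting the call, fire every request with brpc::DoNothing() so nothing blocks, then brpc::Join each id. A condensed sketch (stubs, request, and timeout_ms stand in for the real variables; it assumes reachable brpc peers):

    std::vector<AsyncRPCContext<PFetchRemoteSchemaResponse>> ctxs(stubs.size());
    for (size_t i = 0; i < stubs.size(); ++i) {
        ctxs[i].cid = ctxs[i].cntl.call_id(); // capture id before the call starts
        ctxs[i].cntl.set_timeout_ms(timeout_ms);
        stubs[i]->fetch_remote_tablet_schema(&ctxs[i].cntl, &request,
                                             &ctxs[i].response, brpc::DoNothing());
    }
    for (auto& ctx : ctxs) {
        brpc::Join(ctx.cid); // join unconditionally so no call is left in flight
        if (ctx.cntl.Failed()) {
            // record the error, but keep joining the remaining calls
        }
    }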
1125 | | |
1126 | | void PInternalService::fetch_remote_tablet_schema(google::protobuf::RpcController* controller, |
1127 | | const PFetchRemoteSchemaRequest* request, |
1128 | | PFetchRemoteSchemaResponse* response, |
1129 | 0 | google::protobuf::Closure* done) { |
1130 | 0 | bool ret = _heavy_work_pool.try_offer([request, response, done]() { |
1131 | 0 | brpc::ClosureGuard closure_guard(done); |
1132 | 0 | Status st = Status::OK(); |
1133 | 0 | if (request->is_coordinator()) { Branch (1133:13): [True: 0, False: 0]
|
1134 | | // Spawn rpc requests to non-coordinator nodes, and finally merge them all |
1135 | 0 | PFetchRemoteSchemaRequest remote_request(*request); |
1136 | | // set it to non-coordinator to get the merged schema |
1137 | 0 | remote_request.set_is_coordinator(false); |
1138 | 0 | using PFetchRemoteTabletSchemaRpcContext = AsyncRPCContext<PFetchRemoteSchemaResponse>; |
1139 | 0 | std::vector<PFetchRemoteTabletSchemaRpcContext> rpc_contexts( |
1140 | 0 | request->tablet_location_size()); |
1141 | 0 | for (int i = 0; i < request->tablet_location_size(); ++i) { Branch (1141:29): [True: 0, False: 0]
|
1142 | 0 | std::string host = request->tablet_location(i).host(); |
1143 | 0 | int32_t brpc_port = request->tablet_location(i).brpc_port(); |
1144 | 0 | std::shared_ptr<PBackendService_Stub> stub( |
1145 | 0 | ExecEnv::GetInstance()->brpc_internal_client_cache()->get_client( |
1146 | 0 | host, brpc_port)); |
1147 | 0 | if (stub == nullptr) { Branch (1147:21): [True: 0, False: 0]
|
1148 | 0 | LOG(WARNING) << "Failed to init rpc to " << host << ":" << brpc_port; |
1149 | 0 | st = Status::InternalError("Failed to init rpc to {}:{}", host, brpc_port); |
1150 | 0 | continue; |
1151 | 0 | } |
1152 | 0 | rpc_contexts[i].cid = rpc_contexts[i].cntl.call_id(); |
1153 | 0 | rpc_contexts[i].cntl.set_timeout_ms(config::fetch_remote_schema_rpc_timeout_ms); |
1154 | 0 | stub->fetch_remote_tablet_schema(&rpc_contexts[i].cntl, &remote_request, |
1155 | 0 | &rpc_contexts[i].response, brpc::DoNothing()); |
1156 | 0 | } |
1157 | 0 | std::vector<TabletSchemaSPtr> schemas; |
1158 | 0 | for (auto& rpc_context : rpc_contexts) { Branch (1158:36): [True: 0, False: 0]
|
1159 | 0 | brpc::Join(rpc_context.cid); |
1160 | 0 | if (!st.ok()) { Branch (1160:21): [True: 0, False: 0]
|
1161 | | // make sure all in-flight rpc requests are joined |
1162 | 0 | continue; |
1163 | 0 | } |
1164 | 0 | if (rpc_context.cntl.Failed()) { Branch (1164:21): [True: 0, False: 0]
|
1165 | 0 | LOG(WARNING) << "fetch_remote_tablet_schema rpc err:" |
1166 | 0 | << rpc_context.cntl.ErrorText(); |
1167 | 0 | ExecEnv::GetInstance()->brpc_internal_client_cache()->erase( |
1168 | 0 | rpc_context.cntl.remote_side()); |
1169 | 0 | st = Status::InternalError("fetch_remote_tablet_schema rpc err: {}", |
1170 | 0 | rpc_context.cntl.ErrorText()); |
1171 | 0 | } |
1172 | 0 | if (rpc_context.response.status().status_code() != 0) { Branch (1172:21): [True: 0, False: 0]
|
1173 | 0 | st = Status::create(rpc_context.response.status()); |
1174 | 0 | } |
1175 | 0 | if (rpc_context.response.has_merged_schema()) { Branch (1175:21): [True: 0, False: 0]
|
1176 | 0 | TabletSchemaSPtr schema = std::make_shared<TabletSchema>(); |
1177 | 0 | schema->init_from_pb(rpc_context.response.merged_schema()); |
1178 | 0 | schemas.push_back(schema); |
1179 | 0 | } |
1180 | 0 | } |
1181 | 0 | if (!schemas.empty() && st.ok()) { Branch (1181:17): [True: 0, False: 0]
Branch (1181:37): [True: 0, False: 0]
|
1182 | | // merge all |
1183 | 0 | TabletSchemaSPtr merged_schema; |
1184 | 0 | static_cast<void>(vectorized::schema_util::get_least_common_schema(schemas, nullptr, |
1185 | 0 | merged_schema)); |
1186 | 0 | VLOG_DEBUG << "dump schema:" << merged_schema->dump_structure(); Line | Count | Source | 41 | 0 | #define VLOG_DEBUG VLOG(7) |
|
1187 | 0 | merged_schema->reserve_extracted_columns(); |
1188 | 0 | merged_schema->to_schema_pb(response->mutable_merged_schema()); |
1189 | 0 | } |
1190 | 0 | st.to_protobuf(response->mutable_status()); |
1191 | 0 | return; |
1192 | 0 | } else { |
1193 | | // This is not a coordinator, get its tablets and merge their schemas |
1194 | 0 | std::vector<int64_t> target_tablets; |
1195 | 0 | for (int i = 0; i < request->tablet_location_size(); ++i) { Branch (1195:29): [True: 0, False: 0]
|
1196 | 0 | const auto& location = request->tablet_location(i); |
1197 | 0 | auto backend = BackendOptions::get_local_backend(); |
1198 | | // If this is the target backend |
1199 | 0 | if (backend.host == location.host() && config::brpc_port == location.brpc_port()) { Branch (1199:21): [True: 0, False: 0]
Branch (1199:56): [True: 0, False: 0]
|
1200 | 0 | target_tablets.assign(location.tablet_id().begin(), location.tablet_id().end()); |
1201 | 0 | break; |
1202 | 0 | } |
1203 | 0 | } |
1204 | 0 | if (!target_tablets.empty()) { Branch (1204:17): [True: 0, False: 0]
|
1205 | 0 | std::vector<TabletSchemaSPtr> tablet_schemas; |
1206 | 0 | for (int64_t tablet_id : target_tablets) { Branch (1206:40): [True: 0, False: 0]
|
1207 | 0 | auto res = ExecEnv::get_tablet(tablet_id); |
1208 | 0 | if (!res.has_value()) { Branch (1208:25): [True: 0, False: 0]
|
1209 | | // just ignore |
1210 | 0 | LOG(WARNING) << "tablet does not exist, tablet id is " << tablet_id; |
1211 | 0 | continue; |
1212 | 0 | } |
1213 | 0 | auto schema = res.value()->merged_tablet_schema(); |
1214 | 0 | if (schema != nullptr) { Branch (1214:25): [True: 0, False: 0]
|
1215 | 0 | if (!schema->need_record_variant_extended_schema()) { Branch (1215:29): [True: 0, False: 0]
|
1216 | 0 | schema = res.value()->calculate_variant_extended_schema(); |
1217 | 0 | } |
1218 | 0 | if (schema != nullptr) { Branch (1218:29): [True: 0, False: 0]
|
1219 | 0 | tablet_schemas.push_back(schema); |
1220 | 0 | } |
1221 | 0 | } |
1222 | 0 | } |
1223 | 0 | if (!tablet_schemas.empty()) { Branch (1223:21): [True: 0, False: 0]
|
1224 | | // merge all |
1225 | 0 | TabletSchemaSPtr merged_schema; |
1226 | 0 | static_cast<void>(vectorized::schema_util::get_least_common_schema( |
1227 | 0 | tablet_schemas, nullptr, merged_schema)); |
1228 | 0 | merged_schema->to_schema_pb(response->mutable_merged_schema()); |
1229 | 0 | VLOG_DEBUG << "dump schema:" << merged_schema->dump_structure(); Line | Count | Source | 41 | 0 | #define VLOG_DEBUG VLOG(7) |
|
1230 | 0 | } |
1231 | 0 | } |
1232 | 0 | st.to_protobuf(response->mutable_status()); |
1233 | 0 | } |
1234 | 0 | }); |
1235 | 0 | if (!ret) { Branch (1235:9): [True: 0, False: 0]
|
1236 | 0 | offer_failed(response, done, _heavy_work_pool); |
1237 | 0 | } |
1238 | 0 | } |
1239 | | |
1240 | | void PInternalService::report_stream_load_status(google::protobuf::RpcController* controller, |
1241 | | const PReportStreamLoadStatusRequest* request, |
1242 | | PReportStreamLoadStatusResponse* response, |
1243 | 0 | google::protobuf::Closure* done) { |
1244 | 0 | TUniqueId load_id; |
1245 | 0 | load_id.__set_hi(request->load_id().hi()); |
1246 | 0 | load_id.__set_lo(request->load_id().lo()); |
1247 | 0 | Status st = Status::OK(); |
1248 | 0 | auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id); |
1249 | 0 | if (!stream_load_ctx) { Branch (1249:9): [True: 0, False: 0]
|
1250 | 0 | st = Status::InternalError("unknown stream load id: {}", UniqueId(load_id).to_string()); |
1251 | 0 | } |
1252 | 0 | stream_load_ctx->promise.set_value(st); |
1253 | 0 | st.to_protobuf(response->mutable_status()); |
1254 | 0 | } |
1255 | | |
1256 | | void PInternalService::get_info(google::protobuf::RpcController* controller, |
1257 | | const PProxyRequest* request, PProxyResult* response, |
1258 | 0 | google::protobuf::Closure* done) { |
1259 | 0 | bool ret = _exec_env->routine_load_task_executor()->get_thread_pool().submit_func([this, |
1260 | 0 | request, |
1261 | 0 | response, |
1262 | 0 | done]() { |
1263 | 0 | brpc::ClosureGuard closure_guard(done); |
1264 | | // PProxyRequest is defined in gensrc/proto/internal_service.proto |
1265 | | // Currently it supports 2 kinds of requests: |
1266 | | // 1. get all kafka partition ids for given topic |
1267 | | // 2. get all kafka partition offsets for given topic and timestamp. |
1268 | 0 | int timeout_ms = request->has_timeout_secs() ? request->timeout_secs() * 1000 : 60 * 1000; Branch (1268:26): [True: 0, False: 0]
|
1269 | 0 | if (request->has_kafka_meta_request()) { Branch (1269:13): [True: 0, False: 0]
|
1270 | 0 | const PKafkaMetaProxyRequest& kafka_request = request->kafka_meta_request(); |
1271 | 0 | if (!kafka_request.offset_flags().empty()) { Branch (1271:17): [True: 0, False: 0]
|
1272 | 0 | std::vector<PIntegerPair> partition_offsets; |
1273 | 0 | Status st = _exec_env->routine_load_task_executor() |
1274 | 0 | ->get_kafka_real_offsets_for_partitions( |
1275 | 0 | request->kafka_meta_request(), &partition_offsets, |
1276 | 0 | timeout_ms); |
1277 | 0 | if (st.ok()) { Branch (1277:21): [True: 0, False: 0]
|
1278 | 0 | PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets(); |
1279 | 0 | for (const auto& entry : partition_offsets) { Branch (1279:44): [True: 0, False: 0]
|
1280 | 0 | PIntegerPair* res = part_offsets->add_offset_times(); |
1281 | 0 | res->set_key(entry.key()); |
1282 | 0 | res->set_val(entry.val()); |
1283 | 0 | } |
1284 | 0 | } |
1285 | 0 | st.to_protobuf(response->mutable_status()); |
1286 | 0 | return; |
1287 | 0 | } else if (!kafka_request.partition_id_for_latest_offsets().empty()) { Branch (1287:24): [True: 0, False: 0]
|
1288 | | // get latest offsets for specified partition ids |
1289 | 0 | std::vector<PIntegerPair> partition_offsets; |
1290 | 0 | Status st = _exec_env->routine_load_task_executor() |
1291 | 0 | ->get_kafka_latest_offsets_for_partitions( |
1292 | 0 | request->kafka_meta_request(), &partition_offsets, |
1293 | 0 | timeout_ms); |
1294 | 0 | if (st.ok()) { Branch (1294:21): [True: 0, False: 0]
|
1295 | 0 | PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets(); |
1296 | 0 | for (const auto& entry : partition_offsets) { Branch (1296:44): [True: 0, False: 0]
|
1297 | 0 | PIntegerPair* res = part_offsets->add_offset_times(); |
1298 | 0 | res->set_key(entry.key()); |
1299 | 0 | res->set_val(entry.val()); |
1300 | 0 | } |
1301 | 0 | } |
1302 | 0 | st.to_protobuf(response->mutable_status()); |
1303 | 0 | return; |
1304 | 0 | } else if (!kafka_request.offset_times().empty()) { Branch (1304:24): [True: 0, False: 0]
|
1305 | | // if offset_times() has elements, this request is to get offsets by timestamp. |
1306 | 0 | std::vector<PIntegerPair> partition_offsets; |
1307 | 0 | Status st = _exec_env->routine_load_task_executor() |
1308 | 0 | ->get_kafka_partition_offsets_for_times( |
1309 | 0 | request->kafka_meta_request(), &partition_offsets, |
1310 | 0 | timeout_ms); |
1311 | 0 | if (st.ok()) { Branch (1311:21): [True: 0, False: 0]
|
1312 | 0 | PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets(); |
1313 | 0 | for (const auto& entry : partition_offsets) { Branch (1313:44): [True: 0, False: 0]
|
1314 | 0 | PIntegerPair* res = part_offsets->add_offset_times(); |
1315 | 0 | res->set_key(entry.key()); |
1316 | 0 | res->set_val(entry.val()); |
1317 | 0 | } |
1318 | 0 | } |
1319 | 0 | st.to_protobuf(response->mutable_status()); |
1320 | 0 | return; |
1321 | 0 | } else { |
1322 | | // get partition ids of topic |
1323 | 0 | std::vector<int32_t> partition_ids; |
1324 | 0 | Status st = _exec_env->routine_load_task_executor()->get_kafka_partition_meta( |
1325 | 0 | request->kafka_meta_request(), &partition_ids); |
1326 | 0 | if (st.ok()) { Branch (1326:21): [True: 0, False: 0]
|
1327 | 0 | PKafkaMetaProxyResult* kafka_result = response->mutable_kafka_meta_result(); |
1328 | 0 | for (int32_t id : partition_ids) { Branch (1328:37): [True: 0, False: 0]
|
1329 | 0 | kafka_result->add_partition_ids(id); |
1330 | 0 | } |
1331 | 0 | } |
1332 | 0 | st.to_protobuf(response->mutable_status()); |
1333 | 0 | return; |
1334 | 0 | } |
1335 | 0 | } |
1336 | 0 | Status::OK().to_protobuf(response->mutable_status()); |
1337 | 0 | }); |
1338 | 0 | if (!ret) { Branch (1338:9): [True: 0, False: 0]
|
1339 | 0 | offer_failed(response, done, _heavy_work_pool); |
1340 | 0 | return; |
1341 | 0 | } |
1342 | 0 | } |
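The three kafka offset branches above differ only in which RoutineLoadTaskExecutor method they call; the loop that copies partition/offset pairs into the response is repeated verbatim three times. A hedged refactor sketch of the shared tail (fill_partition_offsets is hypothetical, not a helper that exists in this file):

    // Copy looked-up partition/offset pairs into the proxy response and
    // record the lookup status, mirroring the loop repeated above.
    static void fill_partition_offsets(const std::vector<PIntegerPair>& offsets,
                                       const Status& st, PProxyResult* response) {
        if (st.ok()) {
            PKafkaPartitionOffsets* part_offsets = response->mutable_partition_offsets();
            for (const auto& entry : offsets) {
                PIntegerPair* res = part_offsets->add_offset_times();
                res->set_key(entry.key());
                res->set_val(entry.val());
            }
        }
        st.to_protobuf(response->mutable_status());
    }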
1343 | | |
1344 | | void PInternalService::update_cache(google::protobuf::RpcController* controller, |
1345 | | const PUpdateCacheRequest* request, PCacheResponse* response, |
1346 | 0 | google::protobuf::Closure* done) { |
1347 | 0 | bool ret = _light_work_pool.try_offer([this, request, response, done]() { |
1348 | 0 | brpc::ClosureGuard closure_guard(done); |
1349 | 0 | _exec_env->result_cache()->update(request, response); |
1350 | 0 | }); |
1351 | 0 | if (!ret) { Branch (1351:9): [True: 0, False: 0]
|
1352 | 0 | offer_failed(response, done, _light_work_pool); |
1353 | 0 | return; |
1354 | 0 | } |
1355 | 0 | } |
1356 | | |
1357 | | void PInternalService::fetch_cache(google::protobuf::RpcController* controller, |
1358 | | const PFetchCacheRequest* request, PFetchCacheResult* result, |
1359 | 0 | google::protobuf::Closure* done) { |
1360 | 0 | bool ret = _light_work_pool.try_offer([this, request, result, done]() { |
1361 | 0 | brpc::ClosureGuard closure_guard(done); |
1362 | 0 | _exec_env->result_cache()->fetch(request, result); |
1363 | 0 | }); |
1364 | 0 | if (!ret) { Branch (1364:9): [True: 0, False: 0]
|
1365 | 0 | offer_failed(result, done, _light_work_pool); |
1366 | 0 | return; |
1367 | 0 | } |
1368 | 0 | } |
1369 | | |
1370 | | void PInternalService::clear_cache(google::protobuf::RpcController* controller, |
1371 | | const PClearCacheRequest* request, PCacheResponse* response, |
1372 | 0 | google::protobuf::Closure* done) { |
1373 | 0 | bool ret = _light_work_pool.try_offer([this, request, response, done]() { |
1374 | 0 | brpc::ClosureGuard closure_guard(done); |
1375 | 0 | _exec_env->result_cache()->clear(request, response); |
1376 | 0 | }); |
1377 | 0 | if (!ret) { Branch (1377:9): [True: 0, False: 0]
|
1378 | 0 | offer_failed(response, done, _light_work_pool); |
1379 | 0 | return; |
1380 | 0 | } |
1381 | 0 | } |
1382 | | |
1383 | | void PInternalService::merge_filter(::google::protobuf::RpcController* controller, |
1384 | | const ::doris::PMergeFilterRequest* request, |
1385 | | ::doris::PMergeFilterResponse* response, |
1386 | 0 | ::google::protobuf::Closure* done) { |
1387 | 0 | bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() { |
1388 | 0 | brpc::ClosureGuard closure_guard(done); |
1389 | 0 | auto attachment = static_cast<brpc::Controller*>(controller)->request_attachment(); |
1390 | 0 | butil::IOBufAsZeroCopyInputStream zero_copy_input_stream(attachment); |
1391 | 0 | Status st = _exec_env->fragment_mgr()->merge_filter(request, &zero_copy_input_stream); |
1392 | 0 | st.to_protobuf(response->mutable_status()); |
1393 | 0 | }); |
1394 | 0 | if (!ret) { Branch (1394:9): [True: 0, False: 0]
|
1395 | 0 | offer_failed(response, done, _light_work_pool); |
1396 | 0 | return; |
1397 | 0 | } |
1398 | 0 | } |
1399 | | |
1400 | | void PInternalService::send_filter_size(::google::protobuf::RpcController* controller, |
1401 | | const ::doris::PSendFilterSizeRequest* request, |
1402 | | ::doris::PSendFilterSizeResponse* response, |
1403 | 0 | ::google::protobuf::Closure* done) { |
1404 | 0 | bool ret = _light_work_pool.try_offer([this, request, response, done]() { |
1405 | 0 | brpc::ClosureGuard closure_guard(done); |
1406 | 0 | Status st = _exec_env->fragment_mgr()->send_filter_size(request); |
1407 | 0 | st.to_protobuf(response->mutable_status()); |
1408 | 0 | }); |
1409 | 0 | if (!ret) { Branch (1409:9): [True: 0, False: 0]
|
1410 | 0 | offer_failed(response, done, _light_work_pool); |
1411 | 0 | return; |
1412 | 0 | } |
1413 | 0 | } |
1414 | | |
1415 | | void PInternalService::sync_filter_size(::google::protobuf::RpcController* controller, |
1416 | | const ::doris::PSyncFilterSizeRequest* request, |
1417 | | ::doris::PSyncFilterSizeResponse* response, |
1418 | 0 | ::google::protobuf::Closure* done) { |
1419 | 0 | bool ret = _light_work_pool.try_offer([this, request, response, done]() { |
1420 | 0 | brpc::ClosureGuard closure_guard(done); |
1421 | 0 | Status st = _exec_env->fragment_mgr()->sync_filter_size(request); |
1422 | 0 | st.to_protobuf(response->mutable_status()); |
1423 | 0 | }); |
1424 | 0 | if (!ret) { Branch (1424:9): [True: 0, False: 0]
|
1425 | 0 | offer_failed(response, done, _light_work_pool); |
1426 | 0 | return; |
1427 | 0 | } |
1428 | 0 | } |
1429 | | |
1430 | | void PInternalService::apply_filterv2(::google::protobuf::RpcController* controller, |
1431 | | const ::doris::PPublishFilterRequestV2* request, |
1432 | | ::doris::PPublishFilterResponse* response, |
1433 | 0 | ::google::protobuf::Closure* done) { |
1434 | 0 | bool ret = _light_work_pool.try_offer([this, controller, request, response, done]() { |
1435 | 0 | brpc::ClosureGuard closure_guard(done); |
1436 | 0 | auto attachment = static_cast<brpc::Controller*>(controller)->request_attachment(); |
1437 | 0 | butil::IOBufAsZeroCopyInputStream zero_copy_input_stream(attachment); |
1438 | 0 | UniqueId unique_id(request->query_id()); |
1439 | 0 | VLOG_NOTICE << "rpc apply_filterv2 recv"; Line | Count | Source | 42 | 0 | #define VLOG_NOTICE VLOG(3) |
|
1440 | 0 | Status st = _exec_env->fragment_mgr()->apply_filterv2(request, &zero_copy_input_stream); |
1441 | 0 | if (!st.ok()) { Branch (1441:13): [True: 0, False: 0]
|
1442 | 0 | LOG(WARNING) << "apply filter meet error: " << st.to_string(); |
1443 | 0 | } |
1444 | 0 | st.to_protobuf(response->mutable_status()); |
1445 | 0 | }); |
1446 | 0 | if (!ret) { Branch (1446:9): [True: 0, False: 0]
|
1447 | 0 | offer_failed(response, done, _light_work_pool); |
1448 | 0 | return; |
1449 | 0 | } |
1450 | 0 | } |
1451 | | |
1452 | | void PInternalService::send_data(google::protobuf::RpcController* controller, |
1453 | | const PSendDataRequest* request, PSendDataResult* response, |
1454 | 0 | google::protobuf::Closure* done) { |
1455 | 0 | bool ret = _heavy_work_pool.try_offer([this, request, response, done]() { |
1456 | 0 | brpc::ClosureGuard closure_guard(done); |
1457 | 0 | TUniqueId load_id; |
1458 | 0 | load_id.hi = request->load_id().hi(); |
1459 | 0 | load_id.lo = request->load_id().lo(); |
1460 | | // Since 1.2.3 the load id is included in the send data request and used to look up the pipe |
1461 | 0 | auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id); |
1462 | 0 | if (stream_load_ctx == nullptr) { Branch (1462:13): [True: 0, False: 0]
|
1463 | 0 | response->mutable_status()->set_status_code(1); |
1464 | 0 | response->mutable_status()->add_error_msgs("could not find stream load context"); |
1465 | 0 | } else { |
1466 | 0 | auto pipe = stream_load_ctx->pipe; |
1467 | 0 | for (int i = 0; i < request->data_size(); ++i) { Branch (1467:29): [True: 0, False: 0]
|
1468 | 0 | std::unique_ptr<PDataRow> row(new PDataRow()); |
1469 | 0 | row->CopyFrom(request->data(i)); |
1470 | 0 | Status s = pipe->append(std::move(row)); |
1471 | 0 | if (!s.ok()) { Branch (1471:21): [True: 0, False: 0]
|
1472 | 0 | response->mutable_status()->set_status_code(1); |
1473 | 0 | response->mutable_status()->add_error_msgs(s.to_string()); |
1474 | 0 | return; |
1475 | 0 | } |
1476 | 0 | } |
1477 | 0 | response->mutable_status()->set_status_code(0); |
1478 | 0 | } |
1479 | 0 | }); |
1480 | 0 | if (!ret) { Branch (1480:9): [True: 0, False: 0]
|
1481 | 0 | offer_failed(response, done, _heavy_work_pool); |
1482 | 0 | return; |
1483 | 0 | } |
1484 | 0 | } |
1485 | | |
1486 | | void PInternalService::commit(google::protobuf::RpcController* controller, |
1487 | | const PCommitRequest* request, PCommitResult* response, |
1488 | 0 | google::protobuf::Closure* done) { |
1489 | 0 | bool ret = _heavy_work_pool.try_offer([this, request, response, done]() { |
1490 | 0 | brpc::ClosureGuard closure_guard(done); |
1491 | 0 | TUniqueId load_id; |
1492 | 0 | load_id.hi = request->load_id().hi(); |
1493 | 0 | load_id.lo = request->load_id().lo(); |
1494 | |
|
1495 | 0 | auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id); |
1496 | 0 | if (stream_load_ctx == nullptr) { Branch (1496:13): [True: 0, False: 0]
|
1497 | 0 | response->mutable_status()->set_status_code(1); |
1498 | 0 | response->mutable_status()->add_error_msgs("could not find stream load context"); |
1499 | 0 | } else { |
1500 | 0 | static_cast<void>(stream_load_ctx->pipe->finish()); |
1501 | 0 | response->mutable_status()->set_status_code(0); |
1502 | 0 | } |
1503 | 0 | }); |
1504 | 0 | if (!ret) { Branch (1504:9): [True: 0, False: 0]
|
1505 | 0 | offer_failed(response, done, _heavy_work_pool); |
1506 | 0 | return; |
1507 | 0 | } |
1508 | 0 | } |
1509 | | |
1510 | | void PInternalService::rollback(google::protobuf::RpcController* controller, |
1511 | | const PRollbackRequest* request, PRollbackResult* response, |
1512 | 0 | google::protobuf::Closure* done) { |
1513 | 0 | bool ret = _heavy_work_pool.try_offer([this, request, response, done]() { |
1514 | 0 | brpc::ClosureGuard closure_guard(done); |
1515 | 0 | TUniqueId load_id; |
1516 | 0 | load_id.hi = request->load_id().hi(); |
1517 | 0 | load_id.lo = request->load_id().lo(); |
1518 | 0 | auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(load_id); |
1519 | 0 | if (stream_load_ctx == nullptr) { Branch (1519:13): [True: 0, False: 0]
|
1520 | 0 | response->mutable_status()->set_status_code(1); |
1521 | 0 | response->mutable_status()->add_error_msgs("could not find stream load context"); |
1522 | 0 | } else { |
1523 | 0 | stream_load_ctx->pipe->cancel("rollback"); |
1524 | 0 | response->mutable_status()->set_status_code(0); |
1525 | 0 | } |
1526 | 0 | }); |
1527 | 0 | if (!ret) { Branch (1527:9): [True: 0, False: 0]
|
1528 | 0 | offer_failed(response, done, _heavy_work_pool); |
1529 | 0 | return; |
1530 | 0 | } |
1531 | 0 | } |
1532 | | |
1533 | | void PInternalService::fold_constant_expr(google::protobuf::RpcController* controller, |
1534 | | const PConstantExprRequest* request, |
1535 | | PConstantExprResult* response, |
1536 | 0 | google::protobuf::Closure* done) { |
1537 | 0 | bool ret = _light_work_pool.try_offer([this, request, response, done]() { |
1538 | 0 | brpc::ClosureGuard closure_guard(done); |
1539 | 0 | TFoldConstantParams t_request; |
1540 | 0 | Status st = Status::OK(); |
1541 | 0 | { |
1542 | 0 | const uint8_t* buf = (const uint8_t*)request->request().data(); |
1543 | 0 | uint32_t len = request->request().size(); |
1544 | 0 | st = deserialize_thrift_msg(buf, &len, false, &t_request); |
1545 | 0 | } |
1546 | 0 | if (!st.ok()) { Branch (1546:13): [True: 0, False: 0]
|
1547 | 0 | LOG(WARNING) << "exec fold constant expr failed, errmsg=" << st |
1548 | 0 | << ", query_id=" << t_request.query_id; |
1549 | 0 | } |
1550 | 0 | st = _fold_constant_expr(request->request(), response); |
1551 | 0 | if (!st.ok()) { Branch (1551:13): [True: 0, False: 0]
|
1552 | 0 | LOG(WARNING) << "exec fold constant expr failed, errmsg=" << st |
1553 | 0 | << ", query_id=" << t_request.query_id; |
1554 | 0 | } |
1555 | 0 | st.to_protobuf(response->mutable_status()); |
1556 | 0 | }); |
1557 | 0 | if (!ret) { Branch (1557:9): [True: 0, False: 0]
|
1558 | 0 | offer_failed(response, done, _light_work_pool); |
1559 | 0 | return; |
1560 | 0 | } |
1561 | 0 | } |
1562 | | |
1563 | | Status PInternalService::_fold_constant_expr(const std::string& ser_request, |
1564 | 0 | PConstantExprResult* response) { |
1565 | 0 | TFoldConstantParams t_request; |
1566 | 0 | { |
1567 | 0 | const uint8_t* buf = (const uint8_t*)ser_request.data(); |
1568 | 0 | uint32_t len = ser_request.size(); |
1569 | 0 | RETURN_IF_ERROR(deserialize_thrift_msg(buf, &len, false, &t_request)); |
1570 | 0 | } |
1571 | 0 | std::unique_ptr<FoldConstantExecutor> fold_executor = std::make_unique<FoldConstantExecutor>(); |
1572 | 0 | RETURN_IF_ERROR_OR_CATCH_EXCEPTION(fold_executor->fold_constant_vexpr(t_request, response)); Line | Count | Source | 96 | 0 | do { \ | 97 | 0 | try { \ | 98 | 0 | doris::enable_thread_catch_bad_alloc++; \ | 99 | 0 | Defer defer {[&]() { doris::enable_thread_catch_bad_alloc--; }}; \ | 100 | 0 | { \ | 101 | 0 | Status _status_ = (stmt); \ | 102 | 0 | if (UNLIKELY(!_status_.ok())) { \ | 103 | 0 | return _status_; \ | 104 | 0 | } \ | 105 | 0 | } \ | 106 | 0 | } catch (const doris::Exception& e) { \ | 107 | 0 | if (e.code() == doris::ErrorCode::MEM_ALLOC_FAILED) { \ Branch (107:17): [True: 0, False: 0]
| 108 | 0 | return Status::MemoryLimitExceeded(fmt::format( \ | 109 | 0 | "PreCatch error code:{}, {}, __FILE__:{}, __LINE__:{}, __FUNCTION__:{}", \ | 110 | 0 | e.code(), e.to_string(), __FILE__, __LINE__, __PRETTY_FUNCTION__)); \ | 111 | 0 | } \ | 112 | 0 | return Status::Error<false>(e.code(), e.to_string()); \ | 113 | 0 | } \ | 114 | 0 | } while (0) Branch (114:14): [Folded - Ignored]
|
|
1573 | 0 | return Status::OK(); |
1574 | 0 | } |
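The expanded macro above shows the intent of RETURN_IF_ERROR_OR_CATCH_EXCEPTION: run a statement, propagate a bad Status, and convert a thrown doris::Exception into a Status instead of letting it unwind through the RPC layer. A minimal function-style analogue (run_catching is hypothetical and omits the enable_thread_catch_bad_alloc toggling the real macro performs):

    // Execute fn(), translating doris::Exception into Status.
    template <typename Fn>
    Status run_catching(Fn&& fn) {
        try {
            Status st = fn();
            if (!st.ok()) {
                return st;
            }
        } catch (const doris::Exception& e) {
            // Allocation failures become MemoryLimitExceeded; everything else
            // keeps the exception's own error code.
            if (e.code() == doris::ErrorCode::MEM_ALLOC_FAILED) {
                return Status::MemoryLimitExceeded(e.to_string());
            }
            return Status::Error<false>(e.code(), e.to_string());
        }
        return Status::OK();
    }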
1575 | | |
1576 | | void PInternalService::transmit_block(google::protobuf::RpcController* controller, |
1577 | | const PTransmitDataParams* request, |
1578 | | PTransmitDataResult* response, |
1579 | 0 | google::protobuf::Closure* done) { |
1580 | 0 | int64_t receive_time = GetCurrentTimeNanos(); |
1581 | 0 | if (config::enable_bthread_transmit_block) { Branch (1581:9): [True: 0, False: 0]
|
1582 | 0 | response->set_receive_time(receive_time); |
1583 | | // Under high concurrency the thread pool suffers heavy lock contention, |
1584 | | // and offering work to it may fail, so we should avoid using the thread |
1585 | | // pool here. |
1586 | 0 | _transmit_block(controller, request, response, done, Status::OK(), 0); |
1587 | 0 | } else { |
1588 | 0 | bool ret = _light_work_pool.try_offer([this, controller, request, response, done, |
1589 | 0 | receive_time]() { |
1590 | 0 | response->set_receive_time(receive_time); |
1591 | | // Sometimes the transmit block function is the last owner of the PlanFragmentExecutor |
1592 | | // and will release the object, which may be a JNIContext. |
1593 | | // A JNIContext holds TLS objects that cannot work correctly under a bthread |
1594 | | // context, so the logic is put on a pthread instead. |
1595 | | // But this rarely happens, so the config is disabled by default. |
1596 | 0 | _transmit_block(controller, request, response, done, Status::OK(), |
1597 | 0 | GetCurrentTimeNanos() - receive_time); |
1598 | 0 | }); |
1599 | 0 | if (!ret) { Branch (1599:13): [True: 0, False: 0]
|
1600 | 0 | offer_failed(response, done, _light_work_pool); |
1601 | 0 | return; |
1602 | 0 | } |
1603 | 0 | } |
1604 | 0 | } |
1605 | | |
1606 | | void PInternalService::transmit_block_by_http(google::protobuf::RpcController* controller, |
1607 | | const PEmptyRequest* request, |
1608 | | PTransmitDataResult* response, |
1609 | 0 | google::protobuf::Closure* done) { |
1610 | 0 | int64_t receive_time = GetCurrentTimeNanos(); |
1611 | 0 | bool ret = _heavy_work_pool.try_offer([this, controller, response, done, receive_time]() { |
1612 | 0 | PTransmitDataParams* new_request = new PTransmitDataParams(); |
1613 | 0 | google::protobuf::Closure* new_done = |
1614 | 0 | new NewHttpClosure<PTransmitDataParams>(new_request, done); |
1615 | 0 | brpc::Controller* cntl = static_cast<brpc::Controller*>(controller); |
1616 | 0 | Status st = |
1617 | 0 | attachment_extract_request_contain_block<PTransmitDataParams>(new_request, cntl); |
1618 | 0 | _transmit_block(controller, new_request, response, new_done, st, |
1619 | 0 | GetCurrentTimeNanos() - receive_time); |
1620 | 0 | }); |
1621 | 0 | if (!ret) { Branch (1621:9): [True: 0, False: 0]
|
1622 | 0 | offer_failed(response, done, _heavy_work_pool); |
1623 | 0 | return; |
1624 | 0 | } |
1625 | 0 | } |
1626 | | |
1627 | | void PInternalService::_transmit_block(google::protobuf::RpcController* controller, |
1628 | | const PTransmitDataParams* request, |
1629 | | PTransmitDataResult* response, |
1630 | | google::protobuf::Closure* done, const Status& extract_st, |
1631 | 0 | const int64_t wait_for_worker) { |
1632 | 0 | if (request->has_query_id()) { Branch (1632:9): [True: 0, False: 0]
|
1633 | 0 | VLOG_ROW << "transmit block: fragment_instance_id=" << print_id(request->finst_id()) Line | Count | Source | 38 | 0 | #define VLOG_ROW VLOG(10) |
|
1634 | 0 | << " query_id=" << print_id(request->query_id()) << " node=" << request->node_id(); |
1635 | 0 | } |
1636 | | |
1637 | | // The response is accessed when done->Run is called in transmit_block(), |
1638 | | // give response a default value to avoid null pointers in high concurrency. |
1639 | 0 | Status st; |
1640 | 0 | if (extract_st.ok()) { Branch (1640:9): [True: 0, False: 0]
|
1641 | 0 | st = _exec_env->vstream_mgr()->transmit_block(request, &done, wait_for_worker); |
1642 | 0 | if (!st.ok() && !st.is<END_OF_FILE>()) { Branch (1642:13): [True: 0, False: 0]
Branch (1642:25): [True: 0, False: 0]
|
1643 | 0 | LOG(WARNING) << "transmit_block failed, message=" << st |
1644 | 0 | << ", fragment_instance_id=" << print_id(request->finst_id()) |
1645 | 0 | << ", node=" << request->node_id() |
1646 | 0 | << ", from sender_id: " << request->sender_id() |
1647 | 0 | << ", be_number: " << request->be_number() |
1648 | 0 | << ", packet_seq: " << request->packet_seq(); |
1649 | 0 | } |
1650 | 0 | } else { |
1651 | 0 | st = extract_st; |
1652 | 0 | } |
1653 | 0 | if (done != nullptr) { Branch (1653:9): [True: 0, False: 0]
|
1654 | 0 | st.to_protobuf(response->mutable_status()); |
1655 | 0 | done->Run(); |
1656 | 0 | } |
1657 | 0 | } |
1658 | | |
1659 | | void PInternalService::check_rpc_channel(google::protobuf::RpcController* controller, |
1660 | | const PCheckRPCChannelRequest* request, |
1661 | | PCheckRPCChannelResponse* response, |
1662 | 0 | google::protobuf::Closure* done) { |
1663 | 0 | bool ret = _light_work_pool.try_offer([request, response, done]() { |
1664 | 0 | brpc::ClosureGuard closure_guard(done); |
1665 | 0 | response->mutable_status()->set_status_code(0); |
1666 | 0 | if (request->data().size() != request->size()) { Branch (1666:13): [True: 0, False: 0]
|
1667 | 0 | std::stringstream ss; |
1668 | 0 | ss << "data size not same, expected: " << request->size() |
1669 | 0 | << ", actual: " << request->data().size(); |
1670 | 0 | response->mutable_status()->add_error_msgs(ss.str()); |
1671 | 0 | response->mutable_status()->set_status_code(1); |
1672 | |
|
1673 | 0 | } else { |
1674 | 0 | Md5Digest digest; |
1675 | 0 | digest.update(static_cast<const void*>(request->data().c_str()), |
1676 | 0 | request->data().size()); |
1677 | 0 | digest.digest(); |
1678 | 0 | if (!iequal(digest.hex(), request->md5())) { Branch (1678:17): [True: 0, False: 0]
|
1679 | 0 | std::stringstream ss; |
1680 | 0 | ss << "md5 not same, expected: " << request->md5() << ", actual: " << digest.hex(); |
1681 | 0 | response->mutable_status()->add_error_msgs(ss.str()); |
1682 | 0 | response->mutable_status()->set_status_code(1); |
1683 | 0 | } |
1684 | 0 | } |
1685 | 0 | }); |
1686 | 0 | if (!ret) { Branch (1686:9): [True: 0, False: 0]
|
1687 | 0 | offer_failed(response, done, _light_work_pool); |
1688 | 0 | return; |
1689 | 0 | } |
1690 | 0 | } |
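check_rpc_channel validates the echoed payload in two steps: a length comparison first, then an MD5 comparison (case-insensitive via iequal). Md5Digest is Doris's own wrapper; the same check expressed with OpenSSL's EVP API would look roughly like this (payload_matches is hypothetical):

    #include <openssl/evp.h>
    #include <string>

    // Return true when data has the expected size and its MD5 hex digest
    // matches expected_md5_hex (lowercase).
    bool payload_matches(const std::string& data, size_t expected_size,
                         const std::string& expected_md5_hex) {
        if (data.size() != expected_size) {
            return false;
        }
        unsigned char digest[EVP_MAX_MD_SIZE];
        unsigned int len = 0;
        EVP_Digest(data.data(), data.size(), digest, &len, EVP_md5(), nullptr);
        static const char* hex = "0123456789abcdef";
        std::string actual;
        for (unsigned int i = 0; i < len; ++i) {
            actual += hex[digest[i] >> 4];
            actual += hex[digest[i] & 0xf];
        }
        return actual == expected_md5_hex;
    }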
1691 | | |
1692 | | void PInternalService::reset_rpc_channel(google::protobuf::RpcController* controller, |
1693 | | const PResetRPCChannelRequest* request, |
1694 | | PResetRPCChannelResponse* response, |
1695 | 0 | google::protobuf::Closure* done) { |
1696 | 0 | bool ret = _light_work_pool.try_offer([request, response, done]() { |
1697 | 0 | brpc::ClosureGuard closure_guard(done); |
1698 | 0 | response->mutable_status()->set_status_code(0); |
1699 | 0 | if (request->all()) { Branch (1699:13): [True: 0, False: 0]
|
1700 | 0 | int size = ExecEnv::GetInstance()->brpc_internal_client_cache()->size(); |
1701 | 0 | if (size > 0) { Branch (1701:17): [True: 0, False: 0]
|
1702 | 0 | std::vector<std::string> endpoints; |
1703 | 0 | ExecEnv::GetInstance()->brpc_internal_client_cache()->get_all(&endpoints); |
1704 | 0 | ExecEnv::GetInstance()->brpc_internal_client_cache()->clear(); |
1705 | 0 | *response->mutable_channels() = {endpoints.begin(), endpoints.end()}; |
1706 | 0 | } |
1707 | 0 | } else { |
1708 | 0 | for (const std::string& endpoint : request->endpoints()) { Branch (1708:46): [True: 0, False: 0]
|
1709 | 0 | if (!ExecEnv::GetInstance()->brpc_internal_client_cache()->exist(endpoint)) { Branch (1709:21): [True: 0, False: 0]
|
1710 | 0 | response->mutable_status()->add_error_msgs(endpoint + ": not found."); |
1711 | 0 | continue; |
1712 | 0 | } |
1713 | | |
1714 | 0 | if (ExecEnv::GetInstance()->brpc_internal_client_cache()->erase(endpoint)) { Branch (1714:21): [True: 0, False: 0]
|
1715 | 0 | response->add_channels(endpoint); |
1716 | 0 | } else { |
1717 | 0 | response->mutable_status()->add_error_msgs(endpoint + ": reset failed."); |
1718 | 0 | } |
1719 | 0 | } |
1720 | 0 | if (request->endpoints_size() != response->channels_size()) { Branch (1720:17): [True: 0, False: 0]
|
1721 | 0 | response->mutable_status()->set_status_code(1); |
1722 | 0 | } |
1723 | 0 | } |
1724 | 0 | }); |
1725 | 0 | if (!ret) { Branch (1725:9): [True: 0, False: 0]
|
1726 | 0 | offer_failed(response, done, _light_work_pool); |
1727 | 0 | return; |
1728 | 0 | } |
1729 | 0 | } |
1730 | | |
1731 | | void PInternalService::hand_shake(google::protobuf::RpcController* controller, |
1732 | | const PHandShakeRequest* request, PHandShakeResponse* response, |
1733 | 0 | google::protobuf::Closure* done) { |
1734 | | // The light pool may be full. Handshake is used to check the connection state of brpc. |
1735 | | // It should not be interfered with by the thread pool logic. |
1736 | 0 | brpc::ClosureGuard closure_guard(done); |
1737 | 0 | if (request->has_hello()) { Branch (1737:9): [True: 0, False: 0]
|
1738 | 0 | response->set_hello(request->hello()); |
1739 | 0 | } |
1740 | 0 | response->mutable_status()->set_status_code(0); |
1741 | 0 | } |
1742 | | |
1743 | | constexpr char HttpProtocol[] = "http://"; |
1744 | | constexpr char DownloadApiPath[] = "/api/_tablet/_download?token="; |
1745 | | constexpr char FileParam[] = "&file="; |
1746 | | |
1747 | | static std::string construct_url(const std::string& host_port, const std::string& token, |
1748 | 0 | const std::string& path) { |
1749 | 0 | return fmt::format("{}{}{}{}{}{}", HttpProtocol, host_port, DownloadApiPath, token, FileParam, |
1750 | 0 | path); |
1751 | 0 | } |
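The format string simply concatenates its six pieces in order, so with illustrative values:

    // construct_url("10.0.0.2:8040", "e9f1", "/path/to/000.dat") returns
    // "http://10.0.0.2:8040/api/_tablet/_download?token=e9f1&file=/path/to/000.dat"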
1752 | | |
1753 | | static Status download_file_action(std::string& remote_file_url, std::string& local_file_path, |
1754 | 0 | uint64_t estimate_timeout, uint64_t file_size) { |
1755 | 0 | auto download_cb = [remote_file_url, estimate_timeout, local_file_path, |
1756 | 0 | file_size](HttpClient* client) { |
1757 | 0 | RETURN_IF_ERROR(client->init(remote_file_url)); |
1758 | 0 | client->set_timeout_ms(estimate_timeout * 1000); |
1759 | 0 | RETURN_IF_ERROR(client->download(local_file_path)); |
1760 | | |
1761 | 0 | if (file_size > 0) { Branch (1761:13): [True: 0, False: 0]
|
1762 | | // Check file length |
1763 | 0 | uint64_t local_file_size = std::filesystem::file_size(local_file_path); |
1764 | 0 | if (local_file_size != file_size) { Branch (1764:17): [True: 0, False: 0]
|
1765 | 0 | LOG(WARNING) << "failed to pull rowset for slave replica. download file " |
1766 | 0 | "length error" |
1767 | 0 | << ", remote_path=" << remote_file_url << ", file_size=" << file_size |
1768 | 0 | << ", local_file_size=" << local_file_size; |
1769 | 0 | return Status::InternalError("downloaded file size is not equal"); |
1770 | 0 | } |
1771 | 0 | } |
1772 | | |
1773 | 0 | return io::global_local_filesystem()->permission(local_file_path, |
1774 | 0 | io::LocalFileSystem::PERMS_OWNER_RW); |
1775 | 0 | }; |
1776 | 0 | return HttpClient::execute_with_retry(DOWNLOAD_FILE_MAX_RETRY, 1, download_cb); |
1777 | 0 | } |
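HttpClient::execute_with_retry drives the callback above; judging from this call site it takes a retry budget, a sleep interval in seconds, and a callback that receives a fresh client per attempt. A sketch of such a helper under those assumptions (not the real implementation):

    #include <chrono>
    #include <thread>

    template <typename Callback>
    Status execute_with_retry_sketch(int max_retry, int sleep_sec, Callback cb) {
        Status st;
        for (int attempt = 0; attempt < max_retry; ++attempt) {
            HttpClient client; // fresh client per attempt
            st = cb(&client);
            if (st.ok()) {
                return st;
            }
            std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
        }
        return st; // last error after exhausting the retry budget
    }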
1778 | | |
1779 | | void PInternalServiceImpl::request_slave_tablet_pull_rowset( |
1780 | | google::protobuf::RpcController* controller, const PTabletWriteSlaveRequest* request, |
1781 | 0 | PTabletWriteSlaveResult* response, google::protobuf::Closure* done) { |
1782 | 0 | brpc::ClosureGuard closure_guard(done); |
1783 | 0 | const RowsetMetaPB& rowset_meta_pb = request->rowset_meta(); |
1784 | 0 | const std::string& rowset_path = request->rowset_path(); |
1785 | 0 | google::protobuf::Map<int64, int64> segments_size = request->segments_size(); |
1786 | 0 | google::protobuf::Map<int64, PTabletWriteSlaveRequest_IndexSizeMap> indices_size = |
1787 | 0 | request->inverted_indices_size(); |
1788 | 0 | std::string host = request->host(); |
1789 | 0 | int64_t http_port = request->http_port(); |
1790 | 0 | int64_t brpc_port = request->brpc_port(); |
1791 | 0 | std::string token = request->token(); |
1792 | 0 | int64_t node_id = request->node_id(); |
1793 | 0 | bool ret = _heavy_work_pool.try_offer([rowset_meta_pb, host, brpc_port, node_id, segments_size, |
1794 | 0 | indices_size, http_port, token, rowset_path, this]() { |
1795 | 0 | TabletSharedPtr tablet = _engine.tablet_manager()->get_tablet( |
1796 | 0 | rowset_meta_pb.tablet_id(), rowset_meta_pb.tablet_schema_hash()); |
1797 | 0 | if (tablet == nullptr) { Branch (1797:13): [True: 0, False: 0]
|
1798 | 0 | LOG(WARNING) << "failed to pull rowset for slave replica. tablet [" |
1799 | 0 | << rowset_meta_pb.tablet_id() |
1800 | 0 | << "] does not exist. txn_id=" << rowset_meta_pb.txn_id(); |
1801 | 0 | _response_pull_slave_rowset(host, brpc_port, rowset_meta_pb.txn_id(), |
1802 | 0 | rowset_meta_pb.tablet_id(), node_id, false); |
1803 | 0 | return; |
1804 | 0 | } |
1805 | | |
1806 | 0 | RowsetMetaSharedPtr rowset_meta(new RowsetMeta()); |
1807 | 0 | std::string rowset_meta_str; |
1808 | 0 | bool ret = rowset_meta_pb.SerializeToString(&rowset_meta_str); |
1809 | 0 | if (!ret) { Branch (1809:13): [True: 0, False: 0]
|
1810 | 0 | LOG(WARNING) << "failed to pull rowset for slave replica. serialize rowset meta " |
1811 | 0 | "failed. rowset_id=" |
1812 | 0 | << rowset_meta_pb.rowset_id() |
1813 | 0 | << ", tablet_id=" << rowset_meta_pb.tablet_id() |
1814 | 0 | << ", txn_id=" << rowset_meta_pb.txn_id(); |
1815 | 0 | _response_pull_slave_rowset(host, brpc_port, rowset_meta_pb.txn_id(), |
1816 | 0 | rowset_meta_pb.tablet_id(), node_id, false); |
1817 | 0 | return; |
1818 | 0 | } |
1819 | 0 | bool parsed = rowset_meta->init(rowset_meta_str); |
1820 | 0 | if (!parsed) { Branch (1820:13): [True: 0, False: 0]
|
1821 | 0 | LOG(WARNING) << "failed to pull rowset for slave replica. parse rowset meta string " |
1822 | 0 | "failed. rowset_id=" |
1823 | 0 | << rowset_meta_pb.rowset_id() |
1824 | 0 | << ", tablet_id=" << rowset_meta_pb.tablet_id() |
1825 | 0 | << ", txn_id=" << rowset_meta_pb.txn_id(); |
1826 | | // returning false would break the meta iterator; return true to skip this error |
1827 | 0 | _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(), |
1828 | 0 | rowset_meta->tablet_id(), node_id, false); |
1829 | 0 | return; |
1830 | 0 | } |
1831 | 0 | RowsetId remote_rowset_id = rowset_meta->rowset_id(); |
1832 | | // change the rowset id because it may be the same as another local rowset |
1833 | 0 | RowsetId new_rowset_id = _engine.next_rowset_id(); |
1834 | 0 | auto pending_rs_guard = _engine.pending_local_rowsets().add(new_rowset_id); |
1835 | 0 | rowset_meta->set_rowset_id(new_rowset_id); |
1836 | 0 | rowset_meta->set_tablet_uid(tablet->tablet_uid()); |
1837 | 0 | VLOG_CRITICAL << "succeed to init rowset meta for slave replica. rowset_id=" Line | Count | Source | 43 | 0 | #define VLOG_CRITICAL VLOG(1) |
|
1838 | 0 | << rowset_meta->rowset_id() << ", tablet_id=" << rowset_meta->tablet_id() |
1839 | 0 | << ", txn_id=" << rowset_meta->txn_id(); |
1840 | |
|
1841 | 0 | auto tablet_scheme = rowset_meta->tablet_schema(); |
1842 | 0 | for (const auto& segment : segments_size) { Branch (1842:34): [True: 0, False: 0]
|
1843 | 0 | uint64_t file_size = segment.second; |
1844 | 0 | uint64_t estimate_timeout = file_size / config::download_low_speed_limit_kbps / 1024; |
1845 | 0 | if (estimate_timeout < config::download_low_speed_time) { Branch (1845:17): [True: 0, False: 0]
|
1846 | 0 | estimate_timeout = config::download_low_speed_time; |
1847 | 0 | } |
1848 | |
|
1849 | 0 | std::string remote_file_path = |
1850 | 0 | local_segment_path(rowset_path, remote_rowset_id.to_string(), segment.first); |
1851 | 0 | std::string remote_file_url = |
1852 | 0 | construct_url(get_host_port(host, http_port), token, remote_file_path); |
1853 | |
|
1854 | 0 | std::string local_file_path = local_segment_path( |
1855 | 0 | tablet->tablet_path(), rowset_meta->rowset_id().to_string(), segment.first); |
1856 | |
|
1857 | 0 | auto st = download_file_action(remote_file_url, local_file_path, estimate_timeout, |
1858 | 0 | file_size); |
1859 | 0 | if (!st.ok()) { Branch (1859:17): [True: 0, False: 0]
|
1860 | 0 | LOG(WARNING) << "failed to pull rowset for slave replica. failed to download " |
1861 | 0 | "file. url=" |
1862 | 0 | << remote_file_url << ", local_path=" << local_file_path |
1863 | 0 | << ", txn_id=" << rowset_meta->txn_id(); |
1864 | 0 | _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(), |
1865 | 0 | rowset_meta->tablet_id(), node_id, false); |
1866 | 0 | return; |
1867 | 0 | } |
1868 | 0 | VLOG_CRITICAL << "succeed to download file for slave replica. url=" << remote_file_url Line | Count | Source | 43 | 0 | #define VLOG_CRITICAL VLOG(1) |
|
1869 | 0 | << ", local_path=" << local_file_path |
1870 | 0 | << ", txn_id=" << rowset_meta->txn_id(); |
1871 | 0 | if (indices_size.find(segment.first) != indices_size.end()) { Branch (1871:17): [True: 0, False: 0]
|
1872 | 0 | PTabletWriteSlaveRequest_IndexSizeMap segment_indices_size = |
1873 | 0 | indices_size.at(segment.first); |
1874 | |
|
1875 | 0 | for (auto index_size : segment_indices_size.index_sizes()) { Branch (1875:38): [True: 0, False: 0]
|
1876 | 0 | auto index_id = index_size.indexid(); |
1877 | 0 | auto size = index_size.size(); |
1878 | 0 | auto suffix_path = index_size.suffix_path(); |
1879 | 0 | std::string remote_inverted_index_file; |
1880 | 0 | std::string local_inverted_index_file; |
1881 | 0 | std::string remote_inverted_index_file_url; |
1882 | 0 | if (tablet_scheme->get_inverted_index_storage_format() == Branch (1882:25): [True: 0, False: 0]
|
1883 | 0 | InvertedIndexStorageFormatPB::V1) { |
1884 | 0 | remote_inverted_index_file = |
1885 | 0 | InvertedIndexDescriptor::get_index_file_path_v1( |
1886 | 0 | InvertedIndexDescriptor::get_index_file_path_prefix( |
1887 | 0 | remote_file_path), |
1888 | 0 | index_id, suffix_path); |
1889 | 0 | remote_inverted_index_file_url = construct_url( |
1890 | 0 | get_host_port(host, http_port), token, remote_inverted_index_file); |
1891 | |
|
1892 | 0 | local_inverted_index_file = InvertedIndexDescriptor::get_index_file_path_v1( |
1893 | 0 | InvertedIndexDescriptor::get_index_file_path_prefix( |
1894 | 0 | local_file_path), |
1895 | 0 | index_id, suffix_path); |
1896 | 0 | } else { |
1897 | 0 | remote_inverted_index_file = |
1898 | 0 | InvertedIndexDescriptor::get_index_file_path_v2( |
1899 | 0 | InvertedIndexDescriptor::get_index_file_path_prefix( |
1900 | 0 | remote_file_path)); |
1901 | 0 | remote_inverted_index_file_url = construct_url( |
1902 | 0 | get_host_port(host, http_port), token, remote_inverted_index_file); |
1903 | |
|
1904 | 0 | local_inverted_index_file = InvertedIndexDescriptor::get_index_file_path_v2( |
1905 | 0 | InvertedIndexDescriptor::get_index_file_path_prefix( |
1906 | 0 | local_file_path)); |
1907 | 0 | } |
1908 | 0 | st = download_file_action(remote_inverted_index_file_url, |
1909 | 0 | local_inverted_index_file, estimate_timeout, size); |
1910 | 0 | if (!st.ok()) { Branch (1910:25): [True: 0, False: 0]
|
1911 | 0 | LOG(WARNING) << "failed to pull rowset for slave replica. failed to " |
1912 | 0 | "download " |
1913 | 0 | "file. url=" |
1914 | 0 | << remote_inverted_index_file_url |
1915 | 0 | << ", local_path=" << local_inverted_index_file |
1916 | 0 | << ", txn_id=" << rowset_meta->txn_id(); |
1917 | 0 | _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(), |
1918 | 0 | rowset_meta->tablet_id(), node_id, false); |
1919 | 0 | return; |
1920 | 0 | } |
1921 | | |
1922 | 0 | VLOG_CRITICAL Line | Count | Source | 43 | 0 | #define VLOG_CRITICAL VLOG(1) |
|
1923 | 0 | << "succeed to download inverted index file for slave replica. url=" |
1924 | 0 | << remote_inverted_index_file_url |
1925 | 0 | << ", local_path=" << local_inverted_index_file |
1926 | 0 | << ", txn_id=" << rowset_meta->txn_id(); |
1927 | 0 | } |
1928 | 0 | } |
1929 | 0 | } |
1930 | | |
1931 | 0 | RowsetSharedPtr rowset; |
1932 | 0 | Status create_status = RowsetFactory::create_rowset( |
1933 | 0 | tablet->tablet_schema(), tablet->tablet_path(), rowset_meta, &rowset); |
1934 | 0 | if (!create_status) { Branch (1934:13): [True: 0, False: 0]
|
1935 | 0 | LOG(WARNING) << "failed to create rowset from rowset meta for slave replica" |
1936 | 0 | << ". rowset_id: " << rowset_meta->rowset_id() |
1937 | 0 | << ", rowset_type: " << rowset_meta->rowset_type() |
1938 | 0 | << ", rowset_state: " << rowset_meta->rowset_state() |
1939 | 0 | << ", tablet_id=" << rowset_meta->tablet_id() |
1940 | 0 | << ", txn_id=" << rowset_meta->txn_id(); |
1941 | 0 | _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(), |
1942 | 0 | rowset_meta->tablet_id(), node_id, false); |
1943 | 0 | return; |
1944 | 0 | } |
1945 | 0 | if (rowset_meta->rowset_state() != RowsetStatePB::COMMITTED) { Branch (1945:13): [True: 0, False: 0]
|
1946 | 0 | LOG(WARNING) << "could not commit txn for slave replica because master rowset state is " |
1947 | 0 | "not committed, rowset_state=" |
1948 | 0 | << rowset_meta->rowset_state() |
1949 | 0 | << ", tablet_id=" << rowset_meta->tablet_id() |
1950 | 0 | << ", txn_id=" << rowset_meta->txn_id(); |
1951 | 0 | _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(), |
1952 | 0 | rowset_meta->tablet_id(), node_id, false); |
1953 | 0 | return; |
1954 | 0 | } |
1955 | 0 | Status commit_txn_status = _engine.txn_manager()->commit_txn( |
1956 | 0 | tablet->data_dir()->get_meta(), rowset_meta->partition_id(), rowset_meta->txn_id(), |
1957 | 0 | rowset_meta->tablet_id(), tablet->tablet_uid(), rowset_meta->load_id(), rowset, |
1958 | 0 | std::move(pending_rs_guard), false); |
1959 | 0 | if (!commit_txn_status && !commit_txn_status.is<PUSH_TRANSACTION_ALREADY_EXIST>()) { Branch (1959:13): [True: 0, False: 0]
Branch (1959:35): [True: 0, False: 0]
|
1960 | 0 | LOG(WARNING) << "failed to add committed rowset for slave replica. rowset_id=" |
1961 | 0 | << rowset_meta->rowset_id() << ", tablet_id=" << rowset_meta->tablet_id() |
1962 | 0 | << ", txn_id=" << rowset_meta->txn_id(); |
1963 | 0 | _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(), |
1964 | 0 | rowset_meta->tablet_id(), node_id, false); |
1965 | 0 | return; |
1966 | 0 | } |
1967 | 0 | VLOG_CRITICAL << "succeed to pull rowset for slave replica. successfully to add committed " Line | Count | Source | 43 | 0 | #define VLOG_CRITICAL VLOG(1) |
|
1968 | 0 | "rowset: " |
1969 | 0 | << rowset_meta->rowset_id() |
1970 | 0 | << " to tablet, tablet_id=" << rowset_meta->tablet_id() |
1971 | 0 | << ", schema_hash=" << rowset_meta->tablet_schema_hash() |
1972 | 0 | << ", txn_id=" << rowset_meta->txn_id(); |
1973 | 0 | _response_pull_slave_rowset(host, brpc_port, rowset_meta->txn_id(), |
1974 | 0 | rowset_meta->tablet_id(), node_id, true); |
1975 | 0 | }); |
1976 | 0 | if (!ret) { Branch (1976:9): [True: 0, False: 0]
|
1977 | 0 | offer_failed(response, closure_guard.release(), _heavy_work_pool); |
1978 | 0 | return; |
1979 | 0 | } |
1980 | 0 | Status::OK().to_protobuf(response->mutable_status()); |
1981 | 0 | } |
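One detail from the download loop above worth spelling out is the timeout estimate: dividing a byte count by a KB/s rate and then by 1024 yields seconds, floored at config::download_low_speed_time. With illustrative numbers:

    // file_size = 1073741824 bytes (1 GiB)
    // config::download_low_speed_limit_kbps = 50   // i.e. 50 KB/s
    // 1073741824 / 50 / 1024 = 20971 seconds
    // -> roughly the time a 50 KB/s transfer needs for 1 GiB; if the result
    //    falls below config::download_low_speed_time, that floor is used.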
1982 | | |
1983 | | void PInternalServiceImpl::_response_pull_slave_rowset(const std::string& remote_host, |
1984 | | int64_t brpc_port, int64_t txn_id, |
1985 | | int64_t tablet_id, int64_t node_id, |
1986 | 0 | bool is_succeed) { |
1987 | 0 | std::shared_ptr<PBackendService_Stub> stub = |
1988 | 0 | ExecEnv::GetInstance()->brpc_internal_client_cache()->get_client(remote_host, |
1989 | 0 | brpc_port); |
1990 | 0 | if (stub == nullptr) {
1991 | 0 | LOG(WARNING) << "failed to response result of slave replica to master replica. get rpc " |
1992 | 0 | "stub failed, master host=" |
1993 | 0 | << remote_host << ", port=" << brpc_port << ", tablet_id=" << tablet_id |
1994 | 0 | << ", txn_id=" << txn_id; |
1995 | 0 | return; |
1996 | 0 | } |
1997 | | |
1998 | 0 | auto request = std::make_shared<PTabletWriteSlaveDoneRequest>(); |
1999 | 0 | request->set_txn_id(txn_id); |
2000 | 0 | request->set_tablet_id(tablet_id); |
2001 | 0 | request->set_node_id(node_id); |
2002 | 0 | request->set_is_succeed(is_succeed); |
2003 | 0 | auto pull_rowset_callback = DummyBrpcCallback<PTabletWriteSlaveDoneResult>::create_shared(); |
2004 | 0 | auto closure = AutoReleaseClosure< |
2005 | 0 | PTabletWriteSlaveDoneRequest, |
2006 | 0 | DummyBrpcCallback<PTabletWriteSlaveDoneResult>>::create_unique(request, |
2007 | 0 | pull_rowset_callback); |
2008 | 0 | closure->cntl_->set_timeout_ms(config::slave_replica_writer_rpc_timeout_sec * 1000); |
2009 | 0 | closure->cntl_->ignore_eovercrowded(); |
2010 | 0 | stub->response_slave_tablet_pull_rowset(closure->cntl_.get(), closure->request_.get(), |
2011 | 0 | closure->response_.get(), closure.get()); |
2012 | 0 | closure.release(); |
2013 | |
2014 | 0 | pull_rowset_callback->join(); |
2015 | 0 | if (pull_rowset_callback->cntl_->Failed()) {
2016 | 0 | LOG(WARNING) << "failed to response result of slave replica to master replica, error=" |
2017 | 0 | << berror(pull_rowset_callback->cntl_->ErrorCode()) |
2018 | 0 | << ", error_text=" << pull_rowset_callback->cntl_->ErrorText() |
2019 | 0 | << ", master host: " << remote_host << ", tablet_id=" << tablet_id |
2020 | 0 | << ", txn_id=" << txn_id; |
2021 | 0 | } |
2022 | 0 | VLOG_CRITICAL << "succeed to response the result of slave replica pull rowset to master " Line | Count | Source | 43 | 0 | #define VLOG_CRITICAL VLOG(1) |
|
2023 | 0 | "replica. master host: " |
2024 | 0 | << remote_host << ". is_succeed=" << is_succeed << ", tablet_id=" << tablet_id |
2025 | 0 | << ", slave server=" << node_id << ", txn_id=" << txn_id; |
2026 | 0 | } |
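The closure.release() call above is the load-bearing detail of this function: ownership of the done-closure passes to brpc, which invokes it when the RPC completes, so the owning unique_ptr must not also delete it. A self-deleting closure is the canonical shape; a minimal sketch (illustrative, not the Doris AutoReleaseClosure):

#include <google/protobuf/stubs/callback.h>

// Once handed to an async stub call, brpc runs this exactly once when
// the RPC finishes; deleting itself at the end of Run() is what makes
// releasing the owning smart pointer safe.
class SelfDeletingClosure : public google::protobuf::Closure {
public:
    void Run() override {
        // ... inspect the controller/response, log failures, etc. ...
        delete this;
    }
};

Note that the handler still calls pull_rowset_callback->join() afterwards, so this particular call is effectively synchronous despite the async-style plumbing.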
2027 | | |
2028 | | void PInternalServiceImpl::response_slave_tablet_pull_rowset( |
2029 | | google::protobuf::RpcController* controller, const PTabletWriteSlaveDoneRequest* request, |
2030 | 0 | PTabletWriteSlaveDoneResult* response, google::protobuf::Closure* done) { |
2031 | 0 | bool ret = _heavy_work_pool.try_offer([txn_mgr = _engine.txn_manager(), request, response, |
2032 | 0 | done]() { |
2033 | 0 | brpc::ClosureGuard closure_guard(done); |
2034 | 0 | VLOG_CRITICAL << "receive the result of slave replica pull rowset from slave replica. " Line | Count | Source | 43 | 0 | #define VLOG_CRITICAL VLOG(1) |
|
2035 | 0 | "slave server=" |
2036 | 0 | << request->node_id() << ", is_succeed=" << request->is_succeed() |
2037 | 0 | << ", tablet_id=" << request->tablet_id() << ", txn_id=" << request->txn_id(); |
2038 | 0 | txn_mgr->finish_slave_tablet_pull_rowset(request->txn_id(), request->tablet_id(), |
2039 | 0 | request->node_id(), request->is_succeed()); |
2040 | 0 | Status::OK().to_protobuf(response->mutable_status()); |
2041 | 0 | }); |
2042 | 0 | if (!ret) {
2043 | 0 | offer_failed(response, done, _heavy_work_pool); |
2044 | 0 | return; |
2045 | 0 | } |
2046 | 0 | } |
2047 | | |
2048 | | void PInternalService::multiget_data(google::protobuf::RpcController* controller, |
2049 | | const PMultiGetRequest* request, PMultiGetResponse* response, |
2050 | 0 | google::protobuf::Closure* done) { |
2051 | 0 | bool ret = _heavy_work_pool.try_offer([request, response, done]() { |
2052 | 0 | signal::set_signal_task_id(request->query_id()); |
2053 | | // multi get data by rowid |
2054 | 0 | MonotonicStopWatch watch; |
2055 | 0 | watch.start(); |
2056 | 0 | brpc::ClosureGuard closure_guard(done); |
2057 | 0 | response->mutable_status()->set_status_code(0); |
2058 | 0 | SCOPED_ATTACH_TASK(ExecEnv::GetInstance()->rowid_storage_reader_tracker());
2059 | 0 | Status st = RowIdStorageReader::read_by_rowids(*request, response); |
2060 | 0 | st.to_protobuf(response->mutable_status()); |
2061 | 0 | LOG(INFO) << "multiget_data finished, cost(us):" << watch.elapsed_time() / 1000; |
2062 | 0 | }); |
2063 | 0 | if (!ret) {
2064 | 0 | offer_failed(response, done, _heavy_work_pool); |
2065 | 0 | return; |
2066 | 0 | } |
2067 | 0 | } |
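MonotonicStopWatch is Doris-internal; judging by the division by 1000 in the "cost(us)" log line above, elapsed_time() reports nanoseconds. For reference, the same pattern expressed with only the standard library (names illustrative):

#include <chrono>
#include <cstdint>

// Steady (monotonic) clock so the measurement is immune to wall-clock
// adjustments, which is what a "MonotonicStopWatch" implies.
struct StopWatch {
    std::chrono::steady_clock::time_point start_ =
            std::chrono::steady_clock::now();

    int64_t elapsed_us() const {
        return std::chrono::duration_cast<std::chrono::microseconds>(
                       std::chrono::steady_clock::now() - start_)
                .count();
    }
};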
2068 | | |
2069 | | void PInternalServiceImpl::get_tablet_rowset_versions(google::protobuf::RpcController* cntl_base, |
2070 | | const PGetTabletVersionsRequest* request, |
2071 | | PGetTabletVersionsResponse* response, |
2072 | 0 | google::protobuf::Closure* done) { |
2073 | 0 | brpc::ClosureGuard closure_guard(done); |
2074 | 0 | VLOG_DEBUG << "receive get tablet versions request: " << request->DebugString(); Line | Count | Source | 41 | 0 | #define VLOG_DEBUG VLOG(7) |
|
2075 | 0 | _engine.get_tablet_rowset_versions(request, response); |
2076 | 0 | } |
2077 | | |
2078 | | void PInternalService::glob(google::protobuf::RpcController* controller, |
2079 | | const PGlobRequest* request, PGlobResponse* response, |
2080 | 0 | google::protobuf::Closure* done) { |
2081 | 0 | bool ret = _heavy_work_pool.try_offer([request, response, done]() { |
2082 | 0 | brpc::ClosureGuard closure_guard(done); |
2083 | 0 | std::vector<io::FileInfo> files; |
2084 | 0 | Status st = io::global_local_filesystem()->safe_glob(request->pattern(), &files); |
2085 | 0 | if (st.ok()) {
2086 | 0 | for (auto& file : files) {
2087 | 0 | PGlobResponse_PFileInfo* pfile = response->add_files(); |
2088 | 0 | pfile->set_file(file.file_name); |
2089 | 0 | pfile->set_size(file.file_size); |
2090 | 0 | } |
2091 | 0 | } |
2092 | 0 | st.to_protobuf(response->mutable_status()); |
2093 | 0 | }); |
2094 | 0 | if (!ret) {
2095 | 0 | offer_failed(response, done, _heavy_work_pool); |
2096 | 0 | return; |
2097 | 0 | } |
2098 | 0 | } |
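safe_glob is Doris's own sandboxed glob; the response shape it fills is just a name plus a size per file. A rough standard-library sketch of producing the same data, assuming a plain directory listing rather than real wildcard matching (FileInfo and list_files are hypothetical names):

#include <cstdint>
#include <filesystem>
#include <string>
#include <vector>

struct FileInfo {
    std::string file_name;
    int64_t file_size;
};

// Hypothetical helper: enumerate regular files under 'dir'. Real glob
// semantics (wildcards, recursion) would need pattern matching on top.
std::vector<FileInfo> list_files(const std::string& dir) {
    std::vector<FileInfo> out;
    for (const auto& entry : std::filesystem::directory_iterator(dir)) {
        if (entry.is_regular_file()) {
            out.push_back({entry.path().filename().string(),
                           static_cast<int64_t>(entry.file_size())});
        }
    }
    return out;
}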
2099 | | |
2100 | | void PInternalService::group_commit_insert(google::protobuf::RpcController* controller, |
2101 | | const PGroupCommitInsertRequest* request, |
2102 | | PGroupCommitInsertResponse* response, |
2103 | 0 | google::protobuf::Closure* done) { |
2104 | 0 | TUniqueId load_id; |
2105 | 0 | load_id.__set_hi(request->load_id().hi()); |
2106 | 0 | load_id.__set_lo(request->load_id().lo()); |
2107 | 0 | std::shared_ptr<std::mutex> lock = std::make_shared<std::mutex>(); |
2108 | 0 | std::shared_ptr<bool> is_done = std::make_shared<bool>(false); |
2109 | 0 | bool ret = _heavy_work_pool.try_offer([this, request, response, done, load_id, lock, |
2110 | 0 | is_done]() { |
2111 | 0 | brpc::ClosureGuard closure_guard(done); |
2112 | 0 | std::shared_ptr<StreamLoadContext> ctx = std::make_shared<StreamLoadContext>(_exec_env); |
2113 | 0 | auto pipe = std::make_shared<io::StreamLoadPipe>( |
2114 | 0 | io::kMaxPipeBufferedBytes /* max_buffered_bytes */, 64 * 1024 /* min_chunk_size */, |
2115 | 0 | -1 /* total_length */, true /* use_proto */); |
2116 | 0 | ctx->pipe = pipe; |
2117 | 0 | Status st = _exec_env->new_load_stream_mgr()->put(load_id, ctx); |
2118 | 0 | if (st.ok()) {
2119 | 0 | try { |
2120 | 0 | st = _exec_plan_fragment_impl( |
2121 | 0 | request->exec_plan_fragment_request().request(), |
2122 | 0 | request->exec_plan_fragment_request().version(), |
2123 | 0 | request->exec_plan_fragment_request().compact(), |
2124 | 0 | [&, response, done, load_id, lock, is_done](RuntimeState* state, |
2125 | 0 | Status* status) { |
2126 | 0 | std::lock_guard<std::mutex> lock1(*lock); |
2127 | 0 | if (*is_done) {
2128 | 0 | return; |
2129 | 0 | } |
2130 | 0 | *is_done = true; |
2131 | 0 | brpc::ClosureGuard cb_closure_guard(done); |
2132 | 0 | response->set_label(state->import_label()); |
2133 | 0 | response->set_txn_id(state->wal_id()); |
2134 | 0 | response->set_loaded_rows(state->num_rows_load_success()); |
2135 | 0 | response->set_filtered_rows(state->num_rows_load_filtered()); |
2136 | 0 | status->to_protobuf(response->mutable_status()); |
2137 | 0 | if (!state->get_error_log_file_path().empty()) {
2138 | 0 | response->set_error_url( |
2139 | 0 | to_load_error_http_path(state->get_error_log_file_path())); |
2140 | 0 | } |
2141 | 0 | _exec_env->new_load_stream_mgr()->remove(load_id); |
2142 | 0 | }); |
2143 | 0 | } catch (const Exception& e) { |
2144 | 0 | st = e.to_status(); |
2145 | 0 | } catch (...) { |
2146 | 0 | st = Status::Error(ErrorCode::INTERNAL_ERROR, |
2147 | 0 | "_exec_plan_fragment_impl meet unknown error"); |
2148 | 0 | } |
2149 | 0 | if (!st.ok()) {
2150 | 0 | LOG(WARNING) << "exec plan fragment failed, load_id=" << print_id(load_id) |
2151 | 0 | << ", errmsg=" << st; |
2152 | 0 | std::lock_guard<std::mutex> lock1(*lock); |
2153 | 0 | if (*is_done) {
2154 | 0 | closure_guard.release(); |
2155 | 0 | } else { |
2156 | 0 | *is_done = true; |
2157 | 0 | st.to_protobuf(response->mutable_status()); |
2158 | 0 | _exec_env->new_load_stream_mgr()->remove(load_id); |
2159 | 0 | } |
2160 | 0 | } else { |
2161 | 0 | closure_guard.release(); |
2162 | 0 | for (int i = 0; i < request->data().size(); ++i) {
2163 | 0 | std::unique_ptr<PDataRow> row(new PDataRow()); |
2164 | 0 | row->CopyFrom(request->data(i)); |
2165 | 0 | st = pipe->append(std::move(row)); |
2166 | 0 | if (!st.ok()) {
2167 | 0 | break; |
2168 | 0 | } |
2169 | 0 | } |
2170 | 0 | if (st.ok()) {
2171 | 0 | static_cast<void>(pipe->finish()); |
2172 | 0 | } |
2173 | 0 | } |
2174 | 0 | } |
2175 | 0 | }); |
2176 | 0 | if (!ret) {
2177 | 0 | _exec_env->new_load_stream_mgr()->remove(load_id); |
2178 | 0 | offer_failed(response, done, _heavy_work_pool); |
2179 | 0 | return; |
2180 | 0 | } |
2181 | 0 | }
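The lock/is_done pair threaded through group_commit_insert exists because two paths race to complete the RPC: the fragment-finished callback and the synchronous error path. Whichever claims the flag first sends the reply; the other releases its closure guard without running done again. The same idea isolated into a small sketch (names illustrative):

#include <memory>
#include <mutex>

// Shared by every path that might finish the RPC; exactly one caller of
// try_claim() gets 'true' and is responsible for sending the response.
struct ReplyOnce {
    std::shared_ptr<std::mutex> lock = std::make_shared<std::mutex>();
    std::shared_ptr<bool> is_done = std::make_shared<bool>(false);

    bool try_claim() {
        std::lock_guard<std::mutex> guard(*lock);
        if (*is_done) {
            return false; // someone else already replied
        }
        *is_done = true;
        return true;
    }
};

std::once_flag would express the same thing, but the shared_ptr pair mirrors how the handler shares the state between its lambda captures.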
2182 | | |
2183 | | void PInternalService::get_wal_queue_size(google::protobuf::RpcController* controller, |
2184 | | const PGetWalQueueSizeRequest* request, |
2185 | | PGetWalQueueSizeResponse* response, |
2186 | 0 | google::protobuf::Closure* done) { |
2187 | 0 | bool ret = _heavy_work_pool.try_offer([this, request, response, done]() { |
2188 | 0 | brpc::ClosureGuard closure_guard(done); |
2189 | 0 | Status st = Status::OK(); |
2190 | 0 | auto table_id = request->table_id(); |
2191 | 0 | auto count = _exec_env->wal_mgr()->get_wal_queue_size(table_id); |
2192 | 0 | response->set_size(count); |
2193 | 0 | response->mutable_status()->set_status_code(st.code()); |
2194 | 0 | }); |
2195 | 0 | if (!ret) {
2196 | 0 | offer_failed(response, done, _heavy_work_pool); |
2197 | 0 | } |
2198 | 0 | } |
2199 | | |
2200 | | void PInternalService::get_be_resource(google::protobuf::RpcController* controller, |
2201 | | const PGetBeResourceRequest* request, |
2202 | | PGetBeResourceResponse* response, |
2203 | 0 | google::protobuf::Closure* done) { |
2204 | 0 | bool ret = _heavy_work_pool.try_offer([response, done]() { |
2205 | 0 | brpc::ClosureGuard closure_guard(done); |
2206 | 0 | int64_t mem_limit = MemInfo::mem_limit(); |
2207 | 0 | int64_t mem_usage = PerfCounters::get_vm_rss(); |
2208 | |
2209 | 0 | PGlobalResourceUsage* global_resource_usage = response->mutable_global_be_resource_usage(); |
2210 | 0 | global_resource_usage->set_mem_limit(mem_limit); |
2211 | 0 | global_resource_usage->set_mem_usage(mem_usage); |
2212 | |
2213 | 0 | Status st = Status::OK(); |
2214 | 0 | response->mutable_status()->set_status_code(st.code()); |
2215 | 0 | }); |
2216 | 0 | if (!ret) {
2217 | 0 | offer_failed(response, done, _heavy_work_pool); |
2218 | 0 | } |
2219 | 0 | } |
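PerfCounters::get_vm_rss() is Doris-internal; on Linux the same figure comes from /proc/self/status. A minimal sketch, assuming Linux and the kB units the kernel reports (read_vm_rss_kb is a hypothetical name):

#include <cstdint>
#include <fstream>
#include <string>

// Parse the "VmRSS:   123456 kB" line; returns -1 if not found.
int64_t read_vm_rss_kb() {
    std::ifstream status("/proc/self/status");
    std::string line;
    while (std::getline(status, line)) {
        if (line.rfind("VmRSS:", 0) == 0) {
            return std::stoll(line.substr(6)); // skips leading spaces
        }
    }
    return -1;
}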
2220 | | |
2221 | | void PInternalService::get_tablet_rowsets(google::protobuf::RpcController* controller, |
2222 | | const PGetTabletRowsetsRequest* request, |
2223 | | PGetTabletRowsetsResponse* response, |
2224 | 0 | google::protobuf::Closure* done) { |
2225 | 0 | DCHECK(config::is_cloud_mode()); |
2226 | 0 | auto start_time = GetMonoTimeMicros(); |
2227 | 0 | Defer defer { |
2228 | 0 | [&]() { g_process_remote_fetch_rowsets_latency << GetMonoTimeMicros() - start_time; }}; |
2229 | 0 | brpc::ClosureGuard closure_guard(done); |
2230 | 0 | LOG(INFO) << "process get tablet rowsets, request=" << request->ShortDebugString(); |
2231 | 0 | if (!request->has_tablet_id() || !request->has_version_start() || !request->has_version_end()) {
2232 | 0 | Status::InvalidArgument("missing params tablet_id/version_start/version_end")
2233 | 0 | .to_protobuf(response->mutable_status()); |
2234 | 0 | return; |
2235 | 0 | } |
2236 | 0 | CloudStorageEngine& storage = ExecEnv::GetInstance()->storage_engine().to_cloud(); |
2237 | |
2238 | 0 | auto maybe_tablet = |
2239 | 0 | storage.tablet_mgr().get_tablet(request->tablet_id(), /*warmup_data*/ false,
2240 | 0 | /*sync_delete_bitmap*/ false, /*delete_bitmap*/ nullptr,
2241 | 0 | /*local_only*/ true); |
2242 | 0 | if (!maybe_tablet) {
2243 | 0 | maybe_tablet.error().to_protobuf(response->mutable_status()); |
2244 | 0 | return; |
2245 | 0 | } |
2246 | 0 | auto tablet = maybe_tablet.value(); |
2247 | 0 | Result<CaptureRowsetResult> ret; |
2248 | 0 | { |
2249 | 0 | std::shared_lock l(tablet->get_header_lock()); |
2250 | 0 | ret = tablet->capture_consistent_rowsets_unlocked( |
2251 | 0 | {request->version_start(), request->version_end()}, |
2252 | 0 | CaptureRowsetOps {.enable_fetch_rowsets_from_peers = false}); |
2253 | 0 | } |
2254 | 0 | if (!ret) {
2255 | 0 | ret.error().to_protobuf(response->mutable_status()); |
2256 | 0 | return; |
2257 | 0 | } |
2258 | 0 | auto rowsets = std::move(ret.value().rowsets); |
2259 | 0 | for (const auto& rs : rowsets) {
2260 | 0 | RowsetMetaPB meta; |
2261 | 0 | rs->rowset_meta()->to_rowset_pb(&meta); |
2262 | 0 | response->mutable_rowsets()->Add(std::move(meta)); |
2263 | 0 | } |
2264 | 0 | if (request->has_delete_bitmap_keys()) {
2265 | 0 | DCHECK(tablet->enable_unique_key_merge_on_write()); |
2266 | 0 | auto delete_bitmap = std::move(ret.value().delete_bitmap); |
2267 | 0 | auto keys_pb = request->delete_bitmap_keys(); |
2268 | 0 | size_t len = keys_pb.rowset_ids().size(); |
2269 | 0 | DCHECK_EQ(len, keys_pb.segment_ids().size()); |
2270 | 0 | DCHECK_EQ(len, keys_pb.versions().size()); |
2271 | 0 | std::set<DeleteBitmap::BitmapKey> keys; |
2272 | 0 | for (size_t i = 0; i < len; ++i) {
2273 | 0 | RowsetId rs_id; |
2274 | 0 | rs_id.init(keys_pb.rowset_ids(i)); |
2275 | 0 | keys.emplace(rs_id, keys_pb.segment_ids(i), keys_pb.versions(i)); |
2276 | 0 | } |
2277 | 0 | auto diffset = delete_bitmap->diffset(keys).to_pb(); |
2278 | 0 | *response->mutable_delete_bitmap() = std::move(diffset); |
2279 | 0 | } |
2280 | 0 | Status::OK().to_protobuf(response->mutable_status()); |
2281 | 0 | } |
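The delete-bitmap branch above computes a delta: the caller sends the (rowset_id, segment_id, version) keys it already holds, and diffset presumably returns only the bitmaps for keys outside that set. The set-difference idea in isolation (illustrative; DeleteBitmap::diffset is Doris-internal and its exact semantics are assumed here):

#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <tuple>

// (rowset_id, segment_id, version) -- mirrors DeleteBitmap::BitmapKey.
using BitmapKey = std::tuple<std::string, uint32_t, int64_t>;

// Return the entries whose keys the caller does not already have.
std::map<BitmapKey, std::string> diffset(
        const std::map<BitmapKey, std::string>& all,
        const std::set<BitmapKey>& known) {
    std::map<BitmapKey, std::string> out;
    for (const auto& [key, bitmap] : all) {
        if (known.find(key) == known.end()) {
            out.emplace(key, bitmap);
        }
    }
    return out;
}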
2282 | | |
2283 | | } // namespace doris |