/root/doris/be/src/runtime/query_context.h
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <gen_cpp/PaloInternalService_types.h>
#include <gen_cpp/RuntimeProfile_types.h>
#include <gen_cpp/Types_types.h>
#include <glog/logging.h>

#include <atomic>
#include <cstdint>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

#include "common/config.h"
#include "common/factory_creator.h"
#include "common/object_pool.h"
#include "pipeline/dependency.h"
#include "runtime/exec_env.h"
#include "runtime/memory/mem_tracker_limiter.h"
#include "runtime/runtime_predicate.h"
#include "runtime/workload_management/resource_context.h"
#include "runtime_filter/runtime_filter_mgr.h"
#include "util/hash_util.hpp"
#include "util/threadpool.h"
#include "vec/exec/scan/scanner_scheduler.h"
#include "workload_group/workload_group.h"

namespace doris {

namespace pipeline {
class PipelineFragmentContext;
class PipelineTask;
} // namespace pipeline

struct ReportStatusRequest {
    const Status status;
    std::vector<RuntimeState*> runtime_states;
    bool done;
    TNetworkAddress coord_addr;
    TUniqueId query_id;
    int fragment_id;
    TUniqueId fragment_instance_id;
    int backend_num;
    RuntimeState* runtime_state;
    std::string load_error_url;
    std::function<void(const Status&)> cancel_fn;
};

enum class QuerySource {
    INTERNAL_FRONTEND,
    STREAM_LOAD,
    GROUP_COMMIT_LOAD,
    ROUTINE_LOAD,
    EXTERNAL_CONNECTOR
};

const std::string toString(QuerySource query_source);

// Holds the components shared by all fragments of a query.
// Some components, such as the DescriptorTbl, can be very large, and
// deserializing them again for every fragment execution would slow execution down.
class DescriptorTbl;
class QueryContext : public std::enable_shared_from_this<QueryContext> {
    ENABLE_FACTORY_CREATOR(QueryContext);

public:
    class QueryTaskController : public TaskController {
        ENABLE_FACTORY_CREATOR(QueryTaskController);

    public:
        static std::unique_ptr<TaskController> create(QueryContext* query_ctx);

        bool is_cancelled() const override;
        Status cancel(const Status& reason, int fragment_id);
        Status cancel(const Status& reason) override { return cancel(reason, -1); }

    private:
        QueryTaskController(const std::shared_ptr<QueryContext>& query_ctx)
                : query_ctx_(query_ctx) {}

        const std::weak_ptr<QueryContext> query_ctx_;
    };

    class QueryMemoryContext : public MemoryContext {
        ENABLE_FACTORY_CREATOR(QueryMemoryContext);

    public:
        static std::unique_ptr<MemoryContext> create();

        int64_t revokable_bytes() override {
            // TODO: report how many bytes this query could spill.
            return 0;
        }

        bool ready_do_revoke() override {
            // TODO: check whether revocation can safely start now.
            return true;
        }

        Status revoke(int64_t bytes) override {
            // TODO: actually revoke (spill) the requested bytes.
            return Status::OK();
        }

        Status enter_arbitration(Status reason) override {
            // TODO: pause the pipeline.
            return Status::OK();
        }

        Status leave_arbitration(Status reason) override {
            // TODO: restart the pipeline.
            return Status::OK();
        }

    private:
        QueryMemoryContext() = default;
    };
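
    // A sketch of how a memory arbitrator might drive the hooks above once the
    // TODOs are filled in. The `arbitrate_query` helper is hypothetical and
    // only illustrates the intended call order:
    //
    //   Status arbitrate_query(MemoryContext* mem_ctx, int64_t need_bytes) {
    //       if (mem_ctx->revokable_bytes() < need_bytes || !mem_ctx->ready_do_revoke()) {
    //           return Status::OK(); // nothing to revoke, or not yet safe to do so
    //       }
    //       RETURN_IF_ERROR(mem_ctx->enter_arbitration(Status::OK())); // pause pipeline
    //       Status st = mem_ctx->revoke(need_bytes);                   // spill
    //       RETURN_IF_ERROR(mem_ctx->leave_arbitration(Status::OK())); // resume pipeline
    //       return st;
    //   }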

    static std::shared_ptr<QueryContext> create(TUniqueId query_id, ExecEnv* exec_env,
                                                const TQueryOptions& query_options,
                                                TNetworkAddress coord_addr, bool is_nereids,
                                                TNetworkAddress current_connect_fe,
                                                QuerySource query_type);

    // Use QueryContext::create instead. This constructor cannot be made private
    // because ENABLE_FACTORY_CREATOR::create_shared needs to call it.
    QueryContext(TUniqueId query_id, ExecEnv* exec_env, const TQueryOptions& query_options,
                 TNetworkAddress coord_addr, bool is_nereids, TNetworkAddress current_connect_fe,
                 QuerySource query_type);

    ~QueryContext();

    void init_query_task_controller();

    ExecEnv* exec_env() const { return _exec_env; }

    bool is_timeout(timespec now) const {
        if (_timeout_second <= 0) {
            return false;
        }
        return _query_watcher.elapsed_time_seconds(now) > _timeout_second;
    }
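
    // Illustrative only: a periodic watchdog could use is_timeout() to cancel
    // overdue queries. This loop body is a hypothetical sketch, not code from
    // this repository:
    //
    //   timespec now;
    //   clock_gettime(CLOCK_MONOTONIC, &now);
    //   if (query_ctx->is_timeout(now)) {
    //       query_ctx->cancel(Status::TimedOut("query exceeded its timeout"));
    //   }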

    void set_thread_token(int concurrency, bool is_serial) {
        _thread_token = _exec_env->scanner_scheduler()->new_limited_scan_pool_token(
                is_serial ? ThreadPool::ExecutionMode::SERIAL
                          : ThreadPool::ExecutionMode::CONCURRENT,
                concurrency);
    }
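
    // The token caps how many scanner threads one query may occupy. For
    // example (hypothetical values), a query that must scan in order would
    // request a serial token, while an ordinary query might cap itself at
    // four concurrent scanners:
    //
    //   query_ctx->set_thread_token(/*concurrency=*/1, /*is_serial=*/true);
    //   // ... or ...
    //   query_ctx->set_thread_token(/*concurrency=*/4, /*is_serial=*/false);
    //   ThreadPoolToken* token = query_ctx->get_token(); // null if never set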

    ThreadPoolToken* get_token() { return _thread_token.get(); }

    void set_ready_to_execute(Status reason);

    [[nodiscard]] bool is_cancelled() const { return !_exec_status.ok(); }

    void cancel_all_pipeline_context(const Status& reason, int fragment_id = -1);
    std::string print_all_pipeline_context();
    void set_pipeline_context(const int fragment_id,
                              std::shared_ptr<pipeline::PipelineFragmentContext> pip_ctx);
    void cancel(Status new_status, int fragment_id = -1);

    [[nodiscard]] Status exec_status() { return _exec_status.status(); }

    void set_execution_dependency_ready();

    void set_memory_sufficient(bool sufficient);

    void set_ready_to_execute_only();

    bool has_runtime_predicate(int source_node_id) {
        return _runtime_predicates.contains(source_node_id);
    }

    vectorized::RuntimePredicate& get_runtime_predicate(int source_node_id) {
        DCHECK(has_runtime_predicate(source_node_id));
        return _runtime_predicates.find(source_node_id)->second;
    }

    void init_runtime_predicates(const std::vector<TTopnFilterDesc>& topn_filter_descs) {
        for (const auto& desc : topn_filter_descs) {
            _runtime_predicates.try_emplace(desc.source_node_id, desc);
        }
    }
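
    // Usage sketch (illustrative): a caller checks for a topn runtime
    // predicate before reading it, mirroring the DCHECK above:
    //
    //   if (query_ctx->has_runtime_predicate(node_id)) {
    //       auto& pred = query_ctx->get_runtime_predicate(node_id);
    //       // ... apply `pred` to prune rows during the scan ...
    //   }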

    void set_workload_group(WorkloadGroupPtr& wg);

    int execution_timeout() const {
        return _query_options.__isset.execution_timeout ? _query_options.execution_timeout
                                                        : _query_options.query_timeout;
    }
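
    // Thrift optional fields carry an __isset flag, and the accessors in this
    // class consistently fall back to a default when the coordinator did not
    // set the field. A minimal illustration of the same pattern, with a
    // hypothetical field and default:
    //
    //   int some_option() const {
    //       return _query_options.__isset.some_option ? _query_options.some_option
    //                                                 : kSomeDefault;
    //   }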

    int32_t runtime_filter_wait_time_ms() const {
        return _query_options.runtime_filter_wait_time_ms;
    }

    bool runtime_filter_wait_infinitely() const {
        return _query_options.__isset.runtime_filter_wait_infinitely &&
               _query_options.runtime_filter_wait_infinitely;
    }

    int be_exec_version() const {
        if (!_query_options.__isset.be_exec_version) {
            return 0;
        }
        return _query_options.be_exec_version;
    }

    [[nodiscard]] int64_t get_fe_process_uuid() const {
        return _query_options.__isset.fe_process_uuid ? _query_options.fe_process_uuid : 0;
    }

    bool ignore_runtime_filter_error() const {
        return _query_options.__isset.ignore_runtime_filter_error
                       ? _query_options.ignore_runtime_filter_error
                       : false;
    }

    bool enable_force_spill() const {
        return _query_options.__isset.enable_force_spill && _query_options.enable_force_spill;
    }

    const TQueryOptions& query_options() const { return _query_options; }

    // Global runtime filter manager. Runtime filters that have a remote target
    // or need a local merge should register here. Such filters must finish the
    // local merge work before publish() or push_to_remote() is called.
    RuntimeFilterMgr* runtime_filter_mgr() { return _runtime_filter_mgr.get(); }
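
    // The intended lifecycle, paraphrased from the comment above (the producer
    // steps are illustrative; the exact RuntimeFilterMgr API is not shown in
    // this header):
    //
    //   RuntimeFilterMgr* mgr = query_ctx->runtime_filter_mgr();
    //   // 1. register the filter with `mgr` when the fragment is prepared;
    //   // 2. merge locally produced filter instances into one;
    //   // 3. only then publish() locally or push_to_remote() to the merge node.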

    TUniqueId query_id() const { return _query_id; }

    vectorized::SimplifiedScanScheduler* get_scan_scheduler() { return _scan_task_scheduler; }

    vectorized::SimplifiedScanScheduler* get_remote_scan_scheduler() {
        return _remote_scan_task_scheduler;
    }

    pipeline::Dependency* get_execution_dependency() { return _execution_dependency.get(); }
    pipeline::Dependency* get_memory_sufficient_dependency() {
        return _memory_sufficient_dependency.get();
    }

    std::vector<pipeline::PipelineTask*> get_revocable_tasks() const;

    Status revoke_memory();

    doris::pipeline::TaskScheduler* get_pipe_exec_scheduler();

    void set_merge_controller_handler(
            std::shared_ptr<RuntimeFilterMergeControllerEntity>& handler) {
        _merge_controller_handler = handler;
    }
    std::shared_ptr<RuntimeFilterMergeControllerEntity> get_merge_controller_handler() const {
        return _merge_controller_handler;
    }

    bool is_nereids() const { return _is_nereids; }

    WorkloadGroupPtr workload_group() const { return _resource_ctx->workload_group(); }
    std::shared_ptr<MemTrackerLimiter> query_mem_tracker() const {
        return _resource_ctx->memory_context()->mem_tracker();
    }

    void increase_revoking_tasks_count() { _revoking_tasks_count.fetch_add(1); }

    void decrease_revoking_tasks_count();

    int get_revoking_tasks_count() const { return _revoking_tasks_count.load(); }

    void get_revocable_info(size_t* revocable_size, size_t* memory_usage,
                            bool* has_running_task) const;
    size_t get_revocable_size() const;

    // Called by the workload group manager to set the query's memory limit
    // based on its slots. If the user set a query limit explicitly, the
    // smaller of the two limits should be used.
    void set_mem_limit(int64_t new_mem_limit) {
        _resource_ctx->memory_context()->mem_tracker()->set_limit(new_mem_limit);
    }

    int64_t get_mem_limit() const {
        return _resource_ctx->memory_context()->mem_tracker()->limit();
    }

    // The new memory limit must not exceed the user-set memory limit.
    void set_adjusted_mem_limit(int64_t new_mem_limit) {
        _adjusted_mem_limit = std::min<int64_t>(new_mem_limit, _user_set_mem_limit);
    }

    // The expected memory limit once the workload group has reached its limit.
    int64_t adjusted_mem_limit() { return _adjusted_mem_limit; }

    MemTrackerLimiter* get_mem_tracker() {
        return _resource_ctx->memory_context()->mem_tracker().get();
    }

    int32_t get_slot_count() const {
        return _query_options.__isset.query_slot_count ? _query_options.query_slot_count : 1;
    }
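
    // A hedged sketch of slot-based limiting (the formula is an assumption for
    // illustration; the real policy lives in the workload group manager): with
    // a workload group memory limit of 16 GiB, 64 total slots, and
    // get_slot_count() == 4, the slot-derived limit would be
    //
    //   16 GiB / 64 * 4 = 1 GiB,
    //
    // and set_mem_limit(std::min(user_limit, slot_limit)) would apply the
    // smaller of that value and any explicit user limit.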

    DescriptorTbl* desc_tbl = nullptr;
    bool set_rsc_info = false;
    std::string user;
    std::string group;
    TNetworkAddress coord_addr;
    TNetworkAddress current_connect_fe;
    TQueryGlobals query_globals;

    ObjectPool obj_pool;

    std::shared_ptr<ResourceContext> resource_ctx() { return _resource_ctx; }

    // plan node id -> TFileScanRangeParams
    // only for file scan node
    std::map<int, TFileScanRangeParams> file_scan_range_params_map;

    void add_using_brpc_stub(const TNetworkAddress& network_address,
                             std::shared_ptr<PBackendService_Stub> brpc_stub) {
        if (network_address.port == 0) {
            return;
        }
        std::lock_guard<std::mutex> lock(_brpc_stubs_mutex);
        if (!_using_brpc_stubs.contains(network_address)) {
            _using_brpc_stubs.emplace(network_address, brpc_stub);
        }

        DCHECK_EQ(_using_brpc_stubs[network_address].get(), brpc_stub.get());
    }

    std::unordered_map<TNetworkAddress, std::shared_ptr<PBackendService_Stub>>
    get_using_brpc_stubs() {
        std::lock_guard<std::mutex> lock(_brpc_stubs_mutex);
        return _using_brpc_stubs;
    }
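
    // Illustrative usage (the address values and `stub` are made up): the map
    // remembers every stub the query has used so that the same address always
    // resolves to the same stub, which the DCHECK above enforces:
    //
    //   TNetworkAddress addr;
    //   addr.hostname = "10.0.0.1";
    //   addr.port = 8060;
    //   query_ctx->add_using_brpc_stub(addr, stub);     // first call caches `stub`
    //   query_ctx->add_using_brpc_stub(addr, stub);     // later calls must pass the same stub
    //   auto stubs = query_ctx->get_using_brpc_stubs(); // snapshot copy taken under the mutex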

    void set_low_memory_mode() { _low_memory_mode = true; }

    bool low_memory_mode() { return _low_memory_mode; }

    void disable_reserve_memory() { _enable_reserve_memory = false; }

    bool enable_reserve_memory() const {
        return _query_options.__isset.enable_reserve_memory &&
               _query_options.enable_reserve_memory && _enable_reserve_memory;
    }

    // Keeps the highest-priority pause reason:
    // QUERY_MEMORY_EXCEEDED > WORKLOAD_GROUP_MEMORY_EXCEEDED > anything else.
    void update_paused_reason(const Status& st) {
        std::lock_guard l(_paused_mutex);
        if (_paused_reason.is<ErrorCode::QUERY_MEMORY_EXCEEDED>()) {
            return;
        } else if (_paused_reason.is<ErrorCode::WORKLOAD_GROUP_MEMORY_EXCEEDED>()) {
            if (st.is<ErrorCode::QUERY_MEMORY_EXCEEDED>()) {
                _paused_reason = st;
            }
            return;
        } else {
            _paused_reason = st;
        }
    }

    Status paused_reason() {
        std::lock_guard l(_paused_mutex);
        return _paused_reason;
    }
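
    // A sketch of the precedence (statuses are illustrative):
    //
    //   query_ctx->update_paused_reason(Status::Error<ErrorCode::WORKLOAD_GROUP_MEMORY_EXCEEDED>("wg full"));
    //   query_ctx->update_paused_reason(Status::Error<ErrorCode::QUERY_MEMORY_EXCEEDED>("query over limit")); // upgrades
    //   query_ctx->update_paused_reason(Status::InternalError("other"));  // ignored: lower priority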

    bool is_pure_load_task() {
        return _query_source == QuerySource::STREAM_LOAD ||
               _query_source == QuerySource::ROUTINE_LOAD ||
               _query_source == QuerySource::GROUP_COMMIT_LOAD;
    }

    std::string debug_string();

    void set_load_error_url(std::string error_url);
    std::string get_load_error_url();

private:
    int _timeout_second;
    TUniqueId _query_id;
    ExecEnv* _exec_env = nullptr;
    MonotonicStopWatch _query_watcher;
    bool _is_nereids = false;

    std::shared_ptr<ResourceContext> _resource_ctx;

    std::mutex _revoking_tasks_mutex;
    std::atomic<int> _revoking_tasks_count = 0;

    // A token used to submit olap scanners to the "_limited_scan_thread_pool".
    // The token is created from the "_limited_scan_thread_pool" in the exec env
    // and is shared by all instances of this query, so it caps the number of
    // threads one query may use for execution. If the token is not set, the
    // scanners run in the exec env's "_scan_thread_pool" instead.
    std::unique_ptr<ThreadPoolToken> _thread_token {nullptr};

    void _init_resource_context();
    void _init_query_mem_tracker();

    std::unordered_map<int, vectorized::RuntimePredicate> _runtime_predicates;

    std::unique_ptr<RuntimeFilterMgr> _runtime_filter_mgr;
    const TQueryOptions _query_options;

    // All pipeline tasks use the same query context to report status, so an
    // `_exec_status` is needed to carry the real error message on failure.
    AtomicStatus _exec_status;

    doris::pipeline::TaskScheduler* _task_scheduler = nullptr;
    vectorized::SimplifiedScanScheduler* _scan_task_scheduler = nullptr;
    vectorized::SimplifiedScanScheduler* _remote_scan_task_scheduler = nullptr;
    // Indicates whether the 2nd-phase RPC has been received from the FE.
    std::unique_ptr<pipeline::Dependency> _execution_dependency;
    // Indicates whether memory is sufficient to execute.
    std::unique_ptr<pipeline::Dependency> _memory_sufficient_dependency;

    // This shared ptr is never dereferenced here; it only keeps the object
    // alive. The runtime filter manager holds a weak ptr to the same object.
    std::shared_ptr<RuntimeFilterMergeControllerEntity> _merge_controller_handler;

    std::map<int, std::weak_ptr<pipeline::PipelineFragmentContext>> _fragment_id_to_pipeline_ctx;
    std::mutex _pipeline_map_write_lock;

    std::mutex _paused_mutex;
    Status _paused_reason;
    std::atomic<int64_t> _paused_count = 0;
    std::atomic<bool> _low_memory_mode = false;
    std::atomic<bool> _enable_reserve_memory = true;
    int64_t _user_set_mem_limit = 0;
    std::atomic<int64_t> _adjusted_mem_limit = 0;

    std::mutex _profile_mutex;
    timespec _query_arrival_timestamp;
    // Distinguishes the query source. For queries that come from the FE, the FE
    // keeps in-memory structures that help manage the query.
    QuerySource _query_source;

    std::mutex _brpc_stubs_mutex;
    std::unordered_map<TNetworkAddress, std::shared_ptr<PBackendService_Stub>> _using_brpc_stubs;

    // When a pipeline fragment is closed, it registers its profile in this map
    // via add_fragment_profile. The flattened profile of one fragment looks like:
    //   Pipeline 0
    //     PipelineTask 0
    //       Operator 1
    //       Operator 2
    //       Scanner
    //     PipelineTask 1
    //       Operator 1
    //       Operator 2
    //       Scanner
    //   Pipeline 1
    //     PipelineTask 2
    //       Operator 3
    //     PipelineTask 3
    //       Operator 3
    // fragment_id -> list<profile>
    std::unordered_map<int, std::vector<std::shared_ptr<TRuntimeProfileTree>>> _profile_map;
    std::unordered_map<int, std::shared_ptr<TRuntimeProfileTree>> _load_channel_profile_map;

    void _report_query_profile();

    std::unordered_map<int, std::vector<std::shared_ptr<TRuntimeProfileTree>>>
    _collect_realtime_query_profile() const;

    std::mutex _error_url_lock;
    std::string _load_error_url;

public:
    // When a pipeline fragment is closed, it registers its profile in
    // _profile_map by calling this method.
    void add_fragment_profile(
            int fragment_id,
            const std::vector<std::shared_ptr<TRuntimeProfileTree>>& pipeline_profile,
            std::shared_ptr<TRuntimeProfileTree> load_channel_profile);
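
    // Sketch of the reporting path (the call site and the profile variables
    // are hypothetical): when fragment 0 finishes, its pipeline profiles are
    // registered and can later be folded into a realtime exec status report:
    //
    //   query_ctx->add_fragment_profile(/*fragment_id=*/0, pipeline_profiles,
    //                                   load_channel_profile);
    //   TReportExecStatusParams params = query_ctx->get_realtime_exec_status();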

    TReportExecStatusParams get_realtime_exec_status() const;

    bool enable_profile() const {
        return _query_options.__isset.enable_profile && _query_options.enable_profile;
    }

    timespec get_query_arrival_timestamp() const { return this->_query_arrival_timestamp; }
    QuerySource get_query_source() const { return this->_query_source; }
};
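
// End-to-end sketch (illustrative; the options, addresses, and fragment_ctx
// are made up): a query context is created once per query, fragments attach
// to it, and it is cancelled with a reason when anything fails:
//
//   auto query_ctx = QueryContext::create(query_id, ExecEnv::GetInstance(),
//                                         query_options, coord_addr,
//                                         /*is_nereids=*/true, connect_fe_addr,
//                                         QuerySource::INTERNAL_FRONTEND);
//   query_ctx->init_query_task_controller();
//   query_ctx->set_pipeline_context(/*fragment_id=*/0, fragment_ctx);
//   if (!st.ok()) {
//       query_ctx->cancel(st); // cancels every fragment of this query
//   }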

} // namespace doris