Coverage Report

Created: 2025-09-30 22:18

/root/doris/be/src/runtime/query_context.h
Line |   Count | Source
   1 |         | // Licensed to the Apache Software Foundation (ASF) under one
   2 |         | // or more contributor license agreements.  See the NOTICE file
   3 |         | // distributed with this work for additional information
   4 |         | // regarding copyright ownership.  The ASF licenses this file
   5 |         | // to you under the Apache License, Version 2.0 (the
   6 |         | // "License"); you may not use this file except in compliance
   7 |         | // with the License.  You may obtain a copy of the License at
   8 |         | //
   9 |         | //   http://www.apache.org/licenses/LICENSE-2.0
  10 |         | //
  11 |         | // Unless required by applicable law or agreed to in writing,
  12 |         | // software distributed under the License is distributed on an
  13 |         | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  14 |         | // KIND, either express or implied.  See the License for the
  15 |         | // specific language governing permissions and limitations
  16 |         | // under the License.
  17 |         |
  18 |         | #pragma once
  19 |         |
  20 |         | #include <gen_cpp/PaloInternalService_types.h>
  21 |         | #include <gen_cpp/RuntimeProfile_types.h>
  22 |         | #include <gen_cpp/Types_types.h>
  23 |         | #include <glog/logging.h>
  24 |         |
  25 |         | #include <atomic>
  26 |         | #include <cstdint>
  27 |         | #include <memory>
  28 |         | #include <mutex>
  29 |         | #include <string>
  30 |         | #include <unordered_map>
  31 |         |
  32 |         | #include "common/config.h"
  33 |         | #include "common/factory_creator.h"
  34 |         | #include "common/object_pool.h"
  35 |         | #include "runtime/exec_env.h"
  36 |         | #include "runtime/memory/mem_tracker_limiter.h"
  37 |         | #include "runtime/runtime_predicate.h"
  38 |         | #include "runtime/workload_management/resource_context.h"
  39 |         | #include "runtime_filter/runtime_filter_mgr.h"
  40 |         | #include "util/hash_util.hpp"
  41 |         | #include "util/threadpool.h"
  42 |         | #include "vec/exec/scan/scanner_scheduler.h"
  43 |         | #include "workload_group/workload_group.h"
  44 |         |
  45 |         | namespace doris {
  46 |         |
  47 |         | namespace pipeline {
  48 |         | class PipelineFragmentContext;
  49 |         | class PipelineTask;
  50 |         | class Dependency;
  51 |         | } // namespace pipeline
  52 |         |
  53 |         | struct ReportStatusRequest {
  54 |         |     const Status status;
  55 |         |     std::vector<RuntimeState*> runtime_states;
  56 |         |     bool done;
  57 |         |     TNetworkAddress coord_addr;
  58 |         |     TUniqueId query_id;
  59 |         |     int fragment_id;
  60 |         |     TUniqueId fragment_instance_id;
  61 |         |     int backend_num;
  62 |         |     RuntimeState* runtime_state;
  63 |         |     std::string load_error_url;
  64 |         |     std::string first_error_msg;
  65 |         |     std::function<void(const Status&)> cancel_fn;
  66 |         | };
  67 |         |
  68 |         | enum class QuerySource {
  69 |         |     INTERNAL_FRONTEND,
  70 |         |     STREAM_LOAD,
  71 |         |     GROUP_COMMIT_LOAD,
  72 |         |     ROUTINE_LOAD,
  73 |         |     EXTERNAL_CONNECTOR
  74 |         | };
  75 |         |
  76 |         | const std::string toString(QuerySource query_source);
  77 |         |
  78 |         | // Save the common components of fragments in a query.
  79 |         | // Some components like DescriptorTbl may be very large
  80 |         | // that will slow down each execution of fragments when DeSer them every time.
  81 |         | class DescriptorTbl;
  82 |         | class QueryContext : public std::enable_shared_from_this<QueryContext> {
  83 |         |     ENABLE_FACTORY_CREATOR(QueryContext);
  84 |         |
  85 |         | public:
  86 |         |     static std::shared_ptr<QueryContext> create(TUniqueId query_id, ExecEnv* exec_env,
  87 |         |                                                 const TQueryOptions& query_options,
  88 |         |                                                 TNetworkAddress coord_addr, bool is_nereids,
  89 |         |                                                 TNetworkAddress current_connect_fe,
  90 |         |                                                 QuerySource query_type);
  91 |         |
  92 |         |     // use QueryContext::create, cannot be made private because of ENABLE_FACTORY_CREATOR::create_shared.
  93 |         |     QueryContext(TUniqueId query_id, ExecEnv* exec_env, const TQueryOptions& query_options,
  94 |         |                  TNetworkAddress coord_addr, bool is_nereids, TNetworkAddress current_connect_fe,
  95 |         |                  QuerySource query_type);
  96 |         |
  97 |         |     ~QueryContext();
  98 |         |
  99 |         |     void init_query_task_controller();
 100 |         |
 101 |       0 |     ExecEnv* exec_env() const { return _exec_env; }
 102 |         |
 103 |       0 |     bool is_timeout(timespec now) const {
 104 |       0 |         if (_timeout_second <= 0) {
 105 |       0 |             return false;
 106 |       0 |         }
 107 |       0 |         return _query_watcher.elapsed_time_seconds(now) > _timeout_second;
 108 |       0 |     }
 109 |         |
 110 |       0 |     int64_t get_remaining_query_time_seconds() const {
 111 |       0 |         timespec now;
 112 |       0 |         clock_gettime(CLOCK_MONOTONIC, &now);
 113 |       0 |         if (is_timeout(now)) {
 114 |       0 |             return -1;
 115 |       0 |         }
 116 |       0 |         int64_t elapsed_seconds = _query_watcher.elapsed_time_seconds(now);
 117 |       0 |         return _timeout_second - elapsed_seconds;
 118 |       0 |     }
 119 |         |
 120 |         |     void set_ready_to_execute(Status reason);
 121 |         |
 122 |   3.24M |     [[nodiscard]] bool is_cancelled() const { return !_exec_status.ok(); }
 123 |         |
 124 |         |     void cancel_all_pipeline_context(const Status& reason, int fragment_id = -1);
 125 |         |     std::string print_all_pipeline_context();
 126 |         |     void set_pipeline_context(const int fragment_id,
 127 |         |                               std::shared_ptr<pipeline::PipelineFragmentContext> pip_ctx);
 128 |         |     void cancel(Status new_status, int fragment_id = -1);
 129 |         |
 130 |      20 |     [[nodiscard]] Status exec_status() { return _exec_status.status(); }
 131 |         |
 132 |         |     void set_execution_dependency_ready();
 133 |         |
 134 |         |     void set_memory_sufficient(bool sufficient);
 135 |         |
 136 |         |     void set_ready_to_execute_only();
 137 |         |
 138 |      22 |     bool has_runtime_predicate(int source_node_id) {
 139 |      22 |         return _runtime_predicates.contains(source_node_id);
 140 |      22 |     }
 141 |         |
 142 |       0 |     vectorized::RuntimePredicate& get_runtime_predicate(int source_node_id) {
 143 |       0 |         DCHECK(has_runtime_predicate(source_node_id));
 144 |       0 |         return _runtime_predicates.find(source_node_id)->second;
 145 |       0 |     }
 146 |         |
 147 |       0 |     void init_runtime_predicates(const std::vector<TTopnFilterDesc>& topn_filter_descs) {
 148 |       0 |         for (auto desc : topn_filter_descs) {
 149 |       0 |             _runtime_predicates.try_emplace(desc.source_node_id, desc);
 150 |       0 |         }
 151 |       0 |     }
 152 |         |
 153 |         |     Status set_workload_group(WorkloadGroupPtr& wg);
 154 |         |
 155 |       6 |     int execution_timeout() const {
 156 |       6 |         return _query_options.__isset.execution_timeout ? _query_options.execution_timeout
 157 |       6 |                                                         : _query_options.query_timeout;
 158 |       6 |     }
 159 |         |
 160 |     115 |     int32_t runtime_filter_wait_time_ms() const {
 161 |     115 |         return _query_options.runtime_filter_wait_time_ms;
 162 |     115 |     }
 163 |         |
 164 |     121 |     bool runtime_filter_wait_infinitely() const {
 165 |     121 |         return _query_options.__isset.runtime_filter_wait_infinitely &&
 166 |     121 |                _query_options.runtime_filter_wait_infinitely;
 167 |     121 |     }
 168 |         |
 169 |       0 |     int be_exec_version() const {
 170 |       0 |         if (!_query_options.__isset.be_exec_version) {
 171 |       0 |             return 0;
 172 |       0 |         }
 173 |       0 |         return _query_options.be_exec_version;
 174 |       0 |     }
 175 |         |
 176 |       0 |     [[nodiscard]] int64_t get_fe_process_uuid() const {
 177 |       0 |         return _query_options.__isset.fe_process_uuid ? _query_options.fe_process_uuid : 0;
 178 |       0 |     }
 179 |         |
 180 |       0 |     bool ignore_runtime_filter_error() const {
 181 |       0 |         return _query_options.__isset.ignore_runtime_filter_error
 182 |       0 |                        ? _query_options.ignore_runtime_filter_error
 183 |       0 |                        : false;
 184 |       0 |     }
 185 |         |
 186 |       0 |     bool enable_force_spill() const {
 187 |       0 |         return _query_options.__isset.enable_force_spill && _query_options.enable_force_spill;
 188 |       0 |     }
 189 |   7.49k |     const TQueryOptions& query_options() const { return _query_options; }
 190 |         |
 191 |         |     // global runtime filter mgr, the runtime filter have remote target or
 192 |         |     // need local merge should regist here. before publish() or push_to_remote()
 193 |         |     // the runtime filter should do the local merge work
 194 |     107 |     RuntimeFilterMgr* runtime_filter_mgr() { return _runtime_filter_mgr.get(); }
 195 |         |
 196 |   72.1k |     TUniqueId query_id() const { return _query_id; }
 197 |         |
 198 |       0 |     vectorized::SimplifiedScanScheduler* get_scan_scheduler() { return _scan_task_scheduler; }
 199 |         |
 200 |       0 |     vectorized::SimplifiedScanScheduler* get_remote_scan_scheduler() {
 201 |       0 |         return _remote_scan_task_scheduler;
 202 |       0 |     }
 203 |         |
 204 |   72.1k |     pipeline::Dependency* get_execution_dependency() { return _execution_dependency.get(); }
 205 |   72.1k |     pipeline::Dependency* get_memory_sufficient_dependency() {
 206 |   72.1k |         return _memory_sufficient_dependency.get();
 207 |   72.1k |     }
 208 |         |
 209 |         |     doris::pipeline::TaskScheduler* get_pipe_exec_scheduler();
 210 |         |
 211 |         |     void set_merge_controller_handler(
 212 |       0 |             std::shared_ptr<RuntimeFilterMergeControllerEntity>& handler) {
 213 |       0 |         _merge_controller_handler = handler;
 214 |       0 |     }
 215 |       0 |     std::shared_ptr<RuntimeFilterMergeControllerEntity> get_merge_controller_handler() const {
 216 |       0 |         return _merge_controller_handler;
 217 |       0 |     }
 218 |         |
 219 |      60 |     bool is_nereids() const { return _is_nereids; }
 220 |         |
 221 |    122k |     WorkloadGroupPtr workload_group() const { return _resource_ctx->workload_group(); }
 222 |    447k |     std::shared_ptr<MemTrackerLimiter> query_mem_tracker() const {
 223 |    447k |         DCHECK(_resource_ctx->memory_context()->mem_tracker() != nullptr);
 224 |    447k |         return _resource_ctx->memory_context()->mem_tracker();
 225 |    447k |     }
 226 |         |
 227 |      11 |     int32_t get_slot_count() const {
 228 |      11 |         return _query_options.__isset.query_slot_count ? _query_options.query_slot_count : 1;
 229 |      11 |     }
 230 |         |
 231 |         |     DescriptorTbl* desc_tbl = nullptr;
 232 |         |     bool set_rsc_info = false;
 233 |         |     std::string user;
 234 |         |     std::string group;
 235 |         |     TNetworkAddress coord_addr;
 236 |         |     TNetworkAddress current_connect_fe;
 237 |         |     TQueryGlobals query_globals;
 238 |       0 |     const TQueryGlobals get_query_globals() const { return query_globals; }
 239 |         |
 240 |         |     ObjectPool obj_pool;
 241 |         |
 242 |   6.19k |     std::shared_ptr<ResourceContext> resource_ctx() { return _resource_ctx; }
 243 |         |
 244 |         |     // plan node id -> TFileScanRangeParams
 245 |         |     // only for file scan node
 246 |         |     std::map<int, TFileScanRangeParams> file_scan_range_params_map;
 247 |         |
 248 |         |     void add_using_brpc_stub(const TNetworkAddress& network_address,
 249 |       0 |                              std::shared_ptr<PBackendService_Stub> brpc_stub) {
 250 |       0 |         if (network_address.port == 0) {
 251 |       0 |             return;
 252 |       0 |         }
 253 |       0 |         std::lock_guard<std::mutex> lock(_brpc_stubs_mutex);
 254 |       0 |         if (!_using_brpc_stubs.contains(network_address)) {
 255 |       0 |             _using_brpc_stubs.emplace(network_address, brpc_stub);
 256 |       0 |         }
 257 |         |
 258 |       0 |         DCHECK_EQ(_using_brpc_stubs[network_address].get(), brpc_stub.get());
 259 |       0 |     }
 260 |         |
 261 |   72.5k |     void set_ai_resources(std::map<std::string, TAIResource> ai_resources) {
 262 |   72.5k |         _ai_resources =
 263 |   72.5k |                 std::make_unique<std::map<std::string, TAIResource>>(std::move(ai_resources));
 264 |   72.5k |     }
 265 |         |
 266 |      15 |     const std::map<std::string, TAIResource>& get_ai_resources() const {
 267 |      15 |         if (_ai_resources == nullptr) {
 268 |       0 |             throw Status::InternalError("AI resources not found");
 269 |       0 |         }
 270 |      15 |         return *_ai_resources;
 271 |      15 |     }
 272 |         |
 273 |         |     std::unordered_map<TNetworkAddress, std::shared_ptr<PBackendService_Stub>>
 274 |       0 |     get_using_brpc_stubs() {
 275 |       0 |         std::lock_guard<std::mutex> lock(_brpc_stubs_mutex);
 276 |       0 |         return _using_brpc_stubs;
 277 |       0 |     }
 278 |         |
 279 |   1.51k |     void set_low_memory_mode() {
 280 |         |         // will not return from low memory mode to non-low memory mode.
 281 |   1.51k |         _resource_ctx->task_controller()->set_low_memory_mode(true);
 282 |   1.51k |     }
 283 |   1.07M |     bool low_memory_mode() { return _resource_ctx->task_controller()->low_memory_mode(); }
 284 |         |
 285 |    122k |     bool is_pure_load_task() {
 286 |    122k |         return _query_source == QuerySource::STREAM_LOAD ||
 287 |    122k |                _query_source == QuerySource::ROUTINE_LOAD ||
 288 |    122k |                _query_source == QuerySource::GROUP_COMMIT_LOAD;
 289 |    122k |     }
 290 |         |
 291 |         |     void set_load_error_url(std::string error_url);
 292 |         |     std::string get_load_error_url();
 293 |         |     void set_first_error_msg(std::string error_msg);
 294 |         |     std::string get_first_error_msg();
 295 |         |
 296 |         | private:
 297 |         |     friend class QueryTaskController;
 298 |         |
 299 |         |     int _timeout_second;
 300 |         |     TUniqueId _query_id;
 301 |         |     ExecEnv* _exec_env = nullptr;
 302 |         |     MonotonicStopWatch _query_watcher;
 303 |         |     bool _is_nereids = false;
 304 |         |
 305 |         |     std::shared_ptr<ResourceContext> _resource_ctx;
 306 |         |
 307 |         |     void _init_resource_context();
 308 |         |     void _init_query_mem_tracker();
 309 |         |
 310 |         |     std::unordered_map<int, vectorized::RuntimePredicate> _runtime_predicates;
 311 |         |
 312 |         |     std::unique_ptr<RuntimeFilterMgr> _runtime_filter_mgr;
 313 |         |     const TQueryOptions _query_options;
 314 |         |
 315 |         |     // All pipeline tasks use the same query context to report status. So we need a `_exec_status`
 316 |         |     // to report the real message if failed.
 317 |         |     AtomicStatus _exec_status;
 318 |         |
 319 |         |     doris::pipeline::TaskScheduler* _task_scheduler = nullptr;
 320 |         |     vectorized::SimplifiedScanScheduler* _scan_task_scheduler = nullptr;
 321 |         |     vectorized::SimplifiedScanScheduler* _remote_scan_task_scheduler = nullptr;
 322 |         |     // This dependency indicates if the 2nd phase RPC received from FE.
 323 |         |     std::unique_ptr<pipeline::Dependency> _execution_dependency;
 324 |         |     // This dependency indicates if memory is sufficient to execute.
 325 |         |     std::unique_ptr<pipeline::Dependency> _memory_sufficient_dependency;
 326 |         |
 327 |         |     // This shared ptr is never used. It is just a reference to hold the object.
 328 |         |     // There is a weak ptr in runtime filter manager to reference this object.
 329 |         |     std::shared_ptr<RuntimeFilterMergeControllerEntity> _merge_controller_handler;
 330 |         |
 331 |         |     std::map<int, std::weak_ptr<pipeline::PipelineFragmentContext>> _fragment_id_to_pipeline_ctx;
 332 |         |     std::mutex _pipeline_map_write_lock;
 333 |         |
 334 |         |     std::mutex _profile_mutex;
 335 |         |     timespec _query_arrival_timestamp;
 336 |         |     // Distinguish the query source, for query that comes from fe, we will have some memory structure on FE to
 337 |         |     // help us manage the query.
 338 |         |     QuerySource _query_source;
 339 |         |
 340 |         |     std::mutex _brpc_stubs_mutex;
 341 |         |     std::unordered_map<TNetworkAddress, std::shared_ptr<PBackendService_Stub>> _using_brpc_stubs;
 342 |         |
 343 |         |     // when fragment of pipeline is closed, it will register its profile to this map by using add_fragment_profile
 344 |         |     // flatten profile of one fragment:
 345 |         |     // Pipeline 0
 346 |         |     //      PipelineTask 0
 347 |         |     //              Operator 1
 348 |         |     //              Operator 2
 349 |         |     //              Scanner
 350 |         |     //      PipelineTask 1
 351 |         |     //              Operator 1
 352 |         |     //              Operator 2
 353 |         |     //              Scanner
 354 |         |     // Pipeline 1
 355 |         |     //      PipelineTask 2
 356 |         |     //              Operator 3
 357 |         |     //      PipelineTask 3
 358 |         |     //              Operator 3
 359 |         |     // fragment_id -> list<profile>
 360 |         |     std::unordered_map<int, std::vector<std::shared_ptr<TRuntimeProfileTree>>> _profile_map;
 361 |         |     std::unordered_map<int, std::shared_ptr<TRuntimeProfileTree>> _load_channel_profile_map;
 362 |         |
 363 |         |     std::unique_ptr<std::map<std::string, TAIResource>> _ai_resources;
 364 |         |
 365 |         |     void _report_query_profile();
 366 |         |
 367 |         |     std::unordered_map<int, std::vector<std::shared_ptr<TRuntimeProfileTree>>>
 368 |         |     _collect_realtime_query_profile();
 369 |         |
 370 |         |     std::mutex _error_url_lock;
 371 |         |     std::string _load_error_url;
 372 |         |     std::string _first_error_msg;
 373 |         |
 374 |         | public:
 375 |         |     // when fragment of pipeline is closed, it will register its profile to this map by using add_fragment_profile
 376 |         |     void add_fragment_profile(
 377 |         |             int fragment_id,
 378 |         |             const std::vector<std::shared_ptr<TRuntimeProfileTree>>& pipeline_profile,
 379 |         |             std::shared_ptr<TRuntimeProfileTree> load_channel_profile);
 380 |         |
 381 |         |     TReportExecStatusParams get_realtime_exec_status();
 382 |         |
 383 |    122k |     bool enable_profile() const {
 384 |    122k |         return _query_options.__isset.enable_profile && _query_options.enable_profile;
 385 |    122k |     }
 386 |         |
 387 |       0 |     timespec get_query_arrival_timestamp() const { return this->_query_arrival_timestamp; }
 388 |       0 |     QuerySource get_query_source() const { return this->_query_source; }
 389 |         |
 390 |       0 |     const TQueryOptions get_query_options() const { return _query_options; }
 391 |         | };
 392 |         |
 393 |         | } // namespace doris
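
Note on the largest uncovered inline region: the timeout helpers on source lines 103-118 (is_timeout and get_remaining_query_time_seconds) report an execution count of 0 in this run. The self-contained sketch below mirrors the arithmetic those lines implement, using a plain struct so the uncovered control flow can be read in isolation; the struct, field, and function names here are illustrative stand-ins only, not Doris code, and an actual test would exercise QueryContext itself through its constructor and query options.

    // Minimal sketch (illustrative names, not Doris APIs) of the timeout logic
    // on lines 103-118: elapsed time against a monotonic start point, with
    // timeout_second <= 0 meaning "no timeout".
    #include <cstdint>
    #include <ctime>
    #include <iostream>

    struct TimeoutWatcher {
        timespec start {};
        int64_t timeout_second = 0; // <= 0 disables the timeout, as in the header

        int64_t elapsed_seconds(const timespec& now) const {
            return now.tv_sec - start.tv_sec;
        }
        bool is_timeout(const timespec& now) const {
            if (timeout_second <= 0) {
                return false; // a query without a timeout never expires
            }
            return elapsed_seconds(now) > timeout_second;
        }
        int64_t remaining_seconds() const {
            timespec now;
            clock_gettime(CLOCK_MONOTONIC, &now);
            if (is_timeout(now)) {
                return -1; // already timed out
            }
            return timeout_second - elapsed_seconds(now);
        }
    };

    int main() {
        TimeoutWatcher w;
        clock_gettime(CLOCK_MONOTONIC, &w.start);
        w.timeout_second = 300; // e.g. a 5-minute query timeout
        std::cout << "remaining: " << w.remaining_seconds() << "s\n";
        return 0;
    }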