Coverage Report

Created: 2024-11-20 21:05

/root/doris/be/src/runtime/query_context.cpp
Line coverage: every executable line in this file has a hit count of 0 (the file is entirely uncovered).

Source:
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#include "runtime/query_context.h"

#include <fmt/core.h>
#include <gen_cpp/FrontendService_types.h>
#include <gen_cpp/RuntimeProfile_types.h>
#include <gen_cpp/Types_types.h>
#include <glog/logging.h>

#include <exception>
#include <memory>
#include <mutex>
#include <utility>

#include "common/logging.h"
#include "pipeline/dependency.h"
#include "pipeline/pipeline_fragment_context.h"
#include "runtime/exec_env.h"
#include "runtime/fragment_mgr.h"
#include "runtime/runtime_query_statistics_mgr.h"
#include "runtime/runtime_state.h"
#include "runtime/thread_context.h"
#include "runtime/workload_group/workload_group_manager.h"
#include "util/mem_info.h"
#include "util/uid_util.h"
#include "vec/spill/spill_stream_manager.h"

namespace doris {

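// DelayReleaseToken is a no-op Runnable that takes ownership of a ThreadPoolToken so the token
// can be destroyed (and shut down) on the lazy-release object pool instead of on the thread that
// is destroying the QueryContext; see the note in ~QueryContext() below.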
class DelayReleaseToken : public Runnable {
    ENABLE_FACTORY_CREATOR(DelayReleaseToken);

public:
    DelayReleaseToken(std::unique_ptr<ThreadPoolToken>&& token) { token_ = std::move(token); }
    ~DelayReleaseToken() override = default;
    void run() override {}
    std::unique_ptr<ThreadPoolToken> token_;
};

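// Maps a QuerySource value to a display string for logs and reports; unrecognized values fall
// back to "UNKNOWN". Note that GROUP_COMMIT_LOAD is rendered as "EXTERNAL_QUERY" here.
// Illustrative use (not part of this file): LOG(INFO) << "query source: " << toString(_query_source);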
const std::string toString(QuerySource queryType) {
    switch (queryType) {
    case QuerySource::INTERNAL_FRONTEND:
        return "INTERNAL_FRONTEND";
    case QuerySource::STREAM_LOAD:
        return "STREAM_LOAD";
    case QuerySource::GROUP_COMMIT_LOAD:
        return "EXTERNAL_QUERY";
    case QuerySource::ROUTINE_LOAD:
        return "ROUTINE_LOAD";
    case QuerySource::EXTERNAL_CONNECTOR:
        return "EXTERNAL_CONNECTOR";
    default:
        return "UNKNOWN";
    }
}

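// The constructor wires up all per-query state: the query memory tracker, the query timer, the
// shared hash table controller, the execution dependency, the runtime filter manager, the
// execution timeout, the coordinator/report FE addresses, the arrival timestamp, and the
// memory/CPU statistics registrations.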
QueryContext::QueryContext(TUniqueId query_id, ExecEnv* exec_env,
                           const TQueryOptions& query_options, TNetworkAddress coord_addr,
                           bool is_nereids, TNetworkAddress current_connect_fe,
                           QuerySource query_source)
        : _timeout_second(-1),
          _query_id(query_id),
          _exec_env(exec_env),
          _is_nereids(is_nereids),
          _query_options(query_options),
          _query_source(query_source) {
    _init_query_mem_tracker();
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(query_mem_tracker);
    _query_watcher.start();
    _shared_hash_table_controller.reset(new vectorized::SharedHashTableController());
    _execution_dependency = pipeline::Dependency::create_unique(-1, -1, "ExecutionDependency");
    _runtime_filter_mgr = std::make_unique<RuntimeFilterMgr>(
            TUniqueId(), RuntimeFilterParamsContext::create(this), query_mem_tracker, true);

    _timeout_second = query_options.execution_timeout;

    bool is_query_type_valid = query_options.query_type == TQueryType::SELECT ||
                               query_options.query_type == TQueryType::LOAD ||
                               query_options.query_type == TQueryType::EXTERNAL;
    DCHECK_EQ(is_query_type_valid, true);

    this->coord_addr = coord_addr;
    // current_connect_fe is used for reporting query statistics.
    this->current_connect_fe = current_connect_fe;
    // An external query has no current_connect_fe.
    if (query_options.query_type != TQueryType::EXTERNAL) {
        bool is_report_fe_addr_valid =
                !this->current_connect_fe.hostname.empty() && this->current_connect_fe.port != 0;
        DCHECK_EQ(is_report_fe_addr_valid, true);
    }
    clock_gettime(CLOCK_MONOTONIC, &this->_query_arrival_timestamp);
    register_memory_statistics();
    register_cpu_statistics();
    DorisMetrics::instance()->query_ctx_cnt->increment(1);
}

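// Creates the MemTrackerLimiter for this query. The limit is query_options.mem_limit when set
// and positive, otherwise -1 (unlimited), and is capped at the process memory limit. The tracker
// type and label depend on the query type (SELECT, LOAD, or EXTERNAL).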
void QueryContext::_init_query_mem_tracker() {
    bool has_query_mem_limit = _query_options.__isset.mem_limit && (_query_options.mem_limit > 0);
    int64_t _bytes_limit = has_query_mem_limit ? _query_options.mem_limit : -1;
    if (_bytes_limit > MemInfo::mem_limit()) {
        VLOG_NOTICE << "Query memory limit " << PrettyPrinter::print(_bytes_limit, TUnit::BYTES)
                    << " exceeds process memory limit of "
                    << PrettyPrinter::print(MemInfo::mem_limit(), TUnit::BYTES)
                    << ". Using process memory limit instead";
        _bytes_limit = MemInfo::mem_limit();
    }
    if (_query_options.query_type == TQueryType::SELECT) {
        query_mem_tracker = MemTrackerLimiter::create_shared(
                MemTrackerLimiter::Type::QUERY, fmt::format("Query#Id={}", print_id(_query_id)),
                _bytes_limit);
    } else if (_query_options.query_type == TQueryType::LOAD) {
        query_mem_tracker = MemTrackerLimiter::create_shared(
                MemTrackerLimiter::Type::LOAD, fmt::format("Load#Id={}", print_id(_query_id)),
                _bytes_limit);
    } else { // EXTERNAL
        query_mem_tracker = MemTrackerLimiter::create_shared(
                MemTrackerLimiter::Type::LOAD, fmt::format("External#Id={}", print_id(_query_id)),
                _bytes_limit);
    }
    if (_query_options.__isset.is_report_success && _query_options.is_report_success) {
        query_mem_tracker->enable_print_log_usage();
    }
}

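// The destructor tears the query down: it records the memory tracker summary, marks the query
// finished in the runtime query statistics manager, reports the final profile when profiling is
// enabled, hands the thread token to the lazy-release pool, flushes the pipeline tracer if it is
// enabled, releases per-query resources, and schedules asynchronous spill cleanup.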
QueryContext::~QueryContext() {
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(query_mem_tracker);
    // If the query mem tracker's peak consumption is 0, the tracker was never used: after this
    // QueryContext was created, the query was found to already exist in _query_ctx_map.
    // After use, the current consumption may not be 0, because memory consumed on the query
    // mem tracker may be released on other trackers.
    std::string mem_tracker_msg;
    if (query_mem_tracker->peak_consumption() != 0) {
        mem_tracker_msg = fmt::format(
                "deregister query/load memory tracker, queryId={}, Limit={}, CurrUsed={}, "
                "PeakUsed={}",
                print_id(_query_id), MemCounter::print_bytes(query_mem_tracker->limit()),
                MemCounter::print_bytes(query_mem_tracker->consumption()),
                MemCounter::print_bytes(query_mem_tracker->peak_consumption()));
    }
    uint64_t group_id = 0;
    if (_workload_group) {
        group_id = _workload_group->id(); // before remove
    }

    _exec_env->runtime_query_statistics_mgr()->set_query_finished(print_id(_query_id));

    if (enable_profile()) {
        _report_query_profile();
    }

    // Do not release the thread token in the query context's destructor, because the query
    // context may be destructed from within the thread token itself; that is very dangerous
    // and may crash. Also, the thread token needs to be shut down, which may take some time
    // and could hang the thread releasing the token, which may be a pipeline task scheduler
    // thread.
    if (_thread_token) {
        Status submit_st = ExecEnv::GetInstance()->lazy_release_obj_pool()->submit(
                DelayReleaseToken::create_shared(std::move(_thread_token)));
        if (!submit_st.ok()) {
            LOG(WARNING) << "Failed to release query context thread token, query_id "
                         << print_id(_query_id) << ", error status " << submit_st;
        }
    }

    if (ExecEnv::GetInstance()->pipeline_tracer_context()->enabled()) [[unlikely]] {
        try {
            ExecEnv::GetInstance()->pipeline_tracer_context()->end_query(_query_id, group_id);
        } catch (std::exception& e) {
            LOG(WARNING) << "Dump trace log failed because " << e.what();
        }
    }
    _runtime_filter_mgr.reset();
    _execution_dependency.reset();
    _shared_hash_table_controller.reset();
    _runtime_predicates.clear();
    file_scan_range_params_map.clear();
    obj_pool.clear();
    _merge_controller_handler.reset();

    _exec_env->spill_stream_mgr()->async_cleanup_query(_query_id);
    DorisMetrics::instance()->query_ctx_cnt->increment(-1);
    // This is the only message that marks the query's end; anything else worth reporting
    // should be appended to it.
    LOG_INFO("Query {} deconstructed, mem_tracker: {}", print_id(this->_query_id), mem_tracker_msg);
}

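// set_ready_to_execute() records the (possibly non-OK) status and marks the execution dependency
// ready so that work blocked on it can proceed; a non-OK reason also flags the memory tracker as
// belonging to a cancelled query. set_ready_to_execute_only() skips the status update.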
void QueryContext::set_ready_to_execute(Status reason) {
    set_execution_dependency_ready();
    _exec_status.update(reason);
    if (query_mem_tracker && !reason.ok()) {
        query_mem_tracker->set_is_query_cancelled(!reason.ok());
    }
}

void QueryContext::set_ready_to_execute_only() {
    set_execution_dependency_ready();
}

void QueryContext::set_execution_dependency_ready() {
    _execution_dependency->set_ready();
}

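// cancel() stores the first non-OK status and propagates it: the execution dependency is marked
// ready and every registered PipelineFragmentContext is cancelled, except the fragment identified
// by fragment_id (presumably the one that initiated the cancellation). Contexts are copied out
// under _pipeline_map_write_lock and cancelled after the lock is released.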
void QueryContext::cancel(Status new_status, int fragment_id) {
    if (!_exec_status.update(new_status)) {
        return;
    }

    set_ready_to_execute(new_status);
    cancel_all_pipeline_context(new_status, fragment_id);
}

void QueryContext::cancel_all_pipeline_context(const Status& reason, int fragment_id) {
    std::vector<std::weak_ptr<pipeline::PipelineFragmentContext>> ctx_to_cancel;
    {
        std::lock_guard<std::mutex> lock(_pipeline_map_write_lock);
        for (auto& [f_id, f_context] : _fragment_id_to_pipeline_ctx) {
            if (fragment_id == f_id) {
                continue;
            }
            ctx_to_cancel.push_back(f_context);
        }
    }
    for (auto& f_context : ctx_to_cancel) {
        if (auto pipeline_ctx = f_context.lock()) {
            pipeline_ctx->cancel(reason);
        }
    }
}

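// Builds a human-readable summary of every live PipelineFragmentContext (elapsed seconds,
// fragment id, debug string) for diagnostics.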
std::string QueryContext::print_all_pipeline_context() {
    std::vector<std::weak_ptr<pipeline::PipelineFragmentContext>> ctx_to_print;
    fmt::memory_buffer debug_string_buffer;
    size_t i = 0;
    {
        fmt::format_to(debug_string_buffer, "{} pipeline fragment contexts in query {}. \n",
                       _fragment_id_to_pipeline_ctx.size(), print_id(_query_id));

        {
            std::lock_guard<std::mutex> lock(_pipeline_map_write_lock);
            for (auto& [f_id, f_context] : _fragment_id_to_pipeline_ctx) {
                ctx_to_print.push_back(f_context);
            }
        }
        for (auto& f_context : ctx_to_print) {
            if (auto pipeline_ctx = f_context.lock()) {
                auto elapsed = pipeline_ctx->elapsed_time() / 1000000000.0;
                fmt::format_to(debug_string_buffer,
                               "No.{} (elapse_second={}s, fragment_id={}) : {}\n", i, elapsed,
                               pipeline_ctx->get_fragment_id(), pipeline_ctx->debug_string());
                i++;
            }
        }
    }
    return fmt::to_string(debug_string_buffer);
}

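// Registers a fragment's PipelineFragmentContext under _pipeline_map_write_lock so it can later
// be cancelled, printed, or profiled through this QueryContext.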
void QueryContext::set_pipeline_context(
        const int fragment_id, std::shared_ptr<pipeline::PipelineFragmentContext> pip_ctx) {
    std::lock_guard<std::mutex> lock(_pipeline_map_write_lock);
    _fragment_id_to_pipeline_ctx.insert({fragment_id, pip_ctx});
}

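// Query statistics plumbing: QueryStatistics objects (one obtained from the memory tracker, one
// for CPU) are registered with the RuntimeQueryStatisticsMgr keyed by query id, together with the
// connected FE address, so the FE can collect them; get_query_statistics() reads them back.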
void QueryContext::register_query_statistics(std::shared_ptr<QueryStatistics> qs) {
    _exec_env->runtime_query_statistics_mgr()->register_query_statistics(
            print_id(_query_id), qs, current_connect_fe, _query_options.query_type);
}

std::shared_ptr<QueryStatistics> QueryContext::get_query_statistics() {
    return _exec_env->runtime_query_statistics_mgr()->get_runtime_query_statistics(
            print_id(_query_id));
}

void QueryContext::register_memory_statistics() {
    if (query_mem_tracker) {
        std::shared_ptr<QueryStatistics> qs = query_mem_tracker->get_query_statistics();
        std::string query_id = print_id(_query_id);
        if (qs) {
            _exec_env->runtime_query_statistics_mgr()->register_query_statistics(
                    query_id, qs, current_connect_fe, _query_options.query_type);
        } else {
            LOG(INFO) << " query " << query_id << " get memory query statistics failed ";
        }
    }
}

void QueryContext::register_cpu_statistics() {
    if (!_cpu_statistics) {
        _cpu_statistics = std::make_shared<QueryStatistics>();
        _exec_env->runtime_query_statistics_mgr()->register_query_statistics(
                print_id(_query_id), _cpu_statistics, current_connect_fe,
                _query_options.query_type);
    }
}

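// When a workload group is bound, the query uses that group's dedicated task scheduler and
// memtable flush pool; otherwise it falls back to the global pipeline task scheduler, and no
// dedicated memtable flush pool is provided.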
doris::pipeline::TaskScheduler* QueryContext::get_pipe_exec_scheduler() {
    if (_workload_group) {
        if (_task_scheduler) {
            return _task_scheduler;
        }
    }
    return _exec_env->pipeline_task_scheduler();
}

ThreadPool* QueryContext::get_memtable_flush_pool() {
    if (_workload_group) {
        return _memtable_flush_pool;
    } else {
        return nullptr;
    }
}

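// Binds the query to a workload group: the query memory tracker is attached to the group, and
// the group's task/scan/remote-scan schedulers and memtable flush pool are cached on this
// QueryContext for later use.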
Status QueryContext::set_workload_group(WorkloadGroupPtr& tg) {
    _workload_group = tg;
    // Add the query first so the workload group will not be deleted;
    // see task_group_manager::delete_workload_group_by_ids.
    _workload_group->add_mem_tracker_limiter(query_mem_tracker);
    _workload_group->get_query_scheduler(&_task_scheduler, &_scan_task_scheduler,
                                         &_memtable_flush_pool, &_remote_scan_task_scheduler);
    return Status::OK();
}

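// Stores the pipeline profiles (and the optional load channel profile) reported for one fragment,
// guarded by _profile_mutex. An empty profile list is rejected and trips a DCHECK in debug builds.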
void QueryContext::add_fragment_profile(
        int fragment_id, const std::vector<std::shared_ptr<TRuntimeProfileTree>>& pipeline_profiles,
        std::shared_ptr<TRuntimeProfileTree> load_channel_profile) {
    if (pipeline_profiles.empty()) {
        std::string msg = fmt::format("Add pipeline profile failed, query {}, fragment {}",
                                      print_id(this->_query_id), fragment_id);
        LOG_ERROR(msg);
        DCHECK(false) << msg;
        return;
    }

#ifndef NDEBUG
    for (const auto& p : pipeline_profiles) {
        DCHECK(p != nullptr) << fmt::format("Add pipeline profile failed, query {}, fragment {}",
                                            print_id(this->_query_id), fragment_id);
    }
#endif

    std::lock_guard<std::mutex> l(_profile_mutex);
    LOG_INFO("Query X add fragment profile, query {}, fragment {}, pipeline profile count {} ",
             print_id(this->_query_id), fragment_id, pipeline_profiles.size());

    _profile_map.insert(std::make_pair(fragment_id, pipeline_profiles));

    if (load_channel_profile != nullptr) {
        _load_channel_profile_map.insert(std::make_pair(fragment_id, load_channel_profile));
    }
}

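// Pushes every stored fragment profile (plus its load channel profile, if any) to the
// RuntimeQueryStatisticsMgr for the coordinator address, then asks the manager to trigger a
// profile report.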
void QueryContext::_report_query_profile() {
    std::lock_guard<std::mutex> lg(_profile_mutex);

    for (auto& [fragment_id, fragment_profile] : _profile_map) {
        std::shared_ptr<TRuntimeProfileTree> load_channel_profile = nullptr;

        if (_load_channel_profile_map.contains(fragment_id)) {
            load_channel_profile = _load_channel_profile_map[fragment_id];
        }

        ExecEnv::GetInstance()->runtime_query_statistics_mgr()->register_fragment_profile(
                _query_id, this->coord_addr, fragment_id, fragment_profile, load_channel_profile);
    }

    ExecEnv::GetInstance()->runtime_query_statistics_mgr()->trigger_report_profile();
}

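// Collects a realtime profile snapshot from every live PipelineFragmentContext. Fragments whose
// context has expired are skipped; an empty snapshot is logged and trips a DCHECK in debug builds.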
std::unordered_map<int, std::vector<std::shared_ptr<TRuntimeProfileTree>>>
QueryContext::_collect_realtime_query_profile() const {
    std::unordered_map<int, std::vector<std::shared_ptr<TRuntimeProfileTree>>> res;

    for (auto& [fragment_id, fragment_ctx_wptr] : _fragment_id_to_pipeline_ctx) {
        if (auto fragment_ctx = fragment_ctx_wptr.lock()) {
            if (fragment_ctx == nullptr) {
                std::string msg =
                        fmt::format("PipelineFragmentContext is nullptr, query {} fragment_id: {}",
                                    print_id(_query_id), fragment_id);
                LOG_ERROR(msg);
                DCHECK(false) << msg;
                continue;
            }

            auto profile = fragment_ctx->collect_realtime_profile();

            if (profile.empty()) {
                std::string err_msg = fmt::format(
                        "Get nothing when collecting profile, query {}, fragment_id: {}",
                        print_id(_query_id), fragment_id);
                LOG_ERROR(err_msg);
                DCHECK(false) << err_msg;
                continue;
            }

            res.insert(std::make_pair(fragment_id, profile));
        }
    }

    return res;
}

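// Assembles a TReportExecStatusParams for a still-running query (is_done=false) from the realtime
// fragment profiles and the collected load channel profiles. A hedged usage sketch, assuming a
// caller that already holds a shared_ptr<QueryContext> named query_ctx:
//
//   TReportExecStatusParams params = query_ctx->get_realtime_exec_status();
//   // params can then be sent to the FE through the usual report-exec-status path.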
TReportExecStatusParams QueryContext::get_realtime_exec_status() const {
    TReportExecStatusParams exec_status;

    auto realtime_query_profile = _collect_realtime_query_profile();
    std::vector<std::shared_ptr<TRuntimeProfileTree>> load_channel_profiles;

    for (auto load_channel_profile : _load_channel_profile_map) {
        if (load_channel_profile.second != nullptr) {
            load_channel_profiles.push_back(load_channel_profile.second);
        }
    }

    exec_status = RuntimeQueryStatisticsMgr::create_report_exec_status_params(
            this->_query_id, std::move(realtime_query_profile), std::move(load_channel_profiles),
            /*is_done=*/false);

    return exec_status;
}

} // namespace doris