/root/doris/be/src/runtime/query_context.cpp
| Line | Count | Source (jump to first uncovered line) | 
| 1 |  | // Licensed to the Apache Software Foundation (ASF) under one | 
| 2 |  | // or more contributor license agreements.  See the NOTICE file | 
| 3 |  | // distributed with this work for additional information | 
| 4 |  | // regarding copyright ownership.  The ASF licenses this file | 
| 5 |  | // to you under the Apache License, Version 2.0 (the | 
| 6 |  | // "License"); you may not use this file except in compliance | 
| 7 |  | // with the License.  You may obtain a copy of the License at | 
| 8 |  | // | 
| 9 |  | //   http://www.apache.org/licenses/LICENSE-2.0 | 
| 10 |  | // | 
| 11 |  | // Unless required by applicable law or agreed to in writing, | 
| 12 |  | // software distributed under the License is distributed on an | 
| 13 |  | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | 
| 14 |  | // KIND, either express or implied.  See the License for the | 
| 15 |  | // specific language governing permissions and limitations | 
| 16 |  | // under the License. | 
| 17 |  |  | 
| 18 |  | #include "runtime/query_context.h" | 
| 19 |  |  | 
| 20 |  | #include <fmt/core.h> | 
| 21 |  | #include <gen_cpp/FrontendService_types.h> | 
| 22 |  | #include <gen_cpp/RuntimeProfile_types.h> | 
| 23 |  | #include <gen_cpp/Types_types.h> | 
| 24 |  | #include <glog/logging.h> | 
| 25 |  |  | 
| 26 |  | #include <exception> | 
| 27 |  | #include <memory> | 
| 28 |  | #include <mutex> | 
| 29 |  | #include <utility> | 
| 30 |  |  | 
| 31 |  | #include "common/logging.h" | 
| 32 |  | #include "pipeline/dependency.h" | 
| 33 |  | #include "pipeline/pipeline_fragment_context.h" | 
| 34 |  | #include "runtime/exec_env.h" | 
| 35 |  | #include "runtime/fragment_mgr.h" | 
| 36 |  | #include "runtime/runtime_query_statistics_mgr.h" | 
| 37 |  | #include "runtime/runtime_state.h" | 
| 38 |  | #include "runtime/thread_context.h" | 
| 39 |  | #include "runtime/workload_group/workload_group_manager.h" | 
| 40 |  | #include "util/mem_info.h" | 
| 41 |  | #include "util/uid_util.h" | 
| 42 |  | #include "vec/spill/spill_stream_manager.h" | 
| 43 |  |  | 
| 44 |  | namespace doris { | 
| 45 |  |  | 
| 46 |  | class DelayReleaseToken : public Runnable { | 
| 47 |  |     ENABLE_FACTORY_CREATOR(DelayReleaseToken); | 
| 48 |  |  | 
| 49 |  | public: | 
| 50 | 0 |     DelayReleaseToken(std::unique_ptr<ThreadPoolToken>&& token) { token_ = std::move(token); } | 
| 51 | 0 |     ~DelayReleaseToken() override = default; | 
| 52 | 0 |     void run() override {} | 
| 53 |  |     std::unique_ptr<ThreadPoolToken> token_; | 
| 54 |  | }; | 
| 55 |  |  | 
| 56 | 0 | const std::string toString(QuerySource queryType) { | 
| 57 | 0 |     switch (queryType) { | 
| 58 | 0 |     case QuerySource::INTERNAL_FRONTEND:   Branch (58:5): [True: 0, False: 0]
 | 
| 59 | 0 |         return "INTERNAL_FRONTEND"; | 
| 60 | 0 |     case QuerySource::STREAM_LOAD:   Branch (60:5): [True: 0, False: 0]
 | 
| 61 | 0 |         return "STREAM_LOAD"; | 
| 62 | 0 |     case QuerySource::GROUP_COMMIT_LOAD:   Branch (62:5): [True: 0, False: 0]
 | 
| 63 | 0 |         return "EXTERNAL_QUERY"; | 
| 64 | 0 |     case QuerySource::ROUTINE_LOAD:   Branch (64:5): [True: 0, False: 0]
 | 
| 65 | 0 |         return "ROUTINE_LOAD"; | 
| 66 | 0 |     case QuerySource::EXTERNAL_CONNECTOR:   Branch (66:5): [True: 0, False: 0]
 | 
| 67 | 0 |         return "EXTERNAL_CONNECTOR"; | 
| 68 | 0 |     default:   Branch (68:5): [True: 0, False: 0]
 | 
| 69 | 0 |         return "UNKNOWN"; | 
| 70 | 0 |     } | 
| 71 | 0 | } | 
| 72 |  |  | 
// Builds the per-query execution context. Creates the query mem tracker
// first so every subsequent allocation in this ctor is charged to it, then
// wires up shared state (hash table controller, execution dependency,
// runtime filter manager) and registers statistics/metrics.
QueryContext::QueryContext(TUniqueId query_id, ExecEnv* exec_env,
                           const TQueryOptions& query_options, TNetworkAddress coord_addr,
                           bool is_pipeline, bool is_nereids, TNetworkAddress current_connect_fe,
                           QuerySource query_source)
        : _timeout_second(-1),
          _query_id(query_id),
          _exec_env(exec_env),
          _is_pipeline(is_pipeline),
          _is_nereids(is_nereids),
          _query_options(query_options),
          _query_source(query_source) {
    _init_query_mem_tracker();
    // From here on, allocations on this thread are attributed to the query's
    // mem tracker (scope ends at the closing brace).
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(query_mem_tracker);
    _query_watcher.start();
    _shared_hash_table_controller.reset(new vectorized::SharedHashTableController());
    // -1/-1 sentinel ids: this dependency is query-global, not tied to a
    // specific pipeline/task.
    _execution_dependency = pipeline::Dependency::create_unique(-1, -1, "ExecutionDependency");
    _runtime_filter_mgr = std::make_unique<RuntimeFilterMgr>(
            TUniqueId(), RuntimeFilterParamsContext::create(this), query_mem_tracker, true);

    _timeout_second = query_options.execution_timeout;

    bool is_query_type_valid = query_options.query_type == TQueryType::SELECT ||
                               query_options.query_type == TQueryType::LOAD ||
                               query_options.query_type == TQueryType::EXTERNAL;
    DCHECK_EQ(is_query_type_valid, true);

    this->coord_addr = coord_addr;
    // current_connect_fe is used for report query statistics
    this->current_connect_fe = current_connect_fe;
    // external query has no current_connect_fe
    if (query_options.query_type != TQueryType::EXTERNAL) {
        bool is_report_fe_addr_valid =
                !this->current_connect_fe.hostname.empty() && this->current_connect_fe.port != 0;
        DCHECK_EQ(is_report_fe_addr_valid, true);
    }
    // Monotonic clock: arrival timestamp is used for elapsed-time math, not
    // wall-clock display.
    clock_gettime(CLOCK_MONOTONIC, &this->_query_arrival_timestamp);
    register_memory_statistics();
    register_cpu_statistics();
    DorisMetrics::instance()->query_ctx_cnt->increment(1);
}
| 113 |  |  | 
| 114 | 0 | void QueryContext::_init_query_mem_tracker() { | 
| 115 | 0 |     bool has_query_mem_limit = _query_options.__isset.mem_limit && (_query_options.mem_limit > 0);   Branch (115:32): [True: 0, False: 0]
  Branch (115:68): [True: 0, False: 0]
 | 
| 116 | 0 |     int64_t _bytes_limit = has_query_mem_limit ? _query_options.mem_limit : -1;   Branch (116:28): [True: 0, False: 0]
 | 
| 117 | 0 |     if (_bytes_limit > MemInfo::mem_limit()) {  Branch (117:9): [True: 0, False: 0]
 | 
| 118 | 0 |         VLOG_NOTICE << "Query memory limit " << PrettyPrinter::print(_bytes_limit, TUnit::BYTES) | Line | Count | Source |  | 42 | 0 | #define VLOG_NOTICE VLOG(3) | 
 | 
| 119 | 0 |                     << " exceeds process memory limit of " | 
| 120 | 0 |                     << PrettyPrinter::print(MemInfo::mem_limit(), TUnit::BYTES) | 
| 121 | 0 |                     << ". Using process memory limit instead"; | 
| 122 | 0 |         _bytes_limit = MemInfo::mem_limit(); | 
| 123 | 0 |     } | 
| 124 | 0 |     if (_query_options.query_type == TQueryType::SELECT) {  Branch (124:9): [True: 0, False: 0]
 | 
| 125 | 0 |         query_mem_tracker = MemTrackerLimiter::create_shared( | 
| 126 | 0 |                 MemTrackerLimiter::Type::QUERY, fmt::format("Query#Id={}", print_id(_query_id)), | 
| 127 | 0 |                 _bytes_limit); | 
| 128 | 0 |     } else if (_query_options.query_type == TQueryType::LOAD) {  Branch (128:16): [True: 0, False: 0]
 | 
| 129 | 0 |         query_mem_tracker = MemTrackerLimiter::create_shared( | 
| 130 | 0 |                 MemTrackerLimiter::Type::LOAD, fmt::format("Load#Id={}", print_id(_query_id)), | 
| 131 | 0 |                 _bytes_limit); | 
| 132 | 0 |     } else { // EXTERNAL | 
| 133 | 0 |         query_mem_tracker = MemTrackerLimiter::create_shared( | 
| 134 | 0 |                 MemTrackerLimiter::Type::LOAD, fmt::format("External#Id={}", print_id(_query_id)), | 
| 135 | 0 |                 _bytes_limit); | 
| 136 | 0 |     } | 
| 137 | 0 |     if (_query_options.__isset.is_report_success && _query_options.is_report_success) {  Branch (137:9): [True: 0, False: 0]
  Branch (137:53): [True: 0, False: 0]
 | 
| 138 | 0 |         query_mem_tracker->enable_print_log_usage(); | 
| 139 | 0 |     } | 
| 140 | 0 | } | 
| 141 |  |  | 
// Tears down the query context. The order below is deliberate: report
// statistics/profile first, defer thread-token release to another thread,
// dump tracing, then release members before the final log line.
QueryContext::~QueryContext() {
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(query_mem_tracker);
    // query mem tracker consumption is equal to 0, it means that after QueryContext is created,
    // it is found that query already exists in _query_ctx_map, and query mem tracker is not used.
    // query mem tracker consumption is not equal to 0 after use, because there is memory consumed
    // on query mem tracker, released on other trackers.
    std::string mem_tracker_msg;
    if (query_mem_tracker->peak_consumption() != 0) {
        mem_tracker_msg = fmt::format(
                "deregister query/load memory tracker, queryId={}, Limit={}, CurrUsed={}, "
                "PeakUsed={}",
                print_id(_query_id), MemCounter::print_bytes(query_mem_tracker->limit()),
                MemCounter::print_bytes(query_mem_tracker->consumption()),
                MemCounter::print_bytes(query_mem_tracker->peak_consumption()));
    }
    // Capture the workload group id before any teardown; it is needed later
    // for the tracing end_query() call.
    uint64_t group_id = 0;
    if (_workload_group) {
        group_id = _workload_group->id(); // before remove
    }

    _exec_env->runtime_query_statistics_mgr()->set_query_finished(print_id(_query_id));

    if (enable_profile()) {
        _report_query_profile();
    }

    // Do not release the thread token in the query context's destructor, because the
    // query context may be destructed in the thread token itself. It is very dangerous
    // and may core. Also the thread token needs shutdown, which may take some time and
    // could hang the releasing thread — which may be a pipeline task scheduler thread.
    if (_thread_token) {
        Status submit_st = ExecEnv::GetInstance()->lazy_release_obj_pool()->submit(
                DelayReleaseToken::create_shared(std::move(_thread_token)));
        if (!submit_st.ok()) {
            LOG(WARNING) << "Failed to release query context thread token, query_id "
                         << print_id(_query_id) << ", error status " << submit_st;
        }
    }

    //TODO: check if pipeline and tracing both enabled
    if (_is_pipeline && ExecEnv::GetInstance()->pipeline_tracer_context()->enabled()) [[unlikely]] {
        try {
            ExecEnv::GetInstance()->pipeline_tracer_context()->end_query(_query_id, group_id);
        } catch (std::exception& e) {
            LOG(WARNING) << "Dump trace log failed bacause " << e.what();
        }
    }
    // Explicitly release owned state while the scoped mem tracker is still
    // active, so the frees are attributed to this query.
    _runtime_filter_mgr.reset();
    _execution_dependency.reset();
    _shared_hash_table_controller.reset();
    _runtime_predicates.clear();
    file_scan_range_params_map.clear();
    obj_pool.clear();
    _merge_controller_handler.reset();

    _exec_env->spill_stream_mgr()->async_cleanup_query(_query_id);
    DorisMetrics::instance()->query_ctx_cnt->increment(-1);
    // TODO(gabriel): we need to clear outdated query contexts on time
    // ExecEnv::GetInstance()->fragment_mgr()->remove_query_context(this->_query_id);
    // the only one msg shows query's end. any other msg should append to it if need.
    LOG_INFO("Query {} deconstructed, mem_tracker: {}", print_id(this->_query_id), mem_tracker_msg);
}
| 204 |  |  | 
| 205 | 0 | void QueryContext::set_ready_to_execute(Status reason) { | 
| 206 | 0 |     set_execution_dependency_ready(); | 
| 207 | 0 |     _exec_status.update(reason); | 
| 208 | 0 |     if (query_mem_tracker && !reason.ok()) {  Branch (208:9): [True: 0, False: 0]
  Branch (208:30): [True: 0, False: 0]
 | 
| 209 | 0 |         query_mem_tracker->set_is_query_cancelled(!reason.ok()); | 
| 210 | 0 |     } | 
| 211 | 0 | } | 
| 212 |  |  | 
// Unblocks execution WITHOUT touching _exec_status (unlike
// set_ready_to_execute, which also records a status).
void QueryContext::set_ready_to_execute_only() {
    set_execution_dependency_ready();
}
| 216 |  |  | 
// Marks the query-global execution dependency ready, waking pipeline tasks
// blocked on it.
void QueryContext::set_execution_dependency_ready() {
    _execution_dependency->set_ready();
}
| 220 |  |  | 
| 221 | 0 | void QueryContext::cancel(Status new_status, int fragment_id) { | 
| 222 | 0 |     if (!_exec_status.update(new_status)) {  Branch (222:9): [True: 0, False: 0]
 | 
| 223 | 0 |         return; | 
| 224 | 0 |     } | 
| 225 |  |  | 
| 226 | 0 |     set_ready_to_execute(new_status); | 
| 227 | 0 |     cancel_all_pipeline_context(new_status, fragment_id); | 
| 228 | 0 | } | 
| 229 |  |  | 
| 230 | 0 | void QueryContext::set_load_error_url(std::string error_url) { | 
| 231 | 0 |     std::lock_guard<std::mutex> lock(_error_url_lock); | 
| 232 | 0 |     _load_error_url = error_url; | 
| 233 | 0 | } | 
| 234 |  |  | 
| 235 | 0 | std::string QueryContext::get_load_error_url() { | 
| 236 | 0 |     std::lock_guard<std::mutex> lock(_error_url_lock); | 
| 237 | 0 |     return _load_error_url; | 
| 238 | 0 | } | 
| 239 |  |  | 
| 240 | 0 | void QueryContext::cancel_all_pipeline_context(const Status& reason, int fragment_id) { | 
| 241 | 0 |     std::vector<std::weak_ptr<pipeline::PipelineFragmentContext>> ctx_to_cancel; | 
| 242 | 0 |     { | 
| 243 | 0 |         std::lock_guard<std::mutex> lock(_pipeline_map_write_lock); | 
| 244 | 0 |         for (auto& [f_id, f_context] : _fragment_id_to_pipeline_ctx) {  Branch (244:38): [True: 0, False: 0]
 | 
| 245 | 0 |             if (fragment_id == f_id) {  Branch (245:17): [True: 0, False: 0]
 | 
| 246 | 0 |                 continue; | 
| 247 | 0 |             } | 
| 248 | 0 |             ctx_to_cancel.push_back(f_context); | 
| 249 | 0 |         } | 
| 250 | 0 |     } | 
| 251 | 0 |     for (auto& f_context : ctx_to_cancel) {  Branch (251:26): [True: 0, False: 0]
 | 
| 252 | 0 |         if (auto pipeline_ctx = f_context.lock()) {  Branch (252:18): [True: 0, False: 0]
 | 
| 253 | 0 |             pipeline_ctx->cancel(reason); | 
| 254 | 0 |         } | 
| 255 | 0 |     } | 
| 256 | 0 | } | 
| 257 |  |  | 
| 258 | 0 | std::string QueryContext::print_all_pipeline_context() { | 
| 259 | 0 |     std::vector<std::weak_ptr<pipeline::PipelineFragmentContext>> ctx_to_print; | 
| 260 | 0 |     fmt::memory_buffer debug_string_buffer; | 
| 261 | 0 |     size_t i = 0; | 
| 262 | 0 |     { | 
| 263 | 0 |         fmt::format_to(debug_string_buffer, "{} pipeline fragment contexts in query {}. \n", | 
| 264 | 0 |                        _fragment_id_to_pipeline_ctx.size(), print_id(_query_id)); | 
| 265 |  | 
 | 
| 266 | 0 |         { | 
| 267 | 0 |             std::lock_guard<std::mutex> lock(_pipeline_map_write_lock); | 
| 268 | 0 |             for (auto& [f_id, f_context] : _fragment_id_to_pipeline_ctx) {  Branch (268:42): [True: 0, False: 0]
 | 
| 269 | 0 |                 ctx_to_print.push_back(f_context); | 
| 270 | 0 |             } | 
| 271 | 0 |         } | 
| 272 | 0 |         for (auto& f_context : ctx_to_print) {  Branch (272:30): [True: 0, False: 0]
 | 
| 273 | 0 |             if (auto pipeline_ctx = f_context.lock()) {  Branch (273:22): [True: 0, False: 0]
 | 
| 274 | 0 |                 auto elapsed = pipeline_ctx->elapsed_time() / 1000000000.0; | 
| 275 | 0 |                 fmt::format_to(debug_string_buffer, | 
| 276 | 0 |                                "No.{} (elapse_second={}s, fragment_id={}) : {}\n", i, elapsed, | 
| 277 | 0 |                                pipeline_ctx->get_fragment_id(), pipeline_ctx->debug_string()); | 
| 278 | 0 |                 i++; | 
| 279 | 0 |             } | 
| 280 | 0 |         } | 
| 281 | 0 |     } | 
| 282 | 0 |     return fmt::to_string(debug_string_buffer); | 
| 283 | 0 | } | 
| 284 |  |  | 
| 285 |  | void QueryContext::set_pipeline_context( | 
| 286 | 0 |         const int fragment_id, std::shared_ptr<pipeline::PipelineFragmentContext> pip_ctx) { | 
| 287 | 0 |     std::lock_guard<std::mutex> lock(_pipeline_map_write_lock); | 
| 288 | 0 |     _fragment_id_to_pipeline_ctx.insert({fragment_id, pip_ctx}); | 
| 289 | 0 | } | 
| 290 |  |  | 
| 291 | 0 | void QueryContext::register_query_statistics(std::shared_ptr<QueryStatistics> qs) { | 
| 292 | 0 |     _exec_env->runtime_query_statistics_mgr()->register_query_statistics( | 
| 293 | 0 |             print_id(_query_id), qs, current_connect_fe, _query_options.query_type); | 
| 294 | 0 | } | 
| 295 |  |  | 
| 296 | 0 | std::shared_ptr<QueryStatistics> QueryContext::get_query_statistics() { | 
| 297 | 0 |     return _exec_env->runtime_query_statistics_mgr()->get_runtime_query_statistics( | 
| 298 | 0 |             print_id(_query_id)); | 
| 299 | 0 | } | 
| 300 |  |  | 
| 301 | 0 | void QueryContext::register_memory_statistics() { | 
| 302 | 0 |     if (query_mem_tracker) {  Branch (302:9): [True: 0, False: 0]
 | 
| 303 | 0 |         std::shared_ptr<QueryStatistics> qs = query_mem_tracker->get_query_statistics(); | 
| 304 | 0 |         std::string query_id = print_id(_query_id); | 
| 305 | 0 |         if (qs) {  Branch (305:13): [True: 0, False: 0]
 | 
| 306 | 0 |             _exec_env->runtime_query_statistics_mgr()->register_query_statistics( | 
| 307 | 0 |                     query_id, qs, current_connect_fe, _query_options.query_type); | 
| 308 | 0 |         } else { | 
| 309 | 0 |             LOG(INFO) << " query " << query_id << " get memory query statistics failed "; | 
| 310 | 0 |         } | 
| 311 | 0 |     } | 
| 312 | 0 | } | 
| 313 |  |  | 
| 314 | 0 | void QueryContext::register_cpu_statistics() { | 
| 315 | 0 |     if (!_cpu_statistics) {  Branch (315:9): [True: 0, False: 0]
 | 
| 316 | 0 |         _cpu_statistics = std::make_shared<QueryStatistics>(); | 
| 317 | 0 |         _exec_env->runtime_query_statistics_mgr()->register_query_statistics( | 
| 318 | 0 |                 print_id(_query_id), _cpu_statistics, current_connect_fe, | 
| 319 | 0 |                 _query_options.query_type); | 
| 320 | 0 |     } | 
| 321 | 0 | } | 
| 322 |  |  | 
| 323 | 0 | doris::pipeline::TaskScheduler* QueryContext::get_pipe_exec_scheduler() { | 
| 324 | 0 |     if (_workload_group) {  Branch (324:9): [True: 0, False: 0]
 | 
| 325 | 0 |         if (_task_scheduler) {  Branch (325:13): [True: 0, False: 0]
 | 
| 326 | 0 |             return _task_scheduler; | 
| 327 | 0 |         } | 
| 328 | 0 |     } | 
| 329 | 0 |     return _exec_env->pipeline_task_scheduler(); | 
| 330 | 0 | } | 
| 331 |  |  | 
| 332 | 0 | ThreadPool* QueryContext::get_memtable_flush_pool() { | 
| 333 | 0 |     if (_workload_group) {  Branch (333:9): [True: 0, False: 0]
 | 
| 334 | 0 |         return _memtable_flush_pool; | 
| 335 | 0 |     } else { | 
| 336 | 0 |         return nullptr; | 
| 337 | 0 |     } | 
| 338 | 0 | } | 
| 339 |  |  | 
// Binds this query to a workload group and caches the group's schedulers and
// pools locally so later lookups bypass the group.
void QueryContext::set_workload_group(WorkloadGroupPtr& tg) {
    _workload_group = tg;
    // Should add query first, then the workload group will not be deleted.
    // see task_group_manager::delete_workload_group_by_ids
    _workload_group->add_mem_tracker_limiter(query_mem_tracker);
    _workload_group->get_query_scheduler(&_task_scheduler, &_scan_task_scheduler,
                                         &_memtable_flush_pool, &_remote_scan_task_scheduler);
}
| 348 |  |  | 
| 349 |  | void QueryContext::add_fragment_profile( | 
| 350 |  |         int fragment_id, const std::vector<std::shared_ptr<TRuntimeProfileTree>>& pipeline_profiles, | 
| 351 | 0 |         std::shared_ptr<TRuntimeProfileTree> load_channel_profile) { | 
| 352 | 0 |     if (pipeline_profiles.empty()) {  Branch (352:9): [True: 0, False: 0]
 | 
| 353 | 0 |         std::string msg = fmt::format("Add pipeline profile failed, query {}, fragment {}", | 
| 354 | 0 |                                       print_id(this->_query_id), fragment_id); | 
| 355 | 0 |         LOG_ERROR(msg); | Line | Count | Source |  | 120 | 0 | #define LOG_ERROR TaggableLogger(__FILE__, __LINE__, google::GLOG_ERROR) | 
 | 
| 356 | 0 |         DCHECK(false) << msg; | 
| 357 | 0 |         return; | 
| 358 | 0 |     } | 
| 359 |  |  | 
| 360 | 0 | #ifndef NDEBUG | 
| 361 | 0 |     for (const auto& p : pipeline_profiles) {  Branch (361:24): [True: 0, False: 0]
 | 
| 362 | 0 |         DCHECK(p != nullptr) << fmt::format("Add pipeline profile failed, query {}, fragment {}", | 
| 363 | 0 |                                             print_id(this->_query_id), fragment_id); | 
| 364 | 0 |     } | 
| 365 | 0 | #endif | 
| 366 |  | 
 | 
| 367 | 0 |     std::lock_guard<std::mutex> l(_profile_mutex); | 
| 368 | 0 |     VLOG_ROW << fmt::format( | Line | Count | Source |  | 38 | 0 | #define VLOG_ROW VLOG(10) | 
 | 
| 369 | 0 |             "Query add fragment profile, query {}, fragment {}, pipeline profile count {} ", | 
| 370 | 0 |             print_id(this->_query_id), fragment_id, pipeline_profiles.size()); | 
| 371 |  | 
 | 
| 372 | 0 |     _profile_map.insert(std::make_pair(fragment_id, pipeline_profiles)); | 
| 373 |  | 
 | 
| 374 | 0 |     if (load_channel_profile != nullptr) {  Branch (374:9): [True: 0, False: 0]
 | 
| 375 | 0 |         _load_channel_profile_map.insert(std::make_pair(fragment_id, load_channel_profile)); | 
| 376 | 0 |     } | 
| 377 | 0 | } | 
| 378 |  |  | 
| 379 | 0 | void QueryContext::_report_query_profile() { | 
| 380 | 0 |     std::lock_guard<std::mutex> lg(_profile_mutex); | 
| 381 |  | 
 | 
| 382 | 0 |     for (auto& [fragment_id, fragment_profile] : _profile_map) {  Branch (382:48): [True: 0, False: 0]
 | 
| 383 | 0 |         std::shared_ptr<TRuntimeProfileTree> load_channel_profile = nullptr; | 
| 384 |  | 
 | 
| 385 | 0 |         if (_load_channel_profile_map.contains(fragment_id)) {  Branch (385:13): [True: 0, False: 0]
 | 
| 386 | 0 |             load_channel_profile = _load_channel_profile_map[fragment_id]; | 
| 387 | 0 |         } | 
| 388 |  | 
 | 
| 389 | 0 |         ExecEnv::GetInstance()->runtime_query_statistics_mgr()->register_fragment_profile( | 
| 390 | 0 |                 _query_id, this->coord_addr, fragment_id, fragment_profile, load_channel_profile); | 
| 391 | 0 |     } | 
| 392 |  | 
 | 
| 393 | 0 |     ExecEnv::GetInstance()->runtime_query_statistics_mgr()->trigger_report_profile(); | 
| 394 | 0 | } | 
| 395 |  |  | 
| 396 |  | std::unordered_map<int, std::vector<std::shared_ptr<TRuntimeProfileTree>>> | 
| 397 | 0 | QueryContext::_collect_realtime_query_profile() const { | 
| 398 | 0 |     std::unordered_map<int, std::vector<std::shared_ptr<TRuntimeProfileTree>>> res; | 
| 399 |  | 
 | 
| 400 | 0 |     for (auto& [fragment_id, fragment_ctx_wptr] : _fragment_id_to_pipeline_ctx) {  Branch (400:49): [True: 0, False: 0]
 | 
| 401 | 0 |         if (auto fragment_ctx = fragment_ctx_wptr.lock()) {  Branch (401:18): [True: 0, False: 0]
 | 
| 402 | 0 |             if (fragment_ctx == nullptr) {  Branch (402:17): [True: 0, False: 0]
 | 
| 403 | 0 |                 std::string msg = | 
| 404 | 0 |                         fmt::format("PipelineFragmentContext is nullptr, query {} fragment_id: {}", | 
| 405 | 0 |                                     print_id(_query_id), fragment_id); | 
| 406 | 0 |                 LOG_ERROR(msg); | Line | Count | Source |  | 120 | 0 | #define LOG_ERROR TaggableLogger(__FILE__, __LINE__, google::GLOG_ERROR) | 
 | 
| 407 | 0 |                 DCHECK(false) << msg; | 
| 408 | 0 |                 continue; | 
| 409 | 0 |             } | 
| 410 |  |  | 
| 411 | 0 |             auto profile = fragment_ctx->collect_realtime_profile(); | 
| 412 |  | 
 | 
| 413 | 0 |             if (profile.empty()) {  Branch (413:17): [True: 0, False: 0]
 | 
| 414 | 0 |                 std::string err_msg = fmt::format( | 
| 415 | 0 |                         "Get nothing when collecting profile, query {}, fragment_id: {}", | 
| 416 | 0 |                         print_id(_query_id), fragment_id); | 
| 417 | 0 |                 LOG_ERROR(err_msg); | Line | Count | Source |  | 120 | 0 | #define LOG_ERROR TaggableLogger(__FILE__, __LINE__, google::GLOG_ERROR) | 
 | 
| 418 | 0 |                 DCHECK(false) << err_msg; | 
| 419 | 0 |                 continue; | 
| 420 | 0 |             } | 
| 421 |  |  | 
| 422 | 0 |             res.insert(std::make_pair(fragment_id, profile)); | 
| 423 | 0 |         } | 
| 424 | 0 |     } | 
| 425 |  | 
 | 
| 426 | 0 |     return res; | 
| 427 | 0 | } | 
| 428 |  |  | 
| 429 | 0 | TReportExecStatusParams QueryContext::get_realtime_exec_status() const { | 
| 430 | 0 |     TReportExecStatusParams exec_status; | 
| 431 |  | 
 | 
| 432 | 0 |     auto realtime_query_profile = _collect_realtime_query_profile(); | 
| 433 | 0 |     std::vector<std::shared_ptr<TRuntimeProfileTree>> load_channel_profiles; | 
| 434 |  | 
 | 
| 435 | 0 |     for (auto load_channel_profile : _load_channel_profile_map) {  Branch (435:36): [True: 0, False: 0]
 | 
| 436 | 0 |         if (load_channel_profile.second != nullptr) {  Branch (436:13): [True: 0, False: 0]
 | 
| 437 | 0 |             load_channel_profiles.push_back(load_channel_profile.second); | 
| 438 | 0 |         } | 
| 439 | 0 |     } | 
| 440 |  | 
 | 
| 441 | 0 |     exec_status = RuntimeQueryStatisticsMgr::create_report_exec_status_params( | 
| 442 | 0 |             this->_query_id, std::move(realtime_query_profile), std::move(load_channel_profiles), | 
| 443 | 0 |             /*is_done=*/false); | 
| 444 |  | 
 | 
| 445 | 0 |     return exec_status; | 
| 446 | 0 | } | 
| 447 |  |  | 
| 448 |  | } // namespace doris |