/root/doris/be/src/runtime/runtime_state.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | // This file is copied from |
18 | | // https://github.com/apache/impala/blob/branch-2.9.0/be/src/runtime/runtime-state.cpp |
19 | | // and modified by Doris |
20 | | |
21 | | #include "runtime/runtime_state.h" |
22 | | |
23 | | #include <fmt/format.h> |
24 | | #include <gen_cpp/PaloInternalService_types.h> |
25 | | #include <gen_cpp/Types_types.h> |
26 | | #include <glog/logging.h> |
27 | | |
28 | | #include <fstream> |
29 | | #include <memory> |
30 | | #include <string> |
31 | | |
32 | | #include "cloud/cloud_storage_engine.h" |
33 | | #include "cloud/config.h" |
34 | | #include "common/config.h" |
35 | | #include "common/logging.h" |
36 | | #include "common/object_pool.h" |
37 | | #include "common/status.h" |
38 | | #include "io/fs/s3_file_system.h" |
39 | | #include "olap/id_manager.h" |
40 | | #include "olap/storage_engine.h" |
41 | | #include "pipeline/exec/operator.h" |
42 | | #include "pipeline/pipeline_task.h" |
43 | | #include "runtime/exec_env.h" |
44 | | #include "runtime/fragment_mgr.h" |
45 | | #include "runtime/load_path_mgr.h" |
46 | | #include "runtime/memory/mem_tracker_limiter.h" |
47 | | #include "runtime/memory/thread_mem_tracker_mgr.h" |
48 | | #include "runtime/query_context.h" |
49 | | #include "runtime/thread_context.h" |
50 | | #include "runtime_filter/runtime_filter_mgr.h" |
51 | | #include "util/timezone_utils.h" |
52 | | #include "util/uid_util.h" |
53 | | #include "vec/runtime/vdatetime_value.h" |
54 | | |
55 | | namespace doris { |
56 | | #include "common/compile_check_begin.h" |
57 | | using namespace ErrorCode; |
58 | | |
// Constructor for (non-pipeline) fragment execution: the profile is named
// after the fragment instance id and the query-level memory tracker is
// supplied by the caller instead of being fetched from the QueryContext.
RuntimeState::RuntimeState(const TPlanFragmentExecParams& fragment_exec_params,
                           const TQueryOptions& query_options, const TQueryGlobals& query_globals,
                           ExecEnv* exec_env, QueryContext* ctx,
                           const std::shared_ptr<MemTrackerLimiter>& query_mem_tracker)
        : _profile("Fragment " + print_id(fragment_exec_params.fragment_instance_id)),
          _load_channel_profile("<unnamed>"),
          _obj_pool(new ObjectPool()),
          _unreported_error_idx(0),
          _query_id(fragment_exec_params.query_id),
          _per_fragment_instance_idx(0),
          _num_rows_load_total(0),
          _num_rows_load_filtered(0),
          _num_rows_load_unselected(0),
          _num_print_error_rows(0),
          _num_bytes_load_total(0),
          _num_finished_scan_range(0),
          _error_row_number(0),
          _query_ctx(ctx) {
    // init() is expected to succeed here; a failure only surfaces via the
    // DCHECK in debug builds.
    Status status =
            init(fragment_exec_params.fragment_instance_id, query_options, query_globals, exec_env);
    DCHECK(status.ok());
    // The tracker is handed in by the caller; it must never be null.
    _query_mem_tracker = query_mem_tracker;
    DCHECK(_query_mem_tracker != nullptr);
}
83 | | |
// Constructor used when both the fragment instance id and fragment id are
// known; the query-level memory tracker is taken from the QueryContext.
RuntimeState::RuntimeState(const TUniqueId& instance_id, const TUniqueId& query_id,
                           int32_t fragment_id, const TQueryOptions& query_options,
                           const TQueryGlobals& query_globals, ExecEnv* exec_env, QueryContext* ctx)
        : _profile("Fragment " + print_id(instance_id)),
          _load_channel_profile("<unnamed>"),
          _obj_pool(new ObjectPool()),
          _unreported_error_idx(0),
          _query_id(query_id),
          _fragment_id(fragment_id),
          _per_fragment_instance_idx(0),
          _num_rows_load_total(0),
          _num_rows_load_filtered(0),
          _num_rows_load_unselected(0),
          _num_rows_filtered_in_strict_mode_partial_update(0),
          _num_print_error_rows(0),
          _num_bytes_load_total(0),
          _num_finished_scan_range(0),
          _error_row_number(0),
          _query_ctx(ctx) {
    // init() is expected to succeed; a failure only surfaces via the DCHECK in
    // debug builds.
    [[maybe_unused]] auto status = init(instance_id, query_options, query_globals, exec_env);
    DCHECK(status.ok());
    _query_mem_tracker = ctx->query_mem_tracker();
}
107 | | |
// Constructor for pipeline execution where no per-instance id exists yet; the
// profile is keyed by fragment id and a default-constructed TUniqueId is used
// as the instance id.
RuntimeState::RuntimeState(const TUniqueId& query_id, int32_t fragment_id,
                           const TQueryOptions& query_options, const TQueryGlobals& query_globals,
                           ExecEnv* exec_env, QueryContext* ctx)
        : _profile("PipelineX " + std::to_string(fragment_id)),
          _load_channel_profile("<unnamed>"),
          _obj_pool(new ObjectPool()),
          _unreported_error_idx(0),
          _query_id(query_id),
          _fragment_id(fragment_id),
          _per_fragment_instance_idx(0),
          _num_rows_load_total(0),
          _num_rows_load_filtered(0),
          _num_rows_load_unselected(0),
          _num_rows_filtered_in_strict_mode_partial_update(0),
          _num_print_error_rows(0),
          _num_bytes_load_total(0),
          _num_finished_scan_range(0),
          _error_row_number(0),
          _query_ctx(ctx) {
    // TODO: do we really need instance id?
    Status status = init(TUniqueId(), query_options, query_globals, exec_env);
    DCHECK(status.ok());
    _query_mem_tracker = ctx->query_mem_tracker();
}
132 | | |
// Minimal constructor carrying only session globals (no ExecEnv/QueryContext
// wiring); creates its own "<unnamed>" memory tracker.
RuntimeState::RuntimeState(const TQueryGlobals& query_globals)
        : _profile("<unnamed>"),
          _load_channel_profile("<unnamed>"),
          _obj_pool(new ObjectPool()),
          _unreported_error_idx(0),
          _per_fragment_instance_idx(0) {
    _query_options.batch_size = DEFAULT_BATCH_SIZE;
    // Time-context precedence (mirrors init()): coordinator time zone with
    // nanoseconds, then time zone alone, then the legacy "now_string", and
    // finally built-in defaults.
    if (query_globals.__isset.time_zone && query_globals.__isset.nano_seconds) {
        _timezone = query_globals.time_zone;
        _timestamp_ms = query_globals.timestamp_ms;
        _nano_seconds = query_globals.nano_seconds;
    } else if (query_globals.__isset.time_zone) {
        _timezone = query_globals.time_zone;
        _timestamp_ms = query_globals.timestamp_ms;
        _nano_seconds = 0;
    } else if (!query_globals.now_string.empty()) {
        // Parse "now" from the legacy string form and convert to epoch millis.
        _timezone = TimezoneUtils::default_time_zone;
        VecDateTimeValue dt;
        dt.from_date_str(query_globals.now_string.c_str(), query_globals.now_string.size());
        int64_t timestamp;
        dt.unix_timestamp(&timestamp, _timezone);
        _timestamp_ms = timestamp * 1000;
        _nano_seconds = 0;
    } else {
        //Unit test may set into here
        _timezone = TimezoneUtils::default_time_zone;
        _timestamp_ms = 0;
        _nano_seconds = 0;
    }
    // Resolve the textual zone into a cctz object once, up front.
    TimezoneUtils::find_cctz_time_zone(_timezone, _timezone_obj);
    init_mem_trackers("<unnamed>");
}
165 | | |
// Default constructor, used by unit tests and standalone utilities: default
// time zone, newest BE exec version, global ExecEnv, and a fresh "<unnamed>"
// memory tracker.
RuntimeState::RuntimeState()
        : _profile("<unnamed>"),
          _load_channel_profile("<unnamed>"),
          _obj_pool(new ObjectPool()),
          _unreported_error_idx(0),
          _per_fragment_instance_idx(0) {
    _query_options.batch_size = DEFAULT_BATCH_SIZE;
    _query_options.be_exec_version = BeExecVersionManager::get_newest_version();
    _timezone = TimezoneUtils::default_time_zone;
    _timestamp_ms = 0;
    _nano_seconds = 0;
    TimezoneUtils::find_cctz_time_zone(_timezone, _timezone_obj);
    _exec_env = ExecEnv::GetInstance();
    init_mem_trackers("<unnamed>");
}
181 | | |
RuntimeState::~RuntimeState() {
    // Attribute any memory freed during teardown to this query's tracker.
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_mem_tracker);
    // close error log file
    if (_error_log_file != nullptr && _error_log_file->is_open()) {
        _error_log_file->close();
    }

    // Release everything owned by the per-state object pool.
    _obj_pool->clear();
}
191 | | |
192 | | Status RuntimeState::init(const TUniqueId& fragment_instance_id, const TQueryOptions& query_options, |
193 | 155 | const TQueryGlobals& query_globals, ExecEnv* exec_env) { |
194 | 155 | _fragment_instance_id = fragment_instance_id; |
195 | 155 | _query_options = query_options; |
196 | 155 | if (query_globals.__isset.time_zone && query_globals.__isset.nano_seconds) { |
197 | 0 | _timezone = query_globals.time_zone; |
198 | 0 | _timestamp_ms = query_globals.timestamp_ms; |
199 | 0 | _nano_seconds = query_globals.nano_seconds; |
200 | 155 | } else if (query_globals.__isset.time_zone) { |
201 | 0 | _timezone = query_globals.time_zone; |
202 | 0 | _timestamp_ms = query_globals.timestamp_ms; |
203 | 0 | _nano_seconds = 0; |
204 | 155 | } else if (!query_globals.now_string.empty()) { |
205 | 0 | _timezone = TimezoneUtils::default_time_zone; |
206 | 0 | VecDateTimeValue dt; |
207 | 0 | dt.from_date_str(query_globals.now_string.c_str(), query_globals.now_string.size()); |
208 | 0 | int64_t timestamp; |
209 | 0 | dt.unix_timestamp(×tamp, _timezone); |
210 | 0 | _timestamp_ms = timestamp * 1000; |
211 | 0 | _nano_seconds = 0; |
212 | 155 | } else { |
213 | | //Unit test may set into here |
214 | 155 | _timezone = TimezoneUtils::default_time_zone; |
215 | 155 | _timestamp_ms = 0; |
216 | 155 | _nano_seconds = 0; |
217 | 155 | } |
218 | 155 | TimezoneUtils::find_cctz_time_zone(_timezone, _timezone_obj); |
219 | | |
220 | 155 | if (query_globals.__isset.load_zero_tolerance) { |
221 | 155 | _load_zero_tolerance = query_globals.load_zero_tolerance; |
222 | 155 | } |
223 | | |
224 | 155 | _exec_env = exec_env; |
225 | | |
226 | 155 | if (_query_options.max_errors <= 0) { |
227 | | // TODO: fix linker error and uncomment this |
228 | | //_query_options.max_errors = config::max_errors; |
229 | 155 | _query_options.max_errors = 100; |
230 | 155 | } |
231 | | |
232 | 155 | if (_query_options.batch_size <= 0) { |
233 | 113 | _query_options.batch_size = DEFAULT_BATCH_SIZE; |
234 | 113 | } |
235 | | |
236 | 155 | _db_name = "insert_stmt"; |
237 | 155 | _import_label = print_id(fragment_instance_id); |
238 | | |
239 | 155 | _profile_level = query_options.__isset.profile_level ? query_options.profile_level : 2; |
240 | | |
241 | 155 | return Status::OK(); |
242 | 155 | } |
243 | | |
244 | 0 | std::weak_ptr<QueryContext> RuntimeState::get_query_ctx_weak() { |
245 | 0 | return _exec_env->fragment_mgr()->get_query_ctx(_query_ctx->query_id()); |
246 | 0 | } |
247 | | |
248 | 90.2k | void RuntimeState::init_mem_trackers(const std::string& name, const TUniqueId& id) { |
249 | 90.2k | _query_mem_tracker = MemTrackerLimiter::create_shared( |
250 | 90.2k | MemTrackerLimiter::Type::OTHER, fmt::format("{}#Id={}", name, print_id(id))); |
251 | 90.2k | } |
252 | | |
std::shared_ptr<MemTrackerLimiter> RuntimeState::query_mem_tracker() const {
    // Every constructor installs a tracker (from the QueryContext or via
    // init_mem_trackers), so a null tracker here is a programming error.
    CHECK(_query_mem_tracker != nullptr);
    return _query_mem_tracker;
}
257 | | |
WorkloadGroupPtr RuntimeState::workload_group() {
    // The workload group is a query-wide property owned by the QueryContext.
    return _query_ctx->workload_group();
}
261 | | |
262 | 0 | bool RuntimeState::log_error(const std::string& error) { |
263 | 0 | std::lock_guard<std::mutex> l(_error_log_lock); |
264 | |
|
265 | 0 | if (_error_log.size() < _query_options.max_errors) { |
266 | 0 | _error_log.push_back(error); |
267 | 0 | return true; |
268 | 0 | } |
269 | | |
270 | 0 | return false; |
271 | 0 | } |
272 | | |
273 | 0 | void RuntimeState::get_unreported_errors(std::vector<std::string>* new_errors) { |
274 | 0 | std::lock_guard<std::mutex> l(_error_log_lock); |
275 | |
|
276 | 0 | if (_unreported_error_idx < _error_log.size()) { |
277 | 0 | new_errors->assign(_error_log.begin() + _unreported_error_idx, _error_log.end()); |
278 | 0 | _unreported_error_idx = (int)_error_log.size(); |
279 | 0 | } |
280 | 0 | } |
281 | | |
bool RuntimeState::is_cancelled() const {
    // Cancelled if this fragment already recorded a failure, or the query-wide
    // context (when present) was cancelled.
    // Maybe we should just return _is_cancelled.load()
    return !_exec_status.ok() || (_query_ctx && _query_ctx->is_cancelled());
}
286 | | |
287 | 0 | Status RuntimeState::cancel_reason() const { |
288 | 0 | if (!_exec_status.ok()) { |
289 | 0 | return _exec_status.status(); |
290 | 0 | } |
291 | | |
292 | 0 | if (_query_ctx) { |
293 | 0 | return _query_ctx->exec_status(); |
294 | 0 | } |
295 | | |
296 | 0 | return Status::Cancelled("Query cancelled"); |
297 | 0 | } |
298 | | |
299 | | const int64_t MAX_ERROR_NUM = 50; |
300 | | |
301 | 0 | Status RuntimeState::create_error_log_file() { |
302 | 0 | if (config::save_load_error_log_to_s3 && config::is_cloud_mode()) { |
303 | 0 | _s3_error_fs = std::dynamic_pointer_cast<io::S3FileSystem>( |
304 | 0 | ExecEnv::GetInstance()->storage_engine().to_cloud().latest_fs()); |
305 | 0 | if (_s3_error_fs) { |
306 | 0 | std::stringstream ss; |
307 | | // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_basic_err_packet.html |
308 | | // shorten the path as much as possible to prevent the length of the presigned URL from |
309 | | // exceeding the MySQL error packet size limit |
310 | 0 | ss << "error_log/" << std::hex << _query_id.hi; |
311 | 0 | _s3_error_log_file_path = ss.str(); |
312 | 0 | } |
313 | 0 | } |
314 | |
|
315 | 0 | static_cast<void>(_exec_env->load_path_mgr()->get_load_error_file_name( |
316 | 0 | _db_name, _import_label, _fragment_instance_id, &_error_log_file_path)); |
317 | 0 | std::string error_log_absolute_path = |
318 | 0 | _exec_env->load_path_mgr()->get_load_error_absolute_path(_error_log_file_path); |
319 | 0 | _error_log_file = std::make_unique<std::ofstream>(error_log_absolute_path, std::ifstream::out); |
320 | 0 | if (!_error_log_file->is_open()) { |
321 | 0 | std::stringstream error_msg; |
322 | 0 | error_msg << "Fail to open error file: [" << _error_log_file_path << "]."; |
323 | 0 | LOG(WARNING) << error_msg.str(); |
324 | 0 | return Status::InternalError(error_msg.str()); |
325 | 0 | } |
326 | 0 | LOG(INFO) << "create error log file: " << _error_log_file_path |
327 | 0 | << ", query id: " << print_id(_query_id) |
328 | 0 | << ", fragment instance id: " << print_id(_fragment_instance_id); |
329 | |
|
330 | 0 | return Status::OK(); |
331 | 0 | } |
332 | | |
// Append one formatted error row (or a summary line) to the load error log.
// `line` and `error_msg` are lazy producers so the possibly expensive strings
// are only materialized when something will actually be written.
// Returns DataQualityError to abort the load when the error cap is exceeded
// under zero-tolerance mode; otherwise Status::OK() (errors past the cap are
// silently dropped).
Status RuntimeState::append_error_msg_to_file(std::function<std::string()> line,
                                              std::function<std::string()> error_msg,
                                              bool is_summary) {
    // Only load jobs keep an error log file.
    if (query_type() != TQueryType::LOAD) {
        return Status::OK();
    }
    // If file haven't been opened, open it here
    if (_error_log_file == nullptr) {
        Status status = create_error_log_file();
        if (!status.ok()) {
            LOG(WARNING) << "Create error file log failed. because: " << status;
            if (_error_log_file != nullptr) {
                _error_log_file->close();
            }
            return status;
        }
    }

    // if num of printed error row exceeds the limit, and this is not a summary message,
    // if _load_zero_tolerance, return Error to stop the load process immediately.
    // The counter is a relaxed atomic: only the count matters, not ordering.
    if (_num_print_error_rows.fetch_add(1, std::memory_order_relaxed) > MAX_ERROR_NUM &&
        !is_summary) {
        if (_load_zero_tolerance) {
            return Status::DataQualityError(
                    "Encountered unqualified data, stop processing. Please check if the source "
                    "data matches the schema, and consider disabling strict mode or increasing "
                    "max_filter_ratio.");
        }
        return Status::OK();
    }

    fmt::memory_buffer out;
    if (is_summary) {
        fmt::format_to(out, "Summary: {}", error_msg());
    } else {
        if (_error_row_number < MAX_ERROR_NUM) {
            // Note: export reason first in case src line too long and be truncated.
            fmt::format_to(out, "Reason: {}. src line [{}]; ", error_msg(), line());
        } else if (_error_row_number == MAX_ERROR_NUM) {
            fmt::format_to(out, "TOO MUCH ERROR! already reach {}. show no more next error.",
                           MAX_ERROR_NUM);
        }
        // Rows beyond MAX_ERROR_NUM leave `out` empty and are skipped below.
    }

    // Truncate a single over-long row to the configured byte limit.
    size_t error_row_size = out.size();
    if (error_row_size > 0) {
        if (error_row_size > config::load_error_log_limit_bytes) {
            fmt::memory_buffer limit_byte_out;
            limit_byte_out.append(out.data(), out.data() + config::load_error_log_limit_bytes);
            (*_error_log_file) << fmt::to_string(limit_byte_out) + "error log is too long"
                               << std::endl;
        } else {
            (*_error_log_file) << fmt::to_string(out) << std::endl;
        }
    }
    return Status::OK();
}
390 | | |
// Return the user-visible path of the error log. In S3 mode, the first call
// closes the local file, uploads it, and replaces _error_log_file_path with a
// presigned URL; subsequent calls (file no longer open) return that URL
// directly. On upload failure the local path is returned instead.
std::string RuntimeState::get_error_log_file_path() {
    // Debug hook: optionally stall here once a path exists (used by tests).
    DBUG_EXECUTE_IF("RuntimeState::get_error_log_file_path.block", {
        if (!_error_log_file_path.empty()) {
            std::this_thread::sleep_for(std::chrono::seconds(1));
        }
    });
    std::lock_guard<std::mutex> l(_s3_error_log_file_lock);
    if (_s3_error_fs && _error_log_file && _error_log_file->is_open()) {
        // close error log file
        _error_log_file->close();
        std::string error_log_absolute_path =
                _exec_env->load_path_mgr()->get_load_error_absolute_path(_error_log_file_path);
        // upload error log file to s3
        Status st = _s3_error_fs->upload(error_log_absolute_path, _s3_error_log_file_path);
        if (!st.ok()) {
            // upload failed and return local error log file path
            LOG(WARNING) << "Fail to upload error file to s3, error_log_file_path="
                         << _error_log_file_path << ", error=" << st;
            return _error_log_file_path;
        }
        // expiration must be less than a week (in seconds) for presigned url
        static const unsigned EXPIRATION_SECONDS = 7 * 24 * 60 * 60 - 1;
        // We should return a public endpoint to user.
        _error_log_file_path = _s3_error_fs->generate_presigned_url(_s3_error_log_file_path,
                                                                    EXPIRATION_SECONDS, true);
    }
    return _error_log_file_path;
}
419 | | |
void RuntimeState::resize_op_id_to_local_state(int operator_size) {
    // Operator ids are negated before indexing (see emplace_local_state /
    // get_local_state); presumably `operator_size` arrives negative from the
    // callers as well, hence the negation here — TODO confirm at call sites.
    _op_id_to_local_state.resize(-operator_size);
}
423 | | |
void RuntimeState::emplace_local_state(
        int id, std::unique_ptr<doris::pipeline::PipelineXLocalStateBase> state) {
    // Operator ids are negative by convention; negate to get the vector index
    // (the vector was sized by resize_op_id_to_local_state with the same trick).
    id = -id;
    DCHECK_LT(id, _op_id_to_local_state.size())
            << state->parent()->get_name() << " node id = " << state->parent()->node_id();
    // Each slot may be filled at most once.
    DCHECK(!_op_id_to_local_state[id]);
    _op_id_to_local_state[id] = std::move(state);
}
432 | | |
433 | 192k | doris::pipeline::PipelineXLocalStateBase* RuntimeState::get_local_state(int id) { |
434 | 192k | id = -id; |
435 | 192k | return _op_id_to_local_state[id].get(); |
436 | 192k | } |
437 | | |
438 | 26.0k | Result<RuntimeState::LocalState*> RuntimeState::get_local_state_result(int id) { |
439 | 26.0k | id = -id; |
440 | 26.0k | if (id >= _op_id_to_local_state.size()) { |
441 | 1 | return ResultError(Status::InternalError("get_local_state out of range size:{} , id:{}", |
442 | 1 | _op_id_to_local_state.size(), id)); |
443 | 1 | } |
444 | 26.0k | if (!_op_id_to_local_state[id]) { |
445 | 0 | return ResultError(Status::InternalError("get_local_state id:{} is null", id)); |
446 | 0 | } |
447 | 26.0k | return _op_id_to_local_state[id].get(); |
448 | 26.0k | }; |
449 | | |
void RuntimeState::emplace_sink_local_state(
        int id, std::unique_ptr<doris::pipeline::PipelineXSinkLocalStateBase> state) {
    // A RuntimeState holds exactly one sink local state; `id` only feeds the
    // DCHECK diagnostics.
    DCHECK(!_sink_local_state) << " id=" << id << " state: " << state->debug_string(0);
    _sink_local_state = std::move(state);
}
455 | | |
doris::pipeline::PipelineXSinkLocalStateBase* RuntimeState::get_sink_local_state() {
    // May be nullptr if emplace_sink_local_state() has not run yet; use
    // get_sink_local_state_result() for the checked variant.
    return _sink_local_state.get();
}
459 | | |
460 | 184k | Result<RuntimeState::SinkLocalState*> RuntimeState::get_sink_local_state_result() { |
461 | 184k | if (!_sink_local_state) { |
462 | 0 | return ResultError(Status::InternalError("_op_id_to_sink_local_state not exist")); |
463 | 0 | } |
464 | 184k | return _sink_local_state.get(); |
465 | 184k | } |
466 | | |
467 | 0 | bool RuntimeState::enable_page_cache() const { |
468 | 0 | return !config::disable_storage_page_cache && |
469 | 0 | (_query_options.__isset.enable_page_cache && _query_options.enable_page_cache); |
470 | 0 | } |
471 | | |
RuntimeFilterMgr* RuntimeState::global_runtime_filter_mgr() {
    // The query-wide filter manager lives on the QueryContext; the
    // per-fragment one comes from local_runtime_filter_mgr().
    return _query_ctx->runtime_filter_mgr();
}
475 | | |
Status RuntimeState::register_producer_runtime_filter(
        const TRuntimeFilterDesc& desc, std::shared_ptr<RuntimeFilterProducer>* producer_filter) {
    // Producers are created by local runtime filter mgr and shared by global runtime filter manager.
    // When RF is published, consumers in both global and local RF mgr will be found.
    RETURN_IF_ERROR(local_runtime_filter_mgr()->register_producer_filter(_query_ctx, desc,
                                                                         producer_filter));
    // Ordering is a data dependency: the global registration consumes the
    // producer created by the local call above.
    RETURN_IF_ERROR(global_runtime_filter_mgr()->register_local_merger_producer_filter(
            _query_ctx, desc, *producer_filter));
    return Status::OK();
}
486 | | |
487 | | Status RuntimeState::register_consumer_runtime_filter( |
488 | | const TRuntimeFilterDesc& desc, bool need_local_merge, int node_id, |
489 | 6 | std::shared_ptr<RuntimeFilterConsumer>* consumer_filter) { |
490 | 6 | bool need_merge = desc.has_remote_targets || need_local_merge; |
491 | 6 | RuntimeFilterMgr* mgr = need_merge ? global_runtime_filter_mgr() : local_runtime_filter_mgr(); |
492 | 6 | return mgr->register_consumer_filter(_query_ctx, desc, node_id, consumer_filter); |
493 | 6 | } |
494 | | |
bool RuntimeState::is_nereids() const {
    // Whether the query was planned by the Nereids planner; the flag is owned
    // by the query-level context.
    return _query_ctx->is_nereids();
}
498 | | |
std::vector<std::shared_ptr<RuntimeProfile>> RuntimeState::pipeline_id_to_profile() {
    // Snapshot copy taken under the shared (reader) lock;
    // build_pipeline_profile() takes the exclusive lock when populating.
    std::shared_lock lc(_pipeline_profile_lock);
    return _pipeline_id_to_profile;
}
503 | | |
504 | | std::vector<std::shared_ptr<RuntimeProfile>> RuntimeState::build_pipeline_profile( |
505 | 0 | std::size_t pipeline_size) { |
506 | 0 | std::unique_lock lc(_pipeline_profile_lock); |
507 | 0 | if (!_pipeline_id_to_profile.empty()) { |
508 | 0 | throw Exception(ErrorCode::INTERNAL_ERROR, |
509 | 0 | "build_pipeline_profile can only be called once."); |
510 | 0 | } |
511 | 0 | _pipeline_id_to_profile.resize(pipeline_size); |
512 | 0 | { |
513 | 0 | size_t pip_idx = 0; |
514 | 0 | for (auto& pipeline_profile : _pipeline_id_to_profile) { |
515 | 0 | pipeline_profile = |
516 | 0 | std::make_shared<RuntimeProfile>("Pipeline : " + std::to_string(pip_idx)); |
517 | 0 | pip_idx++; |
518 | 0 | } |
519 | 0 | } |
520 | 0 | return _pipeline_id_to_profile; |
521 | 0 | } |
522 | | |
bool RuntimeState::low_memory_mode() const {
#ifdef BE_TEST
    // Unit tests may build a RuntimeState without a QueryContext; treat that
    // as "not in low-memory mode".
    if (!_query_ctx) {
        return false;
    }
#endif
    return _query_ctx->low_memory_mode();
}
531 | | |
void RuntimeState::set_id_file_map() {
    // Register this query with the global id manager; the execution timeout is
    // passed along, presumably to bound the mapping's lifetime — confirm in
    // IdManager::add_id_file_map.
    _id_file_map = _exec_env->get_id_manager()->add_id_file_map(_query_id, execution_timeout());
}
535 | | #include "common/compile_check_end.h" |
536 | | } // end namespace doris |