/root/doris/be/src/runtime/runtime_state.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | // This file is copied from |
18 | | // https://github.com/apache/impala/blob/branch-2.9.0/be/src/runtime/runtime-state.cpp |
19 | | // and modified by Doris |
20 | | |
21 | | #include "runtime/runtime_state.h" |
22 | | |
23 | | #include <fmt/format.h> |
24 | | #include <gen_cpp/PaloInternalService_types.h> |
25 | | #include <gen_cpp/Types_types.h> |
26 | | #include <glog/logging.h> |
27 | | |
28 | | #include <fstream> |
29 | | #include <memory> |
30 | | #include <string> |
31 | | |
32 | | #include "cloud/cloud_storage_engine.h" |
33 | | #include "cloud/config.h" |
34 | | #include "common/config.h" |
35 | | #include "common/logging.h" |
36 | | #include "common/object_pool.h" |
37 | | #include "common/status.h" |
38 | | #include "io/fs/s3_file_system.h" |
39 | | #include "olap/storage_engine.h" |
40 | | #include "pipeline/exec/operator.h" |
41 | | #include "pipeline/pipeline_task.h" |
42 | | #include "runtime/exec_env.h" |
43 | | #include "runtime/fragment_mgr.h" |
44 | | #include "runtime/load_path_mgr.h" |
45 | | #include "runtime/memory/mem_tracker_limiter.h" |
46 | | #include "runtime/memory/thread_mem_tracker_mgr.h" |
47 | | #include "runtime/query_context.h" |
48 | | #include "runtime/runtime_filter_mgr.h" |
49 | | #include "runtime/thread_context.h" |
50 | | #include "util/timezone_utils.h" |
51 | | #include "util/uid_util.h" |
52 | | #include "vec/runtime/vdatetime_value.h" |
53 | | |
54 | | namespace doris { |
55 | | using namespace ErrorCode; |
56 | | |
57 | | RuntimeState::RuntimeState(const TPlanFragmentExecParams& fragment_exec_params, |
58 | | const TQueryOptions& query_options, const TQueryGlobals& query_globals, |
59 | | ExecEnv* exec_env, QueryContext* ctx, |
60 | | const std::shared_ptr<MemTrackerLimiter>& query_mem_tracker) |
61 | | : _profile("Fragment " + print_id(fragment_exec_params.fragment_instance_id)), |
62 | | _load_channel_profile("<unnamed>"), |
63 | | _obj_pool(new ObjectPool()), |
64 | | _unreported_error_idx(0), |
65 | | _query_id(fragment_exec_params.query_id), |
66 | | _per_fragment_instance_idx(0), |
67 | | _num_rows_load_total(0), |
68 | | _num_rows_load_filtered(0), |
69 | | _num_rows_load_unselected(0), |
70 | | _num_print_error_rows(0), |
71 | | _num_bytes_load_total(0), |
72 | | _num_finished_scan_range(0), |
73 | | _normal_row_number(0), |
74 | | _error_row_number(0), |
75 | 0 | _query_ctx(ctx) { |
76 | 0 | Status status = |
77 | 0 | init(fragment_exec_params.fragment_instance_id, query_options, query_globals, exec_env); |
78 | 0 | DCHECK(status.ok()); |
79 | 0 | if (query_mem_tracker != nullptr) { |
80 | 0 | _query_mem_tracker = query_mem_tracker; |
81 | 0 | } else { |
82 | 0 | DCHECK(ctx != nullptr); |
83 | 0 | _query_mem_tracker = ctx->query_mem_tracker; |
84 | 0 | } |
85 | 0 | #ifdef BE_TEST |
86 | 0 | if (_query_mem_tracker == nullptr) { |
87 | 0 | init_mem_trackers(); |
88 | 0 | } |
89 | 0 | #endif |
90 | 0 | DCHECK(_query_mem_tracker != nullptr && _query_mem_tracker->label() != "Orphan"); |
91 | 0 | if (fragment_exec_params.__isset.runtime_filter_params) { |
92 | 0 | _query_ctx->runtime_filter_mgr()->set_runtime_filter_params( |
93 | 0 | fragment_exec_params.runtime_filter_params); |
94 | 0 | } |
95 | 0 | } |
96 | | |
97 | | RuntimeState::RuntimeState(const TUniqueId& instance_id, const TUniqueId& query_id, |
98 | | int32_t fragment_id, const TQueryOptions& query_options, |
99 | | const TQueryGlobals& query_globals, ExecEnv* exec_env, QueryContext* ctx) |
100 | | : _profile("Fragment " + print_id(instance_id)), |
101 | | _load_channel_profile("<unnamed>"), |
102 | | _obj_pool(new ObjectPool()), |
103 | | _unreported_error_idx(0), |
104 | | _query_id(query_id), |
105 | | _fragment_id(fragment_id), |
106 | | _per_fragment_instance_idx(0), |
107 | | _num_rows_load_total(0), |
108 | | _num_rows_load_filtered(0), |
109 | | _num_rows_load_unselected(0), |
110 | | _num_rows_filtered_in_strict_mode_partial_update(0), |
111 | | _num_print_error_rows(0), |
112 | | _num_bytes_load_total(0), |
113 | | _num_finished_scan_range(0), |
114 | | _normal_row_number(0), |
115 | | _error_row_number(0), |
116 | 0 | _query_ctx(ctx) { |
117 | 0 | [[maybe_unused]] auto status = init(instance_id, query_options, query_globals, exec_env); |
118 | 0 | DCHECK(status.ok()); |
119 | 0 | _query_mem_tracker = ctx->query_mem_tracker; |
120 | 0 | #ifdef BE_TEST |
121 | 0 | if (_query_mem_tracker == nullptr) { |
122 | 0 | init_mem_trackers(); |
123 | 0 | } |
124 | 0 | #endif |
125 | 0 | DCHECK(_query_mem_tracker != nullptr && _query_mem_tracker->label() != "Orphan"); |
126 | 0 | } |
127 | | |
128 | | RuntimeState::RuntimeState(pipeline::PipelineFragmentContext*, const TUniqueId& instance_id, |
129 | | const TUniqueId& query_id, int32_t fragment_id, |
130 | | const TQueryOptions& query_options, const TQueryGlobals& query_globals, |
131 | | ExecEnv* exec_env, QueryContext* ctx) |
132 | | : _profile("Fragment " + print_id(instance_id)), |
133 | | _load_channel_profile("<unnamed>"), |
134 | | _obj_pool(new ObjectPool()), |
135 | | _unreported_error_idx(0), |
136 | | _query_id(query_id), |
137 | | _fragment_id(fragment_id), |
138 | | _per_fragment_instance_idx(0), |
139 | | _num_rows_load_total(0), |
140 | | _num_rows_load_filtered(0), |
141 | | _num_rows_load_unselected(0), |
142 | | _num_rows_filtered_in_strict_mode_partial_update(0), |
143 | | _num_print_error_rows(0), |
144 | | _num_bytes_load_total(0), |
145 | | _num_finished_scan_range(0), |
146 | | _normal_row_number(0), |
147 | | _error_row_number(0), |
148 | 0 | _query_ctx(ctx) { |
149 | 0 | [[maybe_unused]] auto status = init(instance_id, query_options, query_globals, exec_env); |
150 | 0 | _query_mem_tracker = ctx->query_mem_tracker; |
151 | 0 | #ifdef BE_TEST |
152 | 0 | if (_query_mem_tracker == nullptr) { |
153 | 0 | init_mem_trackers(); |
154 | 0 | } |
155 | 0 | #endif |
156 | 0 | DCHECK(_query_mem_tracker != nullptr && _query_mem_tracker->label() != "Orphan"); |
157 | 0 | DCHECK(status.ok()); |
158 | 0 | } |
159 | | |
160 | | RuntimeState::RuntimeState(const TUniqueId& query_id, int32_t fragment_id, |
161 | | const TQueryOptions& query_options, const TQueryGlobals& query_globals, |
162 | | ExecEnv* exec_env, QueryContext* ctx) |
163 | | : _profile("PipelineX " + std::to_string(fragment_id)), |
164 | | _load_channel_profile("<unnamed>"), |
165 | | _obj_pool(new ObjectPool()), |
166 | | _unreported_error_idx(0), |
167 | | _query_id(query_id), |
168 | | _fragment_id(fragment_id), |
169 | | _per_fragment_instance_idx(0), |
170 | | _num_rows_load_total(0), |
171 | | _num_rows_load_filtered(0), |
172 | | _num_rows_load_unselected(0), |
173 | | _num_rows_filtered_in_strict_mode_partial_update(0), |
174 | | _num_print_error_rows(0), |
175 | | _num_bytes_load_total(0), |
176 | | _num_finished_scan_range(0), |
177 | | _normal_row_number(0), |
178 | | _error_row_number(0), |
179 | 0 | _query_ctx(ctx) { |
180 | | // TODO: do we really need instance id? |
181 | 0 | Status status = init(TUniqueId(), query_options, query_globals, exec_env); |
182 | 0 | DCHECK(status.ok()); |
183 | 0 | _query_mem_tracker = ctx->query_mem_tracker; |
184 | 0 | #ifdef BE_TEST |
185 | 0 | if (_query_mem_tracker == nullptr) { |
186 | 0 | init_mem_trackers(); |
187 | 0 | } |
188 | 0 | #endif |
189 | 0 | DCHECK(_query_mem_tracker != nullptr && _query_mem_tracker->label() != "Orphan"); |
190 | 0 | } |
191 | | |
192 | | RuntimeState::RuntimeState(const TQueryGlobals& query_globals) |
193 | | : _profile("<unnamed>"), |
194 | | _load_channel_profile("<unnamed>"), |
195 | | _obj_pool(new ObjectPool()), |
196 | | _unreported_error_idx(0), |
197 | 11.0k | _per_fragment_instance_idx(0) { |
198 | 11.0k | _query_options.batch_size = DEFAULT_BATCH_SIZE; |
199 | 11.0k | if (query_globals.__isset.time_zone && query_globals.__isset.nano_seconds) { |
200 | 0 | _timezone = query_globals.time_zone; |
201 | 0 | _timestamp_ms = query_globals.timestamp_ms; |
202 | 0 | _nano_seconds = query_globals.nano_seconds; |
203 | 11.0k | } else if (query_globals.__isset.time_zone) { |
204 | 11.0k | _timezone = query_globals.time_zone; |
205 | 11.0k | _timestamp_ms = query_globals.timestamp_ms; |
206 | 11.0k | _nano_seconds = 0; |
207 | 11.0k | } else if (!query_globals.now_string.empty()) { |
208 | 0 | _timezone = TimezoneUtils::default_time_zone; |
209 | 0 | VecDateTimeValue dt; |
210 | 0 | dt.from_date_str(query_globals.now_string.c_str(), query_globals.now_string.size()); |
211 | 0 | int64_t timestamp; |
212 | 0 | dt.unix_timestamp(×tamp, _timezone); |
213 | 0 | _timestamp_ms = timestamp * 1000; |
214 | 0 | _nano_seconds = 0; |
215 | 15 | } else { |
216 | | // Unit tests may reach this branch. |
217 | 15 | _timezone = TimezoneUtils::default_time_zone; |
218 | 15 | _timestamp_ms = 0; |
219 | 15 | _nano_seconds = 0; |
220 | 15 | } |
221 | 11.0k | TimezoneUtils::find_cctz_time_zone(_timezone, _timezone_obj); |
222 | 11.0k | init_mem_trackers("<unnamed>"); |
223 | 11.0k | } |
224 | | |
225 | | RuntimeState::RuntimeState() |
226 | | : _profile("<unnamed>"), |
227 | | _load_channel_profile("<unnamed>"), |
228 | | _obj_pool(new ObjectPool()), |
229 | | _unreported_error_idx(0), |
230 | 9 | _per_fragment_instance_idx(0) { |
231 | 9 | _query_options.batch_size = DEFAULT_BATCH_SIZE; |
232 | 9 | _query_options.be_exec_version = BeExecVersionManager::get_newest_version(); |
233 | 9 | _timezone = TimezoneUtils::default_time_zone; |
234 | 9 | _timestamp_ms = 0; |
235 | 9 | _nano_seconds = 0; |
236 | 9 | TimezoneUtils::find_cctz_time_zone(_timezone, _timezone_obj); |
237 | 9 | _exec_env = ExecEnv::GetInstance(); |
238 | 9 | init_mem_trackers("<unnamed>"); |
239 | 9 | } |
240 | | |
241 | 11.0k | RuntimeState::~RuntimeState() { |
242 | 11.0k | SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_mem_tracker); |
243 | | // close error log file |
244 | 11.0k | if (_error_log_file != nullptr && _error_log_file->is_open()) { |
245 | 0 | _error_log_file->close(); |
246 | 0 | } |
247 | | |
248 | 11.0k | _obj_pool->clear(); |
249 | 11.0k | } |
250 | | |
251 | | Status RuntimeState::init(const TUniqueId& fragment_instance_id, const TQueryOptions& query_options, |
252 | 4 | const TQueryGlobals& query_globals, ExecEnv* exec_env) { |
253 | 4 | _fragment_instance_id = fragment_instance_id; |
254 | 4 | _query_options = query_options; |
255 | 4 | if (query_globals.__isset.time_zone && query_globals.__isset.nano_seconds) { |
256 | 0 | _timezone = query_globals.time_zone; |
257 | 0 | _timestamp_ms = query_globals.timestamp_ms; |
258 | 0 | _nano_seconds = query_globals.nano_seconds; |
259 | 4 | } else if (query_globals.__isset.time_zone) { |
260 | 0 | _timezone = query_globals.time_zone; |
261 | 0 | _timestamp_ms = query_globals.timestamp_ms; |
262 | 0 | _nano_seconds = 0; |
263 | 4 | } else if (!query_globals.now_string.empty()) { |
264 | 0 | _timezone = TimezoneUtils::default_time_zone; |
265 | 0 | VecDateTimeValue dt; |
266 | 0 | dt.from_date_str(query_globals.now_string.c_str(), query_globals.now_string.size()); |
267 | 0 | int64_t timestamp; |
268 | 0 | dt.unix_timestamp(×tamp, _timezone); |
269 | 0 | _timestamp_ms = timestamp * 1000; |
270 | 0 | _nano_seconds = 0; |
271 | 4 | } else { |
272 | | // Unit tests may reach this branch. |
273 | 4 | _timezone = TimezoneUtils::default_time_zone; |
274 | 4 | _timestamp_ms = 0; |
275 | 4 | _nano_seconds = 0; |
276 | 4 | } |
277 | 4 | TimezoneUtils::find_cctz_time_zone(_timezone, _timezone_obj); |
278 | | |
279 | 4 | if (query_globals.__isset.load_zero_tolerance) { |
280 | 4 | _load_zero_tolerance = query_globals.load_zero_tolerance; |
281 | 4 | } |
282 | | |
283 | 4 | _exec_env = exec_env; |
284 | | |
285 | 4 | if (_query_options.max_errors <= 0) { |
286 | | // TODO: fix linker error and uncomment this |
287 | | //_query_options.max_errors = config::max_errors; |
288 | 4 | _query_options.max_errors = 100; |
289 | 4 | } |
290 | | |
291 | 4 | if (_query_options.batch_size <= 0) { |
292 | 4 | _query_options.batch_size = DEFAULT_BATCH_SIZE; |
293 | 4 | } |
294 | | |
295 | 4 | _db_name = "insert_stmt"; |
296 | 4 | _import_label = print_id(fragment_instance_id); |
297 | | |
298 | 4 | return Status::OK(); |
299 | 4 | } |
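// The branches above resolve the session clock in precedence order:
// (time_zone + nano_seconds) -> time_zone only -> now_string -> built-in defaults.
// A minimal standalone sketch of that precedence, assuming a simplified stand-in for
// TQueryGlobals (the struct and helper below are illustrative, not Doris types):
#include <cstdint>
#include <string>
#include <utility>

struct GlobalsSketch {
    bool has_time_zone = false;
    bool has_nano_seconds = false;
    int64_t timestamp_ms = 0;
    int32_t nano_seconds = 0;
    std::string now_string;
};

// Returns {timestamp_ms, nano_seconds}; the now_string branch (VecDateTimeValue parsing)
// is elided because it needs the real datetime utilities.
std::pair<int64_t, int32_t> resolve_clock(const GlobalsSketch& g) {
    if (g.has_time_zone && g.has_nano_seconds) {
        return {g.timestamp_ms, g.nano_seconds};
    }
    if (g.has_time_zone) {
        return {g.timestamp_ms, 0};
    }
    return {0, 0};  // defaults; the real code also handles now_string before reaching here
}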
300 | | |
301 | 0 | std::weak_ptr<QueryContext> RuntimeState::get_query_ctx_weak() { |
302 | 0 | return _exec_env->fragment_mgr()->get_query_ctx(_query_ctx->query_id()); |
303 | 0 | } |
304 | | |
305 | 11.0k | void RuntimeState::init_mem_trackers(const std::string& name, const TUniqueId& id) { |
306 | 11.0k | _query_mem_tracker = MemTrackerLimiter::create_shared( |
307 | 11.0k | MemTrackerLimiter::Type::OTHER, fmt::format("{}#Id={}", name, print_id(id))); |
308 | 11.0k | } |
309 | | |
310 | 0 | std::shared_ptr<MemTrackerLimiter> RuntimeState::query_mem_tracker() const { |
311 | 0 | CHECK(_query_mem_tracker != nullptr); |
312 | 0 | return _query_mem_tracker; |
313 | 0 | } |
314 | | |
315 | 0 | bool RuntimeState::log_error(const std::string& error) { |
316 | 0 | std::lock_guard<std::mutex> l(_error_log_lock); |
317 | |
318 | 0 | if (_error_log.size() < _query_options.max_errors) { |
319 | 0 | _error_log.push_back(error); |
320 | 0 | return true; |
321 | 0 | } |
322 | | |
323 | 0 | return false; |
324 | 0 | } |
325 | | |
326 | 0 | void RuntimeState::get_unreported_errors(std::vector<std::string>* new_errors) { |
327 | 0 | std::lock_guard<std::mutex> l(_error_log_lock); |
328 | |
329 | 0 | if (_unreported_error_idx < _error_log.size()) { |
330 | 0 | new_errors->assign(_error_log.begin() + _unreported_error_idx, _error_log.end()); |
331 | 0 | _unreported_error_idx = _error_log.size(); |
332 | 0 | } |
333 | 0 | } |
334 | | |
335 | 20 | bool RuntimeState::is_cancelled() const { |
336 | | // Maybe we should just return _is_cancelled.load() |
337 | 20 | return !_exec_status.ok() || (_query_ctx && _query_ctx->is_cancelled()); |
338 | 20 | } |
339 | | |
340 | 0 | Status RuntimeState::cancel_reason() const { |
341 | 0 | if (!_exec_status.ok()) { |
342 | 0 | return _exec_status.status(); |
343 | 0 | } |
344 | | |
345 | 0 | if (_query_ctx) { |
346 | 0 | return _query_ctx->exec_status(); |
347 | 0 | } |
348 | | |
349 | 0 | return Status::Cancelled("Query cancelled"); |
350 | 0 | } |
351 | | |
352 | | const int64_t MAX_ERROR_NUM = 50; |
353 | | |
354 | 0 | Status RuntimeState::create_error_log_file() { |
355 | 0 | if (config::save_load_error_log_to_s3 && config::is_cloud_mode()) { |
356 | 0 | _s3_error_fs = std::dynamic_pointer_cast<io::S3FileSystem>( |
357 | 0 | ExecEnv::GetInstance()->storage_engine().to_cloud().latest_fs()); |
358 | 0 | if (_s3_error_fs) { |
359 | 0 | std::stringstream ss; |
360 | | // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_basic_err_packet.html |
361 | | // shorten the path as much as possible to prevent the length of the presigned URL from |
362 | | // exceeding the MySQL error packet size limit |
363 | 0 | ss << "error_log/" << std::hex << _query_id.hi; |
364 | 0 | _s3_error_log_file_path = ss.str(); |
365 | 0 | } |
366 | 0 | } |
367 | |
368 | 0 | static_cast<void>(_exec_env->load_path_mgr()->get_load_error_file_name( |
369 | 0 | _db_name, _import_label, _fragment_instance_id, &_error_log_file_path)); |
370 | 0 | std::string error_log_absolute_path = |
371 | 0 | _exec_env->load_path_mgr()->get_load_error_absolute_path(_error_log_file_path); |
372 | 0 | _error_log_file = std::make_unique<std::ofstream>(error_log_absolute_path, std::ifstream::out); |
373 | 0 | if (!_error_log_file->is_open()) { |
374 | 0 | std::stringstream error_msg; |
375 | 0 | error_msg << "Fail to open error file: [" << _error_log_file_path << "]."; |
376 | 0 | LOG(WARNING) << error_msg.str(); |
377 | 0 | return Status::InternalError(error_msg.str()); |
378 | 0 | } |
379 | 0 | LOG(INFO) << "create error log file: " << _error_log_file_path |
380 | 0 | << ", query id: " << print_id(_query_id) |
381 | 0 | << ", fragment instance id: " << print_id(_fragment_instance_id); |
382 | |
383 | 0 | return Status::OK(); |
384 | 0 | } |
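// In cloud mode with save_load_error_log_to_s3 enabled, the object key above is just
// "error_log/" plus the hex high half of the query id, kept deliberately short so the
// presigned URL generated later stays inside the MySQL error packet size limit.
// A standalone sketch of that key construction (the sample id value is illustrative):
#include <cstdint>
#include <iostream>
#include <sstream>

int main() {
    int64_t query_id_hi = 0x1a2b3c4d5e6f7788;  // stand-in for _query_id.hi
    std::stringstream ss;
    ss << "error_log/" << std::hex << query_id_hi;
    std::cout << ss.str() << std::endl;  // prints: error_log/1a2b3c4d5e6f7788
    return 0;
}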
385 | | |
386 | | Status RuntimeState::append_error_msg_to_file(std::function<std::string()> line, |
387 | | std::function<std::string()> error_msg, |
388 | 0 | bool is_summary) { |
389 | 0 | if (query_type() != TQueryType::LOAD) { |
390 | 0 | return Status::OK(); |
391 | 0 | } |
392 | | // If the file hasn't been opened yet, open it here |
393 | 0 | if (_error_log_file == nullptr) { |
394 | 0 | Status status = create_error_log_file(); |
395 | 0 | if (!status.ok()) { |
396 | 0 | LOG(WARNING) << "Create error file log failed. because: " << status; |
397 | 0 | if (_error_log_file != nullptr) { |
398 | 0 | _error_log_file->close(); |
399 | 0 | } |
400 | 0 | return status; |
401 | 0 | } |
402 | 0 | } |
403 | | |
404 | | // If the number of printed error rows exceeds the limit and this is not a summary message, |
405 | | // fail immediately when _load_zero_tolerance is set; otherwise stop printing further errors. |
406 | 0 | if (_num_print_error_rows.fetch_add(1, std::memory_order_relaxed) > MAX_ERROR_NUM && |
407 | 0 | !is_summary) { |
408 | 0 | if (_load_zero_tolerance) { |
409 | 0 | return Status::DataQualityError( |
410 | 0 | "Encountered unqualified data, stop processing. Please check if the source " |
411 | 0 | "data matches the schema, and consider disabling strict mode or increasing " |
412 | 0 | "max_filter_ratio."); |
413 | 0 | } |
414 | 0 | return Status::OK(); |
415 | 0 | } |
416 | | |
417 | 0 | fmt::memory_buffer out; |
418 | 0 | if (is_summary) { |
419 | 0 | fmt::format_to(out, "Summary: {}", error_msg()); |
420 | 0 | } else { |
421 | 0 | if (_error_row_number < MAX_ERROR_NUM) { |
422 | | // Note: output the reason first in case the src line is too long and gets truncated. |
423 | 0 | fmt::format_to(out, "Reason: {}. src line [{}]; ", error_msg(), line()); |
424 | 0 | } else if (_error_row_number == MAX_ERROR_NUM) { |
425 | 0 | fmt::format_to(out, "TOO MUCH ERROR! already reach {}. show no more next error.", |
426 | 0 | MAX_ERROR_NUM); |
427 | 0 | } |
428 | 0 | } |
429 | |
430 | 0 | size_t error_row_size = out.size(); |
431 | 0 | if (error_row_size > 0) { |
432 | 0 | if (error_row_size > config::load_error_log_limit_bytes) { |
433 | 0 | fmt::memory_buffer limit_byte_out; |
434 | 0 | limit_byte_out.append(out.data(), out.data() + config::load_error_log_limit_bytes); |
435 | 0 | (*_error_log_file) << fmt::to_string(limit_byte_out) + "error log is too long" |
436 | 0 | << std::endl; |
437 | 0 | } else { |
438 | 0 | (*_error_log_file) << fmt::to_string(out) << std::endl; |
439 | 0 | } |
440 | 0 | } |
441 | 0 | return Status::OK(); |
442 | 0 | } |
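// Error lines longer than config::load_error_log_limit_bytes are truncated before being
// written to the error log file. A minimal sketch of the same truncation with fmt
// (std::back_inserter is used for portability across fmt releases; the 16-byte limit is
// an illustrative stand-in for config::load_error_log_limit_bytes):
#include <fmt/format.h>

#include <cstddef>
#include <iostream>
#include <iterator>

int main() {
    constexpr std::size_t limit_bytes = 16;
    fmt::memory_buffer out;
    fmt::format_to(std::back_inserter(out), "Reason: {}. src line [{}]; ", "type mismatch",
                   "1,abc,2024-01-01");
    if (out.size() > limit_bytes) {
        fmt::memory_buffer truncated;
        truncated.append(out.data(), out.data() + limit_bytes);
        std::cout << fmt::to_string(truncated) + "error log is too long" << std::endl;
    } else {
        std::cout << fmt::to_string(out) << std::endl;
    }
    return 0;
}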
443 | | |
444 | 0 | std::string RuntimeState::get_error_log_file_path() { |
445 | 0 | DBUG_EXECUTE_IF("RuntimeState::get_error_log_file_path.block", { |
446 | 0 | if (!_error_log_file_path.empty()) { |
447 | 0 | std::this_thread::sleep_for(std::chrono::seconds(1)); |
448 | 0 | } |
449 | 0 | }); |
450 | 0 | std::lock_guard<std::mutex> l(_s3_error_log_file_lock); |
451 | 0 | if (_s3_error_fs && _error_log_file && _error_log_file->is_open()) { |
452 | | // close error log file |
453 | 0 | _error_log_file->close(); |
454 | 0 | std::string error_log_absolute_path = |
455 | 0 | _exec_env->load_path_mgr()->get_load_error_absolute_path(_error_log_file_path); |
456 | | // upload error log file to s3 |
457 | 0 | Status st = _s3_error_fs->upload(error_log_absolute_path, _s3_error_log_file_path); |
458 | 0 | if (!st.ok()) { |
459 | | // upload failed; return the local error log file path |
460 | 0 | LOG(WARNING) << "Fail to upload error file to s3, error_log_file_path=" |
461 | 0 | << _error_log_file_path << ", error=" << st; |
462 | 0 | return _error_log_file_path; |
463 | 0 | } |
464 | | // expiration must be less than a week (in seconds) for presigned url |
465 | 0 | static const unsigned EXPIRATION_SECONDS = 7 * 24 * 60 * 60 - 1; |
466 | | // We should return a public endpoint to the user. |
467 | 0 | _error_log_file_path = _s3_error_fs->generate_presigned_url(_s3_error_log_file_path, |
468 | 0 | EXPIRATION_SECONDS, true); |
469 | 0 | } |
470 | 0 | return _error_log_file_path; |
471 | 0 | } |
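// The expiration above is pinned just under seven days, the usual upper bound object
// stores accept for presigned URLs. A compile-time check of the EXPIRATION_SECONDS
// arithmetic:
static_assert(7 * 24 * 60 * 60 - 1 == 604799, "one week in seconds, minus one");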
472 | | |
473 | 4 | void RuntimeState::resize_op_id_to_local_state(int operator_size) { |
474 | 4 | _op_id_to_local_state.resize(-operator_size); |
475 | 4 | } |
476 | | |
477 | | void RuntimeState::emplace_local_state( |
478 | 4 | int id, std::unique_ptr<doris::pipeline::PipelineXLocalStateBase> state) { |
479 | 4 | id = -id; |
480 | 4 | DCHECK(id < _op_id_to_local_state.size()); |
481 | 4 | DCHECK(!_op_id_to_local_state[id]); |
482 | 4 | _op_id_to_local_state[id] = std::move(state); |
483 | 4 | } |
484 | | |
485 | 4 | doris::pipeline::PipelineXLocalStateBase* RuntimeState::get_local_state(int id) { |
486 | 4 | id = -id; |
487 | 4 | return _op_id_to_local_state[id].get(); |
488 | 4 | } |
489 | | |
490 | 4 | Result<RuntimeState::LocalState*> RuntimeState::get_local_state_result(int id) { |
491 | 4 | id = -id; |
492 | 4 | if (id >= _op_id_to_local_state.size()) { |
493 | 0 | return ResultError(Status::InternalError("get_local_state out of range size:{} , id:{}", |
494 | 0 | _op_id_to_local_state.size(), id)); |
495 | 0 | } |
496 | 4 | if (!_op_id_to_local_state[id]) { |
497 | 0 | return ResultError(Status::InternalError("get_local_state id:{} is null", id)); |
498 | 0 | } |
499 | 4 | return _op_id_to_local_state[id].get(); |
500 | 4 | }; |
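// The local-state table above is keyed by negated operator ids: callers pass negative
// ids and sizes, and the vector is sized and indexed by their absolute values.
// A standalone sketch of that indexing scheme (the stub type is illustrative, not a
// Doris type):
#include <cassert>
#include <memory>
#include <vector>

struct LocalStateStub {};

int main() {
    std::vector<std::unique_ptr<LocalStateStub>> table;
    int operator_size = -3;        // negative, as passed to resize_op_id_to_local_state()
    table.resize(-operator_size);  // three slots
    int id = -2;                   // a negative operator id
    table[-id] = std::make_unique<LocalStateStub>();
    assert(table[-id] != nullptr);
    return 0;
}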
501 | | |
502 | | void RuntimeState::emplace_sink_local_state( |
503 | 0 | int id, std::unique_ptr<doris::pipeline::PipelineXSinkLocalStateBase> state) { |
504 | 0 | DCHECK(!_sink_local_state) << " id=" << id << " state: " << state->debug_string(0); |
505 | 0 | _sink_local_state = std::move(state); |
506 | 0 | } |
507 | | |
508 | 0 | doris::pipeline::PipelineXSinkLocalStateBase* RuntimeState::get_sink_local_state() { |
509 | 0 | return _sink_local_state.get(); |
510 | 0 | } |
511 | | |
512 | 0 | Result<RuntimeState::SinkLocalState*> RuntimeState::get_sink_local_state_result() { |
513 | 0 | if (!_sink_local_state) { |
514 | 0 | return ResultError(Status::InternalError("_op_id_to_sink_local_state not exist")); |
515 | 0 | } |
516 | 0 | return _sink_local_state.get(); |
517 | 0 | } |
518 | | |
519 | 0 | bool RuntimeState::enable_page_cache() const { |
520 | 0 | return !config::disable_storage_page_cache && |
521 | 0 | (_query_options.__isset.enable_page_cache && _query_options.enable_page_cache); |
522 | 0 | } |
523 | | |
524 | 0 | RuntimeFilterMgr* RuntimeState::global_runtime_filter_mgr() { |
525 | 0 | return _query_ctx->runtime_filter_mgr(); |
526 | 0 | } |
527 | | |
528 | | Status RuntimeState::register_producer_runtime_filter( |
529 | 0 | const TRuntimeFilterDesc& desc, std::shared_ptr<IRuntimeFilter>* producer_filter) { |
530 | | // Producers are created by the local runtime filter manager and shared with the global runtime filter manager. |
531 | | // When the RF is published, consumers in both the global and local RF managers will be found. |
532 | 0 | RETURN_IF_ERROR(local_runtime_filter_mgr()->register_producer_filter(desc, query_options(), |
533 | 0 | producer_filter)); |
534 | 0 | RETURN_IF_ERROR(global_runtime_filter_mgr()->register_local_merge_producer_filter( |
535 | 0 | desc, query_options(), *producer_filter)); |
536 | 0 | return Status::OK(); |
537 | 0 | } |
538 | | |
539 | | Status RuntimeState::register_consumer_runtime_filter( |
540 | | const doris::TRuntimeFilterDesc& desc, bool need_local_merge, int node_id, |
541 | 0 | std::shared_ptr<IRuntimeFilter>* consumer_filter) { |
542 | 0 | if (desc.has_remote_targets || need_local_merge) { |
543 | 0 | return global_runtime_filter_mgr()->register_consumer_filter(desc, query_options(), node_id, |
544 | 0 | consumer_filter, true); |
545 | 0 | } else { |
546 | 0 | return local_runtime_filter_mgr()->register_consumer_filter(desc, query_options(), node_id, |
547 | 0 | consumer_filter, false); |
548 | 0 | } |
549 | 0 | } |
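// Consumer registration above is routed to the global runtime filter manager when the
// filter has remote targets or needs a local merge, and to the per-instance local
// manager otherwise. A minimal sketch of just that routing decision (the enum and
// helper are illustrative, not Doris APIs):
#include <iostream>

enum class FilterMgrScope { GLOBAL, LOCAL };

FilterMgrScope choose_consumer_mgr(bool has_remote_targets, bool need_local_merge) {
    return (has_remote_targets || need_local_merge) ? FilterMgrScope::GLOBAL
                                                    : FilterMgrScope::LOCAL;
}

int main() {
    std::cout << (choose_consumer_mgr(true, false) == FilterMgrScope::GLOBAL) << '\n';   // 1
    std::cout << (choose_consumer_mgr(false, false) == FilterMgrScope::LOCAL) << '\n';   // 1
    return 0;
}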
550 | | |
551 | 0 | bool RuntimeState::is_nereids() const { |
552 | 0 | return _query_ctx->is_nereids(); |
553 | 0 | } |
554 | | |
555 | 0 | std::vector<std::shared_ptr<RuntimeProfile>> RuntimeState::pipeline_id_to_profile() { |
556 | 0 | std::shared_lock lc(_pipeline_profile_lock); |
557 | 0 | return _pipeline_id_to_profile; |
558 | 0 | } |
559 | | |
560 | | std::vector<std::shared_ptr<RuntimeProfile>> RuntimeState::build_pipeline_profile( |
561 | 0 | std::size_t pipeline_size) { |
562 | 0 | std::unique_lock lc(_pipeline_profile_lock); |
563 | 0 | if (!_pipeline_id_to_profile.empty()) { |
564 | 0 | throw Exception(ErrorCode::INTERNAL_ERROR, |
565 | 0 | "build_pipeline_profile can only be called once."); |
566 | 0 | } |
567 | 0 | _pipeline_id_to_profile.resize(pipeline_size); |
568 | 0 | { |
569 | 0 | size_t pip_idx = 0; |
570 | 0 | for (auto& pipeline_profile : _pipeline_id_to_profile) { |
571 | 0 | pipeline_profile = |
572 | 0 | std::make_shared<RuntimeProfile>("Pipeline : " + std::to_string(pip_idx)); |
573 | 0 | pip_idx++; |
574 | 0 | } |
575 | 0 | } |
576 | 0 | return _pipeline_id_to_profile; |
577 | 0 | } |
578 | | |
579 | | } // end namespace doris |