be/src/exec/pipeline/task_scheduler.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "exec/pipeline/task_scheduler.h" |
19 | | |
20 | | #include <fmt/format.h> |
21 | | #include <gen_cpp/types.pb.h> |
22 | | #include <glog/logging.h> |
23 | | #include <sched.h> |
24 | | |
25 | | // IWYU pragma: no_include <bits/chrono.h> |
26 | | #include <algorithm> |
27 | | #include <chrono> // IWYU pragma: keep |
28 | | #include <cstddef> |
29 | | #include <functional> |
30 | | #include <memory> |
31 | | #include <mutex> |
32 | | #include <ostream> |
33 | | #include <string> |
34 | | #include <utility> |
35 | | |
36 | | #include "common/logging.h" |
37 | | #include "common/status.h" |
38 | | #include "core/value/vdatetime_value.h" |
39 | | #include "exec/pipeline/pipeline_fragment_context.h" |
40 | | #include "exec/pipeline/pipeline_task.h" |
41 | | #include "runtime/query_context.h" |
42 | | #include "runtime/thread_context.h" |
43 | | #include "util/thread.h" |
44 | | #include "util/threadpool.h" |
45 | | #include "util/time.h" |
46 | | #include "util/uid_util.h" |
47 | | |
48 | | namespace doris { |
// Destructor: ensures the worker pool is torn down before members are
// destroyed. stop() is idempotent (guarded by _shutdown), so an explicit
// earlier stop() call by the owner is safe.
TaskScheduler::~TaskScheduler() {
    stop();
    LOG(INFO) << "Task scheduler " << _name << " shutdown";
}
53 | | |
54 | 70 | Status TaskScheduler::start() { |
55 | 70 | RETURN_IF_ERROR(ThreadPoolBuilder(_name) |
56 | 70 | .set_min_threads(_num_threads) |
57 | 70 | .set_max_threads(_num_threads) |
58 | 70 | .set_max_queue_size(0) |
59 | 70 | .set_cgroup_cpu_ctl(_cgroup_cpu_ctl) |
60 | 70 | .build(&_fix_thread_pool)); |
61 | 70 | LOG_INFO("TaskScheduler set cores").tag("size", _num_threads); |
62 | 3.19k | for (int32_t i = 0; i < _num_threads; ++i) { |
63 | 3.12k | RETURN_IF_ERROR(_fix_thread_pool->submit_func([this, i] { _do_work(i); })); |
64 | 3.12k | } |
65 | 70 | return Status::OK(); |
66 | 70 | } |
67 | | |
68 | 7.32M | Status TaskScheduler::submit(PipelineTaskSPtr task) { |
69 | 7.32M | return _task_queue.push_back(task); |
70 | 7.32M | } |
71 | | |
72 | | // after close_task, task maybe destructed. |
73 | 2.01M | void close_task(PipelineTask* task, Status exec_status, PipelineFragmentContext* ctx) { |
74 | | // Has to attach memory tracker here, because the close task will also release some memory. |
75 | | // Should count the memory to the query or the query's memory will not decrease when part of |
76 | | // task finished. |
77 | 2.01M | SCOPED_ATTACH_TASK(task->runtime_state()); |
78 | 2.01M | if (!exec_status.ok()) { |
79 | 4.01k | ctx->cancel(exec_status); |
80 | 4.01k | LOG(WARNING) << fmt::format("Pipeline task failed. query_id: {} reason: {}", |
81 | 4.01k | print_id(ctx->get_query_id()), exec_status.to_string()); |
82 | 4.01k | } |
83 | 2.01M | Status status = task->close(exec_status); |
84 | 2.01M | if (!status.ok()) { |
85 | 1.37k | ctx->cancel(status); |
86 | 1.37k | } |
87 | 2.01M | status = task->finalize(); |
88 | 2.01M | if (!status.ok()) { |
89 | 0 | ctx->cancel(status); |
90 | 0 | } |
91 | 2.01M | } |
92 | | |
// Worker loop executed by exactly one pool thread per `index`. Repeatedly
// takes tasks from this worker's queue and runs them until stop() sets
// _need_to_stop. Invariants: a task is executed by at most one thread at a
// time (guarded by set_running), and set_running(false) is always restored
// before the next loop iteration via the Defer below.
void TaskScheduler::_do_work(int index) {
    while (!_need_to_stop) {
        // take() may return null (e.g. queue closed or timed out) — just retry.
        auto task = _task_queue.take(index);
        if (!task) {
            continue;
        }

        // The task is already running, maybe block in now dependency wake up by other thread
        // but the block thread still hold the task, so put it back to the queue, until the hold
        // thread set task->set_running(false)
        // set_running return the old value
        if (task->set_running(true)) {
            static_cast<void>(_task_queue.push_back(task, index));
            continue;
        }

        // Already finalized by a previous round: nothing to do, release the
        // running flag we just acquired.
        if (task->is_finalized()) {
            task->set_running(false);
            continue;
        }

        // fragment_context is held as a weak_ptr; lock() fails when the
        // fragment already finished and was destroyed.
        auto fragment_context = task->fragment_context().lock();
        if (!fragment_context) {
            // Fragment already finished
            task->set_running(false);
            continue;
        }

        task->set_thread_id(index);

        bool done = false;
        auto status = Status::OK();
        int64_t exec_ns = 0;
        SCOPED_RAW_TIMER(&exec_ns);
        // Runs on every exit path of this iteration (including the `continue`
        // in the canceled branch below): closes the task when it is done or
        // errored, clears the running flag, and records execution time.
        Defer task_running_defer {[&]() {
            // If fragment is finished, fragment context will be de-constructed with all tasks in it.
            if (done || !status.ok()) {
                auto id = task->pipeline_id();
                // NOTE: close_task may be the last action before the task is
                // destructed (see comment on close_task).
                close_task(task.get(), status, fragment_context.get());
                task->set_running(false);
                fragment_context->decrement_running_task(id);
            } else {
                task->set_running(false);
            }
            _task_queue.update_statistics(task.get(), exec_ns);
        }};
        bool canceled = fragment_context->is_canceled();

        // Close task if canceled
        if (canceled) {
            // Propagate the query's cancel reason so the Defer closes the
            // task with the real error.
            status = fragment_context->get_query_ctx()->exec_status();
            DCHECK(!status.ok());
            continue;
        }

        // Main logics of execution
        // Exceptions thrown by execute() are converted into `status` so the
        // Defer above handles them like any other failure.
        ASSIGN_STATUS_IF_CATCH_EXCEPTION(status = task->execute(&done), status);
        fragment_context->trigger_report_if_necessary();
    }
}
153 | | |
// Shuts the scheduler down: closes the queue (unblocking workers stuck in
// take()), signals the worker loops to exit, and drains the thread pool.
// Idempotent via the _shutdown flag.
// NOTE(review): the `if (!_shutdown)` check-then-act is not synchronized
// here — presumably concurrent stop() calls are serialized by the caller
// or _shutdown is atomic; confirm against the header.
void TaskScheduler::stop() {
    if (!_shutdown) {
        _task_queue.close();
        // _fix_thread_pool is null when start() was never called (or failed
        // before building the pool).
        if (_fix_thread_pool) {
            _need_to_stop = true;
            _fix_thread_pool->shutdown();
            _fix_thread_pool->wait();
        }
        // Should set at the ending of the stop to ensure that the
        // pool is stopped. For example, if there are 2 threads call stop
        // then if one thread set shutdown = false, then another thread will
        // not check it and will free task scheduler.
        _shutdown = true;
    }
}
169 | | |
170 | 7.32M | Status HybridTaskScheduler::submit(PipelineTaskSPtr task) { |
171 | 7.32M | if (task->is_blockable()) { |
172 | 7.61k | return _blocking_scheduler.submit(task); |
173 | 7.31M | } else { |
174 | 7.31M | return _simple_scheduler.submit(task); |
175 | 7.31M | } |
176 | 7.32M | } |
177 | | |
178 | 35 | Status HybridTaskScheduler::start() { |
179 | 35 | RETURN_IF_ERROR(_blocking_scheduler.start()); |
180 | 35 | RETURN_IF_ERROR(_simple_scheduler.start()); |
181 | 35 | return Status::OK(); |
182 | 35 | } |
183 | | |
// Stops both underlying schedulers. Each TaskScheduler::stop() is
// idempotent, so repeated calls are harmless.
void HybridTaskScheduler::stop() {
    _blocking_scheduler.stop();
    _simple_scheduler.stop();
}
188 | | |
189 | | } // namespace doris |