/root/doris/be/src/pipeline/task_queue.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "task_queue.h" |
19 | | |
20 | | // IWYU pragma: no_include <bits/chrono.h> |
21 | | #include <chrono> // IWYU pragma: keep |
22 | | #include <memory> |
23 | | #include <string> |
24 | | |
25 | | #include "common/logging.h" |
26 | | #include "pipeline/pipeline_task.h" |
27 | | #include "runtime/workload_group/workload_group.h" |
28 | | |
29 | | namespace doris::pipeline { |
30 | | |
// Defaulted destructor defined out of line in this TU (keeps the definition
// out of the header; TaskQueue is the abstract base for MultiCoreTaskQueue).
TaskQueue::~TaskQueue() = default;
32 | | |
33 | 0 | PipelineTask* SubTaskQueue::try_take(bool is_steal) { |
34 | 0 | if (_queue.empty()) { |
35 | 0 | return nullptr; |
36 | 0 | } |
37 | 0 | auto task = _queue.front(); |
38 | 0 | _queue.pop(); |
39 | 0 | return task; |
40 | 0 | } |
41 | | |
42 | | //////////////////// PriorityTaskQueue //////////////////// |
43 | | |
44 | 0 | PriorityTaskQueue::PriorityTaskQueue() : _closed(false) { |
45 | 0 | double factor = 1; |
46 | 0 | for (int i = SUB_QUEUE_LEVEL - 1; i >= 0; i--) { |
47 | 0 | _sub_queues[i].set_level_factor(factor); |
48 | 0 | factor *= LEVEL_QUEUE_TIME_FACTOR; |
49 | 0 | } |
50 | 0 | } |
51 | | |
52 | 0 | void PriorityTaskQueue::close() { |
53 | 0 | std::unique_lock<std::mutex> lock(_work_size_mutex); |
54 | 0 | _closed = true; |
55 | 0 | _wait_task.notify_all(); |
56 | 0 | } |
57 | | |
58 | 0 | PipelineTask* PriorityTaskQueue::_try_take_unprotected(bool is_steal) { |
59 | 0 | if (_total_task_size == 0 || _closed) { |
60 | 0 | return nullptr; |
61 | 0 | } |
62 | | |
63 | 0 | double min_vruntime = 0; |
64 | 0 | int level = -1; |
65 | 0 | for (int i = 0; i < SUB_QUEUE_LEVEL; ++i) { |
66 | 0 | double cur_queue_vruntime = _sub_queues[i].get_vruntime(); |
67 | 0 | if (!_sub_queues[i].empty()) { |
68 | 0 | if (level == -1 || cur_queue_vruntime < min_vruntime) { |
69 | 0 | level = i; |
70 | 0 | min_vruntime = cur_queue_vruntime; |
71 | 0 | } |
72 | 0 | } |
73 | 0 | } |
74 | 0 | DCHECK(level != -1); |
75 | 0 | _queue_level_min_vruntime = uint64_t(min_vruntime); |
76 | |
|
77 | 0 | auto task = _sub_queues[level].try_take(is_steal); |
78 | 0 | if (task) { |
79 | 0 | task->update_queue_level(level); |
80 | 0 | _total_task_size--; |
81 | 0 | } |
82 | 0 | return task; |
83 | 0 | } |
84 | | |
85 | 0 | int PriorityTaskQueue::_compute_level(uint64_t runtime) { |
86 | 0 | for (int i = 0; i < SUB_QUEUE_LEVEL - 1; ++i) { |
87 | 0 | if (runtime <= _queue_level_limit[i]) { |
88 | 0 | return i; |
89 | 0 | } |
90 | 0 | } |
91 | 0 | return SUB_QUEUE_LEVEL - 1; |
92 | 0 | } |
93 | | |
94 | 0 | PipelineTask* PriorityTaskQueue::try_take(bool is_steal) { |
95 | | // TODO other efficient lock? e.g. if get lock fail, return null_ptr |
96 | 0 | std::unique_lock<std::mutex> lock(_work_size_mutex); |
97 | 0 | return _try_take_unprotected(is_steal); |
98 | 0 | } |
99 | | |
100 | 0 | PipelineTask* PriorityTaskQueue::take(uint32_t timeout_ms) { |
101 | 0 | std::unique_lock<std::mutex> lock(_work_size_mutex); |
102 | 0 | auto task = _try_take_unprotected(false); |
103 | 0 | if (task) { |
104 | 0 | return task; |
105 | 0 | } else { |
106 | 0 | if (timeout_ms > 0) { |
107 | 0 | _wait_task.wait_for(lock, std::chrono::milliseconds(timeout_ms)); |
108 | 0 | } else { |
109 | 0 | _wait_task.wait(lock); |
110 | 0 | } |
111 | 0 | return _try_take_unprotected(false); |
112 | 0 | } |
113 | 0 | } |
114 | | |
115 | 0 | Status PriorityTaskQueue::push(PipelineTask* task) { |
116 | 0 | if (_closed) { |
117 | 0 | return Status::InternalError("WorkTaskQueue closed"); |
118 | 0 | } |
119 | 0 | auto level = _compute_level(task->get_runtime_ns()); |
120 | 0 | std::unique_lock<std::mutex> lock(_work_size_mutex); |
121 | | |
122 | | // update empty queue's runtime, to avoid too high priority |
123 | 0 | if (_sub_queues[level].empty() && |
124 | 0 | _queue_level_min_vruntime > _sub_queues[level].get_vruntime()) { |
125 | 0 | _sub_queues[level].adjust_runtime(_queue_level_min_vruntime); |
126 | 0 | } |
127 | |
|
128 | 0 | _sub_queues[level].push_back(task); |
129 | 0 | _total_task_size++; |
130 | 0 | _wait_task.notify_one(); |
131 | 0 | return Status::OK(); |
132 | 0 | } |
133 | | |
// Defaulted destructor; the shared_ptr member releases the per-core queues.
MultiCoreTaskQueue::~MultiCoreTaskQueue() = default;
135 | | |
136 | 0 | MultiCoreTaskQueue::MultiCoreTaskQueue(int core_size) : TaskQueue(core_size), _closed(false) { |
137 | 0 | _prio_task_queue_list = |
138 | 0 | std::make_shared<std::vector<std::unique_ptr<PriorityTaskQueue>>>(core_size); |
139 | 0 | for (int i = 0; i < core_size; i++) { |
140 | 0 | (*_prio_task_queue_list)[i] = std::make_unique<PriorityTaskQueue>(); |
141 | 0 | } |
142 | 0 | } |
143 | | |
144 | 0 | void MultiCoreTaskQueue::close() { |
145 | 0 | if (_closed) { |
146 | 0 | return; |
147 | 0 | } |
148 | 0 | _closed = true; |
149 | 0 | for (int i = 0; i < _core_size; ++i) { |
150 | 0 | (*_prio_task_queue_list)[i]->close(); |
151 | 0 | } |
152 | 0 | std::atomic_store(&_prio_task_queue_list, |
153 | 0 | std::shared_ptr<std::vector<std::unique_ptr<PriorityTaskQueue>>>(nullptr)); |
154 | 0 | } |
155 | | |
// Fetch the next runnable task for `core_id`, blocking (in bounded slices)
// until one is available or the queue is closed; returns nullptr on close.
// Per iteration: (1) non-blocking take from this core's own queue, (2) steal
// from the other cores, (3) bounded blocking wait on the own queue — then loop.
PipelineTask* MultiCoreTaskQueue::take(int core_id) {
    PipelineTask* task = nullptr;
    // Hold our own shared_ptr so close() nulling the member cannot free the
    // vector while this loop is still using it.
    auto prio_task_queue_list =
            std::atomic_load_explicit(&_prio_task_queue_list, std::memory_order_relaxed);
    while (!_closed) {
        DCHECK(prio_task_queue_list->size() > core_id)
                << " list size: " << prio_task_queue_list->size() << " core_id: " << core_id
                << " _core_size: " << _core_size << " _next_core: " << _next_core.load();
        // Fast path: this core's own queue.
        task = (*prio_task_queue_list)[core_id]->try_take(false);
        if (task) {
            task->set_core_id(core_id);
            break;
        }
        // Work stealing: _steal_take sets the core id to the victim core itself.
        task = _steal_take(core_id, *prio_task_queue_list);
        if (task) {
            break;
        }
        // Bounded blocking wait; a timeout yields nullptr and we loop again,
        // re-checking _closed.
        task = (*prio_task_queue_list)[core_id]->take(WAIT_CORE_TASK_TIMEOUT_MS /* timeout_ms */);
        if (task) {
            task->set_core_id(core_id);
            break;
        }
    }
    if (task) {
        task->pop_out_runnable_queue();
    }
    return task;
}
184 | | |
185 | | PipelineTask* MultiCoreTaskQueue::_steal_take( |
186 | 0 | int core_id, std::vector<std::unique_ptr<PriorityTaskQueue>>& prio_task_queue_list) { |
187 | 0 | DCHECK(core_id < _core_size); |
188 | 0 | int next_id = core_id; |
189 | 0 | for (int i = 1; i < _core_size; ++i) { |
190 | 0 | ++next_id; |
191 | 0 | if (next_id == _core_size) { |
192 | 0 | next_id = 0; |
193 | 0 | } |
194 | 0 | DCHECK(next_id < _core_size); |
195 | 0 | auto task = prio_task_queue_list[next_id]->try_take(true); |
196 | 0 | if (task) { |
197 | 0 | task->set_core_id(next_id); |
198 | 0 | return task; |
199 | 0 | } |
200 | 0 | } |
201 | 0 | return nullptr; |
202 | 0 | } |
203 | | |
204 | 0 | Status MultiCoreTaskQueue::push_back(PipelineTask* task) { |
205 | 0 | int core_id = task->get_previous_core_id(); |
206 | 0 | if (core_id < 0) { |
207 | 0 | core_id = _next_core.fetch_add(1) % _core_size; |
208 | 0 | } |
209 | 0 | return push_back(task, core_id); |
210 | 0 | } |
211 | | |
212 | 0 | Status MultiCoreTaskQueue::push_back(PipelineTask* task, int core_id) { |
213 | 0 | DCHECK(core_id < _core_size); |
214 | 0 | task->put_in_runnable_queue(); |
215 | 0 | auto prio_task_queue_list = |
216 | 0 | std::atomic_load_explicit(&_prio_task_queue_list, std::memory_order_relaxed); |
217 | 0 | return (*prio_task_queue_list)[core_id]->push(task); |
218 | 0 | } |
219 | | |
220 | 0 | void MultiCoreTaskQueue::update_statistics(PipelineTask* task, int64_t time_spent) { |
221 | 0 | task->inc_runtime_ns(time_spent); |
222 | 0 | auto prio_task_queue_list = |
223 | 0 | std::atomic_load_explicit(&_prio_task_queue_list, std::memory_order_relaxed); |
224 | 0 | (*prio_task_queue_list)[task->get_core_id()]->inc_sub_queue_runtime(task->get_queue_level(), |
225 | 0 | time_spent); |
226 | 0 | } |
227 | | |
228 | | } // namespace doris::pipeline |