Coverage Report

Created: 2026-04-15 12:36

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/exec/pipeline/task_queue.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "exec/pipeline/task_queue.h"
19
20
// IWYU pragma: no_include <bits/chrono.h>
21
#include <chrono> // IWYU pragma: keep
22
#include <memory>
23
#include <string>
24
25
#include "common/logging.h"
26
#include "exec/pipeline/pipeline_task.h"
27
#include "runtime/workload_group/workload_group.h"
28
29
namespace doris {
30
31
11
PipelineTaskSPtr SubTaskQueue::try_take(bool is_steal) {
32
11
    if (_queue.empty()) {
33
0
        return nullptr;
34
0
    }
35
11
    auto task = _queue.front();
36
11
    _queue.pop();
37
11
    return task;
38
11
}
39
40
////////////////////  PriorityTaskQueue ////////////////////
41
42
26
PriorityTaskQueue::PriorityTaskQueue() : _closed(false) {
43
26
    double factor = 1;
44
182
    for (int i = SUB_QUEUE_LEVEL - 1; i >= 0; i--) {
45
156
        _sub_queues[i].set_level_factor(factor);
46
156
        factor *= LEVEL_QUEUE_TIME_FACTOR;
47
156
    }
48
26
}
49
50
0
void PriorityTaskQueue::close() {
51
0
    std::unique_lock<std::mutex> lock(_work_size_mutex);
52
0
    _closed = true;
53
0
    _wait_task.notify_all();
54
0
    DorisMetrics::instance()->pipeline_task_queue_size->increment(-_total_task_size);
55
0
}
56
57
14
PipelineTaskSPtr PriorityTaskQueue::_try_take_unprotected(bool is_steal) {
58
14
    if (_total_task_size == 0 || _closed) {
59
3
        return nullptr;
60
3
    }
61
62
11
    double min_vruntime = 0;
63
11
    int level = -1;
64
77
    for (int i = 0; i < SUB_QUEUE_LEVEL; ++i) {
65
66
        double cur_queue_vruntime = _sub_queues[i].get_vruntime();
66
66
        if (!_sub_queues[i].empty()) {
67
11
            if (level == -1 || cur_queue_vruntime < min_vruntime) {
68
11
                level = i;
69
11
                min_vruntime = cur_queue_vruntime;
70
11
            }
71
11
        }
72
66
    }
73
11
    DCHECK(level != -1);
74
11
    _queue_level_min_vruntime = uint64_t(min_vruntime);
75
76
11
    auto task = _sub_queues[level].try_take(is_steal);
77
11
    if (task) {
78
11
        task->update_queue_level(level);
79
11
        _total_task_size--;
80
11
        DorisMetrics::instance()->pipeline_task_queue_size->increment(-1);
81
11
    }
82
11
    return task;
83
14
}
84
85
31
int PriorityTaskQueue::_compute_level(uint64_t runtime) {
86
31
    for (int i = 0; i < SUB_QUEUE_LEVEL - 1; ++i) {
87
31
        if (runtime <= _queue_level_limit[i]) {
88
31
            return i;
89
31
        }
90
31
    }
91
0
    return SUB_QUEUE_LEVEL - 1;
92
31
}
93
94
12
PipelineTaskSPtr PriorityTaskQueue::try_take(bool is_steal) {
95
    // TODO other efficient lock? e.g. if get lock fail, return null_ptr
96
12
    std::unique_lock<std::mutex> lock(_work_size_mutex);
97
12
    return _try_take_unprotected(is_steal);
98
12
}
99
100
1
PipelineTaskSPtr PriorityTaskQueue::take(uint32_t timeout_ms) {
101
1
    std::unique_lock<std::mutex> lock(_work_size_mutex);
102
1
    auto task = _try_take_unprotected(false);
103
1
    if (task) {
104
0
        return task;
105
1
    } else {
106
1
        if (timeout_ms > 0) {
107
1
            _wait_task.wait_for(lock, std::chrono::milliseconds(timeout_ms));
108
1
        } else {
109
0
            _wait_task.wait(lock);
110
0
        }
111
1
        return _try_take_unprotected(false);
112
1
    }
113
1
}
114
115
31
Status PriorityTaskQueue::push(PipelineTaskSPtr task) {
116
31
    if (_closed) {
117
0
        return Status::InternalError("WorkTaskQueue closed");
118
0
    }
119
31
    auto level = _compute_level(task->get_runtime_ns());
120
31
    std::unique_lock<std::mutex> lock(_work_size_mutex);
121
122
    // update empty queue's  runtime, to avoid too high priority
123
31
    if (_sub_queues[level].empty() &&
124
31
        double(_queue_level_min_vruntime) > _sub_queues[level].get_vruntime()) {
125
0
        _sub_queues[level].adjust_runtime(_queue_level_min_vruntime);
126
0
    }
127
128
31
    _sub_queues[level].push_back(task);
129
31
    _total_task_size++;
130
31
    DorisMetrics::instance()->pipeline_task_queue_size->increment(1);
131
31
    _wait_task.notify_one();
132
31
    return Status::OK();
133
31
}
134
135
52
MultiCoreTaskQueue::~MultiCoreTaskQueue() = default;
136
137
MultiCoreTaskQueue::MultiCoreTaskQueue(int core_size)
138
52
        : _prio_task_queues(core_size), _closed(false), _core_size(core_size) {}
139
140
26
void MultiCoreTaskQueue::close() {
141
26
    if (_closed) {
142
0
        return;
143
0
    }
144
26
    _closed = true;
145
    // close all priority task queue
146
26
    std::ranges::for_each(_prio_task_queues,
147
26
                          [](auto& prio_task_queue) { prio_task_queue.close(); });
148
26
}
149
150
0
PipelineTaskSPtr MultiCoreTaskQueue::take(int core_id) {
151
0
    PipelineTaskSPtr task = nullptr;
152
0
    while (!_closed) {
153
0
        DCHECK(_prio_task_queues.size() > core_id)
154
0
                << " list size: " << _prio_task_queues.size() << " core_id: " << core_id
155
0
                << " _core_size: " << _core_size << " _next_core: " << _next_core.load();
156
0
        task = _prio_task_queues[core_id].try_take(false);
157
0
        if (task) {
158
0
            break;
159
0
        }
160
0
        task = _steal_take(core_id);
161
0
        if (task) {
162
0
            break;
163
0
        }
164
0
        task = _prio_task_queues[core_id].take(WAIT_CORE_TASK_TIMEOUT_MS /* timeout_ms */);
165
0
        if (task) {
166
0
            break;
167
0
        }
168
0
    }
169
0
    if (task) {
170
0
        task->pop_out_runnable_queue();
171
0
    }
172
0
    return task;
173
0
}
174
175
1
PipelineTaskSPtr MultiCoreTaskQueue::_steal_take(int core_id) {
176
1
    DCHECK(core_id < _core_size);
177
1
    int next_id = core_id;
178
1
    for (int i = 1; i < _core_size; ++i) {
179
0
        ++next_id;
180
0
        if (next_id == _core_size) {
181
0
            next_id = 0;
182
0
        }
183
0
        DCHECK(next_id < _core_size);
184
0
        auto task = _prio_task_queues[next_id].try_take(true);
185
0
        if (task) {
186
0
            return task;
187
0
        }
188
0
    }
189
1
    return nullptr;
190
1
}
191
192
31
Status MultiCoreTaskQueue::push_back(PipelineTaskSPtr task) {
193
31
    int thread_id = task->get_thread_id(_core_size);
194
31
    if (thread_id < 0) {
195
31
        thread_id = _next_core.fetch_add(1) % _core_size;
196
31
    }
197
31
    return push_back(task, thread_id);
198
31
}
199
200
31
Status MultiCoreTaskQueue::push_back(PipelineTaskSPtr task, int core_id) {
201
31
    DCHECK(core_id < _core_size);
202
31
    task->put_in_runnable_queue();
203
31
    return _prio_task_queues[core_id].push(task);
204
31
}
205
206
0
void MultiCoreTaskQueue::update_statistics(PipelineTask* task, int64_t time_spent) {
207
    // if the task not execute but exception early close, core_id == -1
208
    // should not do update_statistics
209
0
    if (auto core_id = task->get_thread_id(_core_size); core_id >= 0) {
210
0
        task->inc_runtime_ns(time_spent);
211
0
        _prio_task_queues[core_id].inc_sub_queue_runtime(task->get_queue_level(), time_spent);
212
0
    }
213
0
}
214
215
} // namespace doris