Coverage Report

Created: 2025-09-12 18:20

/root/doris/be/src/pipeline/task_scheduler.cpp
Line | Count | Source
   1 |       | // Licensed to the Apache Software Foundation (ASF) under one
   2 |       | // or more contributor license agreements.  See the NOTICE file
   3 |       | // distributed with this work for additional information
   4 |       | // regarding copyright ownership.  The ASF licenses this file
   5 |       | // to you under the Apache License, Version 2.0 (the
   6 |       | // "License"); you may not use this file except in compliance
   7 |       | // with the License.  You may obtain a copy of the License at
   8 |       | //
   9 |       | //   http://www.apache.org/licenses/LICENSE-2.0
  10 |       | //
  11 |       | // Unless required by applicable law or agreed to in writing,
  12 |       | // software distributed under the License is distributed on an
  13 |       | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  14 |       | // KIND, either express or implied.  See the License for the
  15 |       | // specific language governing permissions and limitations
  16 |       | // under the License.
  17 |       |
  18 |       | #include "task_scheduler.h"
  19 |       |
  20 |       | #include <fmt/format.h>
  21 |       | #include <gen_cpp/Types_types.h>
  22 |       | #include <gen_cpp/types.pb.h>
  23 |       | #include <glog/logging.h>
  24 |       | #include <sched.h>
  25 |       |
  26 |       | // IWYU pragma: no_include <bits/chrono.h>
  27 |       | #include <algorithm>
  28 |       | #include <chrono> // IWYU pragma: keep
  29 |       | #include <cstddef>
  30 |       | #include <functional>
  31 |       | #include <memory>
  32 |       | #include <mutex>
  33 |       | #include <ostream>
  34 |       | #include <string>
  35 |       | #include <thread>
  36 |       | #include <utility>
  37 |       |
  38 |       | #include "common/logging.h"
  39 |       | #include "common/status.h"
  40 |       | #include "pipeline/pipeline_task.h"
  41 |       | #include "pipeline_fragment_context.h"
  42 |       | #include "runtime/exec_env.h"
  43 |       | #include "runtime/query_context.h"
  44 |       | #include "runtime/thread_context.h"
  45 |       | #include "util/thread.h"
  46 |       | #include "util/threadpool.h"
  47 |       | #include "util/time.h"
  48 |       | #include "util/uid_util.h"
  49 |       | #include "vec/runtime/vdatetime_value.h"
  50 |       |
  51 |       | namespace doris::pipeline {
  52 |       | #include "common/compile_check_begin.h"
  53 |    20 | TaskScheduler::~TaskScheduler() {
  54 |    20 |     stop();
  55 |    20 |     LOG(INFO) << "Task scheduler " << _name << " shutdown";
  56 |    20 | }
  57 |       |
  58 |     0 | Status TaskScheduler::start() {
  59 |     0 |     RETURN_IF_ERROR(ThreadPoolBuilder(_name)
  60 |     0 |                             .set_min_threads(_num_threads)
  61 |     0 |                             .set_max_threads(_num_threads)
  62 |     0 |                             .set_max_queue_size(0)
  63 |     0 |                             .set_cgroup_cpu_ctl(_cgroup_cpu_ctl)
  64 |     0 |                             .build(&_fix_thread_pool));
  65 |     0 |     LOG_INFO("TaskScheduler set cores").tag("size", _num_threads);
  66 |     0 |     for (int32_t i = 0; i < _num_threads; ++i) {
  67 |     0 |         RETURN_IF_ERROR(_fix_thread_pool->submit_func([this, i] { _do_work(i); }));
  68 |     0 |     }
  69 |     0 |     return Status::OK();
  70 |     0 | }
  71 |       |
  72 |     0 | Status TaskScheduler::submit(PipelineTaskSPtr task) {
  73 |     0 |     return _task_queue.push_back(task);
  74 |     0 | }
  75 |       |
  76 |       | // after close_task, task maybe destructed.
  77 |     0 | void close_task(PipelineTask* task, Status exec_status, PipelineFragmentContext* ctx) {
  78 |       |     // Has to attach memory tracker here, because the close task will also release some memory.
  79 |       |     // Should count the memory to the query or the query's memory will not decrease when part of
  80 |       |     // task finished.
  81 |     0 |     SCOPED_ATTACH_TASK(task->runtime_state());
  82 |     0 |     if (!exec_status.ok()) {
  83 |     0 |         ctx->cancel(exec_status);
  84 |     0 |         LOG(WARNING) << fmt::format("Pipeline task failed. query_id: {} reason: {}",
  85 |     0 |                                     print_id(ctx->get_query_id()), exec_status.to_string());
  86 |     0 |     }
  87 |     0 |     Status status = task->close(exec_status);
  88 |     0 |     if (!status.ok()) {
  89 |     0 |         ctx->cancel(status);
  90 |     0 |     }
  91 |     0 |     status = task->finalize();
  92 |     0 |     if (!status.ok()) {
  93 |     0 |         ctx->cancel(status);
  94 |     0 |     }
  95 |     0 | }
  96 |       |
  97 |     0 | void TaskScheduler::_do_work(int index) {
  98 |     0 |     while (!_need_to_stop) {
  99 |     0 |         auto task = _task_queue.take(index);
 100 |     0 |         if (!task) {
 101 |     0 |             continue;
 102 |     0 |         }
 103 |       |
 104 |       |         // The task is already running, maybe block in now dependency wake up by other thread
 105 |       |         // but the block thread still hold the task, so put it back to the queue, until the hold
 106 |       |         // thread set task->set_running(false)
 107 |     0 |         if (task->is_running()) {
 108 |     0 |             static_cast<void>(_task_queue.push_back(task, index));
 109 |     0 |             continue;
 110 |     0 |         }
 111 |     0 |         if (task->is_finalized()) {
 112 |     0 |             continue;
 113 |     0 |         }
 114 |     0 |         auto fragment_context = task->fragment_context().lock();
 115 |     0 |         if (!fragment_context) {
 116 |       |             // Fragment already finished
 117 |     0 |             continue;
 118 |     0 |         }
 119 |     0 |         task->set_running(true).set_thread_id(index);
 120 |     0 |         bool done = false;
 121 |     0 |         auto status = Status::OK();
 122 |     0 |         int64_t exec_ns = 0;
 123 |     0 |         SCOPED_RAW_TIMER(&exec_ns);
 124 |     0 |         Defer task_running_defer {[&]() {
 125 |       |             // If fragment is finished, fragment context will be de-constructed with all tasks in it.
 126 |     0 |             if (done || !status.ok()) {
 127 |     0 |                 auto id = task->pipeline_id();
 128 |     0 |                 close_task(task.get(), status, fragment_context.get());
 129 |     0 |                 task->set_running(false);
 130 |     0 |                 fragment_context->decrement_running_task(id);
 131 |     0 |             } else {
 132 |     0 |                 task->set_running(false);
 133 |     0 |             }
 134 |     0 |             _task_queue.update_statistics(task.get(), exec_ns);
 135 |     0 |         }};
 136 |     0 |         bool canceled = fragment_context->is_canceled();
 137 |       |
 138 |       |         // Close task if canceled
 139 |     0 |         if (canceled) {
 140 |     0 |             status = fragment_context->get_query_ctx()->exec_status();
 141 |     0 |             DCHECK(!status.ok());
 142 |     0 |             continue;
 143 |     0 |         }
 144 |       |
 145 |       |         // Main logics of execution
 146 |     0 |         ASSIGN_STATUS_IF_CATCH_EXCEPTION(
 147 |       |                 //TODO: use a better enclose to abstracting these
 148 |     0 |                 if (ExecEnv::GetInstance()->pipeline_tracer_context()->enabled()) {
 149 |     0 |                     TUniqueId query_id = fragment_context->get_query_id();
 150 |     0 |                     std::string task_name = task->task_name();
 151 |       |
 152 |     0 |                     std::thread::id tid = std::this_thread::get_id();
 153 |     0 |                     uint64_t thread_id = *reinterpret_cast<uint64_t*>(&tid);
 154 |     0 |                     uint64_t start_time = MonotonicMicros();
 155 |       |
 156 |     0 |                     status = task->execute(&done);
 157 |       |
 158 |     0 |                     uint64_t end_time = MonotonicMicros();
 159 |     0 |                     ExecEnv::GetInstance()->pipeline_tracer_context()->record(
 160 |     0 |                             {query_id, task_name, static_cast<uint32_t>(index), thread_id,
 161 |     0 |                              start_time, end_time});
 162 |     0 |                 } else { status = task->execute(&done); },
 163 |     0 |                 status);
 164 |     0 |         fragment_context->trigger_report_if_necessary();
 165 |     0 |     }
 166 |     0 | }
 167 |       |
 168 |    20 | void TaskScheduler::stop() {
 169 |    20 |     if (!_shutdown) {
 170 |    20 |         _task_queue.close();
 171 |    20 |         if (_fix_thread_pool) {
 172 |     0 |             _need_to_stop = true;
 173 |     0 |             _fix_thread_pool->shutdown();
 174 |     0 |             _fix_thread_pool->wait();
 175 |     0 |         }
 176 |       |         // Should set at the ending of the stop to ensure that the
 177 |       |         // pool is stopped. For example, if there are 2 threads call stop
 178 |       |         // then if one thread set shutdown = false, then another thread will
 179 |       |         // not check it and will free task scheduler.
 180 |    20 |         _shutdown = true;
 181 |    20 |     }
 182 |    20 | }
 183 |       |
 184 |     0 | Status HybridTaskScheduler::submit(PipelineTaskSPtr task) {
 185 |     0 |     if (task->is_blockable()) {
 186 |     0 |         return _blocking_scheduler.submit(task);
 187 |     0 |     } else {
 188 |     0 |         return _simple_scheduler.submit(task);
 189 |     0 |     }
 190 |     0 | }
 191 |       |
 192 |     0 | Status HybridTaskScheduler::start() {
 193 |     0 |     RETURN_IF_ERROR(_blocking_scheduler.start());
 194 |     0 |     RETURN_IF_ERROR(_simple_scheduler.start());
 195 |     0 |     return Status::OK();
 196 |     0 | }
 197 |       |
 198 |     0 | void HybridTaskScheduler::stop() {
 199 |     0 |     _blocking_scheduler.stop();
 200 |     0 |     _simple_scheduler.stop();
 201 |     0 | }
 202 |       |
 203 |       | } // namespace doris::pipeline
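
Only the destructor (lines 53-56) and parts of TaskScheduler::stop() (lines 168-171, 180-182) are exercised (count 20); start(), submit(), _do_work(), close_task(), and all of HybridTaskScheduler sit at zero. Below is a minimal gtest-style sketch of the kind of BE unit test that would cover the start()/stop() path. The TaskScheduler constructor arguments are an assumption inferred from the members used in this file (_num_threads, _name, _cgroup_cpu_ctl), not a confirmed fixture; covering submit() and _do_work() would additionally need a PipelineTask backed by a real PipelineFragmentContext, which is out of scope here.

#include <gtest/gtest.h>

#include "pipeline/task_scheduler.h"

namespace doris::pipeline {

// Sketch only: targets the uncovered start() path (lines 58-70) and the
// _fix_thread_pool branch of stop() (lines 172-175).
TEST(TaskSchedulerCoverageTest, StartAndStop) {
    // Hypothetical construction: one worker thread, a test name, no cgroup
    // CPU controller. Adjust to the real constructor in task_scheduler.h.
    TaskScheduler scheduler(/*num_threads=*/1, "TEST_SCHEDULER",
                            /*cgroup_cpu_ctl=*/nullptr);
    ASSERT_TRUE(scheduler.start().ok()); // builds and starts the fixed thread pool
    scheduler.stop();                    // closes the task queue and shuts the pool down
}

} // namespace doris::pipeline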