Coverage Report

Created: 2026-04-17 17:49

be/src/exec/pipeline/pipeline_task.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "exec/pipeline/pipeline_task.h"
19
20
#include <fmt/core.h>
21
#include <fmt/format.h>
22
#include <gen_cpp/Metrics_types.h>
23
#include <glog/logging.h>
24
25
#include <algorithm>
26
#include <memory>
27
#include <ostream>
28
#include <vector>
29
30
#include "common/logging.h"
31
#include "common/status.h"
32
#include "core/block/block.h"
33
#include "exec/operator/exchange_source_operator.h"
34
#include "exec/operator/operator.h"
35
#include "exec/operator/rec_cte_source_operator.h"
36
#include "exec/operator/scan_operator.h"
37
#include "exec/pipeline/dependency.h"
38
#include "exec/pipeline/pipeline.h"
39
#include "exec/pipeline/pipeline_fragment_context.h"
40
#include "exec/pipeline/revokable_task.h"
41
#include "exec/pipeline/task_queue.h"
42
#include "exec/pipeline/task_scheduler.h"
43
#include "exec/spill/spill_file.h"
44
#include "runtime/descriptors.h"
45
#include "runtime/exec_env.h"
46
#include "runtime/query_context.h"
47
#include "runtime/runtime_profile.h"
48
#include "runtime/runtime_profile_counter_names.h"
49
#include "runtime/thread_context.h"
50
#include "runtime/workload_group/workload_group_manager.h"
51
#include "util/defer_op.h"
52
#include "util/mem_info.h"
53
#include "util/uid_util.h"
54
55
namespace doris {
56
class RuntimeState;
57
} // namespace doris
58
59
namespace doris {
60
61
PipelineTask::PipelineTask(PipelinePtr& pipeline, uint32_t task_id, RuntimeState* state,
62
                           std::shared_ptr<PipelineFragmentContext> fragment_context,
63
                           RuntimeProfile* parent_profile,
64
                           std::map<int, std::pair<std::shared_ptr<BasicSharedState>,
65
                                                   std::vector<std::shared_ptr<Dependency>>>>
66
                                   shared_state_map,
67
                           int task_idx)
68
        :
69
#ifdef BE_TEST
70
72.1k
          _query_id(fragment_context ? fragment_context->get_query_id() : TUniqueId()),
71
#else
72
          _query_id(fragment_context->get_query_id()),
73
#endif
74
72.1k
          _index(task_id),
75
72.1k
          _pipeline(pipeline),
76
72.1k
          _opened(false),
77
72.1k
          _state(state),
78
72.1k
          _fragment_context(fragment_context),
79
72.1k
          _parent_profile(parent_profile),
80
72.1k
          _operators(pipeline->operators()),
81
72.1k
          _source(_operators.front().get()),
82
72.1k
          _root(_operators.back().get()),
83
72.1k
          _sink(pipeline->sink_shared_pointer()),
84
72.1k
          _shared_state_map(std::move(shared_state_map)),
85
72.1k
          _task_idx(task_idx),
86
72.1k
          _memory_sufficient_dependency(state->get_query_ctx()->get_memory_sufficient_dependency()),
87
72.1k
          _pipeline_name(_pipeline->name()) {
88
#ifndef BE_TEST
89
    _query_mem_tracker = fragment_context->get_query_ctx()->query_mem_tracker();
90
#endif
91
72.1k
    _execution_dependencies.push_back(state->get_query_ctx()->get_execution_dependency());
92
72.1k
    if (!_shared_state_map.contains(_sink->dests_id().front())) {
93
72.1k
        auto shared_state = _sink->create_shared_state();
94
72.1k
        if (shared_state) {
95
35
            _sink_shared_state = shared_state;
96
35
        }
97
72.1k
    }
98
72.1k
}
99
100
72.1k
PipelineTask::~PipelineTask() {
101
72.1k
    auto reset_member = [&]() {
102
72.1k
        _shared_state_map.clear();
103
72.1k
        _sink_shared_state.reset();
104
72.1k
        _op_shared_states.clear();
105
72.1k
        _sink.reset();
106
72.1k
        _operators.clear();
107
72.1k
        _block.reset();
108
72.1k
        _pipeline.reset();
109
72.1k
    };
110
// PipelineTask is also held by the task queue (https://github.com/apache/doris/pull/49753),
111
// so it may be the last one to be destructed.
112
// But a pipeline task holds some objects, like operators and shared state, so we should release
113
// memory manually.
114
#ifndef BE_TEST
115
    if (_query_mem_tracker) {
116
        SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_mem_tracker);
117
        reset_member();
118
        return;
119
    }
120
#endif
121
72.1k
    reset_member();
122
72.1k
}
123
124
Status PipelineTask::prepare(const std::vector<TScanRangeParams>& scan_range, const int sender_id,
125
21
                             const TDataSink& tsink) {
126
21
    DCHECK(_sink);
127
21
    _init_profile();
128
21
    SCOPED_TIMER(_task_profile->total_time_counter());
129
21
    SCOPED_CPU_TIMER(_task_cpu_timer);
130
21
    SCOPED_TIMER(_prepare_timer);
131
21
    DBUG_EXECUTE_IF("fault_inject::PipelineXTask::prepare", {
132
21
        Status status = Status::Error<INTERNAL_ERROR>("fault_inject pipeline_task prepare failed");
133
21
        return status;
134
21
    });
135
21
    {
136
        // set sink local state
137
21
        LocalSinkStateInfo info {_task_idx,         _task_profile.get(),
138
21
                                 sender_id,         get_sink_shared_state().get(),
139
21
                                 _shared_state_map, tsink};
140
21
        RETURN_IF_ERROR(_sink->setup_local_state(_state, info));
141
21
    }
142
143
21
    _scan_ranges = scan_range;
144
21
    auto* parent_profile = _state->get_sink_local_state()->operator_profile();
145
146
44
    for (int op_idx = cast_set<int>(_operators.size() - 1); op_idx >= 0; op_idx--) {
147
23
        auto& op = _operators[op_idx];
148
23
        LocalStateInfo info {parent_profile, _scan_ranges, get_op_shared_state(op->operator_id()),
149
23
                             _shared_state_map, _task_idx};
150
23
        RETURN_IF_ERROR(op->setup_local_state(_state, info));
151
23
        parent_profile = _state->get_local_state(op->operator_id())->operator_profile();
152
23
    }
153
21
    {
154
21
        const auto& deps =
155
21
                _state->get_local_state(_source->operator_id())->execution_dependencies();
156
21
        std::unique_lock<std::mutex> lc(_dependency_lock);
157
21
        std::copy(deps.begin(), deps.end(),
158
21
                  std::inserter(_execution_dependencies, _execution_dependencies.end()));
159
21
    }
160
21
    if (auto fragment = _fragment_context.lock()) {
161
20
        if (fragment->get_query_ctx()->is_cancelled()) {
162
0
            unblock_all_dependencies();
163
0
            return fragment->get_query_ctx()->exec_status();
164
0
        }
165
20
    } else {
166
1
        return Status::InternalError("Fragment already finished! Query: {}", print_id(_query_id));
167
1
    }
168
20
    _block = doris::Block::create_unique();
169
20
    return _state_transition(State::RUNNABLE);
170
21
}
171
172
14
Status PipelineTask::_extract_dependencies() {
173
14
    std::vector<std::vector<Dependency*>> read_dependencies;
174
14
    std::vector<Dependency*> write_dependencies;
175
14
    std::vector<Dependency*> finish_dependencies;
176
14
    read_dependencies.resize(_operators.size());
177
14
    size_t i = 0;
178
16
    for (auto& op : _operators) {
179
16
        auto* local_state = _state->get_local_state(op->operator_id());
180
16
        DCHECK(local_state);
181
16
        read_dependencies[i] = local_state->dependencies();
182
16
        auto* fin_dep = local_state->finishdependency();
183
16
        if (fin_dep) {
184
9
            finish_dependencies.push_back(fin_dep);
185
9
        }
186
16
        i++;
187
16
    }
188
14
    DBUG_EXECUTE_IF("fault_inject::PipelineXTask::_extract_dependencies", {
189
14
        Status status = Status::Error<INTERNAL_ERROR>(
190
14
                "fault_inject pipeline_task _extract_dependencies failed");
191
14
        return status;
192
14
    });
193
14
    {
194
14
        auto* local_state = _state->get_sink_local_state();
195
14
        write_dependencies = local_state->dependencies();
196
14
        auto* fin_dep = local_state->finishdependency();
197
14
        if (fin_dep) {
198
14
            finish_dependencies.push_back(fin_dep);
199
14
        }
200
14
    }
201
14
    {
202
14
        std::unique_lock<std::mutex> lc(_dependency_lock);
203
14
        read_dependencies.swap(_read_dependencies);
204
14
        write_dependencies.swap(_write_dependencies);
205
14
        finish_dependencies.swap(_finish_dependencies);
206
14
    }
207
14
    return Status::OK();
208
14
}
209
210
6
bool PipelineTask::inject_shared_state(std::shared_ptr<BasicSharedState> shared_state) {
211
6
    if (!shared_state) {
212
1
        return false;
213
1
    }
214
    // Shared state is created by upstream task's sink operator and shared by source operator of
215
    // this task.
216
7
    for (auto& op : _operators) {
217
7
        if (shared_state->related_op_ids.contains(op->operator_id())) {
218
3
            _op_shared_states.insert({op->operator_id(), shared_state});
219
3
            return true;
220
3
        }
221
7
    }
222
    // Shared state is created by the first sink operator and shared by sink operator of this task.
223
    // For example, Set operations.
224
2
    if (shared_state->related_op_ids.contains(_sink->dests_id().front())) {
225
1
        DCHECK_EQ(_sink_shared_state, nullptr)
226
0
                << " Sink: " << _sink->get_name() << " dest id: " << _sink->dests_id().front();
227
1
        _sink_shared_state = shared_state;
228
1
        return true;
229
1
    }
230
1
    return false;
231
2
}
232
233
21
void PipelineTask::_init_profile() {
234
21
    _task_profile = std::make_unique<RuntimeProfile>(fmt::format("PipelineTask(index={})", _index));
235
21
    _parent_profile->add_child(_task_profile.get(), true, nullptr);
236
21
    _task_cpu_timer = ADD_TIMER(_task_profile, profile::TASK_CPU_TIME);
237
238
21
    static const char* exec_time = profile::EXECUTE_TIME;
239
21
    _exec_timer = ADD_TIMER(_task_profile, exec_time);
240
21
    _prepare_timer = ADD_CHILD_TIMER(_task_profile, profile::PREPARE_TIME, exec_time);
241
21
    _open_timer = ADD_CHILD_TIMER(_task_profile, profile::OPEN_TIME, exec_time);
242
21
    _get_block_timer = ADD_CHILD_TIMER(_task_profile, profile::GET_BLOCK_TIME, exec_time);
243
21
    _get_block_counter = ADD_COUNTER(_task_profile, profile::GET_BLOCK_COUNTER, TUnit::UNIT);
244
21
    _sink_timer = ADD_CHILD_TIMER(_task_profile, profile::SINK_TIME, exec_time);
245
21
    _close_timer = ADD_CHILD_TIMER(_task_profile, profile::CLOSE_TIME, exec_time);
246
247
21
    _wait_worker_timer = ADD_TIMER_WITH_LEVEL(_task_profile, profile::WAIT_WORKER_TIME, 1);
248
249
21
    _schedule_counts = ADD_COUNTER(_task_profile, profile::NUM_SCHEDULE_TIMES, TUnit::UNIT);
250
21
    _yield_counts = ADD_COUNTER(_task_profile, profile::NUM_YIELD_TIMES, TUnit::UNIT);
251
21
    _core_change_times = ADD_COUNTER(_task_profile, profile::CORE_CHANGE_TIMES, TUnit::UNIT);
252
21
    _memory_reserve_times = ADD_COUNTER(_task_profile, profile::MEMORY_RESERVE_TIMES, TUnit::UNIT);
253
21
    _memory_reserve_failed_times =
254
21
            ADD_COUNTER(_task_profile, profile::MEMORY_RESERVE_FAILED_TIMES, TUnit::UNIT);
255
21
}
256
257
6
void PipelineTask::_fresh_profile_counter() {
258
6
    COUNTER_SET(_schedule_counts, (int64_t)_schedule_time);
259
6
    COUNTER_SET(_wait_worker_timer, (int64_t)_wait_worker_watcher.elapsed_time());
260
6
}
261
262
14
Status PipelineTask::_open() {
263
14
    SCOPED_TIMER(_task_profile->total_time_counter());
264
14
    SCOPED_CPU_TIMER(_task_cpu_timer);
265
14
    SCOPED_TIMER(_open_timer);
266
14
    _dry_run = _sink->should_dry_run(_state);
267
16
    for (auto& o : _operators) {
268
16
        RETURN_IF_ERROR(_state->get_local_state(o->operator_id())->open(_state));
269
16
    }
270
14
    RETURN_IF_ERROR(_state->get_sink_local_state()->open(_state));
271
14
    RETURN_IF_ERROR(_extract_dependencies());
272
14
    DBUG_EXECUTE_IF("fault_inject::PipelineXTask::open", {
273
14
        Status status = Status::Error<INTERNAL_ERROR>("fault_inject pipeline_task open failed");
274
14
        return status;
275
14
    });
276
14
    _opened = true;
277
14
    return Status::OK();
278
14
}
279
280
68
Status PipelineTask::_prepare() {
281
68
    SCOPED_TIMER(_task_profile->total_time_counter());
282
68
    SCOPED_CPU_TIMER(_task_cpu_timer);
283
80
    for (auto& o : _operators) {
284
80
        RETURN_IF_ERROR(_state->get_local_state(o->operator_id())->prepare(_state));
285
80
    }
286
68
    RETURN_IF_ERROR(_state->get_sink_local_state()->prepare(_state));
287
68
    return Status::OK();
288
68
}
289
290
45
bool PipelineTask::_wait_to_start() {
291
    // Before task starting, we should make sure
292
    // 1. Execution dependency is ready (which is controlled by FE 2-phase commit)
293
    // 2. Runtime filter dependencies are ready
294
    // 3. All tablets are loaded into local storage
295
45
    return std::any_of(
296
45
            _execution_dependencies.begin(), _execution_dependencies.end(),
297
62
            [&](Dependency* dep) -> bool { return dep->is_blocked_by(shared_from_this()); });
298
45
}
299
300
18
bool PipelineTask::_is_pending_finish() {
301
    // Spilling may be in progress if eos is true.
302
25
    return std::ranges::any_of(_finish_dependencies, [&](Dependency* dep) -> bool {
303
25
        return dep->is_blocked_by(shared_from_this());
304
25
    });
305
18
}
306
307
0
bool PipelineTask::is_blockable() const {
308
    // Before task starting, we should make sure
309
    // 1. Execution dependency is ready (which is controlled by FE 2-phase commit)
310
    // 2. Runtime filter dependencies are ready
311
    // 3. All tablets are loaded into local storage
312
313
0
    if (_state->enable_fuzzy_blockable_task()) {
314
0
        if ((_schedule_time + _task_idx) % 2 == 0) {
315
0
            return true;
316
0
        }
317
0
    }
318
319
0
    return std::ranges::any_of(_operators,
320
0
                               [&](OperatorPtr op) -> bool { return op->is_blockable(_state); }) ||
321
0
           _sink->is_blockable(_state);
322
0
}
323
324
1.47M
bool PipelineTask::_is_blocked() {
325
    // `_dry_run = true` means we do not need data from source operator.
326
1.47M
    if (!_dry_run) {
327
2.95M
        for (int i = cast_set<int>(_read_dependencies.size() - 1); i >= 0; i--) {
328
            // `_read_dependencies` is organized according to operators. For each operator, running condition is met iff all dependencies are ready.
329
1.47M
            for (auto* dep : _read_dependencies[i]) {
330
1.47M
                if (dep->is_blocked_by(shared_from_this())) {
331
15
                    return true;
332
15
                }
333
1.47M
            }
334
            // If all dependencies of this operator are ready, we can execute this task as long as no data is needed from upstream operators.
335
1.47M
            if (!_operators[i]->need_more_input_data(_state)) {
336
2
                break;
337
2
            }
338
1.47M
        }
339
1.47M
    }
340
1.47M
    return _memory_sufficient_dependency->is_blocked_by(shared_from_this()) ||
341
1.47M
           std::ranges::any_of(_write_dependencies, [&](Dependency* dep) -> bool {
342
1.47M
               return dep->is_blocked_by(shared_from_this());
343
1.47M
           });
344
1.47M
}
345
346
4
void PipelineTask::unblock_all_dependencies() {
347
    // We use a lock to ensure that no dependencies are destructed here.
348
4
    std::unique_lock<std::mutex> lc(_dependency_lock);
349
4
    auto fragment = _fragment_context.lock();
350
4
    if (!is_finalized() && fragment) {
351
4
        try {
352
4
            DCHECK(_wake_up_early || fragment->is_canceled());
353
4
            std::ranges::for_each(_write_dependencies,
354
4
                                  [&](Dependency* dep) { dep->set_always_ready(); });
355
4
            std::ranges::for_each(_finish_dependencies,
356
6
                                  [&](Dependency* dep) { dep->set_always_ready(); });
357
4
            std::ranges::for_each(_read_dependencies, [&](std::vector<Dependency*>& deps) {
358
3
                std::ranges::for_each(deps, [&](Dependency* dep) { dep->set_always_ready(); });
359
3
            });
360
            // `_execution_deps` are never switched back from ready to blocking, so we just set them ready here.
361
4
            std::ranges::for_each(_execution_dependencies,
362
8
                                  [&](Dependency* dep) { dep->set_ready(); });
363
4
            _memory_sufficient_dependency->set_ready();
364
4
        } catch (const doris::Exception& e) {
365
0
            LOG(WARNING) << "unblock_all_dependencies failed: " << e.code() << ", "
366
0
                         << e.to_string();
367
0
        }
368
4
    }
369
4
}
370
371
// When current memory pressure is low, memory usage may increase significantly in the next
372
// operator run, while there is no revocable memory available for spilling.
373
// Trigger memory revoking when pressure is high and revocable memory is significant.
374
// Memory pressure is evaluated using two signals:
375
// 1. Query memory usage exceeds a threshold ratio of the query memory limit.
376
// 2. Workload group memory usage reaches the workload group low-watermark threshold.
377
2.08k
bool PipelineTask::_should_trigger_revoking(const size_t reserve_size) const {
378
2.08k
    if (!_state->enable_spill()) {
379
3
        return false;
380
3
    }
381
382
2.08k
    auto query_mem_tracker = _state->get_query_ctx()->query_mem_tracker();
383
2.08k
    auto wg = _state->get_query_ctx()->workload_group();
384
2.08k
    if (!query_mem_tracker || !wg) {
385
2.07k
        return false;
386
2.07k
    }
387
388
8
    const auto parallelism = std::max(1, _pipeline->num_tasks());
389
8
    const auto query_water_mark = 90; // 90%
390
8
    const auto group_mem_limit = wg->memory_limit();
391
8
    auto query_limit = query_mem_tracker->limit();
392
8
    if (query_limit <= 0) {
393
1
        query_limit = group_mem_limit;
394
7
    } else if (query_limit > group_mem_limit && group_mem_limit > 0) {
395
1
        query_limit = group_mem_limit;
396
1
    }
397
398
8
    if (query_limit <= 0) {
399
1
        return false;
400
1
    }
401
402
7
    if ((reserve_size * parallelism) <= (query_limit / 5)) {
403
1
        return false;
404
1
    }
405
406
6
    bool is_high_memory_pressure = false;
407
6
    const auto used_mem = query_mem_tracker->consumption() + reserve_size * parallelism;
408
6
    if (used_mem >= int64_t((double(query_limit) * query_water_mark / 100))) {
409
2
        is_high_memory_pressure = true;
410
2
    }
411
412
6
    if (!is_high_memory_pressure) {
413
4
        bool is_low_watermark;
414
4
        bool is_high_watermark;
415
4
        wg->check_mem_used(&is_low_watermark, &is_high_watermark);
416
4
        is_high_memory_pressure = is_low_watermark || is_high_watermark;
417
4
    }
418
419
6
    if (is_high_memory_pressure) {
420
4
        const auto revocable_size = _get_revocable_size();
421
4
        const auto total_estimated_revocable = revocable_size * parallelism;
422
4
        return total_estimated_revocable >= int64_t(double(query_limit) * 0.2);
423
4
    }
424
425
2
    return false;
426
6
}
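
The trigger logic above can be restated as a small stand-alone sketch (hypothetical function name and values, not part of pipeline_task.cpp): revoking is considered only when the combined reservation is significant relative to the query limit, memory pressure is high by one of the two signals, and enough revocable memory exists to make spilling worthwhile.

#include <cstdint>
#include <iostream>

// Simplified model of _should_trigger_revoking(): all inputs are passed in
// explicitly instead of being read from the runtime state (hypothetical helper).
bool should_trigger_revoking(int64_t reserve_size, int parallelism, int64_t query_limit,
                             int64_t used_mem, bool wg_watermark_reached,
                             int64_t revocable_size_per_task) {
    // Ignore small reservations: only consider spilling when the combined
    // reservation across all parallel tasks exceeds 20% of the query limit.
    if (reserve_size * parallelism <= query_limit / 5) {
        return false;
    }
    // Signal 1: projected usage crosses 90% of the query memory limit.
    bool high_pressure = used_mem + reserve_size * parallelism >=
                         static_cast<int64_t>(double(query_limit) * 90 / 100);
    // Signal 2: the workload group already reached a watermark.
    high_pressure = high_pressure || wg_watermark_reached;
    // Spill only if the estimated revocable memory is significant (>= 20% of the limit).
    return high_pressure && revocable_size_per_task * parallelism >=
                                    static_cast<int64_t>(double(query_limit) * 0.2);
}

int main() {
    const int64_t GB = 1LL << 30;
    // 8 tasks each reserving 512 MB against a 10 GB limit, 6 GB already used,
    // 3 GB revocable per task: prints 1 (trigger revoking).
    std::cout << should_trigger_revoking(512LL << 20, 8, 10 * GB, 6 * GB, false, 3 * GB) << '\n';
}
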
427
428
/**
429
 * `_eos` indicates whether the execution phase is done. `done` indicates whether we could close
430
 * this task.
431
 *
432
 * For example,
433
 * 1. if `_eos` is false, we should continue to get the next block, so we cannot close (i.e.
434
 *    `done` is false)
435
 * 2. if `_eos` is true, all blocks from the source are exhausted, but `_is_pending_finish()`
436
 *    is true, meaning we should wait for a pending dependency to become ready (maybe a running
437
 *    RPC), so we cannot close (i.e. `done` is false)
438
 * 3. if `_eos` is true, all blocks from the source are exhausted, and `_is_pending_finish()`
439
 *    is false, so we can close immediately (i.e. `done` is true)
440
 * @param done
441
 * @return
442
 */
443
39
Status PipelineTask::execute(bool* done) {
444
39
    if (_exec_state != State::RUNNABLE || _blocked_dep != nullptr) [[unlikely]] {
445
1
#ifdef BE_TEST
446
1
        return Status::InternalError("Pipeline task is not runnable! Task info: {}",
447
1
                                     debug_string());
448
#else
449
        return Status::FatalError("Pipeline task is not runnable! Task info: {}", debug_string());
450
#endif
451
1
    }
452
453
38
    auto fragment_context = _fragment_context.lock();
454
38
    if (!fragment_context) {
455
0
        return Status::InternalError("Fragment already finished! Query: {}", print_id(_query_id));
456
0
    }
457
38
    int64_t time_spent = 0;
458
38
    ThreadCpuStopWatch cpu_time_stop_watch;
459
38
    cpu_time_stop_watch.start();
460
38
    SCOPED_ATTACH_TASK(_state);
461
38
    Defer running_defer {[&]() {
462
38
        int64_t delta_cpu_time = cpu_time_stop_watch.elapsed_time();
463
38
        _task_cpu_timer->update(delta_cpu_time);
464
38
        fragment_context->get_query_ctx()->resource_ctx()->cpu_context()->update_cpu_cost_ms(
465
38
                delta_cpu_time);
466
467
        // If the task is woken up early, we should terminate all operators so this task can be closed immediately.
468
38
        if (_wake_up_early) {
469
3
            _eos = true;
470
3
            *done = true;
471
35
        } else if (_eos && !_spilling &&
472
35
                   (fragment_context->is_canceled() || !_is_pending_finish())) {
473
            // Debug point for testing the race condition fix: inject set_wake_up_early() +
474
            // unblock_all_dependencies() here to simulate Thread B writing A then B between
475
            // Thread A's two reads of _wake_up_early.
476
11
            DBUG_EXECUTE_IF("PipelineTask::execute.wake_up_early_in_else_if", {
477
11
                set_wake_up_early();
478
11
                unblock_all_dependencies();
479
11
            });
480
11
            *done = true;
481
11
        }
482
483
        // NOTE: The operator terminate() call is intentionally placed AFTER the
484
        // _is_pending_finish() check above, not before. This ordering is critical to avoid a race
485
        // condition with the seq_cst memory ordering guarantee:
486
        //
487
        // Pipeline::make_all_runnable() writes in this order:
488
        //   (A) set_wake_up_early()  ->  (B) unblock_all_dependencies() [sets finish_dep._always_ready]
489
        //
490
        // If we checked _wake_up_early (A) before _is_pending_finish() (B), there would be a
491
        // window where Thread A reads _wake_up_early=false, then Thread B writes both A and B,
492
        // then Thread A reads _is_pending_finish()=false (due to _always_ready). Thread A would
493
        // then set *done=true without ever calling operator terminate(), causing close() to run
494
        // on operators that were never properly terminated (e.g. RuntimeFilterProducer still in
495
        // WAITING_FOR_SYNCED_SIZE state when insert() is called).
496
        //
497
        // By reading _is_pending_finish() (B) before the second read of _wake_up_early (A),
498
        // if Thread A observes B's effect (_always_ready=true), it is guaranteed to also observe
499
        // A's effect (_wake_up_early=true) on this second read, ensuring operator terminate() is
500
        // called. This relies on _wake_up_early and _always_ready both being std::atomic with the
501
        // default seq_cst ordering — do not weaken them to relaxed or acq/rel.
502
38
        if (_wake_up_early) {
503
4
            THROW_IF_ERROR(_root->terminate(_state));
504
4
            THROW_IF_ERROR(_sink->terminate(_state));
505
4
        }
506
38
    }};
507
38
    const auto query_id = _state->query_id();
508
    // If this task is already EOS and block is empty (which means we already output all blocks),
509
    // just return here.
510
38
    if (_eos && !_spilling) {
511
3
        return Status::OK();
512
3
    }
513
    // If this task is blocked by a spilling request and woken up immediately, the spilling
514
    // dependency will not block this task and we should just run here.
515
35
    if (!_block->empty()) {
516
0
        LOG(INFO) << "Query: " << print_id(query_id) << " has pending block, size: "
517
0
                  << PrettyPrinter::print_bytes(_block->allocated_bytes());
518
0
        DCHECK(_spilling);
519
0
    }
520
521
35
    SCOPED_TIMER(_task_profile->total_time_counter());
522
35
    SCOPED_TIMER(_exec_timer);
523
524
35
    if (!_wake_up_early) {
525
35
        RETURN_IF_ERROR(_prepare());
526
35
    }
527
35
    DBUG_EXECUTE_IF("fault_inject::PipelineXTask::execute", {
528
35
        Status status = Status::Error<INTERNAL_ERROR>("fault_inject pipeline_task execute failed");
529
35
        return status;
530
35
    });
531
    // The check of `_wake_up_early` must come after `_wait_to_start()`.
532
35
    if (_wait_to_start() || _wake_up_early) {
533
2
        return Status::OK();
534
2
    }
535
33
    RETURN_IF_ERROR(_prepare());
536
537
    // The status must be runnable
538
33
    if (!_opened && !fragment_context->is_canceled()) {
539
13
        DBUG_EXECUTE_IF("PipelineTask::execute.open_sleep", {
540
13
            auto required_pipeline_id =
541
13
                    DebugPoints::instance()->get_debug_param_or_default<int32_t>(
542
13
                            "PipelineTask::execute.open_sleep", "pipeline_id", -1);
543
13
            auto required_task_id = DebugPoints::instance()->get_debug_param_or_default<int32_t>(
544
13
                    "PipelineTask::execute.open_sleep", "task_id", -1);
545
13
            if (required_pipeline_id == pipeline_id() && required_task_id == task_id()) {
546
13
                LOG(WARNING) << "PipelineTask::execute.open_sleep sleep 5s";
547
13
                sleep(5);
548
13
            }
549
13
        });
550
551
13
        SCOPED_RAW_TIMER(&time_spent);
552
13
        RETURN_IF_ERROR(_open());
553
13
    }
554
555
1.47M
    while (!fragment_context->is_canceled()) {
556
1.47M
        SCOPED_RAW_TIMER(&time_spent);
557
1.47M
        Defer defer {[&]() {
558
            // If this run was paused by a spilling request, the block will be output in the next run.
559
1.47M
            if (!_spilling) {
560
1.47M
                _block->clear_column_data(_root->row_desc().num_materialized_slots());
561
1.47M
            }
562
1.47M
        }};
563
        // The check of `_wake_up_early` must come after `_is_blocked()`.
564
1.47M
        if (_is_blocked() || _wake_up_early) {
565
17
            return Status::OK();
566
17
        }
567
568
        /// When a task is cancelled,
569
        /// its blocking state will be cleared and it will transition to a ready state (though it is not truly ready).
570
        /// Here we check whether it is cancelled to prevent tasks in a blocking state from being re-executed.
571
1.47M
        if (fragment_context->is_canceled()) {
572
0
            break;
573
0
        }
574
575
1.47M
        if (time_spent > _exec_time_slice) {
576
4
            COUNTER_UPDATE(_yield_counts, 1);
577
4
            break;
578
4
        }
579
1.47M
        auto* block = _block.get();
580
581
1.47M
        DBUG_EXECUTE_IF("fault_inject::PipelineXTask::executing", {
582
1.47M
            Status status =
583
1.47M
                    Status::Error<INTERNAL_ERROR>("fault_inject pipeline_task executing failed");
584
1.47M
            return status;
585
1.47M
        });
586
587
        // `_sink->is_finished(_state)` means the sink operator should be finished
588
1.47M
        if (_sink->is_finished(_state)) {
589
1
            set_wake_up_early();
590
1
            return Status::OK();
591
1
        }
592
593
        // `_dry_run` means the sink operator needs no more data
594
1.47M
        _eos = _dry_run || _eos;
595
1.47M
        _spilling = false;
596
1.47M
        auto workload_group = _state->workload_group();
597
        // If the last run was paused by a spilling request, `_block` was produced with some rows in
598
        // that run, so we resume execution using that block.
599
1.47M
        if (!_eos && _block->empty()) {
600
1.47M
            SCOPED_TIMER(_get_block_timer);
601
1.47M
            if (_state->low_memory_mode()) {
602
0
                _sink->set_low_memory_mode(_state);
603
0
                for (auto& op : _operators) {
604
0
                    op->set_low_memory_mode(_state);
605
0
                }
606
0
            }
607
1.47M
            DEFER_RELEASE_RESERVED();
608
1.47M
            _get_block_counter->update(1);
609
            // Sum reserve sizes across all operators in this pipeline.
610
            // Each operator reports only its own requirement (non-recursive).
611
1.47M
            size_t reserve_size = 0;
612
1.47M
            for (auto& op : _operators) {
613
1.47M
                reserve_size += op->get_reserve_mem_size(_state);
614
1.47M
                op->reset_reserve_mem_size(_state);
615
1.47M
            }
616
1.47M
            if (workload_group &&
617
1.47M
                _state->get_query_ctx()
618
46.8k
                        ->resource_ctx()
619
46.8k
                        ->task_controller()
620
46.8k
                        ->is_enable_reserve_memory() &&
621
1.47M
                reserve_size > 0) {
622
1.10k
                if (_should_trigger_revoking(reserve_size)) {
623
0
                    LOG(INFO) << fmt::format(
624
0
                            "Query: {} sink: {}, node id: {}, task id: {}, reserve size: {} when "
625
0
                            "high memory pressure, try to spill",
626
0
                            print_id(_query_id), _sink->get_name(), _sink->node_id(),
627
0
                            _state->task_id(), reserve_size);
628
0
                    ExecEnv::GetInstance()->workload_group_mgr()->add_paused_query(
629
0
                            _state->get_query_ctx()->resource_ctx()->shared_from_this(),
630
0
                            reserve_size,
631
0
                            Status::Error<ErrorCode::QUERY_MEMORY_EXCEEDED>(
632
0
                                    "high memory pressure, try to spill"));
633
0
                    _spilling = true;
634
0
                    continue;
635
0
                }
636
1.10k
                if (!_try_to_reserve_memory(reserve_size, _root)) {
637
1.10k
                    continue;
638
1.10k
                }
639
1.10k
            }
640
641
1.47M
            bool eos = false;
642
1.47M
            RETURN_IF_ERROR(_root->get_block_after_projects(_state, block, &eos));
643
1.47M
            RETURN_IF_ERROR(block->check_type_and_column());
644
1.47M
            _eos = eos;
645
1.47M
        }
646
647
1.47M
        if (!_block->empty() || _eos) {
648
987
            SCOPED_TIMER(_sink_timer);
649
987
            Status status = Status::OK();
650
987
            DEFER_RELEASE_RESERVED();
651
987
            if (_state->get_query_ctx()
652
987
                        ->resource_ctx()
653
987
                        ->task_controller()
654
987
                        ->is_enable_reserve_memory() &&
655
987
                workload_group && !(_wake_up_early || _dry_run)) {
656
971
                const auto sink_reserve_size = _sink->get_reserve_mem_size(_state, _eos);
657
658
971
                if (sink_reserve_size > 0 && _should_trigger_revoking(sink_reserve_size)) {
659
0
                    LOG(INFO) << fmt::format(
660
0
                            "Query: {} sink: {}, node id: {}, task id: {}, reserve size: {} when "
661
0
                            "high memory pressure, try to spill",
662
0
                            print_id(_query_id), _sink->get_name(), _sink->node_id(),
663
0
                            _state->task_id(), sink_reserve_size);
664
0
                    ExecEnv::GetInstance()->workload_group_mgr()->add_paused_query(
665
0
                            _state->get_query_ctx()->resource_ctx()->shared_from_this(),
666
0
                            sink_reserve_size,
667
0
                            Status::Error<ErrorCode::QUERY_MEMORY_EXCEEDED>(
668
0
                                    "high memory pressure, try to spill"));
669
0
                    _spilling = true;
670
0
                    continue;
671
0
                }
672
673
971
                if (sink_reserve_size > 0 &&
674
971
                    !_try_to_reserve_memory(sink_reserve_size, _sink.get())) {
675
970
                    continue;
676
970
                }
677
971
            }
678
679
17
            DBUG_EXECUTE_IF("PipelineTask::execute.sink_eos_sleep", {
680
17
                auto required_pipeline_id =
681
17
                        DebugPoints::instance()->get_debug_param_or_default<int32_t>(
682
17
                                "PipelineTask::execute.sink_eos_sleep", "pipeline_id", -1);
683
17
                auto required_task_id =
684
17
                        DebugPoints::instance()->get_debug_param_or_default<int32_t>(
685
17
                                "PipelineTask::execute.sink_eos_sleep", "task_id", -1);
686
17
                if (required_pipeline_id == pipeline_id() && required_task_id == task_id()) {
687
17
                    LOG(WARNING) << "PipelineTask::execute.sink_eos_sleep sleep 10s";
688
17
                    sleep(10);
689
17
                }
690
17
            });
691
692
17
            DBUG_EXECUTE_IF("PipelineTask::execute.terminate", {
693
17
                if (_eos) {
694
17
                    auto required_pipeline_id =
695
17
                            DebugPoints::instance()->get_debug_param_or_default<int32_t>(
696
17
                                    "PipelineTask::execute.terminate", "pipeline_id", -1);
697
17
                    auto required_task_id =
698
17
                            DebugPoints::instance()->get_debug_param_or_default<int32_t>(
699
17
                                    "PipelineTask::execute.terminate", "task_id", -1);
700
17
                    auto required_fragment_id =
701
17
                            DebugPoints::instance()->get_debug_param_or_default<int32_t>(
702
17
                                    "PipelineTask::execute.terminate", "fragment_id", -1);
703
17
                    if (required_pipeline_id == pipeline_id() && required_task_id == task_id() &&
704
17
                        fragment_context->get_fragment_id() == required_fragment_id) {
705
17
                        _wake_up_early = true;
706
17
                        unblock_all_dependencies();
707
17
                    } else if (required_pipeline_id == pipeline_id() &&
708
17
                               fragment_context->get_fragment_id() == required_fragment_id) {
709
17
                        LOG(WARNING) << "PipelineTask::execute.terminate sleep 5s";
710
17
                        sleep(5);
711
17
                    }
712
17
                }
713
17
            });
714
17
            RETURN_IF_ERROR(block->check_type_and_column());
715
17
            status = _sink->sink(_state, block, _eos);
716
717
17
            if (_eos) {
718
11
                if (_sink->reset_to_rerun(_state, _root)) {
719
0
                    _eos = false;
720
11
                } else {
721
11
                    RETURN_IF_ERROR(close(Status::OK(), false));
722
11
                }
723
11
            }
724
725
17
            if (status.is<ErrorCode::END_OF_FILE>()) {
726
1
                set_wake_up_early();
727
1
                return Status::OK();
728
16
            } else if (!status) {
729
0
                return status;
730
0
            }
731
732
16
            if (_eos) { // just return, the scheduler will do finish work
733
10
                return Status::OK();
734
10
            }
735
16
        }
736
1.47M
    }
737
738
4
    RETURN_IF_ERROR(_state->get_query_ctx()->get_pipe_exec_scheduler()->submit(shared_from_this()));
739
4
    return Status::OK();
740
4
}
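
The ordering argument in the NOTE comment inside execute() above can be reduced to a minimal stand-alone litmus test (hypothetical flag names; not part of pipeline_task.cpp). With both flags being std::atomic under the default seq_cst ordering, a reader that observes the second store is guaranteed to also observe the first store, which is exactly why the second read of _wake_up_early is placed after the _is_pending_finish() check.

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<bool> wake_up_early{false}; // (A) written by set_wake_up_early()
std::atomic<bool> always_ready{false};  // (B) written by unblock_all_dependencies()

void writer() {
    wake_up_early.store(true); // (A) first
    always_ready.store(true);  // (B) second
}

void reader() {
    bool finish_dep_ready = always_ready.load(); // read (B) first
    bool woken = wake_up_early.load();           // then read (A)
    // With seq_cst, observing (B) implies observing (A), so the "done without
    // terminate()" window described in the comment cannot occur.
    if (finish_dep_ready) {
        assert(woken);
    }
}

int main() {
    std::thread t1(writer);
    std::thread t2(reader);
    t1.join();
    t2.join();
    return 0;
}
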
741
742
7
Status PipelineTask::do_revoke_memory(const std::shared_ptr<SpillContext>& spill_context) {
743
7
    auto fragment_context = _fragment_context.lock();
744
7
    if (!fragment_context) {
745
1
        return Status::InternalError("Fragment already finished! Query: {}", print_id(_query_id));
746
1
    }
747
748
6
    SCOPED_ATTACH_TASK(_state);
749
6
    ThreadCpuStopWatch cpu_time_stop_watch;
750
6
    cpu_time_stop_watch.start();
751
6
    Defer running_defer {[&]() {
752
6
        int64_t delta_cpu_time = cpu_time_stop_watch.elapsed_time();
753
6
        _task_cpu_timer->update(delta_cpu_time);
754
6
        fragment_context->get_query_ctx()->resource_ctx()->cpu_context()->update_cpu_cost_ms(
755
6
                delta_cpu_time);
756
757
        // If the task is woken up early, unblock all dependencies and terminate all operators,
758
        // so this task can be closed immediately.
759
6
        if (_wake_up_early) {
760
1
            unblock_all_dependencies();
761
1
            THROW_IF_ERROR(_root->terminate(_state));
762
1
            THROW_IF_ERROR(_sink->terminate(_state));
763
1
            _eos = true;
764
1
        }
765
766
        // SpillContext tracks pipeline task count, not operator count.
767
        // Notify completion once after all operators + sink have finished revoking.
768
6
        if (spill_context) {
769
3
            spill_context->on_task_finished();
770
3
        }
771
6
    }};
772
773
    // Revoke memory from every operator that has enough revocable memory,
774
    // then revoke from the sink.
775
6
    for (auto& op : _operators) {
776
6
        if (op->revocable_mem_size(_state) >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
777
2
            RETURN_IF_ERROR(op->revoke_memory(_state));
778
2
        }
779
6
    }
780
781
6
    if (_sink->revocable_mem_size(_state) >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
782
1
        RETURN_IF_ERROR(_sink->revoke_memory(_state));
783
1
    }
784
6
    return Status::OK();
785
6
}
786
787
2.07k
bool PipelineTask::_try_to_reserve_memory(const size_t reserve_size, OperatorBase* op) {
788
2.07k
    auto st = thread_context()->thread_mem_tracker_mgr->try_reserve(reserve_size);
789
    // If the memory reservation failed and the query does not enable spill, just disable memory
790
    // reservation (this enables the memory hard-limit check, which cancels the query if allocation fails) and let it run.
791
2.07k
    if (!st.ok() && !_state->enable_spill()) {
792
2
        LOG(INFO) << print_id(_query_id) << " reserve memory failed due to " << st
793
2
                  << ", and it is not enable spill, disable reserve memory and let it run";
794
2
        _state->get_query_ctx()->resource_ctx()->task_controller()->disable_reserve_memory();
795
2
        return true;
796
2
    }
797
2.07k
    COUNTER_UPDATE(_memory_reserve_times, 1);
798
799
    // Compute total revocable memory across all operators and the sink.
800
2.07k
    size_t total_revocable_mem_size = 0;
801
2.07k
    size_t operator_max_revocable_mem_size = 0;
802
803
2.07k
    if (!st.ok() || _state->enable_force_spill()) {
804
        // Compute total revocable memory across all operators and the sink.
805
2.07k
        total_revocable_mem_size = _sink->revocable_mem_size(_state);
806
2.07k
        operator_max_revocable_mem_size = total_revocable_mem_size;
807
2.07k
        for (auto& cur_op : _operators) {
808
2.07k
            total_revocable_mem_size += cur_op->revocable_mem_size(_state);
809
2.07k
            operator_max_revocable_mem_size =
810
2.07k
                    std::max(cur_op->revocable_mem_size(_state), operator_max_revocable_mem_size);
811
2.07k
        }
812
2.07k
    }
813
814
    // When force spill is enabled, other operators such as the scan operator will also try to reserve memory and fail
815
    // here; without this check, the query would be paused and resumed repeatedly.
816
2.07k
    if (st.ok() && _state->enable_force_spill()) {
817
0
        if (operator_max_revocable_mem_size >= _state->spill_min_revocable_mem()) {
818
0
            st = Status::Error<ErrorCode::QUERY_MEMORY_EXCEEDED>(
819
0
                    "force spill and there is an operator has memory "
820
0
                    "size {} exceeds min mem size {}",
821
0
                    PrettyPrinter::print_bytes(operator_max_revocable_mem_size),
822
0
                    PrettyPrinter::print_bytes(_state->spill_min_revocable_mem()));
823
0
        }
824
0
    }
825
826
2.07k
    if (!st.ok()) {
827
2.07k
        COUNTER_UPDATE(_memory_reserve_failed_times, 1);
828
        // build per-operator revocable memory info string for debugging
829
2.07k
        std::string ops_revocable_info;
830
2.07k
        {
831
2.07k
            fmt::memory_buffer buf;
832
2.07k
            for (auto& cur_op : _operators) {
833
2.07k
                fmt::format_to(buf, "{}({})-> ", cur_op->get_name(),
834
2.07k
                               PrettyPrinter::print_bytes(cur_op->revocable_mem_size(_state)));
835
2.07k
            }
836
2.07k
            if (_sink) {
837
2.07k
                fmt::format_to(buf, "{}({}) ", _sink->get_name(),
838
2.07k
                               PrettyPrinter::print_bytes(_sink->revocable_mem_size(_state)));
839
2.07k
            }
840
2.07k
            ops_revocable_info = fmt::to_string(buf);
841
2.07k
        }
842
843
2.07k
        auto debug_msg = fmt::format(
844
2.07k
                "Query: {} , try to reserve: {}, total revocable mem size: {}, failed reason: {}",
845
2.07k
                print_id(_query_id), PrettyPrinter::print_bytes(reserve_size),
846
2.07k
                PrettyPrinter::print_bytes(total_revocable_mem_size), st.to_string());
847
2.07k
        if (!ops_revocable_info.empty()) {
848
2.07k
            debug_msg += fmt::format(", ops_revocable=[{}]", ops_revocable_info);
849
2.07k
        }
850
        // PROCESS_MEMORY_EXCEEDED error msg already contains process_mem_log_str
851
2.07k
        if (!st.is<ErrorCode::PROCESS_MEMORY_EXCEEDED>()) {
852
2.07k
            debug_msg +=
853
2.07k
                    fmt::format(", debug info: {}", GlobalMemoryArbitrator::process_mem_log_str());
854
2.07k
        }
855
2.07k
        LOG(INFO) << debug_msg;
856
2.07k
        ExecEnv::GetInstance()->workload_group_mgr()->add_paused_query(
857
2.07k
                _state->get_query_ctx()->resource_ctx()->shared_from_this(), reserve_size, st);
858
2.07k
        _spilling = true;
859
2.07k
        return false;
860
2.07k
    }
861
0
    return true;
862
2.07k
}
863
864
147k
void PipelineTask::stop_if_finished() {
865
147k
    auto fragment = _fragment_context.lock();
866
147k
    if (!fragment) {
867
0
        return;
868
0
    }
869
147k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(fragment->get_query_ctx()->query_mem_tracker());
870
147k
    if (auto sink = _sink) {
871
147k
        if (sink->is_finished(_state)) {
872
1
            set_wake_up_early();
873
1
            unblock_all_dependencies();
874
1
        }
875
147k
    }
876
147k
}
877
878
1
Status PipelineTask::finalize() {
879
1
    auto fragment = _fragment_context.lock();
880
1
    if (!fragment) {
881
0
        return Status::OK();
882
0
    }
883
1
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(fragment->get_query_ctx()->query_mem_tracker());
884
1
    RETURN_IF_ERROR(_state_transition(State::FINALIZED));
885
1
    std::unique_lock<std::mutex> lc(_dependency_lock);
886
1
    _sink_shared_state.reset();
887
1
    _op_shared_states.clear();
888
1
    _shared_state_map.clear();
889
1
    _block.reset();
890
1
    _operators.clear();
891
1
    _sink.reset();
892
1
    _pipeline.reset();
893
1
    return Status::OK();
894
1
}
895
896
17
Status PipelineTask::close(Status exec_status, bool close_sink) {
897
17
    int64_t close_ns = 0;
898
17
    Status s;
899
17
    {
900
17
        SCOPED_RAW_TIMER(&close_ns);
901
17
        if (close_sink) {
902
6
            s = _sink->close(_state, exec_status);
903
6
        }
904
21
        for (auto& op : _operators) {
905
21
            auto tem = op->close(_state);
906
21
            if (!tem.ok() && s.ok()) {
907
0
                s = std::move(tem);
908
0
            }
909
21
        }
910
17
    }
911
17
    if (_opened) {
912
17
        COUNTER_UPDATE(_close_timer, close_ns);
913
17
        COUNTER_UPDATE(_task_profile->total_time_counter(), close_ns);
914
17
    }
915
916
17
    if (close_sink && _opened) {
917
6
        _task_profile->add_info_string("WakeUpEarly", std::to_string(_wake_up_early.load()));
918
6
        _fresh_profile_counter();
919
6
    }
920
921
17
    if (close_sink) {
922
6
        RETURN_IF_ERROR(_state_transition(State::FINISHED));
923
6
    }
924
17
    return s;
925
17
}
926
927
16.7k
std::string PipelineTask::debug_string() {
928
16.7k
    fmt::memory_buffer debug_string_buffer;
929
930
16.7k
    fmt::format_to(debug_string_buffer, "QueryId: {}\n", print_id(_query_id));
931
16.7k
    fmt::format_to(debug_string_buffer, "InstanceId: {}\n",
932
16.7k
                   print_id(_state->fragment_instance_id()));
933
934
16.7k
    fmt::format_to(debug_string_buffer,
935
16.7k
                   "PipelineTask[id = {}, open = {}, eos = {}, state = {}, dry run = "
936
16.7k
                   "{}, _wake_up_early = {}, _wake_up_by = {}, time elapsed since last state "
937
16.7k
                   "changing = {}s, spilling = {}, is running = {}]",
938
16.7k
                   _index, _opened, _eos, _to_string(_exec_state), _dry_run, _wake_up_early.load(),
939
16.7k
                   _wake_by, _state_change_watcher.elapsed_time() / NANOS_PER_SEC, _spilling,
940
16.7k
                   is_running());
941
16.7k
    std::unique_lock<std::mutex> lc(_dependency_lock);
942
16.7k
    auto* cur_blocked_dep = _blocked_dep;
943
16.7k
    auto fragment = _fragment_context.lock();
944
16.7k
    if (is_finalized() || !fragment) {
945
9
        fmt::format_to(debug_string_buffer, " pipeline name = {}", _pipeline_name);
946
9
        return fmt::to_string(debug_string_buffer);
947
9
    }
948
16.6k
    auto elapsed = fragment->elapsed_time() / NANOS_PER_SEC;
949
16.6k
    fmt::format_to(debug_string_buffer, " elapse time = {}s, block dependency = [{}]\n", elapsed,
950
16.6k
                   cur_blocked_dep && !is_finalized() ? cur_blocked_dep->debug_string() : "NULL");
951
952
16.6k
    if (_state && _state->local_runtime_filter_mgr()) {
953
0
        fmt::format_to(debug_string_buffer, "local_runtime_filter_mgr: [{}]\n",
954
0
                       _state->local_runtime_filter_mgr()->debug_string());
955
0
    }
956
957
16.6k
    fmt::format_to(debug_string_buffer, "operators: ");
958
33.3k
    for (size_t i = 0; i < _operators.size(); i++) {
959
16.6k
        fmt::format_to(debug_string_buffer, "\n{}",
960
16.6k
                       _opened && !is_finalized()
961
16.6k
                               ? _operators[i]->debug_string(_state, cast_set<int>(i))
962
16.6k
                               : _operators[i]->debug_string(cast_set<int>(i)));
963
16.6k
    }
964
16.6k
    fmt::format_to(debug_string_buffer, "\n{}\n",
965
16.6k
                   _opened && !is_finalized()
966
16.6k
                           ? _sink->debug_string(_state, cast_set<int>(_operators.size()))
967
16.6k
                           : _sink->debug_string(cast_set<int>(_operators.size())));
968
969
16.6k
    fmt::format_to(debug_string_buffer, "\nRead Dependency Information: \n");
970
971
16.6k
    size_t i = 0;
972
33.3k
    for (; i < _read_dependencies.size(); i++) {
973
33.3k
        for (size_t j = 0; j < _read_dependencies[i].size(); j++) {
974
16.6k
            fmt::format_to(debug_string_buffer, "{}. {}\n", i,
975
16.6k
                           _read_dependencies[i][j]->debug_string(cast_set<int>(i) + 1));
976
16.6k
        }
977
16.6k
    }
978
979
16.6k
    fmt::format_to(debug_string_buffer, "{}. {}\n", i,
980
16.6k
                   _memory_sufficient_dependency->debug_string(cast_set<int>(i++)));
981
982
16.6k
    fmt::format_to(debug_string_buffer, "\nWrite Dependency Information: \n");
983
33.3k
    for (size_t j = 0; j < _write_dependencies.size(); j++, i++) {
984
16.6k
        fmt::format_to(debug_string_buffer, "{}. {}\n", i,
985
16.6k
                       _write_dependencies[j]->debug_string(cast_set<int>(j) + 1));
986
16.6k
    }
987
988
16.6k
    fmt::format_to(debug_string_buffer, "\nExecution Dependency Information: \n");
989
50.0k
    for (size_t j = 0; j < _execution_dependencies.size(); j++, i++) {
990
33.3k
        fmt::format_to(debug_string_buffer, "{}. {}\n", i,
991
33.3k
                       _execution_dependencies[j]->debug_string(cast_set<int>(i) + 1));
992
33.3k
    }
993
994
16.6k
    fmt::format_to(debug_string_buffer, "Finish Dependency Information: \n");
995
50.0k
    for (size_t j = 0; j < _finish_dependencies.size(); j++, i++) {
996
33.3k
        fmt::format_to(debug_string_buffer, "{}. {}\n", i,
997
33.3k
                       _finish_dependencies[j]->debug_string(cast_set<int>(i) + 1));
998
33.3k
    }
999
16.6k
    return fmt::to_string(debug_string_buffer);
1000
16.7k
}
1001
1002
6
size_t PipelineTask::_get_revocable_size() const {
1003
    // Sum revocable memory from every operator in the pipeline + the sink.
1004
    // Each operator reports only its own revocable memory (no child recursion).
1005
6
    size_t total = 0;
1006
6
    size_t sink_revocable_size = _sink->revocable_mem_size(_state);
1007
6
    if (sink_revocable_size >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
1008
3
        total += sink_revocable_size;
1009
3
    }
1010
6
    for (const auto& op : _operators) {
1011
6
        size_t ops_revocable_size = op->revocable_mem_size(_state);
1012
6
        if (ops_revocable_size >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
1013
4
            total += ops_revocable_size;
1014
4
        }
1015
6
    }
1016
6
    return total;
1017
6
}
1018
1019
2
size_t PipelineTask::get_revocable_size() const {
1020
2
    if (!_opened || is_finalized() || _running || (_eos && !_spilling)) {
1021
0
        return 0;
1022
0
    }
1023
1024
2
    return _get_revocable_size();
1025
2
}
1026
1027
3
Status PipelineTask::revoke_memory(const std::shared_ptr<SpillContext>& spill_context) {
1028
3
    DCHECK(spill_context);
1029
3
    if (is_finalized()) {
1030
1
        spill_context->on_task_finished();
1031
1
        VLOG_DEBUG << "Query: " << print_id(_state->query_id()) << ", task: " << ((void*)this)
1032
0
                   << " finalized";
1033
1
        return Status::OK();
1034
1
    }
1035
1036
2
    const auto revocable_size = get_revocable_size();
1037
2
    if (revocable_size >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
1038
1
        auto revokable_task = std::make_shared<RevokableTask>(shared_from_this(), spill_context);
1039
        // Submit a revocable task to run; its run method will revoke memory. Currently the
1040
        // underlying pipeline task is still blocked.
1041
1
        RETURN_IF_ERROR(_state->get_query_ctx()->get_pipe_exec_scheduler()->submit(revokable_task));
1042
1
    } else {
1043
1
        spill_context->on_task_finished();
1044
1
        VLOG_DEBUG << "Query: " << print_id(_state->query_id()) << ", task: " << ((void*)this)
1045
0
                   << " has not enough data to revoke: " << revocable_size;
1046
1
    }
1047
2
    return Status::OK();
1048
2
}
1049
1050
28
void PipelineTask::wake_up(Dependency* dep, std::unique_lock<std::mutex>& /* dep_lock */) {
1051
54
    auto cancel_if_error = [&](const Status& st) {
1052
54
        if (!st.ok()) {
1053
0
            if (auto frag = fragment_context().lock()) {
1054
0
                frag->cancel(st);
1055
0
            }
1056
0
        }
1057
54
    };
1058
    // Called by the dependency.
1059
28
    DCHECK_EQ(_blocked_dep, dep) << "dep : " << dep->debug_string(0) << "task: " << debug_string();
1060
28
    _blocked_dep = nullptr;
1061
28
    auto holder = std::dynamic_pointer_cast<PipelineTask>(shared_from_this());
1062
28
    cancel_if_error(_state_transition(PipelineTask::State::RUNNABLE));
1063
    // Under _wake_up_early, FINISHED/FINALIZED → RUNNABLE is a legal no-op
1064
    // (_state_transition returns OK but state stays unchanged). We must not
1065
    // resubmit a terminated task: finalize() clears _sink/_operators, and
1066
    // submit() → is_blockable() would dereference them → SIGSEGV.
1067
28
    if (_exec_state == State::FINISHED || _exec_state == State::FINALIZED) {
1068
2
        return;
1069
2
    }
1070
26
    if (auto f = _fragment_context.lock(); f) {
1071
26
        cancel_if_error(_state->get_query_ctx()->get_pipe_exec_scheduler()->submit(holder));
1072
26
    }
1073
26
}
1074
1075
133
Status PipelineTask::_state_transition(State new_state) {
1076
133
    const auto& table =
1077
133
            _wake_up_early ? WAKE_UP_EARLY_LEGAL_STATE_TRANSITION : LEGAL_STATE_TRANSITION;
1078
133
    if (!table[(int)new_state].contains(_exec_state)) {
1079
31
        return Status::InternalError(
1080
31
                "Task state transition from {} to {} is not allowed! Task info: {}",
1081
31
                _to_string(_exec_state), _to_string(new_state), debug_string());
1082
31
    }
1083
    // FINISHED/FINALIZED → RUNNABLE is legal under wake_up_early (delayed wake_up() arriving
1084
    // after the task already terminated), but we must not actually move the state backwards
1085
    // or update profile info (which would misleadingly show RUNNABLE for a terminated task).
1086
102
    bool need_move = !((_exec_state == State::FINISHED || _exec_state == State::FINALIZED) &&
1087
102
                       new_state == State::RUNNABLE);
1088
102
    if (need_move) {
1089
96
        if (_exec_state != new_state) {
1090
94
            _state_change_watcher.reset();
1091
94
            _state_change_watcher.start();
1092
94
        }
1093
96
        _task_profile->add_info_string("TaskState", _to_string(new_state));
1094
96
        _task_profile->add_info_string("BlockedByDependency",
1095
96
                                       _blocked_dep ? _blocked_dep->name() : "");
1096
96
        _exec_state = new_state;
1097
96
    }
1098
102
    return Status::OK();
1099
133
}
1100
1101
} // namespace doris
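
For reference, _state_transition() above validates a transition by looking up the target state in a table that lists its legal source states. Below is a minimal sketch of that table-driven check, with hypothetical states and table contents (the real LEGAL_STATE_TRANSITION and WAKE_UP_EARLY_LEGAL_STATE_TRANSITION tables are defined elsewhere in the pipeline code):

#include <array>
#include <iostream>
#include <set>

enum class State { RUNNABLE, BLOCKED, FINISHED, FINALIZED };

// One entry per target state, holding the set of source states that may
// legally transition into it (hypothetical contents).
using TransitionTable = std::array<std::set<State>, 4>;

const TransitionTable kLegal = {{
        /* -> RUNNABLE  */ {State::RUNNABLE, State::BLOCKED},
        /* -> BLOCKED   */ {State::RUNNABLE},
        /* -> FINISHED  */ {State::RUNNABLE},
        /* -> FINALIZED */ {State::FINISHED},
}};

bool transition_allowed(const TransitionTable& table, State current, State next) {
    // Mirrors `table[(int)new_state].contains(_exec_state)` in _state_transition().
    return table[static_cast<int>(next)].contains(current);
}

int main() {
    std::cout << transition_allowed(kLegal, State::RUNNABLE, State::FINISHED) << '\n'; // 1
    std::cout << transition_allowed(kLegal, State::FINISHED, State::RUNNABLE) << '\n'; // 0
}
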