Coverage Report

Created: 2026-04-13 17:16

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/exec/pipeline/pipeline_fragment_context.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "exec/pipeline/pipeline_fragment_context.h"
19
20
#include <gen_cpp/DataSinks_types.h>
21
#include <gen_cpp/PaloInternalService_types.h>
22
#include <gen_cpp/PlanNodes_types.h>
23
#include <pthread.h>
24
25
#include <algorithm>
26
#include <cstdlib>
27
// IWYU pragma: no_include <bits/chrono.h>
28
#include <fmt/format.h>
29
30
#include <chrono> // IWYU pragma: keep
31
#include <map>
32
#include <memory>
33
#include <ostream>
34
#include <utility>
35
36
#include "cloud/config.h"
37
#include "common/cast_set.h"
38
#include "common/config.h"
39
#include "common/exception.h"
40
#include "common/logging.h"
41
#include "common/status.h"
42
#include "exec/exchange/local_exchange_sink_operator.h"
43
#include "exec/exchange/local_exchange_source_operator.h"
44
#include "exec/exchange/local_exchanger.h"
45
#include "exec/exchange/vdata_stream_mgr.h"
46
#include "exec/operator/aggregation_sink_operator.h"
47
#include "exec/operator/aggregation_source_operator.h"
48
#include "exec/operator/analytic_sink_operator.h"
49
#include "exec/operator/analytic_source_operator.h"
50
#include "exec/operator/assert_num_rows_operator.h"
51
#include "exec/operator/blackhole_sink_operator.h"
52
#include "exec/operator/cache_sink_operator.h"
53
#include "exec/operator/cache_source_operator.h"
54
#include "exec/operator/datagen_operator.h"
55
#include "exec/operator/dict_sink_operator.h"
56
#include "exec/operator/distinct_streaming_aggregation_operator.h"
57
#include "exec/operator/empty_set_operator.h"
58
#include "exec/operator/es_scan_operator.h"
59
#include "exec/operator/exchange_sink_operator.h"
60
#include "exec/operator/exchange_source_operator.h"
61
#include "exec/operator/file_scan_operator.h"
62
#include "exec/operator/group_commit_block_sink_operator.h"
63
#include "exec/operator/group_commit_scan_operator.h"
64
#include "exec/operator/hashjoin_build_sink.h"
65
#include "exec/operator/hashjoin_probe_operator.h"
66
#include "exec/operator/hive_table_sink_operator.h"
67
#include "exec/operator/iceberg_delete_sink_operator.h"
68
#include "exec/operator/iceberg_merge_sink_operator.h"
69
#include "exec/operator/iceberg_table_sink_operator.h"
70
#include "exec/operator/jdbc_scan_operator.h"
71
#include "exec/operator/jdbc_table_sink_operator.h"
72
#include "exec/operator/local_merge_sort_source_operator.h"
73
#include "exec/operator/materialization_opertor.h"
74
#include "exec/operator/maxcompute_table_sink_operator.h"
75
#include "exec/operator/memory_scratch_sink_operator.h"
76
#include "exec/operator/meta_scan_operator.h"
77
#include "exec/operator/multi_cast_data_stream_sink.h"
78
#include "exec/operator/multi_cast_data_stream_source.h"
79
#include "exec/operator/nested_loop_join_build_operator.h"
80
#include "exec/operator/nested_loop_join_probe_operator.h"
81
#include "exec/operator/olap_scan_operator.h"
82
#include "exec/operator/olap_table_sink_operator.h"
83
#include "exec/operator/olap_table_sink_v2_operator.h"
84
#include "exec/operator/partition_sort_sink_operator.h"
85
#include "exec/operator/partition_sort_source_operator.h"
86
#include "exec/operator/partitioned_aggregation_sink_operator.h"
87
#include "exec/operator/partitioned_aggregation_source_operator.h"
88
#include "exec/operator/partitioned_hash_join_probe_operator.h"
89
#include "exec/operator/partitioned_hash_join_sink_operator.h"
90
#include "exec/operator/rec_cte_anchor_sink_operator.h"
91
#include "exec/operator/rec_cte_scan_operator.h"
92
#include "exec/operator/rec_cte_sink_operator.h"
93
#include "exec/operator/rec_cte_source_operator.h"
94
#include "exec/operator/repeat_operator.h"
95
#include "exec/operator/result_file_sink_operator.h"
96
#include "exec/operator/result_sink_operator.h"
97
#include "exec/operator/schema_scan_operator.h"
98
#include "exec/operator/select_operator.h"
99
#include "exec/operator/set_probe_sink_operator.h"
100
#include "exec/operator/set_sink_operator.h"
101
#include "exec/operator/set_source_operator.h"
102
#include "exec/operator/sort_sink_operator.h"
103
#include "exec/operator/sort_source_operator.h"
104
#include "exec/operator/spill_iceberg_table_sink_operator.h"
105
#include "exec/operator/spill_sort_sink_operator.h"
106
#include "exec/operator/spill_sort_source_operator.h"
107
#include "exec/operator/streaming_aggregation_operator.h"
108
#include "exec/operator/table_function_operator.h"
109
#include "exec/operator/tvf_table_sink_operator.h"
110
#include "exec/operator/union_sink_operator.h"
111
#include "exec/operator/union_source_operator.h"
112
#include "exec/pipeline/dependency.h"
113
#include "exec/pipeline/pipeline_task.h"
114
#include "exec/pipeline/task_scheduler.h"
115
#include "exec/runtime_filter/runtime_filter_mgr.h"
116
#include "exec/sort/topn_sorter.h"
117
#include "exec/spill/spill_file.h"
118
#include "io/fs/stream_load_pipe.h"
119
#include "load/stream_load/new_load_stream_mgr.h"
120
#include "runtime/exec_env.h"
121
#include "runtime/fragment_mgr.h"
122
#include "runtime/result_buffer_mgr.h"
123
#include "runtime/runtime_state.h"
124
#include "runtime/thread_context.h"
125
#include "util/countdown_latch.h"
126
#include "util/debug_util.h"
127
#include "util/uid_util.h"
128
129
namespace doris {
130
PipelineFragmentContext::PipelineFragmentContext(
131
        TUniqueId query_id, const TPipelineFragmentParams& request,
132
        std::shared_ptr<QueryContext> query_ctx, ExecEnv* exec_env,
133
        const std::function<void(RuntimeState*, Status*)>& call_back,
134
        report_status_callback report_status_cb)
135
429k
        : _query_id(std::move(query_id)),
136
429k
          _fragment_id(request.fragment_id),
137
429k
          _exec_env(exec_env),
138
429k
          _query_ctx(std::move(query_ctx)),
139
429k
          _call_back(call_back),
140
429k
          _is_report_on_cancel(true),
141
429k
          _report_status_cb(std::move(report_status_cb)),
142
429k
          _params(request),
143
429k
          _parallel_instances(_params.__isset.parallel_instances ? _params.parallel_instances : 0),
144
429k
          _need_notify_close(request.__isset.need_notify_close ? request.need_notify_close
145
429k
                                                               : false) {
146
429k
    _fragment_watcher.start();
147
429k
}
148
149
429k
PipelineFragmentContext::~PipelineFragmentContext() {
150
429k
    LOG_INFO("PipelineFragmentContext::~PipelineFragmentContext")
151
429k
            .tag("query_id", print_id(_query_id))
152
429k
            .tag("fragment_id", _fragment_id);
153
429k
    _release_resource();
154
429k
    {
155
        // The memory released by the query end is recorded in the query mem tracker.
156
429k
        SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_ctx->query_mem_tracker());
157
429k
        _runtime_state.reset();
158
429k
        _query_ctx.reset();
159
429k
    }
160
429k
}
161
162
158
bool PipelineFragmentContext::is_timeout(timespec now) const {
163
158
    if (_timeout <= 0) {
164
0
        return false;
165
0
    }
166
158
    return _fragment_watcher.elapsed_time_seconds(now) > _timeout;
167
158
}
168
169
// notify_close() transitions the PFC from "waiting for external close notification" to
170
// "self-managed close". For recursive CTE fragments, the old PFC is kept alive until
171
// the rerun_fragment(wait_for_destroy) RPC calls this to trigger shutdown.
172
// Returns true if all tasks have already closed (i.e., the PFC can be safely destroyed).
173
10.3k
bool PipelineFragmentContext::notify_close() {
174
10.3k
    bool all_closed = false;
175
10.3k
    bool need_remove = false;
176
10.3k
    {
177
10.3k
        std::lock_guard<std::mutex> l(_task_mutex);
178
10.3k
        if (_closed_tasks >= _total_tasks) {
179
3.51k
            if (_need_notify_close) {
180
                // Fragment was cancelled and waiting for notify to close.
181
                // Record that we need to remove from fragment mgr, but do it
182
                // after releasing _task_mutex to avoid ABBA deadlock with
183
                // dump_pipeline_tasks() (which acquires _pipeline_map lock
184
                // first, then _task_mutex via debug_string()).
185
3.42k
                need_remove = true;
186
3.42k
            }
187
3.51k
            all_closed = true;
188
3.51k
        }
189
        // make fragment release by self after cancel
190
10.3k
        _need_notify_close = false;
191
10.3k
    }
192
10.3k
    if (need_remove) {
193
3.42k
        _exec_env->fragment_mgr()->remove_pipeline_context({_query_id, _fragment_id});
194
3.42k
    }
195
10.3k
    return all_closed;
196
10.3k
}
197
198
// Must not add lock in this method. Because it will call query ctx cancel. And
199
// QueryCtx cancel will call fragment ctx cancel. And Also Fragment ctx's running
200
// Method like exchange sink buffer will call query ctx cancel. If we add lock here
201
// There maybe dead lock.
202
6.91k
void PipelineFragmentContext::cancel(const Status reason) {
203
6.91k
    LOG_INFO("PipelineFragmentContext::cancel")
204
6.91k
            .tag("query_id", print_id(_query_id))
205
6.91k
            .tag("fragment_id", _fragment_id)
206
6.91k
            .tag("reason", reason.to_string());
207
6.91k
    if (notify_close()) {
208
104
        return;
209
104
    }
210
    // Timeout is a special error code, we need print current stack to debug timeout issue.
211
6.81k
    if (reason.is<ErrorCode::TIMEOUT>()) {
212
8
        auto dbg_str = fmt::format("PipelineFragmentContext is cancelled due to timeout:\n{}",
213
8
                                   debug_string());
214
8
        LOG_LONG_STRING(WARNING, dbg_str);
215
8
    }
216
217
    // `ILLEGAL_STATE` means queries this fragment belongs to was not found in FE (maybe finished)
218
6.81k
    if (reason.is<ErrorCode::ILLEGAL_STATE>()) {
219
0
        LOG_WARNING("PipelineFragmentContext is cancelled due to illegal state : {}",
220
0
                    debug_string());
221
0
    }
222
223
6.81k
    if (reason.is<ErrorCode::MEM_LIMIT_EXCEEDED>() || reason.is<ErrorCode::MEM_ALLOC_FAILED>()) {
224
12
        print_profile("cancel pipeline, reason: " + reason.to_string());
225
12
    }
226
227
6.81k
    if (auto error_url = get_load_error_url(); !error_url.empty()) {
228
22
        _query_ctx->set_load_error_url(error_url);
229
22
    }
230
231
6.81k
    if (auto first_error_msg = get_first_error_msg(); !first_error_msg.empty()) {
232
22
        _query_ctx->set_first_error_msg(first_error_msg);
233
22
    }
234
235
6.81k
    _query_ctx->cancel(reason, _fragment_id);
236
6.81k
    if (reason.is<ErrorCode::LIMIT_REACH>()) {
237
421
        _is_report_on_cancel = false;
238
6.38k
    } else {
239
43.3k
        for (auto& id : _fragment_instance_ids) {
240
43.3k
            LOG(WARNING) << "PipelineFragmentContext cancel instance: " << print_id(id);
241
43.3k
        }
242
6.38k
    }
243
    // Get pipe from new load stream manager and send cancel to it or the fragment may hang to wait read from pipe
244
    // For stream load the fragment's query_id == load id, it is set in FE.
245
6.81k
    auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(_query_id);
246
6.81k
    if (stream_load_ctx != nullptr) {
247
30
        stream_load_ctx->pipe->cancel(reason.to_string());
248
        // Set error URL here because after pipe is cancelled, stream load execution may return early.
249
        // We need to set the error URL at this point to ensure error information is properly
250
        // propagated to the client.
251
30
        stream_load_ctx->error_url = get_load_error_url();
252
30
        stream_load_ctx->first_error_msg = get_first_error_msg();
253
30
    }
254
255
44.8k
    for (auto& tasks : _tasks) {
256
93.1k
        for (auto& task : tasks) {
257
93.1k
            task.first->unblock_all_dependencies();
258
93.1k
        }
259
44.8k
    }
260
6.81k
}
261
262
663k
PipelinePtr PipelineFragmentContext::add_pipeline(PipelinePtr parent, int idx) {
263
663k
    PipelineId id = _next_pipeline_id++;
264
663k
    auto pipeline = std::make_shared<Pipeline>(
265
663k
            id, parent ? std::min(parent->num_tasks(), _num_instances) : _num_instances,
266
663k
            parent ? parent->num_tasks() : _num_instances);
267
663k
    if (idx >= 0) {
268
109k
        _pipelines.insert(_pipelines.begin() + idx, pipeline);
269
554k
    } else {
270
554k
        _pipelines.emplace_back(pipeline);
271
554k
    }
272
663k
    if (parent) {
273
229k
        parent->set_children(pipeline);
274
229k
    }
275
663k
    return pipeline;
276
663k
}
277
278
429k
Status PipelineFragmentContext::_build_and_prepare_full_pipeline(ThreadPool* thread_pool) {
279
429k
    {
280
429k
        SCOPED_TIMER(_build_pipelines_timer);
281
        // 2. Build pipelines with operators in this fragment.
282
429k
        auto root_pipeline = add_pipeline();
283
429k
        RETURN_IF_ERROR(_build_pipelines(_runtime_state->obj_pool(), *_query_ctx->desc_tbl,
284
429k
                                         &_root_op, root_pipeline));
285
286
        // 3. Create sink operator
287
429k
        if (!_params.fragment.__isset.output_sink) {
288
0
            return Status::InternalError("No output sink in this fragment!");
289
0
        }
290
429k
        RETURN_IF_ERROR(_create_data_sink(_runtime_state->obj_pool(), _params.fragment.output_sink,
291
429k
                                          _params.fragment.output_exprs, _params,
292
429k
                                          root_pipeline->output_row_desc(), _runtime_state.get(),
293
429k
                                          *_desc_tbl, root_pipeline->id()));
294
429k
        RETURN_IF_ERROR(_sink->init(_params.fragment.output_sink));
295
429k
        RETURN_IF_ERROR(root_pipeline->set_sink(_sink));
296
297
553k
        for (PipelinePtr& pipeline : _pipelines) {
298
18.4E
            DCHECK(pipeline->sink() != nullptr) << pipeline->operators().size();
299
553k
            RETURN_IF_ERROR(pipeline->sink()->set_child(pipeline->operators().back()));
300
553k
        }
301
429k
    }
302
    // 4. Build local exchanger
303
429k
    if (_runtime_state->enable_local_shuffle()) {
304
426k
        SCOPED_TIMER(_plan_local_exchanger_timer);
305
426k
        RETURN_IF_ERROR(_plan_local_exchange(_params.num_buckets,
306
426k
                                             _params.bucket_seq_to_instance_idx,
307
426k
                                             _params.shuffle_idx_to_instance_idx));
308
426k
    }
309
310
    // 5. Initialize global states in pipelines.
311
664k
    for (PipelinePtr& pipeline : _pipelines) {
312
664k
        SCOPED_TIMER(_prepare_all_pipelines_timer);
313
664k
        pipeline->children().clear();
314
664k
        RETURN_IF_ERROR(pipeline->prepare(_runtime_state.get()));
315
664k
    }
316
317
427k
    {
318
427k
        SCOPED_TIMER(_build_tasks_timer);
319
        // 6. Build pipeline tasks and initialize local state.
320
427k
        RETURN_IF_ERROR(_build_pipeline_tasks(thread_pool));
321
427k
    }
322
323
427k
    return Status::OK();
324
427k
}
325
326
429k
Status PipelineFragmentContext::prepare(ThreadPool* thread_pool) {
327
429k
    if (_prepared) {
328
0
        return Status::InternalError("Already prepared");
329
0
    }
330
429k
    if (_params.__isset.query_options && _params.query_options.__isset.execution_timeout) {
331
429k
        _timeout = _params.query_options.execution_timeout;
332
429k
    }
333
334
429k
    _fragment_level_profile = std::make_unique<RuntimeProfile>("PipelineContext");
335
429k
    _prepare_timer = ADD_TIMER(_fragment_level_profile, "PrepareTime");
336
429k
    SCOPED_TIMER(_prepare_timer);
337
429k
    _build_pipelines_timer = ADD_TIMER(_fragment_level_profile, "BuildPipelinesTime");
338
429k
    _init_context_timer = ADD_TIMER(_fragment_level_profile, "InitContextTime");
339
429k
    _plan_local_exchanger_timer = ADD_TIMER(_fragment_level_profile, "PlanLocalLocalExchangerTime");
340
429k
    _build_tasks_timer = ADD_TIMER(_fragment_level_profile, "BuildTasksTime");
341
429k
    _prepare_all_pipelines_timer = ADD_TIMER(_fragment_level_profile, "PrepareAllPipelinesTime");
342
429k
    {
343
429k
        SCOPED_TIMER(_init_context_timer);
344
429k
        cast_set(_num_instances, _params.local_params.size());
345
429k
        _total_instances =
346
429k
                _params.__isset.total_instances ? _params.total_instances : _num_instances;
347
348
429k
        auto* fragment_context = this;
349
350
429k
        if (_params.query_options.__isset.is_report_success) {
351
427k
            fragment_context->set_is_report_success(_params.query_options.is_report_success);
352
427k
        }
353
354
        // 1. Set up the global runtime state.
355
429k
        _runtime_state = RuntimeState::create_unique(
356
429k
                _params.query_id, _params.fragment_id, _params.query_options,
357
429k
                _query_ctx->query_globals, _exec_env, _query_ctx.get());
358
429k
        _runtime_state->set_task_execution_context(shared_from_this());
359
429k
        SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_runtime_state->query_mem_tracker());
360
429k
        if (_params.__isset.backend_id) {
361
425k
            _runtime_state->set_backend_id(_params.backend_id);
362
425k
        }
363
429k
        if (_params.__isset.import_label) {
364
240
            _runtime_state->set_import_label(_params.import_label);
365
240
        }
366
429k
        if (_params.__isset.db_name) {
367
192
            _runtime_state->set_db_name(_params.db_name);
368
192
        }
369
429k
        if (_params.__isset.load_job_id) {
370
0
            _runtime_state->set_load_job_id(_params.load_job_id);
371
0
        }
372
373
429k
        if (_params.is_simplified_param) {
374
143k
            _desc_tbl = _query_ctx->desc_tbl;
375
286k
        } else {
376
286k
            DCHECK(_params.__isset.desc_tbl);
377
286k
            RETURN_IF_ERROR(DescriptorTbl::create(_runtime_state->obj_pool(), _params.desc_tbl,
378
286k
                                                  &_desc_tbl));
379
286k
        }
380
429k
        _runtime_state->set_desc_tbl(_desc_tbl);
381
429k
        _runtime_state->set_num_per_fragment_instances(_params.num_senders);
382
429k
        _runtime_state->set_load_stream_per_node(_params.load_stream_per_node);
383
429k
        _runtime_state->set_total_load_streams(_params.total_load_streams);
384
429k
        _runtime_state->set_num_local_sink(_params.num_local_sink);
385
386
        // init fragment_instance_ids
387
429k
        const auto target_size = _params.local_params.size();
388
429k
        _fragment_instance_ids.resize(target_size);
389
1.50M
        for (size_t i = 0; i < _params.local_params.size(); i++) {
390
1.07M
            auto fragment_instance_id = _params.local_params[i].fragment_instance_id;
391
1.07M
            _fragment_instance_ids[i] = fragment_instance_id;
392
1.07M
        }
393
429k
    }
394
395
429k
    RETURN_IF_ERROR(_build_and_prepare_full_pipeline(thread_pool));
396
397
428k
    _init_next_report_time();
398
399
428k
    _prepared = true;
400
428k
    return Status::OK();
401
429k
}
402
403
Status PipelineFragmentContext::_build_pipeline_tasks_for_instance(
404
        int instance_idx,
405
1.07M
        const std::vector<std::shared_ptr<RuntimeProfile>>& pipeline_id_to_profile) {
406
1.07M
    const auto& local_params = _params.local_params[instance_idx];
407
1.07M
    auto fragment_instance_id = local_params.fragment_instance_id;
408
1.07M
    auto runtime_filter_mgr = std::make_unique<RuntimeFilterMgr>(false);
409
1.07M
    std::map<PipelineId, PipelineTask*> pipeline_id_to_task;
410
1.07M
    auto get_shared_state = [&](PipelinePtr pipeline)
411
1.07M
            -> std::map<int, std::pair<std::shared_ptr<BasicSharedState>,
412
1.90M
                                       std::vector<std::shared_ptr<Dependency>>>> {
413
1.90M
        std::map<int, std::pair<std::shared_ptr<BasicSharedState>,
414
1.90M
                                std::vector<std::shared_ptr<Dependency>>>>
415
1.90M
                shared_state_map;
416
2.35M
        for (auto& op : pipeline->operators()) {
417
2.35M
            auto source_id = op->operator_id();
418
2.35M
            if (auto iter = _op_id_to_shared_state.find(source_id);
419
2.35M
                iter != _op_id_to_shared_state.end()) {
420
701k
                shared_state_map.insert({source_id, iter->second});
421
701k
            }
422
2.35M
        }
423
1.90M
        for (auto sink_to_source_id : pipeline->sink()->dests_id()) {
424
1.90M
            if (auto iter = _op_id_to_shared_state.find(sink_to_source_id);
425
1.90M
                iter != _op_id_to_shared_state.end()) {
426
294k
                shared_state_map.insert({sink_to_source_id, iter->second});
427
294k
            }
428
1.90M
        }
429
1.90M
        return shared_state_map;
430
1.90M
    };
431
432
3.38M
    for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) {
433
2.30M
        auto& pipeline = _pipelines[pip_idx];
434
2.30M
        if (pipeline->num_tasks() > 1 || instance_idx == 0) {
435
1.89M
            auto task_runtime_state = RuntimeState::create_unique(
436
1.89M
                    local_params.fragment_instance_id, _params.query_id, _params.fragment_id,
437
1.89M
                    _params.query_options, _query_ctx->query_globals, _exec_env, _query_ctx.get());
438
1.89M
            {
439
                // Initialize runtime state for this task
440
1.89M
                task_runtime_state->set_query_mem_tracker(_query_ctx->query_mem_tracker());
441
442
1.89M
                task_runtime_state->set_task_execution_context(shared_from_this());
443
1.89M
                task_runtime_state->set_be_number(local_params.backend_num);
444
445
1.89M
                if (_params.__isset.backend_id) {
446
1.89M
                    task_runtime_state->set_backend_id(_params.backend_id);
447
1.89M
                }
448
1.89M
                if (_params.__isset.import_label) {
449
241
                    task_runtime_state->set_import_label(_params.import_label);
450
241
                }
451
1.89M
                if (_params.__isset.db_name) {
452
193
                    task_runtime_state->set_db_name(_params.db_name);
453
193
                }
454
1.89M
                if (_params.__isset.load_job_id) {
455
0
                    task_runtime_state->set_load_job_id(_params.load_job_id);
456
0
                }
457
1.89M
                if (_params.__isset.wal_id) {
458
114
                    task_runtime_state->set_wal_id(_params.wal_id);
459
114
                }
460
1.89M
                if (_params.__isset.content_length) {
461
31
                    task_runtime_state->set_content_length(_params.content_length);
462
31
                }
463
464
1.89M
                task_runtime_state->set_desc_tbl(_desc_tbl);
465
1.89M
                task_runtime_state->set_per_fragment_instance_idx(local_params.sender_id);
466
1.89M
                task_runtime_state->set_num_per_fragment_instances(_params.num_senders);
467
1.89M
                task_runtime_state->resize_op_id_to_local_state(max_operator_id());
468
1.89M
                task_runtime_state->set_max_operator_id(max_operator_id());
469
1.89M
                task_runtime_state->set_load_stream_per_node(_params.load_stream_per_node);
470
1.89M
                task_runtime_state->set_total_load_streams(_params.total_load_streams);
471
1.89M
                task_runtime_state->set_num_local_sink(_params.num_local_sink);
472
473
1.89M
                task_runtime_state->set_runtime_filter_mgr(runtime_filter_mgr.get());
474
1.89M
            }
475
1.89M
            auto cur_task_id = _total_tasks++;
476
1.89M
            task_runtime_state->set_task_id(cur_task_id);
477
1.89M
            task_runtime_state->set_task_num(pipeline->num_tasks());
478
1.89M
            auto task = std::make_shared<PipelineTask>(
479
1.89M
                    pipeline, cur_task_id, task_runtime_state.get(),
480
1.89M
                    std::dynamic_pointer_cast<PipelineFragmentContext>(shared_from_this()),
481
1.89M
                    pipeline_id_to_profile[pip_idx].get(), get_shared_state(pipeline),
482
1.89M
                    instance_idx);
483
1.89M
            pipeline->incr_created_tasks(instance_idx, task.get());
484
1.89M
            pipeline_id_to_task.insert({pipeline->id(), task.get()});
485
1.89M
            _tasks[instance_idx].emplace_back(
486
1.89M
                    std::pair<std::shared_ptr<PipelineTask>, std::unique_ptr<RuntimeState>> {
487
1.89M
                            std::move(task), std::move(task_runtime_state)});
488
1.89M
        }
489
2.30M
    }
490
491
    /**
492
         * Build DAG for pipeline tasks.
493
         * For example, we have
494
         *
495
         *   ExchangeSink (Pipeline1)     JoinBuildSink (Pipeline2)
496
         *            \                      /
497
         *          JoinProbeOperator1 (Pipeline1)    JoinBuildSink (Pipeline3)
498
         *                 \                          /
499
         *               JoinProbeOperator2 (Pipeline1)
500
         *
501
         * In this fragment, we have three pipelines and pipeline 1 depends on pipeline 2 and pipeline 3.
502
         * To build this DAG, `_dag` manage dependencies between pipelines by pipeline ID and
503
         * `pipeline_id_to_task` is used to find the task by a unique pipeline ID.
504
         *
505
         * Finally, we have two upstream dependencies in Pipeline1 corresponding to JoinProbeOperator1
506
         * and JoinProbeOperator2.
507
         */
508
2.30M
    for (auto& _pipeline : _pipelines) {
509
2.30M
        if (pipeline_id_to_task.contains(_pipeline->id())) {
510
1.89M
            auto* task = pipeline_id_to_task[_pipeline->id()];
511
1.89M
            DCHECK(task != nullptr);
512
513
            // If this task has upstream dependency, then inject it into this task.
514
1.89M
            if (_dag.contains(_pipeline->id())) {
515
1.22M
                auto& deps = _dag[_pipeline->id()];
516
1.90M
                for (auto& dep : deps) {
517
1.90M
                    if (pipeline_id_to_task.contains(dep)) {
518
1.09M
                        auto ss = pipeline_id_to_task[dep]->get_sink_shared_state();
519
1.09M
                        if (ss) {
520
515k
                            task->inject_shared_state(ss);
521
579k
                        } else {
522
579k
                            pipeline_id_to_task[dep]->inject_shared_state(
523
579k
                                    task->get_source_shared_state());
524
579k
                        }
525
1.09M
                    }
526
1.90M
                }
527
1.22M
            }
528
1.89M
        }
529
2.30M
    }
530
3.38M
    for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) {
531
2.30M
        if (pipeline_id_to_task.contains(_pipelines[pip_idx]->id())) {
532
1.89M
            auto* task = pipeline_id_to_task[_pipelines[pip_idx]->id()];
533
1.89M
            DCHECK(pipeline_id_to_profile[pip_idx]);
534
1.89M
            std::vector<TScanRangeParams> scan_ranges;
535
1.89M
            auto node_id = _pipelines[pip_idx]->operators().front()->node_id();
536
1.89M
            if (local_params.per_node_scan_ranges.contains(node_id)) {
537
340k
                scan_ranges = local_params.per_node_scan_ranges.find(node_id)->second;
538
340k
            }
539
1.89M
            RETURN_IF_ERROR_OR_CATCH_EXCEPTION(task->prepare(scan_ranges, local_params.sender_id,
540
1.89M
                                                             _params.fragment.output_sink));
541
1.89M
        }
542
2.30M
    }
543
1.08M
    {
544
1.08M
        std::lock_guard<std::mutex> l(_state_map_lock);
545
1.08M
        _runtime_filter_mgr_map[instance_idx] = std::move(runtime_filter_mgr);
546
1.08M
    }
547
1.08M
    return Status::OK();
548
1.07M
}
549
550
428k
Status PipelineFragmentContext::_build_pipeline_tasks(ThreadPool* thread_pool) {
551
428k
    _total_tasks = 0;
552
428k
    _closed_tasks = 0;
553
428k
    const auto target_size = _params.local_params.size();
554
428k
    _tasks.resize(target_size);
555
428k
    _runtime_filter_mgr_map.resize(target_size);
556
1.09M
    for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) {
557
663k
        _pip_id_to_pipeline[_pipelines[pip_idx]->id()] = _pipelines[pip_idx].get();
558
663k
    }
559
428k
    auto pipeline_id_to_profile = _runtime_state->build_pipeline_profile(_pipelines.size());
560
561
428k
    if (target_size > 1 &&
562
428k
        (_runtime_state->query_options().__isset.parallel_prepare_threshold &&
563
127k
         target_size > _runtime_state->query_options().parallel_prepare_threshold)) {
564
        // If instances parallelism is big enough ( > parallel_prepare_threshold), we will prepare all tasks by multi-threads
565
13.7k
        std::vector<Status> prepare_status(target_size);
566
13.7k
        int submitted_tasks = 0;
567
13.7k
        Status submit_status;
568
13.7k
        CountDownLatch latch((int)target_size);
569
198k
        for (int i = 0; i < target_size; i++) {
570
184k
            submit_status = thread_pool->submit_func([&, i]() {
571
184k
                SCOPED_ATTACH_TASK(_query_ctx.get());
572
184k
                prepare_status[i] = _build_pipeline_tasks_for_instance(i, pipeline_id_to_profile);
573
184k
                latch.count_down();
574
184k
            });
575
184k
            if (LIKELY(submit_status.ok())) {
576
184k
                submitted_tasks++;
577
18.4E
            } else {
578
18.4E
                break;
579
18.4E
            }
580
184k
        }
581
13.7k
        latch.arrive_and_wait(target_size - submitted_tasks);
582
13.7k
        if (UNLIKELY(!submit_status.ok())) {
583
0
            return submit_status;
584
0
        }
585
198k
        for (int i = 0; i < submitted_tasks; i++) {
586
184k
            if (!prepare_status[i].ok()) {
587
0
                return prepare_status[i];
588
0
            }
589
184k
        }
590
414k
    } else {
591
1.30M
        for (int i = 0; i < target_size; i++) {
592
895k
            RETURN_IF_ERROR(_build_pipeline_tasks_for_instance(i, pipeline_id_to_profile));
593
895k
        }
594
414k
    }
595
428k
    _pipeline_parent_map.clear();
596
428k
    _op_id_to_shared_state.clear();
597
598
428k
    return Status::OK();
599
428k
}
600
601
426k
void PipelineFragmentContext::_init_next_report_time() {
602
426k
    auto interval_s = config::pipeline_status_report_interval;
603
426k
    if (_is_report_success && interval_s > 0 && _timeout > interval_s) {
604
41.7k
        VLOG_FILE << "enable period report: fragment id=" << _fragment_id;
605
41.7k
        uint64_t report_fragment_offset = (uint64_t)(rand() % interval_s) * NANOS_PER_SEC;
606
        // We don't want to wait longer than it takes to run the entire fragment.
607
41.7k
        _previous_report_time =
608
41.7k
                MonotonicNanos() + report_fragment_offset - (uint64_t)(interval_s)*NANOS_PER_SEC;
609
41.7k
        _disable_period_report = false;
610
41.7k
    }
611
426k
}
612
613
4.81k
void PipelineFragmentContext::refresh_next_report_time() {
614
4.81k
    auto disable = _disable_period_report.load(std::memory_order_acquire);
615
4.81k
    DCHECK(disable == true);
616
4.81k
    _previous_report_time.store(MonotonicNanos(), std::memory_order_release);
617
4.81k
    _disable_period_report.compare_exchange_strong(disable, false);
618
4.81k
}
619
620
6.92M
void PipelineFragmentContext::trigger_report_if_necessary() {
621
6.92M
    if (!_is_report_success) {
622
6.39M
        return;
623
6.39M
    }
624
529k
    auto disable = _disable_period_report.load(std::memory_order_acquire);
625
529k
    if (disable) {
626
8.61k
        return;
627
8.61k
    }
628
520k
    int32_t interval_s = config::pipeline_status_report_interval;
629
520k
    if (interval_s <= 0) {
630
0
        LOG(WARNING) << "config::status_report_interval is equal to or less than zero, do not "
631
0
                        "trigger "
632
0
                        "report.";
633
0
    }
634
520k
    uint64_t next_report_time = _previous_report_time.load(std::memory_order_acquire) +
635
520k
                                (uint64_t)(interval_s)*NANOS_PER_SEC;
636
520k
    if (MonotonicNanos() > next_report_time) {
637
4.83k
        if (!_disable_period_report.compare_exchange_strong(disable, true,
638
4.83k
                                                            std::memory_order_acq_rel)) {
639
17
            return;
640
17
        }
641
4.81k
        if (VLOG_FILE_IS_ON) {
642
0
            VLOG_FILE << "Reporting "
643
0
                      << "profile for query_id " << print_id(_query_id)
644
0
                      << ", fragment id: " << _fragment_id;
645
646
0
            std::stringstream ss;
647
0
            _runtime_state->runtime_profile()->compute_time_in_profile();
648
0
            _runtime_state->runtime_profile()->pretty_print(&ss);
649
0
            if (_runtime_state->load_channel_profile()) {
650
0
                _runtime_state->load_channel_profile()->pretty_print(&ss);
651
0
            }
652
653
0
            VLOG_FILE << "Query " << print_id(get_query_id()) << " fragment " << get_fragment_id()
654
0
                      << " profile:\n"
655
0
                      << ss.str();
656
0
        }
657
4.81k
        auto st = send_report(false);
658
4.81k
        if (!st.ok()) {
659
0
            disable = true;
660
0
            _disable_period_report.compare_exchange_strong(disable, false,
661
0
                                                           std::memory_order_acq_rel);
662
0
        }
663
4.81k
    }
664
520k
}
665
666
Status PipelineFragmentContext::_build_pipelines(ObjectPool* pool, const DescriptorTbl& descs,
667
426k
                                                 OperatorPtr* root, PipelinePtr cur_pipe) {
668
426k
    if (_params.fragment.plan.nodes.empty()) {
669
0
        throw Exception(ErrorCode::INTERNAL_ERROR, "Invalid plan which has no plan node!");
670
0
    }
671
672
426k
    int node_idx = 0;
673
674
426k
    RETURN_IF_ERROR(_create_tree_helper(pool, _params.fragment.plan.nodes, descs, nullptr,
675
426k
                                        &node_idx, root, cur_pipe, 0, false, false));
676
677
426k
    if (node_idx + 1 != _params.fragment.plan.nodes.size()) {
678
0
        return Status::InternalError(
679
0
                "Plan tree only partially reconstructed. Not all thrift nodes were used.");
680
0
    }
681
426k
    return Status::OK();
682
426k
}
683
684
Status PipelineFragmentContext::_create_tree_helper(
685
        ObjectPool* pool, const std::vector<TPlanNode>& tnodes, const DescriptorTbl& descs,
686
        OperatorPtr parent, int* node_idx, OperatorPtr* root, PipelinePtr& cur_pipe, int child_idx,
687
658k
        const bool followed_by_shuffled_operator, const bool require_bucket_distribution) {
688
    // propagate error case
689
658k
    if (*node_idx >= tnodes.size()) {
690
0
        return Status::InternalError(
691
0
                "Failed to reconstruct plan tree from thrift. Node id: {}, number of nodes: {}",
692
0
                *node_idx, tnodes.size());
693
0
    }
694
658k
    const TPlanNode& tnode = tnodes[*node_idx];
695
696
658k
    int num_children = tnodes[*node_idx].num_children;
697
658k
    bool current_followed_by_shuffled_operator = followed_by_shuffled_operator;
698
658k
    bool current_require_bucket_distribution = require_bucket_distribution;
699
    // TODO: Create CacheOperator is confused now
700
658k
    OperatorPtr op = nullptr;
701
658k
    OperatorPtr cache_op = nullptr;
702
658k
    RETURN_IF_ERROR(_create_operator(pool, tnodes[*node_idx], descs, op, cur_pipe,
703
658k
                                     parent == nullptr ? -1 : parent->node_id(), child_idx,
704
658k
                                     followed_by_shuffled_operator,
705
658k
                                     current_require_bucket_distribution, cache_op));
706
    // Initialization must be done here. For example, group by expressions in agg will be used to
707
    // decide if a local shuffle should be planed, so it must be initialized here.
708
658k
    RETURN_IF_ERROR(op->init(tnode, _runtime_state.get()));
709
    // assert(parent != nullptr || (node_idx == 0 && root_expr != nullptr));
710
658k
    if (parent != nullptr) {
711
        // add to parent's child(s)
712
231k
        RETURN_IF_ERROR(parent->set_child(cache_op ? cache_op : op));
713
426k
    } else {
714
426k
        *root = op;
715
426k
    }
716
    /**
717
     * `ExchangeType::HASH_SHUFFLE` should be used if an operator is followed by a shuffled operator (shuffled hash join, union operator followed by co-located operators).
718
     *
719
     * For plan:
720
     * LocalExchange(id=0) -> Aggregation(id=1) -> ShuffledHashJoin(id=2)
721
     *                           Exchange(id=3) -> ShuffledHashJoinBuild(id=2)
722
     * We must ensure data distribution of `LocalExchange(id=0)` is same as Exchange(id=3).
723
     *
724
     * If an operator's is followed by a local exchange without shuffle (e.g. passthrough), a
725
     * shuffled local exchanger will be used before join so it is not followed by shuffle join.
726
     */
727
658k
    auto required_data_distribution =
728
658k
            cur_pipe->operators().empty()
729
658k
                    ? cur_pipe->sink()->required_data_distribution(_runtime_state.get())
730
658k
                    : op->required_data_distribution(_runtime_state.get());
731
658k
    current_followed_by_shuffled_operator =
732
658k
            ((followed_by_shuffled_operator ||
733
658k
              (cur_pipe->operators().empty() ? cur_pipe->sink()->is_shuffled_operator()
734
600k
                                             : op->is_shuffled_operator())) &&
735
658k
             Pipeline::is_hash_exchange(required_data_distribution.distribution_type)) ||
736
658k
            (followed_by_shuffled_operator &&
737
548k
             required_data_distribution.distribution_type == ExchangeType::NOOP);
738
739
658k
    current_require_bucket_distribution =
740
658k
            ((require_bucket_distribution ||
741
658k
              (cur_pipe->operators().empty() ? cur_pipe->sink()->is_colocated_operator()
742
603k
                                             : op->is_colocated_operator())) &&
743
658k
             Pipeline::is_hash_exchange(required_data_distribution.distribution_type)) ||
744
658k
            (require_bucket_distribution &&
745
553k
             required_data_distribution.distribution_type == ExchangeType::NOOP);
746
747
658k
    if (num_children == 0) {
748
441k
        _use_serial_source = op->is_serial_operator();
749
441k
    }
750
    // rely on that tnodes is preorder of the plan
751
889k
    for (int i = 0; i < num_children; i++) {
752
230k
        ++*node_idx;
753
230k
        RETURN_IF_ERROR(_create_tree_helper(pool, tnodes, descs, op, node_idx, nullptr, cur_pipe, i,
754
230k
                                            current_followed_by_shuffled_operator,
755
230k
                                            current_require_bucket_distribution));
756
757
        // we are expecting a child, but have used all nodes
758
        // this means we have been given a bad tree and must fail
759
230k
        if (*node_idx >= tnodes.size()) {
760
0
            return Status::InternalError(
761
0
                    "Failed to reconstruct plan tree from thrift. Node id: {}, number of "
762
0
                    "nodes: {}",
763
0
                    *node_idx, tnodes.size());
764
0
        }
765
230k
    }
766
767
658k
    return Status::OK();
768
658k
}
769
770
void PipelineFragmentContext::_inherit_pipeline_properties(
771
        const DataDistribution& data_distribution, PipelinePtr pipe_with_source,
772
109k
        PipelinePtr pipe_with_sink) {
773
109k
    pipe_with_sink->set_num_tasks(pipe_with_source->num_tasks());
774
109k
    pipe_with_source->set_num_tasks(_num_instances);
775
109k
    pipe_with_source->set_data_distribution(data_distribution);
776
109k
}
777
778
Status PipelineFragmentContext::_add_local_exchange_impl(
779
        int idx, ObjectPool* pool, PipelinePtr cur_pipe, PipelinePtr new_pip,
780
        DataDistribution data_distribution, bool* do_local_exchange, int num_buckets,
781
        const std::map<int, int>& bucket_seq_to_instance_idx,
782
109k
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
783
109k
    auto& operators = cur_pipe->operators();
784
109k
    const auto downstream_pipeline_id = cur_pipe->id();
785
109k
    auto local_exchange_id = next_operator_id();
786
    // 1. Create a new pipeline with local exchange sink.
787
109k
    DataSinkOperatorPtr sink;
788
109k
    auto sink_id = next_sink_operator_id();
789
790
    /**
791
     * `bucket_seq_to_instance_idx` is empty if no scan operator is contained in this fragment.
792
     * So co-located operators(e.g. Agg, Analytic) should use `HASH_SHUFFLE` instead of `BUCKET_HASH_SHUFFLE`.
793
     */
794
109k
    const bool followed_by_shuffled_operator =
795
109k
            operators.size() > idx ? operators[idx]->followed_by_shuffled_operator()
796
109k
                                   : cur_pipe->sink()->followed_by_shuffled_operator();
797
109k
    const bool use_global_hash_shuffle = bucket_seq_to_instance_idx.empty() &&
798
109k
                                         !shuffle_idx_to_instance_idx.contains(-1) &&
799
109k
                                         followed_by_shuffled_operator && !_use_serial_source;
800
109k
    sink = std::make_shared<LocalExchangeSinkOperatorX>(
801
109k
            sink_id, local_exchange_id, use_global_hash_shuffle ? _total_instances : _num_instances,
802
109k
            data_distribution.partition_exprs, bucket_seq_to_instance_idx);
803
109k
    if (bucket_seq_to_instance_idx.empty() &&
804
109k
        data_distribution.distribution_type == ExchangeType::BUCKET_HASH_SHUFFLE) {
805
6
        data_distribution.distribution_type = ExchangeType::HASH_SHUFFLE;
806
6
    }
807
109k
    RETURN_IF_ERROR(new_pip->set_sink(sink));
808
109k
    RETURN_IF_ERROR(new_pip->sink()->init(_runtime_state.get(), data_distribution.distribution_type,
809
109k
                                          num_buckets, use_global_hash_shuffle,
810
109k
                                          shuffle_idx_to_instance_idx));
811
812
    // 2. Create and initialize LocalExchangeSharedState.
813
109k
    std::shared_ptr<LocalExchangeSharedState> shared_state =
814
109k
            LocalExchangeSharedState::create_shared(_num_instances);
815
109k
    switch (data_distribution.distribution_type) {
816
16.0k
    case ExchangeType::HASH_SHUFFLE:
817
16.0k
        shared_state->exchanger = ShuffleExchanger::create_unique(
818
16.0k
                std::max(cur_pipe->num_tasks(), _num_instances), _num_instances,
819
16.0k
                use_global_hash_shuffle ? _total_instances : _num_instances,
820
16.0k
                _runtime_state->query_options().__isset.local_exchange_free_blocks_limit
821
16.0k
                        ? cast_set<int>(
822
16.0k
                                  _runtime_state->query_options().local_exchange_free_blocks_limit)
823
16.0k
                        : 0);
824
16.0k
        break;
825
514
    case ExchangeType::BUCKET_HASH_SHUFFLE:
826
514
        shared_state->exchanger = BucketShuffleExchanger::create_unique(
827
514
                std::max(cur_pipe->num_tasks(), _num_instances), _num_instances, num_buckets,
828
514
                _runtime_state->query_options().__isset.local_exchange_free_blocks_limit
829
514
                        ? cast_set<int>(
830
514
                                  _runtime_state->query_options().local_exchange_free_blocks_limit)
831
514
                        : 0);
832
514
        break;
833
88.8k
    case ExchangeType::PASSTHROUGH:
834
88.8k
        shared_state->exchanger = PassthroughExchanger::create_unique(
835
88.8k
                cur_pipe->num_tasks(), _num_instances,
836
88.8k
                _runtime_state->query_options().__isset.local_exchange_free_blocks_limit
837
88.8k
                        ? cast_set<int>(
838
88.8k
                                  _runtime_state->query_options().local_exchange_free_blocks_limit)
839
88.8k
                        : 0);
840
88.8k
        break;
841
326
    case ExchangeType::BROADCAST:
842
326
        shared_state->exchanger = BroadcastExchanger::create_unique(
843
326
                cur_pipe->num_tasks(), _num_instances,
844
326
                _runtime_state->query_options().__isset.local_exchange_free_blocks_limit
845
326
                        ? cast_set<int>(
846
326
                                  _runtime_state->query_options().local_exchange_free_blocks_limit)
847
326
                        : 0);
848
326
        break;
849
2.67k
    case ExchangeType::PASS_TO_ONE:
850
2.67k
        if (_runtime_state->enable_share_hash_table_for_broadcast_join()) {
851
            // If shared hash table is enabled for BJ, hash table will be built by only one task
852
1.61k
            shared_state->exchanger = PassToOneExchanger::create_unique(
853
1.61k
                    cur_pipe->num_tasks(), _num_instances,
854
1.61k
                    _runtime_state->query_options().__isset.local_exchange_free_blocks_limit
855
1.61k
                            ? cast_set<int>(_runtime_state->query_options()
856
1.61k
                                                    .local_exchange_free_blocks_limit)
857
1.61k
                            : 0);
858
1.61k
        } else {
859
1.06k
            shared_state->exchanger = BroadcastExchanger::create_unique(
860
1.06k
                    cur_pipe->num_tasks(), _num_instances,
861
1.06k
                    _runtime_state->query_options().__isset.local_exchange_free_blocks_limit
862
1.06k
                            ? cast_set<int>(_runtime_state->query_options()
863
1.06k
                                                    .local_exchange_free_blocks_limit)
864
1.06k
                            : 0);
865
1.06k
        }
866
2.67k
        break;
867
734
    case ExchangeType::ADAPTIVE_PASSTHROUGH:
868
734
        shared_state->exchanger = AdaptivePassthroughExchanger::create_unique(
869
734
                std::max(cur_pipe->num_tasks(), _num_instances), _num_instances,
870
734
                _runtime_state->query_options().__isset.local_exchange_free_blocks_limit
871
734
                        ? cast_set<int>(
872
734
                                  _runtime_state->query_options().local_exchange_free_blocks_limit)
873
734
                        : 0);
874
734
        break;
875
0
    default:
876
0
        return Status::InternalError("Unsupported local exchange type : " +
877
0
                                     std::to_string((int)data_distribution.distribution_type));
878
109k
    }
879
109k
    shared_state->create_source_dependencies(_num_instances, local_exchange_id, local_exchange_id,
880
109k
                                             "LOCAL_EXCHANGE_OPERATOR");
881
109k
    shared_state->create_sink_dependency(sink_id, local_exchange_id, "LOCAL_EXCHANGE_SINK");
882
109k
    _op_id_to_shared_state.insert({local_exchange_id, {shared_state, shared_state->sink_deps}});
883
884
    // 3. Set two pipelines' operator list. For example, split pipeline [Scan - AggSink] to
885
    // pipeline1 [Scan - LocalExchangeSink] and pipeline2 [LocalExchangeSource - AggSink].
886
887
    // 3.1 Initialize new pipeline's operator list.
888
109k
    std::copy(operators.begin(), operators.begin() + idx,
889
109k
              std::inserter(new_pip->operators(), new_pip->operators().end()));
890
891
    // 3.2 Erase unused operators in previous pipeline.
892
109k
    operators.erase(operators.begin(), operators.begin() + idx);
893
894
    // 4. Initialize LocalExchangeSource and insert it into this pipeline.
895
109k
    OperatorPtr source_op;
896
109k
    source_op = std::make_shared<LocalExchangeSourceOperatorX>(pool, local_exchange_id);
897
109k
    RETURN_IF_ERROR(source_op->set_child(new_pip->operators().back()));
898
109k
    RETURN_IF_ERROR(source_op->init(data_distribution.distribution_type));
899
109k
    if (!operators.empty()) {
900
39.2k
        RETURN_IF_ERROR(operators.front()->set_child(nullptr));
901
39.2k
        RETURN_IF_ERROR(operators.front()->set_child(source_op));
902
39.2k
    }
903
109k
    operators.insert(operators.begin(), source_op);
904
905
    // 5. Set children for two pipelines separately.
906
109k
    std::vector<std::shared_ptr<Pipeline>> new_children;
907
109k
    std::vector<PipelineId> edges_with_source;
908
126k
    for (auto child : cur_pipe->children()) {
909
126k
        bool found = false;
910
139k
        for (auto op : new_pip->operators()) {
911
139k
            if (child->sink()->node_id() == op->node_id()) {
912
12.1k
                new_pip->set_children(child);
913
12.1k
                found = true;
914
12.1k
            };
915
139k
        }
916
126k
        if (!found) {
917
114k
            new_children.push_back(child);
918
114k
            edges_with_source.push_back(child->id());
919
114k
        }
920
126k
    }
921
109k
    new_children.push_back(new_pip);
922
109k
    edges_with_source.push_back(new_pip->id());
923
924
    // 6. Set DAG for new pipelines.
925
109k
    if (!new_pip->children().empty()) {
926
7.13k
        std::vector<PipelineId> edges_with_sink;
927
12.1k
        for (auto child : new_pip->children()) {
928
12.1k
            edges_with_sink.push_back(child->id());
929
12.1k
        }
930
7.13k
        _dag.insert({new_pip->id(), edges_with_sink});
931
7.13k
    }
932
109k
    cur_pipe->set_children(new_children);
933
109k
    _dag[downstream_pipeline_id] = edges_with_source;
934
109k
    RETURN_IF_ERROR(new_pip->sink()->set_child(new_pip->operators().back()));
935
109k
    RETURN_IF_ERROR(cur_pipe->sink()->set_child(nullptr));
936
109k
    RETURN_IF_ERROR(cur_pipe->sink()->set_child(cur_pipe->operators().back()));
937
938
    // 7. Inherit properties from current pipeline.
939
109k
    _inherit_pipeline_properties(data_distribution, cur_pipe, new_pip);
940
109k
    return Status::OK();
941
109k
}
942
943
Status PipelineFragmentContext::_add_local_exchange(
944
        int pip_idx, int idx, int node_id, ObjectPool* pool, PipelinePtr cur_pipe,
945
        DataDistribution data_distribution, bool* do_local_exchange, int num_buckets,
946
        const std::map<int, int>& bucket_seq_to_instance_idx,
947
185k
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
948
185k
    if (_num_instances <= 1 || cur_pipe->num_tasks_of_parent() <= 1) {
949
51.3k
        return Status::OK();
950
51.3k
    }
951
952
134k
    if (!cur_pipe->need_to_local_exchange(data_distribution, idx)) {
953
42.0k
        return Status::OK();
954
42.0k
    }
955
92.3k
    *do_local_exchange = true;
956
957
92.3k
    auto& operators = cur_pipe->operators();
958
92.3k
    auto total_op_num = operators.size();
959
92.3k
    auto new_pip = add_pipeline(cur_pipe, pip_idx + 1);
960
92.3k
    RETURN_IF_ERROR(_add_local_exchange_impl(
961
92.3k
            idx, pool, cur_pipe, new_pip, data_distribution, do_local_exchange, num_buckets,
962
92.3k
            bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx));
963
964
18.4E
    CHECK(total_op_num + 1 == cur_pipe->operators().size() + new_pip->operators().size())
965
18.4E
            << "total_op_num: " << total_op_num
966
18.4E
            << " cur_pipe->operators().size(): " << cur_pipe->operators().size()
967
18.4E
            << " new_pip->operators().size(): " << new_pip->operators().size();
968
969
    // There are some local shuffles with relatively heavy operations on the sink.
970
    // If the local sink concurrency is 1 and the local source concurrency is n, the sink becomes a bottleneck.
971
    // Therefore, local passthrough is used to increase the concurrency of the sink.
972
    // op -> local sink(1) -> local source (n)
973
    // op -> local passthrough(1) -> local passthrough(n) ->  local sink(n) -> local source (n)
974
92.7k
    if (cur_pipe->num_tasks() > 1 && new_pip->num_tasks() == 1 &&
975
92.3k
        Pipeline::heavy_operations_on_the_sink(data_distribution.distribution_type)) {
976
16.3k
        RETURN_IF_ERROR(_add_local_exchange_impl(
977
16.3k
                cast_set<int>(new_pip->operators().size()), pool, new_pip,
978
16.3k
                add_pipeline(new_pip, pip_idx + 2), DataDistribution(ExchangeType::PASSTHROUGH),
979
16.3k
                do_local_exchange, num_buckets, bucket_seq_to_instance_idx,
980
16.3k
                shuffle_idx_to_instance_idx));
981
16.3k
    }
982
92.3k
    return Status::OK();
983
92.3k
}
984
985
Status PipelineFragmentContext::_plan_local_exchange(
986
        int num_buckets, const std::map<int, int>& bucket_seq_to_instance_idx,
987
425k
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
988
977k
    for (int pip_idx = cast_set<int>(_pipelines.size()) - 1; pip_idx >= 0; pip_idx--) {
989
551k
        _pipelines[pip_idx]->init_data_distribution(_runtime_state.get());
990
        // Set property if child pipeline is not join operator's child.
991
551k
        if (!_pipelines[pip_idx]->children().empty()) {
992
120k
            for (auto& child : _pipelines[pip_idx]->children()) {
993
120k
                if (child->sink()->node_id() ==
994
120k
                    _pipelines[pip_idx]->operators().front()->node_id()) {
995
108k
                    _pipelines[pip_idx]->set_data_distribution(child->data_distribution());
996
108k
                }
997
120k
            }
998
116k
        }
999
1000
        // if 'num_buckets == 0' means the fragment is colocated by exchange node not the
1001
        // scan node. so here use `_num_instance` to replace the `num_buckets` to prevent dividing 0
1002
        // still keep colocate plan after local shuffle
1003
551k
        RETURN_IF_ERROR(_plan_local_exchange(num_buckets, pip_idx, _pipelines[pip_idx],
1004
551k
                                             bucket_seq_to_instance_idx,
1005
551k
                                             shuffle_idx_to_instance_idx));
1006
551k
    }
1007
425k
    return Status::OK();
1008
425k
}
1009
1010
Status PipelineFragmentContext::_plan_local_exchange(
1011
        int num_buckets, int pip_idx, PipelinePtr pip,
1012
        const std::map<int, int>& bucket_seq_to_instance_idx,
1013
551k
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
1014
551k
    int idx = 1;
1015
551k
    bool do_local_exchange = false;
1016
590k
    do {
1017
590k
        auto& ops = pip->operators();
1018
590k
        do_local_exchange = false;
1019
        // Plan local exchange for each operator.
1020
662k
        for (; idx < ops.size();) {
1021
111k
            if (ops[idx]->required_data_distribution(_runtime_state.get()).need_local_exchange()) {
1022
100k
                RETURN_IF_ERROR(_add_local_exchange(
1023
100k
                        pip_idx, idx, ops[idx]->node_id(), _runtime_state->obj_pool(), pip,
1024
100k
                        ops[idx]->required_data_distribution(_runtime_state.get()),
1025
100k
                        &do_local_exchange, num_buckets, bucket_seq_to_instance_idx,
1026
100k
                        shuffle_idx_to_instance_idx));
1027
100k
            }
1028
111k
            if (do_local_exchange) {
1029
                // If local exchange is needed for current operator, we will split this pipeline to
1030
                // two pipelines by local exchange sink/source. And then we need to process remaining
1031
                // operators in this pipeline so we set idx to 2 (0 is local exchange source and 1
1032
                // is current operator was already processed) and continue to plan local exchange.
1033
39.3k
                idx = 2;
1034
39.3k
                break;
1035
39.3k
            }
1036
72.0k
            idx++;
1037
72.0k
        }
1038
590k
    } while (do_local_exchange);
1039
551k
    if (pip->sink()->required_data_distribution(_runtime_state.get()).need_local_exchange()) {
1040
85.4k
        RETURN_IF_ERROR(_add_local_exchange(
1041
85.4k
                pip_idx, idx, pip->sink()->node_id(), _runtime_state->obj_pool(), pip,
1042
85.4k
                pip->sink()->required_data_distribution(_runtime_state.get()), &do_local_exchange,
1043
85.4k
                num_buckets, bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx));
1044
85.4k
    }
1045
551k
    return Status::OK();
1046
551k
}
1047
1048
Status PipelineFragmentContext::_create_data_sink(ObjectPool* pool, const TDataSink& thrift_sink,
1049
                                                  const std::vector<TExpr>& output_exprs,
1050
                                                  const TPipelineFragmentParams& params,
1051
                                                  const RowDescriptor& row_desc,
1052
                                                  RuntimeState* state, DescriptorTbl& desc_tbl,
1053
429k
                                                  PipelineId cur_pipeline_id) {
1054
429k
    switch (thrift_sink.type) {
1055
141k
    case TDataSinkType::DATA_STREAM_SINK: {
1056
141k
        if (!thrift_sink.__isset.stream_sink) {
1057
0
            return Status::InternalError("Missing data stream sink.");
1058
0
        }
1059
141k
        _sink = std::make_shared<ExchangeSinkOperatorX>(
1060
141k
                state, row_desc, next_sink_operator_id(), thrift_sink.stream_sink,
1061
141k
                params.destinations, _fragment_instance_ids);
1062
141k
        break;
1063
141k
    }
1064
250k
    case TDataSinkType::RESULT_SINK: {
1065
250k
        if (!thrift_sink.__isset.result_sink) {
1066
0
            return Status::InternalError("Missing data buffer sink.");
1067
0
        }
1068
1069
250k
        _sink = std::make_shared<ResultSinkOperatorX>(next_sink_operator_id(), row_desc,
1070
250k
                                                      output_exprs, thrift_sink.result_sink);
1071
250k
        break;
1072
250k
    }
1073
105
    case TDataSinkType::DICTIONARY_SINK: {
1074
105
        if (!thrift_sink.__isset.dictionary_sink) {
1075
0
            return Status::InternalError("Missing dict sink.");
1076
0
        }
1077
1078
105
        _sink = std::make_shared<DictSinkOperatorX>(next_sink_operator_id(), row_desc, output_exprs,
1079
105
                                                    thrift_sink.dictionary_sink);
1080
105
        break;
1081
105
    }
1082
0
    case TDataSinkType::GROUP_COMMIT_OLAP_TABLE_SINK:
1083
31.0k
    case TDataSinkType::OLAP_TABLE_SINK: {
1084
31.0k
        if (state->query_options().enable_memtable_on_sink_node &&
1085
31.0k
            !_has_inverted_index_v1_or_partial_update(thrift_sink.olap_table_sink) &&
1086
31.0k
            !config::is_cloud_mode()) {
1087
2.06k
            _sink = std::make_shared<OlapTableSinkV2OperatorX>(pool, next_sink_operator_id(),
1088
2.06k
                                                               row_desc, output_exprs);
1089
29.0k
        } else {
1090
29.0k
            _sink = std::make_shared<OlapTableSinkOperatorX>(pool, next_sink_operator_id(),
1091
29.0k
                                                             row_desc, output_exprs);
1092
29.0k
        }
1093
31.0k
        break;
1094
0
    }
1095
165
    case TDataSinkType::GROUP_COMMIT_BLOCK_SINK: {
1096
165
        DCHECK(thrift_sink.__isset.olap_table_sink);
1097
165
        DCHECK(state->get_query_ctx() != nullptr);
1098
165
        state->get_query_ctx()->query_mem_tracker()->is_group_commit_load = true;
1099
165
        _sink = std::make_shared<GroupCommitBlockSinkOperatorX>(next_sink_operator_id(), row_desc,
1100
165
                                                                output_exprs);
1101
165
        break;
1102
0
    }
1103
1.46k
    case TDataSinkType::HIVE_TABLE_SINK: {
1104
1.46k
        if (!thrift_sink.__isset.hive_table_sink) {
1105
0
            return Status::InternalError("Missing hive table sink.");
1106
0
        }
1107
1.46k
        _sink = std::make_shared<HiveTableSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1108
1.46k
                                                         output_exprs);
1109
1.46k
        break;
1110
1.46k
    }
1111
1.72k
    case TDataSinkType::ICEBERG_TABLE_SINK: {
1112
1.72k
        if (!thrift_sink.__isset.iceberg_table_sink) {
1113
0
            return Status::InternalError("Missing iceberg table sink.");
1114
0
        }
1115
1.72k
        if (thrift_sink.iceberg_table_sink.__isset.sort_info) {
1116
0
            _sink = std::make_shared<SpillIcebergTableSinkOperatorX>(pool, next_sink_operator_id(),
1117
0
                                                                     row_desc, output_exprs);
1118
1.72k
        } else {
1119
1.72k
            _sink = std::make_shared<IcebergTableSinkOperatorX>(pool, next_sink_operator_id(),
1120
1.72k
                                                                row_desc, output_exprs);
1121
1.72k
        }
1122
1.72k
        break;
1123
1.72k
    }
1124
20
    case TDataSinkType::ICEBERG_DELETE_SINK: {
1125
20
        if (!thrift_sink.__isset.iceberg_delete_sink) {
1126
0
            return Status::InternalError("Missing iceberg delete sink.");
1127
0
        }
1128
20
        _sink = std::make_shared<IcebergDeleteSinkOperatorX>(pool, next_sink_operator_id(),
1129
20
                                                             row_desc, output_exprs);
1130
20
        break;
1131
20
    }
1132
80
    case TDataSinkType::ICEBERG_MERGE_SINK: {
1133
80
        if (!thrift_sink.__isset.iceberg_merge_sink) {
1134
0
            return Status::InternalError("Missing iceberg merge sink.");
1135
0
        }
1136
80
        _sink = std::make_shared<IcebergMergeSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1137
80
                                                            output_exprs);
1138
80
        break;
1139
80
    }
1140
0
    case TDataSinkType::MAXCOMPUTE_TABLE_SINK: {
1141
0
        if (!thrift_sink.__isset.max_compute_table_sink) {
1142
0
            return Status::InternalError("Missing max compute table sink.");
1143
0
        }
1144
0
        _sink = std::make_shared<MCTableSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1145
0
                                                       output_exprs);
1146
0
        break;
1147
0
    }
1148
80
    case TDataSinkType::JDBC_TABLE_SINK: {
1149
80
        if (!thrift_sink.__isset.jdbc_table_sink) {
1150
0
            return Status::InternalError("Missing data jdbc sink.");
1151
0
        }
1152
80
        if (config::enable_java_support) {
1153
80
            _sink = std::make_shared<JdbcTableSinkOperatorX>(row_desc, next_sink_operator_id(),
1154
80
                                                             output_exprs);
1155
80
        } else {
1156
0
            return Status::InternalError(
1157
0
                    "Jdbc table sink is not enabled, you can change be config "
1158
0
                    "enable_java_support to true and restart be.");
1159
0
        }
1160
80
        break;
1161
80
    }
1162
80
    case TDataSinkType::MEMORY_SCRATCH_SINK: {
1163
3
        if (!thrift_sink.__isset.memory_scratch_sink) {
1164
0
            return Status::InternalError("Missing data buffer sink.");
1165
0
        }
1166
1167
3
        _sink = std::make_shared<MemoryScratchSinkOperatorX>(row_desc, next_sink_operator_id(),
1168
3
                                                             output_exprs);
1169
3
        break;
1170
3
    }
1171
501
    case TDataSinkType::RESULT_FILE_SINK: {
1172
501
        if (!thrift_sink.__isset.result_file_sink) {
1173
0
            return Status::InternalError("Missing result file sink.");
1174
0
        }
1175
1176
        // Result file sink is not the top sink
1177
501
        if (params.__isset.destinations && !params.destinations.empty()) {
1178
0
            _sink = std::make_shared<ResultFileSinkOperatorX>(
1179
0
                    next_sink_operator_id(), row_desc, thrift_sink.result_file_sink,
1180
0
                    params.destinations, output_exprs, desc_tbl);
1181
501
        } else {
1182
501
            _sink = std::make_shared<ResultFileSinkOperatorX>(next_sink_operator_id(), row_desc,
1183
501
                                                              output_exprs);
1184
501
        }
1185
501
        break;
1186
501
    }
1187
1.87k
    case TDataSinkType::MULTI_CAST_DATA_STREAM_SINK: {
1188
1.87k
        DCHECK(thrift_sink.__isset.multi_cast_stream_sink);
1189
1.87k
        DCHECK_GT(thrift_sink.multi_cast_stream_sink.sinks.size(), 0);
1190
1.87k
        auto sink_id = next_sink_operator_id();
1191
1.87k
        const int multi_cast_node_id = sink_id;
1192
1.87k
        auto sender_size = thrift_sink.multi_cast_stream_sink.sinks.size();
1193
        // one sink has multiple sources.
1194
1.87k
        std::vector<int> sources;
1195
7.29k
        for (int i = 0; i < sender_size; ++i) {
1196
5.41k
            auto source_id = next_operator_id();
1197
5.41k
            sources.push_back(source_id);
1198
5.41k
        }
1199
1200
1.87k
        _sink = std::make_shared<MultiCastDataStreamSinkOperatorX>(
1201
1.87k
                sink_id, multi_cast_node_id, sources, pool, thrift_sink.multi_cast_stream_sink);
1202
7.29k
        for (int i = 0; i < sender_size; ++i) {
1203
5.41k
            auto new_pipeline = add_pipeline();
1204
            // use to exchange sink
1205
5.41k
            RowDescriptor* exchange_row_desc = nullptr;
1206
5.41k
            {
1207
5.41k
                const auto& tmp_row_desc =
1208
5.41k
                        !thrift_sink.multi_cast_stream_sink.sinks[i].output_exprs.empty()
1209
5.41k
                                ? RowDescriptor(state->desc_tbl(),
1210
5.41k
                                                {thrift_sink.multi_cast_stream_sink.sinks[i]
1211
5.41k
                                                         .output_tuple_id})
1212
5.41k
                                : row_desc;
1213
5.41k
                exchange_row_desc = pool->add(new RowDescriptor(tmp_row_desc));
1214
5.41k
            }
1215
5.41k
            auto source_id = sources[i];
1216
5.41k
            OperatorPtr source_op;
1217
            // 1. create and set the source operator of multi_cast_data_stream_source for new pipeline
1218
5.41k
            source_op = std::make_shared<MultiCastDataStreamerSourceOperatorX>(
1219
5.41k
                    /*node_id*/ source_id, /*consumer_id*/ i, pool,
1220
5.41k
                    thrift_sink.multi_cast_stream_sink.sinks[i], row_desc,
1221
5.41k
                    /*operator_id=*/source_id);
1222
5.41k
            RETURN_IF_ERROR(new_pipeline->add_operator(
1223
5.41k
                    source_op, params.__isset.parallel_instances ? params.parallel_instances : 0));
1224
            // 2. create and set sink operator of data stream sender for new pipeline
1225
1226
5.41k
            DataSinkOperatorPtr sink_op;
1227
5.41k
            sink_op = std::make_shared<ExchangeSinkOperatorX>(
1228
5.41k
                    state, *exchange_row_desc, next_sink_operator_id(),
1229
5.41k
                    thrift_sink.multi_cast_stream_sink.sinks[i],
1230
5.41k
                    thrift_sink.multi_cast_stream_sink.destinations[i], _fragment_instance_ids);
1231
1232
5.41k
            RETURN_IF_ERROR(new_pipeline->set_sink(sink_op));
1233
5.41k
            {
1234
5.41k
                TDataSink* t = pool->add(new TDataSink());
1235
5.41k
                t->stream_sink = thrift_sink.multi_cast_stream_sink.sinks[i];
1236
5.41k
                RETURN_IF_ERROR(sink_op->init(*t));
1237
5.41k
            }
1238
1239
            // 3. set dependency dag
1240
5.41k
            _dag[new_pipeline->id()].push_back(cur_pipeline_id);
1241
5.41k
        }
1242
1.87k
        if (sources.empty()) {
1243
0
            return Status::InternalError("size of sources must be greater than 0");
1244
0
        }
1245
1.87k
        break;
1246
1.87k
    }
1247
1.87k
    case TDataSinkType::BLACKHOLE_SINK: {
1248
13
        if (!thrift_sink.__isset.blackhole_sink) {
1249
0
            return Status::InternalError("Missing blackhole sink.");
1250
0
        }
1251
1252
13
        _sink.reset(new BlackholeSinkOperatorX(next_sink_operator_id()));
1253
13
        break;
1254
13
    }
1255
156
    case TDataSinkType::TVF_TABLE_SINK: {
1256
156
        if (!thrift_sink.__isset.tvf_table_sink) {
1257
0
            return Status::InternalError("Missing TVF table sink.");
1258
0
        }
1259
156
        _sink = std::make_shared<TVFTableSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1260
156
                                                        output_exprs);
1261
156
        break;
1262
156
    }
1263
0
    default:
1264
0
        return Status::InternalError("Unsuported sink type in pipeline: {}", thrift_sink.type);
1265
429k
    }
1266
427k
    return Status::OK();
1267
429k
}
1268
1269
// NOLINTBEGIN(readability-function-size)
1270
// NOLINTBEGIN(readability-function-cognitive-complexity)
1271
Status PipelineFragmentContext::_create_operator(ObjectPool* pool, const TPlanNode& tnode,
1272
                                                 const DescriptorTbl& descs, OperatorPtr& op,
1273
                                                 PipelinePtr& cur_pipe, int parent_idx,
1274
                                                 int child_idx,
1275
                                                 const bool followed_by_shuffled_operator,
1276
                                                 const bool require_bucket_distribution,
1277
660k
                                                 OperatorPtr& cache_op) {
1278
660k
    std::vector<DataSinkOperatorPtr> sink_ops;
1279
660k
    Defer defer = Defer([&]() {
1280
659k
        if (op) {
1281
659k
            op->update_operator(tnode, followed_by_shuffled_operator, require_bucket_distribution);
1282
659k
        }
1283
659k
        for (auto& s : sink_ops) {
1284
120k
            s->update_operator(tnode, followed_by_shuffled_operator, require_bucket_distribution);
1285
120k
        }
1286
659k
    });
1287
    // We directly construct the operator from Thrift because the given array is in the order of preorder traversal.
1288
    // Therefore, here we need to use a stack-like structure.
1289
660k
    _pipeline_parent_map.pop(cur_pipe, parent_idx, child_idx);
1290
660k
    std::stringstream error_msg;
1291
660k
    bool enable_query_cache = _params.fragment.__isset.query_cache_param;
1292
1293
660k
    bool fe_with_old_version = false;
1294
660k
    switch (tnode.node_type) {
1295
208k
    case TPlanNodeType::OLAP_SCAN_NODE: {
1296
208k
        op = std::make_shared<OlapScanOperatorX>(
1297
208k
                pool, tnode, next_operator_id(), descs, _num_instances,
1298
208k
                enable_query_cache ? _params.fragment.query_cache_param : TQueryCacheParam {});
1299
208k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1300
208k
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1301
208k
        break;
1302
208k
    }
1303
79
    case TPlanNodeType::GROUP_COMMIT_SCAN_NODE: {
1304
79
        DCHECK(_query_ctx != nullptr);
1305
79
        _query_ctx->query_mem_tracker()->is_group_commit_load = true;
1306
79
        op = std::make_shared<GroupCommitOperatorX>(pool, tnode, next_operator_id(), descs,
1307
79
                                                    _num_instances);
1308
79
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1309
79
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1310
79
        break;
1311
79
    }
1312
0
    case TPlanNodeType::JDBC_SCAN_NODE: {
1313
0
        if (config::enable_java_support) {
1314
0
            op = std::make_shared<JDBCScanOperatorX>(pool, tnode, next_operator_id(), descs,
1315
0
                                                     _num_instances);
1316
0
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1317
0
        } else {
1318
0
            return Status::InternalError(
1319
0
                    "Jdbc scan node is disabled, you can change be config enable_java_support "
1320
0
                    "to true and restart be.");
1321
0
        }
1322
0
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1323
0
        break;
1324
0
    }
1325
22.9k
    case TPlanNodeType::FILE_SCAN_NODE: {
1326
22.9k
        op = std::make_shared<FileScanOperatorX>(pool, tnode, next_operator_id(), descs,
1327
22.9k
                                                 _num_instances);
1328
22.9k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1329
22.9k
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1330
22.9k
        break;
1331
22.9k
    }
1332
0
    case TPlanNodeType::ES_SCAN_NODE:
1333
592
    case TPlanNodeType::ES_HTTP_SCAN_NODE: {
1334
592
        op = std::make_shared<EsScanOperatorX>(pool, tnode, next_operator_id(), descs,
1335
592
                                               _num_instances);
1336
592
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1337
592
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1338
592
        break;
1339
592
    }
1340
144k
    case TPlanNodeType::EXCHANGE_NODE: {
1341
144k
        int num_senders = _params.per_exch_num_senders.contains(tnode.node_id)
1342
144k
                                  ? _params.per_exch_num_senders.find(tnode.node_id)->second
1343
144k
                                  : 0;
1344
144k
        DCHECK_GT(num_senders, 0);
1345
144k
        op = std::make_shared<ExchangeSourceOperatorX>(pool, tnode, next_operator_id(), descs,
1346
144k
                                                       num_senders);
1347
144k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1348
144k
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1349
144k
        break;
1350
144k
    }
1351
154k
    case TPlanNodeType::AGGREGATION_NODE: {
1352
154k
        if (tnode.agg_node.grouping_exprs.empty() &&
1353
154k
            descs.get_tuple_descriptor(tnode.agg_node.output_tuple_id)->slots().empty()) {
1354
0
            return Status::InternalError("Illegal aggregate node " + std::to_string(tnode.node_id) +
1355
0
                                         ": group by and output is empty");
1356
0
        }
1357
154k
        bool need_create_cache_op =
1358
154k
                enable_query_cache && tnode.node_id == _params.fragment.query_cache_param.node_id;
1359
154k
        auto create_query_cache_operator = [&](PipelinePtr& new_pipe) {
1360
10
            auto cache_node_id = _params.local_params[0].per_node_scan_ranges.begin()->first;
1361
10
            auto cache_source_id = next_operator_id();
1362
10
            op = std::make_shared<CacheSourceOperatorX>(pool, cache_node_id, cache_source_id,
1363
10
                                                        _params.fragment.query_cache_param);
1364
10
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1365
1366
10
            const auto downstream_pipeline_id = cur_pipe->id();
1367
10
            if (!_dag.contains(downstream_pipeline_id)) {
1368
10
                _dag.insert({downstream_pipeline_id, {}});
1369
10
            }
1370
10
            new_pipe = add_pipeline(cur_pipe);
1371
10
            _dag[downstream_pipeline_id].push_back(new_pipe->id());
1372
1373
10
            DataSinkOperatorPtr cache_sink(new CacheSinkOperatorX(
1374
10
                    next_sink_operator_id(), op->node_id(), op->operator_id()));
1375
10
            RETURN_IF_ERROR(new_pipe->set_sink(cache_sink));
1376
10
            return Status::OK();
1377
10
        };
1378
154k
        const bool group_by_limit_opt =
1379
154k
                tnode.agg_node.__isset.agg_sort_info_by_group_key && tnode.limit > 0;
1380
1381
        /// PartitionedAggSourceOperatorX does not support "group by limit opt(#29641)" yet.
1382
        /// If `group_by_limit_opt` is true, then it might not need to spill at all.
1383
154k
        const bool enable_spill = _runtime_state->enable_spill() &&
1384
154k
                                  !tnode.agg_node.grouping_exprs.empty() && !group_by_limit_opt;
1385
154k
        const bool is_streaming_agg = tnode.agg_node.__isset.use_streaming_preaggregation &&
1386
154k
                                      tnode.agg_node.use_streaming_preaggregation &&
1387
154k
                                      !tnode.agg_node.grouping_exprs.empty();
1388
        // TODO: distinct streaming agg does not support spill.
1389
154k
        const bool can_use_distinct_streaming_agg =
1390
154k
                (!enable_spill || is_streaming_agg) && tnode.agg_node.aggregate_functions.empty() &&
1391
154k
                !tnode.agg_node.__isset.agg_sort_info_by_group_key &&
1392
154k
                _params.query_options.__isset.enable_distinct_streaming_aggregation &&
1393
154k
                _params.query_options.enable_distinct_streaming_aggregation;
1394
1395
154k
        if (can_use_distinct_streaming_agg) {
1396
90.8k
            if (need_create_cache_op) {
1397
8
                PipelinePtr new_pipe;
1398
8
                RETURN_IF_ERROR(create_query_cache_operator(new_pipe));
1399
1400
8
                cache_op = op;
1401
8
                op = std::make_shared<DistinctStreamingAggOperatorX>(pool, next_operator_id(),
1402
8
                                                                     tnode, descs);
1403
8
                RETURN_IF_ERROR(new_pipe->add_operator(op, _parallel_instances));
1404
8
                RETURN_IF_ERROR(cur_pipe->operators().front()->set_child(op));
1405
8
                cur_pipe = new_pipe;
1406
90.8k
            } else {
1407
90.8k
                op = std::make_shared<DistinctStreamingAggOperatorX>(pool, next_operator_id(),
1408
90.8k
                                                                     tnode, descs);
1409
90.8k
                RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1410
90.8k
            }
1411
90.8k
        } else if (is_streaming_agg) {
1412
3.36k
            if (need_create_cache_op) {
1413
0
                PipelinePtr new_pipe;
1414
0
                RETURN_IF_ERROR(create_query_cache_operator(new_pipe));
1415
0
                cache_op = op;
1416
0
                op = std::make_shared<StreamingAggOperatorX>(pool, next_operator_id(), tnode,
1417
0
                                                             descs);
1418
0
                RETURN_IF_ERROR(cur_pipe->operators().front()->set_child(op));
1419
0
                RETURN_IF_ERROR(new_pipe->add_operator(op, _parallel_instances));
1420
0
                cur_pipe = new_pipe;
1421
3.36k
            } else {
1422
3.36k
                op = std::make_shared<StreamingAggOperatorX>(pool, next_operator_id(), tnode,
1423
3.36k
                                                             descs);
1424
3.36k
                RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1425
3.36k
            }
1426
60.3k
        } else {
1427
            // create new pipeline to add query cache operator
1428
60.3k
            PipelinePtr new_pipe;
1429
60.3k
            if (need_create_cache_op) {
1430
2
                RETURN_IF_ERROR(create_query_cache_operator(new_pipe));
1431
2
                cache_op = op;
1432
2
            }
1433
1434
60.3k
            if (enable_spill) {
1435
45
                op = std::make_shared<PartitionedAggSourceOperatorX>(pool, tnode,
1436
45
                                                                     next_operator_id(), descs);
1437
60.3k
            } else {
1438
60.3k
                op = std::make_shared<AggSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1439
60.3k
            }
1440
60.3k
            if (need_create_cache_op) {
1441
2
                RETURN_IF_ERROR(cur_pipe->operators().front()->set_child(op));
1442
2
                RETURN_IF_ERROR(new_pipe->add_operator(op, _parallel_instances));
1443
2
                cur_pipe = new_pipe;
1444
60.3k
            } else {
1445
60.3k
                RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1446
60.3k
            }
1447
1448
60.3k
            const auto downstream_pipeline_id = cur_pipe->id();
1449
60.3k
            if (!_dag.contains(downstream_pipeline_id)) {
1450
57.9k
                _dag.insert({downstream_pipeline_id, {}});
1451
57.9k
            }
1452
60.3k
            cur_pipe = add_pipeline(cur_pipe);
1453
60.3k
            _dag[downstream_pipeline_id].push_back(cur_pipe->id());
1454
1455
60.3k
            if (enable_spill) {
1456
45
                sink_ops.push_back(std::make_shared<PartitionedAggSinkOperatorX>(
1457
45
                        pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1458
60.3k
            } else {
1459
60.3k
                sink_ops.push_back(std::make_shared<AggSinkOperatorX>(
1460
60.3k
                        pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1461
60.3k
            }
1462
60.3k
            RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
1463
60.3k
            RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
1464
60.3k
        }
1465
154k
        break;
1466
154k
    }
1467
154k
    case TPlanNodeType::HASH_JOIN_NODE: {
1468
7.69k
        const auto is_broadcast_join = tnode.hash_join_node.__isset.is_broadcast_join &&
1469
7.69k
                                       tnode.hash_join_node.is_broadcast_join;
1470
7.69k
        const auto enable_spill = _runtime_state->enable_spill();
1471
7.69k
        if (enable_spill && !is_broadcast_join) {
1472
0
            auto tnode_ = tnode;
1473
0
            tnode_.runtime_filters.clear();
1474
0
            auto inner_probe_operator =
1475
0
                    std::make_shared<HashJoinProbeOperatorX>(pool, tnode_, 0, descs);
1476
1477
            // probe side inner sink operator is used to build hash table on probe side when data is spilled.
1478
            // So here use `tnode_` which has no runtime filters.
1479
0
            auto probe_side_inner_sink_operator =
1480
0
                    std::make_shared<HashJoinBuildSinkOperatorX>(pool, 0, 0, tnode_, descs);
1481
1482
0
            RETURN_IF_ERROR(inner_probe_operator->init(tnode_, _runtime_state.get()));
1483
0
            RETURN_IF_ERROR(probe_side_inner_sink_operator->init(tnode_, _runtime_state.get()));
1484
1485
0
            auto probe_operator = std::make_shared<PartitionedHashJoinProbeOperatorX>(
1486
0
                    pool, tnode_, next_operator_id(), descs);
1487
0
            probe_operator->set_inner_operators(probe_side_inner_sink_operator,
1488
0
                                                inner_probe_operator);
1489
0
            op = std::move(probe_operator);
1490
0
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1491
1492
0
            const auto downstream_pipeline_id = cur_pipe->id();
1493
0
            if (!_dag.contains(downstream_pipeline_id)) {
1494
0
                _dag.insert({downstream_pipeline_id, {}});
1495
0
            }
1496
0
            PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
1497
0
            _dag[downstream_pipeline_id].push_back(build_side_pipe->id());
1498
1499
0
            auto inner_sink_operator =
1500
0
                    std::make_shared<HashJoinBuildSinkOperatorX>(pool, 0, 0, tnode, descs);
1501
0
            auto sink_operator = std::make_shared<PartitionedHashJoinSinkOperatorX>(
1502
0
                    pool, next_sink_operator_id(), op->operator_id(), tnode_, descs);
1503
0
            RETURN_IF_ERROR(inner_sink_operator->init(tnode, _runtime_state.get()));
1504
1505
0
            sink_operator->set_inner_operators(inner_sink_operator, inner_probe_operator);
1506
0
            sink_ops.push_back(std::move(sink_operator));
1507
0
            RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
1508
0
            RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode_, _runtime_state.get()));
1509
1510
0
            _pipeline_parent_map.push(op->node_id(), cur_pipe);
1511
0
            _pipeline_parent_map.push(op->node_id(), build_side_pipe);
1512
7.69k
        } else {
1513
7.69k
            op = std::make_shared<HashJoinProbeOperatorX>(pool, tnode, next_operator_id(), descs);
1514
7.69k
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1515
1516
7.69k
            const auto downstream_pipeline_id = cur_pipe->id();
1517
7.69k
            if (!_dag.contains(downstream_pipeline_id)) {
1518
6.92k
                _dag.insert({downstream_pipeline_id, {}});
1519
6.92k
            }
1520
7.69k
            PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
1521
7.69k
            _dag[downstream_pipeline_id].push_back(build_side_pipe->id());
1522
1523
7.69k
            sink_ops.push_back(std::make_shared<HashJoinBuildSinkOperatorX>(
1524
7.69k
                    pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1525
7.69k
            RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
1526
7.69k
            RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode, _runtime_state.get()));
1527
1528
7.69k
            _pipeline_parent_map.push(op->node_id(), cur_pipe);
1529
7.69k
            _pipeline_parent_map.push(op->node_id(), build_side_pipe);
1530
7.69k
        }
1531
7.69k
        if (is_broadcast_join && _runtime_state->enable_share_hash_table_for_broadcast_join()) {
1532
3.17k
            std::shared_ptr<HashJoinSharedState> shared_state =
1533
3.17k
                    HashJoinSharedState::create_shared(_num_instances);
1534
20.4k
            for (int i = 0; i < _num_instances; i++) {
1535
17.3k
                auto sink_dep = std::make_shared<Dependency>(op->operator_id(), op->node_id(),
1536
17.3k
                                                             "HASH_JOIN_BUILD_DEPENDENCY");
1537
17.3k
                sink_dep->set_shared_state(shared_state.get());
1538
17.3k
                shared_state->sink_deps.push_back(sink_dep);
1539
17.3k
            }
1540
3.17k
            shared_state->create_source_dependencies(_num_instances, op->operator_id(),
1541
3.17k
                                                     op->node_id(), "HASH_JOIN_PROBE");
1542
3.17k
            _op_id_to_shared_state.insert(
1543
3.17k
                    {op->operator_id(), {shared_state, shared_state->sink_deps}});
1544
3.17k
        }
1545
7.69k
        break;
1546
7.69k
    }
1547
4.43k
    case TPlanNodeType::CROSS_JOIN_NODE: {
1548
4.43k
        op = std::make_shared<NestedLoopJoinProbeOperatorX>(pool, tnode, next_operator_id(), descs);
1549
4.43k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1550
1551
4.43k
        const auto downstream_pipeline_id = cur_pipe->id();
1552
4.43k
        if (!_dag.contains(downstream_pipeline_id)) {
1553
4.20k
            _dag.insert({downstream_pipeline_id, {}});
1554
4.20k
        }
1555
4.43k
        PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
1556
4.43k
        _dag[downstream_pipeline_id].push_back(build_side_pipe->id());
1557
1558
4.43k
        sink_ops.push_back(std::make_shared<NestedLoopJoinBuildSinkOperatorX>(
1559
4.43k
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1560
4.43k
        RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
1561
4.43k
        RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode, _runtime_state.get()));
1562
4.43k
        _pipeline_parent_map.push(op->node_id(), cur_pipe);
1563
4.43k
        _pipeline_parent_map.push(op->node_id(), build_side_pipe);
1564
4.43k
        break;
1565
4.43k
    }
1566
53.2k
    case TPlanNodeType::UNION_NODE: {
1567
53.2k
        int child_count = tnode.num_children;
1568
53.2k
        op = std::make_shared<UnionSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1569
53.2k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1570
1571
53.2k
        const auto downstream_pipeline_id = cur_pipe->id();
1572
53.2k
        if (!_dag.contains(downstream_pipeline_id)) {
1573
53.0k
            _dag.insert({downstream_pipeline_id, {}});
1574
53.0k
        }
1575
54.9k
        for (int i = 0; i < child_count; i++) {
1576
1.66k
            PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
1577
1.66k
            _dag[downstream_pipeline_id].push_back(build_side_pipe->id());
1578
1.66k
            sink_ops.push_back(std::make_shared<UnionSinkOperatorX>(
1579
1.66k
                    i, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1580
1.66k
            RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
1581
1.66k
            RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode, _runtime_state.get()));
1582
            // preset children pipelines. if any pipeline found this as its father, will use the prepared pipeline to build.
1583
1.66k
            _pipeline_parent_map.push(op->node_id(), build_side_pipe);
1584
1.66k
        }
1585
53.2k
        break;
1586
53.2k
    }
1587
53.2k
    case TPlanNodeType::SORT_NODE: {
1588
44.3k
        const auto should_spill = _runtime_state->enable_spill() &&
1589
44.3k
                                  tnode.sort_node.algorithm == TSortAlgorithm::FULL_SORT;
1590
44.3k
        const bool use_local_merge =
1591
44.3k
                tnode.sort_node.__isset.use_local_merge && tnode.sort_node.use_local_merge;
1592
44.3k
        if (should_spill) {
1593
9
            op = std::make_shared<SpillSortSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1594
44.3k
        } else if (use_local_merge) {
1595
42.1k
            op = std::make_shared<LocalMergeSortSourceOperatorX>(pool, tnode, next_operator_id(),
1596
42.1k
                                                                 descs);
1597
42.1k
        } else {
1598
2.21k
            op = std::make_shared<SortSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1599
2.21k
        }
1600
44.3k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1601
1602
44.3k
        const auto downstream_pipeline_id = cur_pipe->id();
1603
44.3k
        if (!_dag.contains(downstream_pipeline_id)) {
1604
44.3k
            _dag.insert({downstream_pipeline_id, {}});
1605
44.3k
        }
1606
44.3k
        cur_pipe = add_pipeline(cur_pipe);
1607
44.3k
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());
1608
1609
44.3k
        if (should_spill) {
1610
9
            sink_ops.push_back(std::make_shared<SpillSortSinkOperatorX>(
1611
9
                    pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1612
44.3k
        } else {
1613
44.3k
            sink_ops.push_back(std::make_shared<SortSinkOperatorX>(
1614
44.3k
                    pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1615
44.3k
        }
1616
44.3k
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
1617
44.3k
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
1618
44.3k
        break;
1619
44.3k
    }
1620
44.3k
    case TPlanNodeType::PARTITION_SORT_NODE: {
1621
62
        op = std::make_shared<PartitionSortSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1622
62
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1623
1624
62
        const auto downstream_pipeline_id = cur_pipe->id();
1625
62
        if (!_dag.contains(downstream_pipeline_id)) {
1626
62
            _dag.insert({downstream_pipeline_id, {}});
1627
62
        }
1628
62
        cur_pipe = add_pipeline(cur_pipe);
1629
62
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());
1630
1631
62
        sink_ops.push_back(std::make_shared<PartitionSortSinkOperatorX>(
1632
62
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1633
62
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
1634
62
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
1635
62
        break;
1636
62
    }
1637
1.52k
    case TPlanNodeType::ANALYTIC_EVAL_NODE: {
1638
1.52k
        op = std::make_shared<AnalyticSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1639
1.52k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1640
1641
1.52k
        const auto downstream_pipeline_id = cur_pipe->id();
1642
1.52k
        if (!_dag.contains(downstream_pipeline_id)) {
1643
1.51k
            _dag.insert({downstream_pipeline_id, {}});
1644
1.51k
        }
1645
1.52k
        cur_pipe = add_pipeline(cur_pipe);
1646
1.52k
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());
1647
1648
1.52k
        sink_ops.push_back(std::make_shared<AnalyticSinkOperatorX>(
1649
1.52k
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1650
1.52k
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
1651
1.52k
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
1652
1.52k
        break;
1653
1.52k
    }
1654
1.60k
    case TPlanNodeType::MATERIALIZATION_NODE: {
1655
1.60k
        op = std::make_shared<MaterializationOperator>(pool, tnode, next_operator_id(), descs);
1656
1.60k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1657
1.60k
        break;
1658
1.60k
    }
1659
1.60k
    case TPlanNodeType::INTERSECT_NODE: {
1660
119
        RETURN_IF_ERROR(_build_operators_for_set_operation_node<true>(pool, tnode, descs, op,
1661
119
                                                                      cur_pipe, sink_ops));
1662
119
        break;
1663
119
    }
1664
128
    case TPlanNodeType::EXCEPT_NODE: {
1665
128
        RETURN_IF_ERROR(_build_operators_for_set_operation_node<false>(pool, tnode, descs, op,
1666
128
                                                                       cur_pipe, sink_ops));
1667
128
        break;
1668
128
    }
1669
298
    case TPlanNodeType::REPEAT_NODE: {
1670
298
        op = std::make_shared<RepeatOperatorX>(pool, tnode, next_operator_id(), descs);
1671
298
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1672
298
        break;
1673
298
    }
1674
921
    case TPlanNodeType::TABLE_FUNCTION_NODE: {
1675
921
        op = std::make_shared<TableFunctionOperatorX>(pool, tnode, next_operator_id(), descs);
1676
921
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1677
921
        break;
1678
921
    }
1679
921
    case TPlanNodeType::ASSERT_NUM_ROWS_NODE: {
1680
218
        op = std::make_shared<AssertNumRowsOperatorX>(pool, tnode, next_operator_id(), descs);
1681
218
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1682
218
        break;
1683
218
    }
1684
1.55k
    case TPlanNodeType::EMPTY_SET_NODE: {
1685
1.55k
        op = std::make_shared<EmptySetSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1686
1.55k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1687
1.55k
        break;
1688
1.55k
    }
1689
1.55k
    case TPlanNodeType::DATA_GEN_SCAN_NODE: {
1690
459
        op = std::make_shared<DataGenSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1691
459
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1692
459
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1693
459
        break;
1694
459
    }
1695
2.32k
    case TPlanNodeType::SCHEMA_SCAN_NODE: {
1696
2.32k
        op = std::make_shared<SchemaScanOperatorX>(pool, tnode, next_operator_id(), descs);
1697
2.32k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1698
2.32k
        break;
1699
2.32k
    }
1700
6.82k
    case TPlanNodeType::META_SCAN_NODE: {
1701
6.82k
        op = std::make_shared<MetaScanOperatorX>(pool, tnode, next_operator_id(), descs);
1702
6.82k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1703
6.82k
        break;
1704
6.82k
    }
1705
6.82k
    case TPlanNodeType::SELECT_NODE: {
1706
1.66k
        op = std::make_shared<SelectOperatorX>(pool, tnode, next_operator_id(), descs);
1707
1.66k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1708
1.66k
        break;
1709
1.66k
    }
1710
1.66k
    case TPlanNodeType::REC_CTE_NODE: {
1711
151
        op = std::make_shared<RecCTESourceOperatorX>(pool, tnode, next_operator_id(), descs);
1712
151
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1713
1714
151
        const auto downstream_pipeline_id = cur_pipe->id();
1715
151
        if (!_dag.contains(downstream_pipeline_id)) {
1716
148
            _dag.insert({downstream_pipeline_id, {}});
1717
148
        }
1718
1719
151
        PipelinePtr anchor_side_pipe = add_pipeline(cur_pipe);
1720
151
        _dag[downstream_pipeline_id].push_back(anchor_side_pipe->id());
1721
1722
151
        DataSinkOperatorPtr anchor_sink;
1723
151
        anchor_sink = std::make_shared<RecCTEAnchorSinkOperatorX>(next_sink_operator_id(),
1724
151
                                                                  op->operator_id(), tnode, descs);
1725
151
        RETURN_IF_ERROR(anchor_side_pipe->set_sink(anchor_sink));
1726
151
        RETURN_IF_ERROR(anchor_side_pipe->sink()->init(tnode, _runtime_state.get()));
1727
151
        _pipeline_parent_map.push(op->node_id(), anchor_side_pipe);
1728
1729
151
        PipelinePtr rec_side_pipe = add_pipeline(cur_pipe);
1730
151
        _dag[downstream_pipeline_id].push_back(rec_side_pipe->id());
1731
1732
151
        DataSinkOperatorPtr rec_sink;
1733
151
        rec_sink = std::make_shared<RecCTESinkOperatorX>(next_sink_operator_id(), op->operator_id(),
1734
151
                                                         tnode, descs);
1735
151
        RETURN_IF_ERROR(rec_side_pipe->set_sink(rec_sink));
1736
151
        RETURN_IF_ERROR(rec_side_pipe->sink()->init(tnode, _runtime_state.get()));
1737
151
        _pipeline_parent_map.push(op->node_id(), rec_side_pipe);
1738
1739
151
        break;
1740
151
    }
1741
1.95k
    case TPlanNodeType::REC_CTE_SCAN_NODE: {
1742
1.95k
        op = std::make_shared<RecCTEScanOperatorX>(pool, tnode, next_operator_id(), descs);
1743
1.95k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1744
1.95k
        break;
1745
1.95k
    }
1746
1.95k
    default:
1747
0
        return Status::InternalError("Unsupported exec type in pipeline: {}",
1748
0
                                     print_plan_node_type(tnode.node_type));
1749
660k
    }
1750
658k
    if (_params.__isset.parallel_instances && fe_with_old_version) {
1751
0
        cur_pipe->set_num_tasks(_params.parallel_instances);
1752
0
        op->set_serial_operator();
1753
0
    }
1754
1755
658k
    return Status::OK();
1756
660k
}
1757
// NOLINTEND(readability-function-cognitive-complexity)
1758
// NOLINTEND(readability-function-size)
1759
1760
template <bool is_intersect>
1761
Status PipelineFragmentContext::_build_operators_for_set_operation_node(
1762
        ObjectPool* pool, const TPlanNode& tnode, const DescriptorTbl& descs, OperatorPtr& op,
1763
247
        PipelinePtr& cur_pipe, std::vector<DataSinkOperatorPtr>& sink_ops) {
1764
247
    op.reset(new SetSourceOperatorX<is_intersect>(pool, tnode, next_operator_id(), descs));
1765
247
    RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1766
1767
247
    const auto downstream_pipeline_id = cur_pipe->id();
1768
247
    if (!_dag.contains(downstream_pipeline_id)) {
1769
230
        _dag.insert({downstream_pipeline_id, {}});
1770
230
    }
1771
1772
834
    for (int child_id = 0; child_id < tnode.num_children; child_id++) {
1773
587
        PipelinePtr probe_side_pipe = add_pipeline(cur_pipe);
1774
587
        _dag[downstream_pipeline_id].push_back(probe_side_pipe->id());
1775
1776
587
        if (child_id == 0) {
1777
247
            sink_ops.push_back(std::make_shared<SetSinkOperatorX<is_intersect>>(
1778
247
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1779
340
        } else {
1780
340
            sink_ops.push_back(std::make_shared<SetProbeSinkOperatorX<is_intersect>>(
1781
340
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1782
340
        }
1783
587
        RETURN_IF_ERROR(probe_side_pipe->set_sink(sink_ops.back()));
1784
587
        RETURN_IF_ERROR(probe_side_pipe->sink()->init(tnode, _runtime_state.get()));
1785
        // prepare children pipelines. if any pipeline found this as its father, will use the prepared pipeline to build.
1786
587
        _pipeline_parent_map.push(op->node_id(), probe_side_pipe);
1787
587
    }
1788
1789
247
    return Status::OK();
1790
247
}
_ZN5doris23PipelineFragmentContext39_build_operators_for_set_operation_nodeILb1EEENS_6StatusEPNS_10ObjectPoolERKNS_9TPlanNodeERKNS_13DescriptorTblERSt10shared_ptrINS_13OperatorXBaseEERSB_INS_8PipelineEERSt6vectorISB_INS_21DataSinkOperatorXBaseEESaISK_EE
Line
Count
Source
1763
119
        PipelinePtr& cur_pipe, std::vector<DataSinkOperatorPtr>& sink_ops) {
1764
119
    op.reset(new SetSourceOperatorX<is_intersect>(pool, tnode, next_operator_id(), descs));
1765
119
    RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1766
1767
119
    const auto downstream_pipeline_id = cur_pipe->id();
1768
119
    if (!_dag.contains(downstream_pipeline_id)) {
1769
110
        _dag.insert({downstream_pipeline_id, {}});
1770
110
    }
1771
1772
435
    for (int child_id = 0; child_id < tnode.num_children; child_id++) {
1773
316
        PipelinePtr probe_side_pipe = add_pipeline(cur_pipe);
1774
316
        _dag[downstream_pipeline_id].push_back(probe_side_pipe->id());
1775
1776
316
        if (child_id == 0) {
1777
119
            sink_ops.push_back(std::make_shared<SetSinkOperatorX<is_intersect>>(
1778
119
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1779
197
        } else {
1780
197
            sink_ops.push_back(std::make_shared<SetProbeSinkOperatorX<is_intersect>>(
1781
197
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1782
197
        }
1783
316
        RETURN_IF_ERROR(probe_side_pipe->set_sink(sink_ops.back()));
1784
316
        RETURN_IF_ERROR(probe_side_pipe->sink()->init(tnode, _runtime_state.get()));
1785
        // prepare children pipelines. if any pipeline found this as its father, will use the prepared pipeline to build.
1786
316
        _pipeline_parent_map.push(op->node_id(), probe_side_pipe);
1787
316
    }
1788
1789
119
    return Status::OK();
1790
119
}
_ZN5doris23PipelineFragmentContext39_build_operators_for_set_operation_nodeILb0EEENS_6StatusEPNS_10ObjectPoolERKNS_9TPlanNodeERKNS_13DescriptorTblERSt10shared_ptrINS_13OperatorXBaseEERSB_INS_8PipelineEERSt6vectorISB_INS_21DataSinkOperatorXBaseEESaISK_EE
Line
Count
Source
1763
128
        PipelinePtr& cur_pipe, std::vector<DataSinkOperatorPtr>& sink_ops) {
1764
128
    op.reset(new SetSourceOperatorX<is_intersect>(pool, tnode, next_operator_id(), descs));
1765
128
    RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1766
1767
128
    const auto downstream_pipeline_id = cur_pipe->id();
1768
128
    if (!_dag.contains(downstream_pipeline_id)) {
1769
120
        _dag.insert({downstream_pipeline_id, {}});
1770
120
    }
1771
1772
399
    for (int child_id = 0; child_id < tnode.num_children; child_id++) {
1773
271
        PipelinePtr probe_side_pipe = add_pipeline(cur_pipe);
1774
271
        _dag[downstream_pipeline_id].push_back(probe_side_pipe->id());
1775
1776
271
        if (child_id == 0) {
1777
128
            sink_ops.push_back(std::make_shared<SetSinkOperatorX<is_intersect>>(
1778
128
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1779
143
        } else {
1780
143
            sink_ops.push_back(std::make_shared<SetProbeSinkOperatorX<is_intersect>>(
1781
143
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1782
143
        }
1783
271
        RETURN_IF_ERROR(probe_side_pipe->set_sink(sink_ops.back()));
1784
271
        RETURN_IF_ERROR(probe_side_pipe->sink()->init(tnode, _runtime_state.get()));
1785
        // prepare children pipelines. if any pipeline found this as its father, will use the prepared pipeline to build.
1786
271
        _pipeline_parent_map.push(op->node_id(), probe_side_pipe);
1787
271
    }
1788
1789
128
    return Status::OK();
1790
128
}
1791
1792
426k
Status PipelineFragmentContext::submit() {
1793
426k
    if (_submitted) {
1794
0
        return Status::InternalError("submitted");
1795
0
    }
1796
426k
    _submitted = true;
1797
1798
426k
    int submit_tasks = 0;
1799
426k
    Status st;
1800
426k
    auto* scheduler = _query_ctx->get_pipe_exec_scheduler();
1801
1.07M
    for (auto& task : _tasks) {
1802
1.90M
        for (auto& t : task) {
1803
1.90M
            st = scheduler->submit(t.first);
1804
1.90M
            DBUG_EXECUTE_IF("PipelineFragmentContext.submit.failed",
1805
1.90M
                            { st = Status::Aborted("PipelineFragmentContext.submit.failed"); });
1806
1.90M
            if (!st) {
1807
0
                cancel(Status::InternalError("submit context to executor fail"));
1808
0
                std::lock_guard<std::mutex> l(_task_mutex);
1809
0
                _total_tasks = submit_tasks;
1810
0
                break;
1811
0
            }
1812
1.90M
            submit_tasks++;
1813
1.90M
        }
1814
1.07M
    }
1815
426k
    if (!st.ok()) {
1816
0
        bool need_remove = false;
1817
0
        {
1818
0
            std::lock_guard<std::mutex> l(_task_mutex);
1819
0
            if (_closed_tasks >= _total_tasks) {
1820
0
                need_remove = _close_fragment_instance();
1821
0
            }
1822
0
        }
1823
        // Call remove_pipeline_context() outside _task_mutex to avoid ABBA deadlock.
1824
0
        if (need_remove) {
1825
0
            _exec_env->fragment_mgr()->remove_pipeline_context({_query_id, _fragment_id});
1826
0
        }
1827
0
        return Status::InternalError("Submit pipeline failed. err = {}, BE: {}", st.to_string(),
1828
0
                                     BackendOptions::get_localhost());
1829
426k
    } else {
1830
426k
        return st;
1831
426k
    }
1832
426k
}
1833
1834
12
void PipelineFragmentContext::print_profile(const std::string& extra_info) {
1835
12
    if (_runtime_state->enable_profile()) {
1836
0
        std::stringstream ss;
1837
0
        for (auto runtime_profile_ptr : _runtime_state->pipeline_id_to_profile()) {
1838
0
            runtime_profile_ptr->pretty_print(&ss);
1839
0
        }
1840
1841
0
        if (_runtime_state->load_channel_profile()) {
1842
0
            _runtime_state->load_channel_profile()->pretty_print(&ss);
1843
0
        }
1844
1845
0
        auto profile_str =
1846
0
                fmt::format("Query {} fragment {} {}, profile, {}", print_id(this->_query_id),
1847
0
                            this->_fragment_id, extra_info, ss.str());
1848
0
        LOG_LONG_STRING(INFO, profile_str);
1849
0
    }
1850
12
}
1851
// If all pipeline tasks binded to the fragment instance are finished, then we could
1852
// close the fragment instance.
1853
// Returns true if the caller should call remove_pipeline_context() **after** releasing
1854
// _task_mutex. We must not call remove_pipeline_context() here because it acquires
1855
// _pipeline_map's shard lock, and this function is called while _task_mutex is held.
1856
// Acquiring _pipeline_map while holding _task_mutex creates an ABBA deadlock with
1857
// dump_pipeline_tasks(), which acquires _pipeline_map first and then _task_mutex
1858
// (via debug_string()).
1859
428k
bool PipelineFragmentContext::_close_fragment_instance() {
1860
428k
    if (_is_fragment_instance_closed) {
1861
0
        return false;
1862
0
    }
1863
428k
    Defer defer_op {[&]() { _is_fragment_instance_closed = true; }};
1864
428k
    _fragment_level_profile->total_time_counter()->update(_fragment_watcher.elapsed_time());
1865
428k
    if (!_need_notify_close) {
1866
425k
        auto st = send_report(true);
1867
425k
        if (!st) {
1868
0
            LOG(WARNING) << fmt::format("Failed to send report for query {}, fragment {}: {}",
1869
0
                                        print_id(_query_id), _fragment_id, st.to_string());
1870
0
        }
1871
425k
    }
1872
    // Print profile content in info log is a tempoeray solution for stream load and external_connector.
1873
    // Since stream load does not have someting like coordinator on FE, so
1874
    // backend can not report profile to FE, ant its profile can not be shown
1875
    // in the same way with other query. So we print the profile content to info log.
1876
1877
428k
    if (_runtime_state->enable_profile() &&
1878
428k
        (_query_ctx->get_query_source() == QuerySource::STREAM_LOAD ||
1879
2.84k
         _query_ctx->get_query_source() == QuerySource::EXTERNAL_CONNECTOR ||
1880
2.84k
         _query_ctx->get_query_source() == QuerySource::GROUP_COMMIT_LOAD)) {
1881
0
        std::stringstream ss;
1882
        // Compute the _local_time_percent before pretty_print the runtime_profile
1883
        // Before add this operation, the print out like that:
1884
        // UNION_NODE (id=0):(Active: 56.720us, non-child: 00.00%)
1885
        // After add the operation, the print out like that:
1886
        // UNION_NODE (id=0):(Active: 56.720us, non-child: 82.53%)
1887
        // We can easily know the exec node execute time without child time consumed.
1888
0
        for (auto runtime_profile_ptr : _runtime_state->pipeline_id_to_profile()) {
1889
0
            runtime_profile_ptr->pretty_print(&ss);
1890
0
        }
1891
1892
0
        if (_runtime_state->load_channel_profile()) {
1893
0
            _runtime_state->load_channel_profile()->pretty_print(&ss);
1894
0
        }
1895
1896
0
        LOG_INFO("Query {} fragment {} profile:\n {}", print_id(_query_id), _fragment_id, ss.str());
1897
0
    }
1898
1899
428k
    if (_query_ctx->enable_profile()) {
1900
2.84k
        _query_ctx->add_fragment_profile(_fragment_id, collect_realtime_profile(),
1901
2.84k
                                         collect_realtime_load_channel_profile());
1902
2.84k
    }
1903
1904
    // Return whether the caller needs to remove from the pipeline map.
1905
    // The caller must do this after releasing _task_mutex.
1906
428k
    return !_need_notify_close;
1907
428k
}
1908
1909
1.89M
void PipelineFragmentContext::decrement_running_task(PipelineId pipeline_id) {
1910
    // If all tasks of this pipeline has been closed, upstream tasks is never needed, and we just make those runnable here
1911
1.89M
    DCHECK(_pip_id_to_pipeline.contains(pipeline_id));
1912
1.89M
    if (_pip_id_to_pipeline[pipeline_id]->close_task()) {
1913
663k
        if (_dag.contains(pipeline_id)) {
1914
344k
            for (auto dep : _dag[pipeline_id]) {
1915
344k
                _pip_id_to_pipeline[dep]->make_all_runnable(pipeline_id);
1916
344k
            }
1917
278k
        }
1918
663k
    }
1919
1.89M
    bool need_remove = false;
1920
1.89M
    {
1921
1.89M
        std::lock_guard<std::mutex> l(_task_mutex);
1922
1.89M
        ++_closed_tasks;
1923
1.89M
        if (_closed_tasks >= _total_tasks) {
1924
428k
            need_remove = _close_fragment_instance();
1925
428k
        }
1926
1.89M
    }
1927
    // Call remove_pipeline_context() outside _task_mutex to avoid ABBA deadlock.
1928
1.89M
    if (need_remove) {
1929
425k
        _exec_env->fragment_mgr()->remove_pipeline_context({_query_id, _fragment_id});
1930
425k
    }
1931
1.89M
}
1932
1933
54.8k
std::string PipelineFragmentContext::get_load_error_url() {
1934
54.8k
    if (const auto& str = _runtime_state->get_error_log_file_path(); !str.empty()) {
1935
0
        return to_load_error_http_path(str);
1936
0
    }
1937
163k
    for (auto& tasks : _tasks) {
1938
267k
        for (auto& task : tasks) {
1939
267k
            if (const auto& str = task.second->get_error_log_file_path(); !str.empty()) {
1940
161
                return to_load_error_http_path(str);
1941
161
            }
1942
267k
        }
1943
163k
    }
1944
54.6k
    return "";
1945
54.8k
}
1946
1947
54.8k
std::string PipelineFragmentContext::get_first_error_msg() {
1948
54.8k
    if (const auto& str = _runtime_state->get_first_error_msg(); !str.empty()) {
1949
0
        return str;
1950
0
    }
1951
163k
    for (auto& tasks : _tasks) {
1952
267k
        for (auto& task : tasks) {
1953
267k
            if (const auto& str = task.second->get_first_error_msg(); !str.empty()) {
1954
161
                return str;
1955
161
            }
1956
267k
        }
1957
163k
    }
1958
54.6k
    return "";
1959
54.8k
}
1960
1961
430k
Status PipelineFragmentContext::send_report(bool done) {
1962
430k
    Status exec_status = _query_ctx->exec_status();
1963
    // If plan is done successfully, but _is_report_success is false,
1964
    // no need to send report.
1965
    // Load will set _is_report_success to true because load wants to know
1966
    // the process.
1967
430k
    if (!_is_report_success && done && exec_status.ok()) {
1968
381k
        return Status::OK();
1969
381k
    }
1970
1971
    // If both _is_report_success and _is_report_on_cancel are false,
1972
    // which means no matter query is success or failed, no report is needed.
1973
    // This may happen when the query limit reached and
1974
    // a internal cancellation being processed
1975
    // When limit is reached the fragment is also cancelled, but _is_report_on_cancel will
1976
    // be set to false, to avoid sending fault report to FE.
1977
48.3k
    if (!_is_report_success && !_is_report_on_cancel) {
1978
332
        if (done) {
1979
            // if done is true, which means the query is finished successfully, we can safely close the fragment instance without sending report to FE, and just return OK status here.
1980
332
            return Status::OK();
1981
332
        }
1982
0
        return Status::NeedSendAgain("");
1983
332
    }
1984
1985
48.0k
    std::vector<RuntimeState*> runtime_states;
1986
1987
118k
    for (auto& tasks : _tasks) {
1988
174k
        for (auto& task : tasks) {
1989
174k
            runtime_states.push_back(task.second.get());
1990
174k
        }
1991
118k
    }
1992
1993
48.0k
    std::string load_eror_url = _query_ctx->get_load_error_url().empty()
1994
48.0k
                                        ? get_load_error_url()
1995
48.0k
                                        : _query_ctx->get_load_error_url();
1996
48.0k
    std::string first_error_msg = _query_ctx->get_first_error_msg().empty()
1997
48.0k
                                          ? get_first_error_msg()
1998
48.0k
                                          : _query_ctx->get_first_error_msg();
1999
2000
48.0k
    ReportStatusRequest req {.status = exec_status,
2001
48.0k
                             .runtime_states = runtime_states,
2002
48.0k
                             .done = done || !exec_status.ok(),
2003
48.0k
                             .coord_addr = _query_ctx->coord_addr,
2004
48.0k
                             .query_id = _query_id,
2005
48.0k
                             .fragment_id = _fragment_id,
2006
48.0k
                             .fragment_instance_id = TUniqueId(),
2007
48.0k
                             .backend_num = -1,
2008
48.0k
                             .runtime_state = _runtime_state.get(),
2009
48.0k
                             .load_error_url = load_eror_url,
2010
48.0k
                             .first_error_msg = first_error_msg,
2011
48.0k
                             .cancel_fn = [this](const Status& reason) { cancel(reason); }};
2012
2013
48.0k
    return _report_status_cb(
2014
48.0k
            req, std::dynamic_pointer_cast<PipelineFragmentContext>(shared_from_this()));
2015
48.3k
}
2016
2017
4
size_t PipelineFragmentContext::get_revocable_size(bool* has_running_task) const {
2018
4
    size_t res = 0;
2019
    // _tasks will be cleared during ~PipelineFragmentContext, so that it's safe
2020
    // here to traverse the vector.
2021
4
    for (const auto& task_instances : _tasks) {
2022
6
        for (const auto& task : task_instances) {
2023
6
            if (task.first->is_running()) {
2024
0
                LOG_EVERY_N(INFO, 50) << "Query: " << print_id(_query_id)
2025
0
                                      << " is running, task: " << (void*)task.first.get()
2026
0
                                      << ", is_running: " << task.first->is_running();
2027
0
                *has_running_task = true;
2028
0
                return 0;
2029
0
            }
2030
2031
6
            size_t revocable_size = task.first->get_revocable_size();
2032
6
            if (revocable_size >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
2033
2
                res += revocable_size;
2034
2
            }
2035
6
        }
2036
4
    }
2037
4
    return res;
2038
4
}
2039
2040
8
std::vector<PipelineTask*> PipelineFragmentContext::get_revocable_tasks() const {
2041
8
    std::vector<PipelineTask*> revocable_tasks;
2042
8
    for (const auto& task_instances : _tasks) {
2043
12
        for (const auto& task : task_instances) {
2044
12
            size_t revocable_size_ = task.first->get_revocable_size();
2045
2046
12
            if (revocable_size_ >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
2047
4
                revocable_tasks.emplace_back(task.first.get());
2048
4
            }
2049
12
        }
2050
8
    }
2051
8
    return revocable_tasks;
2052
8
}
2053
2054
166
std::string PipelineFragmentContext::debug_string() {
2055
166
    std::lock_guard<std::mutex> l(_task_mutex);
2056
166
    fmt::memory_buffer debug_string_buffer;
2057
166
    fmt::format_to(debug_string_buffer,
2058
166
                   "PipelineFragmentContext Info: _closed_tasks={}, _total_tasks={}, "
2059
166
                   "need_notify_close={}, fragment_id={}, _rec_cte_stage={}\n",
2060
166
                   _closed_tasks, _total_tasks, _need_notify_close, _fragment_id, _rec_cte_stage);
2061
860
    for (size_t j = 0; j < _tasks.size(); j++) {
2062
694
        fmt::format_to(debug_string_buffer, "Tasks in instance {}:\n", j);
2063
1.77k
        for (size_t i = 0; i < _tasks[j].size(); i++) {
2064
1.07k
            fmt::format_to(debug_string_buffer, "Task {}: {}\n", i,
2065
1.07k
                           _tasks[j][i].first->debug_string());
2066
1.07k
        }
2067
694
    }
2068
2069
166
    return fmt::to_string(debug_string_buffer);
2070
166
}
2071
2072
std::vector<std::shared_ptr<TRuntimeProfileTree>>
2073
2.84k
PipelineFragmentContext::collect_realtime_profile() const {
2074
2.84k
    std::vector<std::shared_ptr<TRuntimeProfileTree>> res;
2075
2076
    // we do not have mutex to protect pipeline_id_to_profile
2077
    // so we need to make sure this funciton is invoked after fragment context
2078
    // has already been prepared.
2079
2.84k
    if (!_prepared) {
2080
0
        std::string msg =
2081
0
                "Query " + print_id(_query_id) + " collecting profile, but its not prepared";
2082
0
        DCHECK(false) << msg;
2083
0
        LOG_ERROR(msg);
2084
0
        return res;
2085
0
    }
2086
2087
    // Make sure first profile is fragment level profile
2088
2.84k
    auto fragment_profile = std::make_shared<TRuntimeProfileTree>();
2089
2.84k
    _fragment_level_profile->to_thrift(fragment_profile.get(), _runtime_state->profile_level());
2090
2.84k
    res.push_back(fragment_profile);
2091
2092
    // pipeline_id_to_profile is initialized in prepare stage
2093
5.24k
    for (auto pipeline_profile : _runtime_state->pipeline_id_to_profile()) {
2094
5.24k
        auto profile_ptr = std::make_shared<TRuntimeProfileTree>();
2095
5.24k
        pipeline_profile->to_thrift(profile_ptr.get(), _runtime_state->profile_level());
2096
5.24k
        res.push_back(profile_ptr);
2097
5.24k
    }
2098
2099
2.84k
    return res;
2100
2.84k
}
2101
2102
std::shared_ptr<TRuntimeProfileTree>
2103
2.84k
PipelineFragmentContext::collect_realtime_load_channel_profile() const {
2104
    // we do not have mutex to protect pipeline_id_to_profile
2105
    // so we need to make sure this funciton is invoked after fragment context
2106
    // has already been prepared.
2107
2.84k
    if (!_prepared) {
2108
0
        std::string msg =
2109
0
                "Query " + print_id(_query_id) + " collecting profile, but its not prepared";
2110
0
        DCHECK(false) << msg;
2111
0
        LOG_ERROR(msg);
2112
0
        return nullptr;
2113
0
    }
2114
2115
7.34k
    for (const auto& tasks : _tasks) {
2116
15.2k
        for (const auto& task : tasks) {
2117
15.2k
            if (task.second->load_channel_profile() == nullptr) {
2118
0
                continue;
2119
0
            }
2120
2121
15.2k
            auto tmp_load_channel_profile = std::make_shared<TRuntimeProfileTree>();
2122
2123
15.2k
            task.second->load_channel_profile()->to_thrift(tmp_load_channel_profile.get(),
2124
15.2k
                                                           _runtime_state->profile_level());
2125
15.2k
            _runtime_state->load_channel_profile()->update(*tmp_load_channel_profile);
2126
15.2k
        }
2127
7.34k
    }
2128
2129
2.84k
    auto load_channel_profile = std::make_shared<TRuntimeProfileTree>();
2130
2.84k
    _runtime_state->load_channel_profile()->to_thrift(load_channel_profile.get(),
2131
2.84k
                                                      _runtime_state->profile_level());
2132
2.84k
    return load_channel_profile;
2133
2.84k
}
2134
2135
// Collect runtime filter IDs registered by all tasks in this PFC.
2136
// Used during recursive CTE stage transitions to know which filters to deregister
2137
// before creating the new PFC for the next recursion round.
2138
// Called from rerun_fragment(wait_for_destroy) while tasks are still closing.
2139
// Thread safety: safe because _tasks is structurally immutable after prepare() —
2140
// the vector sizes do not change, and individual RuntimeState filter sets are
2141
// written only during open() which has completed by the time we reach rerun.
2142
3.29k
std::set<int> PipelineFragmentContext::get_deregister_runtime_filter() const {
2143
3.29k
    std::set<int> result;
2144
5.80k
    for (const auto& _task : _tasks) {
2145
11.5k
        for (const auto& task : _task) {
2146
11.5k
            auto set = task.first->runtime_state()->get_deregister_runtime_filter();
2147
11.5k
            result.merge(set);
2148
11.5k
        }
2149
5.80k
    }
2150
3.29k
    if (_runtime_state) {
2151
3.29k
        auto set = _runtime_state->get_deregister_runtime_filter();
2152
3.29k
        result.merge(set);
2153
3.29k
    }
2154
3.29k
    return result;
2155
3.29k
}
2156
2157
429k
void PipelineFragmentContext::_release_resource() {
2158
429k
    std::lock_guard<std::mutex> l(_task_mutex);
2159
    // The memory released by the query end is recorded in the query mem tracker.
2160
429k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_ctx->query_mem_tracker());
2161
429k
    auto st = _query_ctx->exec_status();
2162
1.08M
    for (auto& _task : _tasks) {
2163
1.08M
        if (!_task.empty()) {
2164
1.08M
            _call_back(_task.front().first->runtime_state(), &st);
2165
1.08M
        }
2166
1.08M
    }
2167
429k
    _tasks.clear();
2168
429k
    _dag.clear();
2169
429k
    _pip_id_to_pipeline.clear();
2170
429k
    _pipelines.clear();
2171
429k
    _sink.reset();
2172
429k
    _root_op.reset();
2173
429k
    _runtime_filter_mgr_map.clear();
2174
429k
    _op_id_to_shared_state.clear();
2175
429k
}
2176
2177
} // namespace doris