Coverage Report

Created: 2026-05-12 20:22

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/exec/pipeline/pipeline_fragment_context.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "exec/pipeline/pipeline_fragment_context.h"
19
20
#include <gen_cpp/DataSinks_types.h>
21
#include <gen_cpp/FrontendService.h>
22
#include <gen_cpp/FrontendService_types.h>
23
#include <gen_cpp/PaloInternalService_types.h>
24
#include <gen_cpp/PlanNodes_types.h>
25
#include <pthread.h>
26
27
#include <algorithm>
28
#include <cstdlib>
29
// IWYU pragma: no_include <bits/chrono.h>
30
#include <fmt/format.h>
31
#include <thrift/Thrift.h>
32
#include <thrift/protocol/TDebugProtocol.h>
33
#include <thrift/transport/TTransportException.h>
34
35
#include <chrono> // IWYU pragma: keep
36
#include <map>
37
#include <memory>
38
#include <ostream>
39
#include <utility>
40
41
#include "cloud/config.h"
42
#include "common/cast_set.h"
43
#include "common/config.h"
44
#include "common/exception.h"
45
#include "common/logging.h"
46
#include "common/status.h"
47
#include "exec/exchange/local_exchange_sink_operator.h"
48
#include "exec/exchange/local_exchange_source_operator.h"
49
#include "exec/exchange/local_exchanger.h"
50
#include "exec/exchange/vdata_stream_mgr.h"
51
#include "exec/operator/aggregation_sink_operator.h"
52
#include "exec/operator/aggregation_source_operator.h"
53
#include "exec/operator/analytic_sink_operator.h"
54
#include "exec/operator/analytic_source_operator.h"
55
#include "exec/operator/assert_num_rows_operator.h"
56
#include "exec/operator/blackhole_sink_operator.h"
57
#include "exec/operator/bucketed_aggregation_sink_operator.h"
58
#include "exec/operator/bucketed_aggregation_source_operator.h"
59
#include "exec/operator/cache_sink_operator.h"
60
#include "exec/operator/cache_source_operator.h"
61
#include "exec/operator/datagen_operator.h"
62
#include "exec/operator/dict_sink_operator.h"
63
#include "exec/operator/distinct_streaming_aggregation_operator.h"
64
#include "exec/operator/empty_set_operator.h"
65
#include "exec/operator/exchange_sink_operator.h"
66
#include "exec/operator/exchange_source_operator.h"
67
#include "exec/operator/file_scan_operator.h"
68
#include "exec/operator/group_commit_block_sink_operator.h"
69
#include "exec/operator/group_commit_scan_operator.h"
70
#include "exec/operator/hashjoin_build_sink.h"
71
#include "exec/operator/hashjoin_probe_operator.h"
72
#include "exec/operator/hive_table_sink_operator.h"
73
#include "exec/operator/iceberg_delete_sink_operator.h"
74
#include "exec/operator/iceberg_merge_sink_operator.h"
75
#include "exec/operator/iceberg_table_sink_operator.h"
76
#include "exec/operator/jdbc_scan_operator.h"
77
#include "exec/operator/jdbc_table_sink_operator.h"
78
#include "exec/operator/local_merge_sort_source_operator.h"
79
#include "exec/operator/materialization_opertor.h"
80
#include "exec/operator/maxcompute_table_sink_operator.h"
81
#include "exec/operator/memory_scratch_sink_operator.h"
82
#include "exec/operator/meta_scan_operator.h"
83
#include "exec/operator/multi_cast_data_stream_sink.h"
84
#include "exec/operator/multi_cast_data_stream_source.h"
85
#include "exec/operator/nested_loop_join_build_operator.h"
86
#include "exec/operator/nested_loop_join_probe_operator.h"
87
#include "exec/operator/olap_scan_operator.h"
88
#include "exec/operator/olap_table_sink_operator.h"
89
#include "exec/operator/olap_table_sink_v2_operator.h"
90
#include "exec/operator/partition_sort_sink_operator.h"
91
#include "exec/operator/partition_sort_source_operator.h"
92
#include "exec/operator/partitioned_aggregation_sink_operator.h"
93
#include "exec/operator/partitioned_aggregation_source_operator.h"
94
#include "exec/operator/partitioned_hash_join_probe_operator.h"
95
#include "exec/operator/partitioned_hash_join_sink_operator.h"
96
#include "exec/operator/rec_cte_anchor_sink_operator.h"
97
#include "exec/operator/rec_cte_scan_operator.h"
98
#include "exec/operator/rec_cte_sink_operator.h"
99
#include "exec/operator/rec_cte_source_operator.h"
100
#include "exec/operator/repeat_operator.h"
101
#include "exec/operator/result_file_sink_operator.h"
102
#include "exec/operator/result_sink_operator.h"
103
#include "exec/operator/schema_scan_operator.h"
104
#include "exec/operator/select_operator.h"
105
#include "exec/operator/set_probe_sink_operator.h"
106
#include "exec/operator/set_sink_operator.h"
107
#include "exec/operator/set_source_operator.h"
108
#include "exec/operator/sort_sink_operator.h"
109
#include "exec/operator/sort_source_operator.h"
110
#include "exec/operator/spill_iceberg_table_sink_operator.h"
111
#include "exec/operator/spill_sort_sink_operator.h"
112
#include "exec/operator/spill_sort_source_operator.h"
113
#include "exec/operator/streaming_aggregation_operator.h"
114
#include "exec/operator/table_function_operator.h"
115
#include "exec/operator/tvf_table_sink_operator.h"
116
#include "exec/operator/union_sink_operator.h"
117
#include "exec/operator/union_source_operator.h"
118
#include "exec/pipeline/dependency.h"
119
#include "exec/pipeline/pipeline_task.h"
120
#include "exec/pipeline/task_scheduler.h"
121
#include "exec/runtime_filter/runtime_filter_mgr.h"
122
#include "exec/sort/topn_sorter.h"
123
#include "exec/spill/spill_file.h"
124
#include "io/fs/stream_load_pipe.h"
125
#include "load/stream_load/new_load_stream_mgr.h"
126
#include "runtime/exec_env.h"
127
#include "runtime/fragment_mgr.h"
128
#include "runtime/result_buffer_mgr.h"
129
#include "runtime/runtime_state.h"
130
#include "runtime/thread_context.h"
131
#include "service/backend_options.h"
132
#include "util/client_cache.h"
133
#include "util/countdown_latch.h"
134
#include "util/debug_util.h"
135
#include "util/network_util.h"
136
#include "util/uid_util.h"
137
138
namespace doris {
139
PipelineFragmentContext::PipelineFragmentContext(
140
        TUniqueId query_id, const TPipelineFragmentParams& request,
141
        std::shared_ptr<QueryContext> query_ctx, ExecEnv* exec_env,
142
        const std::function<void(RuntimeState*, Status*)>& call_back)
143
106k
        : _query_id(std::move(query_id)),
144
106k
          _fragment_id(request.fragment_id),
145
106k
          _exec_env(exec_env),
146
106k
          _query_ctx(std::move(query_ctx)),
147
106k
          _call_back(call_back),
148
106k
          _is_report_on_cancel(true),
149
106k
          _params(request),
150
106k
          _parallel_instances(_params.__isset.parallel_instances ? _params.parallel_instances : 0),
151
106k
          _need_notify_close(request.__isset.need_notify_close ? request.need_notify_close
152
106k
                                                               : false) {
153
106k
    _fragment_watcher.start();
154
106k
}
155
156
106k
PipelineFragmentContext::~PipelineFragmentContext() {
157
106k
    LOG_INFO("PipelineFragmentContext::~PipelineFragmentContext")
158
106k
            .tag("query_id", print_id(_query_id))
159
106k
            .tag("fragment_id", _fragment_id);
160
106k
    _release_resource();
161
106k
    {
162
        // The memory released by the query end is recorded in the query mem tracker.
163
106k
        SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_ctx->query_mem_tracker());
164
106k
        _runtime_state.reset();
165
106k
        _query_ctx.reset();
166
106k
    }
167
106k
}
168
169
0
bool PipelineFragmentContext::is_timeout(timespec now) const {
170
0
    if (_timeout <= 0) {
171
0
        return false;
172
0
    }
173
0
    return _fragment_watcher.elapsed_time_seconds(now) > _timeout;
174
0
}
175
176
// notify_close() transitions the PFC from "waiting for external close notification" to
177
// "self-managed close". For recursive CTE fragments, the old PFC is kept alive until
178
// the rerun_fragment(wait_for_destroy) RPC calls this to trigger shutdown.
179
// Returns true if all tasks have already closed (i.e., the PFC can be safely destroyed).
180
930
bool PipelineFragmentContext::notify_close() {
181
930
    bool all_closed = false;
182
930
    bool need_remove = false;
183
930
    {
184
930
        std::lock_guard<std::mutex> l(_task_mutex);
185
930
        if (_closed_tasks >= _total_tasks) {
186
30
            if (_need_notify_close) {
187
                // Fragment was cancelled and waiting for notify to close.
188
                // Record that we need to remove from fragment mgr, but do it
189
                // after releasing _task_mutex to avoid ABBA deadlock with
190
                // dump_pipeline_tasks() (which acquires _pipeline_map lock
191
                // first, then _task_mutex via debug_string()).
192
0
                need_remove = true;
193
0
            }
194
30
            all_closed = true;
195
30
        }
196
        // make fragment release by self after cancel
197
930
        _need_notify_close = false;
198
930
    }
199
930
    if (need_remove) {
200
0
        _exec_env->fragment_mgr()->remove_pipeline_context({_query_id, _fragment_id});
201
0
    }
202
930
    return all_closed;
203
930
}
204
205
// Must not add lock in this method. Because it will call query ctx cancel. And
206
// QueryCtx cancel will call fragment ctx cancel. And Also Fragment ctx's running
207
// Method like exchange sink buffer will call query ctx cancel. If we add lock here
208
// There maybe dead lock.
209
930
void PipelineFragmentContext::cancel(const Status reason) {
210
930
    LOG_INFO("PipelineFragmentContext::cancel")
211
930
            .tag("query_id", print_id(_query_id))
212
930
            .tag("fragment_id", _fragment_id)
213
930
            .tag("reason", reason.to_string());
214
930
    if (notify_close()) {
215
30
        return;
216
30
    }
217
    // Timeout is a special error code, we need print current stack to debug timeout issue.
218
900
    if (reason.is<ErrorCode::TIMEOUT>()) {
219
0
        auto dbg_str = fmt::format("PipelineFragmentContext is cancelled due to timeout:\n{}",
220
0
                                   debug_string());
221
0
        LOG_LONG_STRING(WARNING, dbg_str);
222
0
    }
223
224
    // `ILLEGAL_STATE` means queries this fragment belongs to was not found in FE (maybe finished)
225
900
    if (reason.is<ErrorCode::ILLEGAL_STATE>()) {
226
0
        LOG_WARNING("PipelineFragmentContext is cancelled due to illegal state : {}",
227
0
                    debug_string());
228
0
    }
229
230
900
    if (reason.is<ErrorCode::MEM_LIMIT_EXCEEDED>() || reason.is<ErrorCode::MEM_ALLOC_FAILED>()) {
231
0
        print_profile("cancel pipeline, reason: " + reason.to_string());
232
0
    }
233
234
900
    if (auto error_url = get_load_error_url(); !error_url.empty()) {
235
0
        _query_ctx->set_load_error_url(error_url);
236
0
    }
237
238
900
    if (auto first_error_msg = get_first_error_msg(); !first_error_msg.empty()) {
239
0
        _query_ctx->set_first_error_msg(first_error_msg);
240
0
    }
241
242
900
    _query_ctx->cancel(reason, _fragment_id);
243
900
    if (reason.is<ErrorCode::LIMIT_REACH>()) {
244
230
        _is_report_on_cancel = false;
245
670
    } else {
246
3.32k
        for (auto& id : _fragment_instance_ids) {
247
3.32k
            LOG(WARNING) << "PipelineFragmentContext cancel instance: " << print_id(id);
248
3.32k
        }
249
670
    }
250
    // Get pipe from new load stream manager and send cancel to it or the fragment may hang to wait read from pipe
251
    // For stream load the fragment's query_id == load id, it is set in FE.
252
900
    auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(_query_id);
253
900
    if (stream_load_ctx != nullptr) {
254
0
        stream_load_ctx->pipe->cancel(reason.to_string());
255
        // Set error URL here because after pipe is cancelled, stream load execution may return early.
256
        // We need to set the error URL at this point to ensure error information is properly
257
        // propagated to the client.
258
0
        stream_load_ctx->error_url = get_load_error_url();
259
0
        stream_load_ctx->first_error_msg = get_first_error_msg();
260
0
    }
261
262
3.66k
    for (auto& tasks : _tasks) {
263
8.91k
        for (auto& task : tasks) {
264
8.91k
            task.first->unblock_all_dependencies();
265
8.91k
        }
266
3.66k
    }
267
900
}
268
269
161k
PipelinePtr PipelineFragmentContext::add_pipeline(PipelinePtr parent, int idx) {
270
161k
    PipelineId id = _next_pipeline_id++;
271
161k
    auto pipeline = std::make_shared<Pipeline>(
272
161k
            id, parent ? std::min(parent->num_tasks(), _num_instances) : _num_instances,
273
161k
            parent ? parent->num_tasks() : _num_instances);
274
161k
    if (idx >= 0) {
275
16.5k
        _pipelines.insert(_pipelines.begin() + idx, pipeline);
276
144k
    } else {
277
144k
        _pipelines.emplace_back(pipeline);
278
144k
    }
279
161k
    if (parent) {
280
50.9k
        parent->set_children(pipeline);
281
50.9k
    }
282
161k
    return pipeline;
283
161k
}
284
285
106k
Status PipelineFragmentContext::_build_and_prepare_full_pipeline(ThreadPool* thread_pool) {
286
106k
    {
287
106k
        SCOPED_TIMER(_build_pipelines_timer);
288
        // 2. Build pipelines with operators in this fragment.
289
106k
        auto root_pipeline = add_pipeline();
290
106k
        RETURN_IF_ERROR(_build_pipelines(_runtime_state->obj_pool(), *_query_ctx->desc_tbl,
291
106k
                                         &_root_op, root_pipeline));
292
293
        // 3. Create sink operator
294
106k
        if (!_params.fragment.__isset.output_sink) {
295
0
            return Status::InternalError("No output sink in this fragment!");
296
0
        }
297
106k
        RETURN_IF_ERROR(_create_data_sink(_runtime_state->obj_pool(), _params.fragment.output_sink,
298
106k
                                          _params.fragment.output_exprs, _params,
299
106k
                                          root_pipeline->output_row_desc(), _runtime_state.get(),
300
106k
                                          *_desc_tbl, root_pipeline->id()));
301
106k
        RETURN_IF_ERROR(_sink->init(_params.fragment.output_sink));
302
106k
        RETURN_IF_ERROR(root_pipeline->set_sink(_sink));
303
304
144k
        for (PipelinePtr& pipeline : _pipelines) {
305
144k
            DCHECK(pipeline->sink() != nullptr) << pipeline->operators().size();
306
144k
            RETURN_IF_ERROR(pipeline->sink()->set_child(pipeline->operators().back()));
307
144k
        }
308
106k
    }
309
    // 4. Build local exchanger
310
106k
    if (_runtime_state->enable_local_shuffle()) {
311
106k
        SCOPED_TIMER(_plan_local_exchanger_timer);
312
106k
        RETURN_IF_ERROR(_plan_local_exchange(_params.num_buckets,
313
106k
                                             _params.bucket_seq_to_instance_idx,
314
106k
                                             _params.shuffle_idx_to_instance_idx));
315
106k
    }
316
317
    // 5. Initialize global states in pipelines.
318
161k
    for (PipelinePtr& pipeline : _pipelines) {
319
161k
        SCOPED_TIMER(_prepare_all_pipelines_timer);
320
161k
        pipeline->children().clear();
321
161k
        RETURN_IF_ERROR(pipeline->prepare(_runtime_state.get()));
322
161k
    }
323
324
106k
    {
325
106k
        SCOPED_TIMER(_build_tasks_timer);
326
        // 6. Build pipeline tasks and initialize local state.
327
106k
        RETURN_IF_ERROR(_build_pipeline_tasks(thread_pool));
328
106k
    }
329
330
106k
    return Status::OK();
331
106k
}
332
333
106k
Status PipelineFragmentContext::prepare(ThreadPool* thread_pool) {
334
106k
    if (_prepared) {
335
0
        return Status::InternalError("Already prepared");
336
0
    }
337
106k
    if (_params.__isset.query_options && _params.query_options.__isset.execution_timeout) {
338
106k
        _timeout = _params.query_options.execution_timeout;
339
106k
    }
340
341
106k
    _fragment_level_profile = std::make_unique<RuntimeProfile>("PipelineContext");
342
106k
    _prepare_timer = ADD_TIMER(_fragment_level_profile, "PrepareTime");
343
106k
    SCOPED_TIMER(_prepare_timer);
344
106k
    _build_pipelines_timer = ADD_TIMER(_fragment_level_profile, "BuildPipelinesTime");
345
106k
    _init_context_timer = ADD_TIMER(_fragment_level_profile, "InitContextTime");
346
106k
    _plan_local_exchanger_timer = ADD_TIMER(_fragment_level_profile, "PlanLocalLocalExchangerTime");
347
106k
    _build_tasks_timer = ADD_TIMER(_fragment_level_profile, "BuildTasksTime");
348
106k
    _prepare_all_pipelines_timer = ADD_TIMER(_fragment_level_profile, "PrepareAllPipelinesTime");
349
106k
    {
350
106k
        SCOPED_TIMER(_init_context_timer);
351
106k
        cast_set(_num_instances, _params.local_params.size());
352
106k
        _total_instances =
353
106k
                _params.__isset.total_instances ? _params.total_instances : _num_instances;
354
355
106k
        auto* fragment_context = this;
356
357
106k
        if (_params.query_options.__isset.is_report_success) {
358
106k
            fragment_context->set_is_report_success(_params.query_options.is_report_success);
359
106k
        }
360
361
        // 1. Set up the global runtime state.
362
106k
        _runtime_state = RuntimeState::create_unique(
363
106k
                _params.query_id, _params.fragment_id, _params.query_options,
364
106k
                _query_ctx->query_globals, _exec_env, _query_ctx.get());
365
106k
        _runtime_state->set_task_execution_context(shared_from_this());
366
106k
        SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_runtime_state->query_mem_tracker());
367
106k
        if (_params.__isset.backend_id) {
368
106k
            _runtime_state->set_backend_id(_params.backend_id);
369
106k
        }
370
106k
        if (_params.__isset.import_label) {
371
0
            _runtime_state->set_import_label(_params.import_label);
372
0
        }
373
106k
        if (_params.__isset.db_name) {
374
0
            _runtime_state->set_db_name(_params.db_name);
375
0
        }
376
106k
        if (_params.__isset.load_job_id) {
377
0
            _runtime_state->set_load_job_id(_params.load_job_id);
378
0
        }
379
380
106k
        if (_params.is_simplified_param) {
381
31.7k
            _desc_tbl = _query_ctx->desc_tbl;
382
75.0k
        } else {
383
75.0k
            DCHECK(_params.__isset.desc_tbl);
384
75.0k
            RETURN_IF_ERROR(DescriptorTbl::create(_runtime_state->obj_pool(), _params.desc_tbl,
385
75.0k
                                                  &_desc_tbl));
386
75.0k
        }
387
106k
        _runtime_state->set_desc_tbl(_desc_tbl);
388
106k
        _runtime_state->set_num_per_fragment_instances(_params.num_senders);
389
106k
        _runtime_state->set_load_stream_per_node(_params.load_stream_per_node);
390
106k
        _runtime_state->set_total_load_streams(_params.total_load_streams);
391
106k
        _runtime_state->set_num_local_sink(_params.num_local_sink);
392
393
        // init fragment_instance_ids
394
106k
        const auto target_size = _params.local_params.size();
395
106k
        _fragment_instance_ids.resize(target_size);
396
329k
        for (size_t i = 0; i < _params.local_params.size(); i++) {
397
222k
            auto fragment_instance_id = _params.local_params[i].fragment_instance_id;
398
222k
            _fragment_instance_ids[i] = fragment_instance_id;
399
222k
        }
400
106k
    }
401
402
106k
    RETURN_IF_ERROR(_build_and_prepare_full_pipeline(thread_pool));
403
404
106k
    _init_next_report_time();
405
406
106k
    _prepared = true;
407
106k
    return Status::OK();
408
106k
}
409
410
Status PipelineFragmentContext::_build_pipeline_tasks_for_instance(
411
        int instance_idx,
412
222k
        const std::vector<std::shared_ptr<RuntimeProfile>>& pipeline_id_to_profile) {
413
222k
    const auto& local_params = _params.local_params[instance_idx];
414
222k
    auto fragment_instance_id = local_params.fragment_instance_id;
415
222k
    auto runtime_filter_mgr = std::make_unique<RuntimeFilterMgr>(false);
416
222k
    std::map<PipelineId, PipelineTask*> pipeline_id_to_task;
417
222k
    auto get_shared_state = [&](PipelinePtr pipeline)
418
222k
            -> std::map<int, std::pair<std::shared_ptr<BasicSharedState>,
419
394k
                                       std::vector<std::shared_ptr<Dependency>>>> {
420
394k
        std::map<int, std::pair<std::shared_ptr<BasicSharedState>,
421
394k
                                std::vector<std::shared_ptr<Dependency>>>>
422
394k
                shared_state_map;
423
412k
        for (auto& op : pipeline->operators()) {
424
412k
            auto source_id = op->operator_id();
425
412k
            if (auto iter = _op_id_to_shared_state.find(source_id);
426
412k
                iter != _op_id_to_shared_state.end()) {
427
138k
                shared_state_map.insert({source_id, iter->second});
428
138k
            }
429
412k
        }
430
396k
        for (auto sink_to_source_id : pipeline->sink()->dests_id()) {
431
396k
            if (auto iter = _op_id_to_shared_state.find(sink_to_source_id);
432
396k
                iter != _op_id_to_shared_state.end()) {
433
42.8k
                shared_state_map.insert({sink_to_source_id, iter->second});
434
42.8k
            }
435
396k
        }
436
394k
        return shared_state_map;
437
394k
    };
438
439
712k
    for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) {
440
490k
        auto& pipeline = _pipelines[pip_idx];
441
490k
        if (pipeline->num_tasks() > 1 || instance_idx == 0) {
442
394k
            auto task_runtime_state = RuntimeState::create_unique(
443
394k
                    local_params.fragment_instance_id, _params.query_id, _params.fragment_id,
444
394k
                    _params.query_options, _query_ctx->query_globals, _exec_env, _query_ctx.get());
445
394k
            {
446
                // Initialize runtime state for this task
447
394k
                task_runtime_state->set_query_mem_tracker(_query_ctx->query_mem_tracker());
448
449
394k
                task_runtime_state->set_task_execution_context(shared_from_this());
450
394k
                task_runtime_state->set_be_number(local_params.backend_num);
451
452
394k
                if (_params.__isset.backend_id) {
453
394k
                    task_runtime_state->set_backend_id(_params.backend_id);
454
394k
                }
455
394k
                if (_params.__isset.import_label) {
456
0
                    task_runtime_state->set_import_label(_params.import_label);
457
0
                }
458
394k
                if (_params.__isset.db_name) {
459
0
                    task_runtime_state->set_db_name(_params.db_name);
460
0
                }
461
394k
                if (_params.__isset.load_job_id) {
462
0
                    task_runtime_state->set_load_job_id(_params.load_job_id);
463
0
                }
464
394k
                if (_params.__isset.wal_id) {
465
0
                    task_runtime_state->set_wal_id(_params.wal_id);
466
0
                }
467
394k
                if (_params.__isset.content_length) {
468
0
                    task_runtime_state->set_content_length(_params.content_length);
469
0
                }
470
471
394k
                task_runtime_state->set_desc_tbl(_desc_tbl);
472
394k
                task_runtime_state->set_per_fragment_instance_idx(local_params.sender_id);
473
394k
                task_runtime_state->set_num_per_fragment_instances(_params.num_senders);
474
394k
                task_runtime_state->resize_op_id_to_local_state(max_operator_id());
475
394k
                task_runtime_state->set_max_operator_id(max_operator_id());
476
394k
                task_runtime_state->set_load_stream_per_node(_params.load_stream_per_node);
477
394k
                task_runtime_state->set_total_load_streams(_params.total_load_streams);
478
394k
                task_runtime_state->set_num_local_sink(_params.num_local_sink);
479
480
394k
                task_runtime_state->set_runtime_filter_mgr(runtime_filter_mgr.get());
481
394k
            }
482
394k
            auto cur_task_id = _total_tasks++;
483
394k
            task_runtime_state->set_task_id(cur_task_id);
484
394k
            task_runtime_state->set_task_num(pipeline->num_tasks());
485
394k
            auto task = std::make_shared<PipelineTask>(
486
394k
                    pipeline, cur_task_id, task_runtime_state.get(),
487
394k
                    std::dynamic_pointer_cast<PipelineFragmentContext>(shared_from_this()),
488
394k
                    pipeline_id_to_profile[pip_idx].get(), get_shared_state(pipeline),
489
394k
                    instance_idx);
490
394k
            pipeline->incr_created_tasks(instance_idx, task.get());
491
394k
            pipeline_id_to_task.insert({pipeline->id(), task.get()});
492
394k
            _tasks[instance_idx].emplace_back(
493
394k
                    std::pair<std::shared_ptr<PipelineTask>, std::unique_ptr<RuntimeState>> {
494
394k
                            std::move(task), std::move(task_runtime_state)});
495
394k
        }
496
490k
    }
497
498
    /**
499
         * Build DAG for pipeline tasks.
500
         * For example, we have
501
         *
502
         *   ExchangeSink (Pipeline1)     JoinBuildSink (Pipeline2)
503
         *            \                      /
504
         *          JoinProbeOperator1 (Pipeline1)    JoinBuildSink (Pipeline3)
505
         *                 \                          /
506
         *               JoinProbeOperator2 (Pipeline1)
507
         *
508
         * In this fragment, we have three pipelines and pipeline 1 depends on pipeline 2 and pipeline 3.
509
         * To build this DAG, `_dag` manage dependencies between pipelines by pipeline ID and
510
         * `pipeline_id_to_task` is used to find the task by a unique pipeline ID.
511
         *
512
         * Finally, we have two upstream dependencies in Pipeline1 corresponding to JoinProbeOperator1
513
         * and JoinProbeOperator2.
514
         */
515
490k
    for (auto& _pipeline : _pipelines) {
516
490k
        if (pipeline_id_to_task.contains(_pipeline->id())) {
517
394k
            auto* task = pipeline_id_to_task[_pipeline->id()];
518
394k
            DCHECK(task != nullptr);
519
520
            // If this task has upstream dependency, then inject it into this task.
521
394k
            if (_dag.contains(_pipeline->id())) {
522
263k
                auto& deps = _dag[_pipeline->id()];
523
400k
                for (auto& dep : deps) {
524
400k
                    if (pipeline_id_to_task.contains(dep)) {
525
208k
                        auto ss = pipeline_id_to_task[dep]->get_sink_shared_state();
526
208k
                        if (ss) {
527
128k
                            task->inject_shared_state(ss);
528
128k
                        } else {
529
80.5k
                            pipeline_id_to_task[dep]->inject_shared_state(
530
80.5k
                                    task->get_source_shared_state());
531
80.5k
                        }
532
208k
                    }
533
400k
                }
534
263k
            }
535
394k
        }
536
490k
    }
537
712k
    for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) {
538
490k
        if (pipeline_id_to_task.contains(_pipelines[pip_idx]->id())) {
539
394k
            auto* task = pipeline_id_to_task[_pipelines[pip_idx]->id()];
540
394k
            DCHECK(pipeline_id_to_profile[pip_idx]);
541
394k
            std::vector<TScanRangeParams> scan_ranges;
542
394k
            auto node_id = _pipelines[pip_idx]->operators().front()->node_id();
543
394k
            if (local_params.per_node_scan_ranges.contains(node_id)) {
544
84.5k
                scan_ranges = local_params.per_node_scan_ranges.find(node_id)->second;
545
84.5k
            }
546
394k
            RETURN_IF_ERROR_OR_CATCH_EXCEPTION(task->prepare(scan_ranges, local_params.sender_id,
547
394k
                                                             _params.fragment.output_sink));
548
394k
        }
549
490k
    }
550
222k
    {
551
222k
        std::lock_guard<std::mutex> l(_state_map_lock);
552
222k
        _runtime_filter_mgr_map[instance_idx] = std::move(runtime_filter_mgr);
553
222k
    }
554
222k
    return Status::OK();
555
222k
}
556
557
106k
Status PipelineFragmentContext::_build_pipeline_tasks(ThreadPool* thread_pool) {
558
106k
    _total_tasks = 0;
559
106k
    _closed_tasks = 0;
560
106k
    const auto target_size = _params.local_params.size();
561
106k
    _tasks.resize(target_size);
562
106k
    _runtime_filter_mgr_map.resize(target_size);
563
268k
    for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) {
564
161k
        _pip_id_to_pipeline[_pipelines[pip_idx]->id()] = _pipelines[pip_idx].get();
565
161k
    }
566
106k
    auto pipeline_id_to_profile = _runtime_state->build_pipeline_profile(_pipelines.size());
567
568
106k
    if (target_size > 1 &&
569
106k
        (_runtime_state->query_options().__isset.parallel_prepare_threshold &&
570
16.5k
         target_size > _runtime_state->query_options().parallel_prepare_threshold)) {
571
        // If instances parallelism is big enough ( > parallel_prepare_threshold), we will prepare all tasks by multi-threads
572
0
        std::vector<Status> prepare_status(target_size);
573
0
        int submitted_tasks = 0;
574
0
        Status submit_status;
575
0
        CountDownLatch latch((int)target_size);
576
0
        for (int i = 0; i < target_size; i++) {
577
0
            submit_status = thread_pool->submit_func([&, i]() {
578
0
                SCOPED_ATTACH_TASK(_query_ctx.get());
579
0
                prepare_status[i] = _build_pipeline_tasks_for_instance(i, pipeline_id_to_profile);
580
0
                latch.count_down();
581
0
            });
582
0
            if (LIKELY(submit_status.ok())) {
583
0
                submitted_tasks++;
584
0
            } else {
585
0
                break;
586
0
            }
587
0
        }
588
0
        latch.arrive_and_wait(target_size - submitted_tasks);
589
0
        if (UNLIKELY(!submit_status.ok())) {
590
0
            return submit_status;
591
0
        }
592
0
        for (int i = 0; i < submitted_tasks; i++) {
593
0
            if (!prepare_status[i].ok()) {
594
0
                return prepare_status[i];
595
0
            }
596
0
        }
597
106k
    } else {
598
329k
        for (int i = 0; i < target_size; i++) {
599
222k
            RETURN_IF_ERROR(_build_pipeline_tasks_for_instance(i, pipeline_id_to_profile));
600
222k
        }
601
106k
    }
602
106k
    _pipeline_parent_map.clear();
603
106k
    _op_id_to_shared_state.clear();
604
    // Record task cardinality once when this fragment context finishes task initialization.
605
106k
    _query_ctx->add_total_task_num(_total_tasks.load(std::memory_order_relaxed));
606
607
106k
    return Status::OK();
608
106k
}
609
610
106k
void PipelineFragmentContext::_init_next_report_time() {
611
106k
    auto interval_s = config::pipeline_status_report_interval;
612
106k
    if (_is_report_success && interval_s > 0 && _timeout > interval_s) {
613
10.2k
        VLOG_FILE << "enable period report: fragment id=" << _fragment_id;
614
10.2k
        uint64_t report_fragment_offset = (uint64_t)(rand() % interval_s) * NANOS_PER_SEC;
615
        // We don't want to wait longer than it takes to run the entire fragment.
616
10.2k
        _previous_report_time =
617
10.2k
                MonotonicNanos() + report_fragment_offset - (uint64_t)(interval_s)*NANOS_PER_SEC;
618
10.2k
        _disable_period_report = false;
619
10.2k
    }
620
106k
}
621
622
1.21k
void PipelineFragmentContext::refresh_next_report_time() {
623
1.21k
    auto disable = _disable_period_report.load(std::memory_order_acquire);
624
1.21k
    DCHECK(disable == true);
625
1.21k
    _previous_report_time.store(MonotonicNanos(), std::memory_order_release);
626
1.21k
    _disable_period_report.compare_exchange_strong(disable, false);
627
1.21k
}
628
629
1.54M
void PipelineFragmentContext::trigger_report_if_necessary() {
630
1.54M
    if (!_is_report_success) {
631
1.42M
        return;
632
1.42M
    }
633
118k
    auto disable = _disable_period_report.load(std::memory_order_acquire);
634
118k
    if (disable) {
635
2.70k
        return;
636
2.70k
    }
637
115k
    int32_t interval_s = config::pipeline_status_report_interval;
638
115k
    if (interval_s <= 0) {
639
0
        LOG(WARNING) << "config::status_report_interval is equal to or less than zero, do not "
640
0
                        "trigger "
641
0
                        "report.";
642
0
    }
643
115k
    uint64_t next_report_time = _previous_report_time.load(std::memory_order_acquire) +
644
115k
                                (uint64_t)(interval_s)*NANOS_PER_SEC;
645
115k
    if (MonotonicNanos() > next_report_time) {
646
1.22k
        if (!_disable_period_report.compare_exchange_strong(disable, true,
647
1.22k
                                                            std::memory_order_acq_rel)) {
648
6
            return;
649
6
        }
650
1.21k
        if (VLOG_FILE_IS_ON) {
651
0
            VLOG_FILE << "Reporting "
652
0
                      << "profile for query_id " << print_id(_query_id)
653
0
                      << ", fragment id: " << _fragment_id;
654
655
0
            std::stringstream ss;
656
0
            _runtime_state->runtime_profile()->compute_time_in_profile();
657
0
            _runtime_state->runtime_profile()->pretty_print(&ss);
658
0
            if (_runtime_state->load_channel_profile()) {
659
0
                _runtime_state->load_channel_profile()->pretty_print(&ss);
660
0
            }
661
662
0
            VLOG_FILE << "Query " << print_id(get_query_id()) << " fragment " << get_fragment_id()
663
0
                      << " profile:\n"
664
0
                      << ss.str();
665
0
        }
666
1.21k
        auto st = send_report(false);
667
1.21k
        if (!st.ok()) {
668
0
            disable = true;
669
0
            _disable_period_report.compare_exchange_strong(disable, false,
670
0
                                                           std::memory_order_acq_rel);
671
0
        }
672
1.21k
    }
673
115k
}
674
675
// Build the operator tree (and its pipelines) from the fragment's thrift plan.
// `root` receives the root operator and `cur_pipe` is the pipeline it is
// placed in. Throws on an empty plan and fails if the preorder reconstruction
// does not consume every thrift node.
Status PipelineFragmentContext::_build_pipelines(ObjectPool* pool, const DescriptorTbl& descs,
                                                 OperatorPtr* root, PipelinePtr cur_pipe) {
    const auto& plan_nodes = _params.fragment.plan.nodes;
    if (plan_nodes.empty()) {
        throw Exception(ErrorCode::INTERNAL_ERROR, "Invalid plan which has no plan node!");
    }

    // Cursor into the preorder node list; advanced by _create_tree_helper.
    int node_idx = 0;
    RETURN_IF_ERROR(_create_tree_helper(pool, plan_nodes, descs, nullptr, &node_idx, root, cur_pipe,
                                        0, false, false));

    // Every thrift node must have been consumed; otherwise the tree is malformed.
    if (node_idx + 1 != plan_nodes.size()) {
        return Status::InternalError(
                "Plan tree only partially reconstructed. Not all thrift nodes were used.");
    }
    return Status::OK();
}
692
693
// Recursively reconstructs one subtree of the operator tree from the preorder
// thrift node list `tnodes`, creating an operator per node and attaching it to
// `parent` (or publishing it through `root` for the topmost node).
//
// `node_idx` is the cursor into `tnodes`, advanced in preorder as children are
// consumed. `followed_by_shuffled_operator` and `require_bucket_distribution`
// propagate data-distribution requirements down to children so local exchanges
// can be planned consistently later (see the comment block in the body).
Status PipelineFragmentContext::_create_tree_helper(
        ObjectPool* pool, const std::vector<TPlanNode>& tnodes, const DescriptorTbl& descs,
        OperatorPtr parent, int* node_idx, OperatorPtr* root, PipelinePtr& cur_pipe, int child_idx,
        const bool followed_by_shuffled_operator, const bool require_bucket_distribution) {
    // propagate error case: the cursor must point at a valid thrift node
    if (*node_idx >= tnodes.size()) {
        return Status::InternalError(
                "Failed to reconstruct plan tree from thrift. Node id: {}, number of nodes: {}",
                *node_idx, tnodes.size());
    }
    const TPlanNode& tnode = tnodes[*node_idx];

    int num_children = tnodes[*node_idx].num_children;
    bool current_followed_by_shuffled_operator = followed_by_shuffled_operator;
    bool current_require_bucket_distribution = require_bucket_distribution;
    // TODO: Create CacheOperator is confused now
    OperatorPtr op = nullptr;
    OperatorPtr cache_op = nullptr;
    // `_create_operator` may also produce a cache operator wrapper (`cache_op`)
    // alongside the main operator `op`.
    RETURN_IF_ERROR(_create_operator(pool, tnodes[*node_idx], descs, op, cur_pipe,
                                     parent == nullptr ? -1 : parent->node_id(), child_idx,
                                     followed_by_shuffled_operator,
                                     current_require_bucket_distribution, cache_op));
    // Initialization must be done here. For example, group by expressions in agg will be used to
    // decide if a local shuffle should be planed, so it must be initialized here.
    RETURN_IF_ERROR(op->init(tnode, _runtime_state.get()));
    // assert(parent != nullptr || (node_idx == 0 && root_expr != nullptr));
    if (parent != nullptr) {
        // add to parent's child(s); prefer the cache wrapper when one was created
        RETURN_IF_ERROR(parent->set_child(cache_op ? cache_op : op));
    } else {
        *root = op;
    }
    /**
     * `ExchangeType::HASH_SHUFFLE` should be used if an operator is followed by a shuffled operator (shuffled hash join, union operator followed by co-located operators).
     *
     * For plan:
     * LocalExchange(id=0) -> Aggregation(id=1) -> ShuffledHashJoin(id=2)
     *                           Exchange(id=3) -> ShuffledHashJoinBuild(id=2)
     * We must ensure data distribution of `LocalExchange(id=0)` is same as Exchange(id=3).
     *
     * If an operator's is followed by a local exchange without shuffle (e.g. passthrough), a
     * shuffled local exchanger will be used before join so it is not followed by shuffle join.
     */
    // When the pipeline has no operators yet, the requirement comes from its
    // sink; otherwise from the operator just created.
    auto required_data_distribution =
            cur_pipe->operators().empty()
                    ? cur_pipe->sink()->required_data_distribution(_runtime_state.get())
                    : op->required_data_distribution(_runtime_state.get());
    // Children are "followed by a shuffled operator" if this node (or an
    // ancestor) is shuffled and the required distribution is a hash exchange,
    // or if the flag was already set and this node requires no exchange (NOOP).
    current_followed_by_shuffled_operator =
            ((followed_by_shuffled_operator ||
              (cur_pipe->operators().empty() ? cur_pipe->sink()->is_shuffled_operator()
                                             : op->is_shuffled_operator())) &&
             Pipeline::is_hash_exchange(required_data_distribution.distribution_type)) ||
            (followed_by_shuffled_operator &&
             required_data_distribution.distribution_type == ExchangeType::NOOP);

    // Same propagation rule for bucket (co-located) distribution requirements.
    current_require_bucket_distribution =
            ((require_bucket_distribution ||
              (cur_pipe->operators().empty() ? cur_pipe->sink()->is_colocated_operator()
                                             : op->is_colocated_operator())) &&
             Pipeline::is_hash_exchange(required_data_distribution.distribution_type)) ||
            (require_bucket_distribution &&
             required_data_distribution.distribution_type == ExchangeType::NOOP);

    if (num_children == 0) {
        // A leaf node (source) decides whether this fragment's source is serial.
        // NOTE(review): assigned on every leaf, so the last-visited leaf wins —
        // confirm this is the intended semantics.
        _use_serial_source = op->is_serial_operator();
    }
    // rely on that tnodes is preorder of the plan
    for (int i = 0; i < num_children; i++) {
        ++*node_idx;
        RETURN_IF_ERROR(_create_tree_helper(pool, tnodes, descs, op, node_idx, nullptr, cur_pipe, i,
                                            current_followed_by_shuffled_operator,
                                            current_require_bucket_distribution));

        // we are expecting a child, but have used all nodes
        // this means we have been given a bad tree and must fail
        if (*node_idx >= tnodes.size()) {
            return Status::InternalError(
                    "Failed to reconstruct plan tree from thrift. Node id: {}, number of "
                    "nodes: {}",
                    *node_idx, tnodes.size());
        }
    }

    return Status::OK();
}
778
779
void PipelineFragmentContext::_inherit_pipeline_properties(
780
        const DataDistribution& data_distribution, PipelinePtr pipe_with_source,
781
16.5k
        PipelinePtr pipe_with_sink) {
782
16.5k
    pipe_with_sink->set_num_tasks(pipe_with_source->num_tasks());
783
16.5k
    pipe_with_source->set_num_tasks(_num_instances);
784
16.5k
    pipe_with_source->set_data_distribution(data_distribution);
785
16.5k
}
786
787
// Splits `cur_pipe` at operator index `idx` by inserting a local exchange:
// operators [0, idx) move into `new_pip`, which gets a LocalExchangeSink as
// its sink, while `cur_pipe` gets a LocalExchangeSource as its new first
// operator. The two halves communicate through a LocalExchangeSharedState
// whose exchanger is chosen from `data_distribution.distribution_type`; the
// pipeline DAG and children edges are rewired accordingly.
Status PipelineFragmentContext::_add_local_exchange_impl(
        int idx, ObjectPool* pool, PipelinePtr cur_pipe, PipelinePtr new_pip,
        DataDistribution data_distribution, bool* do_local_exchange, int num_buckets,
        const std::map<int, int>& bucket_seq_to_instance_idx,
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
    auto& operators = cur_pipe->operators();
    const auto downstream_pipeline_id = cur_pipe->id();
    auto local_exchange_id = next_operator_id();
    // 1. Create a new pipeline with local exchange sink.
    DataSinkOperatorPtr sink;
    auto sink_id = next_sink_operator_id();

    /**
     * `bucket_seq_to_instance_idx` is empty if no scan operator is contained in this fragment.
     * So co-located operators(e.g. Agg, Analytic) should use `HASH_SHUFFLE` instead of `BUCKET_HASH_SHUFFLE`.
     */
    const bool followed_by_shuffled_operator =
            operators.size() > idx ? operators[idx]->followed_by_shuffled_operator()
                                   : cur_pipe->sink()->followed_by_shuffled_operator();
    const bool use_global_hash_shuffle = bucket_seq_to_instance_idx.empty() &&
                                         !shuffle_idx_to_instance_idx.contains(-1) &&
                                         followed_by_shuffled_operator && !_use_serial_source;
    sink = std::make_shared<LocalExchangeSinkOperatorX>(
            sink_id, local_exchange_id, use_global_hash_shuffle ? _total_instances : _num_instances,
            data_distribution.partition_exprs, bucket_seq_to_instance_idx);
    if (bucket_seq_to_instance_idx.empty() &&
        data_distribution.distribution_type == ExchangeType::BUCKET_HASH_SHUFFLE) {
        data_distribution.distribution_type = ExchangeType::HASH_SHUFFLE;
    }
    RETURN_IF_ERROR(new_pip->set_sink(sink));
    RETURN_IF_ERROR(new_pip->sink()->init(_runtime_state.get(), data_distribution.distribution_type,
                                          num_buckets, use_global_hash_shuffle,
                                          shuffle_idx_to_instance_idx));

    // 2. Create and initialize LocalExchangeSharedState.
    // Free-block cap passed to every exchanger below; hoisted here because the
    // expression is identical for all distribution types.
    const int free_block_limit =
            _runtime_state->query_options().__isset.local_exchange_free_blocks_limit
                    ? cast_set<int>(
                              _runtime_state->query_options().local_exchange_free_blocks_limit)
                    : 0;
    std::shared_ptr<LocalExchangeSharedState> shared_state =
            LocalExchangeSharedState::create_shared(_num_instances);
    switch (data_distribution.distribution_type) {
    case ExchangeType::HASH_SHUFFLE:
        shared_state->exchanger = ShuffleExchanger::create_unique(
                std::max(cur_pipe->num_tasks(), _num_instances), _num_instances,
                use_global_hash_shuffle ? _total_instances : _num_instances, free_block_limit);
        break;
    case ExchangeType::BUCKET_HASH_SHUFFLE:
        shared_state->exchanger = BucketShuffleExchanger::create_unique(
                std::max(cur_pipe->num_tasks(), _num_instances), _num_instances, num_buckets,
                free_block_limit);
        break;
    case ExchangeType::PASSTHROUGH:
        shared_state->exchanger = PassthroughExchanger::create_unique(
                cur_pipe->num_tasks(), _num_instances, free_block_limit);
        break;
    case ExchangeType::BROADCAST:
        shared_state->exchanger = BroadcastExchanger::create_unique(
                cur_pipe->num_tasks(), _num_instances, free_block_limit);
        break;
    case ExchangeType::PASS_TO_ONE:
        if (_runtime_state->enable_share_hash_table_for_broadcast_join()) {
            // If shared hash table is enabled for BJ, hash table will be built by only one task
            shared_state->exchanger = PassToOneExchanger::create_unique(
                    cur_pipe->num_tasks(), _num_instances, free_block_limit);
        } else {
            shared_state->exchanger = BroadcastExchanger::create_unique(
                    cur_pipe->num_tasks(), _num_instances, free_block_limit);
        }
        break;
    case ExchangeType::ADAPTIVE_PASSTHROUGH:
        shared_state->exchanger = AdaptivePassthroughExchanger::create_unique(
                std::max(cur_pipe->num_tasks(), _num_instances), _num_instances, free_block_limit);
        break;
    default:
        return Status::InternalError(
                "Unsupported local exchange type : " +
                std::to_string(static_cast<int>(data_distribution.distribution_type)));
    }
    shared_state->create_source_dependencies(_num_instances, local_exchange_id, local_exchange_id,
                                             "LOCAL_EXCHANGE_OPERATOR");
    shared_state->create_sink_dependency(sink_id, local_exchange_id, "LOCAL_EXCHANGE_SINK");
    _op_id_to_shared_state.insert({local_exchange_id, {shared_state, shared_state->sink_deps}});

    // 3. Set two pipelines' operator list. For example, split pipeline [Scan - AggSink] to
    // pipeline1 [Scan - LocalExchangeSink] and pipeline2 [LocalExchangeSource - AggSink].

    // 3.1 Initialize new pipeline's operator list.
    std::copy(operators.begin(), operators.begin() + idx,
              std::inserter(new_pip->operators(), new_pip->operators().end()));

    // 3.2 Erase unused operators in previous pipeline.
    operators.erase(operators.begin(), operators.begin() + idx);

    // 4. Initialize LocalExchangeSource and insert it into this pipeline.
    OperatorPtr source_op;
    source_op = std::make_shared<LocalExchangeSourceOperatorX>(pool, local_exchange_id);
    RETURN_IF_ERROR(source_op->set_child(new_pip->operators().back()));
    RETURN_IF_ERROR(source_op->init(data_distribution.distribution_type));
    if (!operators.empty()) {
        // Re-point the first remaining operator at the new local exchange
        // source (clear the old child first).
        RETURN_IF_ERROR(operators.front()->set_child(nullptr));
        RETURN_IF_ERROR(operators.front()->set_child(source_op));
    }
    operators.insert(operators.begin(), source_op);

    // 5. Set children for two pipelines separately. A child pipeline whose sink
    // feeds an operator that moved into `new_pip` follows it; the rest stay
    // attached to `cur_pipe`.
    std::vector<std::shared_ptr<Pipeline>> new_children;
    std::vector<PipelineId> edges_with_source;
    for (const auto& child : cur_pipe->children()) {
        bool found = false;
        for (const auto& op : new_pip->operators()) {
            if (child->sink()->node_id() == op->node_id()) {
                new_pip->set_children(child);
                found = true;
            }
        }
        if (!found) {
            new_children.push_back(child);
            edges_with_source.push_back(child->id());
        }
    }
    new_children.push_back(new_pip);
    edges_with_source.push_back(new_pip->id());

    // 6. Set DAG for new pipelines.
    if (!new_pip->children().empty()) {
        std::vector<PipelineId> edges_with_sink;
        for (const auto& child : new_pip->children()) {
            edges_with_sink.push_back(child->id());
        }
        _dag.insert({new_pip->id(), edges_with_sink});
    }
    cur_pipe->set_children(new_children);
    _dag[downstream_pipeline_id] = edges_with_source;
    RETURN_IF_ERROR(new_pip->sink()->set_child(new_pip->operators().back()));
    RETURN_IF_ERROR(cur_pipe->sink()->set_child(nullptr));
    RETURN_IF_ERROR(cur_pipe->sink()->set_child(cur_pipe->operators().back()));

    // 7. Inherit properties from current pipeline.
    _inherit_pipeline_properties(data_distribution, cur_pipe, new_pip);
    return Status::OK();
}
951
952
// Insert a local exchange in front of operator `idx` of `cur_pipe` when the
// required `data_distribution` demands one, splitting the pipeline in two.
// `*do_local_exchange` is set to true iff a split actually happened. May add
// a second passthrough exchange to keep the sink side parallel (see below).
Status PipelineFragmentContext::_add_local_exchange(
        int pip_idx, int idx, int node_id, ObjectPool* pool, PipelinePtr cur_pipe,
        DataDistribution data_distribution, bool* do_local_exchange, int num_buckets,
        const std::map<int, int>& bucket_seq_to_instance_idx,
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
    // Nothing to do for serial plans, or when the pipeline already satisfies
    // the requested distribution at this position.
    if (_num_instances <= 1 || cur_pipe->num_tasks_of_parent() <= 1 ||
        !cur_pipe->need_to_local_exchange(data_distribution, idx)) {
        return Status::OK();
    }
    *do_local_exchange = true;

    const auto op_num_before_split = cur_pipe->operators().size();
    auto new_pip = add_pipeline(cur_pipe, pip_idx + 1);
    RETURN_IF_ERROR(_add_local_exchange_impl(
            idx, pool, cur_pipe, new_pip, data_distribution, do_local_exchange, num_buckets,
            bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx));

    // The split must add exactly one operator (the local exchange source).
    CHECK(op_num_before_split + 1 == cur_pipe->operators().size() + new_pip->operators().size())
            << "total_op_num: " << op_num_before_split
            << " cur_pipe->operators().size(): " << cur_pipe->operators().size()
            << " new_pip->operators().size(): " << new_pip->operators().size();

    // There are some local shuffles with relatively heavy operations on the sink.
    // If the local sink concurrency is 1 and the local source concurrency is n, the sink becomes a
    // bottleneck. Therefore, local passthrough is used to increase the concurrency of the sink:
    // op -> local sink(1) -> local source (n)
    // op -> local passthrough(1) -> local passthrough(n) ->  local sink(n) -> local source (n)
    if (cur_pipe->num_tasks() > 1 && new_pip->num_tasks() == 1 &&
        Pipeline::heavy_operations_on_the_sink(data_distribution.distribution_type)) {
        RETURN_IF_ERROR(_add_local_exchange_impl(
                cast_set<int>(new_pip->operators().size()), pool, new_pip,
                add_pipeline(new_pip, pip_idx + 2), DataDistribution(ExchangeType::PASSTHROUGH),
                do_local_exchange, num_buckets, bucket_seq_to_instance_idx,
                shuffle_idx_to_instance_idx));
    }
    return Status::OK();
}
993
994
// Plan local exchanges for every pipeline of this fragment, walking from the
// last-created pipeline back to the first so a child's data distribution is
// settled before its parent consumes it.
Status PipelineFragmentContext::_plan_local_exchange(
        int num_buckets, const std::map<int, int>& bucket_seq_to_instance_idx,
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
    for (int pip_idx = cast_set<int>(_pipelines.size()) - 1; pip_idx >= 0; pip_idx--) {
        auto& pip = _pipelines[pip_idx];
        pip->init_data_distribution(_runtime_state.get());
        // Inherit the data distribution from any child pipeline whose sink
        // feeds this pipeline's first operator (i.e. the child is not a join
        // operator's build side).
        for (auto& child : pip->children()) {
            if (child->sink()->node_id() == pip->operators().front()->node_id()) {
                pip->set_data_distribution(child->data_distribution());
            }
        }

        // If `num_buckets == 0`, the fragment is colocated by an exchange node
        // rather than a scan node; downstream, `_num_instances` stands in for
        // `num_buckets` to prevent dividing by zero while still keeping the
        // colocated plan after local shuffle.
        RETURN_IF_ERROR(_plan_local_exchange(num_buckets, pip_idx, pip, bucket_seq_to_instance_idx,
                                             shuffle_idx_to_instance_idx));
    }
    return Status::OK();
}
1018
1019
// Plan local exchanges inside a single pipeline `pip` (index `pip_idx`).
// Scans the operator list starting at index 1 (index 0 is the pipeline's
// source operator, which never needs an upstream local exchange) and splits
// the pipeline whenever an operator's required data distribution demands a
// local exchange; finally checks the sink's requirement as well.
Status PipelineFragmentContext::_plan_local_exchange(
        int num_buckets, int pip_idx, PipelinePtr pip,
        const std::map<int, int>& bucket_seq_to_instance_idx,
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
    int idx = 1;
    bool do_local_exchange = false;
    // Each successful split rewrites `pip->operators()`, so the scan restarts
    // (see the `idx = 2` note below) until a full pass makes no split.
    do {
        auto& ops = pip->operators();
        do_local_exchange = false;
        // Plan local exchange for each operator.
        for (; idx < ops.size();) {
            if (ops[idx]->required_data_distribution(_runtime_state.get()).need_local_exchange()) {
                RETURN_IF_ERROR(_add_local_exchange(
                        pip_idx, idx, ops[idx]->node_id(), _runtime_state->obj_pool(), pip,
                        ops[idx]->required_data_distribution(_runtime_state.get()),
                        &do_local_exchange, num_buckets, bucket_seq_to_instance_idx,
                        shuffle_idx_to_instance_idx));
            }
            if (do_local_exchange) {
                // If local exchange is needed for current operator, we will split this pipeline to
                // two pipelines by local exchange sink/source. And then we need to process remaining
                // operators in this pipeline so we set idx to 2 (0 is local exchange source and 1
                // is current operator was already processed) and continue to plan local exchange.
                idx = 2;
                break;
            }
            idx++;
        }
    } while (do_local_exchange);
    // The sink may also require a different distribution than the last operator
    // produces; if so, insert one more local exchange in front of it.
    if (pip->sink()->required_data_distribution(_runtime_state.get()).need_local_exchange()) {
        RETURN_IF_ERROR(_add_local_exchange(
                pip_idx, idx, pip->sink()->node_id(), _runtime_state->obj_pool(), pip,
                pip->sink()->required_data_distribution(_runtime_state.get()), &do_local_exchange,
                num_buckets, bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx));
    }
    return Status::OK();
}
1056
1057
Status PipelineFragmentContext::_create_data_sink(ObjectPool* pool, const TDataSink& thrift_sink,
1058
                                                  const std::vector<TExpr>& output_exprs,
1059
                                                  const TPipelineFragmentParams& params,
1060
                                                  const RowDescriptor& row_desc,
1061
                                                  RuntimeState* state, DescriptorTbl& desc_tbl,
1062
106k
                                                  PipelineId cur_pipeline_id) {
1063
106k
    switch (thrift_sink.type) {
1064
30.4k
    case TDataSinkType::DATA_STREAM_SINK: {
1065
30.4k
        if (!thrift_sink.__isset.stream_sink) {
1066
0
            return Status::InternalError("Missing data stream sink.");
1067
0
        }
1068
30.4k
        _sink = std::make_shared<ExchangeSinkOperatorX>(
1069
30.4k
                state, row_desc, next_sink_operator_id(), thrift_sink.stream_sink,
1070
30.4k
                params.destinations, _fragment_instance_ids);
1071
30.4k
        break;
1072
30.4k
    }
1073
68.7k
    case TDataSinkType::RESULT_SINK: {
1074
68.7k
        if (!thrift_sink.__isset.result_sink) {
1075
0
            return Status::InternalError("Missing data buffer sink.");
1076
0
        }
1077
1078
68.7k
        auto& pipeline = _pipelines[cur_pipeline_id];
1079
68.7k
        int child_node_id = pipeline->operators().back()->node_id();
1080
68.7k
        _sink = std::make_shared<ResultSinkOperatorX>(next_sink_operator_id(), child_node_id + 1,
1081
68.7k
                                                      row_desc, output_exprs,
1082
68.7k
                                                      thrift_sink.result_sink);
1083
68.7k
        break;
1084
68.7k
    }
1085
0
    case TDataSinkType::DICTIONARY_SINK: {
1086
0
        if (!thrift_sink.__isset.dictionary_sink) {
1087
0
            return Status::InternalError("Missing dict sink.");
1088
0
        }
1089
1090
0
        _sink = std::make_shared<DictSinkOperatorX>(next_sink_operator_id(), row_desc, output_exprs,
1091
0
                                                    thrift_sink.dictionary_sink);
1092
0
        break;
1093
0
    }
1094
0
    case TDataSinkType::GROUP_COMMIT_OLAP_TABLE_SINK:
1095
2.56k
    case TDataSinkType::OLAP_TABLE_SINK: {
1096
2.56k
        auto& pipeline = _pipelines[cur_pipeline_id];
1097
2.56k
        int child_node_id = pipeline->operators().back()->node_id();
1098
2.56k
        if (state->query_options().enable_memtable_on_sink_node &&
1099
2.56k
            !_has_inverted_index_v1_or_partial_update(thrift_sink.olap_table_sink) &&
1100
2.56k
            !config::is_cloud_mode()) {
1101
2.10k
            _sink = std::make_shared<OlapTableSinkV2OperatorX>(
1102
2.10k
                    pool, next_sink_operator_id(), child_node_id + 1, row_desc, output_exprs);
1103
2.10k
        } else {
1104
456
            _sink = std::make_shared<OlapTableSinkOperatorX>(
1105
456
                    pool, next_sink_operator_id(), child_node_id + 1, row_desc, output_exprs);
1106
456
        }
1107
2.56k
        break;
1108
0
    }
1109
0
    case TDataSinkType::GROUP_COMMIT_BLOCK_SINK: {
1110
0
        DCHECK(thrift_sink.__isset.olap_table_sink);
1111
0
        DCHECK(state->get_query_ctx() != nullptr);
1112
0
        state->get_query_ctx()->query_mem_tracker()->is_group_commit_load = true;
1113
0
        _sink = std::make_shared<GroupCommitBlockSinkOperatorX>(next_sink_operator_id(), row_desc,
1114
0
                                                                output_exprs);
1115
0
        break;
1116
0
    }
1117
1.46k
    case TDataSinkType::HIVE_TABLE_SINK: {
1118
1.46k
        if (!thrift_sink.__isset.hive_table_sink) {
1119
0
            return Status::InternalError("Missing hive table sink.");
1120
0
        }
1121
1.46k
        _sink = std::make_shared<HiveTableSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1122
1.46k
                                                         output_exprs);
1123
1.46k
        break;
1124
1.46k
    }
1125
1.73k
    case TDataSinkType::ICEBERG_TABLE_SINK: {
1126
1.73k
        if (!thrift_sink.__isset.iceberg_table_sink) {
1127
0
            return Status::InternalError("Missing iceberg table sink.");
1128
0
        }
1129
1.73k
        if (thrift_sink.iceberg_table_sink.__isset.sort_info) {
1130
0
            _sink = std::make_shared<SpillIcebergTableSinkOperatorX>(pool, next_sink_operator_id(),
1131
0
                                                                     row_desc, output_exprs);
1132
1.73k
        } else {
1133
1.73k
            _sink = std::make_shared<IcebergTableSinkOperatorX>(pool, next_sink_operator_id(),
1134
1.73k
                                                                row_desc, output_exprs);
1135
1.73k
        }
1136
1.73k
        break;
1137
1.73k
    }
1138
20
    case TDataSinkType::ICEBERG_DELETE_SINK: {
1139
20
        if (!thrift_sink.__isset.iceberg_delete_sink) {
1140
0
            return Status::InternalError("Missing iceberg delete sink.");
1141
0
        }
1142
20
        _sink = std::make_shared<IcebergDeleteSinkOperatorX>(pool, next_sink_operator_id(),
1143
20
                                                             row_desc, output_exprs);
1144
20
        break;
1145
20
    }
1146
80
    case TDataSinkType::ICEBERG_MERGE_SINK: {
1147
80
        if (!thrift_sink.__isset.iceberg_merge_sink) {
1148
0
            return Status::InternalError("Missing iceberg merge sink.");
1149
0
        }
1150
80
        _sink = std::make_shared<IcebergMergeSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1151
80
                                                            output_exprs);
1152
80
        break;
1153
80
    }
1154
0
    case TDataSinkType::MAXCOMPUTE_TABLE_SINK: {
1155
0
        if (!thrift_sink.__isset.max_compute_table_sink) {
1156
0
            return Status::InternalError("Missing max compute table sink.");
1157
0
        }
1158
0
        _sink = std::make_shared<MCTableSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1159
0
                                                       output_exprs);
1160
0
        break;
1161
0
    }
1162
88
    case TDataSinkType::JDBC_TABLE_SINK: {
1163
88
        if (!thrift_sink.__isset.jdbc_table_sink) {
1164
0
            return Status::InternalError("Missing data jdbc sink.");
1165
0
        }
1166
88
        if (config::enable_java_support) {
1167
88
            _sink = std::make_shared<JdbcTableSinkOperatorX>(row_desc, next_sink_operator_id(),
1168
88
                                                             output_exprs);
1169
88
        } else {
1170
0
            return Status::InternalError(
1171
0
                    "Jdbc table sink is not enabled, you can change be config "
1172
0
                    "enable_java_support to true and restart be.");
1173
0
        }
1174
88
        break;
1175
88
    }
1176
88
    case TDataSinkType::MEMORY_SCRATCH_SINK: {
1177
0
        if (!thrift_sink.__isset.memory_scratch_sink) {
1178
0
            return Status::InternalError("Missing data buffer sink.");
1179
0
        }
1180
1181
0
        _sink = std::make_shared<MemoryScratchSinkOperatorX>(row_desc, next_sink_operator_id(),
1182
0
                                                             output_exprs);
1183
0
        break;
1184
0
    }
1185
164
    case TDataSinkType::RESULT_FILE_SINK: {
1186
164
        if (!thrift_sink.__isset.result_file_sink) {
1187
0
            return Status::InternalError("Missing result file sink.");
1188
0
        }
1189
1190
        // Result file sink is not the top sink
1191
164
        if (params.__isset.destinations && !params.destinations.empty()) {
1192
0
            _sink = std::make_shared<ResultFileSinkOperatorX>(
1193
0
                    next_sink_operator_id(), row_desc, thrift_sink.result_file_sink,
1194
0
                    params.destinations, output_exprs, desc_tbl);
1195
164
        } else {
1196
164
            _sink = std::make_shared<ResultFileSinkOperatorX>(next_sink_operator_id(), row_desc,
1197
164
                                                              output_exprs);
1198
164
        }
1199
164
        break;
1200
164
    }
1201
1.23k
    case TDataSinkType::MULTI_CAST_DATA_STREAM_SINK: {
1202
1.23k
        DCHECK(thrift_sink.__isset.multi_cast_stream_sink);
1203
1.23k
        DCHECK_GT(thrift_sink.multi_cast_stream_sink.sinks.size(), 0);
1204
1.23k
        auto sink_id = next_sink_operator_id();
1205
1.23k
        const int multi_cast_node_id = sink_id;
1206
1.23k
        auto sender_size = thrift_sink.multi_cast_stream_sink.sinks.size();
1207
        // one sink has multiple sources.
1208
1.23k
        std::vector<int> sources;
1209
4.95k
        for (int i = 0; i < sender_size; ++i) {
1210
3.71k
            auto source_id = next_operator_id();
1211
3.71k
            sources.push_back(source_id);
1212
3.71k
        }
1213
1214
1.23k
        _sink = std::make_shared<MultiCastDataStreamSinkOperatorX>(
1215
1.23k
                sink_id, multi_cast_node_id, sources, pool, thrift_sink.multi_cast_stream_sink);
1216
4.95k
        for (int i = 0; i < sender_size; ++i) {
1217
3.71k
            auto new_pipeline = add_pipeline();
1218
            // use to exchange sink
1219
3.71k
            RowDescriptor* exchange_row_desc = nullptr;
1220
3.71k
            {
1221
3.71k
                const auto& tmp_row_desc =
1222
3.71k
                        !thrift_sink.multi_cast_stream_sink.sinks[i].output_exprs.empty()
1223
3.71k
                                ? RowDescriptor(state->desc_tbl(),
1224
3.71k
                                                {thrift_sink.multi_cast_stream_sink.sinks[i]
1225
3.71k
                                                         .output_tuple_id})
1226
3.71k
                                : row_desc;
1227
3.71k
                exchange_row_desc = pool->add(new RowDescriptor(tmp_row_desc));
1228
3.71k
            }
1229
3.71k
            auto source_id = sources[i];
1230
3.71k
            OperatorPtr source_op;
1231
            // 1. create and set the source operator of multi_cast_data_stream_source for new pipeline
1232
3.71k
            source_op = std::make_shared<MultiCastDataStreamerSourceOperatorX>(
1233
3.71k
                    /*node_id*/ source_id, /*consumer_id*/ i, pool,
1234
3.71k
                    thrift_sink.multi_cast_stream_sink.sinks[i], row_desc,
1235
3.71k
                    /*operator_id=*/source_id);
1236
3.71k
            RETURN_IF_ERROR(new_pipeline->add_operator(
1237
3.71k
                    source_op, params.__isset.parallel_instances ? params.parallel_instances : 0));
1238
            // 2. create and set sink operator of data stream sender for new pipeline
1239
1240
3.71k
            DataSinkOperatorPtr sink_op;
1241
3.71k
            sink_op = std::make_shared<ExchangeSinkOperatorX>(
1242
3.71k
                    state, *exchange_row_desc, next_sink_operator_id(),
1243
3.71k
                    thrift_sink.multi_cast_stream_sink.sinks[i],
1244
3.71k
                    thrift_sink.multi_cast_stream_sink.destinations[i], _fragment_instance_ids);
1245
1246
3.71k
            RETURN_IF_ERROR(new_pipeline->set_sink(sink_op));
1247
3.71k
            {
1248
3.71k
                TDataSink* t = pool->add(new TDataSink());
1249
3.71k
                t->stream_sink = thrift_sink.multi_cast_stream_sink.sinks[i];
1250
3.71k
                RETURN_IF_ERROR(sink_op->init(*t));
1251
3.71k
            }
1252
1253
            // 3. set dependency dag
1254
3.71k
            _dag[new_pipeline->id()].push_back(cur_pipeline_id);
1255
3.71k
        }
1256
1.23k
        if (sources.empty()) {
1257
0
            return Status::InternalError("size of sources must be greater than 0");
1258
0
        }
1259
1.23k
        break;
1260
1.23k
    }
1261
1.23k
    case TDataSinkType::BLACKHOLE_SINK: {
1262
10
        if (!thrift_sink.__isset.blackhole_sink) {
1263
0
            return Status::InternalError("Missing blackhole sink.");
1264
0
        }
1265
1266
10
        _sink.reset(new BlackholeSinkOperatorX(next_sink_operator_id()));
1267
10
        break;
1268
10
    }
1269
156
    case TDataSinkType::TVF_TABLE_SINK: {
1270
156
        if (!thrift_sink.__isset.tvf_table_sink) {
1271
0
            return Status::InternalError("Missing TVF table sink.");
1272
0
        }
1273
156
        _sink = std::make_shared<TVFTableSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1274
156
                                                        output_exprs);
1275
156
        break;
1276
156
    }
1277
0
    default:
1278
0
        return Status::InternalError("Unsuported sink type in pipeline: {}", thrift_sink.type);
1279
106k
    }
1280
106k
    return Status::OK();
1281
106k
}
1282
1283
// NOLINTBEGIN(readability-function-size)
1284
// NOLINTBEGIN(readability-function-cognitive-complexity)
1285
Status PipelineFragmentContext::_create_operator(ObjectPool* pool, const TPlanNode& tnode,
1286
                                                 const DescriptorTbl& descs, OperatorPtr& op,
1287
                                                 PipelinePtr& cur_pipe, int parent_idx,
1288
                                                 int child_idx,
1289
                                                 const bool followed_by_shuffled_operator,
1290
                                                 const bool require_bucket_distribution,
1291
148k
                                                 OperatorPtr& cache_op) {
1292
148k
    std::vector<DataSinkOperatorPtr> sink_ops;
1293
148k
    Defer defer = Defer([&]() {
1294
148k
        if (op) {
1295
148k
            op->update_operator(tnode, followed_by_shuffled_operator, require_bucket_distribution);
1296
148k
        }
1297
148k
        for (auto& s : sink_ops) {
1298
34.3k
            s->update_operator(tnode, followed_by_shuffled_operator, require_bucket_distribution);
1299
34.3k
        }
1300
148k
    });
1301
    // We directly construct the operator from Thrift because the given array is in the order of preorder traversal.
1302
    // Therefore, here we need to use a stack-like structure.
1303
148k
    _pipeline_parent_map.pop(cur_pipe, parent_idx, child_idx);
1304
148k
    std::stringstream error_msg;
1305
148k
    bool enable_query_cache = _params.fragment.__isset.query_cache_param;
1306
1307
148k
    bool fe_with_old_version = false;
1308
148k
    switch (tnode.node_type) {
1309
49.1k
    case TPlanNodeType::OLAP_SCAN_NODE: {
1310
49.1k
        op = std::make_shared<OlapScanOperatorX>(
1311
49.1k
                pool, tnode, next_operator_id(), descs, _num_instances,
1312
49.1k
                enable_query_cache ? _params.fragment.query_cache_param : TQueryCacheParam {});
1313
49.1k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1314
49.1k
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1315
49.1k
        break;
1316
49.1k
    }
1317
0
    case TPlanNodeType::GROUP_COMMIT_SCAN_NODE: {
1318
0
        DCHECK(_query_ctx != nullptr);
1319
0
        _query_ctx->query_mem_tracker()->is_group_commit_load = true;
1320
0
        op = std::make_shared<GroupCommitOperatorX>(pool, tnode, next_operator_id(), descs,
1321
0
                                                    _num_instances);
1322
0
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1323
0
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1324
0
        break;
1325
0
    }
1326
0
    case TPlanNodeType::JDBC_SCAN_NODE: {
1327
0
        if (config::enable_java_support) {
1328
0
            op = std::make_shared<JDBCScanOperatorX>(pool, tnode, next_operator_id(), descs,
1329
0
                                                     _num_instances);
1330
0
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1331
0
        } else {
1332
0
            return Status::InternalError(
1333
0
                    "Jdbc scan node is disabled, you can change be config enable_java_support "
1334
0
                    "to true and restart be.");
1335
0
        }
1336
0
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1337
0
        break;
1338
0
    }
1339
20.6k
    case TPlanNodeType::FILE_SCAN_NODE: {
1340
20.6k
        op = std::make_shared<FileScanOperatorX>(pool, tnode, next_operator_id(), descs,
1341
20.6k
                                                 _num_instances);
1342
20.6k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1343
20.6k
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1344
20.6k
        break;
1345
20.6k
    }
1346
34.2k
    case TPlanNodeType::EXCHANGE_NODE: {
1347
34.2k
        int num_senders = _params.per_exch_num_senders.contains(tnode.node_id)
1348
34.2k
                                  ? _params.per_exch_num_senders.find(tnode.node_id)->second
1349
34.2k
                                  : 0;
1350
34.2k
        DCHECK_GT(num_senders, 0);
1351
34.2k
        op = std::make_shared<ExchangeSourceOperatorX>(pool, tnode, next_operator_id(), descs,
1352
34.2k
                                                       num_senders);
1353
34.2k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1354
34.2k
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1355
34.2k
        break;
1356
34.2k
    }
1357
20.7k
    case TPlanNodeType::AGGREGATION_NODE: {
1358
20.7k
        if (tnode.agg_node.grouping_exprs.empty() &&
1359
20.7k
            descs.get_tuple_descriptor(tnode.agg_node.output_tuple_id)->slots().empty()) {
1360
0
            return Status::InternalError("Illegal aggregate node " + std::to_string(tnode.node_id) +
1361
0
                                         ": group by and output is empty");
1362
0
        }
1363
20.7k
        bool need_create_cache_op =
1364
20.7k
                enable_query_cache && tnode.node_id == _params.fragment.query_cache_param.node_id;
1365
20.7k
        auto create_query_cache_operator = [&](PipelinePtr& new_pipe) {
1366
0
            auto cache_node_id = _params.local_params[0].per_node_scan_ranges.begin()->first;
1367
0
            auto cache_source_id = next_operator_id();
1368
0
            op = std::make_shared<CacheSourceOperatorX>(pool, cache_node_id, cache_source_id,
1369
0
                                                        _params.fragment.query_cache_param);
1370
0
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1371
1372
0
            const auto downstream_pipeline_id = cur_pipe->id();
1373
0
            if (!_dag.contains(downstream_pipeline_id)) {
1374
0
                _dag.insert({downstream_pipeline_id, {}});
1375
0
            }
1376
0
            new_pipe = add_pipeline(cur_pipe);
1377
0
            _dag[downstream_pipeline_id].push_back(new_pipe->id());
1378
1379
0
            DataSinkOperatorPtr cache_sink(new CacheSinkOperatorX(
1380
0
                    next_sink_operator_id(), op->node_id(), op->operator_id()));
1381
0
            RETURN_IF_ERROR(new_pipe->set_sink(cache_sink));
1382
0
            return Status::OK();
1383
0
        };
1384
20.7k
        const bool group_by_limit_opt =
1385
20.7k
                tnode.agg_node.__isset.agg_sort_info_by_group_key && tnode.limit > 0;
1386
1387
        /// PartitionedAggSourceOperatorX does not support "group by limit opt(#29641)" yet.
1388
        /// If `group_by_limit_opt` is true, then it might not need to spill at all.
1389
20.7k
        const bool enable_spill = _runtime_state->enable_spill() &&
1390
20.7k
                                  !tnode.agg_node.grouping_exprs.empty() && !group_by_limit_opt;
1391
20.7k
        const bool is_streaming_agg = tnode.agg_node.__isset.use_streaming_preaggregation &&
1392
20.7k
                                      tnode.agg_node.use_streaming_preaggregation &&
1393
20.7k
                                      !tnode.agg_node.grouping_exprs.empty();
1394
        // TODO: distinct streaming agg does not support spill.
1395
20.7k
        const bool can_use_distinct_streaming_agg =
1396
20.7k
                (!enable_spill || is_streaming_agg) && tnode.agg_node.aggregate_functions.empty() &&
1397
20.7k
                !tnode.agg_node.__isset.agg_sort_info_by_group_key &&
1398
20.7k
                _params.query_options.__isset.enable_distinct_streaming_aggregation &&
1399
20.7k
                _params.query_options.enable_distinct_streaming_aggregation;
1400
1401
20.7k
        if (can_use_distinct_streaming_agg) {
1402
36
            if (need_create_cache_op) {
1403
0
                PipelinePtr new_pipe;
1404
0
                RETURN_IF_ERROR(create_query_cache_operator(new_pipe));
1405
1406
0
                cache_op = op;
1407
0
                op = std::make_shared<DistinctStreamingAggOperatorX>(pool, next_operator_id(),
1408
0
                                                                     tnode, descs);
1409
0
                RETURN_IF_ERROR(new_pipe->add_operator(op, _parallel_instances));
1410
0
                RETURN_IF_ERROR(cur_pipe->operators().front()->set_child(op));
1411
0
                cur_pipe = new_pipe;
1412
36
            } else {
1413
36
                op = std::make_shared<DistinctStreamingAggOperatorX>(pool, next_operator_id(),
1414
36
                                                                     tnode, descs);
1415
36
                RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1416
36
            }
1417
20.7k
        } else if (is_streaming_agg) {
1418
1.79k
            if (need_create_cache_op) {
1419
0
                PipelinePtr new_pipe;
1420
0
                RETURN_IF_ERROR(create_query_cache_operator(new_pipe));
1421
0
                cache_op = op;
1422
0
                op = std::make_shared<StreamingAggOperatorX>(pool, next_operator_id(), tnode,
1423
0
                                                             descs);
1424
0
                RETURN_IF_ERROR(cur_pipe->operators().front()->set_child(op));
1425
0
                RETURN_IF_ERROR(new_pipe->add_operator(op, _parallel_instances));
1426
0
                cur_pipe = new_pipe;
1427
1.79k
            } else {
1428
1.79k
                op = std::make_shared<StreamingAggOperatorX>(pool, next_operator_id(), tnode,
1429
1.79k
                                                             descs);
1430
1.79k
                RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1431
1.79k
            }
1432
18.9k
        } else {
1433
            // create new pipeline to add query cache operator
1434
18.9k
            PipelinePtr new_pipe;
1435
18.9k
            if (need_create_cache_op) {
1436
0
                RETURN_IF_ERROR(create_query_cache_operator(new_pipe));
1437
0
                cache_op = op;
1438
0
            }
1439
1440
18.9k
            if (enable_spill) {
1441
0
                op = std::make_shared<PartitionedAggSourceOperatorX>(pool, tnode,
1442
0
                                                                     next_operator_id(), descs);
1443
18.9k
            } else {
1444
18.9k
                op = std::make_shared<AggSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1445
18.9k
            }
1446
18.9k
            if (need_create_cache_op) {
1447
0
                RETURN_IF_ERROR(cur_pipe->operators().front()->set_child(op));
1448
0
                RETURN_IF_ERROR(new_pipe->add_operator(op, _parallel_instances));
1449
0
                cur_pipe = new_pipe;
1450
18.9k
            } else {
1451
18.9k
                RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1452
18.9k
            }
1453
1454
18.9k
            const auto downstream_pipeline_id = cur_pipe->id();
1455
18.9k
            if (!_dag.contains(downstream_pipeline_id)) {
1456
17.6k
                _dag.insert({downstream_pipeline_id, {}});
1457
17.6k
            }
1458
18.9k
            cur_pipe = add_pipeline(cur_pipe);
1459
18.9k
            _dag[downstream_pipeline_id].push_back(cur_pipe->id());
1460
1461
18.9k
            if (enable_spill) {
1462
0
                sink_ops.push_back(std::make_shared<PartitionedAggSinkOperatorX>(
1463
0
                        pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1464
18.9k
            } else {
1465
18.9k
                sink_ops.push_back(std::make_shared<AggSinkOperatorX>(
1466
18.9k
                        pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1467
18.9k
            }
1468
18.9k
            RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
1469
18.9k
            RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
1470
18.9k
        }
1471
20.7k
        break;
1472
20.7k
    }
1473
20.7k
    case TPlanNodeType::BUCKETED_AGGREGATION_NODE: {
1474
0
        if (tnode.bucketed_agg_node.grouping_exprs.empty()) {
1475
0
            return Status::InternalError(
1476
0
                    "Bucketed aggregation node {} should not be used without group by keys",
1477
0
                    tnode.node_id);
1478
0
        }
1479
1480
        // Create source operator (goes on the current / downstream pipeline).
1481
0
        op = std::make_shared<BucketedAggSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1482
0
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1483
1484
        // Create a new pipeline for the sink side.
1485
0
        const auto downstream_pipeline_id = cur_pipe->id();
1486
0
        if (!_dag.contains(downstream_pipeline_id)) {
1487
0
            _dag.insert({downstream_pipeline_id, {}});
1488
0
        }
1489
0
        cur_pipe = add_pipeline(cur_pipe);
1490
0
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());
1491
1492
        // Create sink operator.
1493
0
        sink_ops.push_back(std::make_shared<BucketedAggSinkOperatorX>(
1494
0
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1495
0
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
1496
0
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
1497
1498
        // Pre-register a single shared state for ALL instances so that every
1499
        // sink instance writes its per-instance hash table into the same
1500
        // BucketedAggSharedState and every source instance can merge across
1501
        // all of them.
1502
0
        {
1503
0
            auto shared_state = BucketedAggSharedState::create_shared();
1504
0
            shared_state->id = op->operator_id();
1505
0
            shared_state->related_op_ids.insert(op->operator_id());
1506
1507
0
            for (int i = 0; i < _num_instances; i++) {
1508
0
                auto sink_dep = std::make_shared<Dependency>(op->operator_id(), op->node_id(),
1509
0
                                                             "BUCKETED_AGG_SINK_DEPENDENCY");
1510
0
                sink_dep->set_shared_state(shared_state.get());
1511
0
                shared_state->sink_deps.push_back(sink_dep);
1512
0
            }
1513
0
            shared_state->create_source_dependencies(_num_instances, op->operator_id(),
1514
0
                                                     op->node_id(), "BUCKETED_AGG_SOURCE");
1515
0
            _op_id_to_shared_state.insert(
1516
0
                    {op->operator_id(), {shared_state, shared_state->sink_deps}});
1517
0
        }
1518
0
        break;
1519
0
    }
1520
824
    case TPlanNodeType::HASH_JOIN_NODE: {
1521
824
        const auto is_broadcast_join = tnode.hash_join_node.__isset.is_broadcast_join &&
1522
824
                                       tnode.hash_join_node.is_broadcast_join;
1523
824
        const auto enable_spill = _runtime_state->enable_spill();
1524
824
        if (enable_spill && !is_broadcast_join) {
1525
0
            auto tnode_ = tnode;
1526
0
            tnode_.runtime_filters.clear();
1527
0
            auto inner_probe_operator =
1528
0
                    std::make_shared<HashJoinProbeOperatorX>(pool, tnode_, 0, descs);
1529
1530
            // probe side inner sink operator is used to build hash table on probe side when data is spilled.
1531
            // So here use `tnode_` which has no runtime filters.
1532
0
            auto probe_side_inner_sink_operator =
1533
0
                    std::make_shared<HashJoinBuildSinkOperatorX>(pool, 0, 0, tnode_, descs);
1534
1535
0
            RETURN_IF_ERROR(inner_probe_operator->init(tnode_, _runtime_state.get()));
1536
0
            RETURN_IF_ERROR(probe_side_inner_sink_operator->init(tnode_, _runtime_state.get()));
1537
1538
0
            auto probe_operator = std::make_shared<PartitionedHashJoinProbeOperatorX>(
1539
0
                    pool, tnode_, next_operator_id(), descs);
1540
0
            probe_operator->set_inner_operators(probe_side_inner_sink_operator,
1541
0
                                                inner_probe_operator);
1542
0
            op = std::move(probe_operator);
1543
0
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1544
1545
0
            const auto downstream_pipeline_id = cur_pipe->id();
1546
0
            if (!_dag.contains(downstream_pipeline_id)) {
1547
0
                _dag.insert({downstream_pipeline_id, {}});
1548
0
            }
1549
0
            PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
1550
0
            _dag[downstream_pipeline_id].push_back(build_side_pipe->id());
1551
1552
0
            auto inner_sink_operator =
1553
0
                    std::make_shared<HashJoinBuildSinkOperatorX>(pool, 0, 0, tnode, descs);
1554
0
            auto sink_operator = std::make_shared<PartitionedHashJoinSinkOperatorX>(
1555
0
                    pool, next_sink_operator_id(), op->operator_id(), tnode_, descs);
1556
0
            RETURN_IF_ERROR(inner_sink_operator->init(tnode, _runtime_state.get()));
1557
1558
0
            sink_operator->set_inner_operators(inner_sink_operator, inner_probe_operator);
1559
0
            sink_ops.push_back(std::move(sink_operator));
1560
0
            RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
1561
0
            RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode_, _runtime_state.get()));
1562
1563
0
            _pipeline_parent_map.push(op->node_id(), cur_pipe);
1564
0
            _pipeline_parent_map.push(op->node_id(), build_side_pipe);
1565
824
        } else {
1566
824
            op = std::make_shared<HashJoinProbeOperatorX>(pool, tnode, next_operator_id(), descs);
1567
824
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1568
1569
824
            const auto downstream_pipeline_id = cur_pipe->id();
1570
824
            if (!_dag.contains(downstream_pipeline_id)) {
1571
812
                _dag.insert({downstream_pipeline_id, {}});
1572
812
            }
1573
824
            PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
1574
824
            _dag[downstream_pipeline_id].push_back(build_side_pipe->id());
1575
1576
824
            sink_ops.push_back(std::make_shared<HashJoinBuildSinkOperatorX>(
1577
824
                    pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1578
824
            RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
1579
824
            RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode, _runtime_state.get()));
1580
1581
824
            _pipeline_parent_map.push(op->node_id(), cur_pipe);
1582
824
            _pipeline_parent_map.push(op->node_id(), build_side_pipe);
1583
824
        }
1584
824
        if (is_broadcast_join && _runtime_state->enable_share_hash_table_for_broadcast_join()) {
1585
748
            std::shared_ptr<HashJoinSharedState> shared_state =
1586
748
                    HashJoinSharedState::create_shared(_num_instances);
1587
6.53k
            for (int i = 0; i < _num_instances; i++) {
1588
5.78k
                auto sink_dep = std::make_shared<Dependency>(op->operator_id(), op->node_id(),
1589
5.78k
                                                             "HASH_JOIN_BUILD_DEPENDENCY");
1590
5.78k
                sink_dep->set_shared_state(shared_state.get());
1591
5.78k
                shared_state->sink_deps.push_back(sink_dep);
1592
5.78k
            }
1593
748
            shared_state->create_source_dependencies(_num_instances, op->operator_id(),
1594
748
                                                     op->node_id(), "HASH_JOIN_PROBE");
1595
748
            _op_id_to_shared_state.insert(
1596
748
                    {op->operator_id(), {shared_state, shared_state->sink_deps}});
1597
748
        }
1598
824
        break;
1599
824
    }
1600
2.53k
    case TPlanNodeType::CROSS_JOIN_NODE: {
1601
2.53k
        op = std::make_shared<NestedLoopJoinProbeOperatorX>(pool, tnode, next_operator_id(), descs);
1602
2.53k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1603
1604
2.53k
        const auto downstream_pipeline_id = cur_pipe->id();
1605
2.53k
        if (!_dag.contains(downstream_pipeline_id)) {
1606
2.53k
            _dag.insert({downstream_pipeline_id, {}});
1607
2.53k
        }
1608
2.53k
        PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
1609
2.53k
        _dag[downstream_pipeline_id].push_back(build_side_pipe->id());
1610
1611
2.53k
        sink_ops.push_back(std::make_shared<NestedLoopJoinBuildSinkOperatorX>(
1612
2.53k
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1613
2.53k
        RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
1614
2.53k
        RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode, _runtime_state.get()));
1615
2.53k
        _pipeline_parent_map.push(op->node_id(), cur_pipe);
1616
2.53k
        _pipeline_parent_map.push(op->node_id(), build_side_pipe);
1617
2.53k
        break;
1618
2.53k
    }
1619
4.01k
    case TPlanNodeType::UNION_NODE: {
1620
4.01k
        int child_count = tnode.num_children;
1621
4.01k
        op = std::make_shared<UnionSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1622
4.01k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1623
1624
4.01k
        const auto downstream_pipeline_id = cur_pipe->id();
1625
4.01k
        if (!_dag.contains(downstream_pipeline_id)) {
1626
3.96k
            _dag.insert({downstream_pipeline_id, {}});
1627
3.96k
        }
1628
4.41k
        for (int i = 0; i < child_count; i++) {
1629
408
            PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
1630
408
            _dag[downstream_pipeline_id].push_back(build_side_pipe->id());
1631
408
            sink_ops.push_back(std::make_shared<UnionSinkOperatorX>(
1632
408
                    i, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1633
408
            RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
1634
408
            RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode, _runtime_state.get()));
1635
            // preset children pipelines. if any pipeline found this as its father, will use the prepared pipeline to build.
1636
408
            _pipeline_parent_map.push(op->node_id(), build_side_pipe);
1637
408
        }
1638
4.01k
        break;
1639
4.01k
    }
1640
11.5k
    case TPlanNodeType::SORT_NODE: {
1641
11.5k
        const auto should_spill = _runtime_state->enable_spill() &&
1642
11.5k
                                  tnode.sort_node.algorithm == TSortAlgorithm::FULL_SORT;
1643
11.5k
        const bool use_local_merge =
1644
11.5k
                tnode.sort_node.__isset.use_local_merge && tnode.sort_node.use_local_merge;
1645
11.5k
        if (should_spill) {
1646
0
            op = std::make_shared<SpillSortSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1647
11.5k
        } else if (use_local_merge) {
1648
11.1k
            op = std::make_shared<LocalMergeSortSourceOperatorX>(pool, tnode, next_operator_id(),
1649
11.1k
                                                                 descs);
1650
11.1k
        } else {
1651
472
            op = std::make_shared<SortSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1652
472
        }
1653
11.5k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1654
1655
11.5k
        const auto downstream_pipeline_id = cur_pipe->id();
1656
11.5k
        if (!_dag.contains(downstream_pipeline_id)) {
1657
11.5k
            _dag.insert({downstream_pipeline_id, {}});
1658
11.5k
        }
1659
11.5k
        cur_pipe = add_pipeline(cur_pipe);
1660
11.5k
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());
1661
1662
11.5k
        if (should_spill) {
1663
0
            sink_ops.push_back(std::make_shared<SpillSortSinkOperatorX>(
1664
0
                    pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1665
11.5k
        } else {
1666
11.5k
            sink_ops.push_back(std::make_shared<SortSinkOperatorX>(
1667
11.5k
                    pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1668
11.5k
        }
1669
11.5k
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
1670
11.5k
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
1671
11.5k
        break;
1672
11.5k
    }
1673
11.5k
    case TPlanNodeType::PARTITION_SORT_NODE: {
1674
0
        op = std::make_shared<PartitionSortSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1675
0
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1676
1677
0
        const auto downstream_pipeline_id = cur_pipe->id();
1678
0
        if (!_dag.contains(downstream_pipeline_id)) {
1679
0
            _dag.insert({downstream_pipeline_id, {}});
1680
0
        }
1681
0
        cur_pipe = add_pipeline(cur_pipe);
1682
0
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());
1683
1684
0
        sink_ops.push_back(std::make_shared<PartitionSortSinkOperatorX>(
1685
0
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1686
0
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
1687
0
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
1688
0
        break;
1689
0
    }
1690
44
    case TPlanNodeType::ANALYTIC_EVAL_NODE: {
1691
44
        op = std::make_shared<AnalyticSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1692
44
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1693
1694
44
        const auto downstream_pipeline_id = cur_pipe->id();
1695
44
        if (!_dag.contains(downstream_pipeline_id)) {
1696
44
            _dag.insert({downstream_pipeline_id, {}});
1697
44
        }
1698
44
        cur_pipe = add_pipeline(cur_pipe);
1699
44
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());
1700
1701
44
        sink_ops.push_back(std::make_shared<AnalyticSinkOperatorX>(
1702
44
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
1703
44
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
1704
44
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
1705
44
        break;
1706
44
    }
1707
934
    case TPlanNodeType::MATERIALIZATION_NODE: {
1708
934
        op = std::make_shared<MaterializationOperator>(pool, tnode, next_operator_id(), descs);
1709
934
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1710
934
        break;
1711
934
    }
1712
934
    case TPlanNodeType::INTERSECT_NODE: {
1713
0
        RETURN_IF_ERROR(_build_operators_for_set_operation_node<true>(pool, tnode, descs, op,
1714
0
                                                                      cur_pipe, sink_ops));
1715
0
        break;
1716
0
    }
1717
0
    case TPlanNodeType::EXCEPT_NODE: {
1718
0
        RETURN_IF_ERROR(_build_operators_for_set_operation_node<false>(pool, tnode, descs, op,
1719
0
                                                                       cur_pipe, sink_ops));
1720
0
        break;
1721
0
    }
1722
0
    case TPlanNodeType::REPEAT_NODE: {
1723
0
        op = std::make_shared<RepeatOperatorX>(pool, tnode, next_operator_id(), descs);
1724
0
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1725
0
        break;
1726
0
    }
1727
4
    case TPlanNodeType::TABLE_FUNCTION_NODE: {
1728
4
        op = std::make_shared<TableFunctionOperatorX>(pool, tnode, next_operator_id(), descs);
1729
4
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1730
4
        break;
1731
4
    }
1732
200
    case TPlanNodeType::ASSERT_NUM_ROWS_NODE: {
1733
200
        op = std::make_shared<AssertNumRowsOperatorX>(pool, tnode, next_operator_id(), descs);
1734
200
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1735
200
        break;
1736
200
    }
1737
200
    case TPlanNodeType::EMPTY_SET_NODE: {
1738
98
        op = std::make_shared<EmptySetSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1739
98
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1740
98
        break;
1741
98
    }
1742
194
    case TPlanNodeType::DATA_GEN_SCAN_NODE: {
1743
194
        op = std::make_shared<DataGenSourceOperatorX>(pool, tnode, next_operator_id(), descs);
1744
194
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1745
194
        fe_with_old_version = !tnode.__isset.is_serial_operator;
1746
194
        break;
1747
194
    }
1748
490
    case TPlanNodeType::SCHEMA_SCAN_NODE: {
1749
490
        op = std::make_shared<SchemaScanOperatorX>(pool, tnode, next_operator_id(), descs);
1750
490
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1751
490
        break;
1752
490
    }
1753
1.73k
    case TPlanNodeType::META_SCAN_NODE: {
1754
1.73k
        op = std::make_shared<MetaScanOperatorX>(pool, tnode, next_operator_id(), descs);
1755
1.73k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1756
1.73k
        break;
1757
1.73k
    }
1758
1.73k
    case TPlanNodeType::SELECT_NODE: {
1759
1.24k
        op = std::make_shared<SelectOperatorX>(pool, tnode, next_operator_id(), descs);
1760
1.24k
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1761
1.24k
        break;
1762
1.24k
    }
1763
1.24k
    case TPlanNodeType::REC_CTE_NODE: {
1764
0
        op = std::make_shared<RecCTESourceOperatorX>(pool, tnode, next_operator_id(), descs);
1765
0
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1766
1767
0
        const auto downstream_pipeline_id = cur_pipe->id();
1768
0
        if (!_dag.contains(downstream_pipeline_id)) {
1769
0
            _dag.insert({downstream_pipeline_id, {}});
1770
0
        }
1771
1772
0
        PipelinePtr anchor_side_pipe = add_pipeline(cur_pipe);
1773
0
        _dag[downstream_pipeline_id].push_back(anchor_side_pipe->id());
1774
1775
0
        DataSinkOperatorPtr anchor_sink;
1776
0
        anchor_sink = std::make_shared<RecCTEAnchorSinkOperatorX>(next_sink_operator_id(),
1777
0
                                                                  op->operator_id(), tnode, descs);
1778
0
        RETURN_IF_ERROR(anchor_side_pipe->set_sink(anchor_sink));
1779
0
        RETURN_IF_ERROR(anchor_side_pipe->sink()->init(tnode, _runtime_state.get()));
1780
0
        _pipeline_parent_map.push(op->node_id(), anchor_side_pipe);
1781
1782
0
        PipelinePtr rec_side_pipe = add_pipeline(cur_pipe);
1783
0
        _dag[downstream_pipeline_id].push_back(rec_side_pipe->id());
1784
1785
0
        DataSinkOperatorPtr rec_sink;
1786
0
        rec_sink = std::make_shared<RecCTESinkOperatorX>(next_sink_operator_id(), op->operator_id(),
1787
0
                                                         tnode, descs);
1788
0
        RETURN_IF_ERROR(rec_side_pipe->set_sink(rec_sink));
1789
0
        RETURN_IF_ERROR(rec_side_pipe->sink()->init(tnode, _runtime_state.get()));
1790
0
        _pipeline_parent_map.push(op->node_id(), rec_side_pipe);
1791
1792
0
        break;
1793
0
    }
1794
0
    case TPlanNodeType::REC_CTE_SCAN_NODE: {
1795
0
        op = std::make_shared<RecCTEScanOperatorX>(pool, tnode, next_operator_id(), descs);
1796
0
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1797
0
        break;
1798
0
    }
1799
0
    default:
1800
0
        return Status::InternalError("Unsupported exec type in pipeline: {}",
1801
0
                                     print_plan_node_type(tnode.node_type));
1802
148k
    }
1803
148k
    if (_params.__isset.parallel_instances && fe_with_old_version) {
1804
0
        cur_pipe->set_num_tasks(_params.parallel_instances);
1805
0
        op->set_serial_operator();
1806
0
    }
1807
1808
148k
    return Status::OK();
1809
148k
}
1810
// NOLINTEND(readability-function-cognitive-complexity)
1811
// NOLINTEND(readability-function-size)
1812
1813
template <bool is_intersect>
1814
Status PipelineFragmentContext::_build_operators_for_set_operation_node(
1815
        ObjectPool* pool, const TPlanNode& tnode, const DescriptorTbl& descs, OperatorPtr& op,
1816
0
        PipelinePtr& cur_pipe, std::vector<DataSinkOperatorPtr>& sink_ops) {
1817
0
    op.reset(new SetSourceOperatorX<is_intersect>(pool, tnode, next_operator_id(), descs));
1818
0
    RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1819
1820
0
    const auto downstream_pipeline_id = cur_pipe->id();
1821
0
    if (!_dag.contains(downstream_pipeline_id)) {
1822
0
        _dag.insert({downstream_pipeline_id, {}});
1823
0
    }
1824
1825
0
    for (int child_id = 0; child_id < tnode.num_children; child_id++) {
1826
0
        PipelinePtr probe_side_pipe = add_pipeline(cur_pipe);
1827
0
        _dag[downstream_pipeline_id].push_back(probe_side_pipe->id());
1828
1829
0
        if (child_id == 0) {
1830
0
            sink_ops.push_back(std::make_shared<SetSinkOperatorX<is_intersect>>(
1831
0
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1832
0
        } else {
1833
0
            sink_ops.push_back(std::make_shared<SetProbeSinkOperatorX<is_intersect>>(
1834
0
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1835
0
        }
1836
0
        RETURN_IF_ERROR(probe_side_pipe->set_sink(sink_ops.back()));
1837
0
        RETURN_IF_ERROR(probe_side_pipe->sink()->init(tnode, _runtime_state.get()));
1838
        // prepare children pipelines. if any pipeline found this as its father, will use the prepared pipeline to build.
1839
0
        _pipeline_parent_map.push(op->node_id(), probe_side_pipe);
1840
0
    }
1841
1842
0
    return Status::OK();
1843
0
}
Unexecuted instantiation: _ZN5doris23PipelineFragmentContext39_build_operators_for_set_operation_nodeILb1EEENS_6StatusEPNS_10ObjectPoolERKNS_9TPlanNodeERKNS_13DescriptorTblERSt10shared_ptrINS_13OperatorXBaseEERSB_INS_8PipelineEERSt6vectorISB_INS_21DataSinkOperatorXBaseEESaISK_EE
Unexecuted instantiation: _ZN5doris23PipelineFragmentContext39_build_operators_for_set_operation_nodeILb0EEENS_6StatusEPNS_10ObjectPoolERKNS_9TPlanNodeERKNS_13DescriptorTblERSt10shared_ptrINS_13OperatorXBaseEERSB_INS_8PipelineEERSt6vectorISB_INS_21DataSinkOperatorXBaseEESaISK_EE
1844
1845
106k
Status PipelineFragmentContext::submit() {
    // Submit every pipeline task of this fragment to the query's scheduler.
    // May only be called once per fragment context.
    if (_submitted) {
        return Status::InternalError("submitted");
    }
    _submitted = true;

    int submit_tasks = 0;
    Status st;
    auto* scheduler = _query_ctx->get_pipe_exec_scheduler();
    for (auto& task : _tasks) {
        for (auto& t : task) {
            st = scheduler->submit(t.first);
            DBUG_EXECUTE_IF("PipelineFragmentContext.submit.failed",
                            { st = Status::Aborted("PipelineFragmentContext.submit.failed"); });
            if (!st) {
                cancel(Status::InternalError("submit context to executor fail"));
                std::lock_guard<std::mutex> l(_task_mutex);
                // Only the tasks submitted so far will ever report completion.
                _total_tasks = submit_tasks;
                break;
            }
            submit_tasks++;
        }
        // BUGFIX: the inner `break` only exits one pipeline's task list. Without
        // this outer bail-out, the remaining pipelines are still submitted after
        // cancel() and `st` is overwritten by later (successful) submissions,
        // which silently hides the failure from the check below.
        if (!st.ok()) {
            break;
        }
    }
    if (!st.ok()) {
        bool need_remove = false;
        {
            std::lock_guard<std::mutex> l(_task_mutex);
            if (_closed_tasks >= _total_tasks) {
                need_remove = _close_fragment_instance();
            }
        }
        // Call remove_pipeline_context() outside _task_mutex to avoid ABBA deadlock.
        if (need_remove) {
            _exec_env->fragment_mgr()->remove_pipeline_context({_query_id, _fragment_id});
        }
        return Status::InternalError("Submit pipeline failed. err = {}, BE: {}", st.to_string(),
                                     BackendOptions::get_localhost());
    } else {
        return st;
    }
}
1886
1887
0
void PipelineFragmentContext::print_profile(const std::string& extra_info) {
1888
0
    if (_runtime_state->enable_profile()) {
1889
0
        std::stringstream ss;
1890
0
        for (auto runtime_profile_ptr : _runtime_state->pipeline_id_to_profile()) {
1891
0
            runtime_profile_ptr->pretty_print(&ss);
1892
0
        }
1893
1894
0
        if (_runtime_state->load_channel_profile()) {
1895
0
            _runtime_state->load_channel_profile()->pretty_print(&ss);
1896
0
        }
1897
1898
0
        auto profile_str =
1899
0
                fmt::format("Query {} fragment {} {}, profile, {}", print_id(this->_query_id),
1900
0
                            this->_fragment_id, extra_info, ss.str());
1901
0
        LOG_LONG_STRING(INFO, profile_str);
1902
0
    }
1903
0
}
1904
// If all pipeline tasks binded to the fragment instance are finished, then we could
1905
// close the fragment instance.
1906
// Returns true if the caller should call remove_pipeline_context() **after** releasing
1907
// _task_mutex. We must not call remove_pipeline_context() here because it acquires
1908
// _pipeline_map's shard lock, and this function is called while _task_mutex is held.
1909
// Acquiring _pipeline_map while holding _task_mutex creates an ABBA deadlock with
1910
// dump_pipeline_tasks(), which acquires _pipeline_map first and then _task_mutex
1911
// (via debug_string()).
1912
106k
bool PipelineFragmentContext::_close_fragment_instance() {
1913
106k
    if (_is_fragment_instance_closed) {
1914
0
        return false;
1915
0
    }
1916
106k
    Defer defer_op {[&]() { _is_fragment_instance_closed = true; }};
1917
106k
    _fragment_level_profile->total_time_counter()->update(_fragment_watcher.elapsed_time());
1918
106k
    if (!_need_notify_close) {
1919
106k
        auto st = send_report(true);
1920
106k
        if (!st) {
1921
0
            LOG(WARNING) << fmt::format("Failed to send report for query {}, fragment {}: {}",
1922
0
                                        print_id(_query_id), _fragment_id, st.to_string());
1923
0
        }
1924
106k
    }
1925
    // Print profile content in info log is a tempoeray solution for stream load and external_connector.
1926
    // Since stream load does not have someting like coordinator on FE, so
1927
    // backend can not report profile to FE, ant its profile can not be shown
1928
    // in the same way with other query. So we print the profile content to info log.
1929
1930
106k
    if (_runtime_state->enable_profile() &&
1931
106k
        (_query_ctx->get_query_source() == QuerySource::STREAM_LOAD ||
1932
440
         _query_ctx->get_query_source() == QuerySource::EXTERNAL_CONNECTOR ||
1933
440
         _query_ctx->get_query_source() == QuerySource::GROUP_COMMIT_LOAD)) {
1934
0
        std::stringstream ss;
1935
        // Compute the _local_time_percent before pretty_print the runtime_profile
1936
        // Before add this operation, the print out like that:
1937
        // UNION_NODE (id=0):(Active: 56.720us, non-child: 00.00%)
1938
        // After add the operation, the print out like that:
1939
        // UNION_NODE (id=0):(Active: 56.720us, non-child: 82.53%)
1940
        // We can easily know the exec node execute time without child time consumed.
1941
0
        for (auto runtime_profile_ptr : _runtime_state->pipeline_id_to_profile()) {
1942
0
            runtime_profile_ptr->pretty_print(&ss);
1943
0
        }
1944
1945
0
        if (_runtime_state->load_channel_profile()) {
1946
0
            _runtime_state->load_channel_profile()->pretty_print(&ss);
1947
0
        }
1948
1949
0
        LOG_INFO("Query {} fragment {} profile:\n {}", print_id(_query_id), _fragment_id, ss.str());
1950
0
    }
1951
1952
106k
    if (_query_ctx->enable_profile()) {
1953
440
        _query_ctx->add_fragment_profile(_fragment_id, collect_realtime_profile(),
1954
440
                                         collect_realtime_load_channel_profile());
1955
440
    }
1956
1957
    // Return whether the caller needs to remove from the pipeline map.
1958
    // The caller must do this after releasing _task_mutex.
1959
106k
    return !_need_notify_close;
1960
106k
}
1961
1962
393k
// Called when one pipeline task has finished (closed). Updates per-pipeline and
// per-fragment task accounting and, once every task of the fragment is closed,
// closes the fragment instance and removes it from the fragment manager.
void PipelineFragmentContext::decrement_running_task(PipelineId pipeline_id) {
    // If all tasks of this pipeline have been closed, upstream tasks' output is
    // never needed any more, and we just make those runnable here so they can
    // finish. (_dag maps a downstream pipeline id to its upstream pipeline ids
    // — see where _dag is populated during pipeline construction.)
    DCHECK(_pip_id_to_pipeline.contains(pipeline_id));
    if (_pip_id_to_pipeline[pipeline_id]->close_task()) {
        if (_dag.contains(pipeline_id)) {
            for (auto dep : _dag[pipeline_id]) {
                _pip_id_to_pipeline[dep]->make_all_runnable(pipeline_id);
            }
        }
    }
    bool need_remove = false;
    {
        // _task_mutex guards _closed_tasks/_total_tasks and serializes the
        // close decision; _close_fragment_instance() is called while holding it
        // and therefore must not touch the pipeline map itself.
        std::lock_guard<std::mutex> l(_task_mutex);
        ++_closed_tasks;
        // Update query-level finished task progress in real time.
        _query_ctx->inc_finished_task_num();
        if (_closed_tasks >= _total_tasks) {
            need_remove = _close_fragment_instance();
        }
    }
    // Call remove_pipeline_context() outside _task_mutex to avoid ABBA deadlock.
    if (need_remove) {
        _exec_env->fragment_mgr()->remove_pipeline_context({_query_id, _fragment_id});
    }
}
1987
1988
12.4k
std::string PipelineFragmentContext::get_load_error_url() {
1989
12.4k
    if (const auto& str = _runtime_state->get_error_log_file_path(); !str.empty()) {
1990
0
        return to_load_error_http_path(str);
1991
0
    }
1992
38.6k
    for (auto& tasks : _tasks) {
1993
49.8k
        for (auto& task : tasks) {
1994
49.8k
            if (const auto& str = task.second->get_error_log_file_path(); !str.empty()) {
1995
12
                return to_load_error_http_path(str);
1996
12
            }
1997
49.8k
        }
1998
38.6k
    }
1999
12.4k
    return "";
2000
12.4k
}
2001
2002
12.4k
std::string PipelineFragmentContext::get_first_error_msg() {
2003
12.4k
    if (const auto& str = _runtime_state->get_first_error_msg(); !str.empty()) {
2004
0
        return str;
2005
0
    }
2006
38.6k
    for (auto& tasks : _tasks) {
2007
49.8k
        for (auto& task : tasks) {
2008
49.8k
            if (const auto& str = task.second->get_first_error_msg(); !str.empty()) {
2009
12
                return str;
2010
12
            }
2011
49.8k
        }
2012
38.6k
    }
2013
12.4k
    return "";
2014
12.4k
}
2015
2016
0
std::string PipelineFragmentContext::_to_http_path(const std::string& file_name) const {
2017
0
    std::stringstream url;
2018
0
    url << "http://" << BackendOptions::get_localhost() << ":" << config::webserver_port
2019
0
        << "/api/_download_load?"
2020
0
        << "token=" << _exec_env->token() << "&file=" << file_name;
2021
0
    return url.str();
2022
0
}
2023
2024
11.5k
// Reports this fragment's execution status back to the coordinator FE via the
// reportExecStatus thrift RPC: collects the exec status, load counters, commit
// info, error-tablet info, partition/commit data and unreported errors from the
// runtime state(s) into a TReportExecStatusParams, then sends it (retrying once
// after reopening the connection on transport failure). On unrecoverable RPC
// failure the query is cancelled through req.cancel_fn.
void PipelineFragmentContext::_coordinator_callback(const ReportStatusRequest& req) {
    // Debug hook: optionally delay the report to exercise FE-side timing paths.
    DBUG_EXECUTE_IF("FragmentMgr::coordinator_callback.report_delay", {
        int random_seconds = req.status.is<ErrorCode::DATA_QUALITY_ERROR>() ? 8 : 2;
        LOG_INFO("sleep : ").tag("time", random_seconds).tag("query_id", print_id(req.query_id));
        std::this_thread::sleep_for(std::chrono::seconds(random_seconds));
        LOG_INFO("sleep done").tag("query_id", print_id(req.query_id));
    });

    DCHECK(req.status.ok() || req.done); // if !status.ok() => done
    if (req.coord_addr.hostname == "external") {
        // External query (flink/spark read tablets) not need to report to FE.
        return;
    }
    // Try to obtain a client connection to the coordinator, retrying up to
    // 10 times with a 1s pause between attempts.
    int callback_retries = 10;
    const int sleep_ms = 1000;
    Status exec_status = req.status;
    Status coord_status;
    std::unique_ptr<FrontendServiceConnection> coord = nullptr;
    do {
        coord = std::make_unique<FrontendServiceConnection>(_exec_env->frontend_client_cache(),
                                                            req.coord_addr, &coord_status);
        if (!coord_status.ok()) {
            std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
        }
    } while (!coord_status.ok() && callback_retries-- > 0);

    // All retries exhausted: cancel the query — the report can never be sent.
    if (!coord_status.ok()) {
        UniqueId uid(req.query_id.hi, req.query_id.lo);
        static_cast<void>(req.cancel_fn(Status::InternalError(
                "query_id: {}, couldn't get a client for {}, reason is {}", uid.to_string(),
                PrintThriftNetworkAddress(req.coord_addr), coord_status.to_string())));
        return;
    }

    // ---- Assemble the thrift report parameters. ----
    TReportExecStatusParams params;
    params.protocol_version = FrontendServiceVersion::V1;
    params.__set_query_id(req.query_id);
    params.__set_backend_num(req.backend_num);
    params.__set_fragment_instance_id(req.fragment_instance_id);
    params.__set_fragment_id(req.fragment_id);
    params.__set_status(exec_status.to_thrift());
    params.__set_done(req.done);
    params.__set_query_type(req.runtime_state->query_type());
    params.__isset.profile = false;

    DCHECK(req.runtime_state != nullptr);

    if (req.runtime_state->query_type() == TQueryType::LOAD) {
        // LOAD queries report progress as rows/bytes loaded so far.
        params.__set_loaded_rows(req.runtime_state->num_rows_load_total());
        params.__set_loaded_bytes(req.runtime_state->num_bytes_load_total());
    } else {
        DCHECK(!req.runtime_states.empty());
        // Non-LOAD queries report output files as download URLs (delta_urls).
        if (!req.runtime_state->output_files().empty()) {
            params.__isset.delta_urls = true;
            for (auto& it : req.runtime_state->output_files()) {
                params.delta_urls.push_back(_to_http_path(it));
            }
        }
        if (!params.delta_urls.empty()) {
            params.__isset.delta_urls = true;
        }
    }

    // ---- Load counters: either from the fragment-level runtime state, or
    // aggregated over per-instance runtime states when the former is empty. ----
    static std::string s_dpp_normal_all = "dpp.norm.ALL";
    static std::string s_dpp_abnormal_all = "dpp.abnorm.ALL";
    static std::string s_unselected_rows = "unselected.rows";
    int64_t num_rows_load_success = 0;
    int64_t num_rows_load_filtered = 0;
    int64_t num_rows_load_unselected = 0;
    if (req.runtime_state->num_rows_load_total() > 0 ||
        req.runtime_state->num_rows_load_filtered() > 0 ||
        req.runtime_state->num_finished_range() > 0) {
        params.__isset.load_counters = true;

        num_rows_load_success = req.runtime_state->num_rows_load_success();
        num_rows_load_filtered = req.runtime_state->num_rows_load_filtered();
        num_rows_load_unselected = req.runtime_state->num_rows_load_unselected();
        params.__isset.fragment_instance_reports = true;
        TFragmentInstanceReport t;
        t.__set_fragment_instance_id(req.runtime_state->fragment_instance_id());
        t.__set_num_finished_range(cast_set<int>(req.runtime_state->num_finished_range()));
        t.__set_loaded_rows(req.runtime_state->num_rows_load_total());
        t.__set_loaded_bytes(req.runtime_state->num_bytes_load_total());
        params.fragment_instance_reports.push_back(t);
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (rs->num_rows_load_total() > 0 || rs->num_rows_load_filtered() > 0 ||
                rs->num_finished_range() > 0) {
                params.__isset.load_counters = true;
                num_rows_load_success += rs->num_rows_load_success();
                num_rows_load_filtered += rs->num_rows_load_filtered();
                num_rows_load_unselected += rs->num_rows_load_unselected();
                params.__isset.fragment_instance_reports = true;
                TFragmentInstanceReport t;
                t.__set_fragment_instance_id(rs->fragment_instance_id());
                t.__set_num_finished_range(cast_set<int>(rs->num_finished_range()));
                t.__set_loaded_rows(rs->num_rows_load_total());
                t.__set_loaded_bytes(rs->num_bytes_load_total());
                params.fragment_instance_reports.push_back(t);
            }
        }
    }
    params.load_counters.emplace(s_dpp_normal_all, std::to_string(num_rows_load_success));
    params.load_counters.emplace(s_dpp_abnormal_all, std::to_string(num_rows_load_filtered));
    params.load_counters.emplace(s_unselected_rows, std::to_string(num_rows_load_unselected));

    if (!req.load_error_url.empty()) {
        params.__set_tracking_url(req.load_error_url);
    }
    if (!req.first_error_msg.empty()) {
        params.__set_first_error_msg(req.first_error_msg);
    }
    // WAL txn id/label: taken from whichever instance recorded one (last wins).
    for (auto* rs : req.runtime_states) {
        if (rs->wal_id() > 0) {
            params.__set_txn_id(rs->wal_id());
            params.__set_label(rs->import_label());
        }
    }
    // The blocks below all follow the same pattern: prefer data from the
    // fragment-level runtime state; otherwise gather it from all per-instance
    // runtime states.
    if (!req.runtime_state->export_output_files().empty()) {
        params.__isset.export_files = true;
        params.export_files = req.runtime_state->export_output_files();
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (!rs->export_output_files().empty()) {
                params.__isset.export_files = true;
                params.export_files.insert(params.export_files.end(),
                                           rs->export_output_files().begin(),
                                           rs->export_output_files().end());
            }
        }
    }
    if (auto tci = req.runtime_state->tablet_commit_infos(); !tci.empty()) {
        params.__isset.commitInfos = true;
        params.commitInfos.insert(params.commitInfos.end(), tci.begin(), tci.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_tci = rs->tablet_commit_infos(); !rs_tci.empty()) {
                params.__isset.commitInfos = true;
                params.commitInfos.insert(params.commitInfos.end(), rs_tci.begin(), rs_tci.end());
            }
        }
    }
    if (auto eti = req.runtime_state->error_tablet_infos(); !eti.empty()) {
        params.__isset.errorTabletInfos = true;
        params.errorTabletInfos.insert(params.errorTabletInfos.end(), eti.begin(), eti.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_eti = rs->error_tablet_infos(); !rs_eti.empty()) {
                params.__isset.errorTabletInfos = true;
                params.errorTabletInfos.insert(params.errorTabletInfos.end(), rs_eti.begin(),
                                               rs_eti.end());
            }
        }
    }
    if (auto hpu = req.runtime_state->hive_partition_updates(); !hpu.empty()) {
        params.__isset.hive_partition_updates = true;
        params.hive_partition_updates.insert(params.hive_partition_updates.end(), hpu.begin(),
                                             hpu.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_hpu = rs->hive_partition_updates(); !rs_hpu.empty()) {
                params.__isset.hive_partition_updates = true;
                params.hive_partition_updates.insert(params.hive_partition_updates.end(),
                                                     rs_hpu.begin(), rs_hpu.end());
            }
        }
    }
    if (auto icd = req.runtime_state->iceberg_commit_datas(); !icd.empty()) {
        params.__isset.iceberg_commit_datas = true;
        params.iceberg_commit_datas.insert(params.iceberg_commit_datas.end(), icd.begin(),
                                           icd.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_icd = rs->iceberg_commit_datas(); !rs_icd.empty()) {
                params.__isset.iceberg_commit_datas = true;
                params.iceberg_commit_datas.insert(params.iceberg_commit_datas.end(),
                                                   rs_icd.begin(), rs_icd.end());
            }
        }
    }

    if (auto mcd = req.runtime_state->mc_commit_datas(); !mcd.empty()) {
        params.__isset.mc_commit_datas = true;
        params.mc_commit_datas.insert(params.mc_commit_datas.end(), mcd.begin(), mcd.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_mcd = rs->mc_commit_datas(); !rs_mcd.empty()) {
                params.__isset.mc_commit_datas = true;
                params.mc_commit_datas.insert(params.mc_commit_datas.end(), rs_mcd.begin(),
                                              rs_mcd.end());
            }
        }
    }

    // Errors not yet reported to FE are drained into the report.
    req.runtime_state->get_unreported_errors(&(params.error_log));
    params.__isset.error_log = (!params.error_log.empty());

    if (_exec_env->cluster_info()->backend_id != 0) {
        params.__set_backend_id(_exec_env->cluster_info()->backend_id);
    }

    // ---- Send the report; retry once on transport failure after reopening. ----
    TReportExecStatusResult res;
    Status rpc_status;

    VLOG_DEBUG << "reportExecStatus params is "
               << apache::thrift::ThriftDebugString(params).c_str();
    if (!exec_status.ok()) {
        LOG(WARNING) << "report error status: " << exec_status.msg()
                     << " to coordinator: " << req.coord_addr
                     << ", query id: " << print_id(req.query_id);
    }
    try {
        try {
            (*coord)->reportExecStatus(res, params);
        } catch ([[maybe_unused]] apache::thrift::transport::TTransportException& e) {
#ifndef ADDRESS_SANITIZER
            LOG(WARNING) << "Retrying ReportExecStatus. query id: " << print_id(req.query_id)
                         << ", instance id: " << print_id(req.fragment_instance_id) << " to "
                         << req.coord_addr << ", err: " << e.what();
#endif
            // Reopen the connection and retry exactly once.
            rpc_status = coord->reopen();

            if (!rpc_status.ok()) {
                req.cancel_fn(rpc_status);
                return;
            }
            (*coord)->reportExecStatus(res, params);
        }

        rpc_status = Status::create<false>(res.status);
    } catch (apache::thrift::TException& e) {
        rpc_status = Status::InternalError("ReportExecStatus() to {} failed: {}",
                                           PrintThriftNetworkAddress(req.coord_addr), e.what());
    }

    if (!rpc_status.ok()) {
        LOG_INFO("Going to cancel query {} since report exec status got rpc failed: {}",
                 print_id(req.query_id), rpc_status.to_string());
        req.cancel_fn(rpc_status);
    }
}
2265
2266
108k
// Build and asynchronously send an execution-status report to the coordinator.
//
// @param done  true when the fragment has finished executing.
// @return OK when no report is needed or the report task was submitted;
//         NeedSendAgain when reporting is suppressed but the fragment is not
//         done; otherwise the thread-pool submission status.
Status PipelineFragmentContext::send_report(bool done) {
    Status exec_status = _query_ctx->exec_status();
    // If plan is done successfully, but _is_report_success is false,
    // no need to send report.
    // Load will set _is_report_success to true because load wants to know
    // the progress.
    if (!_is_report_success && done && exec_status.ok()) {
        return Status::OK();
    }

    // If both _is_report_success and _is_report_on_cancel are false,
    // then no matter whether the query succeeded or failed, no report is
    // needed. This may happen when the query limit is reached and an
    // internal cancellation is being processed.
    // When the limit is reached the fragment is also cancelled, but
    // _is_report_on_cancel will be set to false, to avoid sending a fault
    // report to the FE.
    if (!_is_report_success && !_is_report_on_cancel) {
        if (done) {
            // If done is true, the query finished successfully; we can safely
            // close the fragment instance without sending a report to the FE.
            return Status::OK();
        }
        return Status::NeedSendAgain("");
    }

    // Collect every task's runtime state so the report covers all tasks.
    std::vector<RuntimeState*> runtime_states;
    for (auto& tasks : _tasks) {
        for (auto& task : tasks) {
            runtime_states.push_back(task.second.get());
        }
    }

    // Prefer the error info recorded on the query context; fall back to the
    // fragment-local values when the query context has none.
    std::string load_error_url = _query_ctx->get_load_error_url().empty()
                                         ? get_load_error_url()
                                         : _query_ctx->get_load_error_url();
    std::string first_error_msg = _query_ctx->get_first_error_msg().empty()
                                          ? get_first_error_msg()
                                          : _query_ctx->get_first_error_msg();

    ReportStatusRequest req {.status = exec_status,
                             .runtime_states = runtime_states,
                             .done = done || !exec_status.ok(),
                             .coord_addr = _query_ctx->coord_addr,
                             .query_id = _query_id,
                             .fragment_id = _fragment_id,
                             .fragment_instance_id = TUniqueId(),
                             .backend_num = -1,
                             .runtime_state = _runtime_state.get(),
                             .load_error_url = load_error_url,
                             .first_error_msg = first_error_msg,
                             .cancel_fn = [this](const Status& reason) { cancel(reason); }};
    // Capture a shared_ptr to keep this fragment context alive while the
    // report runs on the fragment manager's thread pool.
    auto ctx = std::dynamic_pointer_cast<PipelineFragmentContext>(shared_from_this());
    return _exec_env->fragment_mgr()->get_thread_pool()->submit_func([this, req, ctx]() {
        SCOPED_ATTACH_TASK(ctx->get_query_ctx()->query_mem_tracker());
        _coordinator_callback(req);
        if (!req.done) {
            // More reports will follow; schedule the next one.
            ctx->refresh_next_report_time();
        }
    });
}
2326
2327
0
size_t PipelineFragmentContext::get_revocable_size(bool* has_running_task) const {
2328
0
    size_t res = 0;
2329
    // _tasks will be cleared during ~PipelineFragmentContext, so that it's safe
2330
    // here to traverse the vector.
2331
0
    for (const auto& task_instances : _tasks) {
2332
0
        for (const auto& task : task_instances) {
2333
0
            if (task.first->is_running()) {
2334
0
                LOG_EVERY_N(INFO, 50) << "Query: " << print_id(_query_id)
2335
0
                                      << " is running, task: " << (void*)task.first.get()
2336
0
                                      << ", is_running: " << task.first->is_running();
2337
0
                *has_running_task = true;
2338
0
                return 0;
2339
0
            }
2340
2341
0
            size_t revocable_size = task.first->get_revocable_size();
2342
0
            if (revocable_size >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
2343
0
                res += revocable_size;
2344
0
            }
2345
0
        }
2346
0
    }
2347
0
    return res;
2348
0
}
2349
2350
0
std::vector<PipelineTask*> PipelineFragmentContext::get_revocable_tasks() const {
2351
0
    std::vector<PipelineTask*> revocable_tasks;
2352
0
    for (const auto& task_instances : _tasks) {
2353
0
        for (const auto& task : task_instances) {
2354
0
            size_t revocable_size_ = task.first->get_revocable_size();
2355
2356
0
            if (revocable_size_ >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
2357
0
                revocable_tasks.emplace_back(task.first.get());
2358
0
            }
2359
0
        }
2360
0
    }
2361
0
    return revocable_tasks;
2362
0
}
2363
2364
0
std::string PipelineFragmentContext::debug_string() {
2365
0
    std::lock_guard<std::mutex> l(_task_mutex);
2366
0
    fmt::memory_buffer debug_string_buffer;
2367
0
    fmt::format_to(debug_string_buffer,
2368
0
                   "PipelineFragmentContext Info: _closed_tasks={}, _total_tasks={}, "
2369
0
                   "need_notify_close={}, fragment_id={}, _rec_cte_stage={}\n",
2370
0
                   _closed_tasks, _total_tasks, _need_notify_close, _fragment_id, _rec_cte_stage);
2371
0
    for (size_t j = 0; j < _tasks.size(); j++) {
2372
0
        fmt::format_to(debug_string_buffer, "Tasks in instance {}:\n", j);
2373
0
        for (size_t i = 0; i < _tasks[j].size(); i++) {
2374
0
            fmt::format_to(debug_string_buffer, "Task {}: {}\n", i,
2375
0
                           _tasks[j][i].first->debug_string());
2376
0
        }
2377
0
    }
2378
2379
0
    return fmt::to_string(debug_string_buffer);
2380
0
}
2381
2382
std::vector<std::shared_ptr<TRuntimeProfileTree>>
2383
440
PipelineFragmentContext::collect_realtime_profile() const {
2384
440
    std::vector<std::shared_ptr<TRuntimeProfileTree>> res;
2385
2386
    // we do not have mutex to protect pipeline_id_to_profile
2387
    // so we need to make sure this funciton is invoked after fragment context
2388
    // has already been prepared.
2389
440
    if (!_prepared) {
2390
0
        std::string msg =
2391
0
                "Query " + print_id(_query_id) + " collecting profile, but its not prepared";
2392
0
        DCHECK(false) << msg;
2393
0
        LOG_ERROR(msg);
2394
0
        return res;
2395
0
    }
2396
2397
    // Make sure first profile is fragment level profile
2398
440
    auto fragment_profile = std::make_shared<TRuntimeProfileTree>();
2399
440
    _fragment_level_profile->to_thrift(fragment_profile.get(), _runtime_state->profile_level());
2400
440
    res.push_back(fragment_profile);
2401
2402
    // pipeline_id_to_profile is initialized in prepare stage
2403
648
    for (auto pipeline_profile : _runtime_state->pipeline_id_to_profile()) {
2404
648
        auto profile_ptr = std::make_shared<TRuntimeProfileTree>();
2405
648
        pipeline_profile->to_thrift(profile_ptr.get(), _runtime_state->profile_level());
2406
648
        res.push_back(profile_ptr);
2407
648
    }
2408
2409
440
    return res;
2410
440
}
2411
2412
std::shared_ptr<TRuntimeProfileTree>
2413
440
PipelineFragmentContext::collect_realtime_load_channel_profile() const {
2414
    // we do not have mutex to protect pipeline_id_to_profile
2415
    // so we need to make sure this funciton is invoked after fragment context
2416
    // has already been prepared.
2417
440
    if (!_prepared) {
2418
0
        std::string msg =
2419
0
                "Query " + print_id(_query_id) + " collecting profile, but its not prepared";
2420
0
        DCHECK(false) << msg;
2421
0
        LOG_ERROR(msg);
2422
0
        return nullptr;
2423
0
    }
2424
2425
608
    for (const auto& tasks : _tasks) {
2426
1.06k
        for (const auto& task : tasks) {
2427
1.06k
            if (task.second->load_channel_profile() == nullptr) {
2428
0
                continue;
2429
0
            }
2430
2431
1.06k
            auto tmp_load_channel_profile = std::make_shared<TRuntimeProfileTree>();
2432
2433
1.06k
            task.second->load_channel_profile()->to_thrift(tmp_load_channel_profile.get(),
2434
1.06k
                                                           _runtime_state->profile_level());
2435
1.06k
            _runtime_state->load_channel_profile()->update(*tmp_load_channel_profile);
2436
1.06k
        }
2437
608
    }
2438
2439
440
    auto load_channel_profile = std::make_shared<TRuntimeProfileTree>();
2440
440
    _runtime_state->load_channel_profile()->to_thrift(load_channel_profile.get(),
2441
440
                                                      _runtime_state->profile_level());
2442
440
    return load_channel_profile;
2443
440
}
2444
2445
// Collect runtime filter IDs registered by all tasks in this PFC.
2446
// Used during recursive CTE stage transitions to know which filters to deregister
2447
// before creating the new PFC for the next recursion round.
2448
// Called from rerun_fragment(wait_for_destroy) while tasks are still closing.
2449
// Thread safety: safe because _tasks is structurally immutable after prepare() —
2450
// the vector sizes do not change, and individual RuntimeState filter sets are
2451
// written only during open() which has completed by the time we reach rerun.
2452
0
std::set<int> PipelineFragmentContext::get_deregister_runtime_filter() const {
2453
0
    std::set<int> result;
2454
0
    for (const auto& _task : _tasks) {
2455
0
        for (const auto& task : _task) {
2456
0
            auto set = task.first->runtime_state()->get_deregister_runtime_filter();
2457
0
            result.merge(set);
2458
0
        }
2459
0
    }
2460
0
    if (_runtime_state) {
2461
0
        auto set = _runtime_state->get_deregister_runtime_filter();
2462
0
        result.merge(set);
2463
0
    }
2464
0
    return result;
2465
0
}
2466
2467
106k
void PipelineFragmentContext::_release_resource() {
2468
106k
    std::lock_guard<std::mutex> l(_task_mutex);
2469
    // The memory released by the query end is recorded in the query mem tracker.
2470
106k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_ctx->query_mem_tracker());
2471
106k
    auto st = _query_ctx->exec_status();
2472
222k
    for (auto& _task : _tasks) {
2473
222k
        if (!_task.empty()) {
2474
222k
            _call_back(_task.front().first->runtime_state(), &st);
2475
222k
        }
2476
222k
    }
2477
106k
    _tasks.clear();
2478
106k
    _dag.clear();
2479
106k
    _pip_id_to_pipeline.clear();
2480
106k
    _pipelines.clear();
2481
106k
    _sink.reset();
2482
106k
    _root_op.reset();
2483
106k
    _runtime_filter_mgr_map.clear();
2484
106k
    _op_id_to_shared_state.clear();
2485
106k
}
2486
2487
} // namespace doris