Coverage Report

Created: 2026-04-20 14:38

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/exec/pipeline/pipeline_fragment_context.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "exec/pipeline/pipeline_fragment_context.h"
19
20
#include <gen_cpp/DataSinks_types.h>
21
#include <gen_cpp/FrontendService.h>
22
#include <gen_cpp/FrontendService_types.h>
23
#include <gen_cpp/PaloInternalService_types.h>
24
#include <gen_cpp/PlanNodes_types.h>
25
#include <pthread.h>
26
27
#include <algorithm>
28
#include <cstdlib>
29
// IWYU pragma: no_include <bits/chrono.h>
30
#include <fmt/format.h>
31
#include <thrift/Thrift.h>
32
#include <thrift/protocol/TDebugProtocol.h>
33
#include <thrift/transport/TTransportException.h>
34
35
#include <chrono> // IWYU pragma: keep
36
#include <map>
37
#include <memory>
38
#include <ostream>
39
#include <utility>
40
41
#include "cloud/config.h"
42
#include "common/cast_set.h"
43
#include "common/config.h"
44
#include "common/exception.h"
45
#include "common/logging.h"
46
#include "common/status.h"
47
#include "exec/exchange/local_exchange_sink_operator.h"
48
#include "exec/exchange/local_exchange_source_operator.h"
49
#include "exec/exchange/local_exchanger.h"
50
#include "exec/exchange/vdata_stream_mgr.h"
51
#include "exec/operator/aggregation_sink_operator.h"
52
#include "exec/operator/aggregation_source_operator.h"
53
#include "exec/operator/analytic_sink_operator.h"
54
#include "exec/operator/analytic_source_operator.h"
55
#include "exec/operator/assert_num_rows_operator.h"
56
#include "exec/operator/blackhole_sink_operator.h"
57
#include "exec/operator/bucketed_aggregation_sink_operator.h"
58
#include "exec/operator/bucketed_aggregation_source_operator.h"
59
#include "exec/operator/cache_sink_operator.h"
60
#include "exec/operator/cache_source_operator.h"
61
#include "exec/operator/datagen_operator.h"
62
#include "exec/operator/dict_sink_operator.h"
63
#include "exec/operator/distinct_streaming_aggregation_operator.h"
64
#include "exec/operator/empty_set_operator.h"
65
#include "exec/operator/es_scan_operator.h"
66
#include "exec/operator/exchange_sink_operator.h"
67
#include "exec/operator/exchange_source_operator.h"
68
#include "exec/operator/file_scan_operator.h"
69
#include "exec/operator/group_commit_block_sink_operator.h"
70
#include "exec/operator/group_commit_scan_operator.h"
71
#include "exec/operator/hashjoin_build_sink.h"
72
#include "exec/operator/hashjoin_probe_operator.h"
73
#include "exec/operator/hive_table_sink_operator.h"
74
#include "exec/operator/iceberg_delete_sink_operator.h"
75
#include "exec/operator/iceberg_merge_sink_operator.h"
76
#include "exec/operator/iceberg_table_sink_operator.h"
77
#include "exec/operator/jdbc_scan_operator.h"
78
#include "exec/operator/jdbc_table_sink_operator.h"
79
#include "exec/operator/local_merge_sort_source_operator.h"
80
#include "exec/operator/materialization_opertor.h"
81
#include "exec/operator/maxcompute_table_sink_operator.h"
82
#include "exec/operator/memory_scratch_sink_operator.h"
83
#include "exec/operator/meta_scan_operator.h"
84
#include "exec/operator/multi_cast_data_stream_sink.h"
85
#include "exec/operator/multi_cast_data_stream_source.h"
86
#include "exec/operator/nested_loop_join_build_operator.h"
87
#include "exec/operator/nested_loop_join_probe_operator.h"
88
#include "exec/operator/olap_scan_operator.h"
89
#include "exec/operator/olap_table_sink_operator.h"
90
#include "exec/operator/olap_table_sink_v2_operator.h"
91
#include "exec/operator/partition_sort_sink_operator.h"
92
#include "exec/operator/partition_sort_source_operator.h"
93
#include "exec/operator/partitioned_aggregation_sink_operator.h"
94
#include "exec/operator/partitioned_aggregation_source_operator.h"
95
#include "exec/operator/partitioned_hash_join_probe_operator.h"
96
#include "exec/operator/partitioned_hash_join_sink_operator.h"
97
#include "exec/operator/rec_cte_anchor_sink_operator.h"
98
#include "exec/operator/rec_cte_scan_operator.h"
99
#include "exec/operator/rec_cte_sink_operator.h"
100
#include "exec/operator/rec_cte_source_operator.h"
101
#include "exec/operator/repeat_operator.h"
102
#include "exec/operator/result_file_sink_operator.h"
103
#include "exec/operator/result_sink_operator.h"
104
#include "exec/operator/schema_scan_operator.h"
105
#include "exec/operator/select_operator.h"
106
#include "exec/operator/set_probe_sink_operator.h"
107
#include "exec/operator/set_sink_operator.h"
108
#include "exec/operator/set_source_operator.h"
109
#include "exec/operator/sort_sink_operator.h"
110
#include "exec/operator/sort_source_operator.h"
111
#include "exec/operator/spill_iceberg_table_sink_operator.h"
112
#include "exec/operator/spill_sort_sink_operator.h"
113
#include "exec/operator/spill_sort_source_operator.h"
114
#include "exec/operator/streaming_aggregation_operator.h"
115
#include "exec/operator/table_function_operator.h"
116
#include "exec/operator/tvf_table_sink_operator.h"
117
#include "exec/operator/union_sink_operator.h"
118
#include "exec/operator/union_source_operator.h"
119
#include "exec/pipeline/dependency.h"
120
#include "exec/pipeline/pipeline_task.h"
121
#include "exec/pipeline/task_scheduler.h"
122
#include "exec/runtime_filter/runtime_filter_mgr.h"
123
#include "exec/sort/topn_sorter.h"
124
#include "exec/spill/spill_file.h"
125
#include "io/fs/stream_load_pipe.h"
126
#include "load/stream_load/new_load_stream_mgr.h"
127
#include "runtime/exec_env.h"
128
#include "runtime/fragment_mgr.h"
129
#include "runtime/result_buffer_mgr.h"
130
#include "runtime/runtime_state.h"
131
#include "runtime/thread_context.h"
132
#include "service/backend_options.h"
133
#include "util/client_cache.h"
134
#include "util/countdown_latch.h"
135
#include "util/debug_util.h"
136
#include "util/network_util.h"
137
#include "util/uid_util.h"
138
139
namespace doris {
140
PipelineFragmentContext::PipelineFragmentContext(
141
        TUniqueId query_id, const TPipelineFragmentParams& request,
142
        std::shared_ptr<QueryContext> query_ctx, ExecEnv* exec_env,
143
        const std::function<void(RuntimeState*, Status*)>& call_back)
144
26
        : _query_id(std::move(query_id)),
145
26
          _fragment_id(request.fragment_id),
146
26
          _exec_env(exec_env),
147
26
          _query_ctx(std::move(query_ctx)),
148
26
          _call_back(call_back),
149
26
          _is_report_on_cancel(true),
150
26
          _params(request),
151
26
          _parallel_instances(_params.__isset.parallel_instances ? _params.parallel_instances : 0),
152
26
          _need_notify_close(request.__isset.need_notify_close ? request.need_notify_close
153
26
                                                               : false) {
154
26
    _fragment_watcher.start();
155
26
}
156
157
26
PipelineFragmentContext::~PipelineFragmentContext() {
158
26
    LOG_INFO("PipelineFragmentContext::~PipelineFragmentContext")
159
26
            .tag("query_id", print_id(_query_id))
160
26
            .tag("fragment_id", _fragment_id);
161
26
    _release_resource();
162
26
    {
163
        // The memory released by the query end is recorded in the query mem tracker.
164
26
        SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_ctx->query_mem_tracker());
165
26
        _runtime_state.reset();
166
26
        _query_ctx.reset();
167
26
    }
168
26
}
169
170
0
bool PipelineFragmentContext::is_timeout(timespec now) const {
171
0
    if (_timeout <= 0) {
172
0
        return false;
173
0
    }
174
0
    return _fragment_watcher.elapsed_time_seconds(now) > _timeout;
175
0
}
176
177
// notify_close() transitions the PFC from "waiting for external close notification" to
178
// "self-managed close". For recursive CTE fragments, the old PFC is kept alive until
179
// the rerun_fragment(wait_for_destroy) RPC calls this to trigger shutdown.
180
// Returns true if all tasks have already closed (i.e., the PFC can be safely destroyed).
181
0
bool PipelineFragmentContext::notify_close() {
182
0
    bool all_closed = false;
183
0
    bool need_remove = false;
184
0
    {
185
0
        std::lock_guard<std::mutex> l(_task_mutex);
186
0
        if (_closed_tasks >= _total_tasks) {
187
0
            if (_need_notify_close) {
188
                // Fragment was cancelled and waiting for notify to close.
189
                // Record that we need to remove from fragment mgr, but do it
190
                // after releasing _task_mutex to avoid ABBA deadlock with
191
                // dump_pipeline_tasks() (which acquires _pipeline_map lock
192
                // first, then _task_mutex via debug_string()).
193
0
                need_remove = true;
194
0
            }
195
0
            all_closed = true;
196
0
        }
197
        // make fragment release by self after cancel
198
0
        _need_notify_close = false;
199
0
    }
200
0
    if (need_remove) {
201
0
        _exec_env->fragment_mgr()->remove_pipeline_context({_query_id, _fragment_id});
202
0
    }
203
0
    return all_closed;
204
0
}
205
206
// Must not add lock in this method. Because it will call query ctx cancel. And
207
// QueryCtx cancel will call fragment ctx cancel. And Also Fragment ctx's running
208
// Method like exchange sink buffer will call query ctx cancel. If we add lock here
209
// There maybe dead lock.
210
0
void PipelineFragmentContext::cancel(const Status reason) {
211
0
    LOG_INFO("PipelineFragmentContext::cancel")
212
0
            .tag("query_id", print_id(_query_id))
213
0
            .tag("fragment_id", _fragment_id)
214
0
            .tag("reason", reason.to_string());
215
0
    if (notify_close()) {
216
0
        return;
217
0
    }
218
    // Timeout is a special error code, we need print current stack to debug timeout issue.
219
0
    if (reason.is<ErrorCode::TIMEOUT>()) {
220
0
        auto dbg_str = fmt::format("PipelineFragmentContext is cancelled due to timeout:\n{}",
221
0
                                   debug_string());
222
0
        LOG_LONG_STRING(WARNING, dbg_str);
223
0
    }
224
225
    // `ILLEGAL_STATE` means queries this fragment belongs to was not found in FE (maybe finished)
226
0
    if (reason.is<ErrorCode::ILLEGAL_STATE>()) {
227
0
        LOG_WARNING("PipelineFragmentContext is cancelled due to illegal state : {}",
228
0
                    debug_string());
229
0
    }
230
231
0
    if (reason.is<ErrorCode::MEM_LIMIT_EXCEEDED>() || reason.is<ErrorCode::MEM_ALLOC_FAILED>()) {
232
0
        print_profile("cancel pipeline, reason: " + reason.to_string());
233
0
    }
234
235
0
    if (auto error_url = get_load_error_url(); !error_url.empty()) {
236
0
        _query_ctx->set_load_error_url(error_url);
237
0
    }
238
239
0
    if (auto first_error_msg = get_first_error_msg(); !first_error_msg.empty()) {
240
0
        _query_ctx->set_first_error_msg(first_error_msg);
241
0
    }
242
243
0
    _query_ctx->cancel(reason, _fragment_id);
244
0
    if (reason.is<ErrorCode::LIMIT_REACH>()) {
245
0
        _is_report_on_cancel = false;
246
0
    } else {
247
0
        for (auto& id : _fragment_instance_ids) {
248
0
            LOG(WARNING) << "PipelineFragmentContext cancel instance: " << print_id(id);
249
0
        }
250
0
    }
251
    // Get pipe from new load stream manager and send cancel to it or the fragment may hang to wait read from pipe
252
    // For stream load the fragment's query_id == load id, it is set in FE.
253
0
    auto stream_load_ctx = _exec_env->new_load_stream_mgr()->get(_query_id);
254
0
    if (stream_load_ctx != nullptr) {
255
0
        stream_load_ctx->pipe->cancel(reason.to_string());
256
        // Set error URL here because after pipe is cancelled, stream load execution may return early.
257
        // We need to set the error URL at this point to ensure error information is properly
258
        // propagated to the client.
259
0
        stream_load_ctx->error_url = get_load_error_url();
260
0
        stream_load_ctx->first_error_msg = get_first_error_msg();
261
0
    }
262
263
0
    for (auto& tasks : _tasks) {
264
0
        for (auto& task : tasks) {
265
0
            task.first->unblock_all_dependencies();
266
0
        }
267
0
    }
268
0
}
269
270
0
PipelinePtr PipelineFragmentContext::add_pipeline(PipelinePtr parent, int idx) {
271
0
    PipelineId id = _next_pipeline_id++;
272
0
    auto pipeline = std::make_shared<Pipeline>(
273
0
            id, parent ? std::min(parent->num_tasks(), _num_instances) : _num_instances,
274
0
            parent ? parent->num_tasks() : _num_instances);
275
0
    if (idx >= 0) {
276
0
        _pipelines.insert(_pipelines.begin() + idx, pipeline);
277
0
    } else {
278
0
        _pipelines.emplace_back(pipeline);
279
0
    }
280
0
    if (parent) {
281
0
        parent->set_children(pipeline);
282
0
    }
283
0
    return pipeline;
284
0
}
285
286
0
Status PipelineFragmentContext::_build_and_prepare_full_pipeline(ThreadPool* thread_pool) {
287
0
    {
288
0
        SCOPED_TIMER(_build_pipelines_timer);
289
        // 2. Build pipelines with operators in this fragment.
290
0
        auto root_pipeline = add_pipeline();
291
0
        RETURN_IF_ERROR(_build_pipelines(_runtime_state->obj_pool(), *_query_ctx->desc_tbl,
292
0
                                         &_root_op, root_pipeline));
293
294
        // 3. Create sink operator
295
0
        if (!_params.fragment.__isset.output_sink) {
296
0
            return Status::InternalError("No output sink in this fragment!");
297
0
        }
298
0
        RETURN_IF_ERROR(_create_data_sink(_runtime_state->obj_pool(), _params.fragment.output_sink,
299
0
                                          _params.fragment.output_exprs, _params,
300
0
                                          root_pipeline->output_row_desc(), _runtime_state.get(),
301
0
                                          *_desc_tbl, root_pipeline->id()));
302
0
        RETURN_IF_ERROR(_sink->init(_params.fragment.output_sink));
303
0
        RETURN_IF_ERROR(root_pipeline->set_sink(_sink));
304
305
0
        for (PipelinePtr& pipeline : _pipelines) {
306
0
            DCHECK(pipeline->sink() != nullptr) << pipeline->operators().size();
307
0
            RETURN_IF_ERROR(pipeline->sink()->set_child(pipeline->operators().back()));
308
0
        }
309
0
    }
310
    // 4. Build local exchanger
311
0
    if (_runtime_state->enable_local_shuffle()) {
312
0
        SCOPED_TIMER(_plan_local_exchanger_timer);
313
0
        RETURN_IF_ERROR(_plan_local_exchange(_params.num_buckets,
314
0
                                             _params.bucket_seq_to_instance_idx,
315
0
                                             _params.shuffle_idx_to_instance_idx));
316
0
    }
317
318
    // 5. Initialize global states in pipelines.
319
0
    for (PipelinePtr& pipeline : _pipelines) {
320
0
        SCOPED_TIMER(_prepare_all_pipelines_timer);
321
0
        pipeline->children().clear();
322
0
        RETURN_IF_ERROR(pipeline->prepare(_runtime_state.get()));
323
0
    }
324
325
0
    {
326
0
        SCOPED_TIMER(_build_tasks_timer);
327
        // 6. Build pipeline tasks and initialize local state.
328
0
        RETURN_IF_ERROR(_build_pipeline_tasks(thread_pool));
329
0
    }
330
331
0
    return Status::OK();
332
0
}
333
334
0
Status PipelineFragmentContext::prepare(ThreadPool* thread_pool) {
335
0
    if (_prepared) {
336
0
        return Status::InternalError("Already prepared");
337
0
    }
338
0
    if (_params.__isset.query_options && _params.query_options.__isset.execution_timeout) {
339
0
        _timeout = _params.query_options.execution_timeout;
340
0
    }
341
342
0
    _fragment_level_profile = std::make_unique<RuntimeProfile>("PipelineContext");
343
0
    _prepare_timer = ADD_TIMER(_fragment_level_profile, "PrepareTime");
344
0
    SCOPED_TIMER(_prepare_timer);
345
0
    _build_pipelines_timer = ADD_TIMER(_fragment_level_profile, "BuildPipelinesTime");
346
0
    _init_context_timer = ADD_TIMER(_fragment_level_profile, "InitContextTime");
347
0
    _plan_local_exchanger_timer = ADD_TIMER(_fragment_level_profile, "PlanLocalLocalExchangerTime");
348
0
    _build_tasks_timer = ADD_TIMER(_fragment_level_profile, "BuildTasksTime");
349
0
    _prepare_all_pipelines_timer = ADD_TIMER(_fragment_level_profile, "PrepareAllPipelinesTime");
350
0
    {
351
0
        SCOPED_TIMER(_init_context_timer);
352
0
        cast_set(_num_instances, _params.local_params.size());
353
0
        _total_instances =
354
0
                _params.__isset.total_instances ? _params.total_instances : _num_instances;
355
356
0
        auto* fragment_context = this;
357
358
0
        if (_params.query_options.__isset.is_report_success) {
359
0
            fragment_context->set_is_report_success(_params.query_options.is_report_success);
360
0
        }
361
362
        // 1. Set up the global runtime state.
363
0
        _runtime_state = RuntimeState::create_unique(
364
0
                _params.query_id, _params.fragment_id, _params.query_options,
365
0
                _query_ctx->query_globals, _exec_env, _query_ctx.get());
366
0
        _runtime_state->set_task_execution_context(shared_from_this());
367
0
        SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_runtime_state->query_mem_tracker());
368
0
        if (_params.__isset.backend_id) {
369
0
            _runtime_state->set_backend_id(_params.backend_id);
370
0
        }
371
0
        if (_params.__isset.import_label) {
372
0
            _runtime_state->set_import_label(_params.import_label);
373
0
        }
374
0
        if (_params.__isset.db_name) {
375
0
            _runtime_state->set_db_name(_params.db_name);
376
0
        }
377
0
        if (_params.__isset.load_job_id) {
378
0
            _runtime_state->set_load_job_id(_params.load_job_id);
379
0
        }
380
381
0
        if (_params.is_simplified_param) {
382
0
            _desc_tbl = _query_ctx->desc_tbl;
383
0
        } else {
384
0
            DCHECK(_params.__isset.desc_tbl);
385
0
            RETURN_IF_ERROR(DescriptorTbl::create(_runtime_state->obj_pool(), _params.desc_tbl,
386
0
                                                  &_desc_tbl));
387
0
        }
388
0
        _runtime_state->set_desc_tbl(_desc_tbl);
389
0
        _runtime_state->set_num_per_fragment_instances(_params.num_senders);
390
0
        _runtime_state->set_load_stream_per_node(_params.load_stream_per_node);
391
0
        _runtime_state->set_total_load_streams(_params.total_load_streams);
392
0
        _runtime_state->set_num_local_sink(_params.num_local_sink);
393
394
        // init fragment_instance_ids
395
0
        const auto target_size = _params.local_params.size();
396
0
        _fragment_instance_ids.resize(target_size);
397
0
        for (size_t i = 0; i < _params.local_params.size(); i++) {
398
0
            auto fragment_instance_id = _params.local_params[i].fragment_instance_id;
399
0
            _fragment_instance_ids[i] = fragment_instance_id;
400
0
        }
401
0
    }
402
403
0
    RETURN_IF_ERROR(_build_and_prepare_full_pipeline(thread_pool));
404
405
0
    _init_next_report_time();
406
407
0
    _prepared = true;
408
0
    return Status::OK();
409
0
}
410
411
Status PipelineFragmentContext::_build_pipeline_tasks_for_instance(
412
        int instance_idx,
413
0
        const std::vector<std::shared_ptr<RuntimeProfile>>& pipeline_id_to_profile) {
414
0
    const auto& local_params = _params.local_params[instance_idx];
415
0
    auto fragment_instance_id = local_params.fragment_instance_id;
416
0
    auto runtime_filter_mgr = std::make_unique<RuntimeFilterMgr>(false);
417
0
    std::map<PipelineId, PipelineTask*> pipeline_id_to_task;
418
0
    auto get_shared_state = [&](PipelinePtr pipeline)
419
0
            -> std::map<int, std::pair<std::shared_ptr<BasicSharedState>,
420
0
                                       std::vector<std::shared_ptr<Dependency>>>> {
421
0
        std::map<int, std::pair<std::shared_ptr<BasicSharedState>,
422
0
                                std::vector<std::shared_ptr<Dependency>>>>
423
0
                shared_state_map;
424
0
        for (auto& op : pipeline->operators()) {
425
0
            auto source_id = op->operator_id();
426
0
            if (auto iter = _op_id_to_shared_state.find(source_id);
427
0
                iter != _op_id_to_shared_state.end()) {
428
0
                shared_state_map.insert({source_id, iter->second});
429
0
            }
430
0
        }
431
0
        for (auto sink_to_source_id : pipeline->sink()->dests_id()) {
432
0
            if (auto iter = _op_id_to_shared_state.find(sink_to_source_id);
433
0
                iter != _op_id_to_shared_state.end()) {
434
0
                shared_state_map.insert({sink_to_source_id, iter->second});
435
0
            }
436
0
        }
437
0
        return shared_state_map;
438
0
    };
439
440
0
    for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) {
441
0
        auto& pipeline = _pipelines[pip_idx];
442
0
        if (pipeline->num_tasks() > 1 || instance_idx == 0) {
443
0
            auto task_runtime_state = RuntimeState::create_unique(
444
0
                    local_params.fragment_instance_id, _params.query_id, _params.fragment_id,
445
0
                    _params.query_options, _query_ctx->query_globals, _exec_env, _query_ctx.get());
446
0
            {
447
                // Initialize runtime state for this task
448
0
                task_runtime_state->set_query_mem_tracker(_query_ctx->query_mem_tracker());
449
450
0
                task_runtime_state->set_task_execution_context(shared_from_this());
451
0
                task_runtime_state->set_be_number(local_params.backend_num);
452
453
0
                if (_params.__isset.backend_id) {
454
0
                    task_runtime_state->set_backend_id(_params.backend_id);
455
0
                }
456
0
                if (_params.__isset.import_label) {
457
0
                    task_runtime_state->set_import_label(_params.import_label);
458
0
                }
459
0
                if (_params.__isset.db_name) {
460
0
                    task_runtime_state->set_db_name(_params.db_name);
461
0
                }
462
0
                if (_params.__isset.load_job_id) {
463
0
                    task_runtime_state->set_load_job_id(_params.load_job_id);
464
0
                }
465
0
                if (_params.__isset.wal_id) {
466
0
                    task_runtime_state->set_wal_id(_params.wal_id);
467
0
                }
468
0
                if (_params.__isset.content_length) {
469
0
                    task_runtime_state->set_content_length(_params.content_length);
470
0
                }
471
472
0
                task_runtime_state->set_desc_tbl(_desc_tbl);
473
0
                task_runtime_state->set_per_fragment_instance_idx(local_params.sender_id);
474
0
                task_runtime_state->set_num_per_fragment_instances(_params.num_senders);
475
0
                task_runtime_state->resize_op_id_to_local_state(max_operator_id());
476
0
                task_runtime_state->set_max_operator_id(max_operator_id());
477
0
                task_runtime_state->set_load_stream_per_node(_params.load_stream_per_node);
478
0
                task_runtime_state->set_total_load_streams(_params.total_load_streams);
479
0
                task_runtime_state->set_num_local_sink(_params.num_local_sink);
480
481
0
                task_runtime_state->set_runtime_filter_mgr(runtime_filter_mgr.get());
482
0
            }
483
0
            auto cur_task_id = _total_tasks++;
484
0
            task_runtime_state->set_task_id(cur_task_id);
485
0
            task_runtime_state->set_task_num(pipeline->num_tasks());
486
0
            auto task = std::make_shared<PipelineTask>(
487
0
                    pipeline, cur_task_id, task_runtime_state.get(),
488
0
                    std::dynamic_pointer_cast<PipelineFragmentContext>(shared_from_this()),
489
0
                    pipeline_id_to_profile[pip_idx].get(), get_shared_state(pipeline),
490
0
                    instance_idx);
491
0
            pipeline->incr_created_tasks(instance_idx, task.get());
492
0
            pipeline_id_to_task.insert({pipeline->id(), task.get()});
493
0
            _tasks[instance_idx].emplace_back(
494
0
                    std::pair<std::shared_ptr<PipelineTask>, std::unique_ptr<RuntimeState>> {
495
0
                            std::move(task), std::move(task_runtime_state)});
496
0
        }
497
0
    }
498
499
    /**
500
         * Build DAG for pipeline tasks.
501
         * For example, we have
502
         *
503
         *   ExchangeSink (Pipeline1)     JoinBuildSink (Pipeline2)
504
         *            \                      /
505
         *          JoinProbeOperator1 (Pipeline1)    JoinBuildSink (Pipeline3)
506
         *                 \                          /
507
         *               JoinProbeOperator2 (Pipeline1)
508
         *
509
         * In this fragment, we have three pipelines and pipeline 1 depends on pipeline 2 and pipeline 3.
510
         * To build this DAG, `_dag` manage dependencies between pipelines by pipeline ID and
511
         * `pipeline_id_to_task` is used to find the task by a unique pipeline ID.
512
         *
513
         * Finally, we have two upstream dependencies in Pipeline1 corresponding to JoinProbeOperator1
514
         * and JoinProbeOperator2.
515
         */
516
0
    for (auto& _pipeline : _pipelines) {
517
0
        if (pipeline_id_to_task.contains(_pipeline->id())) {
518
0
            auto* task = pipeline_id_to_task[_pipeline->id()];
519
0
            DCHECK(task != nullptr);
520
521
            // If this task has upstream dependency, then inject it into this task.
522
0
            if (_dag.contains(_pipeline->id())) {
523
0
                auto& deps = _dag[_pipeline->id()];
524
0
                for (auto& dep : deps) {
525
0
                    if (pipeline_id_to_task.contains(dep)) {
526
0
                        auto ss = pipeline_id_to_task[dep]->get_sink_shared_state();
527
0
                        if (ss) {
528
0
                            task->inject_shared_state(ss);
529
0
                        } else {
530
0
                            pipeline_id_to_task[dep]->inject_shared_state(
531
0
                                    task->get_source_shared_state());
532
0
                        }
533
0
                    }
534
0
                }
535
0
            }
536
0
        }
537
0
    }
538
0
    for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) {
539
0
        if (pipeline_id_to_task.contains(_pipelines[pip_idx]->id())) {
540
0
            auto* task = pipeline_id_to_task[_pipelines[pip_idx]->id()];
541
0
            DCHECK(pipeline_id_to_profile[pip_idx]);
542
0
            std::vector<TScanRangeParams> scan_ranges;
543
0
            auto node_id = _pipelines[pip_idx]->operators().front()->node_id();
544
0
            if (local_params.per_node_scan_ranges.contains(node_id)) {
545
0
                scan_ranges = local_params.per_node_scan_ranges.find(node_id)->second;
546
0
            }
547
0
            RETURN_IF_ERROR_OR_CATCH_EXCEPTION(task->prepare(scan_ranges, local_params.sender_id,
548
0
                                                             _params.fragment.output_sink));
549
0
        }
550
0
    }
551
0
    {
552
0
        std::lock_guard<std::mutex> l(_state_map_lock);
553
0
        _runtime_filter_mgr_map[instance_idx] = std::move(runtime_filter_mgr);
554
0
    }
555
0
    return Status::OK();
556
0
}
557
558
0
Status PipelineFragmentContext::_build_pipeline_tasks(ThreadPool* thread_pool) {
559
0
    _total_tasks = 0;
560
0
    _closed_tasks = 0;
561
0
    const auto target_size = _params.local_params.size();
562
0
    _tasks.resize(target_size);
563
0
    _runtime_filter_mgr_map.resize(target_size);
564
0
    for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) {
565
0
        _pip_id_to_pipeline[_pipelines[pip_idx]->id()] = _pipelines[pip_idx].get();
566
0
    }
567
0
    auto pipeline_id_to_profile = _runtime_state->build_pipeline_profile(_pipelines.size());
568
569
0
    if (target_size > 1 &&
570
0
        (_runtime_state->query_options().__isset.parallel_prepare_threshold &&
571
0
         target_size > _runtime_state->query_options().parallel_prepare_threshold)) {
572
        // If instances parallelism is big enough ( > parallel_prepare_threshold), we will prepare all tasks by multi-threads
573
0
        std::vector<Status> prepare_status(target_size);
574
0
        int submitted_tasks = 0;
575
0
        Status submit_status;
576
0
        CountDownLatch latch((int)target_size);
577
0
        for (int i = 0; i < target_size; i++) {
578
0
            submit_status = thread_pool->submit_func([&, i]() {
579
0
                SCOPED_ATTACH_TASK(_query_ctx.get());
580
0
                prepare_status[i] = _build_pipeline_tasks_for_instance(i, pipeline_id_to_profile);
581
0
                latch.count_down();
582
0
            });
583
0
            if (LIKELY(submit_status.ok())) {
584
0
                submitted_tasks++;
585
0
            } else {
586
0
                break;
587
0
            }
588
0
        }
589
0
        latch.arrive_and_wait(target_size - submitted_tasks);
590
0
        if (UNLIKELY(!submit_status.ok())) {
591
0
            return submit_status;
592
0
        }
593
0
        for (int i = 0; i < submitted_tasks; i++) {
594
0
            if (!prepare_status[i].ok()) {
595
0
                return prepare_status[i];
596
0
            }
597
0
        }
598
0
    } else {
599
0
        for (int i = 0; i < target_size; i++) {
600
0
            RETURN_IF_ERROR(_build_pipeline_tasks_for_instance(i, pipeline_id_to_profile));
601
0
        }
602
0
    }
603
0
    _pipeline_parent_map.clear();
604
0
    _op_id_to_shared_state.clear();
605
606
0
    return Status::OK();
607
0
}
608
609
0
void PipelineFragmentContext::_init_next_report_time() {
610
0
    auto interval_s = config::pipeline_status_report_interval;
611
0
    if (_is_report_success && interval_s > 0 && _timeout > interval_s) {
612
0
        VLOG_FILE << "enable period report: fragment id=" << _fragment_id;
613
0
        uint64_t report_fragment_offset = (uint64_t)(rand() % interval_s) * NANOS_PER_SEC;
614
        // We don't want to wait longer than it takes to run the entire fragment.
615
0
        _previous_report_time =
616
0
                MonotonicNanos() + report_fragment_offset - (uint64_t)(interval_s)*NANOS_PER_SEC;
617
0
        _disable_period_report = false;
618
0
    }
619
0
}
620
621
0
void PipelineFragmentContext::refresh_next_report_time() {
622
0
    auto disable = _disable_period_report.load(std::memory_order_acquire);
623
0
    DCHECK(disable == true);
624
0
    _previous_report_time.store(MonotonicNanos(), std::memory_order_release);
625
0
    _disable_period_report.compare_exchange_strong(disable, false);
626
0
}
627
628
0
void PipelineFragmentContext::trigger_report_if_necessary() {
629
0
    if (!_is_report_success) {
630
0
        return;
631
0
    }
632
0
    auto disable = _disable_period_report.load(std::memory_order_acquire);
633
0
    if (disable) {
634
0
        return;
635
0
    }
636
0
    int32_t interval_s = config::pipeline_status_report_interval;
637
0
    if (interval_s <= 0) {
638
0
        LOG(WARNING) << "config::status_report_interval is equal to or less than zero, do not "
639
0
                        "trigger "
640
0
                        "report.";
641
0
    }
642
0
    uint64_t next_report_time = _previous_report_time.load(std::memory_order_acquire) +
643
0
                                (uint64_t)(interval_s)*NANOS_PER_SEC;
644
0
    if (MonotonicNanos() > next_report_time) {
645
0
        if (!_disable_period_report.compare_exchange_strong(disable, true,
646
0
                                                            std::memory_order_acq_rel)) {
647
0
            return;
648
0
        }
649
0
        if (VLOG_FILE_IS_ON) {
650
0
            VLOG_FILE << "Reporting "
651
0
                      << "profile for query_id " << print_id(_query_id)
652
0
                      << ", fragment id: " << _fragment_id;
653
654
0
            std::stringstream ss;
655
0
            _runtime_state->runtime_profile()->compute_time_in_profile();
656
0
            _runtime_state->runtime_profile()->pretty_print(&ss);
657
0
            if (_runtime_state->load_channel_profile()) {
658
0
                _runtime_state->load_channel_profile()->pretty_print(&ss);
659
0
            }
660
661
0
            VLOG_FILE << "Query " << print_id(get_query_id()) << " fragment " << get_fragment_id()
662
0
                      << " profile:\n"
663
0
                      << ss.str();
664
0
        }
665
0
        auto st = send_report(false);
666
0
        if (!st.ok()) {
667
0
            disable = true;
668
0
            _disable_period_report.compare_exchange_strong(disable, false,
669
0
                                                           std::memory_order_acq_rel);
670
0
        }
671
0
    }
672
0
}
673
674
Status PipelineFragmentContext::_build_pipelines(ObjectPool* pool, const DescriptorTbl& descs,
                                                 OperatorPtr* root, PipelinePtr cur_pipe) {
    // Rebuild the operator tree from the thrift plan, which lists nodes in
    // preorder. `root` receives the topmost operator.
    const auto& nodes = _params.fragment.plan.nodes;
    if (nodes.empty()) {
        throw Exception(ErrorCode::INTERNAL_ERROR, "Invalid plan which has no plan node!");
    }

    int node_idx = 0;
    RETURN_IF_ERROR(_create_tree_helper(pool, nodes, descs, nullptr, &node_idx, root, cur_pipe, 0,
                                        false, false));

    // Every thrift node must have been consumed exactly once by the traversal.
    if (node_idx + 1 != nodes.size()) {
        return Status::InternalError(
                "Plan tree only partially reconstructed. Not all thrift nodes were used.");
    }
    return Status::OK();
}
691
692
// Recursively materializes the operator at `*node_idx` of the preorder thrift
// node list, attaches it to `parent` (or publishes it via `root` when there is
// no parent), and then recurses into its children, advancing `*node_idx` as
// nodes are consumed. The two boolean flags propagate shuffle/bucket
// distribution requirements down the tree so local exchanges can be planned
// consistently later.
Status PipelineFragmentContext::_create_tree_helper(
        ObjectPool* pool, const std::vector<TPlanNode>& tnodes, const DescriptorTbl& descs,
        OperatorPtr parent, int* node_idx, OperatorPtr* root, PipelinePtr& cur_pipe, int child_idx,
        const bool followed_by_shuffled_operator, const bool require_bucket_distribution) {
    // propagate error case
    if (*node_idx >= tnodes.size()) {
        return Status::InternalError(
                "Failed to reconstruct plan tree from thrift. Node id: {}, number of nodes: {}",
                *node_idx, tnodes.size());
    }
    const TPlanNode& tnode = tnodes[*node_idx];

    int num_children = tnodes[*node_idx].num_children;
    bool current_followed_by_shuffled_operator = followed_by_shuffled_operator;
    bool current_require_bucket_distribution = require_bucket_distribution;
    // TODO: Create CacheOperator is confused now
    OperatorPtr op = nullptr;
    OperatorPtr cache_op = nullptr;
    // `_create_operator` may also mutate `cur_pipe` (e.g. start a new pipeline)
    // and can emit an extra `cache_op` that wraps `op`.
    RETURN_IF_ERROR(_create_operator(pool, tnodes[*node_idx], descs, op, cur_pipe,
                                     parent == nullptr ? -1 : parent->node_id(), child_idx,
                                     followed_by_shuffled_operator,
                                     current_require_bucket_distribution, cache_op));
    // Initialization must be done here. For example, group by expressions in agg will be used to
    // decide if a local shuffle should be planed, so it must be initialized here.
    RETURN_IF_ERROR(op->init(tnode, _runtime_state.get()));
    // assert(parent != nullptr || (node_idx == 0 && root_expr != nullptr));
    if (parent != nullptr) {
        // add to parent's child(s)
        RETURN_IF_ERROR(parent->set_child(cache_op ? cache_op : op));
    } else {
        *root = op;
    }
    /**
     * `ExchangeType::HASH_SHUFFLE` should be used if an operator is followed by a shuffled operator (shuffled hash join, union operator followed by co-located operators).
     *
     * For plan:
     * LocalExchange(id=0) -> Aggregation(id=1) -> ShuffledHashJoin(id=2)
     *                           Exchange(id=3) -> ShuffledHashJoinBuild(id=2)
     * We must ensure data distribution of `LocalExchange(id=0)` is same as Exchange(id=3).
     *
     * If an operator's is followed by a local exchange without shuffle (e.g. passthrough), a
     * shuffled local exchanger will be used before join so it is not followed by shuffle join.
     */
    // When this pipeline has no operators yet, the sink determines the
    // distribution requirement; otherwise the operator just created does.
    auto required_data_distribution =
            cur_pipe->operators().empty()
                    ? cur_pipe->sink()->required_data_distribution(_runtime_state.get())
                    : op->required_data_distribution(_runtime_state.get());
    // The "shuffled" flag survives through hash exchanges and NOOP exchanges;
    // any other exchange type breaks the chain.
    current_followed_by_shuffled_operator =
            ((followed_by_shuffled_operator ||
              (cur_pipe->operators().empty() ? cur_pipe->sink()->is_shuffled_operator()
                                             : op->is_shuffled_operator())) &&
             Pipeline::is_hash_exchange(required_data_distribution.distribution_type)) ||
            (followed_by_shuffled_operator &&
             required_data_distribution.distribution_type == ExchangeType::NOOP);

    // Same propagation rule for the bucket (co-located) distribution flag.
    current_require_bucket_distribution =
            ((require_bucket_distribution ||
              (cur_pipe->operators().empty() ? cur_pipe->sink()->is_colocated_operator()
                                             : op->is_colocated_operator())) &&
             Pipeline::is_hash_exchange(required_data_distribution.distribution_type)) ||
            (require_bucket_distribution &&
             required_data_distribution.distribution_type == ExchangeType::NOOP);

    // Leaf nodes (sources) decide whether the fragment uses a serial source.
    if (num_children == 0) {
        _use_serial_source = op->is_serial_operator();
    }
    // rely on that tnodes is preorder of the plan
    for (int i = 0; i < num_children; i++) {
        ++*node_idx;
        RETURN_IF_ERROR(_create_tree_helper(pool, tnodes, descs, op, node_idx, nullptr, cur_pipe, i,
                                            current_followed_by_shuffled_operator,
                                            current_require_bucket_distribution));

        // we are expecting a child, but have used all nodes
        // this means we have been given a bad tree and must fail
        if (*node_idx >= tnodes.size()) {
            return Status::InternalError(
                    "Failed to reconstruct plan tree from thrift. Node id: {}, number of "
                    "nodes: {}",
                    *node_idx, tnodes.size());
        }
    }

    return Status::OK();
}
777
778
void PipelineFragmentContext::_inherit_pipeline_properties(
779
        const DataDistribution& data_distribution, PipelinePtr pipe_with_source,
780
0
        PipelinePtr pipe_with_sink) {
781
0
    pipe_with_sink->set_num_tasks(pipe_with_source->num_tasks());
782
0
    pipe_with_source->set_num_tasks(_num_instances);
783
0
    pipe_with_source->set_data_distribution(data_distribution);
784
0
}
785
786
// Splits `cur_pipe` at operator index `idx` by inserting a local exchange:
// `new_pip` receives operators [0, idx) plus a LocalExchangeSink, while
// `cur_pipe` keeps [idx, end) fronted by a LocalExchangeSource. The shared
// exchanger type is chosen from `data_distribution.distribution_type`.
Status PipelineFragmentContext::_add_local_exchange_impl(
        int idx, ObjectPool* pool, PipelinePtr cur_pipe, PipelinePtr new_pip,
        DataDistribution data_distribution, bool* do_local_exchange, int num_buckets,
        const std::map<int, int>& bucket_seq_to_instance_idx,
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
    auto& operators = cur_pipe->operators();
    const auto downstream_pipeline_id = cur_pipe->id();
    auto local_exchange_id = next_operator_id();
    // 1. Create a new pipeline with local exchange sink.
    DataSinkOperatorPtr sink;
    auto sink_id = next_sink_operator_id();

    /**
     * `bucket_seq_to_instance_idx` is empty if no scan operator is contained in this fragment.
     * So co-located operators(e.g. Agg, Analytic) should use `HASH_SHUFFLE` instead of `BUCKET_HASH_SHUFFLE`.
     */
    const bool followed_by_shuffled_operator =
            operators.size() > idx ? operators[idx]->followed_by_shuffled_operator()
                                   : cur_pipe->sink()->followed_by_shuffled_operator();
    const bool use_global_hash_shuffle = bucket_seq_to_instance_idx.empty() &&
                                         !shuffle_idx_to_instance_idx.contains(-1) &&
                                         followed_by_shuffled_operator && !_use_serial_source;
    sink = std::make_shared<LocalExchangeSinkOperatorX>(
            sink_id, local_exchange_id, use_global_hash_shuffle ? _total_instances : _num_instances,
            data_distribution.partition_exprs, bucket_seq_to_instance_idx);
    if (bucket_seq_to_instance_idx.empty() &&
        data_distribution.distribution_type == ExchangeType::BUCKET_HASH_SHUFFLE) {
        data_distribution.distribution_type = ExchangeType::HASH_SHUFFLE;
    }
    RETURN_IF_ERROR(new_pip->set_sink(sink));
    RETURN_IF_ERROR(new_pip->sink()->init(_runtime_state.get(), data_distribution.distribution_type,
                                          num_buckets, use_global_hash_shuffle,
                                          shuffle_idx_to_instance_idx));

    // 2. Create and initialize LocalExchangeSharedState.
    // The free-blocks limit is identical for every exchanger type, so compute it
    // once instead of repeating the ternary in each case below.
    const int free_block_limit =
            _runtime_state->query_options().__isset.local_exchange_free_blocks_limit
                    ? cast_set<int>(
                              _runtime_state->query_options().local_exchange_free_blocks_limit)
                    : 0;
    std::shared_ptr<LocalExchangeSharedState> shared_state =
            LocalExchangeSharedState::create_shared(_num_instances);
    switch (data_distribution.distribution_type) {
    case ExchangeType::HASH_SHUFFLE:
        shared_state->exchanger = ShuffleExchanger::create_unique(
                std::max(cur_pipe->num_tasks(), _num_instances), _num_instances,
                use_global_hash_shuffle ? _total_instances : _num_instances, free_block_limit);
        break;
    case ExchangeType::BUCKET_HASH_SHUFFLE:
        shared_state->exchanger = BucketShuffleExchanger::create_unique(
                std::max(cur_pipe->num_tasks(), _num_instances), _num_instances, num_buckets,
                free_block_limit);
        break;
    case ExchangeType::PASSTHROUGH:
        shared_state->exchanger = PassthroughExchanger::create_unique(
                cur_pipe->num_tasks(), _num_instances, free_block_limit);
        break;
    case ExchangeType::BROADCAST:
        shared_state->exchanger = BroadcastExchanger::create_unique(
                cur_pipe->num_tasks(), _num_instances, free_block_limit);
        break;
    case ExchangeType::PASS_TO_ONE:
        if (_runtime_state->enable_share_hash_table_for_broadcast_join()) {
            // If shared hash table is enabled for BJ, hash table will be built by only one task
            shared_state->exchanger = PassToOneExchanger::create_unique(
                    cur_pipe->num_tasks(), _num_instances, free_block_limit);
        } else {
            shared_state->exchanger = BroadcastExchanger::create_unique(
                    cur_pipe->num_tasks(), _num_instances, free_block_limit);
        }
        break;
    case ExchangeType::ADAPTIVE_PASSTHROUGH:
        shared_state->exchanger = AdaptivePassthroughExchanger::create_unique(
                std::max(cur_pipe->num_tasks(), _num_instances), _num_instances, free_block_limit);
        break;
    default:
        return Status::InternalError("Unsupported local exchange type : " +
                                     std::to_string((int)data_distribution.distribution_type));
    }
    shared_state->create_source_dependencies(_num_instances, local_exchange_id, local_exchange_id,
                                             "LOCAL_EXCHANGE_OPERATOR");
    shared_state->create_sink_dependency(sink_id, local_exchange_id, "LOCAL_EXCHANGE_SINK");
    _op_id_to_shared_state.insert({local_exchange_id, {shared_state, shared_state->sink_deps}});

    // 3. Set two pipelines' operator list. For example, split pipeline [Scan - AggSink] to
    // pipeline1 [Scan - LocalExchangeSink] and pipeline2 [LocalExchangeSource - AggSink].

    // 3.1 Initialize new pipeline's operator list.
    std::copy(operators.begin(), operators.begin() + idx,
              std::inserter(new_pip->operators(), new_pip->operators().end()));

    // 3.2 Erase unused operators in previous pipeline.
    operators.erase(operators.begin(), operators.begin() + idx);

    // 4. Initialize LocalExchangeSource and insert it into this pipeline.
    OperatorPtr source_op;
    source_op = std::make_shared<LocalExchangeSourceOperatorX>(pool, local_exchange_id);
    RETURN_IF_ERROR(source_op->set_child(new_pip->operators().back()));
    RETURN_IF_ERROR(source_op->init(data_distribution.distribution_type));
    if (!operators.empty()) {
        // Reset the child link before rebinding it to the new source.
        RETURN_IF_ERROR(operators.front()->set_child(nullptr));
        RETURN_IF_ERROR(operators.front()->set_child(source_op));
    }
    operators.insert(operators.begin(), source_op);

    // 5. Set children for two pipelines separately. Children feeding operators
    // that moved into `new_pip` follow them; the rest stay with `cur_pipe`.
    std::vector<std::shared_ptr<Pipeline>> new_children;
    std::vector<PipelineId> edges_with_source;
    for (const auto& child : cur_pipe->children()) {
        bool found = false;
        for (const auto& op : new_pip->operators()) {
            if (child->sink()->node_id() == op->node_id()) {
                new_pip->set_children(child);
                found = true;
            }
        }
        if (!found) {
            new_children.push_back(child);
            edges_with_source.push_back(child->id());
        }
    }
    new_children.push_back(new_pip);
    edges_with_source.push_back(new_pip->id());

    // 6. Set DAG for new pipelines.
    if (!new_pip->children().empty()) {
        std::vector<PipelineId> edges_with_sink;
        for (const auto& child : new_pip->children()) {
            edges_with_sink.push_back(child->id());
        }
        _dag.insert({new_pip->id(), edges_with_sink});
    }
    cur_pipe->set_children(new_children);
    _dag[downstream_pipeline_id] = edges_with_source;
    RETURN_IF_ERROR(new_pip->sink()->set_child(new_pip->operators().back()));
    RETURN_IF_ERROR(cur_pipe->sink()->set_child(nullptr));
    RETURN_IF_ERROR(cur_pipe->sink()->set_child(cur_pipe->operators().back()));

    // 7. Inherit properties from current pipeline.
    _inherit_pipeline_properties(data_distribution, cur_pipe, new_pip);
    return Status::OK();
}
950
951
// Inserts a local exchange before operator `idx` of `cur_pipe` when the
// requested data distribution demands one; sets `*do_local_exchange` when a
// split actually happened.
Status PipelineFragmentContext::_add_local_exchange(
        int pip_idx, int idx, int node_id, ObjectPool* pool, PipelinePtr cur_pipe,
        DataDistribution data_distribution, bool* do_local_exchange, int num_buckets,
        const std::map<int, int>& bucket_seq_to_instance_idx,
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
    // A local exchange is pointless with a single instance or a single upstream
    // task, and is only inserted when this pipeline actually needs one here.
    if (_num_instances <= 1 || cur_pipe->num_tasks_of_parent() <= 1 ||
        !cur_pipe->need_to_local_exchange(data_distribution, idx)) {
        return Status::OK();
    }
    *do_local_exchange = true;

    const auto total_op_num = cur_pipe->operators().size();
    auto new_pip = add_pipeline(cur_pipe, pip_idx + 1);
    RETURN_IF_ERROR(_add_local_exchange_impl(
            idx, pool, cur_pipe, new_pip, data_distribution, do_local_exchange, num_buckets,
            bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx));

    // The split must conserve operators, adding exactly one (the local
    // exchange source).
    CHECK(total_op_num + 1 == cur_pipe->operators().size() + new_pip->operators().size())
            << "total_op_num: " << total_op_num
            << " cur_pipe->operators().size(): " << cur_pipe->operators().size()
            << " new_pip->operators().size(): " << new_pip->operators().size();

    // There are some local shuffles with relatively heavy operations on the sink.
    // If the local sink concurrency is 1 and the local source concurrency is n, the sink becomes
    // a bottleneck, so a local passthrough is inserted to raise the sink's concurrency:
    // op -> local sink(1) -> local source (n)
    // op -> local passthrough(1) -> local passthrough(n) ->  local sink(n) -> local source (n)
    if (cur_pipe->num_tasks() > 1 && new_pip->num_tasks() == 1 &&
        Pipeline::heavy_operations_on_the_sink(data_distribution.distribution_type)) {
        auto passthrough_pip = add_pipeline(new_pip, pip_idx + 2);
        RETURN_IF_ERROR(_add_local_exchange_impl(
                cast_set<int>(new_pip->operators().size()), pool, new_pip, passthrough_pip,
                DataDistribution(ExchangeType::PASSTHROUGH), do_local_exchange, num_buckets,
                bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx));
    }
    return Status::OK();
}
992
993
Status PipelineFragmentContext::_plan_local_exchange(
        int num_buckets, const std::map<int, int>& bucket_seq_to_instance_idx,
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
    // Walk pipelines bottom-up so each child's data distribution is settled
    // before its parent inspects it.
    for (int pip_idx = cast_set<int>(_pipelines.size()) - 1; pip_idx >= 0; pip_idx--) {
        auto& pip = _pipelines[pip_idx];
        pip->init_data_distribution(_runtime_state.get());
        // Inherit a child's distribution when that child feeds this pipeline's
        // first operator (i.e. the child is not a join operator's child).
        for (auto& child : pip->children()) {
            if (child->sink()->node_id() == pip->operators().front()->node_id()) {
                pip->set_data_distribution(child->data_distribution());
            }
        }

        // if 'num_buckets == 0' means the fragment is colocated by exchange node not the
        // scan node. so here use `_num_instance` to replace the `num_buckets` to prevent dividing 0
        // still keep colocate plan after local shuffle
        RETURN_IF_ERROR(_plan_local_exchange(num_buckets, pip_idx, pip,
                                             bucket_seq_to_instance_idx,
                                             shuffle_idx_to_instance_idx));
    }
    return Status::OK();
}
1017
1018
// Plans local exchanges for a single pipeline: scans its operators (and finally
// its sink) and inserts a local exchange wherever the required data
// distribution demands one. Each insertion splits the pipeline, so the scan
// restarts over the shortened operator list until no more splits occur.
Status PipelineFragmentContext::_plan_local_exchange(
        int num_buckets, int pip_idx, PipelinePtr pip,
        const std::map<int, int>& bucket_seq_to_instance_idx,
        const std::map<int, int>& shuffle_idx_to_instance_idx) {
    // Start at 1: operator 0 is the pipeline's source and never needs an
    // exchange in front of it.
    int idx = 1;
    bool do_local_exchange = false;
    do {
        // Re-fetch the operator list each round: a split in the previous round
        // rewrote it.
        auto& ops = pip->operators();
        do_local_exchange = false;
        // Plan local exchange for each operator.
        for (; idx < ops.size();) {
            if (ops[idx]->required_data_distribution(_runtime_state.get()).need_local_exchange()) {
                RETURN_IF_ERROR(_add_local_exchange(
                        pip_idx, idx, ops[idx]->node_id(), _runtime_state->obj_pool(), pip,
                        ops[idx]->required_data_distribution(_runtime_state.get()),
                        &do_local_exchange, num_buckets, bucket_seq_to_instance_idx,
                        shuffle_idx_to_instance_idx));
            }
            if (do_local_exchange) {
                // If local exchange is needed for current operator, we will split this pipeline to
                // two pipelines by local exchange sink/source. And then we need to process remaining
                // operators in this pipeline so we set idx to 2 (0 is local exchange source and 1
                // is current operator was already processed) and continue to plan local exchange.
                idx = 2;
                break;
            }
            idx++;
        }
    } while (do_local_exchange);
    // Finally, the sink itself may require a local exchange in front of it.
    if (pip->sink()->required_data_distribution(_runtime_state.get()).need_local_exchange()) {
        RETURN_IF_ERROR(_add_local_exchange(
                pip_idx, idx, pip->sink()->node_id(), _runtime_state->obj_pool(), pip,
                pip->sink()->required_data_distribution(_runtime_state.get()), &do_local_exchange,
                num_buckets, bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx));
    }
    return Status::OK();
}
1055
1056
Status PipelineFragmentContext::_create_data_sink(ObjectPool* pool, const TDataSink& thrift_sink,
1057
                                                  const std::vector<TExpr>& output_exprs,
1058
                                                  const TPipelineFragmentParams& params,
1059
                                                  const RowDescriptor& row_desc,
1060
                                                  RuntimeState* state, DescriptorTbl& desc_tbl,
1061
0
                                                  PipelineId cur_pipeline_id) {
1062
0
    switch (thrift_sink.type) {
1063
0
    case TDataSinkType::DATA_STREAM_SINK: {
1064
0
        if (!thrift_sink.__isset.stream_sink) {
1065
0
            return Status::InternalError("Missing data stream sink.");
1066
0
        }
1067
0
        _sink = std::make_shared<ExchangeSinkOperatorX>(
1068
0
                state, row_desc, next_sink_operator_id(), thrift_sink.stream_sink,
1069
0
                params.destinations, _fragment_instance_ids);
1070
0
        break;
1071
0
    }
1072
0
    case TDataSinkType::RESULT_SINK: {
1073
0
        if (!thrift_sink.__isset.result_sink) {
1074
0
            return Status::InternalError("Missing data buffer sink.");
1075
0
        }
1076
1077
0
        _sink = std::make_shared<ResultSinkOperatorX>(next_sink_operator_id(), row_desc,
1078
0
                                                      output_exprs, thrift_sink.result_sink);
1079
0
        break;
1080
0
    }
1081
0
    case TDataSinkType::DICTIONARY_SINK: {
1082
0
        if (!thrift_sink.__isset.dictionary_sink) {
1083
0
            return Status::InternalError("Missing dict sink.");
1084
0
        }
1085
1086
0
        _sink = std::make_shared<DictSinkOperatorX>(next_sink_operator_id(), row_desc, output_exprs,
1087
0
                                                    thrift_sink.dictionary_sink);
1088
0
        break;
1089
0
    }
1090
0
    case TDataSinkType::GROUP_COMMIT_OLAP_TABLE_SINK:
1091
0
    case TDataSinkType::OLAP_TABLE_SINK: {
1092
0
        if (state->query_options().enable_memtable_on_sink_node &&
1093
0
            !_has_inverted_index_v1_or_partial_update(thrift_sink.olap_table_sink) &&
1094
0
            !config::is_cloud_mode()) {
1095
0
            _sink = std::make_shared<OlapTableSinkV2OperatorX>(pool, next_sink_operator_id(),
1096
0
                                                               row_desc, output_exprs);
1097
0
        } else {
1098
0
            _sink = std::make_shared<OlapTableSinkOperatorX>(pool, next_sink_operator_id(),
1099
0
                                                             row_desc, output_exprs);
1100
0
        }
1101
0
        break;
1102
0
    }
1103
0
    case TDataSinkType::GROUP_COMMIT_BLOCK_SINK: {
1104
0
        DCHECK(thrift_sink.__isset.olap_table_sink);
1105
0
        DCHECK(state->get_query_ctx() != nullptr);
1106
0
        state->get_query_ctx()->query_mem_tracker()->is_group_commit_load = true;
1107
0
        _sink = std::make_shared<GroupCommitBlockSinkOperatorX>(next_sink_operator_id(), row_desc,
1108
0
                                                                output_exprs);
1109
0
        break;
1110
0
    }
1111
0
    case TDataSinkType::HIVE_TABLE_SINK: {
1112
0
        if (!thrift_sink.__isset.hive_table_sink) {
1113
0
            return Status::InternalError("Missing hive table sink.");
1114
0
        }
1115
0
        _sink = std::make_shared<HiveTableSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1116
0
                                                         output_exprs);
1117
0
        break;
1118
0
    }
1119
0
    case TDataSinkType::ICEBERG_TABLE_SINK: {
1120
0
        if (!thrift_sink.__isset.iceberg_table_sink) {
1121
0
            return Status::InternalError("Missing iceberg table sink.");
1122
0
        }
1123
0
        if (thrift_sink.iceberg_table_sink.__isset.sort_info) {
1124
0
            _sink = std::make_shared<SpillIcebergTableSinkOperatorX>(pool, next_sink_operator_id(),
1125
0
                                                                     row_desc, output_exprs);
1126
0
        } else {
1127
0
            _sink = std::make_shared<IcebergTableSinkOperatorX>(pool, next_sink_operator_id(),
1128
0
                                                                row_desc, output_exprs);
1129
0
        }
1130
0
        break;
1131
0
    }
1132
0
    case TDataSinkType::ICEBERG_DELETE_SINK: {
1133
0
        if (!thrift_sink.__isset.iceberg_delete_sink) {
1134
0
            return Status::InternalError("Missing iceberg delete sink.");
1135
0
        }
1136
0
        _sink = std::make_shared<IcebergDeleteSinkOperatorX>(pool, next_sink_operator_id(),
1137
0
                                                             row_desc, output_exprs);
1138
0
        break;
1139
0
    }
1140
0
    case TDataSinkType::ICEBERG_MERGE_SINK: {
1141
0
        if (!thrift_sink.__isset.iceberg_merge_sink) {
1142
0
            return Status::InternalError("Missing iceberg merge sink.");
1143
0
        }
1144
0
        _sink = std::make_shared<IcebergMergeSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1145
0
                                                            output_exprs);
1146
0
        break;
1147
0
    }
1148
0
    case TDataSinkType::MAXCOMPUTE_TABLE_SINK: {
1149
0
        if (!thrift_sink.__isset.max_compute_table_sink) {
1150
0
            return Status::InternalError("Missing max compute table sink.");
1151
0
        }
1152
0
        _sink = std::make_shared<MCTableSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1153
0
                                                       output_exprs);
1154
0
        break;
1155
0
    }
1156
0
    case TDataSinkType::JDBC_TABLE_SINK: {
1157
0
        if (!thrift_sink.__isset.jdbc_table_sink) {
1158
0
            return Status::InternalError("Missing data jdbc sink.");
1159
0
        }
1160
0
        if (config::enable_java_support) {
1161
0
            _sink = std::make_shared<JdbcTableSinkOperatorX>(row_desc, next_sink_operator_id(),
1162
0
                                                             output_exprs);
1163
0
        } else {
1164
0
            return Status::InternalError(
1165
0
                    "Jdbc table sink is not enabled, you can change be config "
1166
0
                    "enable_java_support to true and restart be.");
1167
0
        }
1168
0
        break;
1169
0
    }
1170
0
    case TDataSinkType::MEMORY_SCRATCH_SINK: {
1171
0
        if (!thrift_sink.__isset.memory_scratch_sink) {
1172
0
            return Status::InternalError("Missing data buffer sink.");
1173
0
        }
1174
1175
0
        _sink = std::make_shared<MemoryScratchSinkOperatorX>(row_desc, next_sink_operator_id(),
1176
0
                                                             output_exprs);
1177
0
        break;
1178
0
    }
1179
0
    case TDataSinkType::RESULT_FILE_SINK: {
1180
0
        if (!thrift_sink.__isset.result_file_sink) {
1181
0
            return Status::InternalError("Missing result file sink.");
1182
0
        }
1183
1184
        // Result file sink is not the top sink
1185
0
        if (params.__isset.destinations && !params.destinations.empty()) {
1186
0
            _sink = std::make_shared<ResultFileSinkOperatorX>(
1187
0
                    next_sink_operator_id(), row_desc, thrift_sink.result_file_sink,
1188
0
                    params.destinations, output_exprs, desc_tbl);
1189
0
        } else {
1190
0
            _sink = std::make_shared<ResultFileSinkOperatorX>(next_sink_operator_id(), row_desc,
1191
0
                                                              output_exprs);
1192
0
        }
1193
0
        break;
1194
0
    }
1195
0
    case TDataSinkType::MULTI_CAST_DATA_STREAM_SINK: {
1196
0
        DCHECK(thrift_sink.__isset.multi_cast_stream_sink);
1197
0
        DCHECK_GT(thrift_sink.multi_cast_stream_sink.sinks.size(), 0);
1198
0
        auto sink_id = next_sink_operator_id();
1199
0
        const int multi_cast_node_id = sink_id;
1200
0
        auto sender_size = thrift_sink.multi_cast_stream_sink.sinks.size();
1201
        // one sink has multiple sources.
1202
0
        std::vector<int> sources;
1203
0
        for (int i = 0; i < sender_size; ++i) {
1204
0
            auto source_id = next_operator_id();
1205
0
            sources.push_back(source_id);
1206
0
        }
1207
1208
0
        _sink = std::make_shared<MultiCastDataStreamSinkOperatorX>(
1209
0
                sink_id, multi_cast_node_id, sources, pool, thrift_sink.multi_cast_stream_sink);
1210
0
        for (int i = 0; i < sender_size; ++i) {
1211
0
            auto new_pipeline = add_pipeline();
1212
            // use to exchange sink
1213
0
            RowDescriptor* exchange_row_desc = nullptr;
1214
0
            {
1215
0
                const auto& tmp_row_desc =
1216
0
                        !thrift_sink.multi_cast_stream_sink.sinks[i].output_exprs.empty()
1217
0
                                ? RowDescriptor(state->desc_tbl(),
1218
0
                                                {thrift_sink.multi_cast_stream_sink.sinks[i]
1219
0
                                                         .output_tuple_id})
1220
0
                                : row_desc;
1221
0
                exchange_row_desc = pool->add(new RowDescriptor(tmp_row_desc));
1222
0
            }
1223
0
            auto source_id = sources[i];
1224
0
            OperatorPtr source_op;
1225
            // 1. create and set the source operator of multi_cast_data_stream_source for new pipeline
1226
0
            source_op = std::make_shared<MultiCastDataStreamerSourceOperatorX>(
1227
0
                    /*node_id*/ source_id, /*consumer_id*/ i, pool,
1228
0
                    thrift_sink.multi_cast_stream_sink.sinks[i], row_desc,
1229
0
                    /*operator_id=*/source_id);
1230
0
            RETURN_IF_ERROR(new_pipeline->add_operator(
1231
0
                    source_op, params.__isset.parallel_instances ? params.parallel_instances : 0));
1232
            // 2. create and set sink operator of data stream sender for new pipeline
1233
1234
0
            DataSinkOperatorPtr sink_op;
1235
0
            sink_op = std::make_shared<ExchangeSinkOperatorX>(
1236
0
                    state, *exchange_row_desc, next_sink_operator_id(),
1237
0
                    thrift_sink.multi_cast_stream_sink.sinks[i],
1238
0
                    thrift_sink.multi_cast_stream_sink.destinations[i], _fragment_instance_ids);
1239
1240
0
            RETURN_IF_ERROR(new_pipeline->set_sink(sink_op));
1241
0
            {
1242
0
                TDataSink* t = pool->add(new TDataSink());
1243
0
                t->stream_sink = thrift_sink.multi_cast_stream_sink.sinks[i];
1244
0
                RETURN_IF_ERROR(sink_op->init(*t));
1245
0
            }
1246
1247
            // 3. set dependency dag
1248
0
            _dag[new_pipeline->id()].push_back(cur_pipeline_id);
1249
0
        }
1250
0
        if (sources.empty()) {
1251
0
            return Status::InternalError("size of sources must be greater than 0");
1252
0
        }
1253
0
        break;
1254
0
    }
1255
0
    case TDataSinkType::BLACKHOLE_SINK: {
1256
0
        if (!thrift_sink.__isset.blackhole_sink) {
1257
0
            return Status::InternalError("Missing blackhole sink.");
1258
0
        }
1259
1260
0
        _sink.reset(new BlackholeSinkOperatorX(next_sink_operator_id()));
1261
0
        break;
1262
0
    }
1263
0
    case TDataSinkType::TVF_TABLE_SINK: {
1264
0
        if (!thrift_sink.__isset.tvf_table_sink) {
1265
0
            return Status::InternalError("Missing TVF table sink.");
1266
0
        }
1267
0
        _sink = std::make_shared<TVFTableSinkOperatorX>(pool, next_sink_operator_id(), row_desc,
1268
0
                                                        output_exprs);
1269
0
        break;
1270
0
    }
1271
0
    default:
1272
0
        return Status::InternalError("Unsuported sink type in pipeline: {}", thrift_sink.type);
1273
0
    }
1274
0
    return Status::OK();
1275
0
}
1276
1277
// NOLINTBEGIN(readability-function-size)
// NOLINTBEGIN(readability-function-cognitive-complexity)
// Translates a single Thrift plan node (`tnode`) into one pipeline source operator
// (returned through `op`) plus, for blocking nodes (agg/sort/join/union/...), one or
// more sink operators placed on newly created upstream pipelines. `cur_pipe` is both
// an input (the pipeline the operator is appended to) and an output (for blocking
// nodes it is re-pointed at the newly created sink-side pipeline that the node's
// children should feed).
//
// Parameters:
//   pool                          - object pool owning Thrift-derived helper objects.
//   tnode                         - Thrift description of the plan node to build.
//   descs                         - descriptor table for row/tuple layouts.
//   op [out]                      - the created source-side operator.
//   cur_pipe [in/out]             - current pipeline; may be replaced (see above).
//   parent_idx / child_idx        - position of this node under its parent; used to
//                                   pop the correct pre-registered child pipeline.
//   followed_by_shuffled_operator - forwarded to update_operator() via the Defer.
//   require_bucket_distribution   - forwarded to update_operator() via the Defer.
//   cache_op [out]                - set to the CacheSourceOperatorX when the query
//                                   cache path is taken for an aggregation node.
// Returns Status::OK() on success, InternalError for unsupported/invalid nodes.
Status PipelineFragmentContext::_create_operator(ObjectPool* pool, const TPlanNode& tnode,
                                                 const DescriptorTbl& descs, OperatorPtr& op,
                                                 PipelinePtr& cur_pipe, int parent_idx,
                                                 int child_idx,
                                                 const bool followed_by_shuffled_operator,
                                                 const bool require_bucket_distribution,
                                                 OperatorPtr& cache_op) {
    std::vector<DataSinkOperatorPtr> sink_ops;
    // Runs on every exit path (including early RETURN_IF_ERROR returns): propagate
    // shuffle/bucket-distribution info to whatever operators were created so far.
    Defer defer = Defer([&]() {
        if (op) {
            op->update_operator(tnode, followed_by_shuffled_operator, require_bucket_distribution);
        }
        for (auto& s : sink_ops) {
            s->update_operator(tnode, followed_by_shuffled_operator, require_bucket_distribution);
        }
    });
    // We directly construct the operator from Thrift because the given array is in the order of preorder traversal.
    // Therefore, here we need to use a stack-like structure.
    _pipeline_parent_map.pop(cur_pipe, parent_idx, child_idx);
    // NOTE(review): error_msg is never written to or read in this function — looks
    // like dead code; candidate for removal.
    std::stringstream error_msg;
    bool enable_query_cache = _params.fragment.__isset.query_cache_param;

    // Set by scan/exchange cases when the FE did not send is_serial_operator, i.e.
    // the frontend predates that field; used below to fall back to the legacy
    // parallel_instances-based task count.
    bool fe_with_old_version = false;
    switch (tnode.node_type) {
    case TPlanNodeType::OLAP_SCAN_NODE: {
        op = std::make_shared<OlapScanOperatorX>(
                pool, tnode, next_operator_id(), descs, _num_instances,
                enable_query_cache ? _params.fragment.query_cache_param : TQueryCacheParam {});
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        fe_with_old_version = !tnode.__isset.is_serial_operator;
        break;
    }
    case TPlanNodeType::GROUP_COMMIT_SCAN_NODE: {
        DCHECK(_query_ctx != nullptr);
        // Tag the query-level mem tracker so group-commit loads can be identified.
        _query_ctx->query_mem_tracker()->is_group_commit_load = true;
        op = std::make_shared<GroupCommitOperatorX>(pool, tnode, next_operator_id(), descs,
                                                    _num_instances);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        fe_with_old_version = !tnode.__isset.is_serial_operator;
        break;
    }
    case TPlanNodeType::JDBC_SCAN_NODE: {
        // JDBC scan requires the embedded JVM; refuse cleanly when it is disabled.
        if (config::enable_java_support) {
            op = std::make_shared<JDBCScanOperatorX>(pool, tnode, next_operator_id(), descs,
                                                     _num_instances);
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        } else {
            return Status::InternalError(
                    "Jdbc scan node is disabled, you can change be config enable_java_support "
                    "to true and restart be.");
        }
        fe_with_old_version = !tnode.__isset.is_serial_operator;
        break;
    }
    case TPlanNodeType::FILE_SCAN_NODE: {
        op = std::make_shared<FileScanOperatorX>(pool, tnode, next_operator_id(), descs,
                                                 _num_instances);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        fe_with_old_version = !tnode.__isset.is_serial_operator;
        break;
    }
    case TPlanNodeType::ES_SCAN_NODE:
    case TPlanNodeType::ES_HTTP_SCAN_NODE: {
        op = std::make_shared<EsScanOperatorX>(pool, tnode, next_operator_id(), descs,
                                               _num_instances);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        fe_with_old_version = !tnode.__isset.is_serial_operator;
        break;
    }
    case TPlanNodeType::EXCHANGE_NODE: {
        // Number of upstream senders feeding this exchange; 0 only if the FE did
        // not register this node, which the DCHECK below treats as a planning bug.
        int num_senders = _params.per_exch_num_senders.contains(tnode.node_id)
                                  ? _params.per_exch_num_senders.find(tnode.node_id)->second
                                  : 0;
        DCHECK_GT(num_senders, 0);
        op = std::make_shared<ExchangeSourceOperatorX>(pool, tnode, next_operator_id(), descs,
                                                       num_senders);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        fe_with_old_version = !tnode.__isset.is_serial_operator;
        break;
    }
    case TPlanNodeType::AGGREGATION_NODE: {
        if (tnode.agg_node.grouping_exprs.empty() &&
            descs.get_tuple_descriptor(tnode.agg_node.output_tuple_id)->slots().empty()) {
            return Status::InternalError("Illegal aggregate node " + std::to_string(tnode.node_id) +
                                         ": group by and output is empty");
        }
        bool need_create_cache_op =
                enable_query_cache && tnode.node_id == _params.fragment.query_cache_param.node_id;
        // Inserts a CacheSourceOperatorX on the current pipeline and a CacheSinkOperatorX
        // on a freshly created upstream pipeline (`new_pipe`); the actual aggregation
        // is then built on `new_pipe` by the caller of this lambda.
        auto create_query_cache_operator = [&](PipelinePtr& new_pipe) {
            auto cache_node_id = _params.local_params[0].per_node_scan_ranges.begin()->first;
            auto cache_source_id = next_operator_id();
            op = std::make_shared<CacheSourceOperatorX>(pool, cache_node_id, cache_source_id,
                                                        _params.fragment.query_cache_param);
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

            const auto downstream_pipeline_id = cur_pipe->id();
            if (!_dag.contains(downstream_pipeline_id)) {
                _dag.insert({downstream_pipeline_id, {}});
            }
            new_pipe = add_pipeline(cur_pipe);
            _dag[downstream_pipeline_id].push_back(new_pipe->id());

            DataSinkOperatorPtr cache_sink(new CacheSinkOperatorX(
                    next_sink_operator_id(), op->node_id(), op->operator_id()));
            RETURN_IF_ERROR(new_pipe->set_sink(cache_sink));
            return Status::OK();
        };
        const bool group_by_limit_opt =
                tnode.agg_node.__isset.agg_sort_info_by_group_key && tnode.limit > 0;

        /// PartitionedAggSourceOperatorX does not support "group by limit opt(#29641)" yet.
        /// If `group_by_limit_opt` is true, then it might not need to spill at all.
        const bool enable_spill = _runtime_state->enable_spill() &&
                                  !tnode.agg_node.grouping_exprs.empty() && !group_by_limit_opt;
        const bool is_streaming_agg = tnode.agg_node.__isset.use_streaming_preaggregation &&
                                      tnode.agg_node.use_streaming_preaggregation &&
                                      !tnode.agg_node.grouping_exprs.empty();
        // TODO: distinct streaming agg does not support spill.
        const bool can_use_distinct_streaming_agg =
                (!enable_spill || is_streaming_agg) && tnode.agg_node.aggregate_functions.empty() &&
                !tnode.agg_node.__isset.agg_sort_info_by_group_key &&
                _params.query_options.__isset.enable_distinct_streaming_aggregation &&
                _params.query_options.enable_distinct_streaming_aggregation;

        if (can_use_distinct_streaming_agg) {
            if (need_create_cache_op) {
                PipelinePtr new_pipe;
                RETURN_IF_ERROR(create_query_cache_operator(new_pipe));

                cache_op = op;
                op = std::make_shared<DistinctStreamingAggOperatorX>(pool, next_operator_id(),
                                                                     tnode, descs);
                RETURN_IF_ERROR(new_pipe->add_operator(op, _parallel_instances));
                // Wire the cache source (front of the old pipeline) to read from
                // the aggregation operator.
                RETURN_IF_ERROR(cur_pipe->operators().front()->set_child(op));
                cur_pipe = new_pipe;
            } else {
                op = std::make_shared<DistinctStreamingAggOperatorX>(pool, next_operator_id(),
                                                                     tnode, descs);
                RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
            }
        } else if (is_streaming_agg) {
            if (need_create_cache_op) {
                PipelinePtr new_pipe;
                RETURN_IF_ERROR(create_query_cache_operator(new_pipe));
                cache_op = op;
                op = std::make_shared<StreamingAggOperatorX>(pool, next_operator_id(), tnode,
                                                             descs);
                // NOTE(review): set_child/add_operator order here is the reverse of
                // the distinct-streaming branch above — presumably equivalent; confirm.
                RETURN_IF_ERROR(cur_pipe->operators().front()->set_child(op));
                RETURN_IF_ERROR(new_pipe->add_operator(op, _parallel_instances));
                cur_pipe = new_pipe;
            } else {
                op = std::make_shared<StreamingAggOperatorX>(pool, next_operator_id(), tnode,
                                                             descs);
                RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
            }
        } else {
            // Blocking aggregation: source on the downstream pipeline, sink on a new
            // upstream pipeline (spillable variants when spill is enabled).
            // create new pipeline to add query cache operator
            PipelinePtr new_pipe;
            if (need_create_cache_op) {
                RETURN_IF_ERROR(create_query_cache_operator(new_pipe));
                cache_op = op;
            }

            if (enable_spill) {
                op = std::make_shared<PartitionedAggSourceOperatorX>(pool, tnode,
                                                                     next_operator_id(), descs);
            } else {
                op = std::make_shared<AggSourceOperatorX>(pool, tnode, next_operator_id(), descs);
            }
            if (need_create_cache_op) {
                RETURN_IF_ERROR(cur_pipe->operators().front()->set_child(op));
                RETURN_IF_ERROR(new_pipe->add_operator(op, _parallel_instances));
                cur_pipe = new_pipe;
            } else {
                RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
            }

            const auto downstream_pipeline_id = cur_pipe->id();
            if (!_dag.contains(downstream_pipeline_id)) {
                _dag.insert({downstream_pipeline_id, {}});
            }
            cur_pipe = add_pipeline(cur_pipe);
            _dag[downstream_pipeline_id].push_back(cur_pipe->id());

            if (enable_spill) {
                sink_ops.push_back(std::make_shared<PartitionedAggSinkOperatorX>(
                        pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
            } else {
                sink_ops.push_back(std::make_shared<AggSinkOperatorX>(
                        pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
            }
            RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
            RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
        }
        break;
    }
    case TPlanNodeType::BUCKETED_AGGREGATION_NODE: {
        if (tnode.bucketed_agg_node.grouping_exprs.empty()) {
            return Status::InternalError(
                    "Bucketed aggregation node {} should not be used without group by keys",
                    tnode.node_id);
        }

        // Create source operator (goes on the current / downstream pipeline).
        op = std::make_shared<BucketedAggSourceOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

        // Create a new pipeline for the sink side.
        const auto downstream_pipeline_id = cur_pipe->id();
        if (!_dag.contains(downstream_pipeline_id)) {
            _dag.insert({downstream_pipeline_id, {}});
        }
        cur_pipe = add_pipeline(cur_pipe);
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());

        // Create sink operator.
        sink_ops.push_back(std::make_shared<BucketedAggSinkOperatorX>(
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));

        // Pre-register a single shared state for ALL instances so that every
        // sink instance writes its per-instance hash table into the same
        // BucketedAggSharedState and every source instance can merge across
        // all of them.
        {
            auto shared_state = BucketedAggSharedState::create_shared();
            shared_state->id = op->operator_id();
            shared_state->related_op_ids.insert(op->operator_id());

            for (int i = 0; i < _num_instances; i++) {
                auto sink_dep = std::make_shared<Dependency>(op->operator_id(), op->node_id(),
                                                             "BUCKETED_AGG_SINK_DEPENDENCY");
                sink_dep->set_shared_state(shared_state.get());
                shared_state->sink_deps.push_back(sink_dep);
            }
            shared_state->create_source_dependencies(_num_instances, op->operator_id(),
                                                     op->node_id(), "BUCKETED_AGG_SOURCE");
            _op_id_to_shared_state.insert(
                    {op->operator_id(), {shared_state, shared_state->sink_deps}});
        }
        break;
    }
    case TPlanNodeType::HASH_JOIN_NODE: {
        const auto is_broadcast_join = tnode.hash_join_node.__isset.is_broadcast_join &&
                                       tnode.hash_join_node.is_broadcast_join;
        const auto enable_spill = _runtime_state->enable_spill();
        if (enable_spill && !is_broadcast_join) {
            // Spillable (partitioned) hash join. The partitioned operators wrap
            // plain inner probe/build operators that run on spilled partitions.
            auto tnode_ = tnode;
            // Inner operators must not register runtime filters again; use a copy
            // of the plan node with filters stripped.
            tnode_.runtime_filters.clear();
            auto inner_probe_operator =
                    std::make_shared<HashJoinProbeOperatorX>(pool, tnode_, 0, descs);

            // probe side inner sink operator is used to build hash table on probe side when data is spilled.
            // So here use `tnode_` which has no runtime filters.
            auto probe_side_inner_sink_operator =
                    std::make_shared<HashJoinBuildSinkOperatorX>(pool, 0, 0, tnode_, descs);

            RETURN_IF_ERROR(inner_probe_operator->init(tnode_, _runtime_state.get()));
            RETURN_IF_ERROR(probe_side_inner_sink_operator->init(tnode_, _runtime_state.get()));

            auto probe_operator = std::make_shared<PartitionedHashJoinProbeOperatorX>(
                    pool, tnode_, next_operator_id(), descs);
            probe_operator->set_inner_operators(probe_side_inner_sink_operator,
                                                inner_probe_operator);
            op = std::move(probe_operator);
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

            const auto downstream_pipeline_id = cur_pipe->id();
            if (!_dag.contains(downstream_pipeline_id)) {
                _dag.insert({downstream_pipeline_id, {}});
            }
            PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
            _dag[downstream_pipeline_id].push_back(build_side_pipe->id());

            // Build-side inner sink keeps the original tnode (with runtime filters)
            // so filters are produced by the build side.
            auto inner_sink_operator =
                    std::make_shared<HashJoinBuildSinkOperatorX>(pool, 0, 0, tnode, descs);
            auto sink_operator = std::make_shared<PartitionedHashJoinSinkOperatorX>(
                    pool, next_sink_operator_id(), op->operator_id(), tnode_, descs);
            RETURN_IF_ERROR(inner_sink_operator->init(tnode, _runtime_state.get()));

            sink_operator->set_inner_operators(inner_sink_operator, inner_probe_operator);
            sink_ops.push_back(std::move(sink_operator));
            RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
            RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode_, _runtime_state.get()));

            // Register both pipelines so the two children (probe, build) are routed
            // to the right pipeline by _pipeline_parent_map.pop() above.
            _pipeline_parent_map.push(op->node_id(), cur_pipe);
            _pipeline_parent_map.push(op->node_id(), build_side_pipe);
        } else {
            // In-memory hash join: plain probe operator + build sink on a new pipeline.
            op = std::make_shared<HashJoinProbeOperatorX>(pool, tnode, next_operator_id(), descs);
            RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

            const auto downstream_pipeline_id = cur_pipe->id();
            if (!_dag.contains(downstream_pipeline_id)) {
                _dag.insert({downstream_pipeline_id, {}});
            }
            PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
            _dag[downstream_pipeline_id].push_back(build_side_pipe->id());

            sink_ops.push_back(std::make_shared<HashJoinBuildSinkOperatorX>(
                    pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
            RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
            RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode, _runtime_state.get()));

            _pipeline_parent_map.push(op->node_id(), cur_pipe);
            _pipeline_parent_map.push(op->node_id(), build_side_pipe);
        }
        if (is_broadcast_join && _runtime_state->enable_share_hash_table_for_broadcast_join()) {
            // Share one hash table across all instances of a broadcast join: one
            // sink dependency per instance plus matching source dependencies.
            std::shared_ptr<HashJoinSharedState> shared_state =
                    HashJoinSharedState::create_shared(_num_instances);
            for (int i = 0; i < _num_instances; i++) {
                auto sink_dep = std::make_shared<Dependency>(op->operator_id(), op->node_id(),
                                                             "HASH_JOIN_BUILD_DEPENDENCY");
                sink_dep->set_shared_state(shared_state.get());
                shared_state->sink_deps.push_back(sink_dep);
            }
            shared_state->create_source_dependencies(_num_instances, op->operator_id(),
                                                     op->node_id(), "HASH_JOIN_PROBE");
            _op_id_to_shared_state.insert(
                    {op->operator_id(), {shared_state, shared_state->sink_deps}});
        }
        break;
    }
    case TPlanNodeType::CROSS_JOIN_NODE: {
        op = std::make_shared<NestedLoopJoinProbeOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

        const auto downstream_pipeline_id = cur_pipe->id();
        if (!_dag.contains(downstream_pipeline_id)) {
            _dag.insert({downstream_pipeline_id, {}});
        }
        PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
        _dag[downstream_pipeline_id].push_back(build_side_pipe->id());

        sink_ops.push_back(std::make_shared<NestedLoopJoinBuildSinkOperatorX>(
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
        RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
        RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode, _runtime_state.get()));
        _pipeline_parent_map.push(op->node_id(), cur_pipe);
        _pipeline_parent_map.push(op->node_id(), build_side_pipe);
        break;
    }
    case TPlanNodeType::UNION_NODE: {
        // One shared source plus one sink pipeline per child.
        int child_count = tnode.num_children;
        op = std::make_shared<UnionSourceOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

        const auto downstream_pipeline_id = cur_pipe->id();
        if (!_dag.contains(downstream_pipeline_id)) {
            _dag.insert({downstream_pipeline_id, {}});
        }
        for (int i = 0; i < child_count; i++) {
            PipelinePtr build_side_pipe = add_pipeline(cur_pipe);
            _dag[downstream_pipeline_id].push_back(build_side_pipe->id());
            sink_ops.push_back(std::make_shared<UnionSinkOperatorX>(
                    i, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
            RETURN_IF_ERROR(build_side_pipe->set_sink(sink_ops.back()));
            RETURN_IF_ERROR(build_side_pipe->sink()->init(tnode, _runtime_state.get()));
            // preset children pipelines. if any pipeline found this as its father, will use the prepared pipeline to build.
            _pipeline_parent_map.push(op->node_id(), build_side_pipe);
        }
        break;
    }
    case TPlanNodeType::SORT_NODE: {
        const auto should_spill = _runtime_state->enable_spill() &&
                                  tnode.sort_node.algorithm == TSortAlgorithm::FULL_SORT;
        const bool use_local_merge =
                tnode.sort_node.__isset.use_local_merge && tnode.sort_node.use_local_merge;
        if (should_spill) {
            op = std::make_shared<SpillSortSourceOperatorX>(pool, tnode, next_operator_id(), descs);
        } else if (use_local_merge) {
            op = std::make_shared<LocalMergeSortSourceOperatorX>(pool, tnode, next_operator_id(),
                                                                 descs);
        } else {
            op = std::make_shared<SortSourceOperatorX>(pool, tnode, next_operator_id(), descs);
        }
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

        const auto downstream_pipeline_id = cur_pipe->id();
        if (!_dag.contains(downstream_pipeline_id)) {
            _dag.insert({downstream_pipeline_id, {}});
        }
        cur_pipe = add_pipeline(cur_pipe);
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());

        // Note: local-merge source still pairs with the regular SortSinkOperatorX.
        if (should_spill) {
            sink_ops.push_back(std::make_shared<SpillSortSinkOperatorX>(
                    pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
        } else {
            sink_ops.push_back(std::make_shared<SortSinkOperatorX>(
                    pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
        }
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
        break;
    }
    case TPlanNodeType::PARTITION_SORT_NODE: {
        op = std::make_shared<PartitionSortSourceOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

        const auto downstream_pipeline_id = cur_pipe->id();
        if (!_dag.contains(downstream_pipeline_id)) {
            _dag.insert({downstream_pipeline_id, {}});
        }
        cur_pipe = add_pipeline(cur_pipe);
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());

        sink_ops.push_back(std::make_shared<PartitionSortSinkOperatorX>(
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
        break;
    }
    case TPlanNodeType::ANALYTIC_EVAL_NODE: {
        op = std::make_shared<AnalyticSourceOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

        const auto downstream_pipeline_id = cur_pipe->id();
        if (!_dag.contains(downstream_pipeline_id)) {
            _dag.insert({downstream_pipeline_id, {}});
        }
        cur_pipe = add_pipeline(cur_pipe);
        _dag[downstream_pipeline_id].push_back(cur_pipe->id());

        sink_ops.push_back(std::make_shared<AnalyticSinkOperatorX>(
                pool, next_sink_operator_id(), op->operator_id(), tnode, descs));
        RETURN_IF_ERROR(cur_pipe->set_sink(sink_ops.back()));
        RETURN_IF_ERROR(cur_pipe->sink()->init(tnode, _runtime_state.get()));
        break;
    }
    case TPlanNodeType::MATERIALIZATION_NODE: {
        op = std::make_shared<MaterializationOperator>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        break;
    }
    case TPlanNodeType::INTERSECT_NODE: {
        RETURN_IF_ERROR(_build_operators_for_set_operation_node<true>(pool, tnode, descs, op,
                                                                      cur_pipe, sink_ops));
        break;
    }
    case TPlanNodeType::EXCEPT_NODE: {
        RETURN_IF_ERROR(_build_operators_for_set_operation_node<false>(pool, tnode, descs, op,
                                                                       cur_pipe, sink_ops));
        break;
    }
    case TPlanNodeType::REPEAT_NODE: {
        op = std::make_shared<RepeatOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        break;
    }
    case TPlanNodeType::TABLE_FUNCTION_NODE: {
        op = std::make_shared<TableFunctionOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        break;
    }
    case TPlanNodeType::ASSERT_NUM_ROWS_NODE: {
        op = std::make_shared<AssertNumRowsOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        break;
    }
    case TPlanNodeType::EMPTY_SET_NODE: {
        op = std::make_shared<EmptySetSourceOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        break;
    }
    case TPlanNodeType::DATA_GEN_SCAN_NODE: {
        op = std::make_shared<DataGenSourceOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        fe_with_old_version = !tnode.__isset.is_serial_operator;
        break;
    }
    case TPlanNodeType::SCHEMA_SCAN_NODE: {
        op = std::make_shared<SchemaScanOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        break;
    }
    case TPlanNodeType::META_SCAN_NODE: {
        op = std::make_shared<MetaScanOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        break;
    }
    case TPlanNodeType::SELECT_NODE: {
        op = std::make_shared<SelectOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        break;
    }
    case TPlanNodeType::REC_CTE_NODE: {
        // Recursive CTE: one source plus two sink pipelines — an anchor (initial)
        // branch and a recursive branch.
        op = std::make_shared<RecCTESourceOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));

        const auto downstream_pipeline_id = cur_pipe->id();
        if (!_dag.contains(downstream_pipeline_id)) {
            _dag.insert({downstream_pipeline_id, {}});
        }

        PipelinePtr anchor_side_pipe = add_pipeline(cur_pipe);
        _dag[downstream_pipeline_id].push_back(anchor_side_pipe->id());

        DataSinkOperatorPtr anchor_sink;
        anchor_sink = std::make_shared<RecCTEAnchorSinkOperatorX>(next_sink_operator_id(),
                                                                  op->operator_id(), tnode, descs);
        RETURN_IF_ERROR(anchor_side_pipe->set_sink(anchor_sink));
        RETURN_IF_ERROR(anchor_side_pipe->sink()->init(tnode, _runtime_state.get()));
        _pipeline_parent_map.push(op->node_id(), anchor_side_pipe);

        PipelinePtr rec_side_pipe = add_pipeline(cur_pipe);
        _dag[downstream_pipeline_id].push_back(rec_side_pipe->id());

        DataSinkOperatorPtr rec_sink;
        rec_sink = std::make_shared<RecCTESinkOperatorX>(next_sink_operator_id(), op->operator_id(),
                                                         tnode, descs);
        RETURN_IF_ERROR(rec_side_pipe->set_sink(rec_sink));
        RETURN_IF_ERROR(rec_side_pipe->sink()->init(tnode, _runtime_state.get()));
        _pipeline_parent_map.push(op->node_id(), rec_side_pipe);

        break;
    }
    case TPlanNodeType::REC_CTE_SCAN_NODE: {
        op = std::make_shared<RecCTEScanOperatorX>(pool, tnode, next_operator_id(), descs);
        RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
        break;
    }
    default:
        return Status::InternalError("Unsupported exec type in pipeline: {}",
                                     print_plan_node_type(tnode.node_type));
    }
    // Legacy FE compatibility: without is_serial_operator the scan/exchange operator
    // runs serially and the pipeline's task count comes from parallel_instances.
    if (_params.__isset.parallel_instances && fe_with_old_version) {
        cur_pipe->set_num_tasks(_params.parallel_instances);
        op->set_serial_operator();
    }

    return Status::OK();
}
// NOLINTEND(readability-function-cognitive-complexity)
// NOLINTEND(readability-function-size)
1814
1815
template <bool is_intersect>
1816
Status PipelineFragmentContext::_build_operators_for_set_operation_node(
1817
        ObjectPool* pool, const TPlanNode& tnode, const DescriptorTbl& descs, OperatorPtr& op,
1818
0
        PipelinePtr& cur_pipe, std::vector<DataSinkOperatorPtr>& sink_ops) {
1819
0
    op.reset(new SetSourceOperatorX<is_intersect>(pool, tnode, next_operator_id(), descs));
1820
0
    RETURN_IF_ERROR(cur_pipe->add_operator(op, _parallel_instances));
1821
1822
0
    const auto downstream_pipeline_id = cur_pipe->id();
1823
0
    if (!_dag.contains(downstream_pipeline_id)) {
1824
0
        _dag.insert({downstream_pipeline_id, {}});
1825
0
    }
1826
1827
0
    for (int child_id = 0; child_id < tnode.num_children; child_id++) {
1828
0
        PipelinePtr probe_side_pipe = add_pipeline(cur_pipe);
1829
0
        _dag[downstream_pipeline_id].push_back(probe_side_pipe->id());
1830
1831
0
        if (child_id == 0) {
1832
0
            sink_ops.push_back(std::make_shared<SetSinkOperatorX<is_intersect>>(
1833
0
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1834
0
        } else {
1835
0
            sink_ops.push_back(std::make_shared<SetProbeSinkOperatorX<is_intersect>>(
1836
0
                    child_id, next_sink_operator_id(), op->operator_id(), pool, tnode, descs));
1837
0
        }
1838
0
        RETURN_IF_ERROR(probe_side_pipe->set_sink(sink_ops.back()));
1839
0
        RETURN_IF_ERROR(probe_side_pipe->sink()->init(tnode, _runtime_state.get()));
1840
        // prepare children pipelines. if any pipeline found this as its father, will use the prepared pipeline to build.
1841
0
        _pipeline_parent_map.push(op->node_id(), probe_side_pipe);
1842
0
    }
1843
1844
0
    return Status::OK();
1845
0
}
Unexecuted instantiation: _ZN5doris23PipelineFragmentContext39_build_operators_for_set_operation_nodeILb1EEENS_6StatusEPNS_10ObjectPoolERKNS_9TPlanNodeERKNS_13DescriptorTblERSt10shared_ptrINS_13OperatorXBaseEERSB_INS_8PipelineEERSt6vectorISB_INS_21DataSinkOperatorXBaseEESaISK_EE
Unexecuted instantiation: _ZN5doris23PipelineFragmentContext39_build_operators_for_set_operation_nodeILb0EEENS_6StatusEPNS_10ObjectPoolERKNS_9TPlanNodeERKNS_13DescriptorTblERSt10shared_ptrINS_13OperatorXBaseEERSB_INS_8PipelineEERSt6vectorISB_INS_21DataSinkOperatorXBaseEESaISK_EE
1846
1847
0
// Submit every pipeline task of this fragment to the query's task scheduler.
// Returns InternalError if called twice, or if any task fails to submit; in
// the failure case the fragment is cancelled and _total_tasks is truncated to
// the number of tasks actually submitted, so that _close_fragment_instance()
// can fire once the already-submitted tasks close.
Status PipelineFragmentContext::submit() {
    if (_submitted) {
        return Status::InternalError("submitted");
    }
    _submitted = true;

    int submit_tasks = 0;
    Status st;
    auto* scheduler = _query_ctx->get_pipe_exec_scheduler();
    for (auto& task : _tasks) {
        for (auto& t : task) {
            st = scheduler->submit(t.first);
            DBUG_EXECUTE_IF("PipelineFragmentContext.submit.failed",
                            { st = Status::Aborted("PipelineFragmentContext.submit.failed"); });
            if (!st) {
                cancel(Status::InternalError("submit context to executor fail"));
                std::lock_guard<std::mutex> l(_task_mutex);
                // Only the tasks submitted so far will ever close; shrink the
                // expected total so the close condition can still be reached.
                _total_tasks = submit_tasks;
                break;
            }
            submit_tasks++;
        }
        // BUGFIX: previously only the inner loop was broken out of, so the
        // remaining instances kept submitting and overwrote `st`, which could
        // mask the failure (returning OK for an already-cancelled fragment).
        if (!st.ok()) {
            break;
        }
    }
    if (!st.ok()) {
        bool need_remove = false;
        {
            std::lock_guard<std::mutex> l(_task_mutex);
            if (_closed_tasks >= _total_tasks) {
                need_remove = _close_fragment_instance();
            }
        }
        // Call remove_pipeline_context() outside _task_mutex to avoid ABBA deadlock.
        if (need_remove) {
            _exec_env->fragment_mgr()->remove_pipeline_context({_query_id, _fragment_id});
        }
        return Status::InternalError("Submit pipeline failed. err = {}, BE: {}", st.to_string(),
                                     BackendOptions::get_localhost());
    } else {
        return st;
    }
}
1888
1889
0
void PipelineFragmentContext::print_profile(const std::string& extra_info) {
1890
0
    if (_runtime_state->enable_profile()) {
1891
0
        std::stringstream ss;
1892
0
        for (auto runtime_profile_ptr : _runtime_state->pipeline_id_to_profile()) {
1893
0
            runtime_profile_ptr->pretty_print(&ss);
1894
0
        }
1895
1896
0
        if (_runtime_state->load_channel_profile()) {
1897
0
            _runtime_state->load_channel_profile()->pretty_print(&ss);
1898
0
        }
1899
1900
0
        auto profile_str =
1901
0
                fmt::format("Query {} fragment {} {}, profile, {}", print_id(this->_query_id),
1902
0
                            this->_fragment_id, extra_info, ss.str());
1903
0
        LOG_LONG_STRING(INFO, profile_str);
1904
0
    }
1905
0
}
1906
// If all pipeline tasks binded to the fragment instance are finished, then we could
1907
// close the fragment instance.
1908
// Returns true if the caller should call remove_pipeline_context() **after** releasing
1909
// _task_mutex. We must not call remove_pipeline_context() here because it acquires
1910
// _pipeline_map's shard lock, and this function is called while _task_mutex is held.
1911
// Acquiring _pipeline_map while holding _task_mutex creates an ABBA deadlock with
1912
// dump_pipeline_tasks(), which acquires _pipeline_map first and then _task_mutex
1913
// (via debug_string()).
1914
0
bool PipelineFragmentContext::_close_fragment_instance() {
    // Idempotent: a second call (e.g. from both submit() failure path and the
    // last decrement_running_task()) is a no-op.
    if (_is_fragment_instance_closed) {
        return false;
    }
    // Mark closed on every exit path of this function, including early returns.
    Defer defer_op {[&]() { _is_fragment_instance_closed = true; }};
    _fragment_level_profile->total_time_counter()->update(_fragment_watcher.elapsed_time());
    // When _need_notify_close is set, another party owns the final report, so
    // we must not send one here.
    if (!_need_notify_close) {
        auto st = send_report(true);
        if (!st) {
            LOG(WARNING) << fmt::format("Failed to send report for query {}, fragment {}: {}",
                                        print_id(_query_id), _fragment_id, st.to_string());
        }
    }
    // Print profile content in info log is a temporary solution for stream load and external_connector.
    // Since stream load does not have something like coordinator on FE, so
    // backend can not report profile to FE, and its profile can not be shown
    // in the same way with other query. So we print the profile content to info log.

    if (_runtime_state->enable_profile() &&
        (_query_ctx->get_query_source() == QuerySource::STREAM_LOAD ||
         _query_ctx->get_query_source() == QuerySource::EXTERNAL_CONNECTOR ||
         _query_ctx->get_query_source() == QuerySource::GROUP_COMMIT_LOAD)) {
        std::stringstream ss;
        // Compute the _local_time_percent before pretty_print the runtime_profile
        // Before add this operation, the print out like that:
        // UNION_NODE (id=0):(Active: 56.720us, non-child: 00.00%)
        // After add the operation, the print out like that:
        // UNION_NODE (id=0):(Active: 56.720us, non-child: 82.53%)
        // We can easily know the exec node execute time without child time consumed.
        for (auto runtime_profile_ptr : _runtime_state->pipeline_id_to_profile()) {
            runtime_profile_ptr->pretty_print(&ss);
        }

        if (_runtime_state->load_channel_profile()) {
            _runtime_state->load_channel_profile()->pretty_print(&ss);
        }

        LOG_INFO("Query {} fragment {} profile:\n {}", print_id(_query_id), _fragment_id, ss.str());
    }

    // Hand the final (thrift-serialized) profiles over to the query context so
    // they survive this fragment context's destruction.
    if (_query_ctx->enable_profile()) {
        _query_ctx->add_fragment_profile(_fragment_id, collect_realtime_profile(),
                                         collect_realtime_load_channel_profile());
    }

    // Return whether the caller needs to remove from the pipeline map.
    // The caller must do this after releasing _task_mutex.
    return !_need_notify_close;
}
1963
1964
0
// Called when one task of `pipeline_id` finishes. When the whole pipeline has
// closed, wakes up its dependent (downstream) pipelines; when all tasks of the
// fragment have closed, closes the fragment instance and removes it from the
// fragment manager's pipeline map.
void PipelineFragmentContext::decrement_running_task(PipelineId pipeline_id) {
    // If all tasks of this pipeline has been closed, upstream tasks is never needed, and we just make those runnable here
    DCHECK(_pip_id_to_pipeline.contains(pipeline_id));
    if (_pip_id_to_pipeline[pipeline_id]->close_task()) {
        if (_dag.contains(pipeline_id)) {
            for (auto dep : _dag[pipeline_id]) {
                _pip_id_to_pipeline[dep]->make_all_runnable(pipeline_id);
            }
        }
    }
    bool need_remove = false;
    {
        std::lock_guard<std::mutex> l(_task_mutex);
        ++_closed_tasks;
        if (_closed_tasks >= _total_tasks) {
            // _close_fragment_instance() tells us whether we still own the
            // removal from the pipeline map (see its comment block above).
            need_remove = _close_fragment_instance();
        }
    }
    // Call remove_pipeline_context() outside _task_mutex to avoid ABBA deadlock.
    if (need_remove) {
        _exec_env->fragment_mgr()->remove_pipeline_context({_query_id, _fragment_id});
    }
}
1987
1988
0
std::string PipelineFragmentContext::get_load_error_url() {
1989
0
    if (const auto& str = _runtime_state->get_error_log_file_path(); !str.empty()) {
1990
0
        return to_load_error_http_path(str);
1991
0
    }
1992
0
    for (auto& tasks : _tasks) {
1993
0
        for (auto& task : tasks) {
1994
0
            if (const auto& str = task.second->get_error_log_file_path(); !str.empty()) {
1995
0
                return to_load_error_http_path(str);
1996
0
            }
1997
0
        }
1998
0
    }
1999
0
    return "";
2000
0
}
2001
2002
0
std::string PipelineFragmentContext::get_first_error_msg() {
2003
0
    if (const auto& str = _runtime_state->get_first_error_msg(); !str.empty()) {
2004
0
        return str;
2005
0
    }
2006
0
    for (auto& tasks : _tasks) {
2007
0
        for (auto& task : tasks) {
2008
0
            if (const auto& str = task.second->get_first_error_msg(); !str.empty()) {
2009
0
                return str;
2010
0
            }
2011
0
        }
2012
0
    }
2013
0
    return "";
2014
0
}
2015
2016
0
std::string PipelineFragmentContext::_to_http_path(const std::string& file_name) const {
2017
0
    std::stringstream url;
2018
0
    url << "http://" << BackendOptions::get_localhost() << ":" << config::webserver_port
2019
0
        << "/api/_download_load?"
2020
0
        << "token=" << _exec_env->token() << "&file=" << file_name;
2021
0
    return url.str();
2022
0
}
2023
2024
0
// Report this fragment's execution status to the FE coordinator via the
// reportExecStatus thrift RPC. Assembles TReportExecStatusParams from the
// fragment-level runtime state (falling back to per-task runtime states when
// the fragment-level one is empty), retries client acquisition and the RPC
// once on transport failure, and cancels the query via req.cancel_fn if the
// report ultimately cannot be delivered.
void PipelineFragmentContext::_coordinator_callback(const ReportStatusRequest& req) {
    // Debug hook: artificially delay the report to exercise FE-side timeouts.
    DBUG_EXECUTE_IF("FragmentMgr::coordinator_callback.report_delay", {
        int random_seconds = req.status.is<ErrorCode::DATA_QUALITY_ERROR>() ? 8 : 2;
        LOG_INFO("sleep : ").tag("time", random_seconds).tag("query_id", print_id(req.query_id));
        std::this_thread::sleep_for(std::chrono::seconds(random_seconds));
        LOG_INFO("sleep done").tag("query_id", print_id(req.query_id));
    });

    DCHECK(req.status.ok() || req.done); // if !status.ok() => done
    if (req.coord_addr.hostname == "external") {
        // External query (flink/spark read tablets) not need to report to FE.
        return;
    }
    // Acquire an FE client, retrying up to 10 times with a 1s sleep between
    // attempts.
    int callback_retries = 10;
    const int sleep_ms = 1000;
    Status exec_status = req.status;
    Status coord_status;
    std::unique_ptr<FrontendServiceConnection> coord = nullptr;
    do {
        coord = std::make_unique<FrontendServiceConnection>(_exec_env->frontend_client_cache(),
                                                            req.coord_addr, &coord_status);
        if (!coord_status.ok()) {
            std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
        }
    } while (!coord_status.ok() && callback_retries-- > 0);

    // Could not reach the coordinator at all: cancel the query locally.
    if (!coord_status.ok()) {
        UniqueId uid(req.query_id.hi, req.query_id.lo);
        static_cast<void>(req.cancel_fn(Status::InternalError(
                "query_id: {}, couldn't get a client for {}, reason is {}", uid.to_string(),
                PrintThriftNetworkAddress(req.coord_addr), coord_status.to_string())));
        return;
    }

    // ---- Assemble the thrift report parameters. ----
    TReportExecStatusParams params;
    params.protocol_version = FrontendServiceVersion::V1;
    params.__set_query_id(req.query_id);
    params.__set_backend_num(req.backend_num);
    params.__set_fragment_instance_id(req.fragment_instance_id);
    params.__set_fragment_id(req.fragment_id);
    params.__set_status(exec_status.to_thrift());
    params.__set_done(req.done);
    params.__set_query_type(req.runtime_state->query_type());
    params.__isset.profile = false;

    DCHECK(req.runtime_state != nullptr);

    if (req.runtime_state->query_type() == TQueryType::LOAD) {
        // Load jobs report progress counters from the fragment-level state.
        params.__set_loaded_rows(req.runtime_state->num_rows_load_total());
        params.__set_loaded_bytes(req.runtime_state->num_bytes_load_total());
    } else {
        DCHECK(!req.runtime_states.empty());
        if (!req.runtime_state->output_files().empty()) {
            params.__isset.delta_urls = true;
            for (auto& it : req.runtime_state->output_files()) {
                params.delta_urls.push_back(_to_http_path(it));
            }
        }
        if (!params.delta_urls.empty()) {
            params.__isset.delta_urls = true;
        }
    }

    // Load counters: prefer the fragment-level runtime state; if it carries no
    // load statistics, aggregate across the per-task runtime states instead.
    static std::string s_dpp_normal_all = "dpp.norm.ALL";
    static std::string s_dpp_abnormal_all = "dpp.abnorm.ALL";
    static std::string s_unselected_rows = "unselected.rows";
    int64_t num_rows_load_success = 0;
    int64_t num_rows_load_filtered = 0;
    int64_t num_rows_load_unselected = 0;
    if (req.runtime_state->num_rows_load_total() > 0 ||
        req.runtime_state->num_rows_load_filtered() > 0 ||
        req.runtime_state->num_finished_range() > 0) {
        params.__isset.load_counters = true;

        num_rows_load_success = req.runtime_state->num_rows_load_success();
        num_rows_load_filtered = req.runtime_state->num_rows_load_filtered();
        num_rows_load_unselected = req.runtime_state->num_rows_load_unselected();
        params.__isset.fragment_instance_reports = true;
        TFragmentInstanceReport t;
        t.__set_fragment_instance_id(req.runtime_state->fragment_instance_id());
        t.__set_num_finished_range(cast_set<int>(req.runtime_state->num_finished_range()));
        t.__set_loaded_rows(req.runtime_state->num_rows_load_total());
        t.__set_loaded_bytes(req.runtime_state->num_bytes_load_total());
        params.fragment_instance_reports.push_back(t);
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (rs->num_rows_load_total() > 0 || rs->num_rows_load_filtered() > 0 ||
                rs->num_finished_range() > 0) {
                params.__isset.load_counters = true;
                num_rows_load_success += rs->num_rows_load_success();
                num_rows_load_filtered += rs->num_rows_load_filtered();
                num_rows_load_unselected += rs->num_rows_load_unselected();
                params.__isset.fragment_instance_reports = true;
                TFragmentInstanceReport t;
                t.__set_fragment_instance_id(rs->fragment_instance_id());
                t.__set_num_finished_range(cast_set<int>(rs->num_finished_range()));
                t.__set_loaded_rows(rs->num_rows_load_total());
                t.__set_loaded_bytes(rs->num_bytes_load_total());
                params.fragment_instance_reports.push_back(t);
            }
        }
    }
    params.load_counters.emplace(s_dpp_normal_all, std::to_string(num_rows_load_success));
    params.load_counters.emplace(s_dpp_abnormal_all, std::to_string(num_rows_load_filtered));
    params.load_counters.emplace(s_unselected_rows, std::to_string(num_rows_load_unselected));

    if (!req.load_error_url.empty()) {
        params.__set_tracking_url(req.load_error_url);
    }
    if (!req.first_error_msg.empty()) {
        params.__set_first_error_msg(req.first_error_msg);
    }
    // WAL (group commit) info: the last task with a wal_id wins.
    for (auto* rs : req.runtime_states) {
        if (rs->wal_id() > 0) {
            params.__set_txn_id(rs->wal_id());
            params.__set_label(rs->import_label());
        }
    }
    // Export files: fragment-level state first, otherwise collected from all
    // task states. The same fallback pattern repeats for the commit/error
    // sections below.
    if (!req.runtime_state->export_output_files().empty()) {
        params.__isset.export_files = true;
        params.export_files = req.runtime_state->export_output_files();
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (!rs->export_output_files().empty()) {
                params.__isset.export_files = true;
                params.export_files.insert(params.export_files.end(),
                                           rs->export_output_files().begin(),
                                           rs->export_output_files().end());
            }
        }
    }
    if (auto tci = req.runtime_state->tablet_commit_infos(); !tci.empty()) {
        params.__isset.commitInfos = true;
        params.commitInfos.insert(params.commitInfos.end(), tci.begin(), tci.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_tci = rs->tablet_commit_infos(); !rs_tci.empty()) {
                params.__isset.commitInfos = true;
                params.commitInfos.insert(params.commitInfos.end(), rs_tci.begin(), rs_tci.end());
            }
        }
    }
    if (auto eti = req.runtime_state->error_tablet_infos(); !eti.empty()) {
        params.__isset.errorTabletInfos = true;
        params.errorTabletInfos.insert(params.errorTabletInfos.end(), eti.begin(), eti.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_eti = rs->error_tablet_infos(); !rs_eti.empty()) {
                params.__isset.errorTabletInfos = true;
                params.errorTabletInfos.insert(params.errorTabletInfos.end(), rs_eti.begin(),
                                               rs_eti.end());
            }
        }
    }
    if (auto hpu = req.runtime_state->hive_partition_updates(); !hpu.empty()) {
        params.__isset.hive_partition_updates = true;
        params.hive_partition_updates.insert(params.hive_partition_updates.end(), hpu.begin(),
                                             hpu.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_hpu = rs->hive_partition_updates(); !rs_hpu.empty()) {
                params.__isset.hive_partition_updates = true;
                params.hive_partition_updates.insert(params.hive_partition_updates.end(),
                                                     rs_hpu.begin(), rs_hpu.end());
            }
        }
    }
    if (auto icd = req.runtime_state->iceberg_commit_datas(); !icd.empty()) {
        params.__isset.iceberg_commit_datas = true;
        params.iceberg_commit_datas.insert(params.iceberg_commit_datas.end(), icd.begin(),
                                           icd.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_icd = rs->iceberg_commit_datas(); !rs_icd.empty()) {
                params.__isset.iceberg_commit_datas = true;
                params.iceberg_commit_datas.insert(params.iceberg_commit_datas.end(),
                                                   rs_icd.begin(), rs_icd.end());
            }
        }
    }

    if (auto mcd = req.runtime_state->mc_commit_datas(); !mcd.empty()) {
        params.__isset.mc_commit_datas = true;
        params.mc_commit_datas.insert(params.mc_commit_datas.end(), mcd.begin(), mcd.end());
    } else if (!req.runtime_states.empty()) {
        for (auto* rs : req.runtime_states) {
            if (auto rs_mcd = rs->mc_commit_datas(); !rs_mcd.empty()) {
                params.__isset.mc_commit_datas = true;
                params.mc_commit_datas.insert(params.mc_commit_datas.end(), rs_mcd.begin(),
                                              rs_mcd.end());
            }
        }
    }

    req.runtime_state->get_unreported_errors(&(params.error_log));
    params.__isset.error_log = (!params.error_log.empty());

    if (_exec_env->cluster_info()->backend_id != 0) {
        params.__set_backend_id(_exec_env->cluster_info()->backend_id);
    }

    // ---- Send the RPC (with one reopen-and-retry on transport failure). ----
    TReportExecStatusResult res;
    Status rpc_status;

    VLOG_DEBUG << "reportExecStatus params is "
               << apache::thrift::ThriftDebugString(params).c_str();
    if (!exec_status.ok()) {
        LOG(WARNING) << "report error status: " << exec_status.msg()
                     << " to coordinator: " << req.coord_addr
                     << ", query id: " << print_id(req.query_id);
    }
    try {
        try {
            (*coord)->reportExecStatus(res, params);
        } catch ([[maybe_unused]] apache::thrift::transport::TTransportException& e) {
#ifndef ADDRESS_SANITIZER
            LOG(WARNING) << "Retrying ReportExecStatus. query id: " << print_id(req.query_id)
                         << ", instance id: " << print_id(req.fragment_instance_id) << " to "
                         << req.coord_addr << ", err: " << e.what();
#endif
            // Re-establish the connection once; cancel the query if even that
            // fails.
            rpc_status = coord->reopen();

            if (!rpc_status.ok()) {
                req.cancel_fn(rpc_status);
                return;
            }
            (*coord)->reportExecStatus(res, params);
        }

        rpc_status = Status::create<false>(res.status);
    } catch (apache::thrift::TException& e) {
        rpc_status = Status::InternalError("ReportExecStatus() to {} failed: {}",
                                           PrintThriftNetworkAddress(req.coord_addr), e.what());
    }

    // An undeliverable report means the FE cannot track us anymore: cancel.
    if (!rpc_status.ok()) {
        LOG_INFO("Going to cancel query {} since report exec status got rpc failed: {}",
                 print_id(req.query_id), rpc_status.to_string());
        req.cancel_fn(rpc_status);
    }
}
2265
2266
0
// Schedule an asynchronous status report to the FE coordinator.
// `done` indicates whether the fragment has finished. Depending on
// _is_report_success / _is_report_on_cancel, the report may be suppressed
// entirely (returns OK) or deferred (returns NeedSendAgain).
Status PipelineFragmentContext::send_report(bool done) {
    Status exec_status = _query_ctx->exec_status();
    // If plan is done successfully, but _is_report_success is false,
    // no need to send report.
    // Load will set _is_report_success to true because load wants to know
    // the process.
    if (!_is_report_success && done && exec_status.ok()) {
        return Status::OK();
    }

    // If both _is_report_success and _is_report_on_cancel are false,
    // which means no matter query is success or failed, no report is needed.
    // This may happen when the query limit reached and
    // a internal cancellation being processed
    // When limit is reached the fragment is also cancelled, but _is_report_on_cancel will
    // be set to false, to avoid sending fault report to FE.
    if (!_is_report_success && !_is_report_on_cancel) {
        if (done) {
            // if done is true, which means the query is finished successfully, we can safely close the fragment instance without sending report to FE, and just return OK status here.
            return Status::OK();
        }
        return Status::NeedSendAgain("");
    }

    // Collect the per-task runtime states; _coordinator_callback falls back to
    // them when the fragment-level state lacks load/commit information.
    std::vector<RuntimeState*> runtime_states;

    for (auto& tasks : _tasks) {
        for (auto& task : tasks) {
            runtime_states.push_back(task.second.get());
        }
    }

    // Prefer URLs/messages already recorded on the query context; otherwise
    // scan this fragment's runtime states.
    std::string load_eror_url = _query_ctx->get_load_error_url().empty()
                                        ? get_load_error_url()
                                        : _query_ctx->get_load_error_url();
    std::string first_error_msg = _query_ctx->get_first_error_msg().empty()
                                          ? get_first_error_msg()
                                          : _query_ctx->get_first_error_msg();

    ReportStatusRequest req {.status = exec_status,
                             .runtime_states = runtime_states,
                             // A failed fragment is always reported as done.
                             .done = done || !exec_status.ok(),
                             .coord_addr = _query_ctx->coord_addr,
                             .query_id = _query_id,
                             .fragment_id = _fragment_id,
                             .fragment_instance_id = TUniqueId(),
                             .backend_num = -1,
                             .runtime_state = _runtime_state.get(),
                             .load_error_url = load_eror_url,
                             .first_error_msg = first_error_msg,
                             .cancel_fn = [this](const Status& reason) { cancel(reason); }};
    // Capture a shared_ptr to this context so it stays alive until the report
    // task finishes on the fragment manager's thread pool.
    auto ctx = std::dynamic_pointer_cast<PipelineFragmentContext>(shared_from_this());
    return _exec_env->fragment_mgr()->get_thread_pool()->submit_func([this, req, ctx]() {
        SCOPED_ATTACH_TASK(ctx->get_query_ctx()->query_mem_tracker());
        _coordinator_callback(req);
        if (!req.done) {
            // Non-final report: arrange the next periodic report.
            ctx->refresh_next_report_time();
        }
    });
}
2326
2327
0
size_t PipelineFragmentContext::get_revocable_size(bool* has_running_task) const {
2328
0
    size_t res = 0;
2329
    // _tasks will be cleared during ~PipelineFragmentContext, so that it's safe
2330
    // here to traverse the vector.
2331
0
    for (const auto& task_instances : _tasks) {
2332
0
        for (const auto& task : task_instances) {
2333
0
            if (task.first->is_running()) {
2334
0
                LOG_EVERY_N(INFO, 50) << "Query: " << print_id(_query_id)
2335
0
                                      << " is running, task: " << (void*)task.first.get()
2336
0
                                      << ", is_running: " << task.first->is_running();
2337
0
                *has_running_task = true;
2338
0
                return 0;
2339
0
            }
2340
2341
0
            size_t revocable_size = task.first->get_revocable_size();
2342
0
            if (revocable_size >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
2343
0
                res += revocable_size;
2344
0
            }
2345
0
        }
2346
0
    }
2347
0
    return res;
2348
0
}
2349
2350
0
std::vector<PipelineTask*> PipelineFragmentContext::get_revocable_tasks() const {
2351
0
    std::vector<PipelineTask*> revocable_tasks;
2352
0
    for (const auto& task_instances : _tasks) {
2353
0
        for (const auto& task : task_instances) {
2354
0
            size_t revocable_size_ = task.first->get_revocable_size();
2355
2356
0
            if (revocable_size_ >= SpillFile::MIN_SPILL_WRITE_BATCH_MEM) {
2357
0
                revocable_tasks.emplace_back(task.first.get());
2358
0
            }
2359
0
        }
2360
0
    }
2361
0
    return revocable_tasks;
2362
0
}
2363
2364
0
std::string PipelineFragmentContext::debug_string() {
2365
0
    std::lock_guard<std::mutex> l(_task_mutex);
2366
0
    fmt::memory_buffer debug_string_buffer;
2367
0
    fmt::format_to(debug_string_buffer,
2368
0
                   "PipelineFragmentContext Info: _closed_tasks={}, _total_tasks={}, "
2369
0
                   "need_notify_close={}, fragment_id={}, _rec_cte_stage={}\n",
2370
0
                   _closed_tasks, _total_tasks, _need_notify_close, _fragment_id, _rec_cte_stage);
2371
0
    for (size_t j = 0; j < _tasks.size(); j++) {
2372
0
        fmt::format_to(debug_string_buffer, "Tasks in instance {}:\n", j);
2373
0
        for (size_t i = 0; i < _tasks[j].size(); i++) {
2374
0
            fmt::format_to(debug_string_buffer, "Task {}: {}\n", i,
2375
0
                           _tasks[j][i].first->debug_string());
2376
0
        }
2377
0
    }
2378
2379
0
    return fmt::to_string(debug_string_buffer);
2380
0
}
2381
2382
std::vector<std::shared_ptr<TRuntimeProfileTree>>
2383
0
PipelineFragmentContext::collect_realtime_profile() const {
2384
0
    std::vector<std::shared_ptr<TRuntimeProfileTree>> res;
2385
2386
    // we do not have mutex to protect pipeline_id_to_profile
2387
    // so we need to make sure this funciton is invoked after fragment context
2388
    // has already been prepared.
2389
0
    if (!_prepared) {
2390
0
        std::string msg =
2391
0
                "Query " + print_id(_query_id) + " collecting profile, but its not prepared";
2392
0
        DCHECK(false) << msg;
2393
0
        LOG_ERROR(msg);
2394
0
        return res;
2395
0
    }
2396
2397
    // Make sure first profile is fragment level profile
2398
0
    auto fragment_profile = std::make_shared<TRuntimeProfileTree>();
2399
0
    _fragment_level_profile->to_thrift(fragment_profile.get(), _runtime_state->profile_level());
2400
0
    res.push_back(fragment_profile);
2401
2402
    // pipeline_id_to_profile is initialized in prepare stage
2403
0
    for (auto pipeline_profile : _runtime_state->pipeline_id_to_profile()) {
2404
0
        auto profile_ptr = std::make_shared<TRuntimeProfileTree>();
2405
0
        pipeline_profile->to_thrift(profile_ptr.get(), _runtime_state->profile_level());
2406
0
        res.push_back(profile_ptr);
2407
0
    }
2408
2409
0
    return res;
2410
0
}
2411
2412
// Merge every task's load channel profile into the fragment-level load channel
// profile, then serialize the merged result to a thrift tree.
// NOTE: despite being const, this updates _runtime_state->load_channel_profile()
// as a side effect of the aggregation. Must only be called after prepare();
// returns nullptr otherwise.
std::shared_ptr<TRuntimeProfileTree>
PipelineFragmentContext::collect_realtime_load_channel_profile() const {
    // we do not have mutex to protect pipeline_id_to_profile
    // so we need to make sure this funciton is invoked after fragment context
    // has already been prepared.
    if (!_prepared) {
        std::string msg =
                "Query " + print_id(_query_id) + " collecting profile, but its not prepared";
        DCHECK(false) << msg;
        LOG_ERROR(msg);
        return nullptr;
    }

    for (const auto& tasks : _tasks) {
        for (const auto& task : tasks) {
            // Not every task has a load channel (only load sinks do).
            if (task.second->load_channel_profile() == nullptr) {
                continue;
            }

            auto tmp_load_channel_profile = std::make_shared<TRuntimeProfileTree>();

            // Serialize the task profile and fold it into the fragment-level
            // load channel profile.
            task.second->load_channel_profile()->to_thrift(tmp_load_channel_profile.get(),
                                                           _runtime_state->profile_level());
            _runtime_state->load_channel_profile()->update(*tmp_load_channel_profile);
        }
    }

    auto load_channel_profile = std::make_shared<TRuntimeProfileTree>();
    _runtime_state->load_channel_profile()->to_thrift(load_channel_profile.get(),
                                                      _runtime_state->profile_level());
    return load_channel_profile;
}
2444
2445
// Collect runtime filter IDs registered by all tasks in this PFC.
2446
// Used during recursive CTE stage transitions to know which filters to deregister
2447
// before creating the new PFC for the next recursion round.
2448
// Called from rerun_fragment(wait_for_destroy) while tasks are still closing.
2449
// Thread safety: safe because _tasks is structurally immutable after prepare() —
2450
// the vector sizes do not change, and individual RuntimeState filter sets are
2451
// written only during open() which has completed by the time we reach rerun.
2452
0
std::set<int> PipelineFragmentContext::get_deregister_runtime_filter() const {
2453
0
    std::set<int> result;
2454
0
    for (const auto& _task : _tasks) {
2455
0
        for (const auto& task : _task) {
2456
0
            auto set = task.first->runtime_state()->get_deregister_runtime_filter();
2457
0
            result.merge(set);
2458
0
        }
2459
0
    }
2460
0
    if (_runtime_state) {
2461
0
        auto set = _runtime_state->get_deregister_runtime_filter();
2462
0
        result.merge(set);
2463
0
    }
2464
0
    return result;
2465
0
}
2466
2467
26
void PipelineFragmentContext::_release_resource() {
2468
26
    std::lock_guard<std::mutex> l(_task_mutex);
2469
    // The memory released by the query end is recorded in the query mem tracker.
2470
26
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_ctx->query_mem_tracker());
2471
26
    auto st = _query_ctx->exec_status();
2472
26
    for (auto& _task : _tasks) {
2473
0
        if (!_task.empty()) {
2474
0
            _call_back(_task.front().first->runtime_state(), &st);
2475
0
        }
2476
0
    }
2477
26
    _tasks.clear();
2478
26
    _dag.clear();
2479
26
    _pip_id_to_pipeline.clear();
2480
26
    _pipelines.clear();
2481
26
    _sink.reset();
2482
26
    _root_op.reset();
2483
26
    _runtime_filter_mgr_map.clear();
2484
26
    _op_id_to_shared_state.clear();
2485
26
}
2486
2487
} // namespace doris