Coverage Report

Created: 2026-03-12 17:15

be/src/runtime/fragment_mgr.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "runtime/fragment_mgr.h"
19
20
#include <brpc/controller.h>
21
#include <bvar/latency_recorder.h>
22
#include <fmt/format.h>
23
#include <gen_cpp/DorisExternalService_types.h>
24
#include <gen_cpp/FrontendService.h>
25
#include <gen_cpp/FrontendService_types.h>
26
#include <gen_cpp/HeartbeatService_types.h>
27
#include <gen_cpp/Metrics_types.h>
28
#include <gen_cpp/PaloInternalService_types.h>
29
#include <gen_cpp/PlanNodes_types.h>
30
#include <gen_cpp/Planner_types.h>
31
#include <gen_cpp/QueryPlanExtra_types.h>
32
#include <gen_cpp/RuntimeProfile_types.h>
33
#include <gen_cpp/Types_types.h>
34
#include <gen_cpp/internal_service.pb.h>
35
#include <pthread.h>
36
#include <sys/time.h>
37
#include <thrift/TApplicationException.h>
38
#include <thrift/Thrift.h>
39
#include <thrift/protocol/TDebugProtocol.h>
40
#include <thrift/transport/TTransportException.h>
41
#include <unistd.h>
42
43
#include <algorithm>
44
#include <cstddef>
45
#include <ctime>
46
47
// IWYU pragma: no_include <bits/chrono.h>
48
#include <chrono> // IWYU pragma: keep
49
#include <cstdint>
50
#include <map>
51
#include <memory>
52
#include <mutex>
53
#include <sstream>
54
#include <unordered_map>
55
#include <unordered_set>
56
#include <utility>
57
58
#include "common/config.h"
59
#include "common/exception.h"
60
#include "common/logging.h"
61
#include "common/metrics/doris_metrics.h"
62
#include "common/object_pool.h"
63
#include "common/status.h"
64
#include "common/utils.h"
65
#include "core/data_type/primitive_type.h"
66
#include "exec/pipeline/pipeline_fragment_context.h"
67
#include "exec/runtime_filter/runtime_filter_consumer.h"
68
#include "exec/runtime_filter/runtime_filter_mgr.h"
69
#include "io/fs/stream_load_pipe.h"
70
#include "load/stream_load/new_load_stream_mgr.h"
71
#include "load/stream_load/stream_load_context.h"
72
#include "load/stream_load/stream_load_executor.h"
73
#include "runtime/descriptors.h"
74
#include "runtime/exec_env.h"
75
#include "runtime/frontend_info.h"
76
#include "runtime/query_context.h"
77
#include "runtime/runtime_profile.h"
78
#include "runtime/runtime_query_statistics_mgr.h"
79
#include "runtime/runtime_state.h"
80
#include "runtime/thread_context.h"
81
#include "runtime/workload_group/workload_group.h"
82
#include "runtime/workload_group/workload_group_manager.h"
83
#include "service/backend_options.h"
84
#include "util/brpc_client_cache.h"
85
#include "util/client_cache.h"
86
#include "util/debug_points.h"
87
#include "util/debug_util.h"
88
#include "util/network_util.h"
89
#include "util/thread.h"
90
#include "util/threadpool.h"
91
#include "util/thrift_util.h"
92
#include "util/uid_util.h"
93
94
namespace doris {
95
#include "common/compile_check_begin.h"
96
97
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(fragment_instance_count, MetricUnit::NOUNIT);
98
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(timeout_canceled_fragment_count, MetricUnit::NOUNIT);
99
100
bvar::LatencyRecorder g_fragmentmgr_prepare_latency("doris_FragmentMgr", "prepare");
101
102
bvar::Adder<uint64_t> g_fragment_executing_count("fragment_executing_count");
103
bvar::Status<uint64_t> g_fragment_last_active_time(
104
        "fragment_last_active_time", duration_cast<std::chrono::milliseconds>(
105
                                             std::chrono::system_clock::now().time_since_epoch())
106
                                             .count());
107
108
880
uint64_t get_fragment_executing_count() {
109
880
    return g_fragment_executing_count.get_value();
110
880
}
111
880
uint64_t get_fragment_last_active_time() {
112
880
    return g_fragment_last_active_time.get_value();
113
880
}
114
115
220
std::string to_load_error_http_path(const std::string& file_name) {
116
220
    if (file_name.empty()) {
117
194
        return "";
118
194
    }
119
26
    if (file_name.compare(0, 4, "http") == 0) {
120
0
        return file_name;
121
0
    }
122
26
    std::stringstream url;
123
26
    url << "http://" << get_host_port(BackendOptions::get_localhost(), config::webserver_port)
124
26
        << "/api/_load_error_log?"
125
26
        << "file=" << file_name;
126
26
    return url.str();
127
26
}
128
129
using apache::thrift::TException;
130
using apache::thrift::transport::TTransportException;
131
132
static Status _do_fetch_running_queries_rpc(const FrontendInfo& fe_info,
133
0
                                            std::unordered_set<TUniqueId>& query_set) {
134
0
    TFetchRunningQueriesResult rpc_result;
135
0
    TFetchRunningQueriesRequest rpc_request;
136
137
0
    Status client_status;
138
0
    const int32_t timeout_ms = 3 * 1000;
139
0
    FrontendServiceConnection rpc_client(ExecEnv::GetInstance()->frontend_client_cache(),
140
0
                                         fe_info.info.coordinator_address, timeout_ms,
141
0
                                         &client_status);
142
    // Abort this fe.
143
0
    if (!client_status.ok()) {
144
0
        LOG_WARNING("Failed to get client for {}, reason is {}",
145
0
                    PrintThriftNetworkAddress(fe_info.info.coordinator_address),
146
0
                    client_status.to_string());
147
0
        return Status::InternalError("Failed to get client for {}, reason is {}",
148
0
                                     PrintThriftNetworkAddress(fe_info.info.coordinator_address),
149
0
                                     client_status.to_string());
150
0
    }
151
152
    // do rpc
153
0
    try {
154
0
        try {
155
0
            rpc_client->fetchRunningQueries(rpc_result, rpc_request);
156
0
        } catch (const apache::thrift::transport::TTransportException& e) {
157
0
            LOG_WARNING("Transport exception reason: {}, reopening", e.what());
158
0
            client_status = rpc_client.reopen(config::thrift_rpc_timeout_ms);
159
0
            if (!client_status.ok()) {
160
0
                LOG_WARNING("Reopen failed, reason: {}", client_status.to_string_no_stack());
161
0
                return Status::InternalError("Reopen failed, reason: {}",
162
0
                                             client_status.to_string_no_stack());
163
0
            }
164
165
0
            rpc_client->fetchRunningQueries(rpc_result, rpc_request);
166
0
        }
167
0
    } catch (apache::thrift::TException& e) {
168
        // During upgrading cluster or meet any other network error.
169
0
        LOG_WARNING("Failed to fetch running queries from {}, reason: {}",
170
0
                    PrintThriftNetworkAddress(fe_info.info.coordinator_address), e.what());
171
0
        return Status::InternalError("Failed to fetch running queries from {}, reason: {}",
172
0
                                     PrintThriftNetworkAddress(fe_info.info.coordinator_address),
173
0
                                     e.what());
174
0
    }
175
176
    // Avoid logic error in frontend.
177
0
    if (!rpc_result.__isset.status || rpc_result.status.status_code != TStatusCode::OK) {
178
0
        LOG_WARNING("Failed to fetch running queries from {}, reason: {}",
179
0
                    PrintThriftNetworkAddress(fe_info.info.coordinator_address),
180
0
                    doris::to_string(rpc_result.status.status_code));
181
0
        return Status::InternalError("Failed to fetch running queries from {}, reason: {}",
182
0
                                     PrintThriftNetworkAddress(fe_info.info.coordinator_address),
183
0
                                     doris::to_string(rpc_result.status.status_code));
184
0
    }
185
186
0
    if (!rpc_result.__isset.running_queries) {
187
0
        return Status::InternalError("Failed to fetch running queries from {}, reason: {}",
188
0
                                     PrintThriftNetworkAddress(fe_info.info.coordinator_address),
189
0
                                     "running_queries is not set");
190
0
    }
191
192
0
    query_set = std::unordered_set<TUniqueId>(rpc_result.running_queries.begin(),
193
0
                                              rpc_result.running_queries.end());
194
0
    return Status::OK();
195
0
};
196
197
0
static std::map<int64_t, std::unordered_set<TUniqueId>> _get_all_running_queries_from_fe() {
198
0
    const std::map<TNetworkAddress, FrontendInfo>& running_fes =
199
0
            ExecEnv::GetInstance()->get_running_frontends();
200
201
0
    std::map<int64_t, std::unordered_set<TUniqueId>> result;
202
0
    std::vector<FrontendInfo> qualified_fes;
203
204
0
    for (const auto& fe : running_fes) {
205
        // Only consider normal frontend.
206
0
        if (fe.first.port != 0 && fe.second.info.process_uuid != 0) {
207
0
            qualified_fes.push_back(fe.second);
208
0
        } else {
209
0
            return {};
210
0
        }
211
0
    }
212
213
0
    for (const auto& fe_addr : qualified_fes) {
214
0
        const int64_t process_uuid = fe_addr.info.process_uuid;
215
0
        std::unordered_set<TUniqueId> query_set;
216
0
        Status st = _do_fetch_running_queries_rpc(fe_addr, query_set);
217
0
        if (!st.ok()) {
218
            // Empty result, cancel worker will not do anything
219
0
            return {};
220
0
        }
221
222
        // frontend_info and process_uuid have been checked in the rpc threads.
223
0
        result[process_uuid] = query_set;
224
0
    }
225
226
0
    return result;
227
0
}
228
229
407k
inline uint32_t get_map_id(const TUniqueId& query_id, size_t capacity) {
230
407k
    uint32_t value = HashUtil::hash(&query_id.lo, 8, 0);
231
407k
    value = HashUtil::hash(&query_id.hi, 8, value);
232
407k
    return value % capacity;
233
407k
}
234
235
314k
inline uint32_t get_map_id(std::pair<TUniqueId, int> key, size_t capacity) {
236
314k
    uint32_t value = HashUtil::hash(&key.first.lo, 8, 0);
237
314k
    value = HashUtil::hash(&key.first.hi, 8, value);
238
314k
    return value % capacity;
239
314k
}
240
241
template <typename Key, typename Value, typename ValueType>
242
39
ConcurrentContextMap<Key, Value, ValueType>::ConcurrentContextMap() {
243
39
    _internal_map.resize(config::num_query_ctx_map_partitions);
244
5.03k
    for (size_t i = 0; i < config::num_query_ctx_map_partitions; i++) {
245
4.99k
        _internal_map[i] = {std::make_unique<std::shared_mutex>(),
246
4.99k
                            phmap::flat_hash_map<Key, Value>()};
247
4.99k
    }
248
39
}
_ZN5doris20ConcurrentContextMapISt4pairINS_9TUniqueIdEiESt10shared_ptrINS_23PipelineFragmentContextEES5_EC2Ev
Line
Count
Source
242
13
ConcurrentContextMap<Key, Value, ValueType>::ConcurrentContextMap() {
243
13
    _internal_map.resize(config::num_query_ctx_map_partitions);
244
1.67k
    for (size_t i = 0; i < config::num_query_ctx_map_partitions; i++) {
245
1.66k
        _internal_map[i] = {std::make_unique<std::shared_mutex>(),
246
1.66k
                            phmap::flat_hash_map<Key, Value>()};
247
1.66k
    }
248
13
}
_ZN5doris20ConcurrentContextMapINS_9TUniqueIdESt8weak_ptrINS_12QueryContextEES3_EC2Ev
Line
Count
Source
242
13
ConcurrentContextMap<Key, Value, ValueType>::ConcurrentContextMap() {
243
13
    _internal_map.resize(config::num_query_ctx_map_partitions);
244
1.67k
    for (size_t i = 0; i < config::num_query_ctx_map_partitions; i++) {
245
1.66k
        _internal_map[i] = {std::make_unique<std::shared_mutex>(),
246
1.66k
                            phmap::flat_hash_map<Key, Value>()};
247
1.66k
    }
248
13
}
_ZN5doris20ConcurrentContextMapINS_9TUniqueIdESt10shared_ptrINS_12QueryContextEES3_EC2Ev
Line
Count
Source
242
13
ConcurrentContextMap<Key, Value, ValueType>::ConcurrentContextMap() {
243
13
    _internal_map.resize(config::num_query_ctx_map_partitions);
244
1.67k
    for (size_t i = 0; i < config::num_query_ctx_map_partitions; i++) {
245
1.66k
        _internal_map[i] = {std::make_unique<std::shared_mutex>(),
246
1.66k
                            phmap::flat_hash_map<Key, Value>()};
247
1.66k
    }
248
13
}
249
250
template <typename Key, typename Value, typename ValueType>
251
261k
Value ConcurrentContextMap<Key, Value, ValueType>::find(const Key& query_id) {
252
261k
    auto id = get_map_id(query_id, _internal_map.size());
253
261k
    {
254
261k
        std::shared_lock lock(*_internal_map[id].first);
255
261k
        auto& map = _internal_map[id].second;
256
261k
        auto search = map.find(query_id);
257
261k
        if (search != map.end()) {
258
71.9k
            return search->second;
259
71.9k
        }
260
189k
        return std::shared_ptr<ValueType>(nullptr);
261
261k
    }
262
261k
}
_ZN5doris20ConcurrentContextMapINS_9TUniqueIdESt8weak_ptrINS_12QueryContextEES3_E4findERKS1_
Line
Count
Source
251
156k
Value ConcurrentContextMap<Key, Value, ValueType>::find(const Key& query_id) {
252
156k
    auto id = get_map_id(query_id, _internal_map.size());
253
156k
    {
254
156k
        std::shared_lock lock(*_internal_map[id].first);
255
156k
        auto& map = _internal_map[id].second;
256
156k
        auto search = map.find(query_id);
257
156k
        if (search != map.end()) {
258
71.9k
            return search->second;
259
71.9k
        }
260
84.7k
        return std::shared_ptr<ValueType>(nullptr);
261
156k
    }
262
156k
}
_ZN5doris20ConcurrentContextMapISt4pairINS_9TUniqueIdEiESt10shared_ptrINS_23PipelineFragmentContextEES5_E4findERKS3_
Line
Count
Source
251
104k
Value ConcurrentContextMap<Key, Value, ValueType>::find(const Key& query_id) {
252
104k
    auto id = get_map_id(query_id, _internal_map.size());
253
104k
    {
254
104k
        std::shared_lock lock(*_internal_map[id].first);
255
104k
        auto& map = _internal_map[id].second;
256
104k
        auto search = map.find(query_id);
257
104k
        if (search != map.end()) {
258
0
            return search->second;
259
0
        }
260
104k
        return std::shared_ptr<ValueType>(nullptr);
261
104k
    }
262
104k
}
263
264
template <typename Key, typename Value, typename ValueType>
265
Status ConcurrentContextMap<Key, Value, ValueType>::apply_if_not_exists(
266
73.5k
        const Key& query_id, std::shared_ptr<ValueType>& query_ctx, ApplyFunction&& function) {
267
73.5k
    auto id = get_map_id(query_id, _internal_map.size());
268
73.5k
    {
269
73.5k
        std::unique_lock lock(*_internal_map[id].first);
270
73.5k
        auto& map = _internal_map[id].second;
271
73.5k
        auto search = map.find(query_id);
272
73.5k
        if (search != map.end()) {
273
0
            query_ctx = search->second.lock();
274
0
        }
275
73.6k
        if (!query_ctx) {
276
73.6k
            return function(map);
277
73.6k
        }
278
18.4E
        return Status::OK();
279
73.5k
    }
280
73.5k
}
281
282
template <typename Key, typename Value, typename ValueType>
283
281k
bool ConcurrentContextMap<Key, Value, ValueType>::erase(const Key& query_id) {
284
281k
    auto id = get_map_id(query_id, _internal_map.size());
285
281k
    std::unique_lock lock(*_internal_map[id].first);
286
281k
    auto& map = _internal_map[id].second;
287
281k
    return map.erase(query_id) != 0;
288
281k
}
_ZN5doris20ConcurrentContextMapISt4pairINS_9TUniqueIdEiESt10shared_ptrINS_23PipelineFragmentContextEES5_E5eraseERKS3_
Line
Count
Source
283
104k
bool ConcurrentContextMap<Key, Value, ValueType>::erase(const Key& query_id) {
284
104k
    auto id = get_map_id(query_id, _internal_map.size());
285
104k
    std::unique_lock lock(*_internal_map[id].first);
286
104k
    auto& map = _internal_map[id].second;
287
104k
    return map.erase(query_id) != 0;
288
104k
}
_ZN5doris20ConcurrentContextMapINS_9TUniqueIdESt10shared_ptrINS_12QueryContextEES3_E5eraseERKS1_
Line
Count
Source
283
88.4k
bool ConcurrentContextMap<Key, Value, ValueType>::erase(const Key& query_id) {
284
88.4k
    auto id = get_map_id(query_id, _internal_map.size());
285
88.4k
    std::unique_lock lock(*_internal_map[id].first);
286
88.4k
    auto& map = _internal_map[id].second;
287
88.4k
    return map.erase(query_id) != 0;
288
88.4k
}
_ZN5doris20ConcurrentContextMapINS_9TUniqueIdESt8weak_ptrINS_12QueryContextEES3_E5eraseERKS1_
Line
Count
Source
283
88.5k
bool ConcurrentContextMap<Key, Value, ValueType>::erase(const Key& query_id) {
284
88.5k
    auto id = get_map_id(query_id, _internal_map.size());
285
88.5k
    std::unique_lock lock(*_internal_map[id].first);
286
88.5k
    auto& map = _internal_map[id].second;
287
88.5k
    return map.erase(query_id) != 0;
288
88.5k
}
289
290
template <typename Key, typename Value, typename ValueType>
291
void ConcurrentContextMap<Key, Value, ValueType>::insert(const Key& query_id,
292
105k
                                                         std::shared_ptr<ValueType> query_ctx) {
293
105k
    auto id = get_map_id(query_id, _internal_map.size());
294
105k
    {
295
105k
        std::unique_lock lock(*_internal_map[id].first);
296
105k
        auto& map = _internal_map[id].second;
297
105k
        map.insert({query_id, query_ctx});
298
105k
    }
299
105k
}
_ZN5doris20ConcurrentContextMapINS_9TUniqueIdESt10shared_ptrINS_12QueryContextEES3_E6insertERKS1_S4_
Line
Count
Source
292
570
                                                         std::shared_ptr<ValueType> query_ctx) {
293
570
    auto id = get_map_id(query_id, _internal_map.size());
294
570
    {
295
570
        std::unique_lock lock(*_internal_map[id].first);
296
570
        auto& map = _internal_map[id].second;
297
570
        map.insert({query_id, query_ctx});
298
570
    }
299
570
}
_ZN5doris20ConcurrentContextMapISt4pairINS_9TUniqueIdEiESt10shared_ptrINS_23PipelineFragmentContextEES5_E6insertERKS3_S6_
Line
Count
Source
292
104k
                                                         std::shared_ptr<ValueType> query_ctx) {
293
104k
    auto id = get_map_id(query_id, _internal_map.size());
294
104k
    {
295
104k
        std::unique_lock lock(*_internal_map[id].first);
296
104k
        auto& map = _internal_map[id].second;
297
104k
        map.insert({query_id, query_ctx});
298
104k
    }
299
104k
}
300
301
template <typename Key, typename Value, typename ValueType>
302
30
void ConcurrentContextMap<Key, Value, ValueType>::clear() {
303
3.84k
    for (auto& pair : _internal_map) {
304
3.84k
        std::unique_lock lock(*pair.first);
305
3.84k
        auto& map = pair.second;
306
3.84k
        map.clear();
307
3.84k
    }
308
30
}
_ZN5doris20ConcurrentContextMapINS_9TUniqueIdESt8weak_ptrINS_12QueryContextEES3_E5clearEv
Line
Count
Source
302
10
void ConcurrentContextMap<Key, Value, ValueType>::clear() {
303
1.28k
    for (auto& pair : _internal_map) {
304
1.28k
        std::unique_lock lock(*pair.first);
305
1.28k
        auto& map = pair.second;
306
1.28k
        map.clear();
307
1.28k
    }
308
10
}
_ZN5doris20ConcurrentContextMapINS_9TUniqueIdESt10shared_ptrINS_12QueryContextEES3_E5clearEv
Line
Count
Source
302
10
void ConcurrentContextMap<Key, Value, ValueType>::clear() {
303
1.28k
    for (auto& pair : _internal_map) {
304
1.28k
        std::unique_lock lock(*pair.first);
305
1.28k
        auto& map = pair.second;
306
1.28k
        map.clear();
307
1.28k
    }
308
10
}
_ZN5doris20ConcurrentContextMapISt4pairINS_9TUniqueIdEiESt10shared_ptrINS_23PipelineFragmentContextEES5_E5clearEv
Line
Count
Source
302
10
void ConcurrentContextMap<Key, Value, ValueType>::clear() {
303
1.28k
    for (auto& pair : _internal_map) {
304
1.28k
        std::unique_lock lock(*pair.first);
305
1.28k
        auto& map = pair.second;
306
1.28k
        map.clear();
307
1.28k
    }
308
10
}
309
310
FragmentMgr::FragmentMgr(ExecEnv* exec_env)
311
13
        : _exec_env(exec_env), _stop_background_threads_latch(1) {
312
13
    _entity = DorisMetrics::instance()->metric_registry()->register_entity("FragmentMgr");
313
13
    INT_UGAUGE_METRIC_REGISTER(_entity, timeout_canceled_fragment_count);
314
315
13
    auto s = Thread::create(
316
13
            "FragmentMgr", "cancel_timeout_plan_fragment", [this]() { this->cancel_worker(); },
317
13
            &_cancel_thread);
318
13
    CHECK(s.ok()) << s.to_string();
319
320
13
    s = ThreadPoolBuilder("FragmentMgrAsyncWorkThreadPool")
321
13
                .set_min_threads(config::fragment_mgr_async_work_pool_thread_num_min)
322
13
                .set_max_threads(config::fragment_mgr_async_work_pool_thread_num_max)
323
13
                .set_max_queue_size(config::fragment_mgr_async_work_pool_queue_size)
324
13
                .build(&_thread_pool);
325
13
    CHECK(s.ok()) << s.to_string();
326
13
}
327
328
10
FragmentMgr::~FragmentMgr() = default;
329
330
10
void FragmentMgr::stop() {
331
10
    DEREGISTER_HOOK_METRIC(fragment_instance_count);
332
10
    _stop_background_threads_latch.count_down();
333
10
    if (_cancel_thread) {
334
10
        _cancel_thread->join();
335
10
    }
336
337
10
    _thread_pool->shutdown();
338
    // Only me can delete
339
10
    _query_ctx_map.clear();
340
    // in one BE's graceful shutdown, cancel_worker will get related running queries via _get_all_running_queries_from_fe and cancel them.
341
    // so clearing here will not make the RF consumer hang. if we don't do this, in ~FragmentMgr() a QueryContext held in _query_ctx_map_delay_delete may be
342
    // destructed and try to remove itself from _query_ctx_map_delay_delete while that map is itself being destructed. that's UB.
343
10
    _query_ctx_map_delay_delete.clear();
344
10
    _pipeline_map.clear();
345
10
    {
346
10
        std::lock_guard<std::mutex> lk(_rerunnable_params_lock);
347
10
        _rerunnable_params_map.clear();
348
10
    }
349
10
}
350
351
0
std::string FragmentMgr::to_http_path(const std::string& file_name) {
352
0
    std::stringstream url;
353
0
    url << "http://" << BackendOptions::get_localhost() << ":" << config::webserver_port
354
0
        << "/api/_download_load?"
355
0
        << "token=" << _exec_env->token() << "&file=" << file_name;
356
0
    return url.str();
357
0
}
358
359
Status FragmentMgr::trigger_pipeline_context_report(
360
10.6k
        const ReportStatusRequest req, std::shared_ptr<PipelineFragmentContext>&& ctx) {
361
10.6k
    return _thread_pool->submit_func([this, req, ctx]() {
362
10.6k
        SCOPED_ATTACH_TASK(ctx->get_query_ctx()->query_mem_tracker());
363
10.6k
        coordinator_callback(req);
364
10.6k
        if (!req.done) {
365
1.14k
            ctx->refresh_next_report_time();
366
1.14k
        }
367
10.6k
    });
368
10.6k
}
369
370
// There can only be one of these callbacks in-flight at any moment, because
371
// it is only invoked from the executor's reporting thread.
372
// Also, the reported status will always reflect the most recent execution status,
373
// including the final status when execution finishes.
374
10.6k
void FragmentMgr::coordinator_callback(const ReportStatusRequest& req) {
375
10.6k
    DBUG_EXECUTE_IF("FragmentMgr::coordinator_callback.report_delay", {
376
10.6k
        int random_seconds = req.status.is<ErrorCode::DATA_QUALITY_ERROR>() ? 8 : 2;
377
10.6k
        LOG_INFO("sleep : ").tag("time", random_seconds).tag("query_id", print_id(req.query_id));
378
10.6k
        std::this_thread::sleep_for(std::chrono::seconds(random_seconds));
379
10.6k
        LOG_INFO("sleep done").tag("query_id", print_id(req.query_id));
380
10.6k
    });
381
382
10.6k
    DCHECK(req.status.ok() || req.done); // if !status.ok() => done
383
10.6k
    if (req.coord_addr.hostname == "external") {
384
        // External query (flink/spark read tablets) does not need to report to FE.
385
0
        return;
386
0
    }
387
10.6k
    int callback_retries = 10;
388
10.6k
    const int sleep_ms = 1000;
389
10.6k
    Status exec_status = req.status;
390
10.6k
    Status coord_status;
391
10.6k
    std::unique_ptr<FrontendServiceConnection> coord = nullptr;
392
10.6k
    do {
393
10.6k
        coord = std::make_unique<FrontendServiceConnection>(_exec_env->frontend_client_cache(),
394
10.6k
                                                            req.coord_addr, &coord_status);
395
10.6k
        if (!coord_status.ok()) {
396
0
            std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
397
0
        }
398
10.6k
    } while (!coord_status.ok() && callback_retries-- > 0);
399
400
10.6k
    if (!coord_status.ok()) {
401
0
        std::stringstream ss;
402
0
        UniqueId uid(req.query_id.hi, req.query_id.lo);
403
0
        static_cast<void>(req.cancel_fn(Status::InternalError(
404
0
                "query_id: {}, couldn't get a client for {}, reason is {}", uid.to_string(),
405
0
                PrintThriftNetworkAddress(req.coord_addr), coord_status.to_string())));
406
0
        return;
407
0
    }
408
409
10.6k
    TReportExecStatusParams params;
410
10.6k
    params.protocol_version = FrontendServiceVersion::V1;
411
10.6k
    params.__set_query_id(req.query_id);
412
10.6k
    params.__set_backend_num(req.backend_num);
413
10.6k
    params.__set_fragment_instance_id(req.fragment_instance_id);
414
10.6k
    params.__set_fragment_id(req.fragment_id);
415
10.6k
    params.__set_status(exec_status.to_thrift());
416
10.6k
    params.__set_done(req.done);
417
10.6k
    params.__set_query_type(req.runtime_state->query_type());
418
10.6k
    params.__isset.profile = false;
419
420
10.6k
    DCHECK(req.runtime_state != nullptr);
421
422
10.6k
    if (req.runtime_state->query_type() == TQueryType::LOAD) {
423
9.94k
        params.__set_loaded_rows(req.runtime_state->num_rows_load_total());
424
9.94k
        params.__set_loaded_bytes(req.runtime_state->num_bytes_load_total());
425
9.94k
    } else {
426
684
        DCHECK(!req.runtime_states.empty());
427
684
        if (!req.runtime_state->output_files().empty()) {
428
0
            params.__isset.delta_urls = true;
429
0
            for (auto& it : req.runtime_state->output_files()) {
430
0
                params.delta_urls.push_back(to_http_path(it));
431
0
            }
432
0
        }
433
684
        if (!params.delta_urls.empty()) {
434
0
            params.__isset.delta_urls = true;
435
0
        }
436
684
    }
437
438
    // load rows
439
10.6k
    static std::string s_dpp_normal_all = "dpp.norm.ALL";
440
10.6k
    static std::string s_dpp_abnormal_all = "dpp.abnorm.ALL";
441
10.6k
    static std::string s_unselected_rows = "unselected.rows";
442
10.6k
    int64_t num_rows_load_success = 0;
443
10.6k
    int64_t num_rows_load_filtered = 0;
444
10.6k
    int64_t num_rows_load_unselected = 0;
445
10.6k
    if (req.runtime_state->num_rows_load_total() > 0 ||
446
10.6k
        req.runtime_state->num_rows_load_filtered() > 0 ||
447
10.6k
        req.runtime_state->num_finished_range() > 0) {
448
0
        params.__isset.load_counters = true;
449
450
0
        num_rows_load_success = req.runtime_state->num_rows_load_success();
451
0
        num_rows_load_filtered = req.runtime_state->num_rows_load_filtered();
452
0
        num_rows_load_unselected = req.runtime_state->num_rows_load_unselected();
453
0
        params.__isset.fragment_instance_reports = true;
454
0
        TFragmentInstanceReport t;
455
0
        t.__set_fragment_instance_id(req.runtime_state->fragment_instance_id());
456
0
        t.__set_num_finished_range(cast_set<int>(req.runtime_state->num_finished_range()));
457
0
        t.__set_loaded_rows(req.runtime_state->num_rows_load_total());
458
0
        t.__set_loaded_bytes(req.runtime_state->num_bytes_load_total());
459
0
        params.fragment_instance_reports.push_back(t);
460
10.6k
    } else if (!req.runtime_states.empty()) {
461
37.0k
        for (auto* rs : req.runtime_states) {
462
37.0k
            if (rs->num_rows_load_total() > 0 || rs->num_rows_load_filtered() > 0 ||
463
37.0k
                rs->num_finished_range() > 0) {
464
5.34k
                params.__isset.load_counters = true;
465
5.34k
                num_rows_load_success += rs->num_rows_load_success();
466
5.34k
                num_rows_load_filtered += rs->num_rows_load_filtered();
467
5.34k
                num_rows_load_unselected += rs->num_rows_load_unselected();
468
5.34k
                params.__isset.fragment_instance_reports = true;
469
5.34k
                TFragmentInstanceReport t;
470
5.34k
                t.__set_fragment_instance_id(rs->fragment_instance_id());
471
5.34k
                t.__set_num_finished_range(cast_set<int>(rs->num_finished_range()));
472
5.34k
                t.__set_loaded_rows(rs->num_rows_load_total());
473
5.34k
                t.__set_loaded_bytes(rs->num_bytes_load_total());
474
5.34k
                params.fragment_instance_reports.push_back(t);
475
5.34k
            }
476
37.0k
        }
477
10.6k
    }
478
10.6k
    params.load_counters.emplace(s_dpp_normal_all, std::to_string(num_rows_load_success));
479
10.6k
    params.load_counters.emplace(s_dpp_abnormal_all, std::to_string(num_rows_load_filtered));
480
10.6k
    params.load_counters.emplace(s_unselected_rows, std::to_string(num_rows_load_unselected));
481
482
10.6k
    if (!req.load_error_url.empty()) {
483
12
        params.__set_tracking_url(req.load_error_url);
484
12
    }
485
10.6k
    if (!req.first_error_msg.empty()) {
486
12
        params.__set_first_error_msg(req.first_error_msg);
487
12
    }
488
37.0k
    for (auto* rs : req.runtime_states) {
489
37.0k
        if (rs->wal_id() > 0) {
490
0
            params.__set_txn_id(rs->wal_id());
491
0
            params.__set_label(rs->import_label());
492
0
        }
493
37.0k
    }
494
10.6k
    if (!req.runtime_state->export_output_files().empty()) {
495
0
        params.__isset.export_files = true;
496
0
        params.export_files = req.runtime_state->export_output_files();
497
10.6k
    } else if (!req.runtime_states.empty()) {
498
37.0k
        for (auto* rs : req.runtime_states) {
499
37.0k
            if (!rs->export_output_files().empty()) {
500
0
                params.__isset.export_files = true;
501
0
                params.export_files.insert(params.export_files.end(),
502
0
                                           rs->export_output_files().begin(),
503
0
                                           rs->export_output_files().end());
504
0
            }
505
37.0k
        }
506
10.6k
    }
507
10.6k
    if (auto tci = req.runtime_state->tablet_commit_infos(); !tci.empty()) {
508
0
        params.__isset.commitInfos = true;
509
0
        params.commitInfos.insert(params.commitInfos.end(), tci.begin(), tci.end());
510
10.6k
    } else if (!req.runtime_states.empty()) {
511
37.0k
        for (auto* rs : req.runtime_states) {
512
37.0k
            if (auto rs_tci = rs->tablet_commit_infos(); !rs_tci.empty()) {
513
2.03k
                params.__isset.commitInfos = true;
514
2.03k
                params.commitInfos.insert(params.commitInfos.end(), rs_tci.begin(), rs_tci.end());
515
2.03k
            }
516
37.0k
        }
517
10.6k
    }
518
10.6k
    if (auto eti = req.runtime_state->error_tablet_infos(); !eti.empty()) {
519
0
        params.__isset.errorTabletInfos = true;
520
0
        params.errorTabletInfos.insert(params.errorTabletInfos.end(), eti.begin(), eti.end());
521
10.6k
    } else if (!req.runtime_states.empty()) {
522
37.0k
        for (auto* rs : req.runtime_states) {
523
37.0k
            if (auto rs_eti = rs->error_tablet_infos(); !rs_eti.empty()) {
524
0
                params.__isset.errorTabletInfos = true;
525
0
                params.errorTabletInfos.insert(params.errorTabletInfos.end(), rs_eti.begin(),
526
0
                                               rs_eti.end());
527
0
            }
528
37.0k
        }
529
10.6k
    }
530
10.6k
    if (auto hpu = req.runtime_state->hive_partition_updates(); !hpu.empty()) {
531
0
        params.__isset.hive_partition_updates = true;
532
0
        params.hive_partition_updates.insert(params.hive_partition_updates.end(), hpu.begin(),
533
0
                                             hpu.end());
534
10.6k
    } else if (!req.runtime_states.empty()) {
535
37.0k
        for (auto* rs : req.runtime_states) {
536
37.0k
            if (auto rs_hpu = rs->hive_partition_updates(); !rs_hpu.empty()) {
537
2.12k
                params.__isset.hive_partition_updates = true;
538
2.12k
                params.hive_partition_updates.insert(params.hive_partition_updates.end(),
539
2.12k
                                                     rs_hpu.begin(), rs_hpu.end());
540
2.12k
            }
541
37.0k
        }
542
10.6k
    }
543
10.6k
    if (auto icd = req.runtime_state->iceberg_commit_datas(); !icd.empty()) {
544
0
        params.__isset.iceberg_commit_datas = true;
545
0
        params.iceberg_commit_datas.insert(params.iceberg_commit_datas.end(), icd.begin(),
546
0
                                           icd.end());
547
10.6k
    } else if (!req.runtime_states.empty()) {
548
37.0k
        for (auto* rs : req.runtime_states) {
549
37.0k
            if (auto rs_icd = rs->iceberg_commit_datas(); !rs_icd.empty()) {
550
1.75k
                params.__isset.iceberg_commit_datas = true;
551
1.75k
                params.iceberg_commit_datas.insert(params.iceberg_commit_datas.end(),
552
1.75k
                                                   rs_icd.begin(), rs_icd.end());
553
1.75k
            }
554
37.0k
        }
555
10.6k
    }
556
557
10.6k
    if (auto mcd = req.runtime_state->mc_commit_datas(); !mcd.empty()) {
558
0
        params.__isset.mc_commit_datas = true;
559
0
        params.mc_commit_datas.insert(params.mc_commit_datas.end(), mcd.begin(), mcd.end());
560
10.6k
    } else if (!req.runtime_states.empty()) {
561
37.0k
        for (auto* rs : req.runtime_states) {
562
37.0k
            if (auto rs_mcd = rs->mc_commit_datas(); !rs_mcd.empty()) {
563
0
                params.__isset.mc_commit_datas = true;
564
0
                params.mc_commit_datas.insert(params.mc_commit_datas.end(), rs_mcd.begin(),
565
0
                                              rs_mcd.end());
566
0
            }
567
37.0k
        }
568
10.6k
    }
569
570
    // Send new errors to coordinator
571
10.6k
    req.runtime_state->get_unreported_errors(&(params.error_log));
572
10.6k
    params.__isset.error_log = (!params.error_log.empty());
573
574
10.6k
    if (_exec_env->cluster_info()->backend_id != 0) {
575
10.6k
        params.__set_backend_id(_exec_env->cluster_info()->backend_id);
576
10.6k
    }
577
578
10.6k
    TReportExecStatusResult res;
579
10.6k
    Status rpc_status;
580
581
10.6k
    VLOG_DEBUG << "reportExecStatus params is "
582
0
               << apache::thrift::ThriftDebugString(params).c_str();
583
10.6k
    if (!exec_status.ok()) {
584
108
        LOG(WARNING) << "report error status: " << exec_status.msg()
585
108
                     << " to coordinator: " << req.coord_addr
586
108
                     << ", query id: " << print_id(req.query_id);
587
108
    }
588
10.6k
    try {
589
10.6k
        try {
590
10.6k
            (*coord)->reportExecStatus(res, params);
591
10.6k
        } catch ([[maybe_unused]] TTransportException& e) {
592
#ifndef ADDRESS_SANITIZER
593
            LOG(WARNING) << "Retrying ReportExecStatus. query id: " << print_id(req.query_id)
594
                         << ", instance id: " << print_id(req.fragment_instance_id) << " to "
595
                         << req.coord_addr << ", err: " << e.what();
596
#endif
597
0
            rpc_status = coord->reopen();
598
599
0
            if (!rpc_status.ok()) {
600
                // we need to cancel the execution of this fragment
601
0
                req.cancel_fn(rpc_status);
602
0
                return;
603
0
            }
604
0
            (*coord)->reportExecStatus(res, params);
605
0
        }
606
607
10.6k
        rpc_status = Status::create<false>(res.status);
608
10.6k
    } catch (TException& e) {
609
0
        rpc_status = Status::InternalError("ReportExecStatus() to {} failed: {}",
610
0
                                           PrintThriftNetworkAddress(req.coord_addr), e.what());
611
0
    }
612
613
10.6k
    if (!rpc_status.ok()) {
614
0
        LOG_INFO("Going to cancel query {} since report exec status got rpc failed: {}",
615
0
                 print_id(req.query_id), rpc_status.to_string());
616
        // we need to cancel the execution of this fragment
617
0
        req.cancel_fn(rpc_status);
618
0
    }
619
10.6k
}
620
621
227k
static void empty_function(RuntimeState*, Status*) {}
622
623
Status FragmentMgr::exec_plan_fragment(const TPipelineFragmentParams& params,
624
                                       const QuerySource query_source,
625
104k
                                       const TPipelineFragmentParamsList& parent) {
626
104k
    if (params.txn_conf.need_txn) {
627
0
        std::shared_ptr<StreamLoadContext> stream_load_ctx =
628
0
                std::make_shared<StreamLoadContext>(_exec_env);
629
0
        stream_load_ctx->db = params.txn_conf.db;
630
0
        stream_load_ctx->db_id = params.txn_conf.db_id;
631
0
        stream_load_ctx->table = params.txn_conf.tbl;
632
0
        stream_load_ctx->txn_id = params.txn_conf.txn_id;
633
0
        stream_load_ctx->id = UniqueId(params.query_id);
634
0
        stream_load_ctx->put_result.__set_pipeline_params(params);
635
0
        stream_load_ctx->use_streaming = true;
636
0
        stream_load_ctx->load_type = TLoadType::MANUL_LOAD;
637
0
        stream_load_ctx->load_src_type = TLoadSourceType::RAW;
638
0
        stream_load_ctx->label = params.import_label;
639
0
        stream_load_ctx->format = TFileFormatType::FORMAT_CSV_PLAIN;
640
0
        stream_load_ctx->timeout_second = 3600;
641
0
        stream_load_ctx->auth.token = params.txn_conf.token;
642
0
        stream_load_ctx->need_commit_self = true;
643
0
        stream_load_ctx->need_rollback = true;
644
0
        auto pipe = std::make_shared<io::StreamLoadPipe>(
645
0
                io::kMaxPipeBufferedBytes /* max_buffered_bytes */, 64 * 1024 /* min_chunk_size */,
646
0
                -1 /* total_length */, true /* use_proto */);
647
0
        stream_load_ctx->body_sink = pipe;
648
0
        stream_load_ctx->pipe = pipe;
649
0
        stream_load_ctx->max_filter_ratio = params.txn_conf.max_filter_ratio;
650
651
0
        RETURN_IF_ERROR(
652
0
                _exec_env->new_load_stream_mgr()->put(stream_load_ctx->id, stream_load_ctx));
653
654
0
        RETURN_IF_ERROR(
655
0
                _exec_env->stream_load_executor()->execute_plan_fragment(stream_load_ctx, parent));
656
0
        return Status::OK();
657
104k
    } else {
658
104k
        return exec_plan_fragment(params, query_source, empty_function, parent);
659
104k
    }
660
104k
}
661
662
// Stage 2. Prepare finished; the FE now instructs us to start execution.
663
22.1k
Status FragmentMgr::start_query_execution(const PExecPlanFragmentStartRequest* request) {
664
22.1k
    TUniqueId query_id;
665
22.1k
    query_id.__set_hi(request->query_id().hi());
666
22.1k
    query_id.__set_lo(request->query_id().lo());
667
22.1k
    auto q_ctx = get_query_ctx(query_id);
668
22.1k
    if (q_ctx) {
669
22.1k
        q_ctx->set_ready_to_execute(Status::OK());
670
22.1k
        LOG_INFO("Query {} start execution", print_id(query_id));
671
22.1k
    } else {
672
0
        return Status::InternalError(
673
0
                "Failed to get query fragments context. Query {} may be "
674
0
                "timeout or be cancelled. host: {}",
675
0
                print_id(query_id), BackendOptions::get_localhost());
676
0
    }
677
22.1k
    return Status::OK();
678
22.1k
}
679
680
104k
void FragmentMgr::remove_pipeline_context(std::pair<TUniqueId, int> key) {
681
104k
    if (_pipeline_map.erase(key)) {
682
104k
        int64_t now = duration_cast<std::chrono::milliseconds>(
683
104k
                              std::chrono::system_clock::now().time_since_epoch())
684
104k
                              .count();
685
104k
        g_fragment_executing_count << -1;
686
104k
        g_fragment_last_active_time.set_value(now);
687
104k
    }
688
104k
}
689
690
88.4k
void FragmentMgr::remove_query_context(const TUniqueId& key) {
691
    // Clean up any saved rerunnable params for this query to avoid memory leaks.
692
    // This covers both cancel and normal destruction paths.
693
88.4k
    {
694
88.4k
        std::lock_guard<std::mutex> lk(_rerunnable_params_lock);
695
88.4k
        for (auto it = _rerunnable_params_map.begin(); it != _rerunnable_params_map.end();) {
696
0
            if (it->first.first == key) {
697
0
                it = _rerunnable_params_map.erase(it);
698
0
            } else {
699
0
                ++it;
700
0
            }
701
0
        }
702
88.4k
    }
703
88.4k
    _query_ctx_map_delay_delete.erase(key);
704
88.4k
#ifndef BE_TEST
705
88.4k
    _query_ctx_map.erase(key);
706
88.4k
#endif
707
88.4k
}
708
709
156k
std::shared_ptr<QueryContext> FragmentMgr::get_query_ctx(const TUniqueId& query_id) {
710
156k
    auto val = _query_ctx_map.find(query_id);
711
156k
    if (auto q_ctx = val.lock()) {
712
70.8k
        return q_ctx;
713
70.8k
    }
714
85.9k
    return nullptr;
715
156k
}
716
717
Status FragmentMgr::_get_or_create_query_ctx(const TPipelineFragmentParams& params,
718
                                             const TPipelineFragmentParamsList& parent,
719
                                             QuerySource query_source,
720
104k
                                             std::shared_ptr<QueryContext>& query_ctx) {
721
104k
    auto query_id = params.query_id;
722
104k
    DBUG_EXECUTE_IF("FragmentMgr._get_query_ctx.failed", {
723
104k
        return Status::InternalError("FragmentMgr._get_query_ctx.failed, query id {}",
724
104k
                                     print_id(query_id));
725
104k
    });
726
727
    // Find _query_ctx_map, in case some other request has already
728
    // create the query fragments context.
729
104k
    query_ctx = get_query_ctx(query_id);
730
104k
    if (params.is_simplified_param) {
731
        // Get common components from _query_ctx_map
732
31.1k
        if (!query_ctx) {
733
0
            return Status::InternalError(
734
0
                    "Failed to get query fragments context. Query {} may be timeout or be "
735
0
                    "cancelled. host: {}",
736
0
                    print_id(query_id), BackendOptions::get_localhost());
737
0
        }
738
73.6k
    } else {
739
73.6k
        if (!query_ctx) {
740
73.6k
            RETURN_IF_ERROR(_query_ctx_map.apply_if_not_exists(
741
73.6k
                    query_id, query_ctx,
742
73.6k
                    [&](phmap::flat_hash_map<TUniqueId, std::weak_ptr<QueryContext>>& map)
743
73.6k
                            -> Status {
744
73.6k
                        WorkloadGroupPtr workload_group_ptr = nullptr;
745
73.6k
                        std::vector<uint64_t> wg_id_set;
746
73.6k
                        if (params.__isset.workload_groups && !params.workload_groups.empty()) {
747
73.6k
                            for (auto& wg : params.workload_groups) {
748
73.6k
                                wg_id_set.push_back(wg.id);
749
73.6k
                            }
750
73.6k
                        }
751
73.6k
                        workload_group_ptr = _exec_env->workload_group_mgr()->get_group(wg_id_set);
752
753
                        // The first fragment of this query has arrived; print logs.
754
73.6k
                        LOG(INFO) << "query_id: " << print_id(query_id)
755
73.6k
                                  << ", coord_addr: " << params.coord
756
73.6k
                                  << ", total fragment num on current host: "
757
73.6k
                                  << params.fragment_num_on_host
758
73.6k
                                  << ", fe process uuid: " << params.query_options.fe_process_uuid
759
73.6k
                                  << ", query type: " << params.query_options.query_type
760
73.6k
                                  << ", report audit fe:" << params.current_connect_fe
761
73.6k
                                  << ", use wg:" << workload_group_ptr->id() << ","
762
73.6k
                                  << workload_group_ptr->name();
763
764
                        // This may be the first fragment request of the query.
765
                        // Create the query fragments context.
766
73.6k
                        query_ctx = QueryContext::create(query_id, _exec_env, params.query_options,
767
73.6k
                                                         params.coord, params.is_nereids,
768
73.6k
                                                         params.current_connect_fe, query_source);
769
73.6k
                        SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(query_ctx->query_mem_tracker());
770
73.6k
                        RETURN_IF_ERROR(DescriptorTbl::create(
771
73.6k
                                &(query_ctx->obj_pool), params.desc_tbl, &(query_ctx->desc_tbl)));
772
                        // set file scan range params
773
73.6k
                        if (params.__isset.file_scan_params) {
774
73.6k
                            query_ctx->file_scan_range_params_map = params.file_scan_params;
775
73.6k
                        }
776
777
73.6k
                        query_ctx->query_globals = params.query_globals;
778
779
73.6k
                        if (params.__isset.resource_info) {
780
73.6k
                            query_ctx->user = params.resource_info.user;
781
73.6k
                            query_ctx->group = params.resource_info.group;
782
73.6k
                            query_ctx->set_rsc_info = true;
783
73.6k
                        }
784
785
73.6k
                        if (params.__isset.ai_resources) {
786
73.6k
                            query_ctx->set_ai_resources(params.ai_resources);
787
73.6k
                        }
788
789
73.6k
                        RETURN_IF_ERROR(query_ctx->set_workload_group(workload_group_ptr));
790
791
73.6k
                        if (parent.__isset.runtime_filter_info) {
792
73.6k
                            auto info = parent.runtime_filter_info;
793
73.6k
                            if (info.__isset.runtime_filter_params) {
794
73.6k
                                auto handler =
795
73.6k
                                        std::make_shared<RuntimeFilterMergeControllerEntity>();
796
73.6k
                                RETURN_IF_ERROR(
797
73.6k
                                        handler->init(query_ctx, info.runtime_filter_params));
798
73.6k
                                query_ctx->set_merge_controller_handler(handler);
799
800
73.6k
                                query_ctx->runtime_filter_mgr()->set_runtime_filter_params(
801
73.6k
                                        info.runtime_filter_params);
802
73.6k
                                if (!handler->empty()) {
803
73.6k
                                    _query_ctx_map_delay_delete.insert(query_id, query_ctx);
804
73.6k
                                }
805
73.6k
                            }
806
73.6k
                            if (info.__isset.topn_filter_descs) {
807
73.6k
                                query_ctx->init_runtime_predicates(info.topn_filter_descs);
808
73.6k
                            }
809
73.6k
                        }
810
811
                        // There is some logic in the query ctx's destructor, so we cannot check for an existing entry and delete the
812
                        // temp query ctx here. For example, the query id may be removed from the workload group's query set.
813
73.6k
                        map.insert({query_id, query_ctx});
814
73.6k
                        return Status::OK();
815
73.6k
                    }));
816
73.6k
        }
817
73.6k
    }
818
104k
    return Status::OK();
819
104k
}
820
821
74
std::string FragmentMgr::dump_pipeline_tasks(int64_t duration) {
822
74
    fmt::memory_buffer debug_string_buffer;
823
74
    size_t i = 0;
824
74
    {
825
74
        fmt::format_to(debug_string_buffer,
826
74
                       "{} pipeline fragment contexts are still running! duration_limit={}\n",
827
74
                       _pipeline_map.num_items(), duration);
828
74
        timespec now;
829
74
        clock_gettime(CLOCK_MONOTONIC, &now);
830
831
74
        _pipeline_map.apply([&](phmap::flat_hash_map<std::pair<TUniqueId, int>,
832
74
                                                     std::shared_ptr<PipelineFragmentContext>>& map)
833
9.47k
                                    -> Status {
834
9.47k
            std::set<TUniqueId> query_id_set;
835
9.47k
            for (auto& it : map) {
836
244
                auto elapsed = it.second->elapsed_time() / 1000000000;
837
244
                if (elapsed < duration) {
838
                    // Only display tasks which have been running for more than {duration} seconds.
839
244
                    continue;
840
244
                }
841
0
                if (!query_id_set.contains(it.first.first)) {
842
0
                    query_id_set.insert(it.first.first);
843
0
                    fmt::format_to(
844
0
                            debug_string_buffer, "QueryId: {}, global_runtime_filter_mgr: {}\n",
845
0
                            print_id(it.first.first),
846
0
                            it.second->get_query_ctx()->runtime_filter_mgr()->debug_string());
847
848
0
                    if (it.second->get_query_ctx()->get_merge_controller_handler()) {
849
0
                        fmt::format_to(debug_string_buffer, "{}\n",
850
0
                                       it.second->get_query_ctx()
851
0
                                               ->get_merge_controller_handler()
852
0
                                               ->debug_string());
853
0
                    }
854
0
                }
855
856
0
                auto timeout_second = it.second->timeout_second();
857
0
                fmt::format_to(
858
0
                        debug_string_buffer,
859
0
                        "No.{} (elapse_second={}s, query_timeout_second={}s, is_timeout={}): {}\n",
860
0
                        i, elapsed, timeout_second, it.second->is_timeout(now),
861
0
                        it.second->debug_string());
862
0
                i++;
863
0
            }
864
9.47k
            return Status::OK();
865
9.47k
        });
866
74
    }
867
74
    return fmt::to_string(debug_string_buffer);
868
74
}
869
870
0
std::string FragmentMgr::dump_pipeline_tasks(TUniqueId& query_id) {
871
0
    if (auto q_ctx = get_query_ctx(query_id)) {
872
0
        return q_ctx->print_all_pipeline_context();
873
0
    } else {
874
0
        return fmt::format(
875
0
                "Dump pipeline tasks failed: Query context (query id = {}) not found. \n",
876
0
                print_id(query_id));
877
0
    }
878
0
}
879
880
Status FragmentMgr::exec_plan_fragment(const TPipelineFragmentParams& params,
881
                                       QuerySource query_source, const FinishCallback& cb,
882
                                       const TPipelineFragmentParamsList& parent,
883
104k
                                       std::shared_ptr<bool> is_prepare_success) {
884
104k
    VLOG_ROW << "Query: " << print_id(params.query_id) << " exec_plan_fragment params is "
885
52
             << apache::thrift::ThriftDebugString(params).c_str();
886
    // sometimes TPipelineFragmentParams debug string is too long and glog
887
    // will truncate the log line, so print query options seperately for debuggin purpose
888
104k
    VLOG_ROW << "Query: " << print_id(params.query_id) << "query options is "
889
10
             << apache::thrift::ThriftDebugString(params.query_options).c_str();
890
891
104k
    std::shared_ptr<QueryContext> query_ctx;
892
104k
    RETURN_IF_ERROR(_get_or_create_query_ctx(params, parent, query_source, query_ctx));
893
104k
    SCOPED_ATTACH_TASK(query_ctx.get()->resource_ctx());
894
104k
    int64_t duration_ns = 0;
895
104k
    std::shared_ptr<PipelineFragmentContext> context = std::make_shared<PipelineFragmentContext>(
896
104k
            query_ctx->query_id(), params, query_ctx, _exec_env, cb,
897
104k
            [this](const ReportStatusRequest& req, auto&& ctx) {
898
10.6k
                return this->trigger_pipeline_context_report(req, std::move(ctx));
899
10.6k
            });
900
104k
    {
901
104k
        SCOPED_RAW_TIMER(&duration_ns);
902
104k
        Status prepare_st = Status::OK();
903
104k
        ASSIGN_STATUS_IF_CATCH_EXCEPTION(prepare_st = context->prepare(_thread_pool.get()),
904
104k
                                         prepare_st);
905
104k
        DBUG_EXECUTE_IF("FragmentMgr.exec_plan_fragment.prepare_failed", {
906
104k
            prepare_st = Status::Aborted("FragmentMgr.exec_plan_fragment.prepare_failed");
907
104k
        });
908
104k
        if (!prepare_st.ok()) {
909
0
            query_ctx->cancel(prepare_st, params.fragment_id);
910
0
            return prepare_st;
911
0
        }
912
104k
    }
913
104k
    g_fragmentmgr_prepare_latency << (duration_ns / 1000);
914
915
104k
    DBUG_EXECUTE_IF("FragmentMgr.exec_plan_fragment.failed",
916
104k
                    { return Status::Aborted("FragmentMgr.exec_plan_fragment.failed"); });
917
104k
    {
918
104k
        int64_t now = duration_cast<std::chrono::milliseconds>(
919
104k
                              std::chrono::system_clock::now().time_since_epoch())
920
104k
                              .count();
921
104k
        g_fragment_executing_count << 1;
922
104k
        g_fragment_last_active_time.set_value(now);
923
924
        // (query_id, fragment_id) is executed only on one BE, locks _pipeline_map.
925
104k
        auto res = _pipeline_map.find({params.query_id, params.fragment_id});
926
104k
        if (res != nullptr) {
927
0
            return Status::InternalError(
928
0
                    "exec_plan_fragment query_id({}) input duplicated fragment_id({})",
929
0
                    print_id(params.query_id), params.fragment_id);
930
0
        }
931
104k
        _pipeline_map.insert({params.query_id, params.fragment_id}, context);
932
104k
    }
933
934
    // Save params for recursive CTE child fragments so we can recreate the PFC later.
935
104k
    if (params.__isset.need_notify_close && params.need_notify_close) {
936
0
        std::lock_guard<std::mutex> lk(_rerunnable_params_lock);
937
0
        _rerunnable_params_map[{params.query_id, params.fragment_id}] = {
938
0
                .deregister_runtime_filter_ids = {},
939
0
                .params = params,
940
0
                .parent = parent,
941
0
                .finish_callback = cb,
942
0
                .query_ctx = query_ctx};
943
0
    }
944
945
104k
    if (!params.__isset.need_wait_execution_trigger || !params.need_wait_execution_trigger) {
946
51.3k
        query_ctx->set_ready_to_execute_only();
947
51.3k
    }
948
949
104k
    query_ctx->set_pipeline_context(params.fragment_id, context);
950
951
104k
    RETURN_IF_ERROR(context->submit());
952
104k
    if (is_prepare_success != nullptr) {
953
208
        *is_prepare_success = true;
954
208
    }
955
104k
    return Status::OK();
956
104k
}
957
958
27.0k
void FragmentMgr::cancel_query(const TUniqueId query_id, const Status reason) {
959
27.0k
    std::shared_ptr<QueryContext> query_ctx = nullptr;
960
27.0k
    {
961
27.0k
        if (auto q_ctx = get_query_ctx(query_id)) {
962
14.7k
            query_ctx = q_ctx;
963
14.7k
        } else {
964
12.2k
            LOG(WARNING) << "Query " << print_id(query_id)
965
12.2k
                         << " does not exists, failed to cancel it";
966
12.2k
            return;
967
12.2k
        }
968
27.0k
    }
969
14.7k
    SCOPED_ATTACH_TASK(query_ctx->resource_ctx());
970
14.7k
    query_ctx->cancel(reason);
971
14.7k
    remove_query_context(query_id);
972
14.7k
    LOG(INFO) << "Query " << print_id(query_id)
973
14.7k
              << " is cancelled and removed. Reason: " << reason.to_string();
974
14.7k
}
975
976
13
void FragmentMgr::cancel_worker() {
977
13
    LOG(INFO) << "FragmentMgr cancel worker start working.";
978
979
13
    timespec check_invalid_query_last_timestamp;
980
13
    clock_gettime(CLOCK_MONOTONIC, &check_invalid_query_last_timestamp);
981
982
9.14k
    do {
983
9.14k
        std::vector<TUniqueId> queries_lost_coordinator;
984
9.14k
        std::vector<TUniqueId> queries_timeout;
985
9.14k
        std::vector<TUniqueId> queries_pipeline_task_leak;
986
        // Fe process uuid -> set<QueryId>
987
9.14k
        std::map<int64_t, std::unordered_set<TUniqueId>> running_queries_on_all_fes;
988
9.14k
        const std::map<TNetworkAddress, FrontendInfo>& running_fes =
989
9.14k
                ExecEnv::GetInstance()->get_running_frontends();
990
991
9.14k
        timespec now;
992
9.14k
        clock_gettime(CLOCK_MONOTONIC, &now);
993
994
9.14k
        if (config::enable_pipeline_task_leakage_detect &&
995
9.14k
            now.tv_sec - check_invalid_query_last_timestamp.tv_sec >
996
0
                    config::pipeline_task_leakage_detect_period_secs) {
997
0
            check_invalid_query_last_timestamp = now;
998
0
            running_queries_on_all_fes = _get_all_running_queries_from_fe();
999
9.14k
        } else {
1000
9.14k
            running_queries_on_all_fes.clear();
1001
9.14k
        }
1002
1003
9.14k
        std::vector<std::shared_ptr<PipelineFragmentContext>> ctx;
1004
9.14k
        _pipeline_map.apply(
1005
9.14k
                [&](phmap::flat_hash_map<std::pair<TUniqueId, int>,
1006
1.17M
                                         std::shared_ptr<PipelineFragmentContext>>& map) -> Status {
1007
1.17M
                    ctx.reserve(ctx.size() + map.size());
1008
1.17M
                    for (auto& pipeline_itr : map) {
1009
15.7k
                        ctx.push_back(pipeline_itr.second);
1010
15.7k
                    }
1011
1.17M
                    return Status::OK();
1012
1.17M
                });
1013
15.7k
        for (auto& c : ctx) {
1014
15.7k
            c->clear_finished_tasks();
1015
15.7k
        }
1016
1017
9.14k
        std::unordered_map<std::shared_ptr<PBackendService_Stub>, BrpcItem> brpc_stub_with_queries;
1018
9.14k
        _collect_timeout_queries_and_brpc_items(queries_timeout, brpc_stub_with_queries, now);
1019
1020
        // We use a very conservative cancel strategy.
1021
        // 0. If there are no running frontends, do not cancel any queries.
1022
        // 1. If the query's process uuid is zero, do not cancel it.
1023
        // 2. If the process uuids match, do not cancel it.
1024
        // 3. If the fe has a zero process uuid, do not cancel it.
1025
9.14k
        if (running_fes.empty() && _query_ctx_map.num_items() != 0) {
1026
0
            LOG_EVERY_N(WARNING, 10)
1027
0
                    << "Could not find any running frontends, maybe we are upgrading or "
1028
0
                       "starting? "
1029
0
                    << "We will not cancel any outdated queries in this situation.";
1030
9.14k
        } else {
1031
9.14k
            _collect_invalid_queries(queries_lost_coordinator, queries_pipeline_task_leak,
1032
9.14k
                                     running_queries_on_all_fes, running_fes,
1033
9.14k
                                     check_invalid_query_last_timestamp);
1034
9.14k
        }
1035
1036
9.14k
        if (config::enable_brpc_connection_check) {
1037
1.24k
            for (auto it : brpc_stub_with_queries) {
1038
25
                if (!it.first) {
1039
0
                    LOG(WARNING) << "brpc stub is nullptr, skip it.";
1040
0
                    continue;
1041
0
                }
1042
25
                _check_brpc_available(it.first, it.second);
1043
25
            }
1044
1.24k
        }
1045
1046
9.14k
        if (!queries_lost_coordinator.empty()) {
1047
4
            LOG(INFO) << "There are " << queries_lost_coordinator.size()
1048
4
                      << " queries need to be cancelled, coordinator dead or restarted.";
1049
4
        }
1050
1051
9.14k
        for (const auto& qid : queries_timeout) {
1052
0
            cancel_query(qid,
1053
0
                         Status::Error<ErrorCode::TIMEOUT>(
1054
0
                                 "FragmentMgr cancel worker going to cancel timeout instance "));
1055
0
        }
1056
1057
9.14k
        for (const auto& qid : queries_pipeline_task_leak) {
1058
            // Cancel the query, and maybe try to report debug info to the fe so that we can
1059
            // collect debug info by sql or http api instead of searching the logs.
1060
0
            cancel_query(qid, Status::Error<ErrorCode::ILLEGAL_STATE>(
1061
0
                                      "Potential pipeline task leakage"));
1062
0
        }
1063
1064
9.14k
        for (const auto& qid : queries_lost_coordinator) {
1065
4
            cancel_query(qid, Status::Error<ErrorCode::CANCELLED>(
1066
4
                                      "Source frontend is not running or restarted"));
1067
4
        }
1068
1069
9.14k
    } while (!_stop_background_threads_latch.wait_for(
1070
9.14k
            std::chrono::seconds(config::fragment_mgr_cancel_worker_interval_seconds)));
1071
13
    LOG(INFO) << "FragmentMgr cancel worker is going to exit.";
1072
13
}
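cancel_worker() above follows a common stop-latch loop: run one sweep, then block on the latch for fragment_mgr_cancel_worker_interval_seconds, and exit as soon as the latch is released. A self-contained sketch of that shape, using std::condition_variable as a stand-in for the latch used in the real code:

#include <chrono>
#include <condition_variable>
#include <mutex>

namespace sketch {
class StopLatch {
public:
    // Returns true once stop() has been called, false if the timeout elapsed first.
    bool wait_for(std::chrono::seconds timeout) {
        std::unique_lock<std::mutex> lock(_mu);
        return _cv.wait_for(lock, timeout, [this] { return _stopped; });
    }
    void stop() {
        {
            std::lock_guard<std::mutex> lock(_mu);
            _stopped = true;
        }
        _cv.notify_all();
    }

private:
    std::mutex _mu;
    std::condition_variable _cv;
    bool _stopped = false;
};

// One sweep of work per iteration; the loop exits promptly once stop() is called.
template <typename Fn>
void run_periodic_worker(StopLatch& latch, std::chrono::seconds interval, Fn&& body) {
    do {
        body();
    } while (!latch.wait_for(interval));
}
} // namespace sketch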
1073
1074
void FragmentMgr::_collect_timeout_queries_and_brpc_items(
1075
        std::vector<TUniqueId>& queries_timeout,
1076
        std::unordered_map<std::shared_ptr<PBackendService_Stub>, BrpcItem>& brpc_stub_with_queries,
1077
9.14k
        timespec now) {
1078
9.14k
    std::vector<std::shared_ptr<QueryContext>> contexts;
1079
9.14k
    _query_ctx_map.apply(
1080
1.17M
            [&](phmap::flat_hash_map<TUniqueId, std::weak_ptr<QueryContext>>& map) -> Status {
1081
1.18M
                for (auto it = map.begin(); it != map.end();) {
1082
10.3k
                    if (auto q_ctx = it->second.lock()) {
1083
10.3k
                        contexts.push_back(q_ctx);
1084
10.3k
                        if (q_ctx->is_timeout(now)) {
1085
0
                            LOG_WARNING("Query {} is timeout", print_id(it->first));
1086
0
                            queries_timeout.push_back(it->first);
1087
10.3k
                        } else if (config::enable_brpc_connection_check) {
1088
199
                            auto brpc_stubs = q_ctx->get_using_brpc_stubs();
1089
199
                            for (auto& item : brpc_stubs) {
1090
25
                                if (!brpc_stub_with_queries.contains(item.second)) {
1091
25
                                    brpc_stub_with_queries.emplace(item.second,
1092
25
                                                                   BrpcItem {item.first, {q_ctx}});
1093
25
                                } else {
1094
0
                                    brpc_stub_with_queries[item.second].queries.emplace_back(q_ctx);
1095
0
                                }
1096
25
                            }
1097
199
                        }
1098
10.3k
                        ++it;
1099
10.3k
                    } else {
1100
10
                        it = map.erase(it);
1101
10
                    }
1102
10.3k
                }
1103
1.17M
                return Status::OK();
1104
1.17M
            });
1105
9.14k
}
1106
1107
void FragmentMgr::_collect_invalid_queries(
1108
        std::vector<TUniqueId>& queries_lost_coordinator,
1109
        std::vector<TUniqueId>& queries_pipeline_task_leak,
1110
        const std::map<int64_t, std::unordered_set<TUniqueId>>& running_queries_on_all_fes,
1111
        const std::map<TNetworkAddress, FrontendInfo>& running_fes,
1112
9.14k
        timespec check_invalid_query_last_timestamp) {
1113
9.14k
    std::vector<std::shared_ptr<QueryContext>> q_contexts;
1114
9.14k
    _query_ctx_map.apply([&](phmap::flat_hash_map<TUniqueId, std::weak_ptr<QueryContext>>& map)
1115
1.17M
                                 -> Status {
1116
1.17M
        for (const auto& it : map) {
1117
10.3k
            if (auto q_ctx = it.second.lock()) {
1118
10.3k
                q_contexts.push_back(q_ctx);
1119
10.3k
                const int64_t fe_process_uuid = q_ctx->get_fe_process_uuid();
1120
1121
10.3k
                if (fe_process_uuid == 0) {
1122
                    // zero means this query is from an older version fe or
1123
                    // this fe is starting
1124
30
                    continue;
1125
30
                }
1126
1127
                // If the query is not running on any frontend, cancel it.
1128
10.3k
                if (auto itr = running_queries_on_all_fes.find(fe_process_uuid);
1129
10.3k
                    itr != running_queries_on_all_fes.end()) {
1130
                    // Query not found on this frontend, and the query arrived before the last check
1131
0
                    if (itr->second.find(it.first) == itr->second.end() &&
1132
                        // tv_nsec only holds the sub-second remainder (nanoseconds) on top of tv_sec,
1133
                        // so checking tv_sec is enough; we do not need to check tv_nsec.
1134
0
                        q_ctx->get_query_arrival_timestamp().tv_sec <
1135
0
                                check_invalid_query_last_timestamp.tv_sec &&
1136
0
                        q_ctx->get_query_source() == QuerySource::INTERNAL_FRONTEND) {
1137
0
                        queries_pipeline_task_leak.push_back(q_ctx->query_id());
1138
0
                        LOG_INFO(
1139
0
                                "Query {}, type {} is not found on any frontends, "
1140
0
                                "maybe it "
1141
0
                                "is leaked.",
1142
0
                                print_id(q_ctx->query_id()), toString(q_ctx->get_query_source()));
1143
0
                        continue;
1144
0
                    }
1145
0
                }
1146
1147
10.3k
                auto itr = running_fes.find(q_ctx->coord_addr);
1148
10.3k
                if (itr != running_fes.end()) {
1149
10.3k
                    if (fe_process_uuid == itr->second.info.process_uuid ||
1150
10.3k
                        itr->second.info.process_uuid == 0) {
1151
10.3k
                        continue;
1152
10.3k
                    } else {
1153
0
                        LOG_WARNING(
1154
0
                                "Coordinator of query {} restarted, going to cancel "
1155
0
                                "it.",
1156
0
                                print_id(q_ctx->query_id()));
1157
0
                    }
1158
10.3k
                } else {
1159
                    // In some rare cases, the rpc port of the follower is not updated in time,
1160
                    // then the port of this follower will be zero, but actually it is still running,
1161
                    // and the be has already received the query from the follower.
1162
                    // So we need to check whether the host is in running_fes.
1163
0
                    bool fe_host_is_standing = std::any_of(
1164
0
                            running_fes.begin(), running_fes.end(), [&q_ctx](const auto& fe) {
1165
0
                                return fe.first.hostname == q_ctx->coord_addr.hostname &&
1166
0
                                       fe.first.port == 0;
1167
0
                            });
1168
0
                    if (fe_host_is_standing) {
1169
0
                        LOG_WARNING(
1170
0
                                "Coordinator {}:{} is not found, but its host is still "
1171
0
                                "running with an unstable brpc port, not going to "
1172
0
                                "cancel "
1173
0
                                "it.",
1174
0
                                q_ctx->coord_addr.hostname, q_ctx->coord_addr.port,
1175
0
                                print_id(q_ctx->query_id()));
1176
0
                        continue;
1177
0
                    } else {
1178
0
                        LOG_WARNING(
1179
0
                                "Could not find target coordinator {}:{} of query {}, "
1180
0
                                "going to "
1181
0
                                "cancel it.",
1182
0
                                q_ctx->coord_addr.hostname, q_ctx->coord_addr.port,
1183
0
                                print_id(q_ctx->query_id()));
1184
0
                    }
1185
0
                }
1186
10.3k
            }
1187
            // Coordinator of this query is already dead or the query context has been released.
1188
4
            queries_lost_coordinator.push_back(it.first);
1189
4
        }
1190
1.17M
        return Status::OK();
1191
1.17M
    });
1192
9.14k
}
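The branches above implement the conservative cancellation rules listed at the top of cancel_worker(). The sketch below distils the uuid comparison into one self-contained predicate; it deliberately drops the extra leniency the real code grants to coordinators whose host is still registered with a zero rpc port, and the helper name is hypothetical.

#include <cstdint>
#include <optional>

namespace sketch {
// running_fe_uuid is empty when the coordinator is absent from the running FE list.
inline bool should_cancel_for_coordinator(int64_t query_fe_uuid,
                                          std::optional<int64_t> running_fe_uuid) {
    if (query_fe_uuid == 0) return false;            // old FE version or FE still starting
    if (!running_fe_uuid.has_value()) return true;   // coordinator not found at all
    if (*running_fe_uuid == 0) return false;         // FE has not reported a uuid yet
    return *running_fe_uuid != query_fe_uuid;        // uuid changed => coordinator restarted
}
} // namespace sketch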
1193
1194
void FragmentMgr::_check_brpc_available(const std::shared_ptr<PBackendService_Stub>& brpc_stub,
1195
25
                                        const BrpcItem& brpc_item) {
1196
25
    const std::string message = "hello doris!";
1197
25
    std::string error_message;
1198
25
    int32_t failed_count = 0;
1199
25
    const int64_t check_timeout_ms =
1200
25
            std::max<int64_t>(100, config::brpc_connection_check_timeout_ms);
1201
1202
25
    while (true) {
1203
25
        PHandShakeRequest request;
1204
25
        request.set_hello(message);
1205
25
        PHandShakeResponse response;
1206
25
        brpc::Controller cntl;
1207
25
        cntl.set_timeout_ms(check_timeout_ms);
1208
25
        cntl.set_max_retry(10);
1209
25
        brpc_stub->hand_shake(&cntl, &request, &response, nullptr);
1210
1211
25
        if (cntl.Failed()) {
1212
0
            error_message = cntl.ErrorText();
1213
0
            LOG(WARNING) << "brpc stub: " << brpc_item.network_address.hostname << ":"
1214
0
                         << brpc_item.network_address.port << " check failed: " << error_message;
1215
25
        } else if (response.has_status() && response.status().status_code() == 0) {
1216
25
            break;
1217
25
        } else {
1218
0
            error_message = response.DebugString();
1219
0
            LOG(WARNING) << "brpc stub: " << brpc_item.network_address.hostname << ":"
1220
0
                         << brpc_item.network_address.port << " check failed: " << error_message;
1221
0
        }
1222
0
        failed_count++;
1223
0
        if (failed_count == 2) {
1224
0
            for (const auto& query_wptr : brpc_item.queries) {
1225
0
                auto query = query_wptr.lock();
1226
0
                if (query && !query->is_cancelled()) {
1227
0
                    query->cancel(Status::InternalError("brpc(dest: {}:{}) check failed: {}",
1228
0
                                                        brpc_item.network_address.hostname,
1229
0
                                                        brpc_item.network_address.port,
1230
0
                                                        error_message));
1231
0
                }
1232
0
            }
1233
1234
0
            LOG(WARNING) << "remove brpc stub from cache: " << brpc_item.network_address.hostname
1235
0
                         << ":" << brpc_item.network_address.port << ", error: " << error_message;
1236
0
            ExecEnv::GetInstance()->brpc_internal_client_cache()->erase(
1237
0
                    brpc_item.network_address.hostname, brpc_item.network_address.port);
1238
0
            break;
1239
0
        }
1240
0
    }
1241
25
}
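_check_brpc_available() applies a two-strikes policy: the first failed handshake is retried, and only a second consecutive failure cancels the queries that use the stub and evicts it from the brpc client cache so a fresh connection is built next time. A generic, self-contained sketch of that policy; `probe` and `on_give_up` are placeholder callbacks, not Doris APIs.

#include <functional>
#include <string>

namespace sketch {
// probe() returns true on a successful handshake and fills *error otherwise.
// on_give_up() stands in for "cancel dependent queries and evict the cached stub".
inline void probe_with_eviction(const std::function<bool(std::string*)>& probe,
                                const std::function<void(const std::string&)>& on_give_up) {
    std::string error;
    int failed_count = 0;
    while (true) {
        if (probe(&error)) {
            break;                   // handshake succeeded
        }
        if (++failed_count == 2) {   // second consecutive failure: give up on this stub
            on_give_up(error);
            break;
        }
    }
}
} // namespace sketch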
1242
1243
0
void FragmentMgr::debug(std::stringstream& ss) {}
1244
/*
1245
 * 1. resolve opaqued_query_plan to a thrift structure
1246
 * 2. build TPipelineFragmentParams
1247
 */
1248
Status FragmentMgr::exec_external_plan_fragment(const TScanOpenParams& params,
1249
                                                const TQueryPlanInfo& t_query_plan_info,
1250
                                                const TUniqueId& query_id,
1251
                                                const TUniqueId& fragment_instance_id,
1252
0
                                                std::vector<TScanColumnDesc>* selected_columns) {
1253
    // set up desc tbl
1254
0
    DescriptorTbl* desc_tbl = nullptr;
1255
0
    ObjectPool obj_pool;
1256
0
    Status st = DescriptorTbl::create(&obj_pool, t_query_plan_info.desc_tbl, &desc_tbl);
1257
0
    if (!st.ok()) {
1258
0
        LOG(WARNING) << "open context error: extract DescriptorTbl failure";
1259
0
        std::stringstream msg;
1260
0
        msg << " create DescriptorTbl error, should not be modified after returned Doris FE "
1261
0
               "processed";
1262
0
        return Status::InvalidArgument(msg.str());
1263
0
    }
1264
0
    TupleDescriptor* tuple_desc = desc_tbl->get_tuple_descriptor(0);
1265
0
    if (tuple_desc == nullptr) {
1266
0
        LOG(WARNING) << "open context error: extract TupleDescriptor failure";
1267
0
        std::stringstream msg;
1268
0
        msg << " get  TupleDescriptor error, should not be modified after returned Doris FE "
1269
0
               "processed";
1270
0
        return Status::InvalidArgument(msg.str());
1271
0
    }
1272
    // process selected columns from slots
1273
0
    for (const SlotDescriptor* slot : tuple_desc->slots()) {
1274
0
        TScanColumnDesc col;
1275
0
        col.__set_name(slot->col_name());
1276
0
        col.__set_type(to_thrift(slot->type()->get_primitive_type()));
1277
0
        selected_columns->emplace_back(std::move(col));
1278
0
    }
1279
1280
0
    VLOG_QUERY << "BackendService execute open()  TQueryPlanInfo: "
1281
0
               << apache::thrift::ThriftDebugString(t_query_plan_info);
1282
    // assign the param used to execute PlanFragment
1283
0
    TPipelineFragmentParams exec_fragment_params;
1284
0
    exec_fragment_params.protocol_version = (PaloInternalServiceVersion::type)0;
1285
0
    exec_fragment_params.__set_is_simplified_param(false);
1286
0
    exec_fragment_params.__set_fragment(t_query_plan_info.plan_fragment);
1287
0
    exec_fragment_params.__set_desc_tbl(t_query_plan_info.desc_tbl);
1288
1289
    // assign the params used for executing this PlanFragment itself
1290
0
    TPipelineInstanceParams fragment_exec_params;
1291
0
    exec_fragment_params.query_id = query_id;
1292
0
    fragment_exec_params.fragment_instance_id = fragment_instance_id;
1293
0
    exec_fragment_params.coord.hostname = "external";
1294
0
    std::map<::doris::TPlanNodeId, std::vector<TScanRangeParams>> per_node_scan_ranges;
1295
0
    std::vector<TScanRangeParams> scan_ranges;
1296
0
    std::vector<int64_t> tablet_ids = params.tablet_ids;
1297
0
    TNetworkAddress address;
1298
0
    address.hostname = BackendOptions::get_localhost();
1299
0
    address.port = doris::config::be_port;
1300
0
    std::map<int64_t, TTabletVersionInfo> tablet_info = t_query_plan_info.tablet_info;
1301
0
    for (auto tablet_id : params.tablet_ids) {
1302
0
        TPaloScanRange scan_range;
1303
0
        scan_range.db_name = params.database;
1304
0
        scan_range.table_name = params.table;
1305
0
        auto iter = tablet_info.find(tablet_id);
1306
0
        if (iter != tablet_info.end()) {
1307
0
            TTabletVersionInfo info = iter->second;
1308
0
            scan_range.tablet_id = tablet_id;
1309
0
            scan_range.version = std::to_string(info.version);
1310
            // Useless, but it is a required field in TPaloScanRange
1311
0
            scan_range.version_hash = "0";
1312
0
            scan_range.schema_hash = std::to_string(info.schema_hash);
1313
0
            scan_range.hosts.push_back(address);
1314
0
        } else {
1315
0
            std::stringstream msg;
1316
0
            msg << "tablet_id: " << tablet_id << " not found";
1317
0
            LOG(WARNING) << "tablet_id [ " << tablet_id << " ] not found";
1318
0
            return Status::NotFound(msg.str());
1319
0
        }
1320
0
        TScanRange doris_scan_range;
1321
0
        doris_scan_range.__set_palo_scan_range(scan_range);
1322
0
        TScanRangeParams scan_range_params;
1323
0
        scan_range_params.scan_range = doris_scan_range;
1324
0
        scan_ranges.push_back(scan_range_params);
1325
0
    }
1326
0
    per_node_scan_ranges.insert(std::make_pair((::doris::TPlanNodeId)0, scan_ranges));
1327
0
    fragment_exec_params.per_node_scan_ranges = per_node_scan_ranges;
1328
0
    exec_fragment_params.local_params.push_back(fragment_exec_params);
1329
0
    TQueryOptions query_options;
1330
0
    query_options.batch_size = params.batch_size;
1331
0
    query_options.execution_timeout = params.execution_timeout;
1332
0
    query_options.mem_limit = params.mem_limit;
1333
0
    query_options.query_type = TQueryType::EXTERNAL;
1334
0
    query_options.be_exec_version = BeExecVersionManager::get_newest_version();
1335
0
    exec_fragment_params.__set_query_options(query_options);
1336
0
    VLOG_ROW << "external exec_plan_fragment params is "
1337
0
             << apache::thrift::ThriftDebugString(exec_fragment_params).c_str();
1338
1339
0
    TPipelineFragmentParamsList mocked;
1340
0
    return exec_plan_fragment(exec_fragment_params, QuerySource::EXTERNAL_CONNECTOR, mocked);
1341
0
}
1342
1343
Status FragmentMgr::apply_filterv2(const PPublishFilterRequestV2* request,
1344
112
                                   butil::IOBufAsZeroCopyInputStream* attach_data) {
1345
112
    UniqueId queryid = request->query_id();
1346
112
    TUniqueId query_id;
1347
112
    query_id.__set_hi(queryid.hi);
1348
112
    query_id.__set_lo(queryid.lo);
1349
112
    if (auto q_ctx = get_query_ctx(query_id)) {
1350
112
        SCOPED_ATTACH_TASK(q_ctx.get());
1351
        // just discard stale requests from a lower stage
1352
112
        if (q_ctx->get_stage(request->filter_id()) != request->stage()) {
1353
0
            return Status::OK();
1354
0
        }
1355
112
        RuntimeFilterMgr* runtime_filter_mgr = q_ctx->runtime_filter_mgr();
1356
112
        DCHECK(runtime_filter_mgr != nullptr);
1357
1358
        // 1. get the target filters
1359
112
        std::vector<std::shared_ptr<RuntimeFilterConsumer>> filters =
1360
112
                runtime_filter_mgr->get_consume_filters(request->filter_id());
1361
1362
        // 2. create the filter wrapper to replace or ignore/disable the target filters
1363
112
        if (!filters.empty()) {
1364
112
            RETURN_IF_ERROR(filters[0]->assign(*request, attach_data));
1365
1.06k
            std::ranges::for_each(filters, [&](auto& filter) { filter->signal(filters[0].get()); });
1366
112
        }
1367
112
    }
1368
112
    return Status::OK();
1369
112
}
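apply_filterv2() above, and the send_filter_size/sync_filter_size/merge_filter handlers below, all begin with the same conversion of the protobuf query id into a thrift TUniqueId before looking up the query context. A tiny self-contained illustration of that hi/lo copy; the *Like structs and to_thrift_id are hypothetical stand-ins for the generated protobuf/thrift types, not part of FragmentMgr.

#include <cstdint>

namespace sketch {
struct PUniqueIdLike { int64_t hi; int64_t lo; };  // stand-in for the protobuf UniqueId
struct TUniqueIdLike { int64_t hi; int64_t lo; };  // stand-in for the thrift TUniqueId

// Equivalent of the query_id.__set_hi()/__set_lo() pattern used by the handlers.
inline TUniqueIdLike to_thrift_id(const PUniqueIdLike& p) {
    TUniqueIdLike t;
    t.hi = p.hi;
    t.lo = p.lo;
    return t;
}
} // namespace sketch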
1370
1371
12
Status FragmentMgr::send_filter_size(const PSendFilterSizeRequest* request) {
1372
12
    UniqueId queryid = request->query_id();
1373
12
    TUniqueId query_id;
1374
12
    query_id.__set_hi(queryid.hi);
1375
12
    query_id.__set_lo(queryid.lo);
1376
1377
12
    if (config::enable_debug_points &&
1378
12
        DebugPoints::instance()->is_enable("FragmentMgr::send_filter_size.return_eof")) {
1379
0
        return Status::EndOfFile("inject FragmentMgr::send_filter_size.return_eof");
1380
0
    }
1381
1382
12
    if (auto q_ctx = get_query_ctx(query_id)) {
1383
        // just discard stale requests from a lower stage
1384
12
        if (q_ctx->get_stage(request->filter_id()) != request->stage()) {
1385
0
            return Status::OK();
1386
0
        }
1387
12
        return q_ctx->get_merge_controller_handler()->send_filter_size(q_ctx, request);
1388
12
    } else {
1389
0
        return Status::EndOfFile(
1390
0
                "Send filter size failed: Query context (query-id: {}) not found, maybe "
1391
0
                "finished",
1392
0
                queryid.to_string());
1393
0
    }
1394
12
}
1395
1396
12
Status FragmentMgr::sync_filter_size(const PSyncFilterSizeRequest* request) {
1397
12
    UniqueId queryid = request->query_id();
1398
12
    TUniqueId query_id;
1399
12
    query_id.__set_hi(queryid.hi);
1400
12
    query_id.__set_lo(queryid.lo);
1401
12
    if (auto q_ctx = get_query_ctx(query_id)) {
1402
        // just discard stale requests from a lower stage
1403
12
        if (q_ctx->get_stage(request->filter_id()) != request->stage()) {
1404
0
            return Status::OK();
1405
0
        }
1406
12
        try {
1407
12
            return q_ctx->runtime_filter_mgr()->sync_filter_size(request);
1408
12
        } catch (const Exception& e) {
1409
0
            return Status::InternalError(
1410
0
                    "Sync filter size failed: Query context (query-id: {}) error: {}",
1411
0
                    queryid.to_string(), e.what());
1412
0
        }
1413
12
    } else {
1414
0
        return Status::EndOfFile(
1415
0
                "Sync filter size failed: Query context (query-id: {}) already finished",
1416
0
                queryid.to_string());
1417
0
    }
1418
12
}
1419
1420
Status FragmentMgr::merge_filter(const PMergeFilterRequest* request,
1421
120
                                 butil::IOBufAsZeroCopyInputStream* attach_data) {
1422
120
    UniqueId queryid = request->query_id();
1423
1424
120
    TUniqueId query_id;
1425
120
    query_id.__set_hi(queryid.hi);
1426
120
    query_id.__set_lo(queryid.lo);
1427
120
    if (auto q_ctx = get_query_ctx(query_id)) {
1428
112
        SCOPED_ATTACH_TASK(q_ctx.get());
1429
        // just discard stale requests from a lower stage
1430
112
        if (q_ctx->get_stage(request->filter_id()) != request->stage()) {
1431
0
            return Status::OK();
1432
0
        }
1433
112
        if (!q_ctx->get_merge_controller_handler()) {
1434
0
            return Status::InternalError("Merge filter failed: Merge controller handler is null");
1435
0
        }
1436
112
        return q_ctx->get_merge_controller_handler()->merge(q_ctx, request, attach_data);
1437
112
    } else {
1438
8
        return Status::EndOfFile(
1439
8
                "Merge filter size failed: Query context (query-id: {}) already finished",
1440
8
                queryid.to_string());
1441
8
    }
1442
120
}
1443
1444
void FragmentMgr::get_runtime_query_info(
1445
17.8k
        std::vector<std::weak_ptr<ResourceContext>>* _resource_ctx_list) {
1446
17.8k
    std::vector<std::shared_ptr<QueryContext>> contexts;
1447
17.8k
    _query_ctx_map.apply(
1448
2.28M
            [&](phmap::flat_hash_map<TUniqueId, std::weak_ptr<QueryContext>>& map) -> Status {
1449
2.30M
                for (auto iter = map.begin(); iter != map.end();) {
1450
20.7k
                    if (auto q_ctx = iter->second.lock()) {
1451
20.7k
                        _resource_ctx_list->push_back(q_ctx->resource_ctx());
1452
20.7k
                        contexts.push_back(q_ctx);
1453
20.7k
                        iter++;
1454
20.7k
                    } else {
1455
30
                        iter = map.erase(iter);
1456
30
                    }
1457
20.7k
                }
1458
2.28M
                return Status::OK();
1459
2.28M
            });
1460
17.8k
}
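get_runtime_query_info() above and _collect_timeout_queries_and_brpc_items() earlier share a lock-or-erase sweep: each weak_ptr in the map is promoted with lock(), live contexts are collected, and expired entries are pruned in place. A self-contained sketch of the idiom with std::unordered_map (the real maps are phmap-based and only touched inside apply()):

#include <memory>
#include <unordered_map>
#include <vector>

namespace sketch {
template <typename Key, typename T>
std::vector<std::shared_ptr<T>> collect_live_and_prune(
        std::unordered_map<Key, std::weak_ptr<T>>& map) {
    std::vector<std::shared_ptr<T>> live;
    for (auto it = map.begin(); it != map.end();) {
        if (auto sp = it->second.lock()) {  // context still alive: keep it and collect it
            live.push_back(std::move(sp));
            ++it;
        } else {                            // owner released it: drop the stale entry
            it = map.erase(it);
        }
    }
    return live;
}
} // namespace sketch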
1461
1462
Status FragmentMgr::get_realtime_exec_status(const TUniqueId& query_id,
1463
0
                                             TReportExecStatusParams* exec_status) {
1464
0
    if (exec_status == nullptr) {
1465
0
        return Status::InvalidArgument("exes_status is nullptr");
1466
0
    }
1467
1468
0
    std::shared_ptr<QueryContext> query_context = get_query_ctx(query_id);
1469
0
    if (query_context == nullptr) {
1470
0
        return Status::NotFound("Query {} not found or released", print_id(query_id));
1471
0
    }
1472
1473
0
    *exec_status = query_context->get_realtime_exec_status();
1474
1475
0
    return Status::OK();
1476
0
}
1477
1478
0
Status FragmentMgr::get_query_statistics(const TUniqueId& query_id, TQueryStatistics* query_stats) {
1479
0
    if (query_stats == nullptr) {
1480
0
        return Status::InvalidArgument("query_stats is nullptr");
1481
0
    }
1482
1483
0
    return ExecEnv::GetInstance()->runtime_query_statistics_mgr()->get_query_statistics(
1484
0
            print_id(query_id), query_stats);
1485
0
}
1486
1487
Status FragmentMgr::transmit_rec_cte_block(
1488
        const TUniqueId& query_id, const TUniqueId& instance_id, int node_id,
1489
0
        const google::protobuf::RepeatedPtrField<doris::PBlock>& pblocks, bool eos) {
1490
0
    if (auto q_ctx = get_query_ctx(query_id)) {
1491
0
        SCOPED_ATTACH_TASK(q_ctx.get());
1492
0
        return q_ctx->send_block_to_cte_scan(instance_id, node_id, pblocks, eos);
1493
0
    } else {
1494
0
        return Status::EndOfFile(
1495
0
                "Transmit rec cte block failed: Query context (query-id: {}) not found, maybe "
1496
0
                "finished",
1497
0
                print_id(query_id));
1498
0
    }
1499
0
}
1500
1501
Status FragmentMgr::rerun_fragment(const std::shared_ptr<brpc::ClosureGuard>& guard,
1502
                                   const TUniqueId& query_id, int fragment,
1503
0
                                   PRerunFragmentParams_Opcode stage) {
1504
0
    if (stage == PRerunFragmentParams::wait_for_destroy ||
1505
0
        stage == PRerunFragmentParams::final_close) {
1506
0
        auto fragment_ctx = _pipeline_map.find({query_id, fragment});
1507
0
        if (!fragment_ctx) {
1508
0
            return Status::NotFound("Fragment context (query-id: {}, fragment-id: {}) not found",
1509
0
                                    print_id(query_id), fragment);
1510
0
        }
1511
1512
0
        if (stage == PRerunFragmentParams::wait_for_destroy) {
1513
0
            std::lock_guard<std::mutex> lk(_rerunnable_params_lock);
1514
0
            auto it = _rerunnable_params_map.find({query_id, fragment});
1515
0
            if (it == _rerunnable_params_map.end()) {
1516
0
                auto st = fragment_ctx->listen_wait_close(guard, true);
1517
0
                if (!st.ok()) {
1518
0
                    LOG(WARNING) << fmt::format(
1519
0
                            "wait_for_destroy fragment context (query-id: {}, fragment-id: "
1520
0
                            "{}) failed: {}",
1521
0
                            print_id(query_id), fragment, st.to_string());
1522
0
                }
1523
0
                return Status::NotFound(
1524
0
                        "Rerunnable params (query-id: {}, fragment-id: {}) not found",
1525
0
                        print_id(query_id), fragment);
1526
0
            }
1527
1528
0
            it->second.deregister_runtime_filter_ids.merge(
1529
0
                    fragment_ctx->get_deregister_runtime_filter());
1530
0
        }
1531
1532
0
        auto* query_ctx = fragment_ctx->get_query_ctx();
1533
0
        SCOPED_ATTACH_TASK(query_ctx);
1534
0
        RETURN_IF_ERROR(
1535
0
                fragment_ctx->listen_wait_close(guard, stage == PRerunFragmentParams::final_close));
1536
0
        fragment_ctx->notify_close();
1537
0
        return Status::OK();
1538
0
    } else if (stage == PRerunFragmentParams::rebuild) {
1539
0
        auto q_ctx = get_query_ctx(query_id);
1540
0
        if (!q_ctx) {
1541
0
            return Status::NotFound(
1542
0
                    "rerun_fragment: Query context (query-id: {}) not found, maybe finished",
1543
0
                    print_id(query_id));
1544
0
        }
1545
0
        SCOPED_ATTACH_TASK(q_ctx.get());
1546
0
        RerunableFragmentInfo info;
1547
0
        {
1548
0
            std::lock_guard<std::mutex> lk(_rerunnable_params_lock);
1549
0
            auto it = _rerunnable_params_map.find({query_id, fragment});
1550
0
            if (it == _rerunnable_params_map.end()) {
1551
0
                return Status::NotFound("rebuild (query-id: {}, fragment-id: {}) not found",
1552
0
                                        print_id(query_id), fragment);
1553
0
            }
1554
0
            it->second.stage++;
1555
0
            RETURN_IF_ERROR(q_ctx->update_filters_stage(it->second.stage,
1556
0
                                                        it->second.deregister_runtime_filter_ids));
1557
0
            info = it->second;
1558
0
        }
1559
1560
0
        auto context = std::make_shared<PipelineFragmentContext>(
1561
0
                q_ctx->query_id(), info.params, q_ctx, _exec_env, info.finish_callback,
1562
0
                [this](const ReportStatusRequest& req, auto&& ctx) {
1563
0
                    return this->trigger_pipeline_context_report(req, std::move(ctx));
1564
0
                });
1565
1566
0
        Status prepare_st = Status::OK();
1567
0
        ASSIGN_STATUS_IF_CATCH_EXCEPTION(prepare_st = context->prepare(_thread_pool.get()),
1568
0
                                         prepare_st);
1569
0
        if (!prepare_st.ok()) {
1570
0
            q_ctx->cancel(prepare_st, info.params.fragment_id);
1571
0
            return prepare_st;
1572
0
        }
1573
1574
        // Insert new PFC into _pipeline_map (old one was removed)
1575
0
        _pipeline_map.insert({info.params.query_id, info.params.fragment_id}, context);
1576
1577
        // Update QueryContext mapping (must support overwrite)
1578
0
        q_ctx->set_pipeline_context(info.params.fragment_id, context);
1579
0
        return Status::OK();
1580
1581
0
    } else if (stage == PRerunFragmentParams::submit) {
1582
0
        auto fragment_ctx = _pipeline_map.find({query_id, fragment});
1583
0
        if (!fragment_ctx) {
1584
0
            return Status::NotFound("Fragment context (query-id: {}, fragment-id: {}) not found",
1585
0
                                    print_id(query_id), fragment);
1586
0
        }
1587
0
        return fragment_ctx->submit();
1588
0
    } else {
1589
0
        return Status::InvalidArgument("Unknown rerun fragment opcode: {}", stage);
1590
0
    }
1591
0
}
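rerun_fragment() above is driven by an opcode: wait_for_destroy and final_close register the brpc closure to be released when the existing fragment context closes and then ask it to close, rebuild recreates a PipelineFragmentContext from the saved rerunnable params, and submit launches the rebuilt context. A compact sketch of dispatching such a staged protocol; the handler callbacks are placeholders, and only the opcode names mirror PRerunFragmentParams.

#include <functional>

namespace sketch {
enum class RerunStage { wait_for_destroy, rebuild, submit, final_close };

struct RerunHandlers {
    std::function<void()> wait_for_destroy;  // park the closure until the old context is gone
    std::function<void()> rebuild;           // recreate the fragment from saved params
    std::function<void()> submit;            // launch the rebuilt fragment
    std::function<void()> final_close;       // release the closure after the last iteration
};

inline bool dispatch(RerunStage stage, const RerunHandlers& h) {
    switch (stage) {
    case RerunStage::wait_for_destroy: h.wait_for_destroy(); return true;
    case RerunStage::rebuild:          h.rebuild();          return true;
    case RerunStage::submit:           h.submit();           return true;
    case RerunStage::final_close:      h.final_close();      return true;
    }
    return false;  // unknown opcode, mirrors the InvalidArgument branch above
}
} // namespace sketch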
1592
1593
Status FragmentMgr::reset_global_rf(const TUniqueId& query_id,
1594
0
                                    const google::protobuf::RepeatedField<int32_t>& filter_ids) {
1595
0
    if (auto q_ctx = get_query_ctx(query_id)) {
1596
0
        SCOPED_ATTACH_TASK(q_ctx.get());
1597
0
        return q_ctx->reset_global_rf(filter_ids);
1598
0
    } else {
1599
0
        return Status::NotFound(
1600
0
                "reset_fragment: Query context (query-id: {}) not found, maybe finished",
1601
0
                print_id(query_id));
1602
0
    }
1603
0
    return Status::OK();
1604
0
}
1605
1606
#include "common/compile_check_end.h"
1607
1608
} // namespace doris