/root/doris/be/src/pipeline/dependency.h
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #pragma once |
19 | | |
20 | | #include "vec/common/custom_allocator.h" |
21 | | #ifdef __APPLE__ |
22 | | #include <netinet/in.h> |
23 | | #include <sys/_types/_u_int.h> |
24 | | #endif |
25 | | |
26 | | #include <concurrentqueue.h> |
27 | | #include <sqltypes.h> |
28 | | |
29 | | #include <atomic> |
30 | | #include <deque> |
31 | | #include <functional> |
32 | | #include <memory> |
33 | | #include <mutex> |
34 | | #include <thread> |
35 | | #include <unordered_map> |
36 | | #include <utility> |
37 | | |
38 | | #include "common/config.h" |
39 | | #include "common/logging.h" |
40 | | #include "gen_cpp/internal_service.pb.h" |
41 | | #include "pipeline/common/agg_utils.h" |
42 | | #include "pipeline/common/join_utils.h" |
43 | | #include "pipeline/common/set_utils.h" |
44 | | #include "pipeline/exec/data_queue.h" |
45 | | #include "pipeline/exec/hierarchical_spill_partition.h" |
46 | | #include "pipeline/exec/join/process_hash_table_probe.h" |
47 | | #include "util/brpc_closure.h" |
48 | | #include "util/stack_util.h" |
49 | | #include "vec/common/sort/partition_sorter.h" |
50 | | #include "vec/common/sort/sorter.h" |
51 | | #include "vec/core/block.h" |
52 | | #include "vec/core/types.h" |
53 | | #include "vec/spill/spill_stream.h" |
54 | | |
55 | | namespace doris::vectorized { |
56 | | class AggFnEvaluator; |
57 | | class VSlotRef; |
58 | | } // namespace doris::vectorized |
59 | | |
60 | | namespace doris::pipeline { |
61 | | #include "common/compile_check_begin.h" |
62 | | class Dependency; |
63 | | class PipelineTask; |
64 | | struct BasicSharedState; |
65 | | using DependencySPtr = std::shared_ptr<Dependency>; |
66 | | class LocalExchangeSourceLocalState; |
67 | | |
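// Both thresholds below are in nanoseconds (60s and 30s respectively), assuming
// MonotonicStopWatch::elapsed_time() reports nanoseconds.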
68 | | static constexpr auto SLOW_DEPENDENCY_THRESHOLD = 60 * 1000L * 1000L * 1000L; |
69 | | static constexpr auto TIME_UNIT_DEPENDENCY_LOG = 30 * 1000L * 1000L * 1000L; |
70 | | static_assert(TIME_UNIT_DEPENDENCY_LOG < SLOW_DEPENDENCY_THRESHOLD); |
71 | | |
72 | | struct BasicSharedState { |
73 | | ENABLE_FACTORY_CREATOR(BasicSharedState) |
74 | | |
75 | | template <class TARGET> |
76 | 96.4k | TARGET* cast() { |
77 | 96.4k | DCHECK(dynamic_cast<TARGET*>(this)) |
78 | 0 | << " Mismatch type! Current type is " << typeid(*this).name() |
79 |     0 |                 << " and expected type is " << typeid(TARGET).name();
80 | 96.4k | return reinterpret_cast<TARGET*>(this); |
81 | 96.4k |     }
82 | | template <class TARGET> |
83 | | const TARGET* cast() const { |
84 | | DCHECK(dynamic_cast<const TARGET*>(this)) |
85 | | << " Mismatch type! Current type is " << typeid(*this).name() |
86 |  |                 << " and expected type is " << typeid(TARGET).name();
87 | | return reinterpret_cast<const TARGET*>(this); |
88 | | } |
89 | | std::vector<DependencySPtr> source_deps; |
90 | | std::vector<DependencySPtr> sink_deps; |
91 | | int id = 0; |
92 | | std::set<int> related_op_ids; |
93 | | |
94 | 72.3k | virtual ~BasicSharedState() = default; |
95 | | |
96 | | void create_source_dependencies(int num_sources, int operator_id, int node_id, |
97 | | const std::string& name); |
98 | | Dependency* create_source_dependency(int operator_id, int node_id, const std::string& name); |
99 | | |
100 | | Dependency* create_sink_dependency(int dest_id, int node_id, const std::string& name); |
101 | 24 | std::vector<DependencySPtr> get_dep_by_channel_id(int channel_id) { |
102 | 24 | DCHECK_LT(channel_id, source_deps.size()); |
103 | 24 | return {source_deps[channel_id]}; |
104 | 24 | } |
105 | | }; |
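// Usage sketch for the checked downcast above (illustrative; `state` stands in for
// a pointer obtained elsewhere):
//
//   BasicSharedState* state = /* ... */;
//   auto* agg = state->cast<AggSharedState>(); // DCHECKs the dynamic type in debug
//                                              // builds, then reinterpret_casts.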
106 | | |
107 | | class Dependency : public std::enable_shared_from_this<Dependency> { |
108 | | public: |
109 | | ENABLE_FACTORY_CREATOR(Dependency); |
110 | | Dependency(int id, int node_id, std::string name, bool ready = false) |
111 | 484k | : _id(id), _node_id(node_id), _name(std::move(name)), _ready(ready) {} |
112 | 484k | virtual ~Dependency() = default; |
113 | | |
114 | 0 | [[nodiscard]] int id() const { return _id; } |
115 | 96.5k | [[nodiscard]] virtual std::string name() const { return _name; } |
116 | 4 | BasicSharedState* shared_state() { return _shared_state; } |
117 | 144k | void set_shared_state(BasicSharedState* shared_state) { _shared_state = shared_state; } |
118 | | virtual std::string debug_string(int indentation_level = 0); |
119 | 868M | bool ready() const { return _ready; } |
120 | | |
121 |  |     // Start the watcher. We use it to count how long this dependency blocks the current pipeline task.
122 | 24 | void start_watcher() { _watcher.start(); } |
123 | 96.1k | [[nodiscard]] int64_t watcher_elapse_time() { return _watcher.elapsed_time(); } |
124 | | |
125 |  |     // Returns the dependency the current pipeline task is blocked by, or `nullptr` if this dependency is ready.
126 | | [[nodiscard]] Dependency* is_blocked_by(std::shared_ptr<PipelineTask> task = nullptr); |
127 | | // Notify downstream pipeline tasks this dependency is ready. |
128 | | void set_ready(); |
129 | 48.6k | void set_ready_to_read(int channel_id = 0) { |
130 | 48.6k | DCHECK_LT(channel_id, _shared_state->source_deps.size()) << debug_string(); |
131 | 48.6k | _shared_state->source_deps[channel_id]->set_ready(); |
132 | 48.6k | } |
133 | 0 | void set_ready_to_write() { |
134 | 0 | DCHECK_EQ(_shared_state->sink_deps.size(), 1) << debug_string(); |
135 | 0 | _shared_state->sink_deps.front()->set_ready(); |
136 | 0 | } |
137 | | |
138 | | // Notify downstream pipeline tasks this dependency is blocked. |
139 | 1.97k | void block() { |
140 | 1.97k | if (_always_ready) { |
141 | 11 | return; |
142 | 11 | } |
143 | 1.96k | std::unique_lock<std::mutex> lc(_always_ready_lock); |
144 | 1.96k | if (_always_ready) { |
145 | 0 | return; |
146 | 0 | } |
147 | 1.96k | _ready = false; |
148 | 1.96k | } |
149 | | |
150 | 133 | void set_always_ready() { |
151 | 133 | if (_always_ready) { |
152 | 35 | return; |
153 | 35 | } |
154 | 98 | std::unique_lock<std::mutex> lc(_always_ready_lock); |
155 | 98 | if (_always_ready) { |
156 | 0 | return; |
157 | 0 | } |
158 | 98 | _always_ready = true; |
159 | 98 | set_ready(); |
160 | 98 | } |
161 | | |
162 | | protected: |
163 | | void _add_block_task(std::shared_ptr<PipelineTask> task); |
164 | | |
165 | | const int _id; |
166 | | const int _node_id; |
167 | | const std::string _name; |
168 | | std::atomic<bool> _ready; |
169 | | |
170 | | BasicSharedState* _shared_state = nullptr; |
171 | | MonotonicStopWatch _watcher; |
172 | | |
173 | | std::mutex _task_lock; |
174 | | std::vector<std::weak_ptr<PipelineTask>> _blocked_task; |
175 | | |
176 | | // If `_always_ready` is true, `block()` will never block tasks. |
177 | | std::atomic<bool> _always_ready = false; |
178 | | std::mutex _always_ready_lock; |
179 | | }; |
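// Scheduling sketch (illustrative, not actual task code): a task polls
// is_blocked_by() and parks itself if a dependency is returned; a later set_ready()
// on that dependency wakes every task recorded via _add_block_task():
//
//   if (Dependency* dep = read_dep->is_blocked_by(task)) {
//       return; // task is now in dep->_blocked_task and will be rescheduled
//   }
//   // dependency is ready, keep executing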
180 | | |
181 | | struct FakeSharedState final : public BasicSharedState { |
182 | | ENABLE_FACTORY_CREATOR(FakeSharedState) |
183 | | }; |
184 | | |
185 | | class CountedFinishDependency final : public Dependency { |
186 | | public: |
187 | | using SharedState = FakeSharedState; |
188 | | CountedFinishDependency(int id, int node_id, std::string name) |
189 | 96.0k | : Dependency(id, node_id, std::move(name), true) {} |
190 | | |
191 | 8 | void add(uint32_t count = 1) { |
192 | 8 | std::unique_lock<std::mutex> l(_mtx); |
193 | 8 | if (!_counter) { |
194 | 7 | block(); |
195 | 7 | } |
196 | 8 | _counter += count; |
197 | 8 | } |
198 | | |
199 | 7 | void sub() { |
200 | 7 | std::unique_lock<std::mutex> l(_mtx); |
201 | 7 | _counter--; |
202 | 7 | if (!_counter) { |
203 | 6 | set_ready(); |
204 | 6 | } |
205 | 7 | } |
206 | | |
207 | | std::string debug_string(int indentation_level = 0) override; |
208 | | |
209 | | private: |
210 | | std::mutex _mtx; |
211 | | uint32_t _counter = 0; |
212 | | }; |
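// Usage sketch (illustrative): the dependency starts ready (counter == 0) and
// blocks only while outstanding work is counted; `submit_async` is hypothetical:
//
//   counted_dep->add();                                  // 0 -> 1: block()
//   submit_async([counted_dep] { counted_dep->sub(); }); // 1 -> 0: set_ready()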
213 | | |
214 | | struct RuntimeFilterTimerQueue; |
215 | | class RuntimeFilterTimer { |
216 | | public: |
217 | | RuntimeFilterTimer(int64_t registration_time, int32_t wait_time_ms, |
218 | | std::shared_ptr<Dependency> parent, bool force_wait_timeout = false) |
219 | 2 | : _parent(std::move(parent)), |
220 | 2 | _registration_time(registration_time), |
221 | 2 | _wait_time_ms(wait_time_ms), |
222 | 2 | _force_wait_timeout(force_wait_timeout) {} |
223 | | |
224 | | // Called by runtime filter producer. |
225 | | void call_ready(); |
226 | | |
227 |  |     // Called by RuntimeFilterTimerQueue, which is responsible for checking whether this runtime filter has timed out.
228 | | void call_timeout(); |
229 | | |
230 | 2 | int64_t registration_time() const { return _registration_time; } |
231 | 2 | int32_t wait_time_ms() const { return _wait_time_ms; } |
232 | | |
233 | | void set_local_runtime_filter_dependencies( |
234 | 0 | const std::vector<std::shared_ptr<Dependency>>& deps) { |
235 | 0 | _local_runtime_filter_dependencies = deps; |
236 | 0 | } |
237 | | |
238 | | bool should_be_check_timeout(); |
239 | | |
240 | 2 | bool force_wait_timeout() { return _force_wait_timeout; } |
241 | | |
242 | | private: |
243 | | friend struct RuntimeFilterTimerQueue; |
244 | | std::shared_ptr<Dependency> _parent = nullptr; |
245 | | std::vector<std::shared_ptr<Dependency>> _local_runtime_filter_dependencies; |
246 | | std::mutex _lock; |
247 | | int64_t _registration_time; |
248 | | const int32_t _wait_time_ms; |
249 | | // true only for group_commit_scan_operator |
250 | | bool _force_wait_timeout; |
251 | | }; |
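// Lifecycle sketch (illustrative): the producer calls call_ready() once the runtime
// filter arrives; otherwise RuntimeFilterTimerQueue calls call_timeout() after
// roughly wait_time_ms() has elapsed since registration_time(). Either path is
// expected to unblock the `_parent` dependency.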
252 | | |
253 | | struct RuntimeFilterTimerQueue { |
254 | | constexpr static int64_t interval = 10; |
255 | 1 | void run() { _thread.detach(); } |
256 | | void start(); |
257 | | |
258 | 0 | void stop() { |
259 | 0 | _stop = true; |
260 | 0 | cv.notify_all(); |
261 | 0 | wait_for_shutdown(); |
262 | 0 | } |
263 | | |
264 | 0 | void wait_for_shutdown() const { |
265 | 0 | while (!_shutdown) { |
266 | 0 | std::this_thread::sleep_for(std::chrono::milliseconds(interval)); |
267 | 0 | } |
268 | 0 | } |
269 | | |
270 | 0 | ~RuntimeFilterTimerQueue() = default; |
271 | 1 | RuntimeFilterTimerQueue() { _thread = std::thread(&RuntimeFilterTimerQueue::start, this); } |
272 | 1 | void push_filter_timer(std::vector<std::shared_ptr<pipeline::RuntimeFilterTimer>>&& filter) { |
273 | 1 | std::unique_lock<std::mutex> lc(_que_lock); |
274 | 1 | _que.insert(_que.end(), filter.begin(), filter.end()); |
275 | 1 | cv.notify_all(); |
276 | 1 | } |
277 | | |
278 | | std::thread _thread; |
279 | | std::condition_variable cv; |
280 | | std::mutex cv_m; |
281 | | std::mutex _que_lock; |
282 | | std::atomic_bool _stop = false; |
283 | | std::atomic_bool _shutdown = false; |
284 | | std::list<std::shared_ptr<pipeline::RuntimeFilterTimer>> _que; |
285 | | }; |
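// Queueing sketch (illustrative): timers are pushed in batches; the thread started
// in the constructor wakes every `interval` milliseconds and calls call_timeout()
// on entries whose wait time has elapsed:
//
//   std::vector<std::shared_ptr<pipeline::RuntimeFilterTimer>> timers = /* ... */;
//   timer_queue.push_filter_timer(std::move(timers)); // `timer_queue` is hypothetical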
286 | | |
287 | | struct AggSharedState : public BasicSharedState { |
288 | | ENABLE_FACTORY_CREATOR(AggSharedState) |
289 | | public: |
290 | 42 | AggSharedState() { agg_data = std::make_unique<AggregatedDataVariants>(); } |
291 | 42 | ~AggSharedState() override { |
292 | 42 | if (!probe_expr_ctxs.empty()) { |
293 | 32 | _close_with_serialized_key(); |
294 | 32 | } else { |
295 | 10 | _close_without_key(); |
296 | 10 | } |
297 | 42 | } |
298 | | |
299 | | Status reset_hash_table(); |
300 | | |
301 | | bool do_limit_filter(vectorized::Block* block, size_t num_rows, |
302 | | const std::vector<int>* key_locs = nullptr); |
303 | | void build_limit_heap(size_t hash_table_size); |
304 | | |
305 |  |     // This function should be called only in the 1st phase.
306 |  |     // 1st phase: is_merge=true, there is only one SlotRef.
307 |  |     // 2nd phase: is_merge=false, there may be multiple exprs.
308 | | static int get_slot_column_id(const vectorized::AggFnEvaluator* evaluator); |
309 | | |
310 | | AggregatedDataVariantsUPtr agg_data = nullptr; |
311 | | std::unique_ptr<AggregateDataContainer> aggregate_data_container; |
312 | | std::vector<vectorized::AggFnEvaluator*> aggregate_evaluators; |
313 | | // group by k1,k2 |
314 | | vectorized::VExprContextSPtrs probe_expr_ctxs; |
315 | | size_t input_num_rows = 0; |
316 | | std::vector<vectorized::AggregateDataPtr> values; |
317 |  |     /// The total size of one row of aggregate function states.
318 | | size_t total_size_of_aggregate_states = 0; |
319 | | size_t align_aggregate_states = 1; |
320 | | /// The offset to the n-th aggregate function in a row of aggregate functions. |
321 | | vectorized::Sizes offsets_of_aggregate_states; |
322 | | std::vector<size_t> make_nullable_keys; |
323 | | |
324 | | bool agg_data_created_without_key = false; |
325 | | bool enable_spill = false; |
326 | | bool reach_limit = false; |
327 | | |
328 | | int64_t limit = -1; |
329 | | bool do_sort_limit = false; |
330 | | vectorized::MutableColumns limit_columns; |
331 | | int limit_columns_min = -1; |
332 | | vectorized::PaddedPODArray<uint8_t> need_computes; |
333 | | std::vector<uint8_t> cmp_res; |
334 | | std::vector<int> order_directions; |
335 | | std::vector<int> null_directions; |
336 | | |
337 | | struct HeapLimitCursor { |
338 | | HeapLimitCursor(int row_id, vectorized::MutableColumns& limit_columns, |
339 | | std::vector<int>& order_directions, std::vector<int>& null_directions) |
340 | 32 | : _row_id(row_id), |
341 | 32 | _limit_columns(limit_columns), |
342 | 32 | _order_directions(order_directions), |
343 | 32 | _null_directions(null_directions) {} |
344 | | |
345 | | HeapLimitCursor(const HeapLimitCursor& other) = default; |
346 | | |
347 | | HeapLimitCursor(HeapLimitCursor&& other) noexcept |
348 | 152 | : _row_id(other._row_id), |
349 | 152 | _limit_columns(other._limit_columns), |
350 | 152 | _order_directions(other._order_directions), |
351 | 152 | _null_directions(other._null_directions) {} |
352 | | |
353 | 0 | HeapLimitCursor& operator=(const HeapLimitCursor& other) noexcept { |
354 | 0 | _row_id = other._row_id; |
355 | 0 | return *this; |
356 | 0 | } |
357 | | |
358 | 129 | HeapLimitCursor& operator=(HeapLimitCursor&& other) noexcept { |
359 | 129 | _row_id = other._row_id; |
360 | 129 | return *this; |
361 | 129 | } |
362 | | |
363 | 79 | bool operator<(const HeapLimitCursor& rhs) const { |
364 | 85 | for (int i = 0; i < _limit_columns.size(); ++i) { |
365 | 79 | const auto& _limit_column = _limit_columns[i]; |
366 | 79 | auto res = _limit_column->compare_at(_row_id, rhs._row_id, *_limit_column, |
367 | 79 | _null_directions[i]) * |
368 | 79 | _order_directions[i]; |
369 | 79 | if (res < 0) { |
370 | 46 | return true; |
371 | 46 | } else if (res > 0) { |
372 | 27 | return false; |
373 | 27 | } |
374 | 79 | } |
375 | 6 | return false; |
376 | 79 | } |
377 | | |
378 | | int _row_id; |
379 | | vectorized::MutableColumns& _limit_columns; |
380 | | std::vector<int>& _order_directions; |
381 | | std::vector<int>& _null_directions; |
382 | | }; |
383 | | |
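    // Presumably a max-heap of the current top-`limit` rows under the ordering above:
    // the heap top is the boundary row that refresh_top_limit() replaces when a
    // better row arrives.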
384 | | std::priority_queue<HeapLimitCursor> limit_heap; |
385 | | |
386 | | // Refresh the top limit heap with a new row |
387 | | void refresh_top_limit(size_t row_id, const vectorized::ColumnRawPtrs& key_columns); |
388 | | |
389 | | vectorized::Arena agg_arena_pool; |
390 | | vectorized::Arena agg_profile_arena; |
391 | | |
392 | | private: |
393 | | vectorized::MutableColumns _get_keys_hash_table(); |
394 | | |
395 | 32 | void _close_with_serialized_key() { |
396 | 32 | std::visit( |
397 | 32 | vectorized::Overload {[&](std::monostate& arg) -> void { |
398 | | // Do nothing |
399 | 0 | }, |
400 | 32 | [&](auto& agg_method) -> void { |
401 | 32 | auto& data = *agg_method.hash_table; |
402 | 7.95k | data.for_each_mapped([&](auto& mapped) { |
403 | 7.95k | if (mapped) { |
404 | 7.95k | _destroy_agg_status(mapped); |
405 | 7.95k | mapped = nullptr; |
406 | 7.95k | } |
407 | 7.95k |                               });
408 | 32 | if (data.has_null_key_data()) { |
409 | 5 | _destroy_agg_status(data.template get_null_key_data< |
410 | 5 | vectorized::AggregateDataPtr>()); |
411 | 5 | } |
412 |    32 |                           }},
413 | 32 | agg_data->method_variant); |
414 | 32 | } |
415 | | |
416 | 10 | void _close_without_key() { |
417 |  |         // Prepare may have failed, in which case the agg data was never created.
418 |  |         // close() is still called to destroy the agg data; destroying an
419 |  |         // uninitialized BitmapValue there would core dump.
420 | 10 | if (agg_data_created_without_key) { |
421 | 8 | _destroy_agg_status(agg_data->without_key); |
422 | 8 | agg_data_created_without_key = false; |
423 | 8 | } |
424 | 10 | } |
425 | | void _destroy_agg_status(vectorized::AggregateDataPtr data); |
426 | | }; |
427 | | |
428 | | struct BasicSpillSharedState { |
429 | 76 | virtual ~BasicSpillSharedState() = default; |
430 | | |
431 |  |     // These two counters are shared with spill source operators as the initial values
432 |  |     // of 'SpillWriteFileCurrentBytes' and 'SpillWriteFileCurrentCount'.
433 |  |     // Total bytes of spill data written to disk files (after serialization).
434 | | RuntimeProfile::Counter* _spill_write_file_total_size = nullptr; |
435 | | RuntimeProfile::Counter* _spill_file_total_count = nullptr; |
436 | | |
437 | 34 | void setup_shared_profile(RuntimeProfile* sink_profile) { |
438 | 34 | _spill_file_total_count = |
439 | 34 | ADD_COUNTER_WITH_LEVEL(sink_profile, "SpillWriteFileTotalCount", TUnit::UNIT, 1); |
440 | 34 | _spill_write_file_total_size = |
441 | 34 | ADD_COUNTER_WITH_LEVEL(sink_profile, "SpillWriteFileBytes", TUnit::BYTES, 1); |
442 | 34 | } |
443 | | |
444 | | virtual void update_spill_stream_profiles(RuntimeProfile* source_profile) = 0; |
445 | | }; |
446 | | |
447 | | struct AggSpillPartition { |
448 | | #ifndef NDEBUG |
449 | | static constexpr int64_t AGG_SPILL_FILE_SIZE = 8 * 1024; // 8KB |
450 | | #else |
451 | | static constexpr int64_t AGG_SPILL_FILE_SIZE = 1024 * 1024 * 1024; // 1G |
452 | | #endif |
453 | | |
454 | 688 | AggSpillPartition() = default; |
455 | | |
456 | | SpillPartitionId id; |
457 | | bool is_split = false; |
458 | | // Best-effort bytes written via this partition node (in block format). |
459 | | // Used as a split trigger; not used for correctness. |
460 | | int64_t spilled_bytes = 0; |
461 | | |
462 | | void close(); |
463 | | |
464 | | Status get_spill_stream(RuntimeState* state, int node_id, RuntimeProfile* profile, |
465 | | vectorized::SpillStreamSPtr& spill_stream); |
466 | | |
467 | 1.44k | Status flush_if_full() { |
468 | 1.44k | DCHECK(spilling_stream); |
469 | 1.44k | Status status; |
470 | | // avoid small spill files |
471 | 1.44k | if (spilling_stream->get_written_bytes() >= AGG_SPILL_FILE_SIZE) { |
472 | 1.39k | status = spilling_stream->spill_eof(); |
473 | 1.39k | spilling_stream.reset(); |
474 | 1.39k | } |
475 | 1.44k | return status; |
476 | 1.44k | } |
477 | | |
478 | 712 | Status finish_current_spilling(bool eos = false) { |
479 | 712 | if (spilling_stream) { |
480 | 633 | if (eos || spilling_stream->get_written_bytes() >= AGG_SPILL_FILE_SIZE) { |
481 | 600 | auto status = spilling_stream->spill_eof(); |
482 | 600 | spilling_stream.reset(); |
483 | 600 | return status; |
484 | 600 | } |
485 | 633 | } |
486 | 112 | return Status::OK(); |
487 | 712 | } |
488 | | |
489 | | std::deque<vectorized::SpillStreamSPtr> spill_streams; |
490 | | vectorized::SpillStreamSPtr spilling_stream; |
491 | | }; |
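// Write-path sketch (illustrative; spill_block() is an assumed SpillStream API):
//
//   vectorized::SpillStreamSPtr stream;
//   RETURN_IF_ERROR(partition.get_spill_stream(state, node_id, profile, stream));
//   RETURN_IF_ERROR(stream->spill_block(state, block, /*eos=*/false));
//   RETURN_IF_ERROR(partition.flush_if_full()); // rotate once >= AGG_SPILL_FILE_SIZE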
492 | | using AggSpillPartitionSPtr = std::shared_ptr<AggSpillPartition>; |
493 | | |
494 | | struct PartitionedAggSharedState : public BasicSharedState, |
495 | | public BasicSpillSharedState, |
496 | | public std::enable_shared_from_this<PartitionedAggSharedState> { |
497 | | ENABLE_FACTORY_CREATOR(PartitionedAggSharedState) |
498 | | |
499 | 14 | PartitionedAggSharedState() = default; |
500 | 14 | ~PartitionedAggSharedState() override = default; |
501 | | |
502 | | void update_spill_stream_profiles(RuntimeProfile* source_profile) override; |
503 | | |
504 | | void init_spill_params(); |
505 | | |
506 | | void close(); |
507 | | |
508 | | AggSharedState* in_mem_shared_state = nullptr; |
509 | | std::shared_ptr<BasicSharedState> in_mem_shared_state_sptr; |
510 | | |
511 | | size_t partition_count; |
512 | | bool is_spilled = false; |
513 | | std::atomic_bool is_closed = false; |
514 | | // Hierarchical spill partitions (multi-level split). |
515 | | // Keyed by SpillPartitionId::key(). (level-0 has kSpillFanout base partitions.) |
516 | | DorisMap<uint32_t, AggSpillPartition> spill_partitions; |
517 | | |
518 | | std::deque<SpillPartitionId> pending_partitions; |
519 | | |
520 | 0 | size_t get_partition_index(size_t hash_value) const { return hash_value % partition_count; } |
521 | | |
522 | | // NOTE: Aggregation has a "null key" bucket in its hash table implementation. |
523 |  |     // We route spilled null-key rows to a deterministic hash bucket so they participate in
524 |  |     // the same multi-level split behavior as normal keys.
525 | 77.8k | inline AggSpillPartition& get_or_create_agg_partition(const SpillPartitionId& partition_id) { |
526 | 77.8k | auto [it, inserted] = spill_partitions.try_emplace(partition_id.key()); |
527 | 77.8k | if (inserted) { |
528 | 584 | it->second.id = partition_id; |
529 | 584 | } |
530 | 77.8k | return it->second; |
531 | 77.8k | } |
532 | | }; |
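// Lookup sketch (illustrative): partitions are created lazily and keyed by
// SpillPartitionId::key(), so repeated lookups for the same id hit the same slot:
//
//   AggSpillPartition& part = shared_state->get_or_create_agg_partition(pid);
//   part.spilled_bytes += bytes_written; // hypothetical caller-side accounting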
533 | | |
534 | | struct SortSharedState : public BasicSharedState { |
535 | | ENABLE_FACTORY_CREATOR(SortSharedState) |
536 | | public: |
537 | | std::shared_ptr<vectorized::Sorter> sorter; |
538 | | }; |
539 | | |
540 | | struct SpillSortSharedState : public BasicSharedState, |
541 | | public BasicSpillSharedState, |
542 | | public std::enable_shared_from_this<SpillSortSharedState> { |
543 | | ENABLE_FACTORY_CREATOR(SpillSortSharedState) |
544 | | |
545 | 10 | SpillSortSharedState() = default; |
546 | 10 | ~SpillSortSharedState() override = default; |
547 | | |
548 | 5 | void update_spill_block_batch_row_count(RuntimeState* state, const vectorized::Block* block) { |
549 | 5 | auto rows = block->rows(); |
550 | 5 | if (rows > 0 && 0 == avg_row_bytes) { |
551 | 4 | avg_row_bytes = std::max((std::size_t)1, block->bytes() / rows); |
552 | 4 | spill_block_batch_row_count = |
553 | 4 | (state->spill_sort_batch_bytes() + avg_row_bytes - 1) / avg_row_bytes; |
554 | 4 | LOG(INFO) << "spill sort block batch row count: " << spill_block_batch_row_count; |
555 | 4 | } |
556 | 5 | } |
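    // Worked example of the batch sizing above (values are illustrative): with
    // spill_sort_batch_bytes() == 4194304 (4 MB) and avg_row_bytes == 64,
    // spill_block_batch_row_count = (4194304 + 64 - 1) / 64 = 65536 rows per batch.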
557 | | |
558 | | void update_spill_stream_profiles(RuntimeProfile* source_profile) override; |
559 | | |
560 | | void close(); |
561 | | |
562 | | SortSharedState* in_mem_shared_state = nullptr; |
563 | | bool enable_spill = false; |
564 | | bool is_spilled = false; |
565 | | int64_t limit = -1; |
566 | | int64_t offset = 0; |
567 | | std::atomic_bool is_closed = false; |
568 | | std::shared_ptr<BasicSharedState> in_mem_shared_state_sptr; |
569 | | |
570 | | std::deque<vectorized::SpillStreamSPtr> sorted_streams; |
571 | | size_t avg_row_bytes = 0; |
572 | | size_t spill_block_batch_row_count; |
573 | | }; |
574 | | |
575 | | struct UnionSharedState : public BasicSharedState { |
576 | | ENABLE_FACTORY_CREATOR(UnionSharedState) |
577 | | |
578 | | public: |
579 |     1 |     UnionSharedState(int child_count = 1) : data_queue(child_count), _child_count(child_count) {}
580 | 0 | int child_count() const { return _child_count; } |
581 | | DataQueue data_queue; |
582 | | const int _child_count; |
583 | | }; |
584 | | |
585 | | struct DataQueueSharedState : public BasicSharedState { |
586 | | ENABLE_FACTORY_CREATOR(DataQueueSharedState) |
587 | | public: |
588 | | DataQueue data_queue; |
589 | | }; |
590 | | |
591 | | class MultiCastDataStreamer; |
592 | | |
593 | | struct MultiCastSharedState : public BasicSharedState, |
594 | | public BasicSpillSharedState, |
595 | | public std::enable_shared_from_this<MultiCastSharedState> { |
596 | | MultiCastSharedState(ObjectPool* pool, int cast_sender_count, int node_id); |
597 | | std::unique_ptr<pipeline::MultiCastDataStreamer> multi_cast_data_streamer; |
598 | | |
599 | | void update_spill_stream_profiles(RuntimeProfile* source_profile) override; |
600 | | }; |
601 | | |
602 | | struct AnalyticSharedState : public BasicSharedState { |
603 | | ENABLE_FACTORY_CREATOR(AnalyticSharedState) |
604 | | |
605 | | public: |
606 | 9 | AnalyticSharedState() = default; |
607 | | std::queue<vectorized::Block> blocks_buffer; |
608 | | std::mutex buffer_mutex; |
609 | | bool sink_eos = false; |
610 | | std::mutex sink_eos_lock; |
611 | | vectorized::Arena agg_arena_pool; |
612 | | }; |
613 | | |
614 | | struct JoinSharedState : public BasicSharedState { |
615 |  |     // For some join cases, we can apply a short-circuit strategy:
616 |  |     // 1. _has_null_in_build_side = true
617 |  |     // 2. the build side is empty and the join op is inner join / right outer join / left semi / right semi / right anti
618 | | bool _has_null_in_build_side = false; |
619 | | bool short_circuit_for_probe = false; |
620 |  |     // For some joins, when the build side is empty, we can return directly by adding some null data to the probe table.
621 | | bool empty_right_table_need_probe_dispose = false; |
622 | | JoinOpVariants join_op_variants; |
623 | | }; |
624 | | |
625 | | struct HashJoinSharedState : public JoinSharedState { |
626 | | ENABLE_FACTORY_CREATOR(HashJoinSharedState) |
627 | 72.1k | HashJoinSharedState() { |
628 | 72.1k | hash_table_variant_vector.push_back(std::make_shared<JoinDataVariants>()); |
629 | 72.1k | } |
630 | 1 | HashJoinSharedState(int num_instances) { |
631 | 1 | source_deps.resize(num_instances, nullptr); |
632 | 1 | hash_table_variant_vector.resize(num_instances, nullptr); |
633 | 9 | for (int i = 0; i < num_instances; i++) { |
634 | 8 | hash_table_variant_vector[i] = std::make_shared<JoinDataVariants>(); |
635 | 8 | } |
636 | 1 | } |
637 | | std::shared_ptr<vectorized::Arena> arena = std::make_shared<vectorized::Arena>(); |
638 | | |
639 | | const std::vector<TupleDescriptor*> build_side_child_desc; |
640 | | size_t build_exprs_size = 0; |
641 | | std::shared_ptr<vectorized::Block> build_block; |
642 | | std::shared_ptr<std::vector<uint32_t>> build_indexes_null; |
643 | | |
644 |  |     // Used by the shared hash table.
645 |  |     // For the probe operator, the hash table in _hash_table_variants is read-only if visited
646 |  |     // flags are not used (visited flags are used only in right / full outer join).
647 |  |     //
648 |  |     // For broadcast join, although the hash table itself is read-only, some state in `_hash_table_variants`
649 |  |     // can still be written. For example, serialized keys are written into contiguous
650 |  |     // memory inside `_hash_table_variants`. So before execution, each instance should use a local
651 |  |     // _hash_table_variants which shares the hash table.
652 | | std::vector<std::shared_ptr<JoinDataVariants>> hash_table_variant_vector; |
653 | | |
654 |  |     // Whether a left semi join can return directly.
655 |  |     // If the runtime filters contain a local IN filter, we can be sure all input rows are matched:
656 |  |     // the local filter is always applied, and an IN filter guarantees precise filtering.
657 |  |     // ATTN: the always_true logic for the IN filter must be disabled when this flag is set.
658 | | bool left_semi_direct_return = false; |
659 | | }; |
660 | | |
661 | | // Hierarchical spill partitioning for hash join probe-side. |
662 | | static constexpr uint32_t kHashJoinSpillFanout = kSpillFanout; |
663 | | static constexpr uint32_t kHashJoinSpillBitsPerLevel = kSpillBitsPerLevel; |
664 | | static constexpr uint32_t kHashJoinSpillMaxDepth = kSpillMaxDepth; |
665 | | using HashJoinSpillPartitionId = SpillPartitionId; |
666 | | |
667 | | struct HashJoinSpillPartition { |
668 | | HashJoinSpillPartitionId id; |
669 | | bool is_split = false; |
670 | | // Probe-side buffered rows for this partition before flushing into blocks/spill. |
671 | | std::unique_ptr<vectorized::MutableBlock> accumulating_block; |
672 | | // Probe-side materialized blocks for this partition (in-memory). |
673 | | std::vector<vectorized::Block> blocks; |
674 | | vectorized::SpillStreamSPtr spill_stream; |
675 | | |
676 | | // Memory tracking for this partition. |
677 | | int64_t in_mem_bytes = 0; // Bytes of data currently in memory (accumulating_block + blocks). |
678 | | int64_t spilled_bytes = 0; // Bytes of data that have been spilled to disk. |
679 | | |
680 | 6 | int64_t total_bytes() const { return in_mem_bytes + spilled_bytes; } |
681 | | }; |
682 | | |
683 | | using HashJoinSpillPartitionMap = DorisMap<uint32_t, HashJoinSpillPartition>; |
684 | | |
685 | | struct HashJoinSpillBuildPartition { |
686 | | HashJoinSpillPartitionId id; |
687 | | bool is_split = false; |
688 | | // Build-side buffered rows for this partition before hash table build. |
689 | | std::unique_ptr<vectorized::MutableBlock> build_block; |
690 | | std::vector<vectorized::Block> blocks; |
691 | | vectorized::SpillStreamSPtr spill_stream; |
692 | | |
693 | | // Memory tracking for this partition. |
694 | | int64_t in_mem_bytes = 0; // Bytes of data currently in memory (build_block). |
695 | | int64_t spilled_bytes = 0; // Bytes of data currently spilled to disk. |
696 | | int64_t row_count = 0; // Total number of rows currently in this partition. |
697 | | |
698 | 2 | int64_t total_bytes() const { return in_mem_bytes + spilled_bytes; } |
699 | | }; |
700 | | |
701 | | using HashJoinSpillBuildPartitionMap = DorisMap<uint32_t, HashJoinSpillBuildPartition>; |
702 | | |
703 | | struct PartitionedHashJoinSharedState |
704 | | : public HashJoinSharedState, |
705 | | public BasicSpillSharedState, |
706 | | public std::enable_shared_from_this<PartitionedHashJoinSharedState> { |
707 | | ENABLE_FACTORY_CREATOR(PartitionedHashJoinSharedState) |
708 | | |
709 | 0 | void update_spill_stream_profiles(RuntimeProfile* source_profile) override { |
710 | 0 | for (auto& [_, partition] : build_partitions) { |
711 | 0 | if (partition.spill_stream) { |
712 | 0 | partition.spill_stream->update_shared_profiles(source_profile); |
713 | 0 | } |
714 | 0 | } |
715 | 0 | } |
716 | | |
717 | | std::unique_ptr<RuntimeState> inner_runtime_state; |
718 | | std::shared_ptr<HashJoinSharedState> inner_shared_state; |
719 | | HashJoinSpillPartitionMap probe_partitions; |
720 | | HashJoinSpillBuildPartitionMap build_partitions; |
721 | | std::deque<HashJoinSpillPartitionId> pending_probe_partitions; |
722 | | bool is_spilled = false; |
723 | | }; |
724 | | |
725 | | struct NestedLoopJoinSharedState : public JoinSharedState { |
726 | | ENABLE_FACTORY_CREATOR(NestedLoopJoinSharedState) |
727 | | // if true, probe child has no more rows to process |
728 | | bool probe_side_eos = false; |
729 | | // Visited flags for each row in build side. |
730 | | vectorized::MutableColumns build_side_visited_flags; |
731 | | // List of build blocks, constructed in prepare() |
732 | | vectorized::Blocks build_blocks; |
733 | | }; |
734 | | |
735 | | struct PartitionSortNodeSharedState : public BasicSharedState { |
736 | | ENABLE_FACTORY_CREATOR(PartitionSortNodeSharedState) |
737 | | public: |
738 | | std::queue<vectorized::Block> blocks_buffer; |
739 | | std::mutex buffer_mutex; |
740 | | std::vector<std::unique_ptr<vectorized::PartitionSorter>> partition_sorts; |
741 | | bool sink_eos = false; |
742 | | std::mutex sink_eos_lock; |
743 | | std::mutex prepared_finish_lock; |
744 | | }; |
745 | | |
746 | | struct SetSharedState : public BasicSharedState { |
747 | | ENABLE_FACTORY_CREATOR(SetSharedState) |
748 | | public: |
749 |  |     /// Default-initialized state.
750 |  |     vectorized::Block build_block; // passed from the build side to the source
751 |  |     // Number of valid elements in the hash table.
752 |  |     int64_t valid_element_in_hash_tbl = 0;
753 |  |     // first: index mapped to column types
754 |  |     // second: column_id, which may point to the origin column or a cast column
755 | | std::unordered_map<int, int> build_col_idx; |
756 | | |
757 |  |     //// Shared static state (decided in prepare/open...).
758 | | |
759 | | /// init in setup_local_state |
760 | | std::unique_ptr<SetDataVariants> hash_table_variants = |
761 | | std::make_unique<SetDataVariants>(); // the real data HERE. |
762 | | std::vector<bool> build_not_ignore_null; |
763 | | |
764 |  |     // The SET operator's children might have different nullable attributes.
765 |  |     // If a calculation involves both nullable and non-nullable columns, the final output should be a nullable column.
766 | | Status update_build_not_ignore_null(const vectorized::VExprContextSPtrs& ctxs); |
767 | | |
768 | | size_t get_hash_table_size() const; |
769 |  |     /// Initialized on both upstream sides.
770 |  |     // The i-th result expr list refers to the i-th child.
771 | | std::vector<vectorized::VExprContextSPtrs> child_exprs_lists; |
772 | | |
773 | | /// init in build side |
774 | | size_t child_quantity; |
775 | | vectorized::VExprContextSPtrs build_child_exprs; |
776 | | std::vector<Dependency*> probe_finished_children_dependency; |
777 | | |
778 | | /// init in probe side |
779 | | std::vector<vectorized::VExprContextSPtrs> probe_child_exprs_lists; |
780 | | |
781 | | std::atomic<bool> ready_for_read = false; |
782 | | |
783 | | vectorized::Arena arena; |
784 | | |
785 | | /// called in setup_local_state |
786 | | Status hash_table_init(); |
787 | | }; |
788 | | |
789 | | enum class ExchangeType : uint8_t { |
790 | | NOOP = 0, |
791 | | // Shuffle data by Crc32CHashPartitioner |
792 | | HASH_SHUFFLE = 1, |
793 | | // Round-robin passthrough data blocks. |
794 | | PASSTHROUGH = 2, |
795 |  |     // Shuffle data by Crc32HashPartitioner<ShuffleChannelIds> (i.e. the same as the storage engine).
796 | | BUCKET_HASH_SHUFFLE = 3, |
797 | | // Passthrough data blocks to all channels. |
798 | | BROADCAST = 4, |
799 | | // Passthrough data to channels evenly in an adaptive way. |
800 | | ADAPTIVE_PASSTHROUGH = 5, |
801 | | // Send all data to the first channel. |
802 | | PASS_TO_ONE = 6, |
803 | | }; |
804 | | |
805 | 63 | inline std::string get_exchange_type_name(ExchangeType idx) { |
806 | 63 | switch (idx) { |
807 | 14 | case ExchangeType::NOOP: |
808 | 14 | return "NOOP"; |
809 | 49 | case ExchangeType::HASH_SHUFFLE: |
810 | 49 | return "HASH_SHUFFLE"; |
811 | 0 | case ExchangeType::PASSTHROUGH: |
812 | 0 | return "PASSTHROUGH"; |
813 | 0 | case ExchangeType::BUCKET_HASH_SHUFFLE: |
814 | 0 | return "BUCKET_HASH_SHUFFLE"; |
815 | 0 | case ExchangeType::BROADCAST: |
816 | 0 | return "BROADCAST"; |
817 | 0 | case ExchangeType::ADAPTIVE_PASSTHROUGH: |
818 | 0 | return "ADAPTIVE_PASSTHROUGH"; |
819 | 0 | case ExchangeType::PASS_TO_ONE: |
820 | 0 | return "PASS_TO_ONE"; |
821 | 63 | } |
822 | 0 | throw Exception(Status::FatalError("__builtin_unreachable")); |
823 | 63 | } |
824 | | |
825 | | struct DataDistribution { |
826 | 144k | DataDistribution(ExchangeType type) : distribution_type(type) {} |
827 | | DataDistribution(ExchangeType type, const std::vector<TExpr>& partition_exprs_) |
828 | 52 | : distribution_type(type), partition_exprs(partition_exprs_) {} |
829 | 0 | DataDistribution(const DataDistribution& other) = default; |
830 | 5 | bool need_local_exchange() const { return distribution_type != ExchangeType::NOOP; } |
831 | 5 | DataDistribution& operator=(const DataDistribution& other) = default; |
832 | | ExchangeType distribution_type; |
833 | | std::vector<TExpr> partition_exprs; |
834 | | }; |
835 | | |
836 | | class ExchangerBase; |
837 | | |
838 | | struct LocalExchangeSharedState : public BasicSharedState { |
839 | | public: |
840 | | ENABLE_FACTORY_CREATOR(LocalExchangeSharedState); |
841 | | LocalExchangeSharedState(int num_instances); |
842 | | ~LocalExchangeSharedState() override; |
843 | | std::unique_ptr<ExchangerBase> exchanger {}; |
844 | | std::vector<RuntimeProfile::Counter*> mem_counters; |
845 | | std::atomic<int64_t> mem_usage = 0; |
846 | | std::atomic<size_t> _buffer_mem_limit = config::local_exchange_buffer_mem_limit; |
847 |  |     // We must add to mem_usage before enqueueing; otherwise a concurrent dequeue may subtract first and drive mem_usage negative.
848 | | std::mutex le_lock; |
849 | | void sub_running_sink_operators(); |
850 | | void sub_running_source_operators(); |
851 | 10 | void _set_always_ready() { |
852 | 40 | for (auto& dep : source_deps) { |
853 | 40 | DCHECK(dep); |
854 | 40 | dep->set_always_ready(); |
855 | 40 | } |
856 | 10 | for (auto& dep : sink_deps) { |
857 | 10 | DCHECK(dep); |
858 | 10 | dep->set_always_ready(); |
859 | 10 | } |
860 | 10 | } |
861 | | |
862 | 0 | Dependency* get_sink_dep_by_channel_id(int channel_id) { return nullptr; } |
863 | | |
864 | 129 | void set_ready_to_read(int channel_id) { |
865 | 129 | auto& dep = source_deps[channel_id]; |
866 | 129 | DCHECK(dep) << channel_id; |
867 | 129 | dep->set_ready(); |
868 | 129 | } |
869 | | |
870 | 161 | void add_mem_usage(int channel_id, size_t delta) { mem_counters[channel_id]->update(delta); } |
871 | | |
872 | 125 | void sub_mem_usage(int channel_id, size_t delta) { |
873 | 125 | mem_counters[channel_id]->update(-(int64_t)delta); |
874 | 125 | } |
875 | | |
876 | 114 | void add_total_mem_usage(size_t delta) { |
877 | 114 | if (cast_set<int64_t>(mem_usage.fetch_add(delta) + delta) > _buffer_mem_limit) { |
878 | 15 | sink_deps.front()->block(); |
879 | 15 | } |
880 | 114 | } |
881 | | |
882 | 114 | void sub_total_mem_usage(size_t delta) { |
883 | 114 | auto prev_usage = mem_usage.fetch_sub(delta); |
884 | 114 | DCHECK_GE(prev_usage - delta, 0) << "prev_usage: " << prev_usage << " delta: " << delta; |
885 | 114 | if (cast_set<int64_t>(prev_usage - delta) <= _buffer_mem_limit) { |
886 | 102 | sink_deps.front()->set_ready(); |
887 | 102 | } |
888 | 114 | } |
889 | | |
890 | 0 | void set_low_memory_mode(RuntimeState* state) { |
891 | 0 | _buffer_mem_limit = std::min<int64_t>(config::local_exchange_buffer_mem_limit, |
892 | 0 | state->low_memory_mode_buffer_limit()); |
893 | 0 | } |
894 | | }; |
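// Back-pressure sketch (illustrative): with _buffer_mem_limit == 1024,
//
//   shared_state->add_total_mem_usage(2048); // usage 2048 > 1024 -> sink dep blocks
//   shared_state->sub_total_mem_usage(1536); // usage 512 <= 1024 -> sink dep ready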
895 | | |
896 | | #include "common/compile_check_end.h" |
897 | | } // namespace doris::pipeline |