Coverage Report

Created: 2026-05-09 04:14

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/exec/exchange/local_exchanger.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "exec/exchange/local_exchanger.h"
19
20
#include "common/cast_set.h"
21
#include "common/status.h"
22
#include "exec/exchange/local_exchange_sink_operator.h"
23
#include "exec/exchange/local_exchange_source_operator.h"
24
#include "exec/partitioner/partitioner.h"
25
26
namespace doris {
27
template <typename BlockType>
28
void Exchanger<BlockType>::_enqueue_data_and_set_ready(int channel_id,
29
                                                       LocalExchangeSinkLocalState* local_state,
30
275k
                                                       BlockType&& block) {
31
275k
    if (local_state == nullptr) {
32
0
        _enqueue_data_and_set_ready(channel_id, std::move(block));
33
0
        return;
34
0
    }
35
    // PartitionedBlock is used by shuffle exchanger.
36
    // PartitionedBlock will be push into multiple queues with different row ranges, so it will be
37
    // referenced multiple times. Otherwise, we only ref the block once because it is only push into
38
    // one queue.
39
275k
    std::unique_lock l(*_m[channel_id]);
40
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
41
72.1k
                  std::is_same_v<BroadcastBlock, BlockType>) {
42
72.1k
        block.first->record_channel_id(channel_id);
43
202k
    } else {
44
202k
        block->record_channel_id(channel_id);
45
202k
    }
46
47
275k
    if (_data_queue[channel_id].enqueue(std::move(block))) {
48
275k
        local_state->_shared_state->set_ready_to_read(channel_id);
49
275k
    }
50
275k
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_18PartitionedRowIdxsEEE27_enqueue_data_and_set_readyEiPNS_27LocalExchangeSinkLocalStateEOS7_
Line
Count
Source
30
45.6k
                                                       BlockType&& block) {
31
45.6k
    if (local_state == nullptr) {
32
0
        _enqueue_data_and_set_ready(channel_id, std::move(block));
33
0
        return;
34
0
    }
35
    // PartitionedBlock is used by shuffle exchanger.
36
    // PartitionedBlock will be push into multiple queues with different row ranges, so it will be
37
    // referenced multiple times. Otherwise, we only ref the block once because it is only push into
38
    // one queue.
39
45.6k
    std::unique_lock l(*_m[channel_id]);
40
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
41
45.6k
                  std::is_same_v<BroadcastBlock, BlockType>) {
42
45.6k
        block.first->record_channel_id(channel_id);
43
    } else {
44
        block->record_channel_id(channel_id);
45
    }
46
47
45.6k
    if (_data_queue[channel_id].enqueue(std::move(block))) {
48
45.5k
        local_state->_shared_state->set_ready_to_read(channel_id);
49
45.5k
    }
50
45.6k
}
_ZN5doris9ExchangerISt10shared_ptrINS_13ExchangerBase12BlockWrapperEEE27_enqueue_data_and_set_readyEiPNS_27LocalExchangeSinkLocalStateEOS4_
Line
Count
Source
30
202k
                                                       BlockType&& block) {
31
202k
    if (local_state == nullptr) {
32
0
        _enqueue_data_and_set_ready(channel_id, std::move(block));
33
0
        return;
34
0
    }
35
    // PartitionedBlock is used by shuffle exchanger.
36
    // PartitionedBlock will be push into multiple queues with different row ranges, so it will be
37
    // referenced multiple times. Otherwise, we only ref the block once because it is only push into
38
    // one queue.
39
202k
    std::unique_lock l(*_m[channel_id]);
40
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
41
                  std::is_same_v<BroadcastBlock, BlockType>) {
42
        block.first->record_channel_id(channel_id);
43
202k
    } else {
44
202k
        block->record_channel_id(channel_id);
45
202k
    }
46
47
202k
    if (_data_queue[channel_id].enqueue(std::move(block))) {
48
202k
        local_state->_shared_state->set_ready_to_read(channel_id);
49
202k
    }
50
202k
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_17BroadcastRowRangeEEE27_enqueue_data_and_set_readyEiPNS_27LocalExchangeSinkLocalStateEOS7_
Line
Count
Source
30
26.5k
                                                       BlockType&& block) {
31
26.5k
    if (local_state == nullptr) {
32
0
        _enqueue_data_and_set_ready(channel_id, std::move(block));
33
0
        return;
34
0
    }
35
    // PartitionedBlock is used by shuffle exchanger.
36
    // PartitionedBlock will be push into multiple queues with different row ranges, so it will be
37
    // referenced multiple times. Otherwise, we only ref the block once because it is only push into
38
    // one queue.
39
26.5k
    std::unique_lock l(*_m[channel_id]);
40
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
41
26.5k
                  std::is_same_v<BroadcastBlock, BlockType>) {
42
26.5k
        block.first->record_channel_id(channel_id);
43
    } else {
44
        block->record_channel_id(channel_id);
45
    }
46
47
26.5k
    if (_data_queue[channel_id].enqueue(std::move(block))) {
48
26.5k
        local_state->_shared_state->set_ready_to_read(channel_id);
49
26.5k
    }
50
26.5k
}
51
52
template <typename BlockType>
53
bool Exchanger<BlockType>::_dequeue_data(LocalExchangeSourceLocalState* local_state,
54
                                         BlockType& block, bool* eos, Block* data_block,
55
1.89M
                                         int channel_id) {
56
1.89M
    if (local_state == nullptr) {
57
20
        return _dequeue_data(block, eos, data_block, channel_id);
58
20
    }
59
1.89M
    bool all_finished = _running_sink_operators == 0;
60
1.89M
    if (_data_queue[channel_id].try_dequeue(block)) {
61
        if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
62
72.0k
                      std::is_same_v<BroadcastBlock, BlockType>) {
63
72.0k
            local_state->_shared_state->sub_mem_usage(channel_id, block.first->_allocated_bytes);
64
202k
        } else {
65
202k
            local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
66
202k
            data_block->swap(block->_data_block);
67
202k
        }
68
274k
        return true;
69
1.61M
    } else if (all_finished) {
70
1.44M
        *eos = true;
71
1.44M
    } else {
72
167k
        std::unique_lock l(*_m[channel_id]);
73
167k
        if (_data_queue[channel_id].try_dequeue(block)) {
74
            if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
75
9
                          std::is_same_v<BroadcastBlock, BlockType>) {
76
9
                local_state->_shared_state->sub_mem_usage(channel_id,
77
9
                                                          block.first->_allocated_bytes);
78
9
            } else {
79
9
                local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
80
9
                data_block->swap(block->_data_block);
81
9
            }
82
18
            return true;
83
18
        }
84
167k
        COUNTER_UPDATE(local_state->_get_block_failed_counter, 1);
85
167k
        local_state->_dependency->block();
86
167k
    }
87
1.61M
    return false;
88
1.89M
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_18PartitionedRowIdxsEEE13_dequeue_dataEPNS_29LocalExchangeSourceLocalStateERS7_PbPNS_5BlockEi
Line
Count
Source
55
257k
                                         int channel_id) {
56
257k
    if (local_state == nullptr) {
57
8
        return _dequeue_data(block, eos, data_block, channel_id);
58
8
    }
59
257k
    bool all_finished = _running_sink_operators == 0;
60
257k
    if (_data_queue[channel_id].try_dequeue(block)) {
61
        if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
62
45.5k
                      std::is_same_v<BroadcastBlock, BlockType>) {
63
45.5k
            local_state->_shared_state->sub_mem_usage(channel_id, block.first->_allocated_bytes);
64
        } else {
65
            local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
66
            data_block->swap(block->_data_block);
67
        }
68
45.5k
        return true;
69
212k
    } else if (all_finished) {
70
186k
        *eos = true;
71
186k
    } else {
72
26.0k
        std::unique_lock l(*_m[channel_id]);
73
26.0k
        if (_data_queue[channel_id].try_dequeue(block)) {
74
            if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
75
5
                          std::is_same_v<BroadcastBlock, BlockType>) {
76
5
                local_state->_shared_state->sub_mem_usage(channel_id,
77
5
                                                          block.first->_allocated_bytes);
78
            } else {
79
                local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
80
                data_block->swap(block->_data_block);
81
            }
82
5
            return true;
83
5
        }
84
26.0k
        COUNTER_UPDATE(local_state->_get_block_failed_counter, 1);
85
26.0k
        local_state->_dependency->block();
86
26.0k
    }
87
212k
    return false;
88
257k
}
_ZN5doris9ExchangerISt10shared_ptrINS_13ExchangerBase12BlockWrapperEEE13_dequeue_dataEPNS_29LocalExchangeSourceLocalStateERS4_PbPNS_5BlockEi
Line
Count
Source
55
1.57M
                                         int channel_id) {
56
1.57M
    if (local_state == nullptr) {
57
8
        return _dequeue_data(block, eos, data_block, channel_id);
58
8
    }
59
1.57M
    bool all_finished = _running_sink_operators == 0;
60
1.57M
    if (_data_queue[channel_id].try_dequeue(block)) {
61
        if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
62
                      std::is_same_v<BroadcastBlock, BlockType>) {
63
            local_state->_shared_state->sub_mem_usage(channel_id, block.first->_allocated_bytes);
64
202k
        } else {
65
202k
            local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
66
202k
            data_block->swap(block->_data_block);
67
202k
        }
68
202k
        return true;
69
1.37M
    } else if (all_finished) {
70
1.23M
        *eos = true;
71
1.23M
    } else {
72
135k
        std::unique_lock l(*_m[channel_id]);
73
135k
        if (_data_queue[channel_id].try_dequeue(block)) {
74
            if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
75
                          std::is_same_v<BroadcastBlock, BlockType>) {
76
                local_state->_shared_state->sub_mem_usage(channel_id,
77
                                                          block.first->_allocated_bytes);
78
9
            } else {
79
9
                local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
80
9
                data_block->swap(block->_data_block);
81
9
            }
82
9
            return true;
83
9
        }
84
135k
        COUNTER_UPDATE(local_state->_get_block_failed_counter, 1);
85
135k
        local_state->_dependency->block();
86
135k
    }
87
1.37M
    return false;
88
1.57M
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_17BroadcastRowRangeEEE13_dequeue_dataEPNS_29LocalExchangeSourceLocalStateERS7_PbPNS_5BlockEi
Line
Count
Source
55
59.0k
                                         int channel_id) {
56
59.0k
    if (local_state == nullptr) {
57
4
        return _dequeue_data(block, eos, data_block, channel_id);
58
4
    }
59
59.0k
    bool all_finished = _running_sink_operators == 0;
60
59.0k
    if (_data_queue[channel_id].try_dequeue(block)) {
61
        if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
62
26.4k
                      std::is_same_v<BroadcastBlock, BlockType>) {
63
26.4k
            local_state->_shared_state->sub_mem_usage(channel_id, block.first->_allocated_bytes);
64
        } else {
65
            local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
66
            data_block->swap(block->_data_block);
67
        }
68
26.4k
        return true;
69
32.5k
    } else if (all_finished) {
70
26.9k
        *eos = true;
71
26.9k
    } else {
72
5.54k
        std::unique_lock l(*_m[channel_id]);
73
5.54k
        if (_data_queue[channel_id].try_dequeue(block)) {
74
            if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
75
4
                          std::is_same_v<BroadcastBlock, BlockType>) {
76
4
                local_state->_shared_state->sub_mem_usage(channel_id,
77
4
                                                          block.first->_allocated_bytes);
78
            } else {
79
                local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
80
                data_block->swap(block->_data_block);
81
            }
82
4
            return true;
83
4
        }
84
5.54k
        COUNTER_UPDATE(local_state->_get_block_failed_counter, 1);
85
5.54k
        local_state->_dependency->block();
86
5.54k
    }
87
32.5k
    return false;
88
59.0k
}
89
90
template <typename BlockType>
91
0
void Exchanger<BlockType>::_enqueue_data_and_set_ready(int channel_id, BlockType&& block) {
92
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
93
0
                  std::is_same_v<BroadcastBlock, BlockType>) {
94
0
        block.first->record_channel_id(channel_id);
95
0
    } else {
96
0
        block->record_channel_id(channel_id);
97
0
    }
98
0
    _data_queue[channel_id].enqueue(std::move(block));
99
0
}
Unexecuted instantiation: _ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_18PartitionedRowIdxsEEE27_enqueue_data_and_set_readyEiOS7_
Unexecuted instantiation: _ZN5doris9ExchangerISt10shared_ptrINS_13ExchangerBase12BlockWrapperEEE27_enqueue_data_and_set_readyEiOS4_
Unexecuted instantiation: _ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_17BroadcastRowRangeEEE27_enqueue_data_and_set_readyEiOS7_
100
101
template <typename BlockType>
102
bool Exchanger<BlockType>::_dequeue_data(BlockType& block, bool* eos, Block* data_block,
103
20
                                         int channel_id) {
104
20
    if (_data_queue[channel_id].try_dequeue(block)) {
105
        if constexpr (!std::is_same_v<PartitionedBlock, BlockType> &&
106
0
                      !std::is_same_v<BroadcastBlock, BlockType>) {
107
0
            data_block->swap(block->_data_block);
108
0
        }
109
0
        return true;
110
0
    }
111
20
    return false;
112
20
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_18PartitionedRowIdxsEEE13_dequeue_dataERS7_PbPNS_5BlockEi
Line
Count
Source
103
8
                                         int channel_id) {
104
8
    if (_data_queue[channel_id].try_dequeue(block)) {
105
        if constexpr (!std::is_same_v<PartitionedBlock, BlockType> &&
106
                      !std::is_same_v<BroadcastBlock, BlockType>) {
107
            data_block->swap(block->_data_block);
108
        }
109
0
        return true;
110
0
    }
111
8
    return false;
112
8
}
_ZN5doris9ExchangerISt10shared_ptrINS_13ExchangerBase12BlockWrapperEEE13_dequeue_dataERS4_PbPNS_5BlockEi
Line
Count
Source
103
8
                                         int channel_id) {
104
8
    if (_data_queue[channel_id].try_dequeue(block)) {
105
        if constexpr (!std::is_same_v<PartitionedBlock, BlockType> &&
106
0
                      !std::is_same_v<BroadcastBlock, BlockType>) {
107
0
            data_block->swap(block->_data_block);
108
0
        }
109
0
        return true;
110
0
    }
111
8
    return false;
112
8
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_17BroadcastRowRangeEEE13_dequeue_dataERS7_PbPNS_5BlockEi
Line
Count
Source
103
4
                                         int channel_id) {
104
4
    if (_data_queue[channel_id].try_dequeue(block)) {
105
        if constexpr (!std::is_same_v<PartitionedBlock, BlockType> &&
106
                      !std::is_same_v<BroadcastBlock, BlockType>) {
107
            data_block->swap(block->_data_block);
108
        }
109
0
        return true;
110
0
    }
111
4
    return false;
112
4
}
113
114
Status ShuffleExchanger::sink(RuntimeState* state, Block* in_block, bool eos, Profile&& profile,
115
105k
                              SinkInfo& sink_info) {
116
105k
    if (in_block->empty()) {
117
87.2k
        return Status::OK();
118
87.2k
    }
119
18.0k
    {
120
18.0k
        SCOPED_TIMER(profile.compute_hash_value_timer);
121
18.0k
        RETURN_IF_ERROR(sink_info.partitioner->do_partitioning(state, in_block));
122
18.0k
    }
123
18.0k
    {
124
18.0k
        SCOPED_TIMER(profile.distribute_timer);
125
18.0k
        RETURN_IF_ERROR(_split_rows(state, sink_info.partitioner->get_channel_ids(), in_block,
126
18.0k
                                    *sink_info.channel_id, sink_info.local_state,
127
18.0k
                                    sink_info.shuffle_idx_to_instance_idx));
128
18.0k
    }
129
130
18.0k
    sink_info.local_state->_memory_used_counter->set(
131
18.0k
            sink_info.local_state->_shared_state->mem_usage);
132
18.0k
    return Status::OK();
133
18.0k
}
134
135
87.8k
void ShuffleExchanger::close(SourceInfo&& source_info) {
136
87.8k
    PartitionedBlock partitioned_block;
137
87.8k
    bool eos;
138
87.8k
    Block block;
139
87.8k
    _data_queue[source_info.channel_id].set_eos();
140
87.8k
    while (_dequeue_data(source_info.local_state, partitioned_block, &eos, &block,
141
87.8k
                         source_info.channel_id)) {
142
        // do nothing
143
53
    }
144
87.8k
}
145
146
Status ShuffleExchanger::get_block(RuntimeState* state, Block* block, bool* eos, Profile&& profile,
147
110k
                                   SourceInfo&& source_info) {
148
110k
    PartitionedBlock partitioned_block;
149
110k
    MutableBlock mutable_block;
150
151
110k
    auto get_data = [&]() -> Status {
152
36.6k
        do {
153
36.6k
            const auto* offset_start = partitioned_block.second.row_idxs->data() +
154
36.6k
                                       partitioned_block.second.offset_start;
155
36.6k
            auto block_wrapper = partitioned_block.first;
156
36.6k
            RETURN_IF_ERROR(mutable_block.add_rows(&block_wrapper->_data_block, offset_start,
157
36.6k
                                                   offset_start + partitioned_block.second.length));
158
36.6k
        } while (mutable_block.rows() < state->batch_size() && !*eos &&
159
36.6k
                 _dequeue_data(source_info.local_state, partitioned_block, eos, block,
160
35.7k
                               source_info.channel_id));
161
29.1k
        return Status::OK();
162
29.1k
    };
163
164
110k
    if (_dequeue_data(source_info.local_state, partitioned_block, eos, block,
165
110k
                      source_info.channel_id)) {
166
29.1k
        SCOPED_TIMER(profile.copy_data_timer);
167
29.1k
        mutable_block = VectorizedUtils::build_mutable_mem_reuse_block(
168
29.1k
                block, partitioned_block.first->_data_block);
169
29.1k
        RETURN_IF_ERROR(get_data());
170
29.1k
    }
171
110k
    return Status::OK();
172
110k
}
173
174
Status ShuffleExchanger::_split_rows(RuntimeState* state, const std::vector<uint32_t>& channel_ids,
175
                                     Block* block, int channel_id,
176
                                     LocalExchangeSinkLocalState* local_state,
177
17.9k
                                     std::map<int, int>* shuffle_idx_to_instance_idx) {
178
17.9k
    if (local_state == nullptr) {
179
0
        return _split_rows(state, channel_ids, block, channel_id);
180
0
    }
181
17.9k
    const auto rows = cast_set<int32_t>(block->rows());
182
17.9k
    auto row_idx = std::make_shared<PODArray<uint32_t>>(rows);
183
17.9k
    auto& partition_rows_histogram = _partition_rows_histogram[channel_id];
184
17.9k
    {
185
17.9k
        partition_rows_histogram.assign(_num_partitions + 1, 0);
186
13.2M
        for (int32_t i = 0; i < rows; ++i) {
187
13.2M
            partition_rows_histogram[channel_ids[i]]++;
188
13.2M
        }
189
180k
        for (int32_t i = 1; i <= _num_partitions; ++i) {
190
162k
            partition_rows_histogram[i] += partition_rows_histogram[i - 1];
191
162k
        }
192
12.5M
        for (int32_t i = rows - 1; i >= 0; --i) {
193
12.4M
            (*row_idx)[partition_rows_histogram[channel_ids[i]] - 1] = i;
194
12.4M
            partition_rows_histogram[channel_ids[i]]--;
195
12.4M
        }
196
17.9k
    }
197
198
17.9k
    Block data_block;
199
17.9k
    std::shared_ptr<BlockWrapper> new_block_wrapper;
200
17.9k
    if (!_free_blocks.try_dequeue(data_block)) {
201
8.89k
        data_block = block->clone_empty();
202
8.89k
    }
203
17.9k
    data_block.swap(*block);
204
17.9k
    new_block_wrapper =
205
17.9k
            BlockWrapper::create_shared(std::move(data_block), local_state->_shared_state, -1);
206
17.9k
    if (new_block_wrapper->_data_block.empty()) {
207
0
        return Status::OK();
208
0
    }
209
    /**
210
     * Data are hash-shuffled and distributed to all instances of
211
     * all BEs. So we need a shuffleId-To-InstanceId mapping.
212
     * For example, row 1 get a hash value 1 which means we should distribute to instance 1 on
213
     * BE 1 and row 2 get a hash value 2 which means we should distribute to instance 1 on BE 3.
214
     */
215
17.9k
    DCHECK(shuffle_idx_to_instance_idx && shuffle_idx_to_instance_idx->size() > 0);
216
17.9k
    const auto& map = *shuffle_idx_to_instance_idx;
217
17.9k
    int32_t enqueue_rows = 0;
218
162k
    for (const auto& it : map) {
219
162k
        DCHECK(it.second >= 0 && it.second < _num_partitions)
220
1
                << it.first << " : " << it.second << " " << _num_partitions;
221
162k
        uint32_t start = partition_rows_histogram[it.first];
222
162k
        uint32_t size = partition_rows_histogram[it.first + 1] - start;
223
162k
        if (size > 0) {
224
36.7k
            enqueue_rows += size;
225
36.7k
            _enqueue_data_and_set_ready(
226
36.7k
                    it.second, local_state,
227
36.7k
                    {new_block_wrapper,
228
36.7k
                     {.row_idxs = row_idx, .offset_start = start, .length = size}});
229
36.7k
        }
230
162k
    }
231
17.9k
    if (enqueue_rows != rows) [[unlikely]] {
232
1
        fmt::memory_buffer debug_string_buffer;
233
1
        fmt::format_to(debug_string_buffer, "Type: {}, Local Exchange Id: {}, Shuffled Map: ",
234
1
                       get_exchange_type_name(get_type()), local_state->parent()->node_id());
235
3
        for (const auto& it : map) {
236
3
            fmt::format_to(debug_string_buffer, "[{}:{}], ", it.first, it.second);
237
3
        }
238
1
        return Status::InternalError(
239
1
                "Rows mismatched! Data may be lost. [Expected enqueue rows={}, Real enqueue "
240
1
                "rows={}, Detail: {}]",
241
1
                rows, enqueue_rows, fmt::to_string(debug_string_buffer));
242
1
    }
243
244
17.9k
    return Status::OK();
245
17.9k
}
246
247
Status ShuffleExchanger::_split_rows(RuntimeState* state, const std::vector<uint32_t>& channel_ids,
248
0
                                     Block* block, int channel_id) {
249
0
    const auto rows = cast_set<int32_t>(block->rows());
250
0
    auto row_idx = std::make_shared<PODArray<uint32_t>>(rows);
251
0
    auto& partition_rows_histogram = _partition_rows_histogram[channel_id];
252
0
    {
253
0
        partition_rows_histogram.assign(_num_partitions + 1, 0);
254
0
        for (int32_t i = 0; i < rows; ++i) {
255
0
            partition_rows_histogram[channel_ids[i]]++;
256
0
        }
257
0
        for (int32_t i = 1; i <= _num_partitions; ++i) {
258
0
            partition_rows_histogram[i] += partition_rows_histogram[i - 1];
259
0
        }
260
0
        for (int32_t i = rows - 1; i >= 0; --i) {
261
0
            (*row_idx)[partition_rows_histogram[channel_ids[i]] - 1] = i;
262
0
            partition_rows_histogram[channel_ids[i]]--;
263
0
        }
264
0
    }
265
266
0
    Block data_block;
267
0
    std::shared_ptr<BlockWrapper> new_block_wrapper;
268
0
    if (!_free_blocks.try_dequeue(data_block)) {
269
0
        data_block = block->clone_empty();
270
0
    }
271
0
    data_block.swap(*block);
272
0
    new_block_wrapper = BlockWrapper::create_shared(std::move(data_block), nullptr, -1);
273
0
    if (new_block_wrapper->_data_block.empty()) {
274
0
        return Status::OK();
275
0
    }
276
0
    for (int i = 0; i < _num_partitions; i++) {
277
0
        uint32_t start = partition_rows_histogram[i];
278
0
        uint32_t size = partition_rows_histogram[i + 1] - start;
279
0
        if (size > 0) {
280
0
            _enqueue_data_and_set_ready(
281
0
                    i, {new_block_wrapper,
282
0
                        {.row_idxs = row_idx, .offset_start = start, .length = size}});
283
0
        }
284
0
    }
285
286
0
    return Status::OK();
287
0
}
288
289
Status PassthroughExchanger::sink(RuntimeState* state, Block* in_block, bool eos, Profile&& profile,
290
308k
                                  SinkInfo& sink_info) {
291
308k
    if (in_block->empty()) {
292
108k
        return Status::OK();
293
108k
    }
294
200k
    Block new_block;
295
200k
    if (!_free_blocks.try_dequeue(new_block)) {
296
79.4k
        new_block = {in_block->clone_empty()};
297
79.4k
    }
298
200k
    new_block.swap(*in_block);
299
200k
    auto channel_id = ((*sink_info.channel_id)++) % _num_partitions;
300
200k
    BlockWrapperSPtr wrapper = BlockWrapper::create_shared(
301
200k
            std::move(new_block),
302
200k
            sink_info.local_state ? sink_info.local_state->_shared_state : nullptr, channel_id);
303
304
200k
    _enqueue_data_and_set_ready(channel_id, sink_info.local_state, std::move(wrapper));
305
306
200k
    sink_info.local_state->_memory_used_counter->set(
307
200k
            sink_info.local_state->_shared_state->mem_usage);
308
309
200k
    return Status::OK();
310
308k
}
311
312
610k
void PassthroughExchanger::close(SourceInfo&& source_info) {
313
610k
    Block next_block;
314
610k
    BlockWrapperSPtr wrapper;
315
610k
    bool eos;
316
610k
    _data_queue[source_info.channel_id].set_eos();
317
610k
    while (_dequeue_data(source_info.local_state, wrapper, &eos, &next_block,
318
610k
                         source_info.channel_id)) {
319
        // do nothing
320
32
    }
321
610k
}
322
323
11.4k
void PassToOneExchanger::close(SourceInfo&& source_info) {
324
11.4k
    Block next_block;
325
11.4k
    BlockWrapperSPtr wrapper;
326
11.4k
    bool eos;
327
11.4k
    _data_queue[source_info.channel_id].set_eos();
328
11.4k
    while (_dequeue_data(source_info.local_state, wrapper, &eos, &next_block,
329
11.4k
                         source_info.channel_id)) {
330
        // do nothing
331
0
    }
332
11.4k
}
333
334
Status PassthroughExchanger::get_block(RuntimeState* state, Block* block, bool* eos,
335
943k
                                       Profile&& profile, SourceInfo&& source_info) {
336
943k
    BlockWrapperSPtr next_block;
337
943k
    _dequeue_data(source_info.local_state, next_block, eos, block, source_info.channel_id);
338
943k
    return Status::OK();
339
943k
}
340
341
Status PassToOneExchanger::sink(RuntimeState* state, Block* in_block, bool eos, Profile&& profile,
342
4.34k
                                SinkInfo& sink_info) {
343
4.34k
    if (in_block->empty()) {
344
1.49k
        return Status::OK();
345
1.49k
    }
346
2.85k
    Block new_block;
347
2.85k
    if (!_free_blocks.try_dequeue(new_block)) {
348
1.74k
        new_block = {in_block->clone_empty()};
349
1.74k
    }
350
2.85k
    new_block.swap(*in_block);
351
352
2.85k
    BlockWrapperSPtr wrapper = BlockWrapper::create_shared(
353
2.85k
            std::move(new_block),
354
2.85k
            sink_info.local_state ? sink_info.local_state->_shared_state : nullptr, 0);
355
2.85k
    _enqueue_data_and_set_ready(0, sink_info.local_state, std::move(wrapper));
356
357
2.85k
    sink_info.local_state->_memory_used_counter->set(
358
2.85k
            sink_info.local_state->_shared_state->mem_usage);
359
360
2.85k
    return Status::OK();
361
4.34k
}
362
363
Status PassToOneExchanger::get_block(RuntimeState* state, Block* block, bool* eos,
364
5.22k
                                     Profile&& profile, SourceInfo&& source_info) {
365
5.22k
    if (source_info.channel_id != 0) {
366
3
        *eos = true;
367
3
        return Status::OK();
368
3
    }
369
5.22k
    BlockWrapperSPtr next_block;
370
5.22k
    _dequeue_data(source_info.local_state, next_block, eos, block, source_info.channel_id);
371
5.22k
    return Status::OK();
372
5.22k
}
373
374
108k
void ExchangerBase::finalize() {
375
108k
    DCHECK(_running_source_operators == 0);
376
108k
    Block block;
377
202k
    while (_free_blocks.try_dequeue(block)) {
378
        // do nothing
379
93.6k
    }
380
108k
}
381
382
Status BroadcastExchanger::sink(RuntimeState* state, Block* in_block, bool eos, Profile&& profile,
383
5.20k
                                SinkInfo& sink_info) {
384
5.20k
    if (in_block->empty()) {
385
1.79k
        return Status::OK();
386
1.79k
    }
387
3.40k
    Block new_block;
388
3.40k
    if (!_free_blocks.try_dequeue(new_block)) {
389
2.34k
        new_block = {in_block->clone_empty()};
390
2.34k
    }
391
3.40k
    new_block.swap(*in_block);
392
3.40k
    auto wrapper = BlockWrapper::create_shared(
393
3.40k
            std::move(new_block),
394
3.40k
            sink_info.local_state ? sink_info.local_state->_shared_state : nullptr, -1);
395
29.9k
    for (int i = 0; i < _num_partitions; i++) {
396
26.5k
        _enqueue_data_and_set_ready(
397
26.5k
                i, sink_info.local_state,
398
26.5k
                {wrapper, {.offset_start = 0, .length = wrapper->_data_block.rows()}});
399
26.5k
    }
400
401
3.40k
    return Status::OK();
402
5.20k
}
403
404
13.4k
void BroadcastExchanger::close(SourceInfo&& source_info) {
    // Mark this channel's queue finished, then drain whatever is still
    // buffered so the referenced blocks are released.
    _data_queue[source_info.channel_id].set_eos();
    BroadcastBlock pending;
    bool eos;
    Block scratch;
    while (_dequeue_data(source_info.local_state, pending, &eos, &scratch,
                         source_info.channel_id)) {
    }
}
414
415
Status BroadcastExchanger::get_block(RuntimeState* state, Block* block, bool* eos,
                                     Profile&& profile, SourceInfo&& source_info) {
    BroadcastBlock partitioned_block;

    // A failed dequeue means either end-of-stream or no data yet; both return
    // OK with *block left untouched.
    if (!_dequeue_data(source_info.local_state, partitioned_block, eos, block,
                       source_info.channel_id)) {
        return Status::OK();
    }
    SCOPED_TIMER(profile.copy_data_timer);
    auto src_wrapper = partitioned_block.first;
    MutableBlock mutable_block =
            VectorizedUtils::build_mutable_mem_reuse_block(block, src_wrapper->_data_block);
    // Copy out only the row range this consumer was handed.
    RETURN_IF_ERROR(mutable_block.add_rows(&src_wrapper->_data_block,
                                           partitioned_block.second.offset_start,
                                           partitioned_block.second.length));

    return Status::OK();
}
432
433
Status AdaptivePassthroughExchanger::_passthrough_sink(RuntimeState* state, Block* in_block,
                                                       SinkInfo& sink_info) {
    // Grab a recycled block if possible, otherwise clone an empty block with
    // the input's schema, and move the incoming rows into it.
    Block moved_block;
    if (!_free_blocks.try_dequeue(moved_block)) {
        moved_block = {in_block->clone_empty()};
    }
    moved_block.swap(*in_block);
    // Round-robin whole blocks across the channels.
    const auto channel_id = ((*sink_info.channel_id)++) % _num_partitions;
    auto* shared_state = sink_info.local_state ? sink_info.local_state->_shared_state : nullptr;
    // row_idxs == nullptr marks this as an unpartitioned (pass-through) block,
    // which the source side can move out without copying.
    _enqueue_data_and_set_ready(
            channel_id, sink_info.local_state,
            {BlockWrapper::create_shared(std::move(moved_block), shared_state, -1),
             {.row_idxs = nullptr, .offset_start = 0, .length = 0}});

    sink_info.local_state->_memory_used_counter->set(
            sink_info.local_state->_shared_state->mem_usage);
    return Status::OK();
}
452
453
// Assigns a target channel to every row in round-robin order, then delegates
// to _split_rows, which performs the partitioned enqueue.
Status AdaptivePassthroughExchanger::_shuffle_sink(RuntimeState* state, Block* block,
                                                   SinkInfo& sink_info) {
    std::vector<uint32_t> channel_ids;
    const auto num_rows = block->rows();
    channel_ids.resize(num_rows, 0);
    if (num_rows <= _num_partitions) {
        // Fewer rows than partitions: one row per channel (0, 1, 2, ...).
        std::iota(channel_ids.begin(), channel_ids.end(), 0);
    } else {
        // Fill full chunks of _num_partitions with the sequence 0.._num_partitions-1.
        size_t i = 0;
        for (; i < num_rows - _num_partitions; i += _num_partitions) {
            std::iota(channel_ids.begin() + i, channel_ids.begin() + i + _num_partitions, 0);
        }
        // Tail rows restart at channel 0. NOTE(review): when exactly one row
        // remains (i == num_rows - 1) this branch is skipped, but that row keeps
        // the 0 it was initialized with by resize(), so it still lands on
        // channel 0 — behavior is unaffected.
        if (i < num_rows - 1) {
            std::iota(channel_ids.begin() + i, channel_ids.end(), 0);
        }
    }

    sink_info.local_state->_memory_used_counter->set(
            sink_info.local_state->_shared_state->mem_usage);
    RETURN_IF_ERROR(_split_rows(state, channel_ids, block, sink_info));
    return Status::OK();
}
475
476
// Scatters the block's rows to the per-channel queues according to
// channel_ids. Uses a counting sort so that the row indices destined for
// channel i end up contiguous in row_idx; each queue entry then references a
// slice of that shared index array rather than a copy of the rows.
Status AdaptivePassthroughExchanger::_split_rows(RuntimeState* state,
                                                 const std::vector<uint32_t>& channel_ids,
                                                 Block* block, SinkInfo& sink_info) {
    const auto rows = cast_set<int32_t>(block->rows());
    auto row_idx = std::make_shared<PODArray<uint32_t>>(rows);
    auto& partition_rows_histogram = _partition_rows_histogram[sink_info.ins_idx];
    {
        // Pass 1: count rows per channel.
        partition_rows_histogram.assign(_num_partitions + 1, 0);
        for (int32_t i = 0; i < rows; ++i) {
            partition_rows_histogram[channel_ids[i]]++;
        }
        // Pass 2: prefix-sum, so slot i holds the exclusive end of channel i's range.
        for (int32_t i = 1; i <= _num_partitions; ++i) {
            partition_rows_histogram[i] += partition_rows_histogram[i - 1];
        }

        // Pass 3: scatter row indices, iterating backwards to keep the sort
        // stable. Afterwards partition_rows_histogram[i] has been decremented
        // down to the START offset of channel i's range.
        for (int32_t i = rows - 1; i >= 0; --i) {
            (*row_idx)[partition_rows_histogram[channel_ids[i]] - 1] = i;
            partition_rows_histogram[channel_ids[i]]--;
        }
    }
    // Move the input rows into a (possibly recycled) block owned by a shared wrapper.
    Block data_block;
    if (!_free_blocks.try_dequeue(data_block)) {
        data_block = block->clone_empty();
    }
    data_block.swap(*block);
    std::shared_ptr<BlockWrapper> new_block_wrapper = BlockWrapper::create_shared(
            std::move(data_block), sink_info.local_state->_shared_state, sink_info.ins_idx);
    if (new_block_wrapper->_data_block.empty()) {
        return Status::OK();
    }
    // Enqueue each non-empty slice; all slices share the same block wrapper and
    // row_idx array, so no row data is duplicated.
    for (int32_t i = 0; i < _num_partitions; i++) {
        const uint32_t start = partition_rows_histogram[i];
        const uint32_t size = partition_rows_histogram[i + 1] - start;
        if (size > 0) {
            _enqueue_data_and_set_ready(
                    i, sink_info.local_state,
                    {new_block_wrapper,
                     {.row_idxs = row_idx, .offset_start = start, .length = size}});
        }
    }
    return Status::OK();
}
518
519
Status AdaptivePassthroughExchanger::sink(RuntimeState* state, Block* in_block, bool eos,
                                          Profile&& profile, SinkInfo& sink_info) {
    // Empty blocks carry no data and are dropped.
    if (in_block->empty()) {
        return Status::OK();
    }
    // Once in pass-through mode, forward whole blocks without shuffling.
    if (_is_pass_through) {
        return _passthrough_sink(state, in_block, sink_info);
    }
    // Still shuffling; after enough blocks have been seen, switch to
    // pass-through for all subsequent blocks (this one is still shuffled).
    if (++_total_block >= _num_partitions) {
        _is_pass_through = true;
    }
    return _shuffle_sink(state, in_block, sink_info);
}
533
534
// Pulls data for one source channel. Shuffled entries are accumulated into
// *block up to batch_size rows; a pass-through entry is moved out wholesale.
// If a pass-through entry arrives after rows were already accumulated, it is
// stashed in _tmp_block (with its eos flag) and returned on the next call.
Status AdaptivePassthroughExchanger::get_block(RuntimeState* state, Block* block, bool* eos,
                                               Profile&& profile, SourceInfo&& source_info) {
    // A block stashed by a previous call takes priority, together with the
    // end-of-stream flag that was deferred with it.
    if (!_tmp_block[source_info.channel_id].empty()) {
        *block = std::move(_tmp_block[source_info.channel_id]);
        *eos = _tmp_eos[source_info.channel_id];
        _tmp_block[source_info.channel_id] = {};
        return Status::OK();
    }
    PartitionedBlock partitioned_block;
    MutableBlock mutable_block;

    auto get_data = [&]() -> Status {
        do {
            if (partitioned_block.second.row_idxs == nullptr) {
                // The passthrough path which means the block is not partitioned, we can directly move the block without copying.
                if (mutable_block.rows() > 0) {
                    // Rows were already accumulated this call: defer the whole
                    // block (and the current eos) to the next call, and clear
                    // eos so the caller comes back for it.
                    _tmp_block[source_info.channel_id] =
                            std::move(partitioned_block.first->_data_block);
                    _tmp_eos[source_info.channel_id] = *eos;
                    *eos = false;
                } else {
                    *block = std::move(partitioned_block.first->_data_block);
                }
                break;
            }
            // Shuffled path: copy only this channel's rows, identified by
            // row_idxs[offset_start .. offset_start + length).
            const auto* offset_start = partitioned_block.second.row_idxs->data() +
                                       partitioned_block.second.offset_start;
            auto block_wrapper = partitioned_block.first;
            RETURN_IF_ERROR(mutable_block.add_rows(&block_wrapper->_data_block, offset_start,
                                                   offset_start + partitioned_block.second.length));
            // Keep dequeuing until the batch is full, eos is hit, or the queue
            // runs dry.
        } while (mutable_block.rows() < state->batch_size() && !*eos &&
                 _dequeue_data(source_info.local_state, partitioned_block, eos, block,
                               source_info.channel_id));
        return Status::OK();
    };

    if (_dequeue_data(source_info.local_state, partitioned_block, eos, block,
                      source_info.channel_id)) {
        SCOPED_TIMER(profile.copy_data_timer);
        mutable_block = VectorizedUtils::build_mutable_mem_reuse_block(
                block, partitioned_block.first->_data_block);
        RETURN_IF_ERROR(get_data());
    }
    return Status::OK();
}
579
580
5.95k
void AdaptivePassthroughExchanger::close(SourceInfo&& source_info) {
    // Signal end-of-stream for this channel, then drain any blocks still
    // queued so their references are dropped.
    _data_queue[source_info.channel_id].set_eos();
    PartitionedBlock leftover;
    bool eos;
    Block scratch;
    while (_dequeue_data(source_info.local_state, leftover, &eos, &scratch,
                         source_info.channel_id)) {
    }
}
590
591
} // namespace doris