Coverage Report

Created: 2026-04-02 03:37

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/exec/exchange/local_exchanger.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "exec/exchange/local_exchanger.h"
19
20
#include "common/cast_set.h"
21
#include "common/status.h"
22
#include "exec/exchange/local_exchange_sink_operator.h"
23
#include "exec/exchange/local_exchange_source_operator.h"
24
#include "exec/partitioner/partitioner.h"
25
26
namespace doris {
27
#include "common/compile_check_begin.h"
28
template <typename BlockType>
29
void Exchanger<BlockType>::_enqueue_data_and_set_ready(int channel_id,
30
                                                       LocalExchangeSinkLocalState* local_state,
31
210k
                                                       BlockType&& block) {
32
210k
    if (local_state == nullptr) {
33
0
        _enqueue_data_and_set_ready(channel_id, std::move(block));
34
0
        return;
35
0
    }
36
    // PartitionedBlock is used by shuffle exchanger.
37
    // PartitionedBlock will be push into multiple queues with different row ranges, so it will be
38
    // referenced multiple times. Otherwise, we only ref the block once because it is only push into
39
    // one queue.
40
210k
    std::unique_lock l(*_m[channel_id]);
41
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
42
55.5k
                  std::is_same_v<BroadcastBlock, BlockType>) {
43
55.5k
        block.first->record_channel_id(channel_id);
44
154k
    } else {
45
154k
        block->record_channel_id(channel_id);
46
154k
    }
47
48
210k
    if (_data_queue[channel_id].enqueue(std::move(block))) {
49
210k
        local_state->_shared_state->set_ready_to_read(channel_id);
50
210k
    }
51
210k
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_18PartitionedRowIdxsEEE27_enqueue_data_and_set_readyEiPNS_27LocalExchangeSinkLocalStateEOS7_
Line
Count
Source
31
26.3k
                                                       BlockType&& block) {
32
26.3k
    if (local_state == nullptr) {
33
0
        _enqueue_data_and_set_ready(channel_id, std::move(block));
34
0
        return;
35
0
    }
36
    // PartitionedBlock is used by shuffle exchanger.
37
    // PartitionedBlock will be push into multiple queues with different row ranges, so it will be
38
    // referenced multiple times. Otherwise, we only ref the block once because it is only push into
39
    // one queue.
40
26.3k
    std::unique_lock l(*_m[channel_id]);
41
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
42
26.3k
                  std::is_same_v<BroadcastBlock, BlockType>) {
43
26.3k
        block.first->record_channel_id(channel_id);
44
    } else {
45
        block->record_channel_id(channel_id);
46
    }
47
48
26.3k
    if (_data_queue[channel_id].enqueue(std::move(block))) {
49
26.3k
        local_state->_shared_state->set_ready_to_read(channel_id);
50
26.3k
    }
51
26.3k
}
_ZN5doris9ExchangerISt10shared_ptrINS_13ExchangerBase12BlockWrapperEEE27_enqueue_data_and_set_readyEiPNS_27LocalExchangeSinkLocalStateEOS4_
Line
Count
Source
31
154k
                                                       BlockType&& block) {
32
154k
    if (local_state == nullptr) {
33
0
        _enqueue_data_and_set_ready(channel_id, std::move(block));
34
0
        return;
35
0
    }
36
    // PartitionedBlock is used by shuffle exchanger.
37
    // PartitionedBlock will be push into multiple queues with different row ranges, so it will be
38
    // referenced multiple times. Otherwise, we only ref the block once because it is only push into
39
    // one queue.
40
154k
    std::unique_lock l(*_m[channel_id]);
41
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
42
                  std::is_same_v<BroadcastBlock, BlockType>) {
43
        block.first->record_channel_id(channel_id);
44
154k
    } else {
45
154k
        block->record_channel_id(channel_id);
46
154k
    }
47
48
154k
    if (_data_queue[channel_id].enqueue(std::move(block))) {
49
154k
        local_state->_shared_state->set_ready_to_read(channel_id);
50
154k
    }
51
154k
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_17BroadcastRowRangeEEE27_enqueue_data_and_set_readyEiPNS_27LocalExchangeSinkLocalStateEOS7_
Line
Count
Source
31
29.1k
                                                       BlockType&& block) {
32
29.1k
    if (local_state == nullptr) {
33
0
        _enqueue_data_and_set_ready(channel_id, std::move(block));
34
0
        return;
35
0
    }
36
    // PartitionedBlock is used by shuffle exchanger.
37
    // PartitionedBlock will be push into multiple queues with different row ranges, so it will be
38
    // referenced multiple times. Otherwise, we only ref the block once because it is only push into
39
    // one queue.
40
29.1k
    std::unique_lock l(*_m[channel_id]);
41
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
42
29.1k
                  std::is_same_v<BroadcastBlock, BlockType>) {
43
29.1k
        block.first->record_channel_id(channel_id);
44
    } else {
45
        block->record_channel_id(channel_id);
46
    }
47
48
29.1k
    if (_data_queue[channel_id].enqueue(std::move(block))) {
49
29.1k
        local_state->_shared_state->set_ready_to_read(channel_id);
50
29.1k
    }
51
29.1k
}
52
53
template <typename BlockType>
54
bool Exchanger<BlockType>::_dequeue_data(LocalExchangeSourceLocalState* local_state,
55
                                         BlockType& block, bool* eos, Block* data_block,
56
1.69M
                                         int channel_id) {
57
1.69M
    if (local_state == nullptr) {
58
20
        return _dequeue_data(block, eos, data_block, channel_id);
59
20
    }
60
1.69M
    bool all_finished = _running_sink_operators == 0;
61
1.69M
    if (_data_queue[channel_id].try_dequeue(block)) {
62
        if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
63
55.4k
                      std::is_same_v<BroadcastBlock, BlockType>) {
64
55.4k
            local_state->_shared_state->sub_mem_usage(channel_id, block.first->_allocated_bytes);
65
154k
        } else {
66
154k
            local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
67
154k
            data_block->swap(block->_data_block);
68
154k
        }
69
210k
        return true;
70
1.48M
    } else if (all_finished) {
71
1.37M
        *eos = true;
72
1.37M
    } else {
73
115k
        std::unique_lock l(*_m[channel_id]);
74
115k
        if (_data_queue[channel_id].try_dequeue(block)) {
75
            if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
76
17
                          std::is_same_v<BroadcastBlock, BlockType>) {
77
17
                local_state->_shared_state->sub_mem_usage(channel_id,
78
17
                                                          block.first->_allocated_bytes);
79
17
            } else {
80
9
                local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
81
9
                data_block->swap(block->_data_block);
82
9
            }
83
26
            return true;
84
26
        }
85
115k
        COUNTER_UPDATE(local_state->_get_block_failed_counter, 1);
86
115k
        local_state->_dependency->block();
87
115k
    }
88
1.48M
    return false;
89
1.69M
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_18PartitionedRowIdxsEEE13_dequeue_dataEPNS_29LocalExchangeSourceLocalStateERS7_PbPNS_5BlockEi
Line
Count
Source
56
309k
                                         int channel_id) {
57
309k
    if (local_state == nullptr) {
58
4
        return _dequeue_data(block, eos, data_block, channel_id);
59
4
    }
60
309k
    bool all_finished = _running_sink_operators == 0;
61
309k
    if (_data_queue[channel_id].try_dequeue(block)) {
62
        if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
63
26.3k
                      std::is_same_v<BroadcastBlock, BlockType>) {
64
26.3k
            local_state->_shared_state->sub_mem_usage(channel_id, block.first->_allocated_bytes);
65
        } else {
66
            local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
67
            data_block->swap(block->_data_block);
68
        }
69
26.3k
        return true;
70
282k
    } else if (all_finished) {
71
268k
        *eos = true;
72
268k
    } else {
73
14.0k
        std::unique_lock l(*_m[channel_id]);
74
14.0k
        if (_data_queue[channel_id].try_dequeue(block)) {
75
            if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
76
9
                          std::is_same_v<BroadcastBlock, BlockType>) {
77
9
                local_state->_shared_state->sub_mem_usage(channel_id,
78
9
                                                          block.first->_allocated_bytes);
79
            } else {
80
                local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
81
                data_block->swap(block->_data_block);
82
            }
83
9
            return true;
84
9
        }
85
14.0k
        COUNTER_UPDATE(local_state->_get_block_failed_counter, 1);
86
14.0k
        local_state->_dependency->block();
87
14.0k
    }
88
282k
    return false;
89
309k
}
_ZN5doris9ExchangerISt10shared_ptrINS_13ExchangerBase12BlockWrapperEEE13_dequeue_dataEPNS_29LocalExchangeSourceLocalStateERS4_PbPNS_5BlockEi
Line
Count
Source
56
1.32M
                                         int channel_id) {
57
1.32M
    if (local_state == nullptr) {
58
12
        return _dequeue_data(block, eos, data_block, channel_id);
59
12
    }
60
1.32M
    bool all_finished = _running_sink_operators == 0;
61
1.32M
    if (_data_queue[channel_id].try_dequeue(block)) {
62
        if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
63
                      std::is_same_v<BroadcastBlock, BlockType>) {
64
            local_state->_shared_state->sub_mem_usage(channel_id, block.first->_allocated_bytes);
65
154k
        } else {
66
154k
            local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
67
154k
            data_block->swap(block->_data_block);
68
154k
        }
69
154k
        return true;
70
1.17M
    } else if (all_finished) {
71
1.07M
        *eos = true;
72
1.07M
    } else {
73
95.1k
        std::unique_lock l(*_m[channel_id]);
74
95.1k
        if (_data_queue[channel_id].try_dequeue(block)) {
75
            if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
76
                          std::is_same_v<BroadcastBlock, BlockType>) {
77
                local_state->_shared_state->sub_mem_usage(channel_id,
78
                                                          block.first->_allocated_bytes);
79
9
            } else {
80
9
                local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
81
9
                data_block->swap(block->_data_block);
82
9
            }
83
9
            return true;
84
9
        }
85
95.1k
        COUNTER_UPDATE(local_state->_get_block_failed_counter, 1);
86
95.1k
        local_state->_dependency->block();
87
95.1k
    }
88
1.17M
    return false;
89
1.32M
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_17BroadcastRowRangeEEE13_dequeue_dataEPNS_29LocalExchangeSourceLocalStateERS7_PbPNS_5BlockEi
Line
Count
Source
56
62.2k
                                         int channel_id) {
57
62.2k
    if (local_state == nullptr) {
58
4
        return _dequeue_data(block, eos, data_block, channel_id);
59
4
    }
60
62.2k
    bool all_finished = _running_sink_operators == 0;
61
62.2k
    if (_data_queue[channel_id].try_dequeue(block)) {
62
        if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
63
29.1k
                      std::is_same_v<BroadcastBlock, BlockType>) {
64
29.1k
            local_state->_shared_state->sub_mem_usage(channel_id, block.first->_allocated_bytes);
65
        } else {
66
            local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
67
            data_block->swap(block->_data_block);
68
        }
69
29.1k
        return true;
70
33.1k
    } else if (all_finished) {
71
26.4k
        *eos = true;
72
26.4k
    } else {
73
6.76k
        std::unique_lock l(*_m[channel_id]);
74
6.76k
        if (_data_queue[channel_id].try_dequeue(block)) {
75
            if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
76
8
                          std::is_same_v<BroadcastBlock, BlockType>) {
77
8
                local_state->_shared_state->sub_mem_usage(channel_id,
78
8
                                                          block.first->_allocated_bytes);
79
            } else {
80
                local_state->_shared_state->sub_mem_usage(channel_id, block->_allocated_bytes);
81
                data_block->swap(block->_data_block);
82
            }
83
8
            return true;
84
8
        }
85
6.75k
        COUNTER_UPDATE(local_state->_get_block_failed_counter, 1);
86
6.75k
        local_state->_dependency->block();
87
6.75k
    }
88
33.1k
    return false;
89
62.2k
}
90
91
template <typename BlockType>
92
0
void Exchanger<BlockType>::_enqueue_data_and_set_ready(int channel_id, BlockType&& block) {
93
    if constexpr (std::is_same_v<PartitionedBlock, BlockType> ||
94
0
                  std::is_same_v<BroadcastBlock, BlockType>) {
95
0
        block.first->record_channel_id(channel_id);
96
0
    } else {
97
0
        block->record_channel_id(channel_id);
98
0
    }
99
0
    _data_queue[channel_id].enqueue(std::move(block));
100
0
}
Unexecuted instantiation: _ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_18PartitionedRowIdxsEEE27_enqueue_data_and_set_readyEiOS7_
Unexecuted instantiation: _ZN5doris9ExchangerISt10shared_ptrINS_13ExchangerBase12BlockWrapperEEE27_enqueue_data_and_set_readyEiOS4_
Unexecuted instantiation: _ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_17BroadcastRowRangeEEE27_enqueue_data_and_set_readyEiOS7_
101
102
template <typename BlockType>
103
bool Exchanger<BlockType>::_dequeue_data(BlockType& block, bool* eos, Block* data_block,
104
20
                                         int channel_id) {
105
20
    if (_data_queue[channel_id].try_dequeue(block)) {
106
        if constexpr (!std::is_same_v<PartitionedBlock, BlockType> &&
107
0
                      !std::is_same_v<BroadcastBlock, BlockType>) {
108
0
            data_block->swap(block->_data_block);
109
0
        }
110
0
        return true;
111
0
    }
112
20
    return false;
113
20
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_18PartitionedRowIdxsEEE13_dequeue_dataERS7_PbPNS_5BlockEi
Line
Count
Source
104
4
                                         int channel_id) {
105
4
    if (_data_queue[channel_id].try_dequeue(block)) {
106
        if constexpr (!std::is_same_v<PartitionedBlock, BlockType> &&
107
                      !std::is_same_v<BroadcastBlock, BlockType>) {
108
            data_block->swap(block->_data_block);
109
        }
110
0
        return true;
111
0
    }
112
4
    return false;
113
4
}
_ZN5doris9ExchangerISt10shared_ptrINS_13ExchangerBase12BlockWrapperEEE13_dequeue_dataERS4_PbPNS_5BlockEi
Line
Count
Source
104
12
                                         int channel_id) {
105
12
    if (_data_queue[channel_id].try_dequeue(block)) {
106
        if constexpr (!std::is_same_v<PartitionedBlock, BlockType> &&
107
0
                      !std::is_same_v<BroadcastBlock, BlockType>) {
108
0
            data_block->swap(block->_data_block);
109
0
        }
110
0
        return true;
111
0
    }
112
12
    return false;
113
12
}
_ZN5doris9ExchangerISt4pairISt10shared_ptrINS_13ExchangerBase12BlockWrapperEENS_17BroadcastRowRangeEEE13_dequeue_dataERS7_PbPNS_5BlockEi
Line
Count
Source
104
4
                                         int channel_id) {
105
4
    if (_data_queue[channel_id].try_dequeue(block)) {
106
        if constexpr (!std::is_same_v<PartitionedBlock, BlockType> &&
107
                      !std::is_same_v<BroadcastBlock, BlockType>) {
108
            data_block->swap(block->_data_block);
109
        }
110
0
        return true;
111
0
    }
112
4
    return false;
113
4
}
114
115
Status ShuffleExchanger::sink(RuntimeState* state, Block* in_block, bool eos, Profile&& profile,
116
148k
                              SinkInfo& sink_info) {
117
148k
    if (in_block->empty()) {
118
133k
        return Status::OK();
119
133k
    }
120
14.4k
    {
121
14.4k
        SCOPED_TIMER(profile.compute_hash_value_timer);
122
14.4k
        RETURN_IF_ERROR(sink_info.partitioner->do_partitioning(state, in_block));
123
14.4k
    }
124
14.4k
    {
125
14.4k
        SCOPED_TIMER(profile.distribute_timer);
126
14.4k
        RETURN_IF_ERROR(_split_rows(state, sink_info.partitioner->get_channel_ids(), in_block,
127
14.4k
                                    *sink_info.channel_id, sink_info.local_state,
128
14.4k
                                    sink_info.shuffle_idx_to_instance_idx));
129
14.4k
    }
130
131
14.4k
    sink_info.local_state->_memory_used_counter->set(
132
14.4k
            sink_info.local_state->_shared_state->mem_usage);
133
14.4k
    return Status::OK();
134
14.4k
}
135
136
135k
void ShuffleExchanger::close(SourceInfo&& source_info) {
137
135k
    PartitionedBlock partitioned_block;
138
135k
    bool eos;
139
135k
    Block block;
140
135k
    _data_queue[source_info.channel_id].set_eos();
141
135k
    while (_dequeue_data(source_info.local_state, partitioned_block, &eos, &block,
142
135k
                         source_info.channel_id)) {
143
        // do nothing
144
11
    }
145
135k
}
146
147
Status ShuffleExchanger::get_block(RuntimeState* state, Block* block, bool* eos, Profile&& profile,
148
148k
                                   SourceInfo&& source_info) {
149
148k
    PartitionedBlock partitioned_block;
150
148k
    MutableBlock mutable_block;
151
152
148k
    auto get_data = [&]() -> Status {
153
26.3k
        do {
154
26.3k
            const auto* offset_start = partitioned_block.second.row_idxs->data() +
155
26.3k
                                       partitioned_block.second.offset_start;
156
26.3k
            auto block_wrapper = partitioned_block.first;
157
26.3k
            RETURN_IF_ERROR(mutable_block.add_rows(&block_wrapper->_data_block, offset_start,
158
26.3k
                                                   offset_start + partitioned_block.second.length));
159
26.3k
        } while (mutable_block.rows() < state->batch_size() && !*eos &&
160
26.3k
                 _dequeue_data(source_info.local_state, partitioned_block, eos, block,
161
25.6k
                               source_info.channel_id));
162
19.2k
        return Status::OK();
163
19.2k
    };
164
165
148k
    if (_dequeue_data(source_info.local_state, partitioned_block, eos, block,
166
148k
                      source_info.channel_id)) {
167
19.2k
        SCOPED_TIMER(profile.copy_data_timer);
168
19.2k
        mutable_block = VectorizedUtils::build_mutable_mem_reuse_block(
169
19.2k
                block, partitioned_block.first->_data_block);
170
19.2k
        RETURN_IF_ERROR(get_data());
171
19.2k
    }
172
148k
    return Status::OK();
173
148k
}
174
175
Status ShuffleExchanger::_split_rows(RuntimeState* state, const std::vector<uint32_t>& channel_ids,
176
                                     Block* block, int channel_id,
177
                                     LocalExchangeSinkLocalState* local_state,
178
14.3k
                                     std::map<int, int>* shuffle_idx_to_instance_idx) {
179
14.3k
    if (local_state == nullptr) {
180
0
        return _split_rows(state, channel_ids, block, channel_id);
181
0
    }
182
14.3k
    const auto rows = cast_set<int32_t>(block->rows());
183
14.3k
    auto row_idx = std::make_shared<PODArray<uint32_t>>(rows);
184
14.3k
    auto& partition_rows_histogram = _partition_rows_histogram[channel_id];
185
14.3k
    {
186
14.3k
        partition_rows_histogram.assign(_num_partitions + 1, 0);
187
11.2M
        for (int32_t i = 0; i < rows; ++i) {
188
11.2M
            partition_rows_histogram[channel_ids[i]]++;
189
11.2M
        }
190
139k
        for (int32_t i = 1; i <= _num_partitions; ++i) {
191
124k
            partition_rows_histogram[i] += partition_rows_histogram[i - 1];
192
124k
        }
193
10.7M
        for (int32_t i = rows - 1; i >= 0; --i) {
194
10.7M
            (*row_idx)[partition_rows_histogram[channel_ids[i]] - 1] = i;
195
10.7M
            partition_rows_histogram[channel_ids[i]]--;
196
10.7M
        }
197
14.3k
    }
198
199
14.3k
    Block data_block;
200
14.3k
    std::shared_ptr<BlockWrapper> new_block_wrapper;
201
14.3k
    if (!_free_blocks.try_dequeue(data_block)) {
202
8.48k
        data_block = block->clone_empty();
203
8.48k
    }
204
14.3k
    data_block.swap(*block);
205
14.3k
    new_block_wrapper =
206
14.3k
            BlockWrapper::create_shared(std::move(data_block), local_state->_shared_state, -1);
207
14.3k
    if (new_block_wrapper->_data_block.empty()) {
208
0
        return Status::OK();
209
0
    }
210
    /**
211
     * Data are hash-shuffled and distributed to all instances of
212
     * all BEs. So we need a shuffleId-To-InstanceId mapping.
213
     * For example, row 1 get a hash value 1 which means we should distribute to instance 1 on
214
     * BE 1 and row 2 get a hash value 2 which means we should distribute to instance 1 on BE 3.
215
     */
216
14.3k
    DCHECK(shuffle_idx_to_instance_idx && shuffle_idx_to_instance_idx->size() > 0);
217
14.3k
    const auto& map = *shuffle_idx_to_instance_idx;
218
14.3k
    int32_t enqueue_rows = 0;
219
124k
    for (const auto& it : map) {
220
124k
        DCHECK(it.second >= 0 && it.second < _num_partitions)
221
4
                << it.first << " : " << it.second << " " << _num_partitions;
222
124k
        uint32_t start = partition_rows_histogram[it.first];
223
124k
        uint32_t size = partition_rows_histogram[it.first + 1] - start;
224
124k
        if (size > 0) {
225
26.3k
            enqueue_rows += size;
226
26.3k
            _enqueue_data_and_set_ready(
227
26.3k
                    it.second, local_state,
228
26.3k
                    {new_block_wrapper,
229
26.3k
                     {.row_idxs = row_idx, .offset_start = start, .length = size}});
230
26.3k
        }
231
124k
    }
232
14.3k
    if (enqueue_rows != rows) [[unlikely]] {
233
1
        fmt::memory_buffer debug_string_buffer;
234
1
        fmt::format_to(debug_string_buffer, "Type: {}, Local Exchange Id: {}, Shuffled Map: ",
235
1
                       get_exchange_type_name(get_type()), local_state->parent()->node_id());
236
3
        for (const auto& it : map) {
237
3
            fmt::format_to(debug_string_buffer, "[{}:{}], ", it.first, it.second);
238
3
        }
239
1
        return Status::InternalError(
240
1
                "Rows mismatched! Data may be lost. [Expected enqueue rows={}, Real enqueue "
241
1
                "rows={}, Detail: {}]",
242
1
                rows, enqueue_rows, fmt::to_string(debug_string_buffer));
243
1
    }
244
245
14.3k
    return Status::OK();
246
14.3k
}
247
248
Status ShuffleExchanger::_split_rows(RuntimeState* state, const std::vector<uint32_t>& channel_ids,
249
0
                                     Block* block, int channel_id) {
250
0
    const auto rows = cast_set<int32_t>(block->rows());
251
0
    auto row_idx = std::make_shared<PODArray<uint32_t>>(rows);
252
0
    auto& partition_rows_histogram = _partition_rows_histogram[channel_id];
253
0
    {
254
0
        partition_rows_histogram.assign(_num_partitions + 1, 0);
255
0
        for (int32_t i = 0; i < rows; ++i) {
256
0
            partition_rows_histogram[channel_ids[i]]++;
257
0
        }
258
0
        for (int32_t i = 1; i <= _num_partitions; ++i) {
259
0
            partition_rows_histogram[i] += partition_rows_histogram[i - 1];
260
0
        }
261
0
        for (int32_t i = rows - 1; i >= 0; --i) {
262
0
            (*row_idx)[partition_rows_histogram[channel_ids[i]] - 1] = i;
263
0
            partition_rows_histogram[channel_ids[i]]--;
264
0
        }
265
0
    }
266
267
0
    Block data_block;
268
0
    std::shared_ptr<BlockWrapper> new_block_wrapper;
269
0
    if (!_free_blocks.try_dequeue(data_block)) {
270
0
        data_block = block->clone_empty();
271
0
    }
272
0
    data_block.swap(*block);
273
0
    new_block_wrapper = BlockWrapper::create_shared(std::move(data_block), nullptr, -1);
274
0
    if (new_block_wrapper->_data_block.empty()) {
275
0
        return Status::OK();
276
0
    }
277
0
    for (int i = 0; i < _num_partitions; i++) {
278
0
        uint32_t start = partition_rows_histogram[i];
279
0
        uint32_t size = partition_rows_histogram[i + 1] - start;
280
0
        if (size > 0) {
281
0
            _enqueue_data_and_set_ready(
282
0
                    i, {new_block_wrapper,
283
0
                        {.row_idxs = row_idx, .offset_start = start, .length = size}});
284
0
        }
285
0
    }
286
287
0
    return Status::OK();
288
0
}
289
290
Status PassthroughExchanger::sink(RuntimeState* state, Block* in_block, bool eos, Profile&& profile,
291
237k
                                  SinkInfo& sink_info) {
292
237k
    if (in_block->empty()) {
293
92.3k
        return Status::OK();
294
92.3k
    }
295
144k
    Block new_block;
296
144k
    if (!_free_blocks.try_dequeue(new_block)) {
297
61.5k
        new_block = {in_block->clone_empty()};
298
61.5k
    }
299
144k
    new_block.swap(*in_block);
300
144k
    auto channel_id = ((*sink_info.channel_id)++) % _num_partitions;
301
144k
    BlockWrapperSPtr wrapper = BlockWrapper::create_shared(
302
144k
            std::move(new_block),
303
144k
            sink_info.local_state ? sink_info.local_state->_shared_state : nullptr, channel_id);
304
305
144k
    _enqueue_data_and_set_ready(channel_id, sink_info.local_state, std::move(wrapper));
306
307
144k
    sink_info.local_state->_memory_used_counter->set(
308
144k
            sink_info.local_state->_shared_state->mem_usage);
309
310
144k
    return Status::OK();
311
237k
}
312
313
525k
void PassthroughExchanger::close(SourceInfo&& source_info) {
314
525k
    Block next_block;
315
525k
    BlockWrapperSPtr wrapper;
316
525k
    bool eos;
317
525k
    _data_queue[source_info.channel_id].set_eos();
318
526k
    while (_dequeue_data(source_info.local_state, wrapper, &eos, &next_block,
319
526k
                         source_info.channel_id)) {
320
        // do nothing
321
1.44k
    }
322
525k
}
323
324
10.6k
void PassToOneExchanger::close(SourceInfo&& source_info) {
325
10.6k
    Block next_block;
326
10.6k
    BlockWrapperSPtr wrapper;
327
10.6k
    bool eos;
328
10.6k
    _data_queue[source_info.channel_id].set_eos();
329
10.6k
    while (_dequeue_data(source_info.local_state, wrapper, &eos, &next_block,
330
10.6k
                         source_info.channel_id)) {
331
        // do nothing
332
0
    }
333
10.6k
}
334
335
Status PassthroughExchanger::get_block(RuntimeState* state, Block* block, bool* eos,
336
760k
                                       Profile&& profile, SourceInfo&& source_info) {
337
760k
    BlockWrapperSPtr next_block;
338
760k
    _dequeue_data(source_info.local_state, next_block, eos, block, source_info.channel_id);
339
760k
    return Status::OK();
340
760k
}
341
342
Status PassToOneExchanger::sink(RuntimeState* state, Block* in_block, bool eos, Profile&& profile,
343
2.72k
                                SinkInfo& sink_info) {
344
2.72k
    if (in_block->empty()) {
345
1.09k
        return Status::OK();
346
1.09k
    }
347
1.62k
    Block new_block;
348
1.62k
    if (!_free_blocks.try_dequeue(new_block)) {
349
1.17k
        new_block = {in_block->clone_empty()};
350
1.17k
    }
351
1.62k
    new_block.swap(*in_block);
352
353
1.62k
    BlockWrapperSPtr wrapper = BlockWrapper::create_shared(
354
1.62k
            std::move(new_block),
355
1.62k
            sink_info.local_state ? sink_info.local_state->_shared_state : nullptr, 0);
356
1.62k
    _enqueue_data_and_set_ready(0, sink_info.local_state, std::move(wrapper));
357
358
1.62k
    sink_info.local_state->_memory_used_counter->set(
359
1.62k
            sink_info.local_state->_shared_state->mem_usage);
360
361
1.62k
    return Status::OK();
362
2.72k
}
363
364
Status PassToOneExchanger::get_block(RuntimeState* state, Block* block, bool* eos,
365
3.17k
                                     Profile&& profile, SourceInfo&& source_info) {
366
3.17k
    if (source_info.channel_id != 0) {
367
3
        *eos = true;
368
3
        return Status::OK();
369
3
    }
370
3.16k
    BlockWrapperSPtr next_block;
371
3.16k
    _dequeue_data(source_info.local_state, next_block, eos, block, source_info.channel_id);
372
3.16k
    return Status::OK();
373
3.17k
}
374
375
94.6k
void ExchangerBase::finalize() {
376
94.6k
    DCHECK(_running_source_operators == 0);
377
94.6k
    Block block;
378
173k
    while (_free_blocks.try_dequeue(block)) {
379
        // do nothing
380
78.7k
    }
381
94.6k
}
382
383
Status BroadcastExchanger::sink(RuntimeState* state, Block* in_block, bool eos, Profile&& profile,
384
5.56k
                                SinkInfo& sink_info) {
385
5.56k
    if (in_block->empty()) {
386
1.81k
        return Status::OK();
387
1.81k
    }
388
3.75k
    Block new_block;
389
3.75k
    if (!_free_blocks.try_dequeue(new_block)) {
390
2.52k
        new_block = {in_block->clone_empty()};
391
2.52k
    }
392
3.75k
    new_block.swap(*in_block);
393
3.75k
    auto wrapper = BlockWrapper::create_shared(
394
3.75k
            std::move(new_block),
395
3.75k
            sink_info.local_state ? sink_info.local_state->_shared_state : nullptr, -1);
396
32.9k
    for (int i = 0; i < _num_partitions; i++) {
397
29.1k
        _enqueue_data_and_set_ready(
398
29.1k
                i, sink_info.local_state,
399
29.1k
                {wrapper, {.offset_start = 0, .length = wrapper->_data_block.rows()}});
400
29.1k
    }
401
402
3.75k
    return Status::OK();
403
5.56k
}
404
405
13.2k
// Mark this channel's queue as finished and drain any blocks still pending so
// their wrapper references are released.
void BroadcastExchanger::close(SourceInfo&& source_info) {
    const auto channel = source_info.channel_id;
    _data_queue[channel].set_eos();
    BroadcastBlock pending;
    Block scratch;
    bool eos;
    // Discard everything still queued; the empty loop body is intentional.
    while (_dequeue_data(source_info.local_state, pending, &eos, &scratch, channel)) {
    }
}
415
416
// Pop the next broadcast entry for this channel (if any) and copy the
// referenced row range out of the shared wrapper into the caller's block.
Status BroadcastExchanger::get_block(RuntimeState* state, Block* block, bool* eos,
                                     Profile&& profile, SourceInfo&& source_info) {
    BroadcastBlock next;
    if (!_dequeue_data(source_info.local_state, next, eos, block, source_info.channel_id)) {
        return Status::OK();
    }
    SCOPED_TIMER(profile.copy_data_timer);
    auto wrapper = next.first;
    const auto& range = next.second;
    // Reuse the output block's memory where possible before appending rows.
    MutableBlock dst =
            VectorizedUtils::build_mutable_mem_reuse_block(block, wrapper->_data_block);
    RETURN_IF_ERROR(dst.add_rows(&wrapper->_data_block, range.offset_start, range.length));
    return Status::OK();
}
433
434
// Round-robin one whole block to the next channel without splitting rows.
// The payload is moved out of `in_block` into a pooled (or freshly cloned)
// block so the caller's block stays reusable.
Status AdaptivePassthroughExchanger::_passthrough_sink(RuntimeState* state, Block* in_block,
                                                       SinkInfo& sink_info) {
    Block new_block;
    if (!_free_blocks.try_dequeue(new_block)) {
        new_block = {in_block->clone_empty()};
    }
    new_block.swap(*in_block);
    auto channel_id = ((*sink_info.channel_id)++) % _num_partitions;
    _enqueue_data_and_set_ready(
            channel_id, sink_info.local_state,
            BlockWrapper::create_shared(
                    std::move(new_block),
                    sink_info.local_state ? sink_info.local_state->_shared_state : nullptr,
                    channel_id));

    // `local_state` may be null: the ternary above and
    // _enqueue_data_and_set_ready both explicitly tolerate it, so the memory
    // counter update must not dereference it unconditionally.
    if (sink_info.local_state != nullptr) {
        sink_info.local_state->_memory_used_counter->set(
                sink_info.local_state->_shared_state->mem_usage);
    }
    return Status::OK();
}
453
454
// Assign each row a round-robin channel id and hand the block to _split_rows,
// which carves it into per-channel sub-blocks.
Status AdaptivePassthroughExchanger::_shuffle_sink(RuntimeState* state, Block* block,
                                                   SinkInfo& sink_info) {
    const auto num_rows = block->rows();
    std::vector<uint32_t> channel_ids(num_rows);
    // Row r goes to channel r % _num_partitions. This is exactly what the
    // previous chunked std::iota fill produced (including its skipped
    // single-row tail, which relied on zero-initialization), minus the tail
    // special case.
    const auto partitions = static_cast<size_t>(_num_partitions);
    for (size_t r = 0; r < num_rows; ++r) {
        channel_ids[r] = cast_set<uint32_t>(r % partitions);
    }

    // `local_state` may be null — the rest of this exchanger guards every
    // other use — so only update the memory counter when it is present.
    if (sink_info.local_state != nullptr) {
        sink_info.local_state->_memory_used_counter->set(
                sink_info.local_state->_shared_state->mem_usage);
    }
    RETURN_IF_ERROR(_split_rows(state, channel_ids, block, sink_info));
    return Status::OK();
}
476
477
// Split `block` into per-channel sub-blocks sized according to `channel_ids`
// and enqueue each non-empty sub-block to its channel.
//
// A counting pass builds per-channel slice boundaries; rows are then copied
// out as contiguous ranges whose sizes match those counts. (The contiguous
// copy does not preserve the exact row->channel mapping, which is fine here:
// passthrough only needs the per-channel row counts, not a stable mapping.
// The previous version also computed a full `row_idx` permutation on the heap
// that was never read — that dead O(n) pass and allocation are removed; the
// resulting histogram state is identical.)
Status AdaptivePassthroughExchanger::_split_rows(RuntimeState* state,
                                                 const std::vector<uint32_t>& channel_ids,
                                                 Block* block, SinkInfo& sink_info) {
    const auto rows = cast_set<int32_t>(block->rows());
    auto& partition_rows_histogram = _partition_rows_histogram[*sink_info.channel_id];
    // After the two passes below, partition_rows_histogram[i] is the start
    // offset of channel i's slice and partition_rows_histogram[i + 1] its end.
    partition_rows_histogram.assign(_num_partitions + 1, 0);
    for (int32_t i = 0; i < rows; ++i) {
        partition_rows_histogram[channel_ids[i] + 1]++;
    }
    for (int32_t i = 1; i <= _num_partitions; ++i) {
        partition_rows_histogram[i] += partition_rows_histogram[i - 1];
    }
    for (int32_t i = 0; i < _num_partitions; i++) {
        const size_t start = partition_rows_histogram[i];
        const size_t size = partition_rows_histogram[i + 1] - start;
        if (size > 0) {
            std::unique_ptr<MutableBlock> mutable_block =
                    MutableBlock::create_unique(block->clone_empty());
            RETURN_IF_ERROR(mutable_block->add_rows(block, start, size));
            auto new_block = mutable_block->to_block();

            _enqueue_data_and_set_ready(
                    i, sink_info.local_state,
                    BlockWrapper::create_shared(
                            std::move(new_block),
                            sink_info.local_state ? sink_info.local_state->_shared_state : nullptr,
                            i));
        }
    }
    return Status::OK();
}
516
517
// Adaptive strategy: start by shuffling rows across channels, then switch
// permanently to whole-block passthrough once enough blocks have been seen.
Status AdaptivePassthroughExchanger::sink(RuntimeState* state, Block* in_block, bool eos,
                                          Profile&& profile, SinkInfo& sink_info) {
    if (in_block->empty()) {
        return Status::OK();
    }
    if (_is_pass_through) {
        return _passthrough_sink(state, in_block, sink_info);
    }
    // Still in the shuffle phase: flip to passthrough for all subsequent
    // blocks once the number of sunk blocks reaches the partition count.
    _is_pass_through = ++_total_block >= _num_partitions;
    return _shuffle_sink(state, in_block, sink_info);
}
531
532
// Fetch the next queued block for this channel into `block`; `eos` is set once
// the queue has been marked finished and fully drained.
Status AdaptivePassthroughExchanger::get_block(RuntimeState* state, Block* block, bool* eos,
                                               Profile&& profile, SourceInfo&& source_info) {
    const auto channel = source_info.channel_id;
    BlockWrapperSPtr wrapper;
    _dequeue_data(source_info.local_state, wrapper, eos, block, channel);
    return Status::OK();
}
538
539
5.69k
// Signal end-of-stream on this channel's queue and drop any blocks still
// waiting in it so their references are released.
void AdaptivePassthroughExchanger::close(SourceInfo&& source_info) {
    const auto channel = source_info.channel_id;
    _data_queue[channel].set_eos();
    BlockWrapperSPtr wrapper;
    Block scratch;
    bool eos;
    // Drain the queue; dequeued blocks are intentionally discarded.
    while (_dequeue_data(source_info.local_state, wrapper, &eos, &scratch, channel)) {
    }
}
549
550
} // namespace doris