Coverage Report

Created: 2025-07-28 22:10

/root/doris/be/src/util/memcpy_inlined.h
 Line   Count  Source
    1          // Licensed to the Apache Software Foundation (ASF) under one
    2          // or more contributor license agreements.  See the NOTICE file
    3          // distributed with this work for additional information
    4          // regarding copyright ownership.  The ASF licenses this file
    5          // to you under the Apache License, Version 2.0 (the
    6          // "License"); you may not use this file except in compliance
    7          // with the License.  You may obtain a copy of the License at
    8          //
    9          //   http://www.apache.org/licenses/LICENSE-2.0
   10          //
   11          // Unless required by applicable law or agreed to in writing,
   12          // software distributed under the License is distributed on an
   13          // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
   14          // KIND, either express or implied.  See the License for the
   15          // specific language governing permissions and limitations
   16          // under the License.
   17
   18          #pragma once
   19
   20          #pragma once
   21          #ifdef __AVX2__
   22          #include <emmintrin.h>
   23          #include <immintrin.h>
   24          #endif
   25
   26          #include <stddef.h>
   27          #include <stdint.h>
   28          #include <stdio.h>
   29          #include <string.h>
   30
   31          #include "common/compiler_util.h"
   32
   33          namespace doris {
   34
   35          ALWAYS_INLINE inline void memcpy_inlined(void* __restrict _dst, const void* __restrict _src,
   36   2.84M                                           size_t size) {
   37   2.84M      auto dst = static_cast<uint8_t*>(_dst);
   38   2.84M      auto src = static_cast<const uint8_t*>(_src);
   39
   40   3.24M      [[maybe_unused]] tail:
   41                      /// Small sizes and tails after the loop for large sizes.
   42                      /// The order of branches is important but in fact the optimal order depends on the distribution of sizes in your application.
   43                      /// This order of branches is from the disassembly of glibc's code.
   44                      /// We copy chunks of possibly uneven size with two overlapping movs.
   45                      /// Example: to copy 5 bytes [0, 1, 2, 3, 4] we will copy tail [1, 2, 3, 4] first and then head [0, 1, 2, 3].
   46   3.24M              if (size <= 16) {
   47   2.36M          if (size >= 8) {
   48                      /// Chunks of 8..16 bytes.
   49   2.12M              __builtin_memcpy(dst + size - 8, src + size - 8, 8);
   50   2.12M              __builtin_memcpy(dst, src, 8);
   51   2.12M          } else if (size >= 4) {
   52                      /// Chunks of 4..7 bytes.
   53    181k              __builtin_memcpy(dst + size - 4, src + size - 4, 4);
   54    181k              __builtin_memcpy(dst, src, 4);
   55    181k          } else if (size >= 2) {
   56                      /// Chunks of 2..3 bytes.
   57   45.4k              __builtin_memcpy(dst + size - 2, src + size - 2, 2);
   58   45.4k              __builtin_memcpy(dst, src, 2);
   59   45.4k          } else if (size >= 1) {
   60                      /// A single byte.
   61   2.97k              *dst = *src;
   62   2.97k          }
   63              /// No bytes remaining.
   64   2.36M      }
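The ladder above turns every small copy into exactly two fixed-width moves: first the tail (the last N bytes), then the head (the first N bytes), letting the two writes overlap in the middle when the size is not a multiple of N. A standalone sketch of the 4..7-byte case (hypothetical helper name, not part of this header):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    // Copy 4 <= size <= 7 bytes using two overlapping 4-byte moves.
    static void copy_4_to_7(char* dst, const char* src, std::size_t size) {
        std::memcpy(dst + size - 4, src + size - 4, 4); // tail: the last 4 bytes
        std::memcpy(dst, src, 4);                       // head: the first 4 bytes, may overlap the tail
    }

    int main() {
        const char src[] = "ABCDEFG"; // 7 payload bytes
        char dst[8] = {};
        copy_4_to_7(dst, src, 7);     // writes "DEFG" at offset 3, then "ABCD" at offset 0
        std::printf("%s\n", dst);     // prints ABCDEFG
    }

Overlapped bytes are simply written twice with identical values, so correctness never depends on the size being a multiple of the move width, and no per-byte loop is needed.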
   65    878k      else {
   66    878k  #ifdef __AVX2__
   67    878k          if (size <= 256) {
   68    550k              if (size <= 32) {
   69   68.3k                  __builtin_memcpy(dst, src, 8);
   70   68.3k                  __builtin_memcpy(dst + 8, src + 8, 8);
   71   68.3k                  size -= 16;
   72   68.3k                  dst += 16;
   73   68.3k                  src += 16;
   74   68.3k                  goto tail;
   75   68.3k              }
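For 17..32 bytes this branch copies the first 16 bytes outright, shrinks the job by 16, and jumps back to the tail ladder, which finishes the remaining 1..16 bytes with the same overlap trick. A hand-unrolled trace for size = 20 (illustrative values only):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    int main() {
        char src[20], dst[20];
        std::memset(src, 'x', sizeof(src));
        // Lines 69-70: the first 16 bytes as two 8-byte moves.
        std::memcpy(dst, src, 8);
        std::memcpy(dst + 8, src + 8, 8);
        // Lines 71-74: size -= 16 leaves n = 4 bytes at offset 16; goto tail
        // lands in the "size >= 4" branch, whose tail and head moves coincide
        // exactly when n == 4.
        char* d = dst + 16; const char* s = src + 16; std::size_t n = 4;
        std::memcpy(d + n - 4, s + n - 4, 4); // tail move
        std::memcpy(d, s, 4);                 // head move (the same 4 bytes here)
        std::printf("%d\n", std::memcmp(dst, src, sizeof dst) == 0); // prints 1
    }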
   76
   77                      /// Then we will copy every 32 bytes from the beginning in a loop.
   78                      /// The final overlapping store of the last 32 bytes may rewrite part of what the loop copied.
   79                      /// This is Ok, similar to the code for small sizes above.
   80   2.06M              while (size > 32) {
   81   1.57M                  _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst),
   82   1.57M                                      _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src)));
   83   1.57M                  dst += 32;
   84   1.57M                  src += 32;
   85   1.57M                  size -= 32;
   86   1.57M              }
   87
   88    482k              _mm256_storeu_si256(
   89    482k                      reinterpret_cast<__m256i*>(dst + size - 32),
   90    482k                      _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + size - 32)));
   91    482k          } else {
   92    327k              if (size >= 512 * 1024 && size <= 2048 * 1024) {
   93        6                  asm volatile("rep movsb"
   94        6                               : "=D"(dst), "=S"(src), "=c"(size)
   95        6                               : "0"(dst), "1"(src), "2"(size)
   96        6                               : "memory");
   97    327k              } else {
   98    327k                  size_t padding = (32 - (reinterpret_cast<size_t>(dst) & 31)) & 31;
   99
  100    327k                  if (padding > 0) {
  101    301k                      __m256i head = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src));
  102    301k                      _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst), head);
  103    301k                      dst += padding;
  104    301k                      src += padding;
  105    301k                      size -= padding;
  106    301k                  }
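The padding expression measures the gap between dst and the next 32-byte boundary (0 when dst is already aligned). The head block then stores a full unaligned 32 bytes but advances the pointers by only padding, deliberately re-copying the overlap; that is safe because this branch only runs for size > 256, so a 32-byte head store is always in range. A standalone check of the arithmetic (the address is an assumption for illustration):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
        std::uintptr_t dst = 0x7f0000000047; // hypothetical address, 7 bytes past a 32-byte boundary
        std::size_t padding = (32 - (dst & 31)) & 31;
        std::printf("padding = %zu, aligned after = %d\n", padding,
                    (int)(((dst + padding) & 31) == 0)); // padding = 25, aligned after = 1
    }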
  107
  108                          /// Aligned unrolled copy. We will use half of available AVX registers.
  109                          /// It's not possible to have both src and dst aligned.
  110                          /// So, we will use aligned stores and unaligned loads.
  111    327k                  __m256i c0, c1, c2, c3, c4, c5, c6, c7;
  112
  113   17.7M                  while (size >= 256) {
  114   17.4M                      c0 = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src));
  115   17.4M                      c1 = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + 32));
  116   17.4M                      c2 = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + 64));
  117   17.4M                      c3 = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + 96));
  118   17.4M                      c4 = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + 128));
  119   17.4M                      c5 = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + 160));
  120   17.4M                      c6 = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + 192));
  121   17.4M                      c7 = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + 224));
  122   17.4M                      src += 256;
  123
  124   17.4M                      _mm256_store_si256((reinterpret_cast<__m256i*>(dst)), c0);
  125   17.4M                      _mm256_store_si256((reinterpret_cast<__m256i*>(dst + 32)), c1);
  126   17.4M                      _mm256_store_si256((reinterpret_cast<__m256i*>(dst + 64)), c2);
  127   17.4M                      _mm256_store_si256((reinterpret_cast<__m256i*>(dst + 96)), c3);
  128   17.4M                      _mm256_store_si256((reinterpret_cast<__m256i*>(dst + 128)), c4);
  129   17.4M                      _mm256_store_si256((reinterpret_cast<__m256i*>(dst + 160)), c5);
  130   17.4M                      _mm256_store_si256((reinterpret_cast<__m256i*>(dst + 192)), c6);
  131   17.4M                      _mm256_store_si256((reinterpret_cast<__m256i*>(dst + 224)), c7);
  132   17.4M                      dst += 256;
  133
  134   17.4M                      size -= 256;
  135   17.4M                  }
  136
  137    327k                  goto tail;
  138    327k              }
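Only dst is aligned after the padding step, so the loop pairs unaligned loads (_mm256_loadu_si256) with aligned stores (_mm256_store_si256), which would fault on a misaligned address. Using eight of the sixteen YMM registers per iteration gives the core independent loads to overlap, and the trailing goto tail routes the final 0..255 bytes back through the dispatch at the top. One iteration of the pairing, reduced to a sketch (hypothetical helper; assumes AVX2):

    #include <immintrin.h>
    #include <cstdint>

    // Copy 32 bytes when only dst is known to be 32-byte aligned.
    static void copy32_aligned_dst(uint8_t* dst, const uint8_t* src) {
        __m256i v = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src)); // tolerates any alignment
        _mm256_store_si256(reinterpret_cast<__m256i*>(dst), v);                // requires 32-byte alignment
    }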
  139    327k          }
  140         #else
  141                 memcpy(dst, src, size);
  142         #endif
  143    878k      }
  144   3.24M  }
  145          } // namespace doris
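For context, a minimal caller sketch (hypothetical test file; assumes the Doris be/src tree is on the include path and the translation unit is built with -mavx2 so the vector paths above are exercised):

    #include <cstdint>
    #include <vector>

    #include "util/memcpy_inlined.h"

    int main() {
        std::vector<std::uint8_t> src(1000, 0xAB);
        std::vector<std::uint8_t> dst(1000, 0);
        doris::memcpy_inlined(dst.data(), src.data(), src.size()); // > 256 bytes: padded, unrolled path
        return dst[999] == 0xAB ? 0 : 1;
    }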