be/src/util/hash_util.hpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | // This file is copied from |
18 | | // https://github.com/apache/impala/blob/branch-2.9.0/be/src/util/hash-util.h |
19 | | // and modified by Doris |
20 | | |
21 | | #pragma once |
22 | | |
23 | | #include <crc32c/crc32c.h> |
24 | | #include <gen_cpp/Types_types.h> |
25 | | #include <xxh3.h> |
26 | | #include <xxhash.h> |
27 | | #include <zlib.h> |
28 | | |
29 | | #include <bit> |
30 | | #include <functional> |
31 | | |
32 | | #include "common/compiler_util.h" // IWYU pragma: keep |
33 | | #include "exec/common/endian.h" |
34 | | #include "util/cpu_info.h" |
35 | | #include "util/hash/city.h" |
36 | | #include "util/hash/murmur_hash3.h" |
37 | | #include "util/sse_util.hpp" |
38 | | |
39 | | namespace doris { |
40 | | namespace detail { |
41 | | // Slicing-by-4 table: t[0] is the standard byte-at-a-time table, |
42 | | // t[1..3] are extended tables for parallel 4-byte processing. |
43 | | struct CRC32SliceBy4Table { |
44 | | uint32_t t[4][256] {}; |
45 | 0 | constexpr CRC32SliceBy4Table() { |
46 | 0 | // t[0]: standard CRC32 lookup table |
47 | 0 | for (uint32_t i = 0; i < 256; i++) { |
48 | 0 | uint32_t c = i; |
49 | 0 | for (int j = 0; j < 8; j++) { |
50 | 0 | c = (c & 1) ? ((c >> 1) ^ 0xEDB88320U) : (c >> 1); |
51 | 0 | } |
52 | 0 | t[0][i] = c; |
53 | 0 | } |
54 | 0 | // t[1..3]: each entry is one additional CRC byte-step applied to t[k-1] |
55 | 0 | for (uint32_t i = 0; i < 256; i++) { |
56 | 0 | uint32_t c = t[0][i]; |
57 | 0 | for (int k = 1; k < 4; k++) { |
58 | 0 | c = t[0][c & 0xFF] ^ (c >> 8); |
59 | 0 | t[k][i] = c; |
60 | 0 | } |
61 | 0 | } |
62 | 0 | } |
63 | | }; |
64 | | } // namespace detail |
65 | | |
66 | | // Utility class to compute hash values. |
67 | | class HashUtil { |
68 | | private: |
69 | | static inline constexpr detail::CRC32SliceBy4Table CRC32_TABLE {}; |
70 | | |
71 | | public: |
72 | 39.1M | static uint32_t zlib_crc_hash(const void* data, uint32_t bytes, uint32_t hash) { |
73 | 39.1M | return (uint32_t)crc32(hash, (const unsigned char*)data, bytes); |
74 | 39.1M | } |
75 | | |
76 | | // Inline CRC32 (zlib-compatible, standard CRC32 polynomial) for fixed-size types. |
77 | | // Uses Slicing-by-4 technique for 4/8-byte types: processes 4 bytes at a time using |
78 | | // 4 precomputed lookup tables, reducing serial table lookups from 4 to 1 per 4-byte chunk. |
79 | | // Polynomial: 0xEDB88320 (reflected form of 0x04C11DB7). |
80 | | // Endian note: CRC32 reflected algorithm processes bytes in address order (byte[0] first). |
81 | | // Slicing-by-4 requires byte[0] at LSB of the loaded uint32_t, which is little-endian layout. |
82 | | // LittleEndian::Load32 provides this on ALL platforms: noop on LE, bswap on BE. |
83 | | template <typename T> |
84 | 17.0M | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { |
85 | 17.0M | const auto* p = reinterpret_cast<const uint8_t*>(&value); |
86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF |
87 | 17.0M | uint32_t crc = hash ^ 0xFFFFFFFFU; |
88 | | |
89 | 17.0M | if constexpr (sizeof(T) == 1) { |
90 | | // 1 byte: single table lookup |
91 | 1.07k | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); |
92 | 1.07k | } else if constexpr (sizeof(T) == 2) { |
93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) |
94 | 250 | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); |
95 | 250 | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); |
96 | 17.0M | } else if constexpr (sizeof(T) == 4) { |
97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel |
98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, |
99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. |
100 | 17.0M | uint32_t word = LittleEndian::Load32(p) ^ crc; |
101 | 17.0M | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ |
102 | 17.0M | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; |
103 | 17.0M | } else if constexpr (sizeof(T) == 8) { |
104 | | // 8 bytes: two Slicing-by-4 steps |
105 | 5.06k | uint32_t word = LittleEndian::Load32(p) ^ crc; |
106 | 5.06k | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ |
107 | 5.06k | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; |
108 | | |
109 | 5.06k | word = LittleEndian::Load32(p + 4) ^ crc; |
110 | 5.06k | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ |
111 | 5.06k | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; |
112 | 5.06k | } else { |
113 | | // Fallback to zlib for larger/unusual types |
114 | 2.90k | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); |
115 | 2.90k | } |
116 | 0 | return crc ^ 0xFFFFFFFFU; |
117 | 17.0M | } _ZN5doris8HashUtil16zlib_crc32_fixedIiEEjRKT_j Line | Count | Source | 84 | 17.0M | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 17.0M | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 17.0M | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | 17.0M | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | 17.0M | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | 17.0M | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | 17.0M | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 17.0M | return crc ^ 0xFFFFFFFFU; | 117 | 17.0M | } |
_ZN5doris8HashUtil16zlib_crc32_fixedIhEEjRKT_j Line | Count | Source | 84 | 105 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 105 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 105 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | 105 | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | 105 | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 105 | return crc ^ 0xFFFFFFFFU; | 117 | 105 | } |
_ZN5doris8HashUtil16zlib_crc32_fixedIaEEjRKT_j Line | Count | Source | 84 | 965 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 965 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 965 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | 965 | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | 965 | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 965 | return crc ^ 0xFFFFFFFFU; | 117 | 965 | } |
_ZN5doris8HashUtil16zlib_crc32_fixedIsEEjRKT_j Line | Count | Source | 84 | 250 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 250 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 250 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | 250 | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | 250 | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | 250 | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 250 | return crc ^ 0xFFFFFFFFU; | 117 | 250 | } |
_ZN5doris8HashUtil16zlib_crc32_fixedIlEEjRKT_j Line | Count | Source | 84 | 3.79k | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 3.79k | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 3.79k | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | 3.79k | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | 3.79k | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | 3.79k | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | 3.79k | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | 3.79k | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | 3.79k | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | 3.79k | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 3.79k | return crc ^ 0xFFFFFFFFU; | 117 | 3.79k | } |
_ZN5doris8HashUtil16zlib_crc32_fixedInEEjRKT_j Line | Count | Source | 84 | 160 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 160 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 160 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | 160 | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | 160 | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | 160 | } | 116 | 0 | return crc ^ 0xFFFFFFFFU; | 117 | 160 | } |
_ZN5doris8HashUtil16zlib_crc32_fixedIfEEjRKT_j Line | Count | Source | 84 | 132 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 132 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 132 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | 132 | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | 132 | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | 132 | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | 132 | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 132 | return crc ^ 0xFFFFFFFFU; | 117 | 132 | } |
_ZN5doris8HashUtil16zlib_crc32_fixedIdEEjRKT_j Line | Count | Source | 84 | 106 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 106 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 106 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | 106 | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | 106 | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | 106 | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | 106 | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | 106 | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | 106 | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | 106 | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 106 | return crc ^ 0xFFFFFFFFU; | 117 | 106 | } |
_ZN5doris8HashUtil16zlib_crc32_fixedIjEEjRKT_j Line | Count | Source | 84 | 51 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 51 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 51 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | 51 | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | 51 | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | 51 | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | 51 | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 51 | return crc ^ 0xFFFFFFFFU; | 117 | 51 | } |
_ZN5doris8HashUtil16zlib_crc32_fixedIoEEjRKT_j Line | Count | Source | 84 | 12 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 12 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 12 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | 12 | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | 12 | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | 12 | } | 116 | 0 | return crc ^ 0xFFFFFFFFU; | 117 | 12 | } |
_ZN5doris8HashUtil16zlib_crc32_fixedINS_11DateV2ValueINS_15DateV2ValueTypeEEEEEjRKT_j Line | Count | Source | 84 | 100 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 100 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 100 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | 100 | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | 100 | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | 100 | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | 100 | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 100 | return crc ^ 0xFFFFFFFFU; | 117 | 100 | } |
_ZN5doris8HashUtil16zlib_crc32_fixedINS_11DateV2ValueINS_19DateTimeV2ValueTypeEEEEEjRKT_j Line | Count | Source | 84 | 126 | static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) { | 85 | 126 | const auto* p = reinterpret_cast<const uint8_t*>(&value); | 86 | | // zlib convention: pre/post XOR with 0xFFFFFFFF | 87 | 126 | uint32_t crc = hash ^ 0xFFFFFFFFU; | 88 | | | 89 | | if constexpr (sizeof(T) == 1) { | 90 | | // 1 byte: single table lookup | 91 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 92 | | } else if constexpr (sizeof(T) == 2) { | 93 | | // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes) | 94 | | crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8); | 95 | | crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8); | 96 | | } else if constexpr (sizeof(T) == 4) { | 97 | | // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel | 98 | | // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian, | 99 | | // ensuring byte[0] is always at LSB for correct CRC byte processing order. 
| 100 | | uint32_t word = LittleEndian::Load32(p) ^ crc; | 101 | | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 102 | | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 103 | 126 | } else if constexpr (sizeof(T) == 8) { | 104 | | // 8 bytes: two Slicing-by-4 steps | 105 | 126 | uint32_t word = LittleEndian::Load32(p) ^ crc; | 106 | 126 | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 107 | 126 | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 108 | | | 109 | 126 | word = LittleEndian::Load32(p + 4) ^ crc; | 110 | 126 | crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^ | 111 | 126 | CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF]; | 112 | | } else { | 113 | | // Fallback to zlib for larger/unusual types | 114 | | return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T)); | 115 | | } | 116 | 126 | return crc ^ 0xFFFFFFFFU; | 117 | 126 | } |
    // Computes the zlib-flavored CRC-32 (reflected polynomial, zlib pre/post
    // conditioning) of a single fixed-size value, dispatching at compile time
    // on sizeof(T) so small scalar keys avoid the generic byte loop.
    //
    // @param value fixed-width key (int, decimal, date value, ...); hashed by
    //              its raw in-memory bytes.
    // @param hash  running CRC state from the previous column (zlib-style,
    //              i.e. the already post-conditioned value).
    // @return updated zlib-compatible CRC-32.
    template <typename T>
    static uint32_t zlib_crc32_fixed(const T& value, uint32_t hash) {
        const auto* p = reinterpret_cast<const uint8_t*>(&value);
        // zlib convention: pre-condition with ~0; undone by the final XOR below.
        uint32_t crc = hash ^ 0xFFFFFFFFU;

        if constexpr (sizeof(T) == 1) {
            // 1 byte: single table lookup.
            crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8);
        } else if constexpr (sizeof(T) == 2) {
            // 2 bytes: two sequential table lookups (slicing doesn't help below 4 bytes).
            crc = CRC32_TABLE.t[0][(crc ^ p[0]) & 0xFF] ^ (crc >> 8);
            crc = CRC32_TABLE.t[0][(crc ^ p[1]) & 0xFF] ^ (crc >> 8);
        } else if constexpr (sizeof(T) == 4) {
            // 4 bytes: one Slicing-by-4 step — 4 independent lookups in parallel.
            // LittleEndian::Load32 handles unaligned load + byte-swap on big-endian,
            // ensuring byte[0] is always at LSB for correct CRC byte processing order.
            // t[3] consumes the LSB (first byte) and t[0] the MSB, per the
            // standard slicing-by-4 table layout.
            uint32_t word = LittleEndian::Load32(p) ^ crc;
            crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^
                  CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF];
        } else if constexpr (sizeof(T) == 8) {
            // 8 bytes: two Slicing-by-4 steps.
            uint32_t word = LittleEndian::Load32(p) ^ crc;
            crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^
                  CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF];

            word = LittleEndian::Load32(p + 4) ^ crc;
            crc = CRC32_TABLE.t[3][(word)&0xFF] ^ CRC32_TABLE.t[2][(word >> 8) & 0xFF] ^
                  CRC32_TABLE.t[1][(word >> 16) & 0xFF] ^ CRC32_TABLE.t[0][(word >> 24) & 0xFF];
        } else {
            // Fallback to zlib for larger/unusual types (16/32-byte decimals, etc.).
            // zlib's crc32() applies its own pre/post conditioning, so it takes the
            // raw `hash` and returns directly — the locally conditioned `crc` is
            // intentionally unused on this path.
            return (uint32_t)crc32(hash, (const unsigned char*)&value, sizeof(T));
        }
        return crc ^ 0xFFFFFFFFU;
    }
|
118 | | |
119 | 11.1M | static uint32_t zlib_crc_hash_null(uint32_t hash) { |
120 | | // null is treat as 0 when hash |
121 | 11.1M | static const int INT_VALUE = 0; |
122 | 11.1M | return zlib_crc32_fixed(INT_VALUE, hash); |
123 | 11.1M | } |
124 | | |
    // Computes CRC-32C (Castagnoli polynomial) of a single fixed-size value via
    // the hardware CRC32 instructions, dispatching at compile time on sizeof(T).
    //
    // NOTE(review): unlike crc_hash() below, there is no runtime SSE4.2 check
    // here — presumably sse_util.hpp maps the _mm_crc32_* names to a portable
    // fallback on non-x86 targets; confirm before using on new platforms.
    //
    // @param value fixed-width key, hashed by its raw in-memory bytes
    //              (reinterpret_cast assumes that is well-defined for T).
    // @param hash  running CRC-32C state (no pre/post conditioning applied).
    // @return updated CRC-32C state.
    template <typename T>
    static uint32_t crc32c_fixed(const T& value, uint32_t hash) {
        if constexpr (sizeof(T) == 1) {
            return _mm_crc32_u8(hash, *reinterpret_cast<const uint8_t*>(&value));
        } else if constexpr (sizeof(T) == 2) {
            return _mm_crc32_u16(hash, *reinterpret_cast<const uint16_t*>(&value));
        } else if constexpr (sizeof(T) == 4) {
            return _mm_crc32_u32(hash, *reinterpret_cast<const uint32_t*>(&value));
        } else if constexpr (sizeof(T) == 8) {
            // _mm_crc32_u64 returns 64 bits but the CRC lives in the low 32.
            return (uint32_t)_mm_crc32_u64(hash, *reinterpret_cast<const uint64_t*>(&value));
        } else {
            // Wider types (128/256-bit decimals, TimestampTzValue, ...) go through
            // the generic byte-range routine from crc32c/crc32c.h.
            return crc32c_extend(hash, (const uint8_t*)&value, sizeof(T));
        }
    }
|
139 | | |
140 | 2.68k | static uint32_t crc32c_null(uint32_t hash) { |
141 | | // null is treat as 0 when hash |
142 | 2.68k | static const int INT_VALUE = 0; |
143 | 2.68k | return crc32c_fixed(INT_VALUE, hash); |
144 | 2.68k | } |
145 | | |
146 | | // Compute the Crc32 hash for data using SSE4 instructions. The input hash parameter is |
147 | | // the current hash/seed value. |
148 | | // This should only be called if SSE is supported. |
149 | | // This is ~4x faster than Fnv/Boost Hash. |
150 | | // NOTE: DO NOT use this method for checksum! This does not generate the standard CRC32 checksum! |
151 | | // For checksum, use CRC-32C algorithm from crc32c.h |
152 | | // NOTE: Any changes made to this function need to be reflected in Codegen::GetHashFn. |
153 | | // TODO: crc32 hashes with different seeds do not result in different hash functions. |
154 | | // The resulting hashes are correlated. |
155 | | // ATTN: prefer do not use this function anymore, use crc32c::Extend instead |
156 | | // This function is retained because it is not certain whether there are compatibility issues with historical data. |
157 | 163M | static uint32_t crc_hash(const void* data, uint32_t bytes, uint32_t hash) { |
158 | 163M | if (!CpuInfo::is_supported(CpuInfo::SSE4_2)) { |
159 | 0 | return zlib_crc_hash(data, bytes, hash); |
160 | 0 | } |
161 | 163M | uint32_t words = bytes / sizeof(uint32_t); |
162 | 163M | bytes = bytes % sizeof(uint32_t); |
163 | | |
164 | 163M | const uint32_t* p = reinterpret_cast<const uint32_t*>(data); |
165 | | |
166 | 453M | while (words--) { |
167 | 289M | hash = _mm_crc32_u32(hash, *p); |
168 | 289M | ++p; |
169 | 289M | } |
170 | | |
171 | 163M | const uint8_t* s = reinterpret_cast<const uint8_t*>(p); |
172 | | |
173 | 215M | while (bytes--) { |
174 | 51.9M | hash = _mm_crc32_u8(hash, *s); |
175 | 51.9M | ++s; |
176 | 51.9M | } |
177 | | |
178 | | // The lower half of the CRC hash has has poor uniformity, so swap the halves |
179 | | // for anyone who only uses the first several bits of the hash. |
180 | 163M | hash = (hash << 16) | (hash >> 16); |
181 | 163M | return hash; |
182 | 163M | } |
183 | | |
184 | 67.8k | static uint64_t crc_hash64(const void* data, uint32_t bytes, uint64_t hash) { |
185 | 67.8k | uint32_t words = bytes / sizeof(uint32_t); |
186 | 67.8k | bytes = bytes % sizeof(uint32_t); |
187 | | |
188 | 67.8k | uint32_t h1 = hash >> 32; |
189 | 67.8k | uint32_t h2 = (hash << 32) >> 32; |
190 | | |
191 | 67.8k | const uint32_t* p = reinterpret_cast<const uint32_t*>(data); |
192 | 1.40M | while (words--) { |
193 | 1.34M | (words & 1) ? (h1 = _mm_crc32_u32(h1, *p)) : (h2 = _mm_crc32_u32(h2, *p)); |
194 | 1.34M | ++p; |
195 | 1.34M | } |
196 | | |
197 | 67.8k | const uint8_t* s = reinterpret_cast<const uint8_t*>(p); |
198 | 132k | while (bytes--) { |
199 | 64.7k | (bytes & 1) ? (h1 = _mm_crc32_u8(h1, *s)) : (h2 = _mm_crc32_u8(h2, *s)); |
200 | 64.7k | ++s; |
201 | 64.7k | } |
202 | 67.8k | union { |
203 | 67.8k | uint64_t u64; |
204 | 67.8k | uint32_t u32[2]; |
205 | 67.8k | } converter; |
206 | 67.8k | converter.u64 = hash; |
207 | | |
208 | 67.8k | h1 = (h1 << 16) | (h1 >> 16); |
209 | 67.8k | h2 = (h2 << 16) | (h2 >> 16); |
210 | 67.8k | converter.u32[0] = h1; |
211 | 67.8k | converter.u32[1] = h2; |
212 | | |
213 | 67.8k | return converter.u64; |
214 | 67.8k | } |
215 | | |
216 | | // refer to https://github.com/apache/commons-codec/blob/master/src/main/java/org/apache/commons/codec/digest/MurmurHash3.java |
217 | | static const uint32_t MURMUR3_32_SEED = 104729; |
218 | | |
219 | | // modify from https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp |
220 | 1.09k | static uint32_t murmur_hash3_32(const void* key, int64_t len, uint32_t seed) { |
221 | 1.09k | uint32_t out = 0; |
222 | 1.09k | murmur_hash3_x86_32(key, len, seed, &out); |
223 | 1.09k | return out; |
224 | 1.09k | } |
225 | | |
226 | | template <bool is_mmh64_v2> |
227 | 127 | static uint64_t murmur_hash3_64(const void* key, int64_t len, uint64_t seed) { |
228 | 127 | uint64_t out = 0; |
229 | 127 | if constexpr (is_mmh64_v2) { |
230 | 62 | murmur_hash3_x64_64_shared(key, len, seed, &out); |
231 | 65 | } else { |
232 | 65 | murmur_hash3_x64_64(key, len, seed, &out); |
233 | 65 | } |
234 | 127 | return out; |
235 | 127 | } _ZN5doris8HashUtil15murmur_hash3_64ILb0EEEmPKvlm Line | Count | Source | 227 | 65 | static uint64_t murmur_hash3_64(const void* key, int64_t len, uint64_t seed) { | 228 | 65 | uint64_t out = 0; | 229 | | if constexpr (is_mmh64_v2) { | 230 | | murmur_hash3_x64_64_shared(key, len, seed, &out); | 231 | 65 | } else { | 232 | 65 | murmur_hash3_x64_64(key, len, seed, &out); | 233 | 65 | } | 234 | 65 | return out; | 235 | 65 | } |
_ZN5doris8HashUtil15murmur_hash3_64ILb1EEEmPKvlm Line | Count | Source | 227 | 62 | static uint64_t murmur_hash3_64(const void* key, int64_t len, uint64_t seed) { | 228 | 62 | uint64_t out = 0; | 229 | 62 | if constexpr (is_mmh64_v2) { | 230 | 62 | murmur_hash3_x64_64_shared(key, len, seed, &out); | 231 | | } else { | 232 | | murmur_hash3_x64_64(key, len, seed, &out); | 233 | | } | 234 | 62 | return out; | 235 | 62 | } |
|
236 | | |
237 | | static const int MURMUR_R = 47; |
238 | | |
239 | | // Murmur2 hash implementation returning 64-bit hashes. |
240 | 0 | static uint64_t murmur_hash2_64(const void* input, int len, uint64_t seed) { |
241 | 0 | uint64_t h = seed ^ (len * MURMUR_PRIME); |
242 | 0 |
|
243 | 0 | const uint64_t* data = reinterpret_cast<const uint64_t*>(input); |
244 | 0 | const uint64_t* end = data + (len / sizeof(uint64_t)); |
245 | 0 |
|
246 | 0 | while (data != end) { |
247 | 0 | uint64_t k = *data++; |
248 | 0 | k *= MURMUR_PRIME; |
249 | 0 | k ^= k >> MURMUR_R; |
250 | 0 | k *= MURMUR_PRIME; |
251 | 0 | h ^= k; |
252 | 0 | h *= MURMUR_PRIME; |
253 | 0 | } |
254 | 0 |
|
255 | 0 | const uint8_t* data2 = reinterpret_cast<const uint8_t*>(data); |
256 | 0 | switch (len & 7) { |
257 | 0 | case 7: |
258 | 0 | h ^= uint64_t(data2[6]) << 48; |
259 | 0 | [[fallthrough]]; |
260 | 0 | case 6: |
261 | 0 | h ^= uint64_t(data2[5]) << 40; |
262 | 0 | [[fallthrough]]; |
263 | 0 | case 5: |
264 | 0 | h ^= uint64_t(data2[4]) << 32; |
265 | 0 | [[fallthrough]]; |
266 | 0 | case 4: |
267 | 0 | h ^= uint64_t(data2[3]) << 24; |
268 | 0 | [[fallthrough]]; |
269 | 0 | case 3: |
270 | 0 | h ^= uint64_t(data2[2]) << 16; |
271 | 0 | [[fallthrough]]; |
272 | 0 | case 2: |
273 | 0 | h ^= uint64_t(data2[1]) << 8; |
274 | 0 | [[fallthrough]]; |
275 | 0 | case 1: |
276 | 0 | h ^= uint64_t(data2[0]); |
277 | 0 | h *= MURMUR_PRIME; |
278 | 0 | } |
279 | 0 |
|
280 | 0 | h ^= h >> MURMUR_R; |
281 | 0 | h *= MURMUR_PRIME; |
282 | 0 | h ^= h >> MURMUR_R; |
283 | 0 | return h; |
284 | 0 | } |
285 | | |
    // FNV-1 parameters — default values recommended by
    // http://isthe.com/chongo/tech/comp/fnv/
    static const uint32_t FNV_PRIME = 0x01000193; // 16777619
    static const uint32_t FNV_SEED = 0x811C9DC5;  // 2166136261
    static const uint64_t FNV64_PRIME = 1099511628211UL;
    static const uint64_t FNV64_SEED = 14695981039346656037UL;
    // MurmurHash2/64A mixing multiplier (used by murmur_hash2_64 and murmur_hash64A).
    static const uint64_t MURMUR_PRIME = 0xc6a4a7935bd1e995ULL;
    // Default Murmur seed; the value fits in 32 bits, so the ULL literal is
    // stored without truncation.
    static const uint32_t MURMUR_SEED = 0xadc83b19ULL;
// Implementation of the Fowler-Noll-Vo hash function (32-bit FNV-1a).
// This is not as performant as boost's hash on int types (2x slower) but has
// better bit entropy. For ints, boost just returns the value of the int, which
// can be pathological: for data <1000, 2000, 3000, 4000, ...>, taking the hash
// mod 1000 collides every value into the same bucket. For string values, Fnv
// is slightly faster than boost.
//   data:  bytes to hash
//   bytes: number of bytes to consume
//   hash:  seed, or previous result when chaining (start with FNV_SEED)
static uint32_t fnv_hash(const void* data, uint32_t bytes, uint32_t hash) {
    constexpr uint32_t prime = 0x01000193; // same value as FNV_PRIME above
    const uint8_t* cur = reinterpret_cast<const uint8_t*>(data);
    const uint8_t* stop = cur + bytes;
    for (; cur != stop; ++cur) {
        // FNV-1a order: xor the byte in first, then multiply.
        hash = (uint32_t(*cur) ^ hash) * prime;
    }
    return hash;
}
309 | | |
// 64-bit FNV-1a counterpart of fnv_hash().
//   data:  bytes to hash
//   bytes: number of bytes to consume
//   hash:  seed, or previous result when chaining (start with FNV64_SEED)
static uint64_t fnv_hash64(const void* data, uint32_t bytes, uint64_t hash) {
    constexpr uint64_t prime = 1099511628211ULL; // same value as FNV64_PRIME above
    const uint8_t* cur = reinterpret_cast<const uint8_t*>(data);
    for (const uint8_t* stop = cur + bytes; cur != stop; ++cur) {
        hash = (uint64_t(*cur) ^ hash) * prime;
    }
    return hash;
}
320 | | |
// Our hash function is MurmurHash2, 64 bit version (MurmurHash64A).
// It was modified in order to provide the same result on big and little
// endian archs (endian neutral): each 8-byte word is assembled from the input
// bytes in little-endian order rather than loaded directly, so the result is
// bit-identical everywhere (on little-endian machines this assembly equals a
// plain 64-bit load).
static uint64_t murmur_hash64A(const void* key, int64_t len, unsigned int seed) {
    constexpr uint64_t mul = 0xc6a4a7935bd1e995ULL; // same value as MURMUR_PRIME
    constexpr int shift = 47;

    uint64_t h = seed ^ (len * mul);

    const uint8_t* p = reinterpret_cast<const uint8_t*>(key);
    const uint8_t* word_end = p + (len - (len & 7));

    // Bulk phase: mix one little-endian 64-bit word at a time.
    for (; p != word_end; p += 8) {
        uint64_t k = uint64_t(p[0]) | (uint64_t(p[1]) << 8) | (uint64_t(p[2]) << 16) |
                     (uint64_t(p[3]) << 24) | (uint64_t(p[4]) << 32) | (uint64_t(p[5]) << 40) |
                     (uint64_t(p[6]) << 48) | (uint64_t(p[7]) << 56);
        k *= mul;
        k ^= k >> shift;
        k *= mul;
        h ^= k;
        h *= mul;
    }

    // Tail phase: fold in the remaining 0-7 bytes.
    switch (len & 7) {
    case 7:
        h ^= uint64_t(p[6]) << 48;
        [[fallthrough]];
    case 6:
        h ^= uint64_t(p[5]) << 40;
        [[fallthrough]];
    case 5:
        h ^= uint64_t(p[4]) << 32;
        [[fallthrough]];
    case 4:
        h ^= uint64_t(p[3]) << 24;
        [[fallthrough]];
    case 3:
        h ^= uint64_t(p[2]) << 16;
        [[fallthrough]];
    case 2:
        h ^= uint64_t(p[1]) << 8;
        [[fallthrough]];
    case 1:
        h ^= uint64_t(p[0]);
        h *= mul;
    }

    // Final avalanche.
    h ^= h >> shift;
    h *= mul;
    h ^= h >> shift;
    return h;
}
387 | | |
388 | | // Computes the hash value for data. Will call either CrcHash or FnvHash |
389 | | // depending on hardware capabilities. |
390 | | // Seed values for different steps of the query execution should use different seeds |
391 | | // to prevent accidental key collisions. (See IMPALA-219 for more details). |
392 | 163M | static uint32_t hash(const void* data, uint32_t bytes, uint32_t seed) { |
393 | 163M | #ifdef __SSE4_2__ |
394 | | |
395 | 163M | if (LIKELY(CpuInfo::is_supported(CpuInfo::SSE4_2))) { |
396 | 163M | return crc_hash(data, bytes, seed); |
397 | 163M | } else { |
398 | 120k | return fnv_hash(data, bytes, seed); |
399 | 120k | } |
400 | | |
401 | | #else |
402 | | return fnv_hash(data, bytes, seed); |
403 | | #endif |
404 | 163M | } |
405 | | |
406 | 38.1M | static uint64_t hash64(const void* data, uint64_t bytes, uint64_t seed) { |
407 | | #ifdef _SSE4_2_ |
408 | | if (LIKELY(CpuInfo::is_supported(CpuInfo::SSE4_2))) { |
409 | | return crc_hash64(data, bytes, seed); |
410 | | |
411 | | } else { |
412 | | uint64_t hash = 0; |
413 | | murmur_hash3_x64_64(data, bytes, seed, &hash); |
414 | | return hash; |
415 | | } |
416 | | #else |
417 | 38.1M | uint64_t hash = 0; |
418 | 38.1M | murmur_hash3_x64_64(data, bytes, seed, &hash); |
419 | 38.1M | return hash; |
420 | 38.1M | #endif |
421 | 38.1M | } |
// hash_combine is the same as boost::hash_combine, except it replaces
// boost::hash with std::hash. Mixes the hash of v into seed, in place.
template <class T>
static inline void hash_combine(std::size_t& seed, const T& v) {
    // Golden-ratio constant plus seed shifts, exactly as in boost.
    const std::size_t mixed = std::hash<T> {}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
    seed ^= mixed;
}
_ZN5doris8HashUtil12hash_combineIlEEvRmRKT_ Line | Count | Source | 425 | 8.29M | static inline void hash_combine(std::size_t& seed, const T& v) { | 426 | 8.29M | std::hash<T> hasher; | 427 | 8.29M | seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); | 428 | 8.29M | } |
|
429 | | |
430 | | #if defined(__clang__) |
431 | | #pragma clang diagnostic push |
432 | | #pragma clang diagnostic ignored "-Wused-but-marked-unused" |
433 | | #endif |
// xxHash function for a byte array. For convenience, a 32-bit seed is also
// hashed into the result. The mapping may change from time to time.
static xxh_u32 xxHash32WithSeed(const char* s, size_t len, xxh_u32 seed) {
    return XXH32(s, len, seed);
}
439 | | |
// Same as xxHash32WithSeed, just for a null value: hashes a constant int 0 so
// that nulls map to a stable, seed-dependent value.
static xxh_u32 xxHash32NullWithSeed(xxh_u32 seed) {
    static const int INT_VALUE = 0;
    return XXH32(reinterpret_cast<const char*>(&INT_VALUE), sizeof(int), seed);
}
445 | | |
// 64-bit xxHash (XXH3 variant) of a byte array, with the seed folded in.
// NOTE: XXH3_64bits_withSeed is NOT value-compatible with classic XXH64; use
// xxhash64_compat_with_seed when the classic XXH64 mapping is required.
static xxh_u64 xxHash64WithSeed(const char* s, size_t len, xxh_u64 seed) {
    return XXH3_64bits_withSeed(s, len, seed);
}
449 | | |
// Same as xxHash64WithSeed, just for a null value (hashes a constant int 0).
static xxh_u64 xxHash64NullWithSeed(xxh_u64 seed) {
    static const int INT_VALUE = 0;
    return XXH3_64bits_withSeed(reinterpret_cast<const char*>(&INT_VALUE), sizeof(int), seed);
}
455 | | |
// Classic XXH64 of a byte array. Kept separate from xxHash64WithSeed because
// XXH64 and XXH3_64bits produce different values; "compat" callers rely on the
// classic XXH64 mapping.
static xxh_u64 xxhash64_compat_with_seed(const char* s, size_t len, xxh_u64 seed) {
    return XXH64(reinterpret_cast<const void*>(s), len, seed);
}
459 | | |
// Same as xxhash64_compat_with_seed, just for a null value (hashes a constant
// int 0 with classic XXH64).
static xxh_u64 xxhash64_compat_null_with_seed(xxh_u64 seed) {
    static const int INT_VALUE = 0;
    return XXH64(reinterpret_cast<const void*>(&INT_VALUE), sizeof(int), seed);
}
464 | | |
465 | | #if defined(__clang__) |
466 | | #pragma clang diagnostic pop |
467 | | #endif |
468 | | }; |
469 | | |
470 | | } // namespace doris |
471 | | |
template <>
struct std::hash<doris::TUniqueId> {
    // Hashes the two 64-bit halves of the id, chaining the 32-bit seed
    // through both HashUtil::hash calls.
    size_t operator()(const doris::TUniqueId& id) const {
        uint32_t seed = 0;
        seed = doris::HashUtil::hash(&id.lo, sizeof(id.lo), seed);
        seed = doris::HashUtil::hash(&id.hi, sizeof(id.hi), seed);
        return seed;
    }
};
481 | | |
482 | | template <> |
483 | | struct std::hash<doris::TNetworkAddress> { |
484 | 3.50M | size_t operator()(const doris::TNetworkAddress& address) const { |
485 | 3.50M | uint32_t seed = 0; |
486 | 3.50M | seed = doris::HashUtil::hash(address.hostname.data(), (uint32_t)address.hostname.size(), |
487 | 3.50M | seed); |
488 | 3.50M | seed = doris::HashUtil::hash(&address.port, 4, seed); |
489 | 3.50M | return seed; |
490 | 3.50M | } |
491 | | }; |
492 | | |
template <>
struct std::hash<std::pair<doris::TUniqueId, int64_t>> {
    // Chains the three 64-bit components (id.lo, id.hi, second) through
    // HashUtil::hash with a running 32-bit seed.
    size_t operator()(const std::pair<doris::TUniqueId, int64_t>& pair) const {
        uint32_t seed = 0;
        seed = doris::HashUtil::hash(&pair.first.lo, sizeof(pair.first.lo), seed);
        seed = doris::HashUtil::hash(&pair.first.hi, sizeof(pair.first.hi), seed);
        seed = doris::HashUtil::hash(&pair.second, sizeof(pair.second), seed);
        return seed;
    }
};
503 | | |
template <class First, class Second>
struct std::hash<std::pair<First, Second>> {
    // Combines the two element hashes with CityHash's HashLen16 mixer.
    // NOTE(review): a blanket std::hash specialization for std::pair is only
    // conforming when at least one template argument involves a program-defined
    // type — confirm all instantiations in the codebase satisfy this.
    size_t operator()(const pair<First, Second>& p) const {
        size_t h1 = std::hash<First>()(p.first);
        size_t h2 = std::hash<Second>()(p.second);
        return doris::util_hash::HashLen16(h1, h2);
    }
};