be/src/util/hash/murmur_hash3.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | //----------------------------------------------------------------------------- |
19 | | // MurmurHash3 was written by Austin Appleby, and is placed in the public |
20 | | // domain. The author hereby disclaims copyright to this source code. |
21 | | |
22 | | // Note - The x86 and x64 versions do _not_ produce the same results, as the |
23 | | // algorithms are optimized for their respective platforms. You can still |
24 | | // compile and run any of them on any platform, but your performance with the |
25 | | // non-native version will be less than optimal. |
26 | | |
27 | | #include "util/hash/murmur_hash3.h" |
28 | | |
29 | | #if defined(_MSC_VER) |
30 | | #include <stdlib.h> |
31 | | #endif |
32 | | |
33 | | #include "util/unaligned.h" |
34 | | |
35 | | namespace doris { |
36 | | |
37 | | #if defined(_MSC_VER) |
38 | | |
39 | | #define FORCE_INLINE __forceinline |
40 | | |
41 | | #define ROTL32(x, y) _rotl(x, y) |
42 | | #define ROTL64(x, y) _rotl64(x, y) |
43 | | |
44 | | #define BIG_CONSTANT(x) (x) |
45 | | |
46 | | // Other compilers |
47 | | |
48 | | #else // defined(_MSC_VER) |
49 | | |
50 | | #define FORCE_INLINE inline __attribute__((always_inline)) |
51 | | |
// Rotate a 32-bit value left by r bits.
// Callers in this file always pass a constant r in (0, 32); r == 0 would
// make the (32 - r) shift undefined, so keep it that way.
// (This function lives in the non-MSVC branch, where FORCE_INLINE expands
// to exactly the attribute spelled out below.)
inline __attribute__((always_inline)) uint32_t rotl32(uint32_t x, int8_t r) {
    const uint32_t hi = x << r;
    const uint32_t lo = x >> (32 - r);
    return hi | lo;
}
55 | | |
// Rotate a 64-bit value left by r bits.
// Callers in this file always pass a constant r in (0, 64); r == 0 would
// make the (64 - r) shift undefined, so keep it that way.
// (This function lives in the non-MSVC branch, where FORCE_INLINE expands
// to exactly the attribute spelled out below.)
inline __attribute__((always_inline)) uint64_t rotl64(uint64_t x, int8_t r) {
    const uint64_t hi = x << r;
    const uint64_t lo = x >> (64 - r);
    return hi | lo;
}
59 | | |
60 | 1.41k | #define ROTL32(x, y) rotl32(x, y) |
61 | 1.19G | #define ROTL64(x, y) rotl64(x, y) |
62 | | |
63 | 198M | #define BIG_CONSTANT(x) (x##LLU) |
64 | | |
65 | | #endif // !defined(_MSC_VER) |
66 | | |
67 | | //----------------------------------------------------------------------------- |
68 | | // Block read - if your platform needs to do endian-swapping or can only |
69 | | // handle aligned reads, do the conversion here |
70 | | |
71 | 192 | FORCE_INLINE uint32_t getblock32(const uint32_t* p, int i) { |
72 | 192 | return unaligned_load<uint32_t>(&p[i]); |
73 | 192 | } |
74 | | |
75 | 581M | FORCE_INLINE uint64_t getblock64(const uint64_t* p, int i) { |
76 | 581M | return unaligned_load<uint64_t>(&p[i]); |
77 | 581M | } |
78 | | |
79 | | //----------------------------------------------------------------------------- |
80 | | // Finalization mix - force all bits of a hash block to avalanche |
81 | | |
82 | 1.06k | FORCE_INLINE uint32_t fmix32(uint32_t h) { |
83 | 1.06k | h ^= h >> 16; |
84 | 1.06k | h *= 0x85ebca6b; |
85 | 1.06k | h ^= h >> 13; |
86 | 1.06k | h *= 0xc2b2ae35; |
87 | 1.06k | h ^= h >> 16; |
88 | | |
89 | 1.06k | return h; |
90 | 1.06k | } |
91 | | |
92 | | //---------- |
93 | | |
94 | 49.7M | FORCE_INLINE uint64_t fmix64(uint64_t k) { |
95 | 49.7M | k ^= k >> 33; |
96 | 49.7M | k *= BIG_CONSTANT(0xff51afd7ed558ccd); |
97 | 49.7M | k ^= k >> 33; |
98 | 49.7M | k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); |
99 | 49.7M | k ^= k >> 33; |
100 | | |
101 | 49.7M | return k; |
102 | 49.7M | } |
103 | | |
104 | | //----------------------------------------------------------------------------- |
105 | | |
106 | 1.06k | void murmur_hash3_x86_32(const void* key, int64_t len, uint32_t seed, void* out) { |
107 | 1.06k | const uint8_t* data = (const uint8_t*)key; |
108 | 1.06k | const int nblocks = (int)len / 4; |
109 | | |
110 | 1.06k | uint32_t h1 = seed; |
111 | | |
112 | 1.06k | const uint32_t c1 = 0xcc9e2d51; |
113 | 1.06k | const uint32_t c2 = 0x1b873593; |
114 | | |
115 | | //---------- |
116 | | // body |
117 | | |
118 | 1.06k | const uint32_t* blocks = (const uint32_t*)(data + nblocks * 4); |
119 | | |
120 | 1.25k | for (int i = -nblocks; i; i++) { |
121 | 192 | uint32_t k1 = getblock32(blocks, i); |
122 | | |
123 | 192 | k1 *= c1; |
124 | 192 | k1 = ROTL32(k1, 15); |
125 | 192 | k1 *= c2; |
126 | | |
127 | 192 | h1 ^= k1; |
128 | 192 | h1 = ROTL32(h1, 13); |
129 | 192 | h1 = h1 * 5 + 0xe6546b64; |
130 | 192 | } |
131 | | |
132 | | //---------- |
133 | | // tail |
134 | | |
135 | 1.06k | const uint8_t* tail = (const uint8_t*)(data + nblocks * 4); |
136 | | |
137 | 1.06k | uint32_t k1 = 0; |
138 | | |
139 | 1.06k | switch (len & 3) { |
140 | 49 | case 3: |
141 | 49 | k1 ^= tail[2] << 16; |
142 | 49 | [[fallthrough]]; |
143 | 271 | case 2: |
144 | 271 | k1 ^= tail[1] << 8; |
145 | 271 | [[fallthrough]]; |
146 | 1.03k | case 1: |
147 | 1.03k | k1 ^= tail[0]; |
148 | 1.03k | k1 *= c1; |
149 | 1.03k | k1 = ROTL32(k1, 15); |
150 | 1.03k | k1 *= c2; |
151 | 1.03k | h1 ^= k1; |
152 | 1.06k | }; |
153 | | |
154 | | //---------- |
155 | | // finalization |
156 | | |
157 | 1.06k | h1 ^= len; |
158 | | |
159 | 1.06k | h1 = fmix32(h1); |
160 | | |
161 | 1.06k | *(uint32_t*)out = h1; |
162 | 1.06k | } |
163 | | |
164 | | //----------------------------------------------------------------------------- |
165 | | |
// MurmurHash3, x86 variant, 128-bit output.
//
// Consumes the key in 16-byte blocks spread across four independent 32-bit
// lanes (h1..h4), folds in the 1-15 leftover tail bytes, then avalanches and
// cross-mixes the lanes with fmix32. Writes 16 bytes (four uint32_t) to
// `out`. Per the header note, results intentionally differ from the x64
// 128-bit variant.
void murmur_hash3_x86_128(const void* key, const int len, uint32_t seed, void* out) {
    const uint8_t* data = (const uint8_t*)key;
    const int nblocks = len / 16;

    // All four lanes start from the same seed.
    uint32_t h1 = seed;
    uint32_t h2 = seed;
    uint32_t h3 = seed;
    uint32_t h4 = seed;

    // Per-lane multiplication constants from the reference implementation.
    const uint32_t c1 = 0x239b961b;
    const uint32_t c2 = 0xab0e9789;
    const uint32_t c3 = 0x38b34ae5;
    const uint32_t c4 = 0xa1e38b93;

    //----------
    // body

    // Blocks are indexed backwards from the end of the full-block region
    // (i runs from -nblocks up to 0), matching the reference code.
    const uint32_t* blocks = (const uint32_t*)(data + nblocks * 16);

    for (int i = -nblocks; i; i++) {
        uint32_t k1 = getblock32(blocks, i * 4 + 0);
        uint32_t k2 = getblock32(blocks, i * 4 + 1);
        uint32_t k3 = getblock32(blocks, i * 4 + 2);
        uint32_t k4 = getblock32(blocks, i * 4 + 3);

        // Each lane: multiply, rotate, multiply, fold into its h; then the
        // h is rotated, has the next lane added in, and is scaled.
        k1 *= c1;
        k1 = ROTL32(k1, 15);
        k1 *= c2;
        h1 ^= k1;

        h1 = ROTL32(h1, 19);
        h1 += h2;
        h1 = h1 * 5 + 0x561ccd1b;

        k2 *= c2;
        k2 = ROTL32(k2, 16);
        k2 *= c3;
        h2 ^= k2;

        h2 = ROTL32(h2, 17);
        h2 += h3;
        h2 = h2 * 5 + 0x0bcaa747;

        k3 *= c3;
        k3 = ROTL32(k3, 17);
        k3 *= c4;
        h3 ^= k3;

        h3 = ROTL32(h3, 15);
        h3 += h4;
        h3 = h3 * 5 + 0x96cd1c35;

        k4 *= c4;
        k4 = ROTL32(k4, 18);
        k4 *= c1;
        h4 ^= k4;

        h4 = ROTL32(h4, 13);
        h4 += h1;
        h4 = h4 * 5 + 0x32ac3b17;
    }

    //----------
    // tail

    // Mix in the bytes that did not fill a complete 16-byte block.
    // Every case deliberately falls through to the ones below it.
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);

    uint32_t k1 = 0;
    uint32_t k2 = 0;
    uint32_t k3 = 0;
    uint32_t k4 = 0;

    switch (len & 15) {
    case 15:
        k4 ^= tail[14] << 16;
        [[fallthrough]];
    case 14:
        k4 ^= tail[13] << 8;
        [[fallthrough]];
    case 13:
        k4 ^= tail[12] << 0;
        k4 *= c4;
        k4 = ROTL32(k4, 18);
        k4 *= c1;
        h4 ^= k4;
        [[fallthrough]];
    case 12:
        k3 ^= tail[11] << 24;
        [[fallthrough]];
    case 11:
        k3 ^= tail[10] << 16;
        [[fallthrough]];
    case 10:
        k3 ^= tail[9] << 8;
        [[fallthrough]];
    case 9:
        k3 ^= tail[8] << 0;
        k3 *= c3;
        k3 = ROTL32(k3, 17);
        k3 *= c4;
        h3 ^= k3;
        [[fallthrough]];
    case 8:
        k2 ^= tail[7] << 24;
        [[fallthrough]];
    case 7:
        k2 ^= tail[6] << 16;
        [[fallthrough]];
    case 6:
        k2 ^= tail[5] << 8;
        [[fallthrough]];
    case 5:
        k2 ^= tail[4] << 0;
        k2 *= c2;
        k2 = ROTL32(k2, 16);
        k2 *= c3;
        h2 ^= k2;
        [[fallthrough]];
    case 4:
        k1 ^= tail[3] << 24;
        [[fallthrough]];
    case 3:
        k1 ^= tail[2] << 16;
        [[fallthrough]];
    case 2:
        k1 ^= tail[1] << 8;
        [[fallthrough]];
    case 1:
        k1 ^= tail[0] << 0;
        k1 *= c1;
        k1 = ROTL32(k1, 15);
        k1 *= c2;
        h1 ^= k1;
    };

    //----------
    // finalization

    // XOR in the length, cross-pollinate the lanes, avalanche each with
    // fmix32, then cross-pollinate once more.
    h1 ^= len;
    h2 ^= len;
    h3 ^= len;
    h4 ^= len;

    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;

    h1 = fmix32(h1);
    h2 = fmix32(h2);
    h3 = fmix32(h3);
    h4 = fmix32(h4);

    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;

    ((uint32_t*)out)[0] = h1;
    ((uint32_t*)out)[1] = h2;
    ((uint32_t*)out)[2] = h3;
    ((uint32_t*)out)[3] = h4;
}
333 | | |
334 | | //----------------------------------------------------------------------------- |
335 | | |
// Helper function that implements the core MurmurHash3 x64 128-bit hashing
// algorithm. h1/h2 carry the seed in and return the two 64-bit halves of the
// digest through the reference parameters.
//
// The key is consumed in 16-byte blocks split across the two 64-bit lanes,
// the 1-15 leftover tail bytes are folded in, and both lanes are then
// cross-mixed and avalanched with fmix64.
//
// NOTE(review): `len` is a plain int, so callers are responsible for only
// passing sizes that fit in 32 bits (see murmur_hash3_x64_64_shared).
void murmur_hash3_x64_process(const void* key, const int len, uint64_t& h1, uint64_t& h2) {
    const uint8_t* data = (const uint8_t*)key;
    const int nblocks = len / 16;

    // Lane constants from the reference implementation.
    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

    //----------
    // body

    // Unlike the x86 variants, blocks are indexed forward from the start.
    const uint64_t* blocks = (const uint64_t*)(data);

    for (int i = 0; i < nblocks; i++) {
        uint64_t k1 = getblock64(blocks, i * 2 + 0);
        uint64_t k2 = getblock64(blocks, i * 2 + 1);

        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;

        h1 = ROTL64(h1, 27);
        h1 += h2;
        h1 = h1 * 5 + 0x52dce729;

        k2 *= c2;
        k2 = ROTL64(k2, 33);
        k2 *= c1;
        h2 ^= k2;

        h2 = ROTL64(h2, 31);
        h2 += h1;
        h2 = h2 * 5 + 0x38495ab5;
    }

    //----------
    // tail

    // Mix in the bytes that did not fill a complete 16-byte block.
    // Every case deliberately falls through to the ones below it.
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);

    uint64_t k1 = 0;
    uint64_t k2 = 0;

    switch (len & 15) {
    case 15:
        k2 ^= ((uint64_t)tail[14]) << 48;
        [[fallthrough]];
    case 14:
        k2 ^= ((uint64_t)tail[13]) << 40;
        [[fallthrough]];
    case 13:
        k2 ^= ((uint64_t)tail[12]) << 32;
        [[fallthrough]];
    case 12:
        k2 ^= ((uint64_t)tail[11]) << 24;
        [[fallthrough]];
    case 11:
        k2 ^= ((uint64_t)tail[10]) << 16;
        [[fallthrough]];
    case 10:
        k2 ^= ((uint64_t)tail[9]) << 8;
        [[fallthrough]];
    case 9:
        k2 ^= ((uint64_t)tail[8]) << 0;
        k2 *= c2;
        k2 = ROTL64(k2, 33);
        k2 *= c1;
        h2 ^= k2;
        [[fallthrough]];
    case 8:
        k1 ^= ((uint64_t)tail[7]) << 56;
        [[fallthrough]];
    case 7:
        k1 ^= ((uint64_t)tail[6]) << 48;
        [[fallthrough]];
    case 6:
        k1 ^= ((uint64_t)tail[5]) << 40;
        [[fallthrough]];
    case 5:
        k1 ^= ((uint64_t)tail[4]) << 32;
        [[fallthrough]];
    case 4:
        k1 ^= ((uint64_t)tail[3]) << 24;
        [[fallthrough]];
    case 3:
        k1 ^= ((uint64_t)tail[2]) << 16;
        [[fallthrough]];
    case 2:
        k1 ^= ((uint64_t)tail[1]) << 8;
        [[fallthrough]];
    case 1:
        k1 ^= ((uint64_t)tail[0]) << 0;
        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;
    };

    //----------
    // finalization

    // XOR in the length, then cross-pollinate and avalanche both lanes.
    h1 ^= len;
    h2 ^= len;

    h1 += h2;
    h2 += h1;

    h1 = fmix64(h1);
    h2 = fmix64(h2);

    h1 += h2;
    h2 += h1;
}
450 | | |
451 | | //----------------------------------------------------------------------------- |
452 | | |
453 | | // The origin function `murmur_hash3_x64_128` is copied from: https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp |
454 | | // And Doris modified it into function `murmur_hash3_x64_process` |
455 | | // For this reason, this function is still retained even though it has no calls. |
456 | 1 | void murmur_hash3_x64_128(const void* key, const int len, const uint32_t seed, void* out) { |
457 | 1 | uint64_t h1 = seed; |
458 | 1 | uint64_t h2 = seed; |
459 | 1 | murmur_hash3_x64_process(key, len, h1, h2); |
460 | 1 | ((uint64_t*)out)[0] = h1; |
461 | 1 | ((uint64_t*)out)[1] = h2; |
462 | 1 | } |
463 | | |
464 | | //----------------------------------------------------------------------------- |
465 | | |
466 | | // MurmurHash3 x64 64-bit variant using shared 128-bit processing function |
467 | | // This implementation reuses the murmur_hash3_x64_process function and only outputs the first hash value |
468 | | // Used for function mmh3_64_v2 |
469 | | void murmur_hash3_x64_64_shared(const void* key, const int64_t len, const uint64_t seed, |
470 | 63 | void* out) { |
471 | 63 | uint64_t h1 = seed; |
472 | 63 | uint64_t h2 = seed; |
473 | 63 | murmur_hash3_x64_process(key, static_cast<int>(len), h1, h2); |
474 | 63 | ((uint64_t*)out)[0] = h1; |
475 | 63 | } |
476 | | |
477 | | //----------------------------------------------------------------------------- |
478 | | |
479 | | // MurmurHash3 x64 64-bit variant with optimized standalone implementation |
480 | | // This implementation is specifically optimized for 64-bit output |
481 | | // Used for function mmh3_64 |
482 | 49.7M | void murmur_hash3_x64_64(const void* key, const int64_t len, const uint64_t seed, void* out) { |
483 | 49.7M | const uint8_t* data = (const uint8_t*)key; |
484 | 49.7M | const int nblocks = (int)len / 8; |
485 | 49.7M | uint64_t h1 = seed; |
486 | | |
487 | 49.7M | const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); |
488 | 49.7M | const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); |
489 | | |
490 | | //---------- |
491 | | // body |
492 | | |
493 | 49.7M | const uint64_t* blocks = (const uint64_t*)(data); |
494 | | |
495 | 631M | for (int i = 0; i < nblocks; i++) { |
496 | 581M | uint64_t k1 = getblock64(blocks, i); |
497 | | |
498 | 581M | k1 *= c1; |
499 | 581M | k1 = ROTL64(k1, 31); |
500 | 581M | k1 *= c2; |
501 | 581M | h1 ^= k1; |
502 | | |
503 | 581M | h1 = ROTL64(h1, 27); |
504 | 581M | h1 = h1 * 5 + 0x52dce729; |
505 | 581M | } |
506 | | |
507 | | //---------- |
508 | | // tail |
509 | | |
510 | 49.7M | const uint8_t* tail = (const uint8_t*)(data + nblocks * 8); |
511 | 49.7M | uint64_t k1 = 0; |
512 | | |
513 | 49.7M | switch (len & 7) { |
514 | 2.55M | case 7: |
515 | 2.55M | k1 ^= ((uint64_t)tail[6]) << 48; |
516 | 2.55M | [[fallthrough]]; |
517 | 3.91M | case 6: |
518 | 3.91M | k1 ^= ((uint64_t)tail[5]) << 40; |
519 | 3.91M | [[fallthrough]]; |
520 | 6.13M | case 5: |
521 | 6.13M | k1 ^= ((uint64_t)tail[4]) << 32; |
522 | 6.13M | [[fallthrough]]; |
523 | 16.5M | case 4: |
524 | 16.5M | k1 ^= ((uint64_t)tail[3]) << 24; |
525 | 16.5M | [[fallthrough]]; |
526 | 18.1M | case 3: |
527 | 18.1M | k1 ^= ((uint64_t)tail[2]) << 16; |
528 | 18.1M | [[fallthrough]]; |
529 | 20.6M | case 2: |
530 | 20.6M | k1 ^= ((uint64_t)tail[1]) << 8; |
531 | 20.6M | [[fallthrough]]; |
532 | 30.2M | case 1: |
533 | 30.2M | k1 ^= ((uint64_t)tail[0]) << 0; |
534 | 30.2M | k1 *= c1; |
535 | 30.2M | k1 = ROTL64(k1, 31); |
536 | 30.2M | k1 *= c2; |
537 | 30.2M | h1 ^= k1; |
538 | 49.7M | }; |
539 | | |
540 | | //---------- |
541 | | // finalization |
542 | | |
543 | 49.7M | h1 ^= len; |
544 | 49.7M | h1 = fmix64(h1); |
545 | | |
546 | 49.7M | ((uint64_t*)out)[0] = h1; |
547 | 49.7M | } |
548 | | |
549 | | } // namespace doris |