be/src/util/hash/murmur_hash3.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | //----------------------------------------------------------------------------- |
19 | | // MurmurHash3 was written by Austin Appleby, and is placed in the public |
20 | | // domain. The author hereby disclaims copyright to this source code. |
21 | | |
22 | | // Note - The x86 and x64 versions do _not_ produce the same results, as the |
23 | | // algorithms are optimized for their respective platforms. You can still |
24 | | // compile and run any of them on any platform, but your performance with the |
25 | | // non-native version will be less than optimal. |
26 | | |
27 | | #include "util/hash/murmur_hash3.h" |
28 | | |
29 | | #include "util/unaligned.h" |
30 | | |
31 | | namespace doris { |
32 | | |
// Platform shims: MSVC gets intrinsic rotates and __forceinline; every
// other compiler gets the hand-written rotate helpers defined below and
// GCC-style always_inline.
#if defined(_MSC_VER)

#define FORCE_INLINE __forceinline

#include <stdlib.h>

// _rotl/_rotl64 are MSVC rotate intrinsics from <stdlib.h>.
#define ROTL32(x, y) _rotl(x, y)
#define ROTL64(x, y) _rotl64(x, y)

// Constants pass through unchanged on this branch.
#define BIG_CONSTANT(x) (x)

// Other compilers

#else // defined(_MSC_VER)

#define FORCE_INLINE inline __attribute__((always_inline))
49 | | |
// Rotate a 32-bit value left by r bits. Valid for r in 1..31: r == 0
// would make the complementary right shift a shift by 32, which is
// undefined behavior. (FORCE_INLINE is spelled out here; this definition
// lives in the non-MSVC branch where the macro expands to exactly this.)
inline __attribute__((always_inline)) uint32_t rotl32(uint32_t x, int8_t r) {
    const uint32_t high = x << r;
    const uint32_t low = x >> (32 - r);
    return high | low;
}
53 | | |
// Rotate a 64-bit value left by r bits. Valid for r in 1..63 (r == 0
// would shift by 64, which is undefined behavior). FORCE_INLINE is
// spelled out; in this non-MSVC branch the macro expands to exactly this.
inline __attribute__((always_inline)) uint64_t rotl64(uint64_t x, int8_t r) {
    const uint64_t high = x << r;
    const uint64_t low = x >> (64 - r);
    return high | low;
}
57 | | |
// Route the rotate macros to the helpers above on non-MSVC compilers.
#define ROTL32(x, y) rotl32(x, y)
#define ROTL64(x, y) rotl64(x, y)

// Append the unsigned-long-long suffix so 64-bit constants keep their
// full width.
#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)
64 | | |
65 | | //----------------------------------------------------------------------------- |
66 | | // Block read - if your platform needs to do endian-swapping or can only |
67 | | // handle aligned reads, do the conversion here |
68 | | |
69 | 244 | FORCE_INLINE uint32_t getblock32(const uint32_t* p, int i) { |
70 | 244 | return unaligned_load<uint32_t>(&p[i]); |
71 | 244 | } |
72 | | |
73 | 506M | FORCE_INLINE uint64_t getblock64(const uint64_t* p, int i) { |
74 | 506M | return unaligned_load<uint64_t>(&p[i]); |
75 | 506M | } |
76 | | |
77 | | //----------------------------------------------------------------------------- |
78 | | // Finalization mix - force all bits of a hash block to avalanche |
79 | | |
80 | 1.08k | FORCE_INLINE uint32_t fmix32(uint32_t h) { |
81 | 1.08k | h ^= h >> 16; |
82 | 1.08k | h *= 0x85ebca6b; |
83 | 1.08k | h ^= h >> 13; |
84 | 1.08k | h *= 0xc2b2ae35; |
85 | 1.08k | h ^= h >> 16; |
86 | | |
87 | 1.08k | return h; |
88 | 1.08k | } |
89 | | |
90 | | //---------- |
91 | | |
92 | 55.3M | FORCE_INLINE uint64_t fmix64(uint64_t k) { |
93 | 55.3M | k ^= k >> 33; |
94 | 55.3M | k *= BIG_CONSTANT(0xff51afd7ed558ccd); |
95 | 55.3M | k ^= k >> 33; |
96 | 55.3M | k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); |
97 | 55.3M | k ^= k >> 33; |
98 | | |
99 | 55.3M | return k; |
100 | 55.3M | } |
101 | | |
102 | | //----------------------------------------------------------------------------- |
103 | | |
// 32-bit MurmurHash3, x86 variant. Hashes `len` bytes at `key` with the
// given seed and stores the digest through `out` as one uint32_t. Follows
// Austin Appleby's reference MurmurHash3_x86_32, with `len` widened to
// int64_t in the signature.
// NOTE(review): nblocks is computed as (int)len / 4, so lengths >= 2^33
// bytes would overflow the block count — presumably callers stay far
// below that; confirm if huge inputs are possible.
void murmur_hash3_x86_32(const void* key, int64_t len, uint32_t seed, void* out) {
    const uint8_t* data = (const uint8_t*)key;
    const int nblocks = (int)len / 4;

    uint32_t h1 = seed;

    // Multiplicative mixing constants from the reference implementation.
    const uint32_t c1 = 0xcc9e2d51;
    const uint32_t c2 = 0x1b873593;

    //----------
    // body

    // `blocks` points just past the last full 4-byte block; the loop runs
    // i = -nblocks .. -1, visiting the blocks in input order.
    const uint32_t* blocks = (const uint32_t*)(data + nblocks * 4);

    for (int i = -nblocks; i; i++) {
        uint32_t k1 = getblock32(blocks, i);

        k1 *= c1;
        k1 = ROTL32(k1, 15);
        k1 *= c2;

        h1 ^= k1;
        h1 = ROTL32(h1, 13);
        h1 = h1 * 5 + 0xe6546b64;
    }

    //----------
    // tail

    // Fold in the trailing 1-3 bytes; the cases intentionally fall through
    // so each remaining byte lands in its little-endian position of k1.
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 4);

    uint32_t k1 = 0;

    switch (len & 3) {
    case 3:
        k1 ^= tail[2] << 16;
        [[fallthrough]];
    case 2:
        k1 ^= tail[1] << 8;
        [[fallthrough]];
    case 1:
        k1 ^= tail[0];
        k1 *= c1;
        k1 = ROTL32(k1, 15);
        k1 *= c2;
        h1 ^= k1;
    };

    //----------
    // finalization

    // Mix in the length (truncated to 32 bits) and force full avalanche.
    h1 ^= len;

    h1 = fmix32(h1);

    *(uint32_t*)out = h1;
}
161 | | |
162 | | //----------------------------------------------------------------------------- |
163 | | |
// 128-bit MurmurHash3, x86 variant. Hashes `len` bytes at `key` and
// writes four uint32_t words to `out`. Follows Austin Appleby's reference
// MurmurHash3_x86_128; note the x86 and x64 variants produce different
// digests (see the header comment at the top of this file).
void murmur_hash3_x86_128(const void* key, const int len, uint32_t seed, void* out) {
    const uint8_t* data = (const uint8_t*)key;
    const int nblocks = len / 16;

    // Four 32-bit lanes, all seeded identically.
    uint32_t h1 = seed;
    uint32_t h2 = seed;
    uint32_t h3 = seed;
    uint32_t h4 = seed;

    // Per-lane multiplicative mixing constants from the reference code.
    const uint32_t c1 = 0x239b961b;
    const uint32_t c2 = 0xab0e9789;
    const uint32_t c3 = 0x38b34ae5;
    const uint32_t c4 = 0xa1e38b93;

    //----------
    // body

    // Points just past the last full 16-byte block; the loop indexes
    // backwards (i = -nblocks .. -1) to visit the blocks in input order.
    const uint32_t* blocks = (const uint32_t*)(data + nblocks * 16);

    for (int i = -nblocks; i; i++) {
        uint32_t k1 = getblock32(blocks, i * 4 + 0);
        uint32_t k2 = getblock32(blocks, i * 4 + 1);
        uint32_t k3 = getblock32(blocks, i * 4 + 2);
        uint32_t k4 = getblock32(blocks, i * 4 + 3);

        // Each lane mixes its own word, is rotated, added to the next
        // lane's state, then nudged by a lane-specific constant.
        k1 *= c1;
        k1 = ROTL32(k1, 15);
        k1 *= c2;
        h1 ^= k1;

        h1 = ROTL32(h1, 19);
        h1 += h2;
        h1 = h1 * 5 + 0x561ccd1b;

        k2 *= c2;
        k2 = ROTL32(k2, 16);
        k2 *= c3;
        h2 ^= k2;

        h2 = ROTL32(h2, 17);
        h2 += h3;
        h2 = h2 * 5 + 0x0bcaa747;

        k3 *= c3;
        k3 = ROTL32(k3, 17);
        k3 *= c4;
        h3 ^= k3;

        h3 = ROTL32(h3, 15);
        h3 += h4;
        h3 = h3 * 5 + 0x96cd1c35;

        k4 *= c4;
        k4 = ROTL32(k4, 18);
        k4 *= c1;
        h4 ^= k4;

        h4 = ROTL32(h4, 13);
        h4 += h1;
        h4 = h4 * 5 + 0x32ac3b17;
    }

    //----------
    // tail

    // Fold in the trailing 1-15 bytes; the switch intentionally falls
    // through so each byte lands in its little-endian slot of k1..k4.
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);

    uint32_t k1 = 0;
    uint32_t k2 = 0;
    uint32_t k3 = 0;
    uint32_t k4 = 0;

    switch (len & 15) {
    case 15:
        k4 ^= tail[14] << 16;
        [[fallthrough]];
    case 14:
        k4 ^= tail[13] << 8;
        [[fallthrough]];
    case 13:
        k4 ^= tail[12] << 0;
        k4 *= c4;
        k4 = ROTL32(k4, 18);
        k4 *= c1;
        h4 ^= k4;
        [[fallthrough]];
    case 12:
        k3 ^= tail[11] << 24;
        [[fallthrough]];
    case 11:
        k3 ^= tail[10] << 16;
        [[fallthrough]];
    case 10:
        k3 ^= tail[9] << 8;
        [[fallthrough]];
    case 9:
        k3 ^= tail[8] << 0;
        k3 *= c3;
        k3 = ROTL32(k3, 17);
        k3 *= c4;
        h3 ^= k3;
        [[fallthrough]];
    case 8:
        k2 ^= tail[7] << 24;
        [[fallthrough]];
    case 7:
        k2 ^= tail[6] << 16;
        [[fallthrough]];
    case 6:
        k2 ^= tail[5] << 8;
        [[fallthrough]];
    case 5:
        k2 ^= tail[4] << 0;
        k2 *= c2;
        k2 = ROTL32(k2, 16);
        k2 *= c3;
        h2 ^= k2;
        [[fallthrough]];
    case 4:
        k1 ^= tail[3] << 24;
        [[fallthrough]];
    case 3:
        k1 ^= tail[2] << 16;
        [[fallthrough]];
    case 2:
        k1 ^= tail[1] << 8;
        [[fallthrough]];
    case 1:
        k1 ^= tail[0] << 0;
        k1 *= c1;
        k1 = ROTL32(k1, 15);
        k1 *= c2;
        h1 ^= k1;
    };

    //----------
    // finalization

    // Mix in the length, cross-mix the lanes, avalanche each lane, then
    // cross-mix once more (same structure as the reference code).
    h1 ^= len;
    h2 ^= len;
    h3 ^= len;
    h4 ^= len;

    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;

    h1 = fmix32(h1);
    h2 = fmix32(h2);
    h3 = fmix32(h3);
    h4 = fmix32(h4);

    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;

    ((uint32_t*)out)[0] = h1;
    ((uint32_t*)out)[1] = h2;
    ((uint32_t*)out)[2] = h3;
    ((uint32_t*)out)[3] = h4;
}
331 | | |
332 | | //----------------------------------------------------------------------------- |
333 | | |
334 | | // Helper function that implements the core MurmurHash3 128-bit hashing algorithm |
void murmur_hash3_x64_process(const void* key, const int len, uint64_t& h1, uint64_t& h2) {
    // h1/h2 are in-out parameters: callers seed them before the call and
    // read the two 64-bit halves of the 128-bit digest from them after.
    const uint8_t* data = (const uint8_t*)key;
    const int nblocks = len / 16;

    // Mixing constants from the reference MurmurHash3_x64_128.
    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

    //----------
    // body

    const uint64_t* blocks = (const uint64_t*)(data);

    // Consume all full 16-byte blocks, two 64-bit lanes per block, with
    // cross-mixing between the lanes after each lane's own mix.
    for (int i = 0; i < nblocks; i++) {
        uint64_t k1 = getblock64(blocks, i * 2 + 0);
        uint64_t k2 = getblock64(blocks, i * 2 + 1);

        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;

        h1 = ROTL64(h1, 27);
        h1 += h2;
        h1 = h1 * 5 + 0x52dce729;

        k2 *= c2;
        k2 = ROTL64(k2, 33);
        k2 *= c1;
        h2 ^= k2;

        h2 = ROTL64(h2, 31);
        h2 += h1;
        h2 = h2 * 5 + 0x38495ab5;
    }

    //----------
    // tail

    // Fold in the trailing 1-15 bytes; intentional fallthrough places each
    // byte in its little-endian position within k1/k2.
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);

    uint64_t k1 = 0;
    uint64_t k2 = 0;

    switch (len & 15) {
    case 15:
        k2 ^= ((uint64_t)tail[14]) << 48;
        [[fallthrough]];
    case 14:
        k2 ^= ((uint64_t)tail[13]) << 40;
        [[fallthrough]];
    case 13:
        k2 ^= ((uint64_t)tail[12]) << 32;
        [[fallthrough]];
    case 12:
        k2 ^= ((uint64_t)tail[11]) << 24;
        [[fallthrough]];
    case 11:
        k2 ^= ((uint64_t)tail[10]) << 16;
        [[fallthrough]];
    case 10:
        k2 ^= ((uint64_t)tail[9]) << 8;
        [[fallthrough]];
    case 9:
        k2 ^= ((uint64_t)tail[8]) << 0;
        k2 *= c2;
        k2 = ROTL64(k2, 33);
        k2 *= c1;
        h2 ^= k2;
        [[fallthrough]];
    case 8:
        k1 ^= ((uint64_t)tail[7]) << 56;
        [[fallthrough]];
    case 7:
        k1 ^= ((uint64_t)tail[6]) << 48;
        [[fallthrough]];
    case 6:
        k1 ^= ((uint64_t)tail[5]) << 40;
        [[fallthrough]];
    case 5:
        k1 ^= ((uint64_t)tail[4]) << 32;
        [[fallthrough]];
    case 4:
        k1 ^= ((uint64_t)tail[3]) << 24;
        [[fallthrough]];
    case 3:
        k1 ^= ((uint64_t)tail[2]) << 16;
        [[fallthrough]];
    case 2:
        k1 ^= ((uint64_t)tail[1]) << 8;
        [[fallthrough]];
    case 1:
        k1 ^= ((uint64_t)tail[0]) << 0;
        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;
    };

    //----------
    // finalization

    // Mix in the byte count, cross-mix the lanes, avalanche each lane,
    // then cross-mix once more.
    h1 ^= len;
    h2 ^= len;

    h1 += h2;
    h2 += h1;

    h1 = fmix64(h1);
    h2 = fmix64(h2);

    h1 += h2;
    h2 += h1;
}
448 | | |
449 | | //----------------------------------------------------------------------------- |
450 | | |
451 | | // The origin function `murmur_hash3_x64_128` is copied from: https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp |
452 | | // And Doris modified it into function `murmur_hash3_x64_process` |
453 | | // For this reason, this function is still retained even though it has no calls. |
454 | 1 | void murmur_hash3_x64_128(const void* key, const int len, const uint32_t seed, void* out) { |
455 | 1 | uint64_t h1 = seed; |
456 | 1 | uint64_t h2 = seed; |
457 | 1 | murmur_hash3_x64_process(key, len, h1, h2); |
458 | 1 | ((uint64_t*)out)[0] = h1; |
459 | 1 | ((uint64_t*)out)[1] = h2; |
460 | 1 | } |
461 | | |
462 | | //----------------------------------------------------------------------------- |
463 | | |
464 | | // MurmurHash3 x64 64-bit variant using shared 128-bit processing function |
465 | | // This implementation reuses the murmur_hash3_x64_process function and only outputs the first hash value |
466 | | // Used for function mmh3_64_v2 |
467 | | void murmur_hash3_x64_64_shared(const void* key, const int64_t len, const uint64_t seed, |
468 | 63 | void* out) { |
469 | 63 | uint64_t h1 = seed; |
470 | 63 | uint64_t h2 = seed; |
471 | 63 | murmur_hash3_x64_process(key, static_cast<int>(len), h1, h2); |
472 | 63 | ((uint64_t*)out)[0] = h1; |
473 | 63 | } |
474 | | |
475 | | //----------------------------------------------------------------------------- |
476 | | |
477 | | // MurmurHash3 x64 64-bit variant with optimized standalone implementation |
478 | | // This implementation is specifically optimized for 64-bit output |
479 | | // Used for function mmh3_64 |
void murmur_hash3_x64_64(const void* key, const int64_t len, const uint64_t seed, void* out) {
    const uint8_t* data = (const uint8_t*)key;
    // NOTE(review): truncating to int caps the supported length below
    // 2^33 bytes (the block count overflows beyond that) — presumably
    // fine for callers; confirm if huge inputs are possible.
    const int nblocks = (int)len / 8;
    uint64_t h1 = seed;

    // Mixing constants shared with the 128-bit x64 variant above.
    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

    //----------
    // body

    const uint64_t* blocks = (const uint64_t*)(data);

    // Consume all full 8-byte blocks; single-lane version of the x64 mix.
    for (int i = 0; i < nblocks; i++) {
        uint64_t k1 = getblock64(blocks, i);

        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;

        h1 = ROTL64(h1, 27);
        h1 = h1 * 5 + 0x52dce729;
    }

    //----------
    // tail

    // Fold in the trailing 1-7 bytes; intentional fallthrough places each
    // byte in its little-endian position within k1.
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 8);
    uint64_t k1 = 0;

    switch (len & 7) {
    case 7:
        k1 ^= ((uint64_t)tail[6]) << 48;
        [[fallthrough]];
    case 6:
        k1 ^= ((uint64_t)tail[5]) << 40;
        [[fallthrough]];
    case 5:
        k1 ^= ((uint64_t)tail[4]) << 32;
        [[fallthrough]];
    case 4:
        k1 ^= ((uint64_t)tail[3]) << 24;
        [[fallthrough]];
    case 3:
        k1 ^= ((uint64_t)tail[2]) << 16;
        [[fallthrough]];
    case 2:
        k1 ^= ((uint64_t)tail[1]) << 8;
        [[fallthrough]];
    case 1:
        k1 ^= ((uint64_t)tail[0]) << 0;
        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;
    };

    //----------
    // finalization

    // Mix in the byte count and force full avalanche.
    h1 ^= len;
    h1 = fmix64(h1);

    ((uint64_t*)out)[0] = h1;
}
546 | | |
547 | | } // namespace doris |