be/src/util/hash/murmur_hash3.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | //----------------------------------------------------------------------------- |
19 | | // MurmurHash3 was written by Austin Appleby, and is placed in the public |
20 | | // domain. The author hereby disclaims copyright to this source code. |
21 | | |
22 | | // Note - The x86 and x64 versions do _not_ produce the same results, as the |
23 | | // algorithms are optimized for their respective platforms. You can still |
24 | | // compile and run any of them on any platform, but your performance with the |
25 | | // non-native version will be less than optimal. |
26 | | |
27 | | #include "util/hash/murmur_hash3.h" |
28 | | |
29 | | #include "util/unaligned.h" |
30 | | |
31 | | namespace doris { |
32 | | |
33 | | #include "common/compile_check_begin.h" |
34 | | #if defined(_MSC_VER) |
35 | | |
36 | | #define FORCE_INLINE __forceinline |
37 | | |
38 | | #include <stdlib.h> |
39 | | |
40 | | #define ROTL32(x, y) _rotl(x, y) |
41 | | #define ROTL64(x, y) _rotl64(x, y) |
42 | | |
43 | | #define BIG_CONSTANT(x) (x) |
44 | | |
45 | | // Other compilers |
46 | | |
47 | | #else // defined(_MSC_VER) |
48 | | |
49 | | #define FORCE_INLINE inline __attribute__((always_inline)) |
50 | | |
// Rotate the 32-bit value `x` left by `r` bits. This definition only exists in
// the non-MSVC branch of the #if above, so the GCC/Clang always_inline
// spelling is used directly. Callers pass 0 < r < 32, so neither shift is UB.
inline __attribute__((always_inline)) uint32_t rotl32(uint32_t x, int8_t r) {
    const uint32_t upper = x << r;
    const uint32_t lower = x >> (32 - r);
    return upper | lower;
}
54 | | |
// Rotate the 64-bit value `x` left by `r` bits. Non-MSVC-only definition (see
// the surrounding #if), hence the direct always_inline attribute. Callers
// pass 0 < r < 64, so neither shift is UB.
inline __attribute__((always_inline)) uint64_t rotl64(uint64_t x, int8_t r) {
    const uint64_t upper = x << r;
    const uint64_t lower = x >> (64 - r);
    return upper | lower;
}
58 | | |
59 | 53 | #define ROTL32(x, y) rotl32(x, y) |
60 | 12.4M | #define ROTL64(x, y) rotl64(x, y) |
61 | | |
62 | 23.5M | #define BIG_CONSTANT(x) (x##LLU) |
63 | | |
64 | | #endif // !defined(_MSC_VER) |
65 | | |
66 | | //----------------------------------------------------------------------------- |
67 | | // Block read - if your platform needs to do endian-swapping or can only |
68 | | // handle aligned reads, do the conversion here |
69 | | |
70 | 21 | FORCE_INLINE uint32_t getblock32(const uint32_t* p, int i) { |
71 | 21 | return unaligned_load<uint32_t>(&p[i]); |
72 | 21 | } |
73 | | |
74 | 6.10M | FORCE_INLINE uint64_t getblock64(const uint64_t* p, int i) { |
75 | 6.10M | return unaligned_load<uint64_t>(&p[i]); |
76 | 6.10M | } |
77 | | |
78 | | //----------------------------------------------------------------------------- |
79 | | // Finalization mix - force all bits of a hash block to avalanche |
80 | | |
81 | 20 | FORCE_INLINE uint32_t fmix32(uint32_t h) { |
82 | 20 | h ^= h >> 16; |
83 | 20 | h *= 0x85ebca6b; |
84 | 20 | h ^= h >> 13; |
85 | 20 | h *= 0xc2b2ae35; |
86 | 20 | h ^= h >> 16; |
87 | | |
88 | 20 | return h; |
89 | 20 | } |
90 | | |
91 | | //---------- |
92 | | |
93 | 5.89M | FORCE_INLINE uint64_t fmix64(uint64_t k) { |
94 | 5.89M | k ^= k >> 33; |
95 | 5.89M | k *= BIG_CONSTANT(0xff51afd7ed558ccd); |
96 | 5.89M | k ^= k >> 33; |
97 | 5.89M | k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); |
98 | 5.89M | k ^= k >> 33; |
99 | | |
100 | 5.89M | return k; |
101 | 5.89M | } |
102 | | |
103 | | //----------------------------------------------------------------------------- |
104 | | |
105 | 20 | void murmur_hash3_x86_32(const void* key, int64_t len, uint32_t seed, void* out) { |
106 | 20 | const uint8_t* data = (const uint8_t*)key; |
107 | 20 | const int nblocks = (int)len / 4; |
108 | | |
109 | 20 | uint32_t h1 = seed; |
110 | | |
111 | 20 | const uint32_t c1 = 0xcc9e2d51; |
112 | 20 | const uint32_t c2 = 0x1b873593; |
113 | | |
114 | | //---------- |
115 | | // body |
116 | | |
117 | 20 | const uint32_t* blocks = (const uint32_t*)(data + nblocks * 4); |
118 | | |
119 | 41 | for (int i = -nblocks; i; i++) { |
120 | 21 | uint32_t k1 = getblock32(blocks, i); |
121 | | |
122 | 21 | k1 *= c1; |
123 | 21 | k1 = ROTL32(k1, 15); |
124 | 21 | k1 *= c2; |
125 | | |
126 | 21 | h1 ^= k1; |
127 | 21 | h1 = ROTL32(h1, 13); |
128 | 21 | h1 = h1 * 5 + 0xe6546b64; |
129 | 21 | } |
130 | | |
131 | | //---------- |
132 | | // tail |
133 | | |
134 | 20 | const uint8_t* tail = (const uint8_t*)(data + nblocks * 4); |
135 | | |
136 | 20 | uint32_t k1 = 0; |
137 | | |
138 | 20 | switch (len & 3) { |
139 | 1 | case 3: |
140 | 1 | k1 ^= tail[2] << 16; |
141 | 1 | [[fallthrough]]; |
142 | 2 | case 2: |
143 | 2 | k1 ^= tail[1] << 8; |
144 | 2 | [[fallthrough]]; |
145 | 11 | case 1: |
146 | 11 | k1 ^= tail[0]; |
147 | 11 | k1 *= c1; |
148 | 11 | k1 = ROTL32(k1, 15); |
149 | 11 | k1 *= c2; |
150 | 11 | h1 ^= k1; |
151 | 20 | }; |
152 | | |
153 | | //---------- |
154 | | // finalization |
155 | | |
156 | 20 | h1 ^= len; |
157 | | |
158 | 20 | h1 = fmix32(h1); |
159 | | |
160 | 20 | *(uint32_t*)out = h1; |
161 | 20 | } |
162 | | |
163 | | //----------------------------------------------------------------------------- |
164 | | |
// 128-bit MurmurHash3 (x86 variant). Hashes `len` bytes of `key` with `seed`
// and writes four 32-bit lanes (16 bytes total) through `out`. Note that the
// x86 and x64 128-bit variants intentionally produce different results.
void murmur_hash3_x86_128(const void* key, const int len, uint32_t seed, void* out) {
    const uint8_t* data = (const uint8_t*)key;
    const int nblocks = len / 16;

    // Four independent 32-bit hash lanes, all seeded identically.
    uint32_t h1 = seed;
    uint32_t h2 = seed;
    uint32_t h3 = seed;
    uint32_t h4 = seed;

    const uint32_t c1 = 0x239b961b;
    const uint32_t c2 = 0xab0e9789;
    const uint32_t c3 = 0x38b34ae5;
    const uint32_t c4 = 0xa1e38b93;

    //----------
    // body

    // `blocks` points one block-span past the data start so the loop can walk
    // upward from a negative index to zero (the original upstream idiom).
    const uint32_t* blocks = (const uint32_t*)(data + nblocks * 16);

    for (int i = -nblocks; i; i++) {
        uint32_t k1 = getblock32(blocks, i * 4 + 0);
        uint32_t k2 = getblock32(blocks, i * 4 + 1);
        uint32_t k3 = getblock32(blocks, i * 4 + 2);
        uint32_t k4 = getblock32(blocks, i * 4 + 3);

        // Each lane mixes its own block, then borrows state from the next lane.
        k1 *= c1;
        k1 = ROTL32(k1, 15);
        k1 *= c2;
        h1 ^= k1;

        h1 = ROTL32(h1, 19);
        h1 += h2;
        h1 = h1 * 5 + 0x561ccd1b;

        k2 *= c2;
        k2 = ROTL32(k2, 16);
        k2 *= c3;
        h2 ^= k2;

        h2 = ROTL32(h2, 17);
        h2 += h3;
        h2 = h2 * 5 + 0x0bcaa747;

        k3 *= c3;
        k3 = ROTL32(k3, 17);
        k3 *= c4;
        h3 ^= k3;

        h3 = ROTL32(h3, 15);
        h3 += h4;
        h3 = h3 * 5 + 0x96cd1c35;

        k4 *= c4;
        k4 = ROTL32(k4, 18);
        k4 *= c1;
        h4 ^= k4;

        h4 = ROTL32(h4, 13);
        h4 += h1;
        h4 = h4 * 5 + 0x32ac3b17;
    }

    //----------
    // tail: fold in the remaining 1-15 bytes via a fallthrough switch; each
    // case assembles one more byte, and cases 13/9/5/1 finish the lane mix.

    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);

    uint32_t k1 = 0;
    uint32_t k2 = 0;
    uint32_t k3 = 0;
    uint32_t k4 = 0;

    switch (len & 15) {
    case 15:
        k4 ^= tail[14] << 16;
        [[fallthrough]];
    case 14:
        k4 ^= tail[13] << 8;
        [[fallthrough]];
    case 13:
        k4 ^= tail[12] << 0;
        k4 *= c4;
        k4 = ROTL32(k4, 18);
        k4 *= c1;
        h4 ^= k4;
        [[fallthrough]];
    case 12:
        k3 ^= tail[11] << 24;
        [[fallthrough]];
    case 11:
        k3 ^= tail[10] << 16;
        [[fallthrough]];
    case 10:
        k3 ^= tail[9] << 8;
        [[fallthrough]];
    case 9:
        k3 ^= tail[8] << 0;
        k3 *= c3;
        k3 = ROTL32(k3, 17);
        k3 *= c4;
        h3 ^= k3;
        [[fallthrough]];
    case 8:
        k2 ^= tail[7] << 24;
        [[fallthrough]];
    case 7:
        k2 ^= tail[6] << 16;
        [[fallthrough]];
    case 6:
        k2 ^= tail[5] << 8;
        [[fallthrough]];
    case 5:
        k2 ^= tail[4] << 0;
        k2 *= c2;
        k2 = ROTL32(k2, 16);
        k2 *= c3;
        h2 ^= k2;
        [[fallthrough]];
    case 4:
        k1 ^= tail[3] << 24;
        [[fallthrough]];
    case 3:
        k1 ^= tail[2] << 16;
        [[fallthrough]];
    case 2:
        k1 ^= tail[1] << 8;
        [[fallthrough]];
    case 1:
        k1 ^= tail[0] << 0;
        k1 *= c1;
        k1 = ROTL32(k1, 15);
        k1 *= c2;
        h1 ^= k1;
    };

    //----------
    // finalization: mix the length into every lane, cross-mix the lanes,
    // avalanche each one, then cross-mix again.

    h1 ^= len;
    h2 ^= len;
    h3 ^= len;
    h4 ^= len;

    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;

    h1 = fmix32(h1);
    h2 = fmix32(h2);
    h3 = fmix32(h3);
    h4 = fmix32(h4);

    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;

    ((uint32_t*)out)[0] = h1;
    ((uint32_t*)out)[1] = h2;
    ((uint32_t*)out)[2] = h3;
    ((uint32_t*)out)[3] = h4;
}
332 | | |
333 | | //----------------------------------------------------------------------------- |
334 | | |
// Core of the x64 128-bit MurmurHash3: consumes `len` bytes of `key` and
// updates the two 64-bit hash lanes `h1`/`h2` in place. Callers seed the
// lanes before calling and read the finalized values afterwards; the 128-bit
// and 64-bit public variants below both build on this routine.
void murmur_hash3_x64_process(const void* key, const int len, uint64_t& h1, uint64_t& h2) {
    const uint8_t* data = (const uint8_t*)key;
    const int nblocks = len / 16;

    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

    //----------
    // body: each 16-byte block contributes one 64-bit word to each lane.

    const uint64_t* blocks = (const uint64_t*)(data);

    for (int i = 0; i < nblocks; i++) {
        uint64_t k1 = getblock64(blocks, i * 2 + 0);
        uint64_t k2 = getblock64(blocks, i * 2 + 1);

        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;

        // Lanes cross-feed each other so they do not evolve independently.
        h1 = ROTL64(h1, 27);
        h1 += h2;
        h1 = h1 * 5 + 0x52dce729;

        k2 *= c2;
        k2 = ROTL64(k2, 33);
        k2 *= c1;
        h2 ^= k2;

        h2 = ROTL64(h2, 31);
        h2 += h1;
        h2 = h2 * 5 + 0x38495ab5;
    }

    //----------
    // tail: assemble the remaining 1-15 bytes via a fallthrough switch; bytes
    // 8-14 build k2 (mixed at case 9), bytes 0-7 build k1 (mixed at case 1).

    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);

    uint64_t k1 = 0;
    uint64_t k2 = 0;

    switch (len & 15) {
    case 15:
        k2 ^= ((uint64_t)tail[14]) << 48;
        [[fallthrough]];
    case 14:
        k2 ^= ((uint64_t)tail[13]) << 40;
        [[fallthrough]];
    case 13:
        k2 ^= ((uint64_t)tail[12]) << 32;
        [[fallthrough]];
    case 12:
        k2 ^= ((uint64_t)tail[11]) << 24;
        [[fallthrough]];
    case 11:
        k2 ^= ((uint64_t)tail[10]) << 16;
        [[fallthrough]];
    case 10:
        k2 ^= ((uint64_t)tail[9]) << 8;
        [[fallthrough]];
    case 9:
        k2 ^= ((uint64_t)tail[8]) << 0;
        k2 *= c2;
        k2 = ROTL64(k2, 33);
        k2 *= c1;
        h2 ^= k2;
        [[fallthrough]];
    case 8:
        k1 ^= ((uint64_t)tail[7]) << 56;
        [[fallthrough]];
    case 7:
        k1 ^= ((uint64_t)tail[6]) << 48;
        [[fallthrough]];
    case 6:
        k1 ^= ((uint64_t)tail[5]) << 40;
        [[fallthrough]];
    case 5:
        k1 ^= ((uint64_t)tail[4]) << 32;
        [[fallthrough]];
    case 4:
        k1 ^= ((uint64_t)tail[3]) << 24;
        [[fallthrough]];
    case 3:
        k1 ^= ((uint64_t)tail[2]) << 16;
        [[fallthrough]];
    case 2:
        k1 ^= ((uint64_t)tail[1]) << 8;
        [[fallthrough]];
    case 1:
        k1 ^= ((uint64_t)tail[0]) << 0;
        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;
    };

    //----------
    // finalization: mix in the length, cross-mix, avalanche, cross-mix again.

    h1 ^= len;
    h2 ^= len;

    h1 += h2;
    h2 += h1;

    h1 = fmix64(h1);
    h2 = fmix64(h2);

    h1 += h2;
    h2 += h1;
}
449 | | |
450 | | //----------------------------------------------------------------------------- |
451 | | |
452 | | // The origin function `murmur_hash3_x64_128` is copied from: https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp |
453 | | // And Doris modified it into function `murmur_hash3_x64_process` |
454 | | // For this reason, this function is still retained even though it has no calls. |
455 | 1 | void murmur_hash3_x64_128(const void* key, const int len, const uint32_t seed, void* out) { |
456 | 1 | uint64_t h1 = seed; |
457 | 1 | uint64_t h2 = seed; |
458 | 1 | murmur_hash3_x64_process(key, len, h1, h2); |
459 | 1 | ((uint64_t*)out)[0] = h1; |
460 | 1 | ((uint64_t*)out)[1] = h2; |
461 | 1 | } |
462 | | |
463 | | //----------------------------------------------------------------------------- |
464 | | |
465 | | // MurmurHash3 x64 64-bit variant using shared 128-bit processing function |
466 | | // This implementation reuses the murmur_hash3_x64_process function and only outputs the first hash value |
467 | | // Used for function mmh3_64_v2 |
468 | | void murmur_hash3_x64_64_shared(const void* key, const int64_t len, const uint64_t seed, |
469 | 4 | void* out) { |
470 | 4 | uint64_t h1 = seed; |
471 | 4 | uint64_t h2 = seed; |
472 | 4 | murmur_hash3_x64_process(key, static_cast<int>(len), h1, h2); |
473 | 4 | ((uint64_t*)out)[0] = h1; |
474 | 4 | } |
475 | | |
476 | | //----------------------------------------------------------------------------- |
477 | | |
478 | | // MurmurHash3 x64 64-bit variant with optimized standalone implementation |
479 | | // This implementation is specifically optimized for 64-bit output |
480 | | // Used for function mmh3_64 |
481 | 5.89M | void murmur_hash3_x64_64(const void* key, const int64_t len, const uint64_t seed, void* out) { |
482 | 5.89M | const uint8_t* data = (const uint8_t*)key; |
483 | 5.89M | const int nblocks = (int)len / 8; |
484 | 5.89M | uint64_t h1 = seed; |
485 | | |
486 | 5.89M | const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); |
487 | 5.89M | const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); |
488 | | |
489 | | //---------- |
490 | | // body |
491 | | |
492 | 5.89M | const uint64_t* blocks = (const uint64_t*)(data); |
493 | | |
494 | 12.0M | for (int i = 0; i < nblocks; i++) { |
495 | 6.10M | uint64_t k1 = getblock64(blocks, i); |
496 | | |
497 | 6.10M | k1 *= c1; |
498 | 6.10M | k1 = ROTL64(k1, 31); |
499 | 6.10M | k1 *= c2; |
500 | 6.10M | h1 ^= k1; |
501 | | |
502 | 6.10M | h1 = ROTL64(h1, 27); |
503 | 6.10M | h1 = h1 * 5 + 0x52dce729; |
504 | 6.10M | } |
505 | | |
506 | | //---------- |
507 | | // tail |
508 | | |
509 | 5.89M | const uint8_t* tail = (const uint8_t*)(data + nblocks * 8); |
510 | 5.89M | uint64_t k1 = 0; |
511 | | |
512 | 5.89M | switch (len & 7) { |
513 | 10.6k | case 7: |
514 | 10.6k | k1 ^= ((uint64_t)tail[6]) << 48; |
515 | 10.6k | [[fallthrough]]; |
516 | 11.8k | case 6: |
517 | 11.8k | k1 ^= ((uint64_t)tail[5]) << 40; |
518 | 11.8k | [[fallthrough]]; |
519 | 12.6k | case 5: |
520 | 12.6k | k1 ^= ((uint64_t)tail[4]) << 32; |
521 | 12.6k | [[fallthrough]]; |
522 | 91.4k | case 4: |
523 | 91.4k | k1 ^= ((uint64_t)tail[3]) << 24; |
524 | 91.4k | [[fallthrough]]; |
525 | 104k | case 3: |
526 | 104k | k1 ^= ((uint64_t)tail[2]) << 16; |
527 | 104k | [[fallthrough]]; |
528 | 241k | case 2: |
529 | 241k | k1 ^= ((uint64_t)tail[1]) << 8; |
530 | 241k | [[fallthrough]]; |
531 | 249k | case 1: |
532 | 249k | k1 ^= ((uint64_t)tail[0]) << 0; |
533 | 249k | k1 *= c1; |
534 | 249k | k1 = ROTL64(k1, 31); |
535 | 249k | k1 *= c2; |
536 | 249k | h1 ^= k1; |
537 | 5.89M | }; |
538 | | |
539 | | //---------- |
540 | | // finalization |
541 | | |
542 | 5.89M | h1 ^= len; |
543 | 5.89M | h1 = fmix64(h1); |
544 | | |
545 | 5.89M | ((uint64_t*)out)[0] = h1; |
546 | 5.89M | } |
547 | | #include "common/compile_check_end.h" |
548 | | |
549 | | } // namespace doris |