/root/doris/be/src/util/murmur_hash3.cpp
Line | Count | Source |
1 | | //----------------------------------------------------------------------------- |
2 | | // MurmurHash3 was written by Austin Appleby, and is placed in the public |
3 | | // domain. The author hereby disclaims copyright to this source code. |
4 | | |
5 | | // Note - The x86 and x64 versions do _not_ produce the same results, as the |
6 | | // algorithms are optimized for their respective platforms. You can still |
7 | | // compile and run any of them on any platform, but your performance with the |
8 | | // non-native version will be less than optimal. |
9 | | |
10 | | #include "murmur_hash3.h" |
11 | | |
12 | | #include "vec/common/unaligned.h" |
13 | | //----------------------------------------------------------------------------- |
14 | | // Platform-specific functions and macros |
15 | | |
16 | | // Microsoft Visual Studio |
17 | | |
18 | | #include "common/compile_check_begin.h" |
19 | | #if defined(_MSC_VER) |
20 | | |
21 | | #define FORCE_INLINE __forceinline |
22 | | |
23 | | #include <stdlib.h> |
24 | | |
25 | | #define ROTL32(x, y) _rotl(x, y) |
26 | | #define ROTL64(x, y) _rotl64(x, y) |
27 | | |
28 | | #define BIG_CONSTANT(x) (x) |
29 | | |
30 | | // Other compilers |
31 | | |
32 | | #else // defined(_MSC_VER) |
33 | | |
34 | | #define FORCE_INLINE inline __attribute__((always_inline)) |
35 | | |
36 | 53 | FORCE_INLINE uint32_t rotl32(uint32_t x, int8_t r) { |
37 | 53 | return (x << r) | (x >> (32 - r)); |
38 | 53 | } |
39 | | |
40 | 1.99M | FORCE_INLINE uint64_t rotl64(uint64_t x, int8_t r) { |
41 | 1.99M | return (x << r) | (x >> (64 - r)); |
42 | 1.99M | } |
43 | | |
44 | 53 | #define ROTL32(x, y) rotl32(x, y) |
45 | 1.99M | #define ROTL64(x, y) rotl64(x, y) |
46 | | |
47 | 2.75M | #define BIG_CONSTANT(x) (x##LLU) |
48 | | |
49 | | #endif // !defined(_MSC_VER) |
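
A quick aside on the rotate macros above: the shift-or idiom in rotl32/rotl64 is the standard portable left rotate, and modern compilers lower it to a single rotate instruction. A minimal standalone sketch (C++20; std::rotl from <bit> is the standard-library equivalent, used here only for comparison and not part of this file):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    int main() {
        uint32_t x = 0x12345678u;
        // (x << r) | (x >> (32 - r)) is a left rotate for 0 < r < 32,
        // exactly what ROTL32 expands to on non-MSVC compilers.
        assert(((x << 7) | (x >> 25)) == std::rotl(x, 7));
        return 0;
    }
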
50 | | |
51 | | //----------------------------------------------------------------------------- |
52 | | // Block read - if your platform needs to do endian-swapping or can only |
53 | | // handle aligned reads, do the conversion here |
54 | | |
55 | 21 | FORCE_INLINE uint32_t getblock32(const uint32_t* p, int i) { |
56 | 21 | return unaligned_load<uint32_t>(&p[i]); |
57 | 21 | } |
58 | | |
59 | 872k | FORCE_INLINE uint64_t getblock64(const uint64_t* p, int i) { |
60 | 872k | return unaligned_load<uint64_t>(&p[i]); |
61 | 872k | } |
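
unaligned_load comes from vec/common/unaligned.h; this file only assumes it reads sizeof(T) bytes from a possibly misaligned address. A hedged sketch of the usual portable way such a helper is written (a memcpy the optimizer turns into a plain load on x86 and AArch64) — an illustration, not the actual Doris implementation:

    #include <cstring>

    template <typename T>
    inline T unaligned_load_sketch(const void* address) {
        T result;
        std::memcpy(&result, address, sizeof(result)); // well-defined for any alignment
        return result;
    }
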
62 | | |
63 | | //----------------------------------------------------------------------------- |
64 | | // Finalization mix - force all bits of a hash block to avalanche |
65 | | |
66 | 20 | FORCE_INLINE uint32_t fmix32(uint32_t h) { |
67 | 20 | h ^= h >> 16; |
68 | 20 | h *= 0x85ebca6b; |
69 | 20 | h ^= h >> 13; |
70 | 20 | h *= 0xc2b2ae35; |
71 | 20 | h ^= h >> 16; |
72 | | |
73 | 20 | return h; |
74 | 20 | } |
75 | | |
76 | | //---------- |
77 | | |
78 | 688k | FORCE_INLINE uint64_t fmix64(uint64_t k) { |
79 | 688k | k ^= k >> 33; |
80 | 688k | k *= BIG_CONSTANT(0xff51afd7ed558ccd); |
81 | 688k | k ^= k >> 33; |
82 | 688k | k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); |
83 | 688k | k ^= k >> 33; |
84 | | |
85 | 688k | return k; |
86 | 688k | } |
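
A hedged standalone demo (C++20) of the avalanche property these mixers provide: flipping one input bit to fmix64 flips about half of the 64 output bits. fmix64_demo below copies the constants from the function above; it is an illustration, not part of this file:

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    static uint64_t fmix64_demo(uint64_t k) {
        k ^= k >> 33;
        k *= 0xff51afd7ed558ccdULL;
        k ^= k >> 33;
        k *= 0xc4ceb9fe1a85ec53ULL;
        k ^= k >> 33;
        return k;
    }

    int main() {
        uint64_t a = fmix64_demo(0x1234);
        uint64_t b = fmix64_demo(0x1234 ^ 1);              // single-bit change in the input
        printf("bits flipped: %d\n", std::popcount(a ^ b)); // expect roughly 32
        return 0;
    }
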
87 | | |
88 | | //----------------------------------------------------------------------------- |
89 | | |
90 | 20 | void murmur_hash3_x86_32(const void* key, int64_t len, uint32_t seed, void* out) { |
91 | 20 | const uint8_t* data = (const uint8_t*)key; |
92 | 20 | const int nblocks = (int)len / 4; |
93 | | |
94 | 20 | uint32_t h1 = seed; |
95 | | |
96 | 20 | const uint32_t c1 = 0xcc9e2d51; |
97 | 20 | const uint32_t c2 = 0x1b873593; |
98 | | |
99 | | //---------- |
100 | | // body |
101 | | |
102 | 20 | const uint32_t* blocks = (const uint32_t*)(data + nblocks * 4); |
103 | | |
104 | 41 | for (int i = -nblocks; i; i++) { |
105 | 21 | uint32_t k1 = getblock32(blocks, i); |
106 | | |
107 | 21 | k1 *= c1; |
108 | 21 | k1 = ROTL32(k1, 15); |
109 | 21 | k1 *= c2; |
110 | | |
111 | 21 | h1 ^= k1; |
112 | 21 | h1 = ROTL32(h1, 13); |
113 | 21 | h1 = h1 * 5 + 0xe6546b64; |
114 | 21 | } |
115 | | |
116 | | //---------- |
117 | | // tail |
118 | | |
119 | 20 | const uint8_t* tail = (const uint8_t*)(data + nblocks * 4); |
120 | | |
121 | 20 | uint32_t k1 = 0; |
122 | | |
123 | 20 | switch (len & 3) { |
124 | 1 | case 3: |
125 | 1 | k1 ^= tail[2] << 16; |
126 | 1 | [[fallthrough]]; |
127 | 2 | case 2: |
128 | 2 | k1 ^= tail[1] << 8; |
129 | 2 | [[fallthrough]]; |
130 | 11 | case 1: |
131 | 11 | k1 ^= tail[0]; |
132 | 11 | k1 *= c1; |
133 | 11 | k1 = ROTL32(k1, 15); |
134 | 11 | k1 *= c2; |
135 | 11 | h1 ^= k1; |
136 | 20 | }
137 | | |
138 | | //---------- |
139 | | // finalization |
140 | | |
141 | 20 | h1 ^= len; |
142 | | |
143 | 20 | h1 = fmix32(h1); |
144 | | |
145 | 20 | *(uint32_t*)out = h1; |
146 | 20 | } |
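
A hedged usage sketch for the 32-bit variant, assuming murmur_hash3.h (the header this file includes) declares it: out must point at at least 4 writable bytes.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    #include "murmur_hash3.h" // assumed to declare murmur_hash3_x86_32

    int main() {
        const char* key = "doris";
        uint32_t hash = 0;
        murmur_hash3_x86_32(key, (int64_t)strlen(key), /*seed=*/42, &hash);
        printf("%08x\n", hash); // value depends on key and seed
        return 0;
    }
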
147 | | |
148 | | //----------------------------------------------------------------------------- |
149 | | |
150 | 0 | void murmur_hash3_x86_128(const void* key, const int len, uint32_t seed, void* out) { |
151 | 0 | const uint8_t* data = (const uint8_t*)key; |
152 | 0 | const int nblocks = len / 16; |
153 | |
154 | 0 | uint32_t h1 = seed; |
155 | 0 | uint32_t h2 = seed; |
156 | 0 | uint32_t h3 = seed; |
157 | 0 | uint32_t h4 = seed; |
158 | |
159 | 0 | const uint32_t c1 = 0x239b961b; |
160 | 0 | const uint32_t c2 = 0xab0e9789; |
161 | 0 | const uint32_t c3 = 0x38b34ae5; |
162 | 0 | const uint32_t c4 = 0xa1e38b93; |
163 | | |
164 | | //---------- |
165 | | // body |
166 | |
167 | 0 | const uint32_t* blocks = (const uint32_t*)(data + nblocks * 16); |
168 | |
169 | 0 | for (int i = -nblocks; i; i++) { |
170 | 0 | uint32_t k1 = getblock32(blocks, i * 4 + 0); |
171 | 0 | uint32_t k2 = getblock32(blocks, i * 4 + 1); |
172 | 0 | uint32_t k3 = getblock32(blocks, i * 4 + 2); |
173 | 0 | uint32_t k4 = getblock32(blocks, i * 4 + 3); |
174 | |
175 | 0 | k1 *= c1; |
176 | 0 | k1 = ROTL32(k1, 15); |
177 | 0 | k1 *= c2; |
178 | 0 | h1 ^= k1; |
179 | |
180 | 0 | h1 = ROTL32(h1, 19); |
181 | 0 | h1 += h2; |
182 | 0 | h1 = h1 * 5 + 0x561ccd1b; |
183 | |
184 | 0 | k2 *= c2; |
185 | 0 | k2 = ROTL32(k2, 16); |
186 | 0 | k2 *= c3; |
187 | 0 | h2 ^= k2; |
188 | |
189 | 0 | h2 = ROTL32(h2, 17); |
190 | 0 | h2 += h3; |
191 | 0 | h2 = h2 * 5 + 0x0bcaa747; |
192 | |
193 | 0 | k3 *= c3; |
194 | 0 | k3 = ROTL32(k3, 17); |
195 | 0 | k3 *= c4; |
196 | 0 | h3 ^= k3; |
197 | |
198 | 0 | h3 = ROTL32(h3, 15); |
199 | 0 | h3 += h4; |
200 | 0 | h3 = h3 * 5 + 0x96cd1c35; |
201 | |
202 | 0 | k4 *= c4; |
203 | 0 | k4 = ROTL32(k4, 18); |
204 | 0 | k4 *= c1; |
205 | 0 | h4 ^= k4; |
206 | |
207 | 0 | h4 = ROTL32(h4, 13); |
208 | 0 | h4 += h1; |
209 | 0 | h4 = h4 * 5 + 0x32ac3b17; |
210 | 0 | } |
211 | | |
212 | | //---------- |
213 | | // tail |
214 | |
215 | 0 | const uint8_t* tail = (const uint8_t*)(data + nblocks * 16); |
216 | |
217 | 0 | uint32_t k1 = 0; |
218 | 0 | uint32_t k2 = 0; |
219 | 0 | uint32_t k3 = 0; |
220 | 0 | uint32_t k4 = 0; |
221 | |
222 | 0 | switch (len & 15) { |
223 | 0 | case 15: |
224 | 0 | k4 ^= tail[14] << 16; |
225 | 0 | [[fallthrough]]; |
226 | 0 | case 14: |
227 | 0 | k4 ^= tail[13] << 8; |
228 | 0 | [[fallthrough]]; |
229 | 0 | case 13: |
230 | 0 | k4 ^= tail[12] << 0; |
231 | 0 | k4 *= c4; |
232 | 0 | k4 = ROTL32(k4, 18); |
233 | 0 | k4 *= c1; |
234 | 0 | h4 ^= k4; |
235 | 0 | [[fallthrough]]; |
236 | 0 | case 12: |
237 | 0 | k3 ^= tail[11] << 24; |
238 | 0 | [[fallthrough]]; |
239 | 0 | case 11: |
240 | 0 | k3 ^= tail[10] << 16; |
241 | 0 | [[fallthrough]]; |
242 | 0 | case 10: |
243 | 0 | k3 ^= tail[9] << 8; |
244 | 0 | [[fallthrough]]; |
245 | 0 | case 9: |
246 | 0 | k3 ^= tail[8] << 0; |
247 | 0 | k3 *= c3; |
248 | 0 | k3 = ROTL32(k3, 17); |
249 | 0 | k3 *= c4; |
250 | 0 | h3 ^= k3; |
251 | 0 | [[fallthrough]]; |
252 | 0 | case 8: |
253 | 0 | k2 ^= tail[7] << 24; |
254 | 0 | [[fallthrough]]; |
255 | 0 | case 7: |
256 | 0 | k2 ^= tail[6] << 16; |
257 | 0 | [[fallthrough]]; |
258 | 0 | case 6: |
259 | 0 | k2 ^= tail[5] << 8; |
260 | 0 | [[fallthrough]]; |
261 | 0 | case 5: |
262 | 0 | k2 ^= tail[4] << 0; |
263 | 0 | k2 *= c2; |
264 | 0 | k2 = ROTL32(k2, 16); |
265 | 0 | k2 *= c3; |
266 | 0 | h2 ^= k2; |
267 | 0 | [[fallthrough]]; |
268 | 0 | case 4: |
269 | 0 | k1 ^= tail[3] << 24; |
270 | 0 | [[fallthrough]]; |
271 | 0 | case 3: |
272 | 0 | k1 ^= tail[2] << 16; |
273 | 0 | [[fallthrough]]; |
274 | 0 | case 2: |
275 | 0 | k1 ^= tail[1] << 8; |
276 | 0 | [[fallthrough]]; |
277 | 0 | case 1: |
278 | 0 | k1 ^= tail[0] << 0; |
279 | 0 | k1 *= c1; |
280 | 0 | k1 = ROTL32(k1, 15); |
281 | 0 | k1 *= c2; |
282 | 0 | h1 ^= k1; |
283 | 0 | }
284 | | |
285 | | //---------- |
286 | | // finalization |
287 | |
288 | 0 | h1 ^= len; |
289 | 0 | h2 ^= len; |
290 | 0 | h3 ^= len; |
291 | 0 | h4 ^= len; |
292 | |
293 | 0 | h1 += h2; |
294 | 0 | h1 += h3; |
295 | 0 | h1 += h4; |
296 | 0 | h2 += h1; |
297 | 0 | h3 += h1; |
298 | 0 | h4 += h1; |
299 | |
300 | 0 | h1 = fmix32(h1); |
301 | 0 | h2 = fmix32(h2); |
302 | 0 | h3 = fmix32(h3); |
303 | 0 | h4 = fmix32(h4); |
304 | |
|
305 | 0 | h1 += h2; |
306 | 0 | h1 += h3; |
307 | 0 | h1 += h4; |
308 | 0 | h2 += h1; |
309 | 0 | h3 += h1; |
310 | 0 | h4 += h1; |
311 | |
312 | 0 | ((uint32_t*)out)[0] = h1; |
313 | 0 | ((uint32_t*)out)[1] = h2; |
314 | 0 | ((uint32_t*)out)[2] = h3; |
315 | 0 | ((uint32_t*)out)[3] = h4; |
316 | 0 | } |
317 | | |
318 | | //----------------------------------------------------------------------------- |
319 | | |
320 | 0 | void murmur_hash3_x64_128(const void* key, const int len, const uint32_t seed, void* out) { |
321 | 0 | const uint8_t* data = (const uint8_t*)key; |
322 | 0 | const int nblocks = len / 16; |
323 | |
324 | 0 | uint64_t h1 = seed; |
325 | 0 | uint64_t h2 = seed; |
326 | |
327 | 0 | const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); |
328 | 0 | const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); |
329 | | |
330 | | //---------- |
331 | | // body |
332 | |
333 | 0 | const uint64_t* blocks = (const uint64_t*)(data); |
334 | |
335 | 0 | for (int i = 0; i < nblocks; i++) { |
336 | 0 | uint64_t k1 = getblock64(blocks, i * 2 + 0); |
337 | 0 | uint64_t k2 = getblock64(blocks, i * 2 + 1); |
338 | |
339 | 0 | k1 *= c1; |
340 | 0 | k1 = ROTL64(k1, 31); |
341 | 0 | k1 *= c2; |
342 | 0 | h1 ^= k1; |
343 | |
344 | 0 | h1 = ROTL64(h1, 27); |
345 | 0 | h1 += h2; |
346 | 0 | h1 = h1 * 5 + 0x52dce729; |
347 | |
348 | 0 | k2 *= c2; |
349 | 0 | k2 = ROTL64(k2, 33); |
350 | 0 | k2 *= c1; |
351 | 0 | h2 ^= k2; |
352 | |
353 | 0 | h2 = ROTL64(h2, 31); |
354 | 0 | h2 += h1; |
355 | 0 | h2 = h2 * 5 + 0x38495ab5; |
356 | 0 | } |
357 | | |
358 | | //---------- |
359 | | // tail |
360 | |
361 | 0 | const uint8_t* tail = (const uint8_t*)(data + nblocks * 16); |
362 | |
363 | 0 | uint64_t k1 = 0; |
364 | 0 | uint64_t k2 = 0; |
365 | |
366 | 0 | switch (len & 15) { |
367 | 0 | case 15: |
368 | 0 | k2 ^= ((uint64_t)tail[14]) << 48; |
369 | 0 | [[fallthrough]]; |
370 | 0 | case 14: |
371 | 0 | k2 ^= ((uint64_t)tail[13]) << 40; |
372 | 0 | [[fallthrough]]; |
373 | 0 | case 13: |
374 | 0 | k2 ^= ((uint64_t)tail[12]) << 32; |
375 | 0 | [[fallthrough]]; |
376 | 0 | case 12: |
377 | 0 | k2 ^= ((uint64_t)tail[11]) << 24; |
378 | 0 | [[fallthrough]]; |
379 | 0 | case 11: |
380 | 0 | k2 ^= ((uint64_t)tail[10]) << 16; |
381 | 0 | [[fallthrough]]; |
382 | 0 | case 10: |
383 | 0 | k2 ^= ((uint64_t)tail[9]) << 8; |
384 | 0 | [[fallthrough]]; |
385 | 0 | case 9: |
386 | 0 | k2 ^= ((uint64_t)tail[8]) << 0; |
387 | 0 | k2 *= c2; |
388 | 0 | k2 = ROTL64(k2, 33); |
389 | 0 | k2 *= c1; |
390 | 0 | h2 ^= k2; |
391 | 0 | [[fallthrough]]; |
392 | 0 | case 8: |
393 | 0 | k1 ^= ((uint64_t)tail[7]) << 56; |
394 | 0 | [[fallthrough]]; |
395 | 0 | case 7: |
396 | 0 | k1 ^= ((uint64_t)tail[6]) << 48; |
397 | 0 | [[fallthrough]]; |
398 | 0 | case 6: |
399 | 0 | k1 ^= ((uint64_t)tail[5]) << 40; |
400 | 0 | [[fallthrough]]; |
401 | 0 | case 5: |
402 | 0 | k1 ^= ((uint64_t)tail[4]) << 32; |
403 | 0 | [[fallthrough]]; |
404 | 0 | case 4: |
405 | 0 | k1 ^= ((uint64_t)tail[3]) << 24; |
406 | 0 | [[fallthrough]]; |
407 | 0 | case 3: |
408 | 0 | k1 ^= ((uint64_t)tail[2]) << 16; |
409 | 0 | [[fallthrough]]; |
410 | 0 | case 2: |
411 | 0 | k1 ^= ((uint64_t)tail[1]) << 8; |
412 | 0 | [[fallthrough]]; |
413 | 0 | case 1: |
414 | 0 | k1 ^= ((uint64_t)tail[0]) << 0; |
415 | 0 | k1 *= c1; |
416 | 0 | k1 = ROTL64(k1, 31); |
417 | 0 | k1 *= c2; |
418 | 0 | h1 ^= k1; |
419 | 0 | }
420 | | |
421 | | //---------- |
422 | | // finalization |
423 | |
424 | 0 | h1 ^= len; |
425 | 0 | h2 ^= len; |
426 | |
427 | 0 | h1 += h2; |
428 | 0 | h2 += h1; |
429 | |
430 | 0 | h1 = fmix64(h1); |
431 | 0 | h2 = fmix64(h2); |
432 | |
433 | 0 | h1 += h2; |
434 | 0 | h2 += h1; |
435 | |
436 | 0 | ((uint64_t*)out)[0] = h1; |
437 | 0 | ((uint64_t*)out)[1] = h2; |
438 | 0 | } |
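
A hedged demo of the header note that the x86 and x64 variants differ: both 128-bit functions produce a 16-byte digest, but they mix differently and in general disagree on the same input. Declarations are assumed to come from murmur_hash3.h:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    #include "murmur_hash3.h" // assumed to declare both 128-bit variants

    int main() {
        const char* key = "doris";
        uint32_t h86[4] = {0, 0, 0, 0}; // x86 variant: four 32-bit words
        uint64_t h64[2] = {0, 0};       // x64 variant: two 64-bit words
        murmur_hash3_x86_128(key, (int)strlen(key), /*seed=*/42, h86);
        murmur_hash3_x64_128(key, (int)strlen(key), /*seed=*/42, h64);
        // Both digests are 16 bytes; byte-for-byte they will almost always differ.
        printf("digests identical: %s\n", memcmp(h86, h64, 16) == 0 ? "yes" : "no");
        return 0;
    }
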
439 | | |
440 | 688k | void murmur_hash3_x64_64(const void* key, const int64_t len, const uint64_t seed, void* out) { |
441 | 688k | const uint8_t* data = (const uint8_t*)key; |
442 | 688k | const int nblocks = (int)len / 8; |
443 | 688k | uint64_t h1 = seed; |
444 | | |
445 | 688k | const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); |
446 | 688k | const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); |
447 | | |
448 | | //---------- |
449 | | // body |
450 | | |
451 | 688k | const uint64_t* blocks = (const uint64_t*)(data); |
452 | | |
453 | 1.56M | for (int i = 0; i < nblocks; i++) { |
454 | 872k | uint64_t k1 = getblock64(blocks, i); |
455 | | |
456 | 872k | k1 *= c1; |
457 | 872k | k1 = ROTL64(k1, 31); |
458 | 872k | k1 *= c2; |
459 | 872k | h1 ^= k1; |
460 | | |
461 | 872k | h1 = ROTL64(h1, 27); |
462 | 872k | h1 = h1 * 5 + 0x52dce729; |
463 | 872k | } |
464 | | |
465 | | //---------- |
466 | | // tail |
467 | | |
468 | 688k | const uint8_t* tail = (const uint8_t*)(data + nblocks * 8); |
469 | 688k | uint64_t k1 = 0; |
470 | | |
471 | 688k | switch (len & 7) { |
472 | 9.97k | case 7: |
473 | 9.97k | k1 ^= ((uint64_t)tail[6]) << 48; |
474 | 9.97k | [[fallthrough]]; |
475 | 11.0k | case 6: |
476 | 11.0k | k1 ^= ((uint64_t)tail[5]) << 40; |
477 | 11.0k | [[fallthrough]]; |
478 | 11.3k | case 5: |
479 | 11.3k | k1 ^= ((uint64_t)tail[4]) << 32; |
480 | 11.3k | [[fallthrough]]; |
481 | 90.1k | case 4: |
482 | 90.1k | k1 ^= ((uint64_t)tail[3]) << 24; |
483 | 90.1k | [[fallthrough]]; |
484 | 102k | case 3: |
485 | 102k | k1 ^= ((uint64_t)tail[2]) << 16; |
486 | 102k | [[fallthrough]]; |
487 | 239k | case 2: |
488 | 239k | k1 ^= ((uint64_t)tail[1]) << 8; |
489 | 239k | [[fallthrough]]; |
490 | 245k | case 1: |
491 | 245k | k1 ^= ((uint64_t)tail[0]) << 0; |
492 | 245k | k1 *= c1; |
493 | 245k | k1 = ROTL64(k1, 31); |
494 | 245k | k1 *= c2; |
495 | 245k | h1 ^= k1; |
496 | 688k | }
497 | | |
498 | | //---------- |
499 | | // finalization |
500 | | |
501 | 688k | h1 ^= len; |
502 | 688k | h1 = fmix64(h1); |
503 | | |
504 | 688k | ((uint64_t*)out)[0] = h1; |
505 | 688k | } |
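
A hedged usage sketch for the 64-bit variant, by far the hottest entry point in this report (~688k calls): a single uint64_t is written through out. The declaration is assumed to come from murmur_hash3.h:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    #include "murmur_hash3.h" // assumed to declare murmur_hash3_x64_64

    int main() {
        const char* key = "doris";
        uint64_t hash = 0;
        murmur_hash3_x64_64(key, (int64_t)strlen(key), /*seed=*/0, &hash);
        printf("%016llx\n", (unsigned long long)hash);
        return 0;
    }
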
506 | | #include "common/compile_check_end.h" |
507 | | //----------------------------------------------------------------------------- |