Coverage Report

Created: 2026-04-16 20:39

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/util/hash/murmur_hash3.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
//-----------------------------------------------------------------------------
19
// MurmurHash3 was written by Austin Appleby, and is placed in the public
20
// domain. The author hereby disclaims copyright to this source code.
21
22
// Note - The x86 and x64 versions do _not_ produce the same results, as the
23
// algorithms are optimized for their respective platforms. You can still
24
// compile and run any of them on any platform, but your performance with the
25
// non-native version will be less than optimal.
26
27
#include "util/hash/murmur_hash3.h"
28
29
#include "util/unaligned.h"
30
31
namespace doris {
32
33
#if defined(_MSC_VER)
34
35
#define FORCE_INLINE __forceinline
36
37
#include <stdlib.h>
38
39
#define ROTL32(x, y) _rotl(x, y)
40
#define ROTL64(x, y) _rotl64(x, y)
41
42
#define BIG_CONSTANT(x) (x)
43
44
// Other compilers
45
46
#else // defined(_MSC_VER)
47
48
#define FORCE_INLINE inline __attribute__((always_inline))
49
50
53
FORCE_INLINE uint32_t rotl32(uint32_t x, int8_t r) {
51
53
    return (x << r) | (x >> (32 - r));
52
53
}
53
54
2.13M
FORCE_INLINE uint64_t rotl64(uint64_t x, int8_t r) {
55
2.13M
    return (x << r) | (x >> (64 - r));
56
2.13M
}
57
58
53
#define ROTL32(x, y) rotl32(x, y)
59
2.13M
#define ROTL64(x, y) rotl64(x, y)
60
61
3.04M
#define BIG_CONSTANT(x) (x##LLU)
62
63
#endif // !defined(_MSC_VER)
64
65
//-----------------------------------------------------------------------------
66
// Block read - if your platform needs to do endian-swapping or can only
67
// handle aligned reads, do the conversion here
68
69
21
FORCE_INLINE uint32_t getblock32(const uint32_t* p, int i) {
70
21
    return unaligned_load<uint32_t>(&p[i]);
71
21
}
72
73
945k
FORCE_INLINE uint64_t getblock64(const uint64_t* p, int i) {
74
945k
    return unaligned_load<uint64_t>(&p[i]);
75
945k
}
76
77
//-----------------------------------------------------------------------------
78
// Finalization mix - force all bits of a hash block to avalanche
79
80
20
FORCE_INLINE uint32_t fmix32(uint32_t h) {
81
20
    h ^= h >> 16;
82
20
    h *= 0x85ebca6b;
83
20
    h ^= h >> 13;
84
20
    h *= 0xc2b2ae35;
85
20
    h ^= h >> 16;
86
87
20
    return h;
88
20
}
89
90
//----------
91
92
760k
FORCE_INLINE uint64_t fmix64(uint64_t k) {
93
760k
    k ^= k >> 33;
94
760k
    k *= BIG_CONSTANT(0xff51afd7ed558ccd);
95
760k
    k ^= k >> 33;
96
760k
    k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
97
760k
    k ^= k >> 33;
98
99
760k
    return k;
100
760k
}
101
102
//-----------------------------------------------------------------------------
103
104
20
void murmur_hash3_x86_32(const void* key, int64_t len, uint32_t seed, void* out) {
105
20
    const uint8_t* data = (const uint8_t*)key;
106
20
    const int nblocks = (int)len / 4;
107
108
20
    uint32_t h1 = seed;
109
110
20
    const uint32_t c1 = 0xcc9e2d51;
111
20
    const uint32_t c2 = 0x1b873593;
112
113
    //----------
114
    // body
115
116
20
    const uint32_t* blocks = (const uint32_t*)(data + nblocks * 4);
117
118
41
    for (int i = -nblocks; i; i++) {
119
21
        uint32_t k1 = getblock32(blocks, i);
120
121
21
        k1 *= c1;
122
21
        k1 = ROTL32(k1, 15);
123
21
        k1 *= c2;
124
125
21
        h1 ^= k1;
126
21
        h1 = ROTL32(h1, 13);
127
21
        h1 = h1 * 5 + 0xe6546b64;
128
21
    }
129
130
    //----------
131
    // tail
132
133
20
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 4);
134
135
20
    uint32_t k1 = 0;
136
137
20
    switch (len & 3) {
138
1
    case 3:
139
1
        k1 ^= tail[2] << 16;
140
1
        [[fallthrough]];
141
2
    case 2:
142
2
        k1 ^= tail[1] << 8;
143
2
        [[fallthrough]];
144
11
    case 1:
145
11
        k1 ^= tail[0];
146
11
        k1 *= c1;
147
11
        k1 = ROTL32(k1, 15);
148
11
        k1 *= c2;
149
11
        h1 ^= k1;
150
20
    };
151
152
    //----------
153
    // finalization
154
155
20
    h1 ^= len;
156
157
20
    h1 = fmix32(h1);
158
159
20
    *(uint32_t*)out = h1;
160
20
}
161
162
//-----------------------------------------------------------------------------
163
164
0
void murmur_hash3_x86_128(const void* key, const int len, uint32_t seed, void* out) {
165
0
    const uint8_t* data = (const uint8_t*)key;
166
0
    const int nblocks = len / 16;
167
168
0
    uint32_t h1 = seed;
169
0
    uint32_t h2 = seed;
170
0
    uint32_t h3 = seed;
171
0
    uint32_t h4 = seed;
172
173
0
    const uint32_t c1 = 0x239b961b;
174
0
    const uint32_t c2 = 0xab0e9789;
175
0
    const uint32_t c3 = 0x38b34ae5;
176
0
    const uint32_t c4 = 0xa1e38b93;
177
178
    //----------
179
    // body
180
181
0
    const uint32_t* blocks = (const uint32_t*)(data + nblocks * 16);
182
183
0
    for (int i = -nblocks; i; i++) {
184
0
        uint32_t k1 = getblock32(blocks, i * 4 + 0);
185
0
        uint32_t k2 = getblock32(blocks, i * 4 + 1);
186
0
        uint32_t k3 = getblock32(blocks, i * 4 + 2);
187
0
        uint32_t k4 = getblock32(blocks, i * 4 + 3);
188
189
0
        k1 *= c1;
190
0
        k1 = ROTL32(k1, 15);
191
0
        k1 *= c2;
192
0
        h1 ^= k1;
193
194
0
        h1 = ROTL32(h1, 19);
195
0
        h1 += h2;
196
0
        h1 = h1 * 5 + 0x561ccd1b;
197
198
0
        k2 *= c2;
199
0
        k2 = ROTL32(k2, 16);
200
0
        k2 *= c3;
201
0
        h2 ^= k2;
202
203
0
        h2 = ROTL32(h2, 17);
204
0
        h2 += h3;
205
0
        h2 = h2 * 5 + 0x0bcaa747;
206
207
0
        k3 *= c3;
208
0
        k3 = ROTL32(k3, 17);
209
0
        k3 *= c4;
210
0
        h3 ^= k3;
211
212
0
        h3 = ROTL32(h3, 15);
213
0
        h3 += h4;
214
0
        h3 = h3 * 5 + 0x96cd1c35;
215
216
0
        k4 *= c4;
217
0
        k4 = ROTL32(k4, 18);
218
0
        k4 *= c1;
219
0
        h4 ^= k4;
220
221
0
        h4 = ROTL32(h4, 13);
222
0
        h4 += h1;
223
0
        h4 = h4 * 5 + 0x32ac3b17;
224
0
    }
225
226
    //----------
227
    // tail
228
229
0
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);
230
231
0
    uint32_t k1 = 0;
232
0
    uint32_t k2 = 0;
233
0
    uint32_t k3 = 0;
234
0
    uint32_t k4 = 0;
235
236
0
    switch (len & 15) {
237
0
    case 15:
238
0
        k4 ^= tail[14] << 16;
239
0
        [[fallthrough]];
240
0
    case 14:
241
0
        k4 ^= tail[13] << 8;
242
0
        [[fallthrough]];
243
0
    case 13:
244
0
        k4 ^= tail[12] << 0;
245
0
        k4 *= c4;
246
0
        k4 = ROTL32(k4, 18);
247
0
        k4 *= c1;
248
0
        h4 ^= k4;
249
0
        [[fallthrough]];
250
0
    case 12:
251
0
        k3 ^= tail[11] << 24;
252
0
        [[fallthrough]];
253
0
    case 11:
254
0
        k3 ^= tail[10] << 16;
255
0
        [[fallthrough]];
256
0
    case 10:
257
0
        k3 ^= tail[9] << 8;
258
0
        [[fallthrough]];
259
0
    case 9:
260
0
        k3 ^= tail[8] << 0;
261
0
        k3 *= c3;
262
0
        k3 = ROTL32(k3, 17);
263
0
        k3 *= c4;
264
0
        h3 ^= k3;
265
0
        [[fallthrough]];
266
0
    case 8:
267
0
        k2 ^= tail[7] << 24;
268
0
        [[fallthrough]];
269
0
    case 7:
270
0
        k2 ^= tail[6] << 16;
271
0
        [[fallthrough]];
272
0
    case 6:
273
0
        k2 ^= tail[5] << 8;
274
0
        [[fallthrough]];
275
0
    case 5:
276
0
        k2 ^= tail[4] << 0;
277
0
        k2 *= c2;
278
0
        k2 = ROTL32(k2, 16);
279
0
        k2 *= c3;
280
0
        h2 ^= k2;
281
0
        [[fallthrough]];
282
0
    case 4:
283
0
        k1 ^= tail[3] << 24;
284
0
        [[fallthrough]];
285
0
    case 3:
286
0
        k1 ^= tail[2] << 16;
287
0
        [[fallthrough]];
288
0
    case 2:
289
0
        k1 ^= tail[1] << 8;
290
0
        [[fallthrough]];
291
0
    case 1:
292
0
        k1 ^= tail[0] << 0;
293
0
        k1 *= c1;
294
0
        k1 = ROTL32(k1, 15);
295
0
        k1 *= c2;
296
0
        h1 ^= k1;
297
0
    };
298
299
    //----------
300
    // finalization
301
302
0
    h1 ^= len;
303
0
    h2 ^= len;
304
0
    h3 ^= len;
305
0
    h4 ^= len;
306
307
0
    h1 += h2;
308
0
    h1 += h3;
309
0
    h1 += h4;
310
0
    h2 += h1;
311
0
    h3 += h1;
312
0
    h4 += h1;
313
314
0
    h1 = fmix32(h1);
315
0
    h2 = fmix32(h2);
316
0
    h3 = fmix32(h3);
317
0
    h4 = fmix32(h4);
318
319
0
    h1 += h2;
320
0
    h1 += h3;
321
0
    h1 += h4;
322
0
    h2 += h1;
323
0
    h3 += h1;
324
0
    h4 += h1;
325
326
0
    ((uint32_t*)out)[0] = h1;
327
0
    ((uint32_t*)out)[1] = h2;
328
0
    ((uint32_t*)out)[2] = h3;
329
0
    ((uint32_t*)out)[3] = h4;
330
0
}
331
332
//-----------------------------------------------------------------------------
333
334
// Helper function that implements the core MurmurHash3 128-bit hashing algorithm
335
7
void murmur_hash3_x64_process(const void* key, const int len, uint64_t& h1, uint64_t& h2) {
336
7
    const uint8_t* data = (const uint8_t*)key;
337
7
    const int nblocks = len / 16;
338
339
7
    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
340
7
    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
341
342
    //----------
343
    // body
344
345
7
    const uint64_t* blocks = (const uint64_t*)(data);
346
347
8
    for (int i = 0; i < nblocks; i++) {
348
1
        uint64_t k1 = getblock64(blocks, i * 2 + 0);
349
1
        uint64_t k2 = getblock64(blocks, i * 2 + 1);
350
351
1
        k1 *= c1;
352
1
        k1 = ROTL64(k1, 31);
353
1
        k1 *= c2;
354
1
        h1 ^= k1;
355
356
1
        h1 = ROTL64(h1, 27);
357
1
        h1 += h2;
358
1
        h1 = h1 * 5 + 0x52dce729;
359
360
1
        k2 *= c2;
361
1
        k2 = ROTL64(k2, 33);
362
1
        k2 *= c1;
363
1
        h2 ^= k2;
364
365
1
        h2 = ROTL64(h2, 31);
366
1
        h2 += h1;
367
1
        h2 = h2 * 5 + 0x38495ab5;
368
1
    }
369
370
    //----------
371
    // tail
372
373
7
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);
374
375
7
    uint64_t k1 = 0;
376
7
    uint64_t k2 = 0;
377
378
7
    switch (len & 15) {
379
0
    case 15:
380
0
        k2 ^= ((uint64_t)tail[14]) << 48;
381
0
        [[fallthrough]];
382
0
    case 14:
383
0
        k2 ^= ((uint64_t)tail[13]) << 40;
384
0
        [[fallthrough]];
385
0
    case 13:
386
0
        k2 ^= ((uint64_t)tail[12]) << 32;
387
0
        [[fallthrough]];
388
1
    case 12:
389
1
        k2 ^= ((uint64_t)tail[11]) << 24;
390
1
        [[fallthrough]];
391
5
    case 11:
392
5
        k2 ^= ((uint64_t)tail[10]) << 16;
393
5
        [[fallthrough]];
394
5
    case 10:
395
5
        k2 ^= ((uint64_t)tail[9]) << 8;
396
5
        [[fallthrough]];
397
5
    case 9:
398
5
        k2 ^= ((uint64_t)tail[8]) << 0;
399
5
        k2 *= c2;
400
5
        k2 = ROTL64(k2, 33);
401
5
        k2 *= c1;
402
5
        h2 ^= k2;
403
5
        [[fallthrough]];
404
5
    case 8:
405
5
        k1 ^= ((uint64_t)tail[7]) << 56;
406
5
        [[fallthrough]];
407
5
    case 7:
408
5
        k1 ^= ((uint64_t)tail[6]) << 48;
409
5
        [[fallthrough]];
410
5
    case 6:
411
5
        k1 ^= ((uint64_t)tail[5]) << 40;
412
5
        [[fallthrough]];
413
6
    case 5:
414
6
        k1 ^= ((uint64_t)tail[4]) << 32;
415
6
        [[fallthrough]];
416
6
    case 4:
417
6
        k1 ^= ((uint64_t)tail[3]) << 24;
418
6
        [[fallthrough]];
419
6
    case 3:
420
6
        k1 ^= ((uint64_t)tail[2]) << 16;
421
6
        [[fallthrough]];
422
6
    case 2:
423
6
        k1 ^= ((uint64_t)tail[1]) << 8;
424
6
        [[fallthrough]];
425
6
    case 1:
426
6
        k1 ^= ((uint64_t)tail[0]) << 0;
427
6
        k1 *= c1;
428
6
        k1 = ROTL64(k1, 31);
429
6
        k1 *= c2;
430
6
        h1 ^= k1;
431
7
    };
432
433
    //----------
434
    // finalization
435
436
7
    h1 ^= len;
437
7
    h2 ^= len;
438
439
7
    h1 += h2;
440
7
    h2 += h1;
441
442
7
    h1 = fmix64(h1);
443
7
    h2 = fmix64(h2);
444
445
7
    h1 += h2;
446
7
    h2 += h1;
447
7
}
448
449
//-----------------------------------------------------------------------------
450
451
// The origin function `murmur_hash3_x64_128` is copied from: https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp
452
// And Doris modified it into function `murmur_hash3_x64_process`
453
// For this reason, this function is still retained even though it has no calls.
454
1
void murmur_hash3_x64_128(const void* key, const int len, const uint32_t seed, void* out) {
455
1
    uint64_t h1 = seed;
456
1
    uint64_t h2 = seed;
457
1
    murmur_hash3_x64_process(key, len, h1, h2);
458
1
    ((uint64_t*)out)[0] = h1;
459
1
    ((uint64_t*)out)[1] = h2;
460
1
}
461
462
//-----------------------------------------------------------------------------
463
464
// MurmurHash3 x64 64-bit variant using shared 128-bit processing function
465
// This implementation reuses the murmur_hash3_x64_process function and only outputs the first hash value
466
// Used for function mmh3_64_v2
467
void murmur_hash3_x64_64_shared(const void* key, const int64_t len, const uint64_t seed,
468
4
                                void* out) {
469
4
    uint64_t h1 = seed;
470
4
    uint64_t h2 = seed;
471
4
    murmur_hash3_x64_process(key, static_cast<int>(len), h1, h2);
472
4
    ((uint64_t*)out)[0] = h1;
473
4
}
474
475
//-----------------------------------------------------------------------------
476
477
// MurmurHash3 x64 64-bit variant with optimized standalone implementation
478
// This implementation is specifically optimized for 64-bit output
479
// Used for function mmh3_64
480
760k
void murmur_hash3_x64_64(const void* key, const int64_t len, const uint64_t seed, void* out) {
481
760k
    const uint8_t* data = (const uint8_t*)key;
482
760k
    const int nblocks = (int)len / 8;
483
760k
    uint64_t h1 = seed;
484
485
760k
    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
486
760k
    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
487
488
    //----------
489
    // body
490
491
760k
    const uint64_t* blocks = (const uint64_t*)(data);
492
493
1.70M
    for (int i = 0; i < nblocks; i++) {
494
945k
        uint64_t k1 = getblock64(blocks, i);
495
496
945k
        k1 *= c1;
497
945k
        k1 = ROTL64(k1, 31);
498
945k
        k1 *= c2;
499
945k
        h1 ^= k1;
500
501
945k
        h1 = ROTL64(h1, 27);
502
945k
        h1 = h1 * 5 + 0x52dce729;
503
945k
    }
504
505
    //----------
506
    // tail
507
508
760k
    const uint8_t* tail = (const uint8_t*)(data + nblocks * 8);
509
760k
    uint64_t k1 = 0;
510
511
760k
    switch (len & 7) {
512
9.97k
    case 7:
513
9.97k
        k1 ^= ((uint64_t)tail[6]) << 48;
514
9.97k
        [[fallthrough]];
515
11.0k
    case 6:
516
11.0k
        k1 ^= ((uint64_t)tail[5]) << 40;
517
11.0k
        [[fallthrough]];
518
11.4k
    case 5:
519
11.4k
        k1 ^= ((uint64_t)tail[4]) << 32;
520
11.4k
        [[fallthrough]];
521
90.1k
    case 4:
522
90.1k
        k1 ^= ((uint64_t)tail[3]) << 24;
523
90.1k
        [[fallthrough]];
524
102k
    case 3:
525
102k
        k1 ^= ((uint64_t)tail[2]) << 16;
526
102k
        [[fallthrough]];
527
239k
    case 2:
528
239k
        k1 ^= ((uint64_t)tail[1]) << 8;
529
239k
        [[fallthrough]];
530
245k
    case 1:
531
245k
        k1 ^= ((uint64_t)tail[0]) << 0;
532
245k
        k1 *= c1;
533
245k
        k1 = ROTL64(k1, 31);
534
245k
        k1 *= c2;
535
245k
        h1 ^= k1;
536
760k
    };
537
538
    //----------
539
    // finalization
540
541
760k
    h1 ^= len;
542
760k
    h1 = fmix64(h1);
543
544
760k
    ((uint64_t*)out)[0] = h1;
545
760k
}
546
547
} // namespace doris