// SPDX-License-Identifier: LGPL-2.1+
/*
 * MurmurHash3 was written by Austin Appleby, and is placed in the public
 * domain. The author hereby disclaims copyright to this source code.
 *
 * Adapted by John Wiele (jwiele@redhat.com).
 */

#include "murmurhash3.h"

#include <asm/unaligned.h>

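/*
 * Rotate x left by r bits. Every caller passes a shift strictly between
 * 0 and 64, so the complementary (64 - r) shift stays well defined.
 */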
static inline u64 rotl64(u64 x, s8 r)
{
	return (x << r) | (x >> (64 - r));
}

#define ROTL64(x, y) rotl64(x, y)

/* Finalization mix - force all bits of a hash block to avalanche */

static __always_inline u64 fmix64(u64 k)
{
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdLLU;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53LLU;
	k ^= k >> 33;

	return k;
}

void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
{
	const u8 *data = key;
	const int nblocks = len / 16;

	u64 h1 = seed;
	u64 h2 = seed;

	const u64 c1 = 0x87c37b91114253d5LLU;
	const u64 c2 = 0x4cf5ad432745937fLLU;

	u64 *hash_out = out;

	/* body */

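	/*
	 * Read each 16-byte block as two little-endian 64-bit words;
	 * get_unaligned_le64() keeps the result independent of host byte
	 * order and pointer alignment.
	 */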
	const u64 *blocks = (const u64 *)(data);

	int i;

	for (i = 0; i < nblocks; i++) {
		u64 k1 = get_unaligned_le64(&blocks[i * 2]);
		u64 k2 = get_unaligned_le64(&blocks[i * 2 + 1]);

		k1 *= c1;
		k1 = ROTL64(k1, 31);
		k1 *= c2;
		h1 ^= k1;

		h1 = ROTL64(h1, 27);
		h1 += h2;
		h1 = h1 * 5 + 0x52dce729;

		k2 *= c2;
		k2 = ROTL64(k2, 33);
		k2 *= c1;
		h2 ^= k2;

		h2 = ROTL64(h2, 31);
		h2 += h1;
		h2 = h2 * 5 + 0x38495ab5;
	}

	/* tail */

	{
		const u8 *tail = (const u8 *)(data + nblocks * 16);

		u64 k1 = 0;
		u64 k2 = 0;

		switch (len & 15) {
		case 15:
			k2 ^= ((u64)tail[14]) << 48;
			fallthrough;
		case 14:
			k2 ^= ((u64)tail[13]) << 40;
			fallthrough;
		case 13:
			k2 ^= ((u64)tail[12]) << 32;
			fallthrough;
		case 12:
			k2 ^= ((u64)tail[11]) << 24;
			fallthrough;
		case 11:
			k2 ^= ((u64)tail[10]) << 16;
			fallthrough;
		case 10:
			k2 ^= ((u64)tail[9]) << 8;
			fallthrough;
		case 9:
			k2 ^= ((u64)tail[8]) << 0;
			k2 *= c2;
			k2 = ROTL64(k2, 33);
			k2 *= c1;
			h2 ^= k2;
			fallthrough;

		case 8:
			k1 ^= ((u64)tail[7]) << 56;
			fallthrough;
		case 7:
			k1 ^= ((u64)tail[6]) << 48;
			fallthrough;
		case 6:
			k1 ^= ((u64)tail[5]) << 40;
			fallthrough;
		case 5:
			k1 ^= ((u64)tail[4]) << 32;
			fallthrough;
		case 4:
			k1 ^= ((u64)tail[3]) << 24;
			fallthrough;
		case 3:
			k1 ^= ((u64)tail[2]) << 16;
			fallthrough;
		case 2:
			k1 ^= ((u64)tail[1]) << 8;
			fallthrough;
		case 1:
			k1 ^= ((u64)tail[0]) << 0;
			k1 *= c1;
			k1 = ROTL64(k1, 31);
			k1 *= c2;
			h1 ^= k1;
			break;
		default:
			break;
		}
	}

	/* finalization */

	h1 ^= len;
	h2 ^= len;

	h1 += h2;
	h2 += h1;

	h1 = fmix64(h1);
	h2 = fmix64(h2);

	h1 += h2;
	h2 += h1;

	put_unaligned_le64(h1, &hash_out[0]);
	put_unaligned_le64(h2, &hash_out[1]);
}
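
/*
 * Example usage: a minimal, illustrative sketch only. The message buffer,
 * seed value, and the MURMURHASH3_EXAMPLE guard are hypothetical and are
 * not part of the original interface.
 */
#ifdef MURMURHASH3_EXAMPLE
static void __maybe_unused murmurhash3_example(void)
{
	static const char msg[] = "hello, world";
	u8 digest[16];

	/* Hash the message bytes (without the NUL) using an arbitrary seed. */
	murmurhash3_128(msg, sizeof(msg) - 1, 0x9747b28c, digest);

	/* digest now holds the 128-bit result as two little-endian words. */
}
#endif /* MURMURHASH3_EXAMPLE */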