/*
 * Copyright 2012-2015 Samy Al Bahra
 * Copyright 2011-2014 AppNexus, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_HT_HASH_H
#define CK_HT_HASH_H

/*
 * These are the MurmurHash functions written by Austin Appleby.
 */

#include <ck_stdint.h>
#include <ck_string.h>

//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE    __forceinline

#include <stdlib.h>

#define ROTL32(x,y)     _rotl(x,y)
#define ROTL64(x,y)     _rotl64(x,y)

#define BIG_CONSTANT(x) (x)

// Other compilers

#else   // defined(_MSC_VER)

#define FORCE_INLINE inline __attribute__((always_inline))

static inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

static inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y)     rotl32(x,y)
#define ROTL64(x,y)     rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)
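
// A quick sanity check of the helpers above (a worked example, for
// reference). The rotation wraps the high bits around to the low end:
//
//   ROTL32(0x80000001, 1) == 0x00000003
//
// With non-MSVC compilers, BIG_CONSTANT(0xc6a4a7935bd1e995) expands to
// (0xc6a4a7935bd1e995LLU), giving the 64-bit literal an unsigned long long
// type; MSVC accepts the plain literal as-is.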

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

FORCE_INLINE static uint32_t getblock ( const uint32_t * p, int i )
{
#ifdef __s390x__
  uint32_t res;

  __asm__ ("	lrv	%0,%1\n"
	   : "=r" (res) : "Q" (p[i]) : "cc", "memory");
  return res;
#else
  return p[i];
#endif /* !__s390x__ */
}
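
// On big-endian s390x, the "lrv" (load reversed) instruction byte-swaps the
// word as it loads it, so getblock() returns blocks in little-endian order on
// all supported platforms. For example, the bytes { 0x01, 0x02, 0x03, 0x04 }
// are read as the block value 0x04030201.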

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE static uint32_t fmix ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}
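
// Every step in fmix() maps zero to zero, so fmix(0) == 0; note that
// MurmurHash3_x86_32 below mixes the key length into the state (h1 ^= len)
// before finalizing rather than after.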

//-----------------------------------------------------------------------------

static inline void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, uint32_t * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;
  int i;

  uint32_t h1 = seed;

  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(const void *)(data + nblocks*4);

  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  /* fall through */
  case 2: k1 ^= tail[1] << 8;
  /* fall through */
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix(h1);

  *out = h1;
}
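
/*
 * Example usage (a minimal sketch; the key and seed values here are
 * illustrative, not mandated by the API):
 *
 *   const char key[] = "example key";
 *   uint32_t h;
 *
 *   MurmurHash3_x86_32(key, (int)(sizeof(key) - 1), 0x9747b28cU, &h);
 */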

// 64-bit hash for 64-bit platforms

static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed )
{
  const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
  const int r = 47;

  uint64_t h = seed ^ (len * m);

  const uint64_t * data = (const uint64_t *)key;
  const uint64_t * end = data + (len/8);

  while(data != end)
  {
    uint64_t k;

    if (!((uintptr_t)data & 0x7))
	    k = *data++;
    else {
	    /* Unaligned source: copy the block out rather than dereferencing. */
	    memcpy(&k, data, sizeof(k));
	    data++;
    }

    k *= m;
    k ^= k >> r;
    k *= m;

    h ^= k;
    h *= m;
  }

  const unsigned char * data2 = (const unsigned char*)data;

  switch(len & 7)
  {
  case 7: h ^= (uint64_t)(data2[6]) << 48;
  /* fall through */
  case 6: h ^= (uint64_t)(data2[5]) << 40;
  /* fall through */
  case 5: h ^= (uint64_t)(data2[4]) << 32;
  /* fall through */
  case 4: h ^= (uint64_t)(data2[3]) << 24;
  /* fall through */
  case 3: h ^= (uint64_t)(data2[2]) << 16;
  /* fall through */
  case 2: h ^= (uint64_t)(data2[1]) << 8;
  /* fall through */
  case 1: h ^= (uint64_t)(data2[0]);
          h *= m;
  }

  h ^= h >> r;
  h *= m;
  h ^= h >> r;

  return h;
}
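
/*
 * Example usage (a minimal sketch; the key and seed values here are
 * illustrative):
 *
 *   const char key[] = "example key";
 *   uint64_t h = MurmurHash64A(key, (int)(sizeof(key) - 1), 0x9747b28c);
 */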

// 64-bit hash for 32-bit platforms

static inline uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed )
{
  const uint32_t m = 0x5bd1e995;
  const int r = 24;

  uint32_t h1 = (uint32_t)(seed) ^ len;
  uint32_t h2 = (uint32_t)(seed >> 32);

  const uint32_t * data = (const uint32_t *)key;

  while(len >= 8)
  {
    uint32_t k1 = *data++;
    k1 *= m; k1 ^= k1 >> r; k1 *= m;
    h1 *= m; h1 ^= k1;
    len -= 4;

    uint32_t k2 = *data++;
    k2 *= m; k2 ^= k2 >> r; k2 *= m;
    h2 *= m; h2 ^= k2;
    len -= 4;
  }

  if(len >= 4)
  {
    uint32_t k1 = *data++;
    k1 *= m; k1 ^= k1 >> r; k1 *= m;
    h1 *= m; h1 ^= k1;
    len -= 4;
  }

  switch(len)
  {
  case 3: h2 ^= ((const unsigned char*)data)[2] << 16;
  /* fall through */
  case 2: h2 ^= ((const unsigned char*)data)[1] << 8;
  /* fall through */
  case 1: h2 ^= ((const unsigned char*)data)[0];
      h2 *= m;
  }

  h1 ^= h2 >> 18; h1 *= m;
  h2 ^= h1 >> 22; h2 *= m;
  h1 ^= h2 >> 17; h1 *= m;
  h2 ^= h1 >> 19; h2 *= m;

  uint64_t h = h1;

  h = (h << 32) | h2;

  return h;
}
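
/*
 * Example usage (a minimal sketch; values are illustrative). Note that
 * MurmurHash64A and MurmurHash64B are distinct algorithms and do not
 * produce the same hash for the same input:
 *
 *   const char key[] = "example key";
 *   uint64_t h = MurmurHash64B(key, (int)(sizeof(key) - 1), 0x9747b28c);
 */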

#endif /* CK_HT_HASH_H */