/*
 *  xxHash - Fast Hash algorithm
 *  Copyright (c) 2012-2020, Yann Collet, Facebook, Inc.
 *
 *  You can contact the author at :
 *  - xxHash homepage: http://www.xxhash.com
 *  - xxHash source repository : https://github.com/Cyan4973/xxHash
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* *************************************
*  Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is performed through `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : use a packed attribute. It depends on compiler extensions (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on the compiler, but violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 * See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2).
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define XXH_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
  defined(__ICCARM__)
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif
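/* Illustrative sketch : rather than editing this file, the access method can
 * be selected on the compiler command line (flag syntax shown for gcc/clang;
 * adjust for your toolchain) :
 *
 *     cc -DXXH_FORCE_MEMORY_ACCESS=1 -c xxhash.c
 */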

/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to dereference it, triggering a memory access error.
 * When this option is enabled, xxHash output for a null input pointer is the same as for a zero-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
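/* Illustrative sketch (assumes XXH_ACCEPT_NULL_INPUT_POINTER was defined at
 * build time, and <assert.h> is available) : both calls below then hash a
 * zero-length input, so their results are identical.
 *
 *     unsigned int const h0 = XXH32(NULL, 0, 0);
 *     unsigned int const h1 = XXH32("", 0, 0);
 *     assert(h0 == h1);
 */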

/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
 * to improve speed on big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
#  define XXH_FORCE_NATIVE_FORMAT 0
#endif
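/* Note : a direct consequence of the convention above is that, when built
 * with native format forced on, e.g. :
 *
 *     cc -DXXH_FORCE_NATIVE_FORMAT=1 -c xxhash.c
 *
 * hashes produced on a big-endian machine will no longer match the canonical
 * little-endian results produced elsewhere.
 */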

/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash; set to 0 when the input data
 * is guaranteed to be aligned.
 */
#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif


/* *************************************
*  Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for malloc(), free() */
#include <stdlib.h>
#include <stddef.h>     /* size_t */
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/* for memcpy() */
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY
#endif
#include "xxhash.h"


/* *************************************
*  Compiler Specific Options
***************************************/
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__) || defined(__ICCARM__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR


#ifdef _MSC_VER
#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
#endif


/* *************************************
*  Basic Types
***************************************/
#ifndef MEM_MODULE
# define MEM_MODULE
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint8_t  BYTE;
    typedef uint16_t U16;
    typedef uint32_t U32;
    typedef  int32_t S32;
    typedef uint64_t U64;
#  else
    typedef unsigned char      BYTE;
    typedef unsigned short     U16;
    typedef unsigned int       U32;
    typedef   signed int       S32;
    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
#  endif
#endif


#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* packed attributes are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;

static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */


/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#if defined(__ICCARM__)
#  include <intrinsics.h>
#  define XXH_rotl32(x,r) __ROR((x),(32 - (r)))
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#endif
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#  define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#  define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif


/* *************************************
*  Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
    static const int g_one = 1;
#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
#endif
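/* The detection above, as a standalone sketch : store the integer 1 and read
 * its first byte. On a little-endian CPU the least significant byte comes
 * first, so the expression evaluates to 1; on a big-endian CPU it reads 0.
 *
 *     static int isLittleEndian(void)
 *     {
 *         const int one = 1;
 *         return *(const char*)&one;
 *     }
 */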


/* ***************************
*  Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}


/* *************************************
*  Macros
***************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
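/* How the assertion works : when the condition is false, `!!(c)` is 0 and the
 * constant expression `1/0` forces a compile-time error inside the enum.
 * Illustrative use (placed after declarations, as required by C90) :
 *
 *     XXH_STATIC_ASSERT(sizeof(U32) == 4);
 */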


/* *************************************
*  Constants
***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 =  668265263U;
static const U32 PRIME32_5 =  374761393U;

static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 =  1609587929392839161ULL;
static const U64 PRIME64_4 =  9650029242287828579ULL;
static const U64 PRIME64_5 =  2870177450012600261ULL;

XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* **************************
*  Utils
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}


/* ***************************
*  Simple Hash Functions
*****************************/

static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}

FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p<=limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_CREATESTATE_STATIC(state);
    XXH32_reset(state, seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
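/* One-shot usage sketch (hypothetical buffer and seed values) :
 *
 *     const char data[] = "hello world";
 *     unsigned int const seed = 0;
 *     unsigned int const h = XXH32(data, sizeof(data)-1, seed);
 */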


static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}

FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;   /* not const : reassigned below when XXH_ACCEPT_NULL_INPUT_POINTER is enabled */
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_CREATESTATE_STATIC(state);
    XXH64_reset(state, seed);
    XXH64_update(state, input, len);
    return XXH64_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
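/* One-shot 64-bit usage sketch (hypothetical buffer; XXH64 is generally the
 * faster choice on 64-bit systems) :
 *
 *     const char data[] = "hello world";
 *     unsigned long long const h = XXH64(data, sizeof(data)-1, 0);
 */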


/* **************************************************
*  Advanced Hash Functions
****************************************************/

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}


/*** Hash feed ***/

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len_32 += (unsigned)len;
    state->large_len |= (len>=16) | (state->total_len_32>=16);

    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (unsigned)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {   const U32* p32 = state->mem32;
            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do {
            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}
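/* Streaming usage sketch (hypothetical FILE* `f` and buffer; error handling
 * elided for brevity) :
 *
 *     XXH32_state_t* const state = XXH32_createState();
 *     char buf[4096];
 *     size_t n;
 *     unsigned int hash;
 *     XXH32_reset(state, 0);
 *     while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
 *         XXH32_update(state, buf, n);
 *     hash = XXH32_digest(state);
 *     XXH32_freeState(state);
 */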



/* **** XXH64 **** */

FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {  /* fill in tmp buffer */
        if (input != NULL) {
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        }
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64  = state->v3 + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}


/* **************************
*  Canonical representation
****************************/

/*! Default XXH result types are basic unsigned 32- and 64-bit integers.
*   The canonical representation follows the common human-readable convention, i.e. big-endian (most significant byte first).
*   These functions convert a hash result to and from its canonical format.
*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
*/

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
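/* Canonical round-trip sketch : serialize a hash in big-endian form, then
 * recover the native value (hypothetical `data`, `length` and FILE* `f`) :
 *
 *     XXH32_canonical_t canonical;
 *     XXH32_hash_t const h = XXH32(data, length, 0);
 *     XXH32_canonicalFromHash(&canonical, h);
 *     fwrite(&canonical, sizeof(canonical), 1, f);
 *     ...
 *     XXH32_hash_t const h2 = XXH32_hashFromCanonical(&canonical);
 */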