/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (C) 2012-2020 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
/*!
 * @mainpage xxHash
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * XXH_INLINE_ALL (and XXH_PRIVATE_API)
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *      https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
    && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL, such
    * as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
#  ifdef XXH_NAMESPACE
#    error "XXH_INLINE_ALL with XXH_NAMESPACE is not supported"
     /*
      * Note: Alternative: #undef all symbols (it's a pretty large list).
      * Without #error: it compiles, but functions are actually not inlined.
      */
#  endif
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols, but they must
    * still be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and is a more dispersed action.
    * Meanwhile, renaming can be achieved in a single block.
    */
#  define XXH_IPREF(Id)   XXH_INLINE_ ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */



/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix
 * any public symbol from the xxHash library with the value of XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
 */
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif
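
/*
 * For illustration, a minimal sketch of XXH_NAMESPACE usage (the `mylib_`
 * prefix and the wrapper function are hypothetical, not part of xxHash):
 * @code{.c}
 *    // library build: compiled with -DXXH_NAMESPACE=mylib_
 *    #include "xxhash.h"
 *
 *    // The exported symbol becomes mylib_XXH32, yet callers still write
 *    // XXH32(): this header translates the name transparently.
 *    unsigned mylib_checksum(const void* p, size_t n)
 *    {
 *        return (unsigned)XXH32(p, n, 0);
 *    }
 * @endcode
 */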

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  0
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is only useful when xxHash is compiled as a shared library, as it is
 * independent of the version defined in the header.
 *
 * @return `XXH_VERSION_NUMBER` as of when the function was compiled.
 */
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
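
/*
 * A minimal sketch of a runtime version check, useful when linking against a
 * shared library that may be older or newer than this header (the function
 * name `checkXXHashVersion` is illustrative):
 * @code{.c}
 *    #include <stdio.h>
 *    #include "xxhash.h"
 *
 *    static int checkXXHashVersion(void)
 *    {
 *        unsigned const libVersion = XXH_versionNumber();
 *        if (libVersion != XXH_VERSION_NUMBER) {
 *            fprintf(stderr, "version mismatch: header %u, library %u\n",
 *                    (unsigned)XXH_VERSION_NUMBER, libVersion);
 *            return 1;
 *        }
 *        return 0;
 *    }
 * @endcode
 */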


/* ****************************
*  Definitions
******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/*-**********************************************************************
*  32-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint32_t XXH32_hash_t;
#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   else
#     if ULONG_MAX == 0xFFFFFFFFUL
        typedef unsigned long XXH32_hash_t;
#     else
#       error "unsupported platform: need a 32-bit type"
#     endif
#   endif
#endif

/*!
 * @}
 *
 * @defgroup xxh32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is considered rather weak by today's standards.
 *   The @ref xxh3_family provides competitive speed for both 32-bit and 64-bit
 *   systems, and offers true 64/128 bit hash results. It provides a superior
 *   level of dispersion, and greatly reduces the risks of collisions.
 *
 * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
 * @see @ref xxh32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
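
/*
 * Example: one-shot hashing of a memory buffer (a minimal sketch; the
 * function name `hashString` and the seed value are illustrative):
 * @code{.c}
 *    #include <string.h>
 *    #include "xxhash.h"
 *
 *    static XXH32_hash_t hashString(const char* str)
 *    {
 *        XXH32_hash_t const seed = 0;   // 0 is a valid, common default
 *        return XXH32(str, strlen(str), seed);
 *    }
 * @endcode
 */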

/*!
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bit hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 * Example code for incrementally hashing a file:
 * @code{.c}
 *    #include <stdio.h>
 *    #include <assert.h>
 *    #include <xxhash.h>
 *    #define BUFFER_SIZE 256
 *
 *    // Note: XXH64 and XXH3 use the same interface.
 *    XXH32_hash_t
 *    hashFile(FILE* stream)
 *    {
 *        XXH32_state_t* state;
 *        unsigned char buf[BUFFER_SIZE];
 *        size_t amt;
 *        XXH32_hash_t hash;
 *
 *        state = XXH32_createState();       // Create a state
 *        assert(state != NULL);             // Error check here
 *        XXH32_reset(state, 0xbaad5eed);    // Reset state with our seed
 *        while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *            XXH32_update(state, buf, amt); // Hash the file in chunks
 *        }
 *        hash = XXH32_digest(state);        // Finalize the hash
 *        XXH32_freeState(state);            // Clean up
 *        return hash;
 *    }
 * @endcode
 */

/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * Must be freed with XXH32_freeState().
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * Must be allocated with XXH32_createState().
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 * @return XXH_OK.
 */
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *  @p statePtr must not be `NULL`.
 *
 * @return The calculated xxHash32 value from that state.
 */
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

/*******   Canonical representation   *******/

/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of what is the order on the byte level,
 * since little and big endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
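
/*
 * Example: writing a hash in canonical (big endian) form and reading it back
 * (a minimal sketch; `storeHash` and `loadHash` are illustrative names):
 * @code{.c}
 *    #include <string.h>
 *    #include "xxhash.h"
 *
 *    // Serialize a hash into a portable, endian-independent byte layout.
 *    static void storeHash(unsigned char out[4], XXH32_hash_t hash)
 *    {
 *        XXH32_canonical_t canonical;
 *        XXH32_canonicalFromHash(&canonical, hash);
 *        memcpy(out, canonical.digest, sizeof(canonical.digest));
 *    }
 *
 *    // Recover the native value on any platform, regardless of endianness.
 *    static XXH32_hash_t loadHash(const unsigned char in[4])
 *    {
 *        XXH32_canonical_t canonical;
 *        memcpy(canonical.digest, in, sizeof(canonical.digest));
 *        return XXH32_hashFromCanonical(&canonical);
 *    }
 * @endcode
 */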


/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
*  64-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup xxh64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results. It provides a superior level of
 *   dispersion, and greatly reduces the risks of collisions.
 */


/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
 * systems (see benchmark).
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit hash.
 *
 * @see
 *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);

/*******   Streaming   *******/
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);

/*!
 * @}
 * ************************************************************************
 * @defgroup xxh3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * ~2x faster on large inputs and >3x faster on small ones;
 * exact differences vary depending on platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but does not require it.
 * Any 32-bit and 64-bit targets that can run XXH32 smoothly
 * can run XXH3 at competitive speeds, even without vector support.
 * Further details are explained in the implementation.
 *
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
 * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
 *
 * The XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * and all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */

/*-**********************************************************************
*  XXH3 64-bit variant
************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly
 * based on the default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
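
/*
 * Example: one-shot XXH3 hashing with a per-application seed (a minimal
 * sketch; the function name and seed value are illustrative):
 * @code{.c}
 *    #include "xxhash.h"
 *
 *    static XXH64_hash_t hashMessage(const void* msg, size_t msgSize)
 *    {
 *        XXH64_hash_t const appSeed = 0x9E3779B185EBCA87ULL; // any value; 0 == XXH3_64bits()
 *        return XXH3_64bits_withSeed(msg, msgSize, appSeed);
 *    }
 * @endcode
 */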

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of produced hash values depends on the secret's entropy.
 * Technically, the secret must look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever unsure about the "randomness" of the blob of bytes,
 * consider relabelling it as a "custom seed" instead,
 * and employ "XXH3_generateSecret()" (see below)
 * to generate a high entropy secret derived from the custom seed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);


/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * The digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * The digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, so it _must outlive_ the hash streaming session.
 * Similar to the one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of produced hash values depends on the secret's entropy
 * (the secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);

/* note : the canonical representation of XXH3 is the same as XXH64,
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
*  XXH3 128-bit variant
************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64;  /*!< `value >> 64` */
} XXH128_hash_t;

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
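
/*
 * Example: producing a 128-bit hash and printing it as 32 hex digits,
 * high64 first so the output matches the logical 128-bit value (a minimal
 * sketch; the function name `printHash128` is illustrative):
 * @code{.c}
 *    #include <stdio.h>
 *    #include <inttypes.h>
 *    #include "xxhash.h"
 *
 *    static void printHash128(const void* data, size_t size)
 *    {
 *        XXH128_hash_t const h = XXH3_128bits(data, size);
 *        printf("%016" PRIx64 "%016" PRIx64 "\n",
 *               (uint64_t)h.high64, (uint64_t)h.low64);
 *    }
 * @endcode
 */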

/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use the already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have the same meaning as their 64-bit counterparts.
 */

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);

/* The following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * XXH128_isEqual():
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
 */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * XXH128_cmp():
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * return: >0 if *h128_1  > *h128_2
 *         =0 if *h128_1 == *h128_2
 *         <0 if *h128_1  < *h128_2
 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
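
/*
 * Example: sorting an array of 128-bit hashes with the comparator above
 * (a minimal sketch; `hashes` and `count` are illustrative names):
 * @code{.c}
 *    #include <stdlib.h>
 *    #include "xxhash.h"
 *
 *    static void sortHashes(XXH128_hash_t* hashes, size_t count)
 *    {
 *        // XXH128_cmp already matches the comparator signature qsort() expects.
 *        qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
 *    }
 * @endcode
 */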


/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);


#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v1;           /*!< First accumulator lane */
   XXH32_hash_t v2;           /*!< Second accumulator lane */
   XXH32_hash_t v3;           /*!< Third accumulator lane */
   XXH32_hash_t v4;           /*!< Fourth accumulator lane */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v1;           /*!< First accumulator lane */
   XXH64_hash_t v2;           /*!< Second accumulator lane */
   XXH64_hash_t v3;           /*!< Third accumulator lane */
   XXH64_hash_t v4;           /*!< Fourth accumulator lane */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH64_state_t */

#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* C11+ */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && defined(__GNUC__)
#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
 *
 * @see XXH3_64bits_update(), XXH3_128bits_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192

/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * @note **This structure has a strict alignment requirement of 64 bytes.** Do
 * not allocate this with `malloc()` or `new`, it will not be sufficiently
 * aligned. Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack
 * allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Do not access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
   XXH32_hash_t reserved32;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is merely emplaced on the stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation,
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
 */
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
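
/*
 * Example: stack allocation of an XXH3 state (a minimal sketch; it assumes
 * XXH_STATIC_LINKING_ONLY so that XXH3_state_t is a complete type, and the
 * seed value is illustrative):
 * @code{.c}
 *    #define XXH_STATIC_LINKING_ONLY
 *    #include "xxhash.h"
 *
 *    static XXH64_hash_t hashWithStackState(const void* data, size_t size)
 *    {
 *        XXH3_state_t state;               // no malloc(): lives on the stack
 *        XXH3_INITSTATE(&state);           // required before a _withSeed reset
 *        XXH3_64bits_reset_withSeed(&state, 12345);
 *        XXH3_64bits_update(&state, data, size);
 *        return XXH3_64bits_digest(&state);
 *    }
 * @endcode
 */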


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection
 * than a 64-bit seed, as it becomes much more difficult for an external actor to
 * guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE
 * into an already allocated buffer secretBuffer.
 * The generated secret is _always_ XXH3_SECRET_DEFAULT_SIZE bytes long.
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * this function can be used to generate a secret of proper quality.
 *
 * customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even a stupidly "low entropy" source
 * such as a bunch of zeroes. The resulting `secret` will nonetheless provide
 * all expected qualities.
 *
 * Supplying NULL as the customSeed copies the default secret into `secretBuffer`.
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API void XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize);
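
/*
 * Example: deriving a secret from an application-specific seed string, then
 * hashing with it (a minimal sketch; the seed string is illustrative, and
 * the literal 192 mirrors XXH3_SECRET_DEFAULT_SIZE, which is only exposed
 * with XXH_STATIC_LINKING_ONLY):
 * @code{.c}
 *    #include "xxhash.h"
 *
 *    static XXH64_hash_t hashWithDerivedSecret(const void* data, size_t size)
 *    {
 *        static const char customSeed[] = "my-application-v1";  // any content
 *        unsigned char secret[192];   // == XXH3_SECRET_DEFAULT_SIZE
 *        XXH3_generateSecret(secret, customSeed, sizeof(customSeed));
 *        return XXH3_64bits_withSecret(data, size, secret, sizeof(secret));
 *    }
 * @endcode
 */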


/* simple shortcut to the pre-selected XXH3_128bits variant */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);


#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires the implementation to be visible to the compiler,
 * hence be included alongside the header.
 * Previously, the implementation was hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
*  Tuning parameters
***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref xxh32_family and you have a strict C90 compiler.
 */
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
 * The below switch allows selection of a different access method
 * in the search for improved performance.
 *
 * @par Possible options:
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
 *   @par
 *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
 *     eliminate the function call and treat it as an unaligned access.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
 *   @par
 *     Depends on compiler extensions and is therefore not portable.
 *     This method is safe _if_ your compiler supports it,
 *     and *generally* as fast or faster than `memcpy`.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
 *  @par
 *     Casts directly and dereferences. This method doesn't depend on the
 *     compiler, but it violates the C standard as it directly dereferences an
 *     unaligned pointer. It can generate buggy code on targets which do not
 *     support unaligned memory accesses, but in some circumstances, it's the
 *     only known way to get the most performance.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
 *  @par
 *     Also portable. This can generate the best code on old compilers which don't
 *     inline small `memcpy()` calls, and it might also be faster on big-endian
 *     systems which lack a native byteswap instruction. However, some compilers
 *     will emit literal byteshifts even if the target supports unaligned access.
 *  .
 *
 * @warning
 *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
 *   care, as what works on one compiler/platform/optimization level may cause
 *   another to read garbage data or even crash.
 *
 * See https://stackoverflow.com/a/32095106/646947 for details.
 *
 * Prefer these methods in priority order (0 > 3 > 1 > 2).
 */
#  define XXH_FORCE_MEMORY_ACCESS 0
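/*
 * For illustration, a translation unit can select a method before including
 * the header (a sketch; the value 3 picks the byteshift method, and the same
 * setting can be passed on the compiler command line as
 * -DXXH_FORCE_MEMORY_ACCESS=3):
 * @code{.c}
 *    #define XXH_FORCE_MEMORY_ACCESS 3
 *    #include "xxhash.h"
 * @endcode
 */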
1167 /*!
1168  * @def XXH_ACCEPT_NULL_INPUT_POINTER
1169  * @brief Whether to add explicit `NULL` checks.
1170  *
1171  * If the input pointer is `NULL` and the length is non-zero, xxHash's default
1172  * behavior is to dereference it, triggering a segfault.
1173  *
1174  * When this macro is enabled, xxHash actively checks the input for a null pointer.
1175  * If it is, the result for null input pointers is the same as a zero-length input.
1176  */
1177 #  define XXH_ACCEPT_NULL_INPUT_POINTER 0
1178 /*!
1179  * @def XXH_FORCE_ALIGN_CHECK
1180  * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
1181  * and XXH64() only).
1182  *
1183  * This is an important performance trick for architectures without decent
1184  * unaligned memory access performance.
1185  *
1186  * It checks for input alignment, and when conditions are met, uses a "fast
1187  * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
1188  * faster_ read speed.
1189  *
1190  * The check costs one initial branch per hash, which is generally negligible,
1191  * but not zero.
1192  *
1193  * Moreover, it's not useful to generate an additional code path if memory
1194  * access uses the same instruction for both aligned and unaligned
1195  * addresses (e.g. x86 and aarch64).
1196  *
1197  * In these cases, the alignment check can be removed by setting this macro to 0.
1198  * Then the code will always use unaligned memory access.
1199  * Align check is automatically disabled on x86, x64 & arm64,
1200  * which are platforms known to offer good unaligned memory accesses performance.
1201  *
1202  * This option does not affect XXH3 (only XXH32 and XXH64).
1203  */
1204 #  define XXH_FORCE_ALIGN_CHECK 0
1205 
1206 /*!
1207  * @def XXH_NO_INLINE_HINTS
1208  * @brief When non-zero, sets all functions to `static`.
1209  *
1210  * By default, xxHash tries to force the compiler to inline almost all internal
1211  * functions.
1212  *
1213  * This can usually improve performance due to reduced jumping and improved
1214  * constant folding, but significantly increases the size of the binary which
1215  * might not be favorable.
1216  *
1217  * Additionally, sometimes the forced inlining can be detrimental to performance,
1218  * depending on the architecture.
1219  *
1220  * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
1221  * compiler full control on whether to inline or not.
1222  *
1223  * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
1224  * -fno-inline with GCC or Clang, this will automatically be defined.
1225  */
1226 #  define XXH_NO_INLINE_HINTS 0
1227 
1228 /*!
1229  * @def XXH_REROLL
1230  * @brief Whether to reroll `XXH32_finalize` and `XXH64_finalize`.
1231  *
1232  * For performance, `XXH32_finalize` and `XXH64_finalize` use an unrolled loop
1233  * in the form of a switch statement.
1234  *
1235  * This is not always desirable, as it generates larger code, and depending on
1236  * the architecture, may even be slower
1237  *
1238  * This is automatically defined with `-Os`/`-Oz` on GCC and Clang.
1239  */
1240 #  define XXH_REROLL 0
1241 
1242 /*!
1243  * @internal
1244  * @brief Redefines old internal names.
1245  *
1246  * For compatibility with code that uses xxHash's internals before the names
1247  * were changed to improve namespacing. There is no other reason to use this.
1248  */
1249 #  define XXH_OLD_NAMES
1250 #  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
1251 #endif /* XXH_DOXYGEN */
1252 /*!
1253  * @}
1254  */
1255 
1256 #ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
1257    /* prefer __packed__ structures (method 1) for gcc on armv7 and armv8 */
1258 #  if !defined(__clang__) && ( \
1259     (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
1260     (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7)) )
1261 #    define XXH_FORCE_MEMORY_ACCESS 1
1262 #  endif
1263 #endif
1264 
1265 #ifndef XXH_ACCEPT_NULL_INPUT_POINTER   /* can be defined externally */
1266 #  define XXH_ACCEPT_NULL_INPUT_POINTER 0
1267 #endif
1268 
1269 #ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
1270 #  if defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) \
1271    || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64) /* visual */
1272 #    define XXH_FORCE_ALIGN_CHECK 0
1273 #  else
1274 #    define XXH_FORCE_ALIGN_CHECK 1
1275 #  endif
1276 #endif
1277 
1278 #ifndef XXH_NO_INLINE_HINTS
1279 #  if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
1280    || defined(__NO_INLINE__)     /* -O0, -fno-inline */
1281 #    define XXH_NO_INLINE_HINTS 1
1282 #  else
1283 #    define XXH_NO_INLINE_HINTS 0
1284 #  endif
1285 #endif
1286 
1287 #ifndef XXH_REROLL
1288 #  if defined(__OPTIMIZE_SIZE__)
1289 #    define XXH_REROLL 1
1290 #  else
1291 #    define XXH_REROLL 0
1292 #  endif
1293 #endif
1294 
1295 /*!
1296  * @defgroup impl Implementation
1297  * @{
1298  */
1299 
1300 
1301 /* *************************************
1302 *  Includes & Memory related functions
1303 ***************************************/
1304 /*
1305  * Modify the local functions below should you wish to use
1306  * different memory routines for malloc() and free()
1307  */
1308 #include <stdlib.h>
1309 
1310 /*!
1311  * @internal
1312  * @brief Modify this function to use a different routine than malloc().
1313  */
XXH_malloc(size_t s)1314 static void* XXH_malloc(size_t s) { return malloc(s); }
1315 
1316 /*!
1317  * @internal
1318  * @brief Modify this function to use a different routine than free().
1319  */
XXH_free(void * p)1320 static void XXH_free(void* p) { free(p); }
1321 
1322 #include <string.h>
1323 
1324 /*!
1325  * @internal
1326  * @brief Modify this function to use a different routine than memcpy().
1327  */
XXH_memcpy(void * dest,const void * src,size_t size)1328 static void* XXH_memcpy(void* dest, const void* src, size_t size)
1329 {
1330     return memcpy(dest,src,size);
1331 }
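/*
 * Illustrative sketch (not part of the library): to route state allocations
 * through a custom allocator, the three wrappers above can be rewritten.
 * `my_alloc()` and `my_free()` below are hypothetical stand-ins:
 *
 *     static void* XXH_malloc(size_t s) { return my_alloc(s); }
 *     static void  XXH_free(void* p)    { my_free(p); }
 *
 * Only XXH32_createState()/XXH64_createState() and the matching freeState()
 * functions go through these wrappers; one-shot hashing never allocates.
 */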
1332 
1333 #include <limits.h>   /* ULLONG_MAX */
1334 
1335 
1336 /* *************************************
1337 *  Compiler Specific Options
1338 ***************************************/
1339 #ifdef _MSC_VER /* Visual Studio warning fix */
1340 #  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
1341 #endif
1342 
1343 #if XXH_NO_INLINE_HINTS  /* disable inlining hints */
1344 #  if defined(__GNUC__)
1345 #    define XXH_FORCE_INLINE static __attribute__((unused))
1346 #  else
1347 #    define XXH_FORCE_INLINE static
1348 #  endif
1349 #  define XXH_NO_INLINE static
1350 /* enable inlining hints */
1351 #elif defined(_MSC_VER)  /* Visual Studio */
1352 #  define XXH_FORCE_INLINE static __forceinline
1353 #  define XXH_NO_INLINE static __declspec(noinline)
1354 #elif defined(__GNUC__)
1355 #  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
1356 #  define XXH_NO_INLINE static __attribute__((noinline))
1357 #elif defined (__cplusplus) \
1358   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
1359 #  define XXH_FORCE_INLINE static inline
1360 #  define XXH_NO_INLINE static
1361 #else
1362 #  define XXH_FORCE_INLINE static
1363 #  define XXH_NO_INLINE static
1364 #endif
1365 
1366 
1367 
1368 /* *************************************
1369 *  Debug
1370 ***************************************/
1371 /*!
1372  * @ingroup tuning
1373  * @def XXH_DEBUGLEVEL
1374  * @brief Sets the debugging level.
1375  *
1376  * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
1377  * compiler's command line options. The value must be a number.
1378  */
1379 #ifndef XXH_DEBUGLEVEL
1380 #  ifdef DEBUGLEVEL /* backwards compat */
1381 #    define XXH_DEBUGLEVEL DEBUGLEVEL
1382 #  else
1383 #    define XXH_DEBUGLEVEL 0
1384 #  endif
1385 #endif
1386 
1387 #if (XXH_DEBUGLEVEL>=1)
1388 #  include <assert.h>   /* note: can still be disabled with NDEBUG */
1389 #  define XXH_ASSERT(c)   assert(c)
1390 #else
1391 #  define XXH_ASSERT(c)   ((void)0)
1392 #endif
1393 
1394 /* note: use after variable declarations */
1395 #define XXH_STATIC_ASSERT(c)  do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0)
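/*
 * How XXH_STATIC_ASSERT works: if `c` is false, `!!(c)` evaluates to 0 and
 * the `1/0` in the enum initializer is a compile-time constant-expression
 * error. Illustrative use:
 *
 *     XXH_STATIC_ASSERT(sizeof(char) == 1);   // always true: compiles
 *     XXH_STATIC_ASSERT(sizeof(char) == 2);   // division by zero: rejected
 */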
1396 
1397 
1398 /* *************************************
1399 *  Basic Types
1400 ***************************************/
1401 #if !defined (__VMS) \
1402  && (defined (__cplusplus) \
1403  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
1404 # include <stdint.h>
1405   typedef uint8_t xxh_u8;
1406 #else
1407   typedef unsigned char xxh_u8;
1408 #endif
1409 typedef XXH32_hash_t xxh_u32;
1410 
1411 #ifdef XXH_OLD_NAMES
1412 #  define BYTE xxh_u8
1413 #  define U8   xxh_u8
1414 #  define U32  xxh_u32
1415 #endif
1416 
1417 /* ***   Memory access   *** */
1418 
1419 /*!
1420  * @internal
1421  * @fn xxh_u32 XXH_read32(const void* ptr)
1422  * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
1423  *
1424  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1425  *
1426  * @param ptr The pointer to read from.
1427  * @return The 32-bit native endian integer from the bytes at @p ptr.
1428  */
1429 
1430 /*!
1431  * @internal
1432  * @fn xxh_u32 XXH_readLE32(const void* ptr)
1433  * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
1434  *
1435  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1436  *
1437  * @param ptr The pointer to read from.
1438  * @return The 32-bit little endian integer from the bytes at @p ptr.
1439  */
1440 
1441 /*!
1442  * @internal
1443  * @fn xxh_u32 XXH_readBE32(const void* ptr)
1444  * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
1445  *
1446  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1447  *
1448  * @param ptr The pointer to read from.
1449  * @return The 32-bit big endian integer from the bytes at @p ptr.
1450  */
1451 
1452 /*!
1453  * @internal
1454  * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
1455  * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
1456  *
1457  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1458  * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
1459  * always @ref XXH_alignment::XXH_unaligned.
1460  *
1461  * @param ptr The pointer to read from.
1462  * @param align Whether @p ptr is aligned.
1463  * @pre
1464  *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
1465  *   aligned.
1466  * @return The 32-bit little endian integer from the bytes at @p ptr.
1467  */
1468 
1469 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1470 /*
1471  * Manual byteshift. Best for old compilers which don't inline memcpy.
1472  * We actually directly use XXH_readLE32 and XXH_readBE32.
1473  */
1474 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
1475 
1476 /*
1477  * Force direct memory access. Only works on CPUs which support unaligned
1478  * memory access in hardware.
1479  */
1480 static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
1481 
1482 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
1483 
1484 /*
1485  * __pack instructions are safer but compiler specific, hence potentially
1486  * problematic for some compilers.
1487  *
1488  * Currently only defined for GCC and ICC.
1489  */
1490 #ifdef XXH_OLD_NAMES
1491 typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
1492 #endif
1493 static xxh_u32 XXH_read32(const void* ptr)
1494 {
1495     typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
1496     return ((const xxh_unalign*)ptr)->u32;
1497 }
1498 
1499 #else
1500 
1501 /*
1502  * Portable and safe solution. Generally efficient.
1503  * see: https://stackoverflow.com/a/32095106/646947
1504  */
1505 static xxh_u32 XXH_read32(const void* memPtr)
1506 {
1507     xxh_u32 val;
1508     memcpy(&val, memPtr, sizeof(val));
1509     return val;
1510 }
1511 
1512 #endif   /* XXH_FORCE_MEMORY_ACCESS */
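/*
 * Usage note (illustrative): the access method can also be forced at build
 * time; for example, to select the byteshift loads (method 3) under a
 * compiler that does not inline memcpy():
 *
 *     cc -O2 -DXXH_FORCE_MEMORY_ACCESS=3 -c xxhash.c
 */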
1513 
1514 
1515 /* ***   Endianness   *** */
1516 typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
1517 
1518 /*!
1519  * @ingroup tuning
1520  * @def XXH_CPU_LITTLE_ENDIAN
1521  * @brief Whether the target is little endian.
1522  *
1523  * Defined to 1 if the target is little endian, or 0 if it is big endian.
1524  * It can be defined externally, for example on the compiler command line.
1525  *
1526  * If it is not defined, a runtime check (which is usually constant folded)
1527  * is used instead.
1528  *
1529  * @note
1530  *   This is not necessarily defined to an integer constant.
1531  *
1532  * @see XXH_isLittleEndian() for the runtime check.
1533  */
1534 #ifndef XXH_CPU_LITTLE_ENDIAN
1535 /*
1536  * Try to detect endianness automatically, to avoid the nonstandard behavior
1537  * in `XXH_isLittleEndian()`
1538  */
1539 #  if defined(_WIN32) /* Windows is always little endian */ \
1540      || defined(__LITTLE_ENDIAN__) \
1541      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
1542 #    define XXH_CPU_LITTLE_ENDIAN 1
1543 #  elif defined(__BIG_ENDIAN__) \
1544      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
1545 #    define XXH_CPU_LITTLE_ENDIAN 0
1546 #  else
1547 /*!
1548  * @internal
1549  * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
1550  *
1551  * Most compilers will constant fold this.
1552  */
1553 static int XXH_isLittleEndian(void)
1554 {
1555     /*
1556      * Portable and well-defined behavior.
1557      * Don't use static: it is detrimental to performance.
1558      */
1559     const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
1560     return one.c[0];
1561 }
1562 #   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
1563 #  endif
1564 #endif
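/*
 * Usage note (illustrative): when cross-compiling for a target that the
 * detection above does not recognize, endianness can be pinned on the
 * command line, which also removes the runtime check:
 *
 *     cc -O2 -DXXH_CPU_LITTLE_ENDIAN=0 -c xxhash.c   // big endian target
 */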
1565 
1566 
1567 
1568 
1569 /* ****************************************
1570 *  Compiler-specific Functions and Macros
1571 ******************************************/
1572 #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
1573 
1574 #ifdef __has_builtin
1575 #  define XXH_HAS_BUILTIN(x) __has_builtin(x)
1576 #else
1577 #  define XXH_HAS_BUILTIN(x) 0
1578 #endif
1579 
1580 /*!
1581  * @internal
1582  * @def XXH_rotl32(x,r)
1583  * @brief 32-bit rotate left.
1584  *
1585  * @param x The 32-bit integer to be rotated.
1586  * @param r The number of bits to rotate.
1587  * @pre
1588  *   @p r > 0 && @p r < 32
1589  * @note
1590  *   @p x and @p r may be evaluated multiple times.
1591  * @return The rotated result.
1592  */
1593 #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
1594                                && XXH_HAS_BUILTIN(__builtin_rotateleft64)
1595 #  define XXH_rotl32 __builtin_rotateleft32
1596 #  define XXH_rotl64 __builtin_rotateleft64
1597 /* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
1598 #elif defined(_MSC_VER)
1599 #  define XXH_rotl32(x,r) _rotl(x,r)
1600 #  define XXH_rotl64(x,r) _rotl64(x,r)
1601 #else
1602 #  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
1603 #  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
1604 #endif
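/*
 * Example: XXH_rotl32(0x80000001U, 1) == 0x00000003U -- the top bit wraps
 * around to bit 0, while every other bit shifts left by one.
 */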
1605 
1606 /*!
1607  * @internal
1608  * @fn xxh_u32 XXH_swap32(xxh_u32 x)
1609  * @brief A 32-bit byteswap.
1610  *
1611  * @param x The 32-bit integer to byteswap.
1612  * @return @p x, byteswapped.
1613  */
1614 #if defined(_MSC_VER)     /* Visual Studio */
1615 #  define XXH_swap32 _byteswap_ulong
1616 #elif XXH_GCC_VERSION >= 403
1617 #  define XXH_swap32 __builtin_bswap32
1618 #else
1619 static xxh_u32 XXH_swap32 (xxh_u32 x)
1620 {
1621     return  ((x << 24) & 0xff000000 ) |
1622             ((x <<  8) & 0x00ff0000 ) |
1623             ((x >>  8) & 0x0000ff00 ) |
1624             ((x >> 24) & 0x000000ff );
1625 }
1626 #endif
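/* Example: XXH_swap32(0x12345678) == 0x78563412 (byte order reversed). */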
1627 
1628 
1629 /* ***************************
1630 *  Memory reads
1631 *****************************/
1632 
1633 /*!
1634  * @internal
1635  * @brief Enum to indicate whether a pointer is aligned.
1636  */
1637 typedef enum {
1638     XXH_aligned,  /*!< Aligned */
1639     XXH_unaligned /*!< Possibly unaligned */
1640 } XXH_alignment;
1641 
1642 /*
1643  * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
1644  *
1645  * This is ideal for older compilers which don't inline memcpy.
1646  */
1647 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1648 
1649 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
1650 {
1651     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
1652     return bytePtr[0]
1653          | ((xxh_u32)bytePtr[1] << 8)
1654          | ((xxh_u32)bytePtr[2] << 16)
1655          | ((xxh_u32)bytePtr[3] << 24);
1656 }
1657 
1658 XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
1659 {
1660     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
1661     return bytePtr[3]
1662          | ((xxh_u32)bytePtr[2] << 8)
1663          | ((xxh_u32)bytePtr[1] << 16)
1664          | ((xxh_u32)bytePtr[0] << 24);
1665 }
1666 
1667 #else
1668 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
1669 {
1670     return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
1671 }
1672 
1673 static xxh_u32 XXH_readBE32(const void* ptr)
1674 {
1675     return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
1676 }
1677 #endif
1678 
1679 XXH_FORCE_INLINE xxh_u32
1680 XXH_readLE32_align(const void* ptr, XXH_alignment align)
1681 {
1682     if (align==XXH_unaligned) {
1683         return XXH_readLE32(ptr);
1684     } else {
1685         return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
1686     }
1687 }
1688 
1689 
1690 /* *************************************
1691 *  Misc
1692 ***************************************/
1693 /*! @ingroup public */
1694 XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
1695 
1696 
1697 /* *******************************************************************
1698 *  32-bit hash functions
1699 *********************************************************************/
1700 /*!
1701  * @}
1702  * @defgroup xxh32_impl XXH32 implementation
1703  * @ingroup impl
1704  * @{
1705  */
1706 static const xxh_u32 XXH_PRIME32_1 = 0x9E3779B1U;   /*!< 0b10011110001101110111100110110001 */
1707 static const xxh_u32 XXH_PRIME32_2 = 0x85EBCA77U;   /*!< 0b10000101111010111100101001110111 */
1708 static const xxh_u32 XXH_PRIME32_3 = 0xC2B2AE3DU;   /*!< 0b11000010101100101010111000111101 */
1709 static const xxh_u32 XXH_PRIME32_4 = 0x27D4EB2FU;   /*!< 0b00100111110101001110101100101111 */
1710 static const xxh_u32 XXH_PRIME32_5 = 0x165667B1U;   /*!< 0b00010110010101100110011110110001 */
1711 
1712 #ifdef XXH_OLD_NAMES
1713 #  define PRIME32_1 XXH_PRIME32_1
1714 #  define PRIME32_2 XXH_PRIME32_2
1715 #  define PRIME32_3 XXH_PRIME32_3
1716 #  define PRIME32_4 XXH_PRIME32_4
1717 #  define PRIME32_5 XXH_PRIME32_5
1718 #endif
1719 
1720 /*!
1721  * @internal
1722  * @brief Normal stripe processing routine.
1723  *
1724  * This shuffles the bits so that any bit from @p input impacts several bits in
1725  * @p acc.
1726  *
1727  * @param acc The accumulator lane.
1728  * @param input The stripe of input to mix.
1729  * @return The mixed accumulator lane.
1730  */
1731 static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
1732 {
1733     acc += input * XXH_PRIME32_2;
1734     acc  = XXH_rotl32(acc, 13);
1735     acc *= XXH_PRIME32_1;
1736 #if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE)
1737     /*
1738      * UGLY HACK:
1739      * This inline assembly hack forces acc into a normal register. This is the
1740      * only thing that prevents GCC and Clang from autovectorizing the XXH32
1741      * loop (pragmas and attributes don't work for some reason) without globally
1742      * disabling SSE4.1.
1743      *
1744      * The reason we want to avoid vectorization is that, despite working on
1745      * 4 integers at a time, there are multiple factors slowing XXH32 down on
1746      * SSE4:
1747      * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
1748      *   newer chips!) making it slightly slower to multiply four integers at
1749      *   once compared to four integers independently. Even on Sandy/Ivy
1750      *   Bridge, where pmulld was fastest, it is still not worth going into
1751      *   SSE just to multiply unless doing a long operation.
1752      *
1753      * - Four instructions are required to rotate,
1754      *      movdqa tmp,  v // not required with VEX encoding
1755      *      pslld  tmp, 13 // tmp <<= 13
1756      *      psrld  v,   19 // x >>= 19
1757      *      por    v,  tmp // x |= tmp
1758      *   compared to one for scalar:
1759      *      roll   v, 13    // reliably fast across the board
1760      *      shldl  v, v, 13 // Sandy Bridge and later prefer this for some reason
1761      *
1762      * - Instruction level parallelism is actually more beneficial here because
1763      *   the SIMD actually serializes this operation: While v1 is rotating, v2
1764      *   can load data, while v3 can multiply. SSE forces them to operate
1765      *   together.
1766      *
1767      * How this hack works:
1768      * __asm__(""       // Declare an assembly block but don't declare any instructions
1769      *          :       // However, as an Input/Output Operand,
1770      *          "+r"    // constrain a read/write operand (+) as a general purpose register (r).
1771      *          (acc)   // and set acc as the operand
1772      * );
1773      *
1774      * Because of the 'r', the compiler has promised that acc will be in a
1775      * general purpose register and the '+' says that it will be 'read/write',
1776      * so it has to assume it has changed. It is like volatile without all the
1777      * loads and stores.
1778      *
1779      * Since the argument has to be in a normal register (not an SSE register),
1780      * each time XXH32_round is called, it is impossible to vectorize.
1781      */
1782     __asm__("" : "+r" (acc));
1783 #endif
1784     return acc;
1785 }
1786 
1787 /*!
1788  * @internal
1789  * @brief Mixes all bits to finalize the hash.
1790  *
1791  * The final mix ensures that all input bits have a chance to impact any bit in
1792  * the output digest, resulting in an unbiased distribution.
1793  *
1794  * @param h32 The hash to avalanche.
1795  * @return The avalanched hash.
1796  */
1797 static xxh_u32 XXH32_avalanche(xxh_u32 h32)
1798 {
1799     h32 ^= h32 >> 15;
1800     h32 *= XXH_PRIME32_2;
1801     h32 ^= h32 >> 13;
1802     h32 *= XXH_PRIME32_3;
1803     h32 ^= h32 >> 16;
1804     return(h32);
1805 }
1806 
1807 #define XXH_get32bits(p) XXH_readLE32_align(p, align)
1808 
1809 /*!
1810  * @internal
1811  * @brief Processes the last 0-15 bytes of @p ptr.
1812  *
1813  * There may be up to 15 bytes remaining to consume from the input.
1814  * This final stage will digest them to ensure that all input bytes are present
1815  * in the final mix.
1816  *
1817  * @param h32 The hash to finalize.
1818  * @param ptr The pointer to the remaining input.
1819  * @param len The remaining length, modulo 16.
1820  * @param align Whether @p ptr is aligned.
1821  * @return The finalized hash.
1822  */
1823 static xxh_u32
1824 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
1825 {
1826 #define XXH_PROCESS1 do {                           \
1827     h32 += (*ptr++) * XXH_PRIME32_5;                \
1828     h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;      \
1829 } while (0)
1830 
1831 #define XXH_PROCESS4 do {                           \
1832     h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;      \
1833     ptr += 4;                                   \
1834     h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;     \
1835 } while (0)
1836 
1837     /* Compact rerolled version */
1838     if (XXH_REROLL) {
1839         len &= 15;
1840         while (len >= 4) {
1841             XXH_PROCESS4;
1842             len -= 4;
1843         }
1844         while (len > 0) {
1845             XXH_PROCESS1;
1846             --len;
1847         }
1848         return XXH32_avalanche(h32);
1849     } else {
1850          switch(len&15) /* or switch(bEnd - p) */ {
1851            case 12:      XXH_PROCESS4;
1852                          /* fallthrough */
1853            case 8:       XXH_PROCESS4;
1854                          /* fallthrough */
1855            case 4:       XXH_PROCESS4;
1856                          return XXH32_avalanche(h32);
1857 
1858            case 13:      XXH_PROCESS4;
1859                          /* fallthrough */
1860            case 9:       XXH_PROCESS4;
1861                          /* fallthrough */
1862            case 5:       XXH_PROCESS4;
1863                          XXH_PROCESS1;
1864                          return XXH32_avalanche(h32);
1865 
1866            case 14:      XXH_PROCESS4;
1867                          /* fallthrough */
1868            case 10:      XXH_PROCESS4;
1869                          /* fallthrough */
1870            case 6:       XXH_PROCESS4;
1871                          XXH_PROCESS1;
1872                          XXH_PROCESS1;
1873                          return XXH32_avalanche(h32);
1874 
1875            case 15:      XXH_PROCESS4;
1876                          /* fallthrough */
1877            case 11:      XXH_PROCESS4;
1878                          /* fallthrough */
1879            case 7:       XXH_PROCESS4;
1880                          /* fallthrough */
1881            case 3:       XXH_PROCESS1;
1882                          /* fallthrough */
1883            case 2:       XXH_PROCESS1;
1884                          /* fallthrough */
1885            case 1:       XXH_PROCESS1;
1886                          /* fallthrough */
1887            case 0:       return XXH32_avalanche(h32);
1888         }
1889         XXH_ASSERT(0);
1890         return h32;   /* reaching this point is deemed impossible */
1891     }
1892 }
1893 
1894 #ifdef XXH_OLD_NAMES
1895 #  define PROCESS1 XXH_PROCESS1
1896 #  define PROCESS4 XXH_PROCESS4
1897 #else
1898 #  undef XXH_PROCESS1
1899 #  undef XXH_PROCESS4
1900 #endif
1901 
1902 /*!
1903  * @internal
1904  * @brief The implementation for @ref XXH32().
1905  *
1906  * @param input, len, seed Directly passed from @ref XXH32().
1907  * @param align Whether @p input is aligned.
1908  * @return The calculated hash.
1909  */
1910 XXH_FORCE_INLINE xxh_u32
1911 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
1912 {
1913     const xxh_u8* bEnd = input + len;
1914     xxh_u32 h32;
1915 
1916 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
1917     if (input==NULL) {
1918         len=0;
1919         bEnd=input=(const xxh_u8*)(size_t)16;
1920     }
1921 #endif
1922 
1923     if (len>=16) {
1924         const xxh_u8* const limit = bEnd - 15;
1925         xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
1926         xxh_u32 v2 = seed + XXH_PRIME32_2;
1927         xxh_u32 v3 = seed + 0;
1928         xxh_u32 v4 = seed - XXH_PRIME32_1;
1929 
1930         do {
1931             v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
1932             v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
1933             v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
1934             v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
1935         } while (input < limit);
1936 
1937         h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
1938             + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
1939     } else {
1940         h32  = seed + XXH_PRIME32_5;
1941     }
1942 
1943     h32 += (xxh_u32)len;
1944 
1945     return XXH32_finalize(h32, input, len&15, align);
1946 }
1947 
1948 /*! @ingroup xxh32_family */
1949 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
1950 {
1951 #if 0
1952     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
1953     XXH32_state_t state;
1954     XXH32_reset(&state, seed);
1955     XXH32_update(&state, (const xxh_u8*)input, len);
1956     return XXH32_digest(&state);
1957 #else
1958     if (XXH_FORCE_ALIGN_CHECK) {
1959         if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
1960             return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
1961     }   }
1962 
1963     return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
1964 #endif
1965 }
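/*
 * Illustrative usage (the buffer and seed below are arbitrary):
 *
 *     const char data[] = "hello";
 *     XXH32_hash_t const h = XXH32(data, sizeof(data)-1, 0);  // seed = 0
 *
 * Thanks to the little-endian reads above, the same bytes and seed produce
 * the same hash on big and little endian platforms alike.
 */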
1966 
1967 
1968 
1969 /*******   Hash streaming   *******/
1970 /*!
1971  * @ingroup xxh32_family
1972  */
1973 XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
1974 {
1975     return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
1976 }
1977 /*! @ingroup xxh32_family */
1978 XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
1979 {
1980     XXH_free(statePtr);
1981     return XXH_OK;
1982 }
1983 
1984 /*! @ingroup xxh32_family */
1985 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
1986 {
1987     memcpy(dstState, srcState, sizeof(*dstState));
1988 }
1989 
1990 /*! @ingroup xxh32_family */
1991 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
1992 {
1993     XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
1994     memset(&state, 0, sizeof(state));
1995     state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
1996     state.v2 = seed + XXH_PRIME32_2;
1997     state.v3 = seed + 0;
1998     state.v4 = seed - XXH_PRIME32_1;
1999     /* do not write into reserved, planned to be removed in a future version */
2000     memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
2001     return XXH_OK;
2002 }
2003 
2004 
2005 /*! @ingroup xxh32_family */
2006 XXH_PUBLIC_API XXH_errorcode
2007 XXH32_update(XXH32_state_t* state, const void* input, size_t len)
2008 {
2009     if (input==NULL)
2010 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
2011         return XXH_OK;
2012 #else
2013         return XXH_ERROR;
2014 #endif
2015 
2016     {   const xxh_u8* p = (const xxh_u8*)input;
2017         const xxh_u8* const bEnd = p + len;
2018 
2019         state->total_len_32 += (XXH32_hash_t)len;
2020         state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
2021 
2022         if (state->memsize + len < 16)  {   /* fill in tmp buffer */
2023             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
2024             state->memsize += (XXH32_hash_t)len;
2025             return XXH_OK;
2026         }
2027 
2028         if (state->memsize) {   /* some data left from previous update */
2029             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
2030             {   const xxh_u32* p32 = state->mem32;
2031                 state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
2032                 state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
2033                 state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
2034                 state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
2035             }
2036             p += 16-state->memsize;
2037             state->memsize = 0;
2038         }
2039 
2040         if (p <= bEnd-16) {
2041             const xxh_u8* const limit = bEnd - 16;
2042             xxh_u32 v1 = state->v1;
2043             xxh_u32 v2 = state->v2;
2044             xxh_u32 v3 = state->v3;
2045             xxh_u32 v4 = state->v4;
2046 
2047             do {
2048                 v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
2049                 v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4;
2050                 v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4;
2051                 v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4;
2052             } while (p<=limit);
2053 
2054             state->v1 = v1;
2055             state->v2 = v2;
2056             state->v3 = v3;
2057             state->v4 = v4;
2058         }
2059 
2060         if (p < bEnd) {
2061             XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
2062             state->memsize = (unsigned)(bEnd-p);
2063         }
2064     }
2065 
2066     return XXH_OK;
2067 }
2068 
2069 
2070 /*! @ingroup xxh32_family */
2071 XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
2072 {
2073     xxh_u32 h32;
2074 
2075     if (state->large_len) {
2076         h32 = XXH_rotl32(state->v1, 1)
2077             + XXH_rotl32(state->v2, 7)
2078             + XXH_rotl32(state->v3, 12)
2079             + XXH_rotl32(state->v4, 18);
2080     } else {
2081         h32 = state->v3 /* == seed */ + XXH_PRIME32_5;
2082     }
2083 
2084     h32 += state->total_len_32;
2085 
2086     return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
2087 }
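/*
 * Illustrative streaming usage: feeding the input in pieces produces the
 * same hash as the one-shot XXH32() above.
 *
 *     XXH32_state_t* const st = XXH32_createState();
 *     if (st != NULL) {
 *         XXH32_reset(st, 0);
 *         XXH32_update(st, "hel", 3);
 *         XXH32_update(st, "lo", 2);
 *         XXH32_hash_t const h = XXH32_digest(st);  // == XXH32("hello", 5, 0)
 *         XXH32_freeState(st);
 *     }
 */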
2088 
2089 
2090 /*******   Canonical representation   *******/
2091 
2092 /*!
2093  * @ingroup xxh32_family
2094  * The default return values from XXH functions are unsigned 32 and 64 bit
2095  * integers.
2096  *
2097  * The canonical representation uses big endian convention, the same convention
2098  * as human-readable numbers (large digits first).
2099  *
2100  * This way, hash values can be written into a file or buffer, remaining
2101  * comparable across different systems.
2102  *
2103  * The following functions allow transformation of hash values to and from their
2104  * canonical format.
2105  */
2106 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
2107 {
2108     XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
2109     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
2110     memcpy(dst, &hash, sizeof(*dst));
2111 }
2112 /*! @ingroup xxh32_family */
2113 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
2114 {
2115     return XXH_readBE32(src);
2116 }
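/*
 * Illustrative round-trip through the canonical (big endian) form:
 *
 *     XXH32_canonical_t c;
 *     XXH32_canonicalFromHash(&c, h);   // h: any XXH32_hash_t
 *     // the 4 bytes of c can now be stored or transmitted portably
 *     XXH32_hash_t const h2 = XXH32_hashFromCanonical(&c);
 *     // h2 == h on every platform
 */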
2117 
2118 
2119 #ifndef XXH_NO_LONG_LONG
2120 
2121 /* *******************************************************************
2122 *  64-bit hash functions
2123 *********************************************************************/
2124 /*!
2125  * @}
2126  * @ingroup impl
2127  * @{
2128  */
2129 /*******   Memory access   *******/
2130 
2131 typedef XXH64_hash_t xxh_u64;
2132 
2133 #ifdef XXH_OLD_NAMES
2134 #  define U64 xxh_u64
2135 #endif
2136 
2137 /*!
2138  * XXH_REROLL_XXH64:
2139  * Whether to reroll the XXH64_finalize() loop.
2140  *
2141  * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a
2142  * performance gain on 64-bit hosts, as only one jump is required.
2143  *
2144  * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit
2145  * registers, and 64-bit arithmetic needs to be simulated, it isn't beneficial
2146  * to unroll. The code becomes ridiculously large (the largest function in the
2147  * binary on i386!), and rerolling it saves anywhere from 3kB to 20kB. It is
2148  * also slightly faster because it fits into cache better and is more likely
2149  * to be inlined by the compiler.
2150  *
2151  * If XXH_REROLL is defined, this is ignored and the loop is always rerolled.
2152  */
2153 #ifndef XXH_REROLL_XXH64
2154 #  if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \
2155    || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \
2156      || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \
2157      || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \
2158      || defined(__mips64__) || defined(__mips64)) /* mips64 */ \
2159    || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */
2160 #    define XXH_REROLL_XXH64 1
2161 #  else
2162 #    define XXH_REROLL_XXH64 0
2163 #  endif
2164 #endif /* !defined(XXH_REROLL_XXH64) */
2165 
2166 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2167 /*
2168  * Manual byteshift. Best for old compilers which don't inline memcpy.
2169  * We actually directly use XXH_readLE64 and XXH_readBE64.
2170  */
2171 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
2172 
2173 /* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
2174 static xxh_u64 XXH_read64(const void* memPtr)
2175 {
2176     return *(const xxh_u64*) memPtr;
2177 }
2178 
2179 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
2180 
2181 /*
2182  * __pack instructions are safer, but compiler specific, hence potentially
2183  * problematic for some compilers.
2184  *
2185  * Currently only defined for GCC and ICC.
2186  */
2187 #ifdef XXH_OLD_NAMES
2188 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
2189 #endif
2190 static xxh_u64 XXH_read64(const void* ptr)
2191 {
2192     typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
2193     return ((const xxh_unalign64*)ptr)->u64;
2194 }
2195 
2196 #else
2197 
2198 /*
2199  * Portable and safe solution. Generally efficient.
2200  * see: https://stackoverflow.com/a/32095106/646947
2201  */
2202 static xxh_u64 XXH_read64(const void* memPtr)
2203 {
2204     xxh_u64 val;
2205     memcpy(&val, memPtr, sizeof(val));
2206     return val;
2207 }
2208 
2209 #endif   /* XXH_FORCE_MEMORY_ACCESS */
2210 
2211 #if defined(_MSC_VER)     /* Visual Studio */
2212 #  define XXH_swap64 _byteswap_uint64
2213 #elif XXH_GCC_VERSION >= 403
2214 #  define XXH_swap64 __builtin_bswap64
2215 #else
2216 static xxh_u64 XXH_swap64(xxh_u64 x)
2217 {
2218     return  ((x << 56) & 0xff00000000000000ULL) |
2219             ((x << 40) & 0x00ff000000000000ULL) |
2220             ((x << 24) & 0x0000ff0000000000ULL) |
2221             ((x << 8)  & 0x000000ff00000000ULL) |
2222             ((x >> 8)  & 0x00000000ff000000ULL) |
2223             ((x >> 24) & 0x0000000000ff0000ULL) |
2224             ((x >> 40) & 0x000000000000ff00ULL) |
2225             ((x >> 56) & 0x00000000000000ffULL);
2226 }
2227 #endif
2228 
2229 
2230 /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
2231 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2232 
2233 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
2234 {
2235     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2236     return bytePtr[0]
2237          | ((xxh_u64)bytePtr[1] << 8)
2238          | ((xxh_u64)bytePtr[2] << 16)
2239          | ((xxh_u64)bytePtr[3] << 24)
2240          | ((xxh_u64)bytePtr[4] << 32)
2241          | ((xxh_u64)bytePtr[5] << 40)
2242          | ((xxh_u64)bytePtr[6] << 48)
2243          | ((xxh_u64)bytePtr[7] << 56);
2244 }
2245 
2246 XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
2247 {
2248     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2249     return bytePtr[7]
2250          | ((xxh_u64)bytePtr[6] << 8)
2251          | ((xxh_u64)bytePtr[5] << 16)
2252          | ((xxh_u64)bytePtr[4] << 24)
2253          | ((xxh_u64)bytePtr[3] << 32)
2254          | ((xxh_u64)bytePtr[2] << 40)
2255          | ((xxh_u64)bytePtr[1] << 48)
2256          | ((xxh_u64)bytePtr[0] << 56);
2257 }
2258 
2259 #else
2260 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
2261 {
2262     return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
2263 }
2264 
2265 static xxh_u64 XXH_readBE64(const void* ptr)
2266 {
2267     return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
2268 }
2269 #endif
2270 
2271 XXH_FORCE_INLINE xxh_u64
2272 XXH_readLE64_align(const void* ptr, XXH_alignment align)
2273 {
2274     if (align==XXH_unaligned)
2275         return XXH_readLE64(ptr);
2276     else
2277         return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
2278 }
2279 
2280 
2281 /*******   xxh64   *******/
2282 /*!
2283  * @}
2284  * @defgroup xxh64_impl XXH64 implementation
2285  * @ingroup impl
2286  * @{
2287  */
2288 static const xxh_u64 XXH_PRIME64_1 = 0x9E3779B185EBCA87ULL;   /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
2289 static const xxh_u64 XXH_PRIME64_2 = 0xC2B2AE3D27D4EB4FULL;   /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
2290 static const xxh_u64 XXH_PRIME64_3 = 0x165667B19E3779F9ULL;   /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
2291 static const xxh_u64 XXH_PRIME64_4 = 0x85EBCA77C2B2AE63ULL;   /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
2292 static const xxh_u64 XXH_PRIME64_5 = 0x27D4EB2F165667C5ULL;   /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
2293 
2294 #ifdef XXH_OLD_NAMES
2295 #  define PRIME64_1 XXH_PRIME64_1
2296 #  define PRIME64_2 XXH_PRIME64_2
2297 #  define PRIME64_3 XXH_PRIME64_3
2298 #  define PRIME64_4 XXH_PRIME64_4
2299 #  define PRIME64_5 XXH_PRIME64_5
2300 #endif
2301 
2302 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
2303 {
2304     acc += input * XXH_PRIME64_2;
2305     acc  = XXH_rotl64(acc, 31);
2306     acc *= XXH_PRIME64_1;
2307     return acc;
2308 }
2309 
2310 static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
2311 {
2312     val  = XXH64_round(0, val);
2313     acc ^= val;
2314     acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
2315     return acc;
2316 }
2317 
2318 static xxh_u64 XXH64_avalanche(xxh_u64 h64)
2319 {
2320     h64 ^= h64 >> 33;
2321     h64 *= XXH_PRIME64_2;
2322     h64 ^= h64 >> 29;
2323     h64 *= XXH_PRIME64_3;
2324     h64 ^= h64 >> 32;
2325     return h64;
2326 }
2327 
2328 
2329 #define XXH_get64bits(p) XXH_readLE64_align(p, align)
2330 
2331 static xxh_u64
2332 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
2333 {
2334 #define XXH_PROCESS1_64 do {                                   \
2335     h64 ^= (*ptr++) * XXH_PRIME64_5;                           \
2336     h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;                 \
2337 } while (0)
2338 
2339 #define XXH_PROCESS4_64 do {                                   \
2340     h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;      \
2341     ptr += 4;                                              \
2342     h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;     \
2343 } while (0)
2344 
2345 #define XXH_PROCESS8_64 do {                                   \
2346     xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \
2347     ptr += 8;                                              \
2348     h64 ^= k1;                                             \
2349     h64  = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;     \
2350 } while (0)
2351 
2352     /* Rerolled version for 32-bit targets is faster and much smaller. */
2353     if (XXH_REROLL || XXH_REROLL_XXH64) {
2354         len &= 31;
2355         while (len >= 8) {
2356             XXH_PROCESS8_64;
2357             len -= 8;
2358         }
2359         if (len >= 4) {
2360             XXH_PROCESS4_64;
2361             len -= 4;
2362         }
2363         while (len > 0) {
2364             XXH_PROCESS1_64;
2365             --len;
2366         }
2367         return XXH64_avalanche(h64);
2368     } else {
2369         switch(len & 31) {
2370            case 24: XXH_PROCESS8_64;
2371                          /* fallthrough */
2372            case 16: XXH_PROCESS8_64;
2373                          /* fallthrough */
2374            case  8: XXH_PROCESS8_64;
2375                     return XXH64_avalanche(h64);
2376 
2377            case 28: XXH_PROCESS8_64;
2378                          /* fallthrough */
2379            case 20: XXH_PROCESS8_64;
2380                          /* fallthrough */
2381            case 12: XXH_PROCESS8_64;
2382                          /* fallthrough */
2383            case  4: XXH_PROCESS4_64;
2384                     return XXH64_avalanche(h64);
2385 
2386            case 25: XXH_PROCESS8_64;
2387                          /* fallthrough */
2388            case 17: XXH_PROCESS8_64;
2389                          /* fallthrough */
2390            case  9: XXH_PROCESS8_64;
2391                     XXH_PROCESS1_64;
2392                     return XXH64_avalanche(h64);
2393 
2394            case 29: XXH_PROCESS8_64;
2395                          /* fallthrough */
2396            case 21: XXH_PROCESS8_64;
2397                          /* fallthrough */
2398            case 13: XXH_PROCESS8_64;
2399                          /* fallthrough */
2400            case  5: XXH_PROCESS4_64;
2401                     XXH_PROCESS1_64;
2402                     return XXH64_avalanche(h64);
2403 
2404            case 26: XXH_PROCESS8_64;
2405                          /* fallthrough */
2406            case 18: XXH_PROCESS8_64;
2407                          /* fallthrough */
2408            case 10: XXH_PROCESS8_64;
2409                     XXH_PROCESS1_64;
2410                     XXH_PROCESS1_64;
2411                     return XXH64_avalanche(h64);
2412 
2413            case 30: XXH_PROCESS8_64;
2414                          /* fallthrough */
2415            case 22: XXH_PROCESS8_64;
2416                          /* fallthrough */
2417            case 14: XXH_PROCESS8_64;
2418                          /* fallthrough */
2419            case  6: XXH_PROCESS4_64;
2420                     XXH_PROCESS1_64;
2421                     XXH_PROCESS1_64;
2422                     return XXH64_avalanche(h64);
2423 
2424            case 27: XXH_PROCESS8_64;
2425                          /* fallthrough */
2426            case 19: XXH_PROCESS8_64;
2427                          /* fallthrough */
2428            case 11: XXH_PROCESS8_64;
2429                     XXH_PROCESS1_64;
2430                     XXH_PROCESS1_64;
2431                     XXH_PROCESS1_64;
2432                     return XXH64_avalanche(h64);
2433 
2434            case 31: XXH_PROCESS8_64;
2435                          /* fallthrough */
2436            case 23: XXH_PROCESS8_64;
2437                          /* fallthrough */
2438            case 15: XXH_PROCESS8_64;
2439                          /* fallthrough */
2440            case  7: XXH_PROCESS4_64;
2441                          /* fallthrough */
2442            case  3: XXH_PROCESS1_64;
2443                          /* fallthrough */
2444            case  2: XXH_PROCESS1_64;
2445                          /* fallthrough */
2446            case  1: XXH_PROCESS1_64;
2447                          /* fallthrough */
2448            case  0: return XXH64_avalanche(h64);
2449         }
2450     }
2451     /* impossible to reach */
2452     XXH_ASSERT(0);
2453     return 0;  /* unreachable, but some compilers complain without it */
2454 }
2455 
2456 #ifdef XXH_OLD_NAMES
2457 #  define PROCESS1_64 XXH_PROCESS1_64
2458 #  define PROCESS4_64 XXH_PROCESS4_64
2459 #  define PROCESS8_64 XXH_PROCESS8_64
2460 #else
2461 #  undef XXH_PROCESS1_64
2462 #  undef XXH_PROCESS4_64
2463 #  undef XXH_PROCESS8_64
2464 #endif
2465 
2466 XXH_FORCE_INLINE xxh_u64
2467 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
2468 {
2469     const xxh_u8* bEnd = input + len;
2470     xxh_u64 h64;
2471 
2472 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
2473     if (input==NULL) {
2474         len=0;
2475         bEnd=input=(const xxh_u8*)(size_t)32;
2476     }
2477 #endif
2478 
2479     if (len>=32) {
2480         const xxh_u8* const limit = bEnd - 32;
2481         xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2482         xxh_u64 v2 = seed + XXH_PRIME64_2;
2483         xxh_u64 v3 = seed + 0;
2484         xxh_u64 v4 = seed - XXH_PRIME64_1;
2485 
2486         do {
2487             v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
2488             v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
2489             v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
2490             v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
2491         } while (input<=limit);
2492 
2493         h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
2494         h64 = XXH64_mergeRound(h64, v1);
2495         h64 = XXH64_mergeRound(h64, v2);
2496         h64 = XXH64_mergeRound(h64, v3);
2497         h64 = XXH64_mergeRound(h64, v4);
2498 
2499     } else {
2500         h64  = seed + XXH_PRIME64_5;
2501     }
2502 
2503     h64 += (xxh_u64) len;
2504 
2505     return XXH64_finalize(h64, input, len, align);
2506 }
2507 
2508 
2509 /*! @ingroup xxh64_family */
2510 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
2511 {
2512 #if 0
2513     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2514     XXH64_state_t state;
2515     XXH64_reset(&state, seed);
2516     XXH64_update(&state, (const xxh_u8*)input, len);
2517     return XXH64_digest(&state);
2518 #else
2519     if (XXH_FORCE_ALIGN_CHECK) {
2520         if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
2521             return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2522     }   }
2523 
2524     return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2525 
2526 #endif
2527 }
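/*
 * Illustrative usage: same calling convention as XXH32(), but with a 64-bit
 * seed and result (`data` and `size` stand for any input buffer, and the
 * seed value is arbitrary):
 *
 *     XXH64_hash_t const h = XXH64(data, size, 0xCAFEBABEULL);
 */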
2528 
2529 /*******   Hash Streaming   *******/
2530 
2531 /*! @ingroup xxh64_family */
2532 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
2533 {
2534     return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
2535 }
2536 /*! @ingroup xxh64_family */
2537 XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
2538 {
2539     XXH_free(statePtr);
2540     return XXH_OK;
2541 }
2542 
2543 /*! @ingroup xxh64_family */
2544 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
2545 {
2546     memcpy(dstState, srcState, sizeof(*dstState));
2547 }
2548 
2549 /*! @ingroup xxh64_family */
2550 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
2551 {
2552     XXH64_state_t state;   /* use a local state to memcpy() in order to avoid strict-aliasing warnings */
2553     memset(&state, 0, sizeof(state));
2554     state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2555     state.v2 = seed + XXH_PRIME64_2;
2556     state.v3 = seed + 0;
2557     state.v4 = seed - XXH_PRIME64_1;
2558      /* do not write into reserved64, might be removed in a future version */
2559     memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
2560     return XXH_OK;
2561 }
2562 
2563 /*! @ingroup xxh64_family */
2564 XXH_PUBLIC_API XXH_errorcode
2565 XXH64_update (XXH64_state_t* state, const void* input, size_t len)
2566 {
2567     if (input==NULL)
2568 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
2569         return XXH_OK;
2570 #else
2571         return XXH_ERROR;
2572 #endif
2573 
2574     {   const xxh_u8* p = (const xxh_u8*)input;
2575         const xxh_u8* const bEnd = p + len;
2576 
2577         state->total_len += len;
2578 
2579         if (state->memsize + len < 32) {  /* fill in tmp buffer */
2580             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
2581             state->memsize += (xxh_u32)len;
2582             return XXH_OK;
2583         }
2584 
2585         if (state->memsize) {   /* tmp buffer is full */
2586             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
2587             state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
2588             state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
2589             state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
2590             state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
2591             p += 32 - state->memsize;
2592             state->memsize = 0;
2593         }
2594 
2595         if (p+32 <= bEnd) {
2596             const xxh_u8* const limit = bEnd - 32;
2597             xxh_u64 v1 = state->v1;
2598             xxh_u64 v2 = state->v2;
2599             xxh_u64 v3 = state->v3;
2600             xxh_u64 v4 = state->v4;
2601 
2602             do {
2603                 v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
2604                 v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
2605                 v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
2606                 v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
2607             } while (p<=limit);
2608 
2609             state->v1 = v1;
2610             state->v2 = v2;
2611             state->v3 = v3;
2612             state->v4 = v4;
2613         }
2614 
2615         if (p < bEnd) {
2616             XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
2617             state->memsize = (unsigned)(bEnd-p);
2618         }
2619     }
2620 
2621     return XXH_OK;
2622 }
2623 
2624 
2625 /*! @ingroup xxh64_family */
2626 XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
2627 {
2628     xxh_u64 h64;
2629 
2630     if (state->total_len >= 32) {
2631         xxh_u64 const v1 = state->v1;
2632         xxh_u64 const v2 = state->v2;
2633         xxh_u64 const v3 = state->v3;
2634         xxh_u64 const v4 = state->v4;
2635 
2636         h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
2637         h64 = XXH64_mergeRound(h64, v1);
2638         h64 = XXH64_mergeRound(h64, v2);
2639         h64 = XXH64_mergeRound(h64, v3);
2640         h64 = XXH64_mergeRound(h64, v4);
2641     } else {
2642         h64  = state->v3 /*seed*/ + XXH_PRIME64_5;
2643     }
2644 
2645     h64 += (xxh_u64) state->total_len;
2646 
2647     return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
2648 }
2649 
2650 
2651 /*******   Canonical representation   *******/
2652 
2653 /*! @ingroup xxh64_family */
2654 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
2655 {
2656     XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
2657     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
2658     memcpy(dst, &hash, sizeof(*dst));
2659 }
2660 
2661 /*! @ingroup xxh64_family */
2662 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
2663 {
2664     return XXH_readBE64(src);
2665 }
2666 
2667 
2668 
2669 /* *********************************************************************
2670 *  XXH3
2671 *  New generation hash designed for speed on small keys and vectorization
2672 ************************************************************************ */
2673 /*!
2674  * @}
2675  * @defgroup xxh3_impl XXH3 implementation
2676  * @ingroup impl
2677  * @{
2678  */
2679 
2680 /* ===   Compiler specifics   === */
2681 
2682 #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
2683 #  define XXH_RESTRICT   restrict
2684 #else
2685 /* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
2686 #  define XXH_RESTRICT   /* disable */
2687 #endif
2688 
2689 #if (defined(__GNUC__) && (__GNUC__ >= 3))  \
2690   || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
2691   || defined(__clang__)
2692 #    define XXH_likely(x) __builtin_expect(x, 1)
2693 #    define XXH_unlikely(x) __builtin_expect(x, 0)
2694 #else
2695 #    define XXH_likely(x) (x)
2696 #    define XXH_unlikely(x) (x)
2697 #endif
2698 
2699 #if defined(__GNUC__)
2700 #  if defined(__AVX2__)
2701 #    include <immintrin.h>
2702 #  elif defined(__SSE2__)
2703 #    include <emmintrin.h>
2704 #  elif defined(__ARM_NEON__) || defined(__ARM_NEON)
2705 #    define inline __inline__  /* circumvent a clang bug */
2706 #    include <arm_neon.h>
2707 #    undef inline
2708 #  endif
2709 #elif defined(_MSC_VER)
2710 #  include <intrin.h>
2711 #endif
2712 
2713 /*
2714  * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
2715  * remaining a true 64-bit/128-bit hash function.
2716  *
2717  * This is done by prioritizing a subset of 64-bit operations that can be
2718  * emulated without too many steps on the average 32-bit machine.
2719  *
2720  * For example, these two lines seem similar, and run equally fast on 64-bit:
2721  *
2722  *   xxh_u64 x;
2723  *   x ^= (x >> 47); // good
2724  *   x ^= (x >> 13); // bad
2725  *
2726  * However, to a 32-bit machine, there is a major difference.
2727  *
2728  * x ^= (x >> 47) looks like this:
2729  *
2730  *   x.lo ^= (x.hi >> (47 - 32));
2731  *
2732  * while x ^= (x >> 13) looks like this:
2733  *
2734  *   // note: funnel shifts are not usually cheap.
2735  *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
2736  *   x.hi ^= (x.hi >> 13);
2737  *
2738  * The first one is significantly faster than the second, simply because the
2739  * shift is larger than 32. This means:
2740  *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
2741  *    32 bits in the shift.
2742  *  - The shift result will always fit in the lower 32 bits, and therefore,
2743  *    we can ignore the upper 32 bits in the xor.
2744  *
2745  * Thanks to this optimization, XXH3 only requires these features to be efficient:
2746  *
2747  *  - Usable unaligned access
2748  *  - A 32-bit or 64-bit ALU
2749  *      - If 32-bit, a decent ADC instruction
2750  *  - A 32 or 64-bit multiply with a 64-bit result
2751  *  - For the 128-bit variant, a decent byteswap helps short inputs.
2752  *
2753  * The first two are already required by XXH32, and almost all 32-bit and 64-bit
2754  * platforms which can run XXH32 can run XXH3 efficiently.
2755  *
2756  * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
2757  * notable exception.
2758  *
2759  * First of all, Thumb-1 lacks support for the UMULL instruction which
2760  * performs the important long multiply. This means numerous __aeabi_lmul
2761  * calls.
2762  *
2763  * Second of all, the 8 functional registers are just not enough.
2764  * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
2765  * Lo registers, and this shuffling results in thousands more MOVs than A32.
2766  *
2767  * A32 and T32 don't have this limitation. They can access all 14 registers,
2768  * do a 32->64 multiply with UMULL, and the flexible operand allowing free
2769  * shifts is helpful, too.
2770  *
2771  * Therefore, we do a quick sanity check.
2772  *
2773  * If compiling Thumb-1 for a target which supports ARM instructions, we will
2774  * emit a warning, as it is not a "sane" platform to compile for.
2775  *
2776  * Usually, if this happens, it is because of an accident and you probably need
2777  * to specify -march, as you likely meant to compile for a newer architecture.
2778  *
2779  * Credit: large sections of the vectorial and asm source code paths
2780  *         have been contributed by @easyaspi314
2781  */
2782 #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
2783 #   warning "XXH3 is highly inefficient without ARM or Thumb-2."
2784 #endif
2785 
2786 /* ==========================================
2787  * Vectorization detection
2788  * ========================================== */
2789 
2790 #ifdef XXH_DOXYGEN
2791 /*!
2792  * @ingroup tuning
2793  * @brief Overrides the vectorization implementation chosen for XXH3.
2794  *
2795  * Can be defined to 0 to disable SIMD or any of the values mentioned in
2796  * @ref XXH_VECTOR_TYPE.
2797  *
2798  * If this is not defined, it uses predefined macros to determine the best
2799  * implementation.
2800  */
2801 #  define XXH_VECTOR XXH_SCALAR
2802 /*!
2803  * @ingroup tuning
2804  * @brief Possible values for @ref XXH_VECTOR.
2805  *
2806  * Note that these are actually implemented as macros.
2807  *
2808  * If this is not defined, it is detected automatically.
2809  * @ref XXH_X86DISPATCH overrides this.
2810  */
2811 enum XXH_VECTOR_TYPE /* fake enum */ {
2812     XXH_SCALAR = 0,  /*!< Portable scalar version */
2813     XXH_SSE2   = 1,  /*!<
2814                       * SSE2 for Pentium 4, Opteron, all x86_64.
2815                       *
2816                       * @note SSE2 is also guaranteed on Windows 10, macOS, and
2817                       * Android x86.
2818                       */
2819     XXH_AVX2   = 2,  /*!< AVX2 for Haswell and Bulldozer */
2820     XXH_AVX512 = 3,  /*!< AVX512 for Skylake and Icelake */
2821     XXH_NEON   = 4,  /*!< NEON for most ARMv7-A and all AArch64 */
2822     XXH_VSX    = 5,  /*!< VSX and ZVector for POWER8/z13 (64-bit) */
2823 };
/*!
 * @ingroup tuning
 * @brief Selects the minimum alignment for XXH3's accumulators.
 *
 * When using SIMD, this should match the alignment required for said vector
 * type, so, for example, 32 for AVX2.
 *
 * Default: Auto detected.
 */
#  define XXH_ACC_ALIGN 8
#endif

/* Actual definition */
#ifndef XXH_DOXYGEN
#  define XXH_SCALAR 0
#  define XXH_SSE2   1
#  define XXH_AVX2   2
#  define XXH_AVX512 3
#  define XXH_NEON   4
#  define XXH_VSX    5
#endif

#ifndef XXH_VECTOR    /* can be defined on command line */
#  if defined(__AVX512F__)
#    define XXH_VECTOR XXH_AVX512
#  elif defined(__AVX2__)
#    define XXH_VECTOR XXH_AVX2
#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
#    define XXH_VECTOR XXH_SSE2
#  elif defined(__GNUC__) /* msvc support maybe later */ \
  && (defined(__ARM_NEON__) || defined(__ARM_NEON)) \
  && (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \
    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
#    define XXH_VECTOR XXH_NEON
#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
     || (defined(__s390x__) && defined(__VEC__)) \
     && defined(__GNUC__) /* TODO: IBM XL */
#    define XXH_VECTOR XXH_VSX
#  else
#    define XXH_VECTOR XXH_SCALAR
#  endif
#endif

/*
 * Controls the alignment of the accumulator,
 * for compatibility with aligned vector loads, which are usually faster.
 */
#ifndef XXH_ACC_ALIGN
#  if defined(XXH_X86DISPATCH)
#     define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
#     define XXH_ACC_ALIGN 8
#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
#     define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
#     define XXH_ACC_ALIGN 32
#  elif XXH_VECTOR == XXH_NEON  /* neon */
#     define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_VSX   /* vsx */
#     define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
#     define XXH_ACC_ALIGN 64
#  endif
#endif

#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
#else
#  define XXH_SEC_ALIGN 8
#endif

/*
 * UGLY HACK:
 * GCC usually generates the best code with -O3 for xxHash.
 *
 * However, when targeting AVX2, it is overzealous in its unrolling, resulting
 * in code roughly 3/4 the speed of Clang.
 *
 * There are other issues, such as GCC splitting _mm256_loadu_si256 into
 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
 * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
 *
 * That is why when compiling the AVX2 version, it is recommended to use either
 *   -O2 -mavx2 -march=haswell
 * or
 *   -O2 -mavx2 -mno-avx256-split-unaligned-load
 * for decent performance, or to use Clang instead.
 *
 * Fortunately, we can control the first one with a pragma that forces GCC into
 * -O2, but the other one we can't control without "failed to inline always
 * inline function due to target mismatch" warnings.
 */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC push_options
#  pragma GCC optimize("-O2")
#endif


#if XXH_VECTOR == XXH_NEON
/*
 * NEON's setup for vmlal_u32 is a little more complicated than it is on
 * SSE2, AVX2, and VSX.
 *
 * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
 *
 * To do the same operation, the 128-bit 'Q' register needs to be split into
 * two 64-bit 'D' registers, performing this operation::
 *
 *   [                a                 |                 b                ]
 *            |              '---------. .--------'                |
 *            |                         x                          |
 *            |              .---------' '--------.                |
 *   [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[    a >> 32     |     b >> 32    ]
 *
 * Due to significant changes in aarch64, the fastest method for aarch64 is
 * completely different than the fastest method for ARMv7-A.
 *
 * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
 * D11 will modify the high half of Q5. This is similar to how modifying AH
 * will only affect bits 8-15 of AX on x86.
 *
 * VZIP takes two registers, and puts even lanes in one register and odd lanes
 * in the other.
 *
 * On ARMv7-A, this strangely modifies both parameters in place instead of
 * taking the usual 3-operand form.
 *
 * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
 * lower and upper halves of the Q register to end up with the high and low
 * halves where we want - all in one instruction.
 *
 *   vzip.32   d10, d11       @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
 *
 * Unfortunately, we need inline assembly for this: an instruction that
 * modifies two registers at once is not expressible in GCC or Clang's IR,
 * so without the asm they have to create a copy.
 *
 * aarch64 requires a different approach.
 *
 * In order to make it easier to write a decent compiler for aarch64, many
 * quirks were removed, such as conditional execution.
 *
 * NEON was also affected by this.
 *
 * aarch64 cannot access the high bits of a Q-form register, and writes to a
 * D-form register zero the high bits, similar to how writes to W-form scalar
 * registers (or DWORD registers on x86_64) work.
 *
 * The formerly free vget_high intrinsics now require a vext (with a few
 * exceptions).
 *
 * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
 * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
 * operand.
 *
 * The equivalent of the VZIP.32 on the lower and upper halves would be this
 * mess:
 *
 *   ext     v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
 *   zip1    v1.2s, v0.2s, v2.2s     // v1 = { v0[0], v2[0] }
 *   zip2    v0.2s, v0.2s, v2.2s     // v0 = { v0[1], v2[1] }
 *
 * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
 *
 *   shrn    v1.2s, v0.2d, #32  // v1 = (uint32x2_t)(v0 >> 32);
 *   xtn     v0.2s, v0.2d       // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
 *
 * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
 */

/*!
 * Function-like macro:
 * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
 * {
 *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
 *     outHi = (uint32x2_t)(in >> 32);
 *     in = UNDEFINED;
 * }
 */
# if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
   && defined(__GNUC__) \
   && !defined(__aarch64__) && !defined(__arm64__)
#  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                              \
    do {                                                                                    \
      /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
      /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */     \
      /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
      __asm__("vzip.32  %e0, %f0" : "+w" (in));                                             \
      (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in));                                   \
      (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                                   \
   } while (0)
# else
#  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
    do {                                                                                  \
      (outLo) = vmovn_u64    (in);                                                        \
      (outHi) = vshrn_n_u64  ((in), 32);                                                  \
    } while (0)
# endif
#endif  /* XXH_VECTOR == XXH_NEON */
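
/*
 * To make the lane bookkeeping concrete, here is a worked example of
 * XXH_SPLIT_IN_PLACE (values chosen purely for illustration):
 *
 *   in    = { 0x0123456789ABCDEF, 0xFEDCBA9876543210 }   // uint64x2_t
 *   outLo = { 0x89ABCDEF, 0x76543210 }                   // low 32 bits of each lane
 *   outHi = { 0x01234567, 0xFEDCBA98 }                   // high 32 bits of each lane
 *
 * Both implementations above produce exactly this; they differ only in how
 * many instructions the split costs on each architecture.
 */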

/*
 * VSX and Z Vector helpers.
 *
 * This is very messy, and any pull requests to clean this up are welcome.
 *
 * There are a lot of problems with supporting VSX and s390x, due to
 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
 */
#if XXH_VECTOR == XXH_VSX
#  if defined(__s390x__)
#    include <s390intrin.h>
#  else
/* gcc's altivec.h can have the unwanted consequence of unconditionally
 * #define-ing the bool, vector, and pixel keywords,
 * with bad consequences for programs already using these keywords for other purposes.
 * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
 * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
 * but it seems that, in some cases, it isn't.
 * Force the build macro to be defined, so that keywords are not altered.
 */
#    if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
#      define __APPLE_ALTIVEC__
#    endif
#    include <altivec.h>
#  endif

typedef __vector unsigned long long xxh_u64x2;
typedef __vector unsigned char xxh_u8x16;
typedef __vector unsigned xxh_u32x4;

# ifndef XXH_VSX_BE
#  if defined(__BIG_ENDIAN__) \
  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_VSX_BE 1
#  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
#    warning "-maltivec=be is not recommended. Please use native endianness."
#    define XXH_VSX_BE 1
#  else
#    define XXH_VSX_BE 0
#  endif
# endif /* !defined(XXH_VSX_BE) */

# if XXH_VSX_BE
#  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
#    define XXH_vec_revb vec_revb
#  else
/*!
 * A polyfill for POWER9's vec_revb().
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
{
    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
    return vec_perm(val, val, vByteSwap);
}
#  endif
# endif /* XXH_VSX_BE */

/*!
 * Performs an unaligned vector load and byte swaps it on big endian.
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
{
    xxh_u64x2 ret;
    memcpy(&ret, ptr, sizeof(xxh_u64x2));
# if XXH_VSX_BE
    ret = XXH_vec_revb(ret);
# endif
    return ret;
}

/*
 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
 *
 * These intrinsics weren't added until GCC 8, despite existing for a while,
 * and they are endian dependent. Also, their meanings swap depending on
 * the version.
 */
# if defined(__s390x__)
 /* s390x is always big endian, no issue on this platform */
#  define XXH_vec_mulo vec_mulo
#  define XXH_vec_mule vec_mule
# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
#  define XXH_vec_mulo __builtin_altivec_vmulouw
#  define XXH_vec_mule __builtin_altivec_vmuleuw
# else
/* gcc needs inline assembly */
/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
# endif /* XXH_vec_mulo, XXH_vec_mule */
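
/*
 * As a mental model (an illustrative sketch, not part of any API), the two
 * wrappers behave like this on 4 x 32-bit inputs, producing 2 x 64-bit
 * products from the even- and odd-numbered lanes respectively:
 *
 *   result_mule[i] = (xxh_u64)a[2*i]     * (xxh_u64)b[2*i];      // "even" lanes
 *   result_mulo[i] = (xxh_u64)a[2*i + 1] * (xxh_u64)b[2*i + 1];  // "odd" lanes
 *
 * for i in {0, 1}. Which physical lanes count as "even" depends on the
 * element order in effect, which is exactly the endianness headache the
 * wrappers above pin down to one consistent behavior.
 */
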
#endif /* XXH_VECTOR == XXH_VSX */


/* prefetch
 * can be disabled by defining the XXH_NO_PREFETCH build macro */
#if defined(XXH_NO_PREFETCH)
#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
#else
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#  else
#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
#  endif
#endif  /* XXH_NO_PREFETCH */


/* ==========================================
 * XXH3 default settings
 * ========================================== */

#define XXH_SECRET_DEFAULT_SIZE 192   /* must be >= XXH3_SECRET_SIZE_MIN */

#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
#  error "default keyset is not large enough"
#endif

/*! Pseudorandom secret taken directly from FARSH. */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};


#ifdef XXH_OLD_NAMES
#  define kSecret XXH3_kSecret
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Calculates a 32-bit to 64-bit long multiply.
 *
 * Implemented as a macro.
 *
 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
 * need to (but it shouldn't need to anyway, it is about 7 instructions to do
 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
 * use that instead of the normal method.
 *
 * If you are compiling for platforms like Thumb-1 and don't have a better option,
 * you may also want to write your own long multiply routine here.
 *
 * @param x, y Numbers to be multiplied
 * @return 64-bit product of the low 32 bits of @p x and @p y.
 */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
   return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
#    include <intrin.h>
#    define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/*
 * Downcast + upcast is usually better than masking on older compilers like
 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
 *
 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
 */
#    define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
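
/*
 * A quick worked example (illustration only): only the low 32 bits of each
 * argument participate, but the product keeps all 64 bits:
 *
 *   XXH_mult32to64(0x100000003, 0x00000002) == 0x00000003 * 0x00000002 == 6
 *   XXH_mult32to64(0xFFFFFFFF,  0xFFFFFFFF) == 0xFFFFFFFE00000001
 *
 * (0xFFFFFFFF * 0xFFFFFFFF = (2^32 - 1)^2 = 2^64 - 2^33 + 1.)
 */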

/*!
 * @brief Calculates a 64->128-bit long multiply.
 *
 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
 * version.
 *
 * @param lhs, rhs The 64-bit integers to be multiplied
 * @return The 128-bit result represented in an @ref XXH128_hash_t.
 */
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
    /*
     * GCC/Clang __uint128_t method.
     *
     * On most 64-bit targets, GCC and Clang define a __uint128_t type.
     * This is usually the best way as it usually uses a native long 64-bit
     * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
     *
     * Usually.
     *
     * Despite wasm being a 32-bit platform, Clang (and emscripten) define this
     * type without having the arithmetic for it. This results in a laggy
     * compiler builtin call which calculates a full 128-bit multiply.
     * In that case it is best to use the portable one.
     * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
     */
#if defined(__GNUC__) && !defined(__wasm__) \
    && defined(__SIZEOF_INT128__) \
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)

    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
    XXH128_hash_t r128;
    r128.low64  = (xxh_u64)(product);
    r128.high64 = (xxh_u64)(product >> 64);
    return r128;

    /*
     * MSVC for x64's _umul128 method.
     *
     * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
     *
     * This compiles to a single-operand MUL on x64.
     */
#elif defined(_M_X64) || defined(_M_IA64)

#ifndef _MSC_VER
#   pragma intrinsic(_umul128)
#endif
    xxh_u64 product_high;
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
    XXH128_hash_t r128;
    r128.low64  = product_low;
    r128.high64 = product_high;
    return r128;

#else
    /*
     * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
     *
     * This is a fast and simple grade school multiply, which is shown below
     * with base 10 arithmetic instead of base 0x100000000.
     *
     *           9 3 // D2 lhs = 93
     *         x 7 5 // D2 rhs = 75
     *     ----------
     *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
     *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
     *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
     *     + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
     *     ---------
     *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
     *     + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
     *     ---------
     *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
     *
     * The reasons for adding the products like this are:
     *  1. It avoids manual carry tracking. Just like how
     *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
     *     This avoids a lot of complexity.
     *
     *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
     *     instruction available in ARM's Digital Signal Processing extension
     *     in 32-bit ARMv6 and later, which is shown below:
     *
     *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
     *         {
     *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
     *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
     *             *RdHi = (xxh_u32)(product >> 32);
     *         }
     *
     *     This instruction was designed for efficient long multiplication, and
     *     allows this to be calculated in only 4 instructions at speeds
     *     comparable to some 64-bit ALUs.
     *
     *  3. It isn't terrible on other platforms. Usually this will be a couple
     *     of 32-bit ADD/ADCs.
     */

    /* First calculate all of the cross products. */
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32,        rhs >> 32);

    /* Now add the products together. These will never overflow. */
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

    XXH128_hash_t r128;
    r128.low64  = lower;
    r128.high64 = upper;
    return r128;
#endif
}

/*!
 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
 *
 * The reason for the separate function is to prevent passing too many structs
 * around by value. This will hopefully inline the multiply, but we don't force it.
 *
 * @param lhs, rhs The 64-bit integers to multiply
 * @return The low 64 bits of the product XOR'd by the high 64 bits.
 * @see XXH_mult64to128()
 */
static xxh_u64
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
{
    XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
    return product.low64 ^ product.high64;
}
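
/*
 * Worked example (illustration only): with lhs = 0xFFFFFFFFFFFFFFFF and
 * rhs = 2, the full product is 2^65 - 2, so
 *
 *   XXH_mult64to128(lhs, rhs)    -> { .low64 = 0xFFFFFFFFFFFFFFFE, .high64 = 1 }
 *   XXH3_mul128_fold64(lhs, rhs) == 0xFFFFFFFFFFFFFFFE ^ 1 == 0xFFFFFFFFFFFFFFFF
 *
 * The fold keeps the high half's entropy without widening the result.
 */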

/*! Seems to produce slightly better code on GCC for some reason. */
XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
{
    XXH_ASSERT(0 <= shift && shift < 64);
    return v64 ^ (v64 >> shift);
}

/*
 * This is a fast avalanche stage,
 * suitable when input bits are already partially mixed.
 */
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
{
    h64 = XXH_xorshift64(h64, 37);
    h64 *= 0x165667919E3779F9ULL;
    h64 = XXH_xorshift64(h64, 32);
    return h64;
}

/*
 * This is a stronger avalanche,
 * inspired by Pelle Evensen's rrmxmx.
 * Preferable when input has not been previously mixed.
 */
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
{
    /* this mix is inspired by Pelle Evensen's rrmxmx */
    h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
    h64 *= 0x9FB21C651E98DF25ULL;
    h64 ^= (h64 >> 35) + len;
    h64 *= 0x9FB21C651E98DF25ULL;
    return XXH_xorshift64(h64, 28);
}


/* ==========================================
 * Short keys
 * ==========================================
 * One of the shortcomings of XXH32 and XXH64 was that their performance was
 * sub-optimal on short lengths. They used an iterative algorithm which
 * strongly favored lengths that were a multiple of 4 or 8.
 *
 * Instead of iterating over individual inputs, we use a set of single shot
 * functions which piece together a range of lengths and operate in constant time.
 *
 * Additionally, the number of multiplies has been significantly reduced. This
 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
 *
 * Depending on the platform, this may or may not be faster than XXH32, but it
 * is almost guaranteed to be faster than XXH64.
 */

/*
 * At very short lengths, there isn't enough input to fully hide secrets, or use
 * the entire secret.
 *
 * There is also only a limited amount of mixing we can do before significantly
 * impacting performance.
 *
 * Therefore, we use different sections of the secret and always mix two secret
 * samples with an XOR. This should have no effect on performance on the
 * seedless or withSeed variants because everything _should_ be constant folded
 * by modern compilers.
 *
 * The XOR mixing hides individual parts of the secret and increases entropy.
 *
 * This adds an extra layer of strength for custom secrets.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combined = { input[0], 0x01, input[0], input[0] }
     * len = 2: combined = { input[1], 0x02, input[0], input[1] }
     * len = 3: combined = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8  const c1 = input[0];
        xxh_u8  const c2 = input[len >> 1];
        xxh_u8  const c3 = input[len - 1];
        xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
                               | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
        xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
        return XXH64_avalanche(keyed);
    }
}
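
/*
 * Worked example (illustration only): for the 2-byte input { 'A', 'B' }
 * (0x41, 0x42), we get c1 = 0x41, c2 = input[1] = 0x42, c3 = input[1] = 0x42,
 * so
 *
 *   combined = (0x41 << 16) | (0x42 << 24) | (0x42 << 0) | (2 << 8)
 *            = 0x42410242
 *
 * Even a 1-byte input populates all four bytes of the word, and the embedded
 * length byte keeps different lengths from colliding trivially.
 */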

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input1 = XXH_readLE32(input);
        xxh_u32 const input2 = XXH_readLE32(input + len - 4);
        xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
        xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
        xxh_u64 const keyed = input64 ^ bitflip;
        return XXH3_rrmxmx(keyed, len);
    }
}
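
/*
 * The seed line above deserves a worked example (illustration only): with
 * seed = 0x0000000012345678, XXH_swap32 of the low half is 0x78563412, so
 *
 *   seed ^= 0x78563412ULL << 32   ->   seed = 0x7856341212345678
 *
 * i.e. the low 32 bits of the seed are byte-swapped and mirrored into the
 * high half, so that even a small 32-bit seed perturbs both halves of the
 * 64-bit bitflip word.
 */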

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(8 <= len && len <= 16);
    {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
        xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
        xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
        xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
        xxh_u64 const acc = len
                          + XXH_swap64(input_lo) + input_hi
                          + XXH3_mul128_fold64(input_lo, input_hi);
        return XXH3_avalanche(acc);
    }
}

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
        if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
        return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
    }
}

/*
 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
 * multiplication by zero, affecting hashes of lengths 17 to 240.
 *
 * However, they are very unlikely.
 *
 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
 * unseeded non-cryptographic hashes, it does not attempt to defend itself
 * against specially crafted inputs, only random inputs.
 *
 * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
 * cancelling out the secret is taken an arbitrary number of times (addressed
 * in XXH3_accumulate_512), this collision is very unlikely with random inputs
 * and/or proper seeding:
 *
 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
 * function that is only called up to 16 times per hash with up to 240 bytes of
 * input.
 *
 * This is not too bad for a non-cryptographic hash function, especially with
 * only 64 bit outputs.
 *
 * The 128-bit variant (which trades some speed for strength) is NOT affected
 * by this, although it is always a good idea to use a proper seed if you care
 * about strength.
 */
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
    /*
     * UGLY HACK:
     * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
     * slower code.
     *
     * By forcing seed64 into a register, we disrupt the cost model and
     * cause it to scalarize. See `XXH32_round()`
     *
     * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
     * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
     * GCC 9.2, despite both emitting scalar code.
     *
     * GCC generates much better scalar code than Clang for the rest of XXH3,
     * which is why finding a more optimal codepath is of interest.
     */
    __asm__ ("" : "+r" (seed64));
#endif
    {   xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64 const input_hi = XXH_readLE64(input+8);
        return XXH3_mul128_fold64(
            input_lo ^ (XXH_readLE64(secret)   + seed64),
            input_hi ^ (XXH_readLE64(secret+8) - seed64)
        );
    }
}

/* For mid-range keys, XXH3 uses a Mum-hash variant. */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                     XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc += XXH3_mix16B(input+48, secret+96, seed);
                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
                }
                acc += XXH3_mix16B(input+32, secret+64, seed);
                acc += XXH3_mix16B(input+len-48, secret+80, seed);
            }
            acc += XXH3_mix16B(input+16, secret+32, seed);
            acc += XXH3_mix16B(input+len-32, secret+48, seed);
        }
        acc += XXH3_mix16B(input+0, secret+0, seed);
        acc += XXH3_mix16B(input+len-16, secret+16, seed);

        return XXH3_avalanche(acc);
    }
}
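
/*
 * The mixing pattern pairs blocks from the front and the back so that every
 * byte is covered. Worked example (illustration only) for len = 100:
 *
 *   front: input[ 0..15], input[16..31], input[32..47], input[48..63]
 *   back:  input[36..51], input[52..67], input[68..83], input[84..99]
 *
 * The front and back ranges overlap in the middle, so all 100 bytes feed at
 * least one XXH3_mix16B call, each with its own 16-byte secret slice.
 */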

#define XXH3_MIDSIZE_MAX 240

XXH_NO_INLINE XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    #define XXH3_MIDSIZE_STARTOFFSET 3
    #define XXH3_MIDSIZE_LASTOFFSET  17

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        int const nbRounds = (int)len / 16;
        int i;
        for (i=0; i<8; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
        }
        acc = XXH3_avalanche(acc);
        XXH_ASSERT(nbRounds >= 8);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
         * Everywhere else, it uses scalar code.
         *
         * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
         * would still be slower than UMAAL (see XXH_mult64to128).
         *
         * Unfortunately, Clang doesn't handle the long multiplies properly and
         * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
         * scalarized into an ugly mess of VMOV.32 instructions.
         *
         * This mess is difficult to avoid without turning autovectorization
         * off completely, but the issues are usually relatively minor and/or
         * not worth fixing.
         *
         * This loop is the easiest to fix, as unlike XXH32, this pragma
         * _actually works_ because it is a loop vectorization instead of an
         * SLP vectorization.
         */
        #pragma clang loop vectorize(disable)
#endif
        for (i=8 ; i < nbRounds; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
        }
        /* last bytes */
        acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
        return XXH3_avalanche(acc);
    }
}
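
/*
 * Shape of the computation, with a worked count (illustration only): for
 * len = 240, nbRounds = 240/16 = 15, so the first loop mixes blocks 0..7
 * against secret offsets 0, 16, ..., 112, and the second loop mixes blocks
 * 8..14 against secret offsets 3, 19, ..., 99 (shifted by
 * XXH3_MIDSIZE_STARTOFFSET). The final XXH3_mix16B then re-mixes the last
 * 16 input bytes with a dedicated secret slice ending
 * XXH3_MIDSIZE_LASTOFFSET bytes before XXH3_SECRET_SIZE_MIN, so the tail is
 * always covered even when len is not a multiple of 16.
 */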


/* =======     Long Keys     ======= */

#define XXH_STRIPE_LEN 64
#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))

#ifdef XXH_OLD_NAMES
#  define STRIPE_LEN XXH_STRIPE_LEN
#  define ACC_NB XXH_ACC_NB
#endif

XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
{
    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
    memcpy(dst, &v64, sizeof(v64));
}

/* Several intrinsic functions below are supposed to accept __int64 as an argument,
 * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
 * However, several environments do not define the __int64 type,
 * requiring a workaround.
 */
#if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
    typedef int64_t xxh_i64;
#else
    /* the following type must have a width of 64-bit */
    typedef long long xxh_i64;
#endif

/*
 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
 *
 * It is a hardened version of UMAC, based on FARSH's implementation.
 *
 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
 * implementations, and it is ridiculously fast.
 *
 * We harden it by mixing the original input to the accumulators as well as the product.
 *
 * This means that in the (relatively likely) case of a multiply by zero, the
 * original input is preserved.
 *
 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
 * cross-pollination, as otherwise the upper and lower halves would be
 * essentially independent.
 *
 * This doesn't matter on 64-bit hashes since they all get merged together in
 * the end, so we skip the extra step.
 *
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
 */

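/*
 * For orientation before the SIMD variants below, one stripe of accumulation
 * boils down to this scalar sketch (illustration only; the dedicated scalar
 * path appears later in this file):
 *
 *   for (i = 0; i < XXH_ACC_NB; i++) {
 *       xxh_u64 const data_val = XXH_readLE64(input + 8*i);
 *       xxh_u64 const data_key = data_val ^ XXH_readLE64(secret + 8*i);
 *       acc[i ^ 1] += data_val;   // swap adjacent lanes: harden + cross-pollinate
 *       acc[i]     += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
 *   }
 *
 * Each SIMD variant is just this, widened to its vector width.
 */
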
#if (XXH_VECTOR == XXH_AVX512) \
     || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)

#ifndef XXH_TARGET_AVX512
# define XXH_TARGET_AVX512  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    XXH_ALIGN(64) __m512i* const xacc = (__m512i *) acc;
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));

    {
        /* data_vec    = input[0]; */
        __m512i const data_vec    = _mm512_loadu_si512   (input);
        /* key_vec     = secret[0]; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        /* data_key    = data_vec ^ key_vec; */
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
        /* data_key_lo = data_key >> 32; */
        __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
        __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
        /* xacc[0] += swap(data_vec); */
        __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
        __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
        /* xacc[0] += product; */
        *xacc = _mm512_add_epi64(product, sum);
    }
}

/*
 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
 *
 * Multiplication isn't perfect, as explained by Google in HighwayHash:
 *
 *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
 *  // varying degrees. In descending order of goodness, bytes
 *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
 *  // As expected, the upper and lower bytes are much worse.
 *
 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
 *
 * Since our algorithm uses a pseudorandom secret to add some variance into the
 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
 *
 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
 * extraction.
 *
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
 */

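/*
 * Per 64-bit lane, the scramble is equivalent to this scalar sketch
 * (illustration only):
 *
 *   acc[i] ^= acc[i] >> 47;                    // xorshift
 *   acc[i] ^= XXH_readLE64(secret + 8*i);      // fold in the secret
 *   acc[i] *= XXH_PRIME32_1;                   // multiply by a 32-bit odd prime
 *
 * The SIMD versions below split the multiply into two 32x32->64 halves,
 * since a cheap full 64x64-bit vector multiply is not available on these
 * targets.
 */
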
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
    {   XXH_ALIGN(64) __m512i* const xacc = (__m512i*) acc;
        const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);

        /* xacc[0] ^= (xacc[0] >> 47) */
        __m512i const acc_vec     = *xacc;
        __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
        __m512i const data_vec    = _mm512_xor_si512     (acc_vec, shifted);
        /* xacc[0] ^= secret; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);

        /* xacc[0] *= XXH_PRIME32_1; */
        __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
        __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
        *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
    }
}

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
    XXH_ASSERT(((size_t)customSecret & 63) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
        __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, -(xxh_i64)seed64);

        XXH_ALIGN(64) const __m512i* const src  = (const __m512i*) XXH3_kSecret;
        XXH_ALIGN(64)       __m512i* const dest = (      __m512i*) customSecret;
        int i;
        for (i=0; i < nbRounds; ++i) {
            /* GCC has a bug where _mm512_stream_load_si512 accepts 'void*'
             * instead of 'void const*', which makes it warn
             * "discards 'const' qualifier". */
            union {
                XXH_ALIGN(64) const __m512i* cp;
                XXH_ALIGN(64) void* p;
            } remote_const_void;
            remote_const_void.cp = src + i;
            dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
    }   }
}
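
/*
 * In plain terms, every variant of XXH3_initCustomSecret computes this
 * (sketch, illustration only):
 *
 *   customSecret64[2*i]     = kSecret64[2*i]     + seed64;
 *   customSecret64[2*i + 1] = kSecret64[2*i + 1] - seed64;
 *
 * i.e. the seed is added to even 64-bit words of the default secret and
 * subtracted from odd ones; the 0xAA lane mask above selects the odd lanes
 * for negation.
 */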

#endif

#if (XXH_VECTOR == XXH_AVX2) \
    || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)

#ifndef XXH_TARGET_AVX2
# define XXH_TARGET_AVX2  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   XXH_ALIGN(32) __m256i* const xacc    =       (__m256i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const         __m256i* const xinput  = (const __m256i *) input;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const         __m256i* const xsecret = (const __m256i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* data_vec    = xinput[i]; */
            __m256i const data_vec    = _mm256_loadu_si256    (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
            __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm256_add_epi64(product, sum);
    }   }
}

XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   XXH_ALIGN(32) __m256i* const xacc = (__m256i*) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const         __m256i* const xsecret = (const __m256i *) secret;
        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m256i const acc_vec     = xacc[i];
            __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
            __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
            /* xacc[i] ^= xsecret; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1; */
            __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
            __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
        }
    }
}

XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
    (void)(&XXH_writeLE64);
    XXH_PREFETCH(customSecret);
    {   __m256i const seed = _mm256_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64, -(xxh_i64)seed64, (xxh_i64)seed64);

        XXH_ALIGN(64) const __m256i* const src  = (const __m256i*) XXH3_kSecret;
        XXH_ALIGN(64)       __m256i*       dest = (      __m256i*) customSecret;

#       if defined(__GNUC__) || defined(__clang__)
        /*
         * On GCC & Clang, marking 'dest' as modified will cause the compiler to:
         *   - not extract the secret from sse registers in the internal loop
         *   - use less common registers, and avoid pushing these regs onto the stack
         * The asm hack causes Clang to assume that XXH3_kSecretPtr aliases with
         * customSecret, and on aarch64, this prevented LDP from merging two
         * loads together for free. Putting the loads together before the stores
         * properly generates LDP.
         */
        __asm__("" : "+r" (dest));
#       endif

        /* GCC -O2 needs this loop unrolled manually */
        dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
        dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
        dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
        dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
        dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
        dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
    }
}

#endif

/* x86dispatch always generates SSE2 */
#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)

#ifndef XXH_TARGET_SSE2
# define XXH_TARGET_SSE2  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* SSE2 is just a half-scale version of the AVX2 version. */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   XXH_ALIGN(16) __m128i* const xacc    =       (__m128i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const         __m128i* const xinput  = (const __m128i *) input;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const         __m128i* const xsecret = (const __m128i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* data_vec    = xinput[i]; */
            __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
            __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm_add_epi64(product, sum);
    }   }
}

XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   XXH_ALIGN(16) __m128i* const xacc = (__m128i*) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const         __m128i* const xsecret = (const __m128i *) secret;
        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m128i const acc_vec     = xacc[i];
            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
            /* xacc[i] ^= xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1; */
            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
        }
    }
}

XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);

#       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
        /* MSVC 32-bit mode does not support _mm_set_epi64x before 2015 */
        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, -(xxh_i64)seed64 };
        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
#       else
        __m128i const seed = _mm_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64);
#       endif
        int i;

        XXH_ALIGN(64)        const float* const src  = (float const*) XXH3_kSecret;
        XXH_ALIGN(XXH_SEC_ALIGN) __m128i*       dest = (__m128i*) customSecret;
#       if defined(__GNUC__) || defined(__clang__)
        /*
         * On GCC & Clang, marking 'dest' as modified will cause the compiler to:
         *   - not extract the secret from sse registers in the internal loop
         *   - use less common registers, and avoid pushing these regs onto the stack
         */
        __asm__("" : "+r" (dest));
#       endif

        for (i=0; i < nbRounds; ++i) {
            dest[i] = _mm_add_epi64(_mm_castps_si128(_mm_load_ps(src+i*4)), seed);
    }   }
}

#endif

#if (XXH_VECTOR == XXH_NEON)

XXH_FORCE_INLINE void
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {
        XXH_ALIGN(16) uint64x2_t* const xacc = (uint64x2_t *) acc;
        /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
        uint8_t const* const xinput = (const uint8_t *) input;
        uint8_t const* const xsecret  = (const uint8_t *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
            /* data_vec = xinput[i]; */
            uint8x16_t data_vec    = vld1q_u8(xinput  + (i * 16));
            /* key_vec  = xsecret[i];  */
            uint8x16_t key_vec     = vld1q_u8(xsecret + (i * 16));
            uint64x2_t data_key;
            uint32x2_t data_key_lo, data_key_hi;
            /* xacc[i] += swap(data_vec); */
            uint64x2_t const data64  = vreinterpretq_u64_u8(data_vec);
            uint64x2_t const swapped = vextq_u64(data64, data64, 1);
            xacc[i] = vaddq_u64 (xacc[i], swapped);
            /* data_key = data_vec ^ key_vec; */
            data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
            /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
             * data_key_hi = (uint32x2_t) (data_key >> 32);
             * data_key = UNDEFINED; */
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
            /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
            xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);
        }
    }
}
4036 
4037 XXH_FORCE_INLINE void
4038 XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4039 {
4040     XXH_ASSERT((((size_t)acc) & 15) == 0);
4041 
4042     {   uint64x2_t* xacc       = (uint64x2_t*) acc;
4043         uint8_t const* xsecret = (uint8_t const*) secret;
4044         uint32x2_t prime       = vdup_n_u32 (XXH_PRIME32_1);
4045 
4046         size_t i;
4047         for (i=0; i < XXH_STRIPE_LEN/sizeof(uint64x2_t); i++) {
4048             /* xacc[i] ^= (xacc[i] >> 47); */
4049             uint64x2_t acc_vec  = xacc[i];
4050             uint64x2_t shifted  = vshrq_n_u64 (acc_vec, 47);
4051             uint64x2_t data_vec = veorq_u64   (acc_vec, shifted);
4052 
4053             /* xacc[i] ^= xsecret[i]; */
4054             uint8x16_t key_vec  = vld1q_u8(xsecret + (i * 16));
4055             uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec));
4056 
4057             /* xacc[i] *= XXH_PRIME32_1 */
4058             uint32x2_t data_key_lo, data_key_hi;
4059             /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
4060              * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
4061              * xacc[i] = UNDEFINED; */
4062             XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4063             {   /*
4064                  * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
4065                  *
4066                  * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
4067                  * incorrectly "optimize" this:
4068                  *   tmp     = vmul_u32(vmovn_u64(a), vmovn_u64(b));
4069                  *   shifted = vshll_n_u32(tmp, 32);
4070                  * to this:
4071                  *   tmp     = "vmulq_u64"(a, b); // no such thing!
4072                  *   shifted = vshlq_n_u64(tmp, 32);
4073                  *
4074                  * However, unlike SSE, Clang lacks a 64-bit multiply routine
4075                  * for NEON, and it scalarizes two 64-bit multiplies instead.
4076                  *
4077                  * vmull_u32 has the same timing as vmul_u32, and it avoids
4078                  * this bug completely.
4079                  * See https://bugs.llvm.org/show_bug.cgi?id=39967
4080                  */
4081                 uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
4082                 /* xacc[i] = prod_hi << 32; */
4083                 xacc[i] = vshlq_n_u64(prod_hi, 32);
4084                 /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
4085                 xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
4086             }
4087     }   }
4088 }
4089 
4090 #endif
4091 
4092 #if (XXH_VECTOR == XXH_VSX)
4093 
4094 XXH_FORCE_INLINE void
4095 XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
4096                     const void* XXH_RESTRICT input,
4097                     const void* XXH_RESTRICT secret)
4098 {
4099           xxh_u64x2* const xacc     =       (xxh_u64x2*) acc;    /* presumed aligned */
4100     xxh_u64x2 const* const xinput   = (xxh_u64x2 const*) input;   /* no alignment restriction */
4101     xxh_u64x2 const* const xsecret  = (xxh_u64x2 const*) secret;    /* no alignment restriction */
4102     xxh_u64x2 const v32 = { 32, 32 };
4103     size_t i;
4104     for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4105         /* data_vec = xinput[i]; */
4106         xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
4107         /* key_vec = xsecret[i]; */
4108         xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
4109         xxh_u64x2 const data_key = data_vec ^ key_vec;
4110         /* shuffled = (data_key << 32) | (data_key >> 32); */
4111         xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
4112         /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
4113         xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
4114         xacc[i] += product;
4115 
4116         /* swap high and low halves */
4117 #ifdef __s390x__
4118         xacc[i] += vec_permi(data_vec, data_vec, 2);
4119 #else
4120         xacc[i] += vec_xxpermdi(data_vec, data_vec, 2);
4121 #endif
4122     }
4123 }
4124 
4125 XXH_FORCE_INLINE void
4126 XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4127 {
4128     XXH_ASSERT((((size_t)acc) & 15) == 0);
4129 
4130     {         xxh_u64x2* const xacc    =       (xxh_u64x2*) acc;
4131         const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
4132         /* constants */
4133         xxh_u64x2 const v32  = { 32, 32 };
4134         xxh_u64x2 const v47 = { 47, 47 };
4135         xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
4136         size_t i;
4137         for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4138             /* xacc[i] ^= (xacc[i] >> 47); */
4139             xxh_u64x2 const acc_vec  = xacc[i];
4140             xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
4141 
4142             /* xacc[i] ^= xsecret[i]; */
4143             xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
4144             xxh_u64x2 const data_key = data_vec ^ key_vec;
4145 
4146             /* xacc[i] *= XXH_PRIME32_1 */
4147             /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF);  */
4148             xxh_u64x2 const prod_even  = XXH_vec_mule((xxh_u32x4)data_key, prime);
4149             /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32);  */
4150             xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
4151             xacc[i] = prod_odd + (prod_even << v32);
4152     }   }
4153 }
4154 
4155 #endif
4156 
4157 /* scalar variants - universal */
4158 
4159 XXH_FORCE_INLINE void
4160 XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
4161                      const void* XXH_RESTRICT input,
4162                      const void* XXH_RESTRICT secret)
4163 {
4164     XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
4165     const xxh_u8* const xinput  = (const xxh_u8*) input;  /* no alignment restriction */
4166     const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
4167     size_t i;
4168     XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
4169     for (i=0; i < XXH_ACC_NB; i++) {
4170         xxh_u64 const data_val = XXH_readLE64(xinput + 8*i);
4171         xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8);
4172         xacc[i ^ 1] += data_val; /* swap adjacent lanes */
4173         xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
4174     }
4175 }
4176 
4177 XXH_FORCE_INLINE void
4178 XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4179 {
4180     XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
4181     const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
4182     size_t i;
4183     XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
4184     for (i=0; i < XXH_ACC_NB; i++) {
4185         xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i);
4186         xxh_u64 acc64 = xacc[i];
4187         acc64 = XXH_xorshift64(acc64, 47);
4188         acc64 ^= key64;
4189         acc64 *= XXH_PRIME32_1;
4190         xacc[i] = acc64;
4191     }
4192 }
4193 
4194 XXH_FORCE_INLINE void
4195 XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4196 {
4197     /*
4198      * We need a separate pointer for the hack below,
4199      * which requires a non-const pointer.
4200      * Any decent compiler will optimize this out otherwise.
4201      */
4202     const xxh_u8* kSecretPtr = XXH3_kSecret;
4203     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4204 
4205 #if defined(__clang__) && defined(__aarch64__)
4206     /*
4207      * UGLY HACK:
4208      * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
4209      * placed sequentially, in order, at the top of the unrolled loop.
4210      *
4211      * While MOVK is great for generating constants (2 cycles for a 64-bit
4212      * constant compared to 4 cycles for LDR), long MOVK chains stall the
4213      * integer pipelines:
4214      *   I   L   S
4215      * MOVK
4216      * MOVK
4217      * MOVK
4218      * MOVK
4219      * ADD
4220      * SUB      STR
4221      *          STR
4222      * By forcing loads from memory (as the asm line causes Clang to assume
4223      * that kSecretPtr has been changed), the pipelines are used more
4224      * efficiently:
4225      *   I   L   S
4226      *      LDR
4227      *  ADD LDR
4228      *  SUB     STR
4229      *          STR
4230      * XXH3_64bits_withSeed, len == 256, Snapdragon 835
4231      *   without hack: 2654.4 MB/s
4232      *   with hack:    3202.9 MB/s
4233      */
4234     __asm__("" : "+r" (kSecretPtr));
4235 #endif
4236     /*
4237      * Note: in debug mode, this overrides the asm optimization
4238      * and Clang will emit MOVK chains again.
4239      */
4240     XXH_ASSERT(kSecretPtr == XXH3_kSecret);
4241 
4242     {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
4243         int i;
4244         for (i=0; i < nbRounds; i++) {
4245             /*
4246              * The asm hack causes Clang to assume that kSecretPtr aliases with
4247              * customSecret, and on aarch64, this prevented LDP from merging two
4248              * loads together for free. Putting the loads together before the stores
4249              * properly generates LDP.
4250              */
4251             xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
4252             xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
4253             XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
4254             XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
4255     }   }
4256 }
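
/*
 * In effect (restating the loop above): for each 16-byte segment of the
 * default secret, the low half is shifted up by the seed and the high half
 * down by it, i.e. as little-endian 64-bit reads,
 *     customSecret[16*i ..]     == kSecret[16*i ..]     + seed64
 *     customSecret[16*i + 8 ..] == kSecret[16*i + 8 ..] - seed64
 * so seed64 == 0 reproduces the default secret exactly.
 */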
4257 
4258 
4259 typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
4260 typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
4261 typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
4262 
4263 
4264 #if (XXH_VECTOR == XXH_AVX512)
4265 
4266 #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
4267 #define XXH3_scrambleAcc    XXH3_scrambleAcc_avx512
4268 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
4269 
4270 #elif (XXH_VECTOR == XXH_AVX2)
4271 
4272 #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
4273 #define XXH3_scrambleAcc    XXH3_scrambleAcc_avx2
4274 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
4275 
4276 #elif (XXH_VECTOR == XXH_SSE2)
4277 
4278 #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
4279 #define XXH3_scrambleAcc    XXH3_scrambleAcc_sse2
4280 #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
4281 
4282 #elif (XXH_VECTOR == XXH_NEON)
4283 
4284 #define XXH3_accumulate_512 XXH3_accumulate_512_neon
4285 #define XXH3_scrambleAcc    XXH3_scrambleAcc_neon
4286 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4287 
4288 #elif (XXH_VECTOR == XXH_VSX)
4289 
4290 #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
4291 #define XXH3_scrambleAcc    XXH3_scrambleAcc_vsx
4292 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4293 
4294 #else /* scalar */
4295 
4296 #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
4297 #define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
4298 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4299 
4300 #endif
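
/*
 * The dispatch above is fully resolved at compile time. XXH_VECTOR is
 * normally auto-detected, but since it only defaults when undefined, it can
 * be pinned from the build line. A hypothetical build forcing the portable
 * code path (myunit.c stands for any translation unit including this header):
 *
 *     cc -O3 -DXXH_VECTOR=XXH_SCALAR -c myunit.c
 */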
4301 
4302 
4303 
4304 #ifndef XXH_PREFETCH_DIST
4305 #  ifdef __clang__
4306 #    define XXH_PREFETCH_DIST 320
4307 #  else
4308 #    if (XXH_VECTOR == XXH_AVX512)
4309 #      define XXH_PREFETCH_DIST 512
4310 #    else
4311 #      define XXH_PREFETCH_DIST 384
4312 #    endif
4313 #  endif  /* __clang__ */
4314 #endif  /* XXH_PREFETCH_DIST */
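
/*
 * Since XXH_PREFETCH_DIST likewise only defaults when undefined, the
 * prefetch distance can be tuned per target from the build line, e.g.
 * (hypothetical value, to be validated by benchmarking):
 *
 *     cc -O3 -DXXH_PREFETCH_DIST=256 -c myunit.c
 */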
4315 
4316 /*
4317  * XXH3_accumulate()
4318  * Loops over XXH3_accumulate_512().
4319  * Assumption: nbStripes will not overflow the secret size
4320  */
4321 XXH_FORCE_INLINE void
4322 XXH3_accumulate(     xxh_u64* XXH_RESTRICT acc,
4323                 const xxh_u8* XXH_RESTRICT input,
4324                 const xxh_u8* XXH_RESTRICT secret,
4325                       size_t nbStripes,
4326                       XXH3_f_accumulate_512 f_acc512)
4327 {
4328     size_t n;
4329     for (n = 0; n < nbStripes; n++ ) {
4330         const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
4331         XXH_PREFETCH(in + XXH_PREFETCH_DIST);
4332         f_acc512(acc,
4333                  in,
4334                  secret + n*XXH_SECRET_CONSUME_RATE);
4335     }
4336 }
4337 
4338 XXH_FORCE_INLINE void
4339 XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
4340                       const xxh_u8* XXH_RESTRICT input, size_t len,
4341                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4342                             XXH3_f_accumulate_512 f_acc512,
4343                             XXH3_f_scrambleAcc f_scramble)
4344 {
4345     size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
4346     size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
4347     size_t const nb_blocks = (len - 1) / block_len;
4348 
4349     size_t n;
4350 
4351     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4352 
4353     for (n = 0; n < nb_blocks; n++) {
4354         XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
4355         f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
4356     }
4357 
4358     /* last partial block */
4359     XXH_ASSERT(len > XXH_STRIPE_LEN);
4360     {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
4361         XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
4362         XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);
4363 
4364         /* last stripe */
4365         {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
4366 #define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
4367             f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
4368     }   }
4369 }
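
/*
 * Worked example with the default parameters (XXH_SECRET_DEFAULT_SIZE == 192,
 * XXH_STRIPE_LEN == 64, XXH_SECRET_CONSUME_RATE == 8):
 *     nbStripesPerBlock = (192 - 64) / 8 = 16
 *     block_len         = 64 * 16       = 1024
 * so the accumulators are scrambled once per KiB of input.
 */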
4370 
4371 XXH_FORCE_INLINE xxh_u64
4372 XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
4373 {
4374     return XXH3_mul128_fold64(
4375                acc[0] ^ XXH_readLE64(secret),
4376                acc[1] ^ XXH_readLE64(secret+8) );
4377 }
4378 
4379 static XXH64_hash_t
4380 XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
4381 {
4382     xxh_u64 result64 = start;
4383     size_t i = 0;
4384 
4385     for (i = 0; i < 4; i++) {
4386         result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
4387 #if defined(__clang__)                                /* Clang */ \
4388     && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
4389     && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
4390     && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
4391         /*
4392          * UGLY HACK:
4393          * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
4394          * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
4395          * XXH3_64bits, len == 256, Snapdragon 835:
4396          *   without hack: 2063.7 MB/s
4397          *   with hack:    2560.7 MB/s
4398          */
4399         __asm__("" : "+r" (result64));
4400 #endif
4401     }
4402 
4403     return XXH3_avalanche(result64);
4404 }
4405 
4406 #define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
4407                         XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
4408 
4409 XXH_FORCE_INLINE XXH64_hash_t
4410 XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
4411                            const void* XXH_RESTRICT secret, size_t secretSize,
4412                            XXH3_f_accumulate_512 f_acc512,
4413                            XXH3_f_scrambleAcc f_scramble)
4414 {
4415     XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
4416 
4417     XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);
4418 
4419     /* converge into final hash */
4420     XXH_STATIC_ASSERT(sizeof(acc) == 64);
4421     /* do not align on 8, so that the secret is different from the accumulator */
4422 #define XXH_SECRET_MERGEACCS_START 11
4423     XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
4424     return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
4425 }
4426 
4427 /*
4428  * It's important for performance that XXH3_hashLong is not inlined.
4429  */
4430 XXH_NO_INLINE XXH64_hash_t
4431 XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
4432                              XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4433 {
4434     (void)seed64;
4435     return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
4436 }
4437 
4438 /*
4439  * It's important for performance that XXH3_hashLong is not inlined.
4440  * Since the function is not inlined, the compiler may not be able to understand that,
4441  * in some scenarios, its `secret` argument is actually a compile-time constant.
4442  * This variant guarantees that the compiler can detect this,
4443  * and uses this opportunity to streamline the generated code for better performance.
4444  */
4445 XXH_NO_INLINE XXH64_hash_t
4446 XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
4447                           XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4448 {
4449     (void)seed64; (void)secret; (void)secretLen;
4450     return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
4451 }
4452 
4453 /*
4454  * XXH3_hashLong_64b_withSeed():
4455  * Generate a custom key by altering the default XXH3_kSecret with the seed,
4456  * and then use this key for long mode hashing.
4457  *
4458  * This operation is decently fast but nonetheless costs a little bit of time.
4459  * Try to avoid it whenever possible (typically when seed==0).
4460  *
4461  * It's important for performance that XXH3_hashLong is not inlined. Not sure
4462  * why (uop cache maybe?), but the difference is large and easily measurable.
4463  */
4464 XXH_FORCE_INLINE XXH64_hash_t
4465 XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
4466                                     XXH64_hash_t seed,
4467                                     XXH3_f_accumulate_512 f_acc512,
4468                                     XXH3_f_scrambleAcc f_scramble,
4469                                     XXH3_f_initCustomSecret f_initSec)
4470 {
4471     if (seed == 0)
4472         return XXH3_hashLong_64b_internal(input, len,
4473                                           XXH3_kSecret, sizeof(XXH3_kSecret),
4474                                           f_acc512, f_scramble);
4475     {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
4476         f_initSec(secret, seed);
4477         return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
4478                                           f_acc512, f_scramble);
4479     }
4480 }
4481 
4482 /*
4483  * It's important for performance that XXH3_hashLong is not inlined.
4484  */
4485 XXH_NO_INLINE XXH64_hash_t
4486 XXH3_hashLong_64b_withSeed(const void* input, size_t len,
4487                            XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
4488 {
4489     (void)secret; (void)secretLen;
4490     return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
4491                 XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
4492 }
4493 
4494 
4495 typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
4496                                           XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
4497 
4498 XXH_FORCE_INLINE XXH64_hash_t
4499 XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
4500                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
4501                      XXH3_hashLong64_f f_hashLong)
4502 {
4503     XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
4504     /*
4505      * If an action is to be taken when the `secretLen` condition is not respected,
4506      * it should be done here.
4507      * For now, it's a contract pre-condition.
4508      * Adding a check and a branch here would cost performance at every hash.
4509      * Also, note that function signature doesn't offer room to return an error.
4510      */
4511     if (len <= 16)
4512         return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
4513     if (len <= 128)
4514         return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4515     if (len <= XXH3_MIDSIZE_MAX)
4516         return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4517     return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
4518 }
4519 
4520 
4521 /* ===   Public entry point   === */
4522 
4523 /*! @ingroup xxh3_family */
4524 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
4525 {
4526     return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
4527 }
4528 
4529 /*! @ingroup xxh3_family */
4530 XXH_PUBLIC_API XXH64_hash_t
4531 XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
4532 {
4533     return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
4534 }
4535 
4536 /*! @ingroup xxh3_family */
4537 XXH_PUBLIC_API XXH64_hash_t
4538 XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
4539 {
4540     return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
4541 }
4542 
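/*
 * A minimal one-shot usage sketch (illustrative; buf and bufSize are assumed
 * to be supplied by the caller):
 *
 *     XXH64_hash_t const h1 = XXH3_64bits(buf, bufSize);
 *     XXH64_hash_t const h2 = XXH3_64bits_withSeed(buf, bufSize, (XXH64_hash_t)1234);
 */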
4543 
4544 /* ===   XXH3 streaming   === */
4545 
4546 /*
4547  * Allocates memory whose address is always aligned to `align`.
4548  *
4549  * This must be freed with `XXH_alignedFree()`.
4550  *
4551  * malloc typically guarantees 16-byte alignment on 64-bit systems and 8-byte
4552  * alignment on 32-bit. This isn't enough for the 32-byte aligned loads in AVX2
4553  * or, on 32-bit, for the 16-byte aligned loads in SSE2 and NEON.
4554  *
4555  * This underalignment previously caused a rather obvious crash which went
4556  * completely unnoticed due to XXH3_createState() not actually being tested.
4557  * Credit to RedSpah for noticing this bug.
4558  *
4559  * The alignment is done manually; functions like posix_memalign or _mm_malloc
4560  * are avoided. To maintain portability, we would have to write a fallback
4561  * like this anyway, and besides, testing for the existence of library
4562  * functions without relying on external build tools is impossible.
4563  *
4564  * The method is simple: Overallocate, manually align, and store the offset
4565  * to the original behind the returned pointer.
4566  *
4567  * Align must be a power of 2 and 8 <= align <= 128.
4568  */
4569 static void* XXH_alignedMalloc(size_t s, size_t align)
4570 {
4571     XXH_ASSERT(align <= 128 && align >= 8); /* range check */
4572     XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
4573     XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
4574     {   /* Overallocate to make room for manual realignment and an offset byte */
4575         xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
4576         if (base != NULL) {
4577             /*
4578              * Get the offset needed to align this pointer.
4579              *
4580              * Even if the returned pointer is aligned, there will always be
4581              * at least one byte to store the offset to the original pointer.
4582              */
4583             size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
4584             /* Add the offset for the now-aligned pointer */
4585             xxh_u8* ptr = base + offset;
4586 
4587             XXH_ASSERT((size_t)ptr % align == 0);
4588 
4589             /* Store the offset immediately before the returned pointer. */
4590             ptr[-1] = (xxh_u8)offset;
4591             return ptr;
4592         }
4593         return NULL;
4594     }
4595 }
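/*
 * Worked example of the scheme above (illustrative numbers): with align == 16
 * and base == 0x1009, offset == 16 - (0x1009 & 15) == 7, so ptr == 0x1010,
 * which is 16-byte aligned, and ptr[-1] stores 7 for XXH_alignedFree().
 * When base is already aligned, offset == align, so ptr sits a full `align`
 * bytes in; the `s + align` over-allocation covers both extremes.
 */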
4596 /*
4597  * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
4598  * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
4599  */
4600 static void XXH_alignedFree(void* p)
4601 {
4602     if (p != NULL) {
4603         xxh_u8* ptr = (xxh_u8*)p;
4604         /* Get the offset byte we added in XXH_alignedMalloc. */
4605         xxh_u8 offset = ptr[-1];
4606         /* Free the original malloc'd pointer */
4607         xxh_u8* base = ptr - offset;
4608         XXH_free(base);
4609     }
4610 }
4611 /*! @ingroup xxh3_family */
4612 XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
4613 {
4614     XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
4615     if (state==NULL) return NULL;
4616     XXH3_INITSTATE(state);
4617     return state;
4618 }
4619 
4620 /*! @ingroup xxh3_family */
4621 XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
4622 {
4623     XXH_alignedFree(statePtr);
4624     return XXH_OK;
4625 }
4626 
4627 /*! @ingroup xxh3_family */
4628 XXH_PUBLIC_API void
4629 XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
4630 {
4631     memcpy(dst_state, src_state, sizeof(*dst_state));
4632 }
4633 
4634 static void
4635 XXH3_reset_internal(XXH3_state_t* statePtr,
4636                            XXH64_hash_t seed,
4637                            const void* secret, size_t secretSize)
4638 {
4639     size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
4640     size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
4641     XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
4642     XXH_ASSERT(statePtr != NULL);
4643     /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
4644     memset((char*)statePtr + initStart, 0, initLength);
4645     statePtr->acc[0] = XXH_PRIME32_3;
4646     statePtr->acc[1] = XXH_PRIME64_1;
4647     statePtr->acc[2] = XXH_PRIME64_2;
4648     statePtr->acc[3] = XXH_PRIME64_3;
4649     statePtr->acc[4] = XXH_PRIME64_4;
4650     statePtr->acc[5] = XXH_PRIME32_2;
4651     statePtr->acc[6] = XXH_PRIME64_5;
4652     statePtr->acc[7] = XXH_PRIME32_1;
4653     statePtr->seed = seed;
4654     statePtr->extSecret = (const unsigned char*)secret;
4655     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4656     statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
4657     statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
4658 }
4659 
4660 /*! @ingroup xxh3_family */
4661 XXH_PUBLIC_API XXH_errorcode
4662 XXH3_64bits_reset(XXH3_state_t* statePtr)
4663 {
4664     if (statePtr == NULL) return XXH_ERROR;
4665     XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
4666     return XXH_OK;
4667 }
4668 
4669 /*! @ingroup xxh3_family */
4670 XXH_PUBLIC_API XXH_errorcode
4671 XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
4672 {
4673     if (statePtr == NULL) return XXH_ERROR;
4674     if (secret == NULL) return XXH_ERROR;
4675     if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
4676     XXH3_reset_internal(statePtr, 0, secret, secretSize);
4677     return XXH_OK;
4678 }
4679 
4680 /*! @ingroup xxh3_family */
4681 XXH_PUBLIC_API XXH_errorcode
4682 XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
4683 {
4684     if (statePtr == NULL) return XXH_ERROR;
4685     if (seed==0) return XXH3_64bits_reset(statePtr);
4686     if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed);
4687     XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
4688     return XXH_OK;
4689 }
4690 
4691 /* Note: when XXH3_consumeStripes() is invoked,
4692  * there must be a guarantee that at least one more byte will be consumed from input,
4693  * so that the function can blindly consume all stripes using the "normal" secret segment */
4694 XXH_FORCE_INLINE void
4695 XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
4696                     size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
4697                     const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
4698                     const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
4699                     XXH3_f_accumulate_512 f_acc512,
4700                     XXH3_f_scrambleAcc f_scramble)
4701 {
4702     XXH_ASSERT(nbStripes <= nbStripesPerBlock);  /* can handle max 1 scramble per invocation */
4703     XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
4704     if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
4705         /* need a scrambling operation */
4706         size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
4707         size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
4708         XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
4709         f_scramble(acc, secret + secretLimit);
4710         XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
4711         *nbStripesSoFarPtr = nbStripesAfterBlock;
4712     } else {
4713         XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
4714         *nbStripesSoFarPtr += nbStripes;
4715     }
4716 }
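
/*
 * Worked example (illustrative numbers): with nbStripesPerBlock == 16,
 * *nbStripesSoFarPtr == 14 and nbStripes == 4, the first branch triggers:
 * 2 stripes complete the current block (secret offsets 14 and 15, in units
 * of XXH_SECRET_CONSUME_RATE), the accumulators are scrambled, the remaining
 * 2 stripes restart from the beginning of the secret, and *nbStripesSoFarPtr
 * becomes 2.
 */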
4717 
4718 /*
4719  * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
4720  */
4721 XXH_FORCE_INLINE XXH_errorcode
4722 XXH3_update(XXH3_state_t* state,
4723             const xxh_u8* input, size_t len,
4724             XXH3_f_accumulate_512 f_acc512,
4725             XXH3_f_scrambleAcc f_scramble)
4726 {
4727     if (input==NULL)
4728 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
4729         return XXH_OK;
4730 #else
4731         return XXH_ERROR;
4732 #endif
4733 
4734     {   const xxh_u8* const bEnd = input + len;
4735         const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
4736 
4737         state->totalLen += len;
4738         XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
4739 
4740         if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {  /* fill in tmp buffer */
4741             XXH_memcpy(state->buffer + state->bufferedSize, input, len);
4742             state->bufferedSize += (XXH32_hash_t)len;
4743             return XXH_OK;
4744         }
4745         /* total input is now > XXH3_INTERNALBUFFER_SIZE */
4746 
4747         #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
4748         XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */
4749 
4750         /*
4751          * The internal buffer is partially filled (always, except at the beginning).
4752          * Complete it, then consume it.
4753          */
4754         if (state->bufferedSize) {
4755             size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
4756             XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
4757             input += loadSize;
4758             XXH3_consumeStripes(state->acc,
4759                                &state->nbStripesSoFar, state->nbStripesPerBlock,
4760                                 state->buffer, XXH3_INTERNALBUFFER_STRIPES,
4761                                 secret, state->secretLimit,
4762                                 f_acc512, f_scramble);
4763             state->bufferedSize = 0;
4764         }
4765         XXH_ASSERT(input < bEnd);
4766 
4767         /* Consume input by a multiple of internal buffer size */
4768         if (input+XXH3_INTERNALBUFFER_SIZE < bEnd) {
4769             const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
4770             do {
4771                 XXH3_consumeStripes(state->acc,
4772                                    &state->nbStripesSoFar, state->nbStripesPerBlock,
4773                                     input, XXH3_INTERNALBUFFER_STRIPES,
4774                                     secret, state->secretLimit,
4775                                     f_acc512, f_scramble);
4776                 input += XXH3_INTERNALBUFFER_SIZE;
4777             } while (input<limit);
4778             /* for last partial stripe */
4779             memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
4780         }
4781         XXH_ASSERT(input < bEnd);
4782 
4783         /* Some remaining input (always) : buffer it */
4784         XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
4785         state->bufferedSize = (XXH32_hash_t)(bEnd-input);
4786     }
4787 
4788     return XXH_OK;
4789 }
4790 
4791 /*! @ingroup xxh3_family */
4792 XXH_PUBLIC_API XXH_errorcode
4793 XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
4794 {
4795     return XXH3_update(state, (const xxh_u8*)input, len,
4796                        XXH3_accumulate_512, XXH3_scrambleAcc);
4797 }
4798 
4799 
4800 XXH_FORCE_INLINE void
4801 XXH3_digest_long (XXH64_hash_t* acc,
4802                   const XXH3_state_t* state,
4803                   const unsigned char* secret)
4804 {
4805     /*
4806      * Digest on a local copy. This way, the state remains unaltered, and it can
4807      * continue ingesting more input afterwards.
4808      */
4809     memcpy(acc, state->acc, sizeof(state->acc));
4810     if (state->bufferedSize >= XXH_STRIPE_LEN) {
4811         size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
4812         size_t nbStripesSoFar = state->nbStripesSoFar;
4813         XXH3_consumeStripes(acc,
4814                            &nbStripesSoFar, state->nbStripesPerBlock,
4815                             state->buffer, nbStripes,
4816                             secret, state->secretLimit,
4817                             XXH3_accumulate_512, XXH3_scrambleAcc);
4818         /* last stripe */
4819         XXH3_accumulate_512(acc,
4820                             state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
4821                             secret + state->secretLimit - XXH_SECRET_LASTACC_START);
4822     } else {  /* bufferedSize < XXH_STRIPE_LEN */
4823         xxh_u8 lastStripe[XXH_STRIPE_LEN];
4824         size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
4825         XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
4826         memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
4827         memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
4828         XXH3_accumulate_512(acc,
4829                             lastStripe,
4830                             secret + state->secretLimit - XXH_SECRET_LASTACC_START);
4831     }
4832 }
4833 
4834 /*! @ingroup xxh3_family */
4835 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
4836 {
4837     const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
4838     if (state->totalLen > XXH3_MIDSIZE_MAX) {
4839         XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
4840         XXH3_digest_long(acc, state, secret);
4841         return XXH3_mergeAccs(acc,
4842                               secret + XXH_SECRET_MERGEACCS_START,
4843                               (xxh_u64)state->totalLen * XXH_PRIME64_1);
4844     }
4845     /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
4846     if (state->seed)
4847         return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
4848     return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
4849                                   secret, state->secretLimit + XXH_STRIPE_LEN);
4850 }
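
/*
 * A minimal streaming sketch (illustrative; error paths are trimmed, and
 * readChunk() is a hypothetical application-side reader). Note that
 * XXH3_64bits_digest() works on a local copy of the state, so the stream
 * can keep ingesting input afterwards:
 *
 *     unsigned char buf[4096];
 *     size_t n;
 *     XXH64_hash_t hash;
 *     XXH3_state_t* const state = XXH3_createState();
 *     XXH3_64bits_reset(state);
 *     while ((n = readChunk(buf, sizeof(buf))) > 0)
 *         XXH3_64bits_update(state, buf, n);
 *     hash = XXH3_64bits_digest(state);
 *     XXH3_freeState(state);
 */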
4851 
4852 
4853 #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
4854 
4855 /*! @ingroup xxh3_family */
4856 XXH_PUBLIC_API void
4857 XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize)
4858 {
4859     XXH_ASSERT(secretBuffer != NULL);
4860     if (customSeedSize == 0) {
4861         memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
4862         return;
4863     }
4864     XXH_ASSERT(customSeed != NULL);
4865 
4866     {   size_t const segmentSize = sizeof(XXH128_hash_t);
4867         size_t const nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize;
4868         XXH128_canonical_t scrambler;
4869         XXH64_hash_t seeds[12];
4870         size_t segnb;
4871         XXH_ASSERT(nbSegments == 12);
4872         XXH_ASSERT(segmentSize * nbSegments == XXH_SECRET_DEFAULT_SIZE); /* exact multiple */
4873         XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
4874 
4875         /*
4876          * Copy customSeed to seeds[], truncating or repeating as necessary.
4877          */
4878         {   size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds));
4879             size_t filled = toFill;
4880             memcpy(seeds, customSeed, toFill);
4881             while (filled < sizeof(seeds)) {
4882                 toFill = XXH_MIN(filled, sizeof(seeds) - filled);
4883                 memcpy((char*)seeds + filled, seeds, toFill);
4884                 filled += toFill;
4885         }   }
4886 
4887         /* generate secret */
4888         memcpy(secretBuffer, &scrambler, sizeof(scrambler));
4889         for (segnb=1; segnb < nbSegments; segnb++) {
4890             size_t const segmentStart = segnb * segmentSize;
4891             XXH128_canonical_t segment;
4892             XXH128_canonicalFromHash(&segment,
4893                 XXH128(&scrambler, sizeof(scrambler), XXH_readLE64(seeds + segnb) + segnb) );
4894             memcpy((char*)secretBuffer + segmentStart, &segment, sizeof(segment));
4895     }   }
4896 }
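
/*
 * A minimal custom-secret sketch (illustrative; seedMaterial, seedSize,
 * buf and bufSize are assumed to be supplied by the caller; the buffer must
 * hold the XXH_SECRET_DEFAULT_SIZE bytes this function writes):
 *
 *     unsigned char secret[XXH_SECRET_DEFAULT_SIZE];
 *     XXH64_hash_t hash;
 *     XXH3_generateSecret(secret, seedMaterial, seedSize);
 *     hash = XXH3_64bits_withSecret(buf, bufSize, secret, sizeof(secret));
 */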
4897 
4898 
4899 /* ==========================================
4900  * XXH3 128 bits (a.k.a XXH128)
4901  * ==========================================
4902  * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
4903  * even without counting the significantly larger output size.
4904  *
4905  * For example, extra steps are taken to avoid the seed-dependent collisions
4906  * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
4907  *
4908  * This strength naturally comes at the cost of some speed, especially on short
4909  * lengths. Note that on long inputs it is about as fast as the 64-bit version,
4910  * as it uses only a slight modification of the 64-bit loop.
4911  *
4912  * XXH128 is also more oriented towards 64-bit machines. It is still extremely
4913  * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
4914  */
4915 
4916 XXH_FORCE_INLINE XXH128_hash_t
4917 XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4918 {
4919     /* A doubled version of 1to3_64b with different constants. */
4920     XXH_ASSERT(input != NULL);
4921     XXH_ASSERT(1 <= len && len <= 3);
4922     XXH_ASSERT(secret != NULL);
4923     /*
4924      * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
4925      * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
4926      * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
4927      */
4928     {   xxh_u8 const c1 = input[0];
4929         xxh_u8 const c2 = input[len >> 1];
4930         xxh_u8 const c3 = input[len - 1];
4931         xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
4932                                 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
4933         xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
4934         xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
4935         xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
4936         xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
4937         xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
4938         XXH128_hash_t h128;
4939         h128.low64  = XXH64_avalanche(keyed_lo);
4940         h128.high64 = XXH64_avalanche(keyed_hi);
4941         return h128;
4942     }
4943 }
4944 
4945 XXH_FORCE_INLINE XXH128_hash_t
4946 XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4947 {
4948     XXH_ASSERT(input != NULL);
4949     XXH_ASSERT(secret != NULL);
4950     XXH_ASSERT(4 <= len && len <= 8);
4951     seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
4952     {   xxh_u32 const input_lo = XXH_readLE32(input);
4953         xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
4954         xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
4955         xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
4956         xxh_u64 const keyed = input_64 ^ bitflip;
4957 
4958         /* Shift len left so the added term is even: the odd XXH_PRIME64_1 plus an even value keeps the multiplier odd, avoiding even multiplies. */
4959         XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
4960 
4961         m128.high64 += (m128.low64 << 1);
4962         m128.low64  ^= (m128.high64 >> 3);
4963 
4964         m128.low64   = XXH_xorshift64(m128.low64, 35);
4965         m128.low64  *= 0x9FB21C651E98DF25ULL;
4966         m128.low64   = XXH_xorshift64(m128.low64, 28);
4967         m128.high64  = XXH3_avalanche(m128.high64);
4968         return m128;
4969     }
4970 }
4971 
4972 XXH_FORCE_INLINE XXH128_hash_t
4973 XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4974 {
4975     XXH_ASSERT(input != NULL);
4976     XXH_ASSERT(secret != NULL);
4977     XXH_ASSERT(9 <= len && len <= 16);
4978     {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
4979         xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
4980         xxh_u64 const input_lo = XXH_readLE64(input);
4981         xxh_u64       input_hi = XXH_readLE64(input + len - 8);
4982         XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
4983         /*
4984          * Put len in the middle of m128 to ensure that the length gets mixed to
4985          * both the low and high bits in the 128x64 multiply below.
4986          */
4987         m128.low64 += (xxh_u64)(len - 1) << 54;
4988         input_hi   ^= bitfliph;
4989         /*
4990          * Add the high 32 bits of input_hi to the high 32 bits of m128, then
4991          * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
4992          * the high 64 bits of m128.
4993          *
4994          * The best approach to this operation is different on 32-bit and 64-bit.
4995          */
4996         if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
4997             /*
4998              * 32-bit optimized version, which is more readable.
4999              *
5000              * On 32-bit, it removes an ADC and delays a dependency between the two
5001              * halves of m128.high64, but it generates an extra mask on 64-bit.
5002              */
5003             m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
5004         } else {
5005             /*
5006              * 64-bit optimized (albeit more confusing) version.
5007              *
5008              * Uses some properties of addition and multiplication to remove the mask:
5009              *
5010              * Let:
5011              *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
5012              *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
5013              *    c = XXH_PRIME32_2
5014              *
5015              *    a + (b * c)
5016              * Inverse Property: x + y - x == y
5017              *    a + (b * (1 + c - 1))
5018              * Distributive Property: x * (y + z) == (x * y) + (x * z)
5019              *    a + (b * 1) + (b * (c - 1))
5020              * Identity Property: x * 1 == x
5021              *    a + b + (b * (c - 1))
5022              *
5023              * Substitute a, b, and c:
5024              *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5025              *
5026              * Since input_hi.hi + input_hi.lo == input_hi, we get this:
5027              *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5028              */
5029             m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
5030         }
5031         /* m128 ^= XXH_swap64(m128 >> 64); */
5032         m128.low64  ^= XXH_swap64(m128.high64);
5033 
5034         {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
5035             XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
5036             h128.high64 += m128.high64 * XXH_PRIME64_2;
5037 
5038             h128.low64   = XXH3_avalanche(h128.low64);
5039             h128.high64  = XXH3_avalanche(h128.high64);
5040             return h128;
5041     }   }
5042 }
5043 
5044 /*
5045  * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
5046  */
5047 XXH_FORCE_INLINE XXH128_hash_t
5048 XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5049 {
5050     XXH_ASSERT(len <= 16);
5051     {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
5052         if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
5053         if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
5054         {   XXH128_hash_t h128;
5055             xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
5056             xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
5057             h128.low64 = XXH64_avalanche(seed ^ bitflipl);
5058             h128.high64 = XXH64_avalanche( seed ^ bitfliph);
5059             return h128;
5060     }   }
5061 }
5062 
5063 /*
5064  * A bit slower than XXH3_mix16B, but handles multiply by zero better.
5065  */
5066 XXH_FORCE_INLINE XXH128_hash_t
5067 XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
5068               const xxh_u8* secret, XXH64_hash_t seed)
5069 {
5070     acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
5071     acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
5072     acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
5073     acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
5074     return acc;
5075 }
5076 
5077 
5078 XXH_FORCE_INLINE XXH128_hash_t
5079 XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5080                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5081                       XXH64_hash_t seed)
5082 {
5083     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5084     XXH_ASSERT(16 < len && len <= 128);
5085 
5086     {   XXH128_hash_t acc;
5087         acc.low64 = len * XXH_PRIME64_1;
5088         acc.high64 = 0;
5089         if (len > 32) {
5090             if (len > 64) {
5091                 if (len > 96) {
5092                     acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
5093                 }
5094                 acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
5095             }
5096             acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
5097         }
5098         acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
5099         {   XXH128_hash_t h128;
5100             h128.low64  = acc.low64 + acc.high64;
5101             h128.high64 = (acc.low64    * XXH_PRIME64_1)
5102                         + (acc.high64   * XXH_PRIME64_4)
5103                         + ((len - seed) * XXH_PRIME64_2);
5104             h128.low64  = XXH3_avalanche(h128.low64);
5105             h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5106             return h128;
5107         }
5108     }
5109 }
5110 
5111 XXH_NO_INLINE XXH128_hash_t
5112 XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5113                        const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5114                        XXH64_hash_t seed)
5115 {
5116     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5117     XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
5118 
5119     {   XXH128_hash_t acc;
5120         int const nbRounds = (int)len / 32;
5121         int i;
5122         acc.low64 = len * XXH_PRIME64_1;
5123         acc.high64 = 0;
5124         for (i=0; i<4; i++) {
5125             acc = XXH128_mix32B(acc,
5126                                 input  + (32 * i),
5127                                 input  + (32 * i) + 16,
5128                                 secret + (32 * i),
5129                                 seed);
5130         }
5131         acc.low64 = XXH3_avalanche(acc.low64);
5132         acc.high64 = XXH3_avalanche(acc.high64);
5133         XXH_ASSERT(nbRounds >= 4);
5134         for (i=4 ; i < nbRounds; i++) {
5135             acc = XXH128_mix32B(acc,
5136                                 input + (32 * i),
5137                                 input + (32 * i) + 16,
5138                                 secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
5139                                 seed);
5140         }
5141         /* last bytes */
5142         acc = XXH128_mix32B(acc,
5143                             input + len - 16,
5144                             input + len - 32,
5145                             secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
5146                             0ULL - seed);
5147 
5148         {   XXH128_hash_t h128;
5149             h128.low64  = acc.low64 + acc.high64;
5150             h128.high64 = (acc.low64    * XXH_PRIME64_1)
5151                         + (acc.high64   * XXH_PRIME64_4)
5152                         + ((len - seed) * XXH_PRIME64_2);
5153             h128.low64  = XXH3_avalanche(h128.low64);
5154             h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5155             return h128;
5156         }
5157     }
5158 }
5159 
5160 XXH_FORCE_INLINE XXH128_hash_t
5161 XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
5162                             const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5163                             XXH3_f_accumulate_512 f_acc512,
5164                             XXH3_f_scrambleAcc f_scramble)
5165 {
5166     XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
5167 
5168     XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);
5169 
5170     /* converge into final hash */
5171     XXH_STATIC_ASSERT(sizeof(acc) == 64);
5172     XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5173     {   XXH128_hash_t h128;
5174         h128.low64  = XXH3_mergeAccs(acc,
5175                                      secret + XXH_SECRET_MERGEACCS_START,
5176                                      (xxh_u64)len * XXH_PRIME64_1);
5177         h128.high64 = XXH3_mergeAccs(acc,
5178                                      secret + secretSize
5179                                             - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
5180                                      ~((xxh_u64)len * XXH_PRIME64_2));
5181         return h128;
5182     }
5183 }
5184 
/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                XXH64_hash_t seed64,
                                XXH3_f_accumulate_512 f_acc512,
                                XXH3_f_scrambleAcc f_scramble,
                                XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc512, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}

typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If any action must be taken when the `secret` conditions are not
     * respected, it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}


/* ===   Public XXH128 API   === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}
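
/*
 * Usage sketch (illustrative only, not part of the library): hashing with a
 * caller-provided secret. Per the contract checked in XXH3_128bits_internal(),
 * the secret must be at least XXH3_SECRET_SIZE_MIN bytes long and should be
 * high-entropy. `myData`, `myDataSize` and `mySecret` are hypothetical names.
 *
 *     XXH128_hash_t const hash =
 *         XXH3_128bits_withSecret(myData, myDataSize, mySecret, sizeof(mySecret));
 */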

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
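
/*
 * Usage sketch (illustrative only, not part of the library): one-shot
 * hashing of a memory buffer with XXH128(). Requires <stdio.h> for printf().
 * `myBuffer` and `myBufferSize` are hypothetical caller-side names.
 *
 *     XXH128_hash_t const hash = XXH128(myBuffer, myBufferSize, 0);
 *     printf("%016llx%016llx\n",
 *            (unsigned long long)hash.high64,
 *            (unsigned long long)hash.low64);
 */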


/* ===   XXH3 128-bit streaming   === */

/*
 * All the functions are actually the same as in the 64-bit streaming variant.
 * The only difference is the finalization routine.
 */
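
/*
 * Minimal streaming sketch (illustrative only, not part of the library),
 * with error checking omitted for brevity. `chunk1`, `chunk2` and their
 * sizes are hypothetical caller-side names.
 *
 *     XXH3_state_t* const state = XXH3_createState();
 *     XXH3_128bits_reset(state);
 *     XXH3_128bits_update(state, chunk1, chunk1Size);
 *     XXH3_128bits_update(state, chunk2, chunk2Size);
 *     {   XXH128_hash_t const hash = XXH3_128bits_digest(state);
 *         (void)hash;  /* use the result here */
 *     }
 *     XXH3_freeState(state);
 */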

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_128bits_reset(statePtr);
    if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* len <= XXH3_MIDSIZE_MAX : short code */
    if (state->seed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}

/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return : 1 if equal, 0 if different */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding byte */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}

/* This prototype is compatible with stdlib's qsort().
 * return : >0 if *h128_1  > *h128_2
 *          <0 if *h128_1  < *h128_2
 *          =0 if *h128_1 == *h128_2  */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
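
/*
 * Usage sketch (illustrative only): since XXH128_cmp() satisfies the
 * comparator contract of stdlib's qsort(), an array of hashes can be
 * sorted directly. `hashes` and `nbHashes` are hypothetical names.
 *
 *     #include <stdlib.h>
 *     qsort(hashes, nbHashes, sizeof(XXH128_hash_t), XXH128_cmp);
 */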


/*======   Canonical representation   ======*/
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    memcpy(dst, &hash.high64, sizeof(hash.high64));
    memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
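
/*
 * Round-trip sketch (illustrative only): the canonical form is a fixed
 * big-endian byte layout suitable for storage or transmission, and
 * converting back restores the original value. `hash` is a hypothetical
 * pre-computed XXH128_hash_t; assert() requires <assert.h>.
 *
 *     XXH128_canonical_t canonical;
 *     XXH128_canonicalFromHash(&canonical, hash);
 *     {   XXH128_hash_t const roundTrip = XXH128_hashFromCanonical(&canonical);
 *         assert(XXH128_isEqual(hash, roundTrip));
 *     }
 */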

/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif

#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif  /* XXH_IMPLEMENTATION */


#if defined (__cplusplus)
}
#endif