├── .gitignore ├── README.md ├── config.m4 ├── config.w32 ├── php_xxhash.c ├── php_xxhash.h ├── xxhash.c └── xxhash.h /.gitignore: -------------------------------------------------------------------------------- 1 | .deps 2 | .libs 3 | Makefile 4 | Makefile.fragments 5 | Makefile.global 6 | Makefile.objects 7 | acinclude.m4 8 | aclocal.m4 9 | autom4te.cache 10 | build 11 | config.guess 12 | config.h 13 | config.h.in* 14 | config.log 15 | config.nice 16 | config.status 17 | config.sub 18 | configure 19 | configure.in 20 | *.la 21 | *.lo 22 | install-sh 23 | libtool 24 | ltmain.sh 25 | missing 26 | mkinstalldirs 27 | modules 28 | run-tests.php -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | PHP-xxHash 2 | ========== 3 | 4 | A PHP extension to add support for the [xxHash](https://github.com/Cyan4973/xxHash) fast hashing algorithm. 5 | 6 | [xxHash](https://github.com/Cyan4973/xxHash) is an extremely fast hash algorithm, running at RAM speed limits. 7 | It successfully completes the [SMHasher](http://code.google.com/p/smhasher/wiki/SMHasher) test suite, which evaluates the collision, dispersion and randomness qualities of hash functions. 8 | 9 | ## PHP 7 Compatibility 10 | 11 | Please note that, at the moment, the master branch should be used for PHP 5.x and the develop branch should be used for PHP 7.x. 12 | 13 | ## Installation Instructions 14 | 15 | ``` 16 | phpize 17 | ./configure --enable-xxhash 18 | make 19 | sudo make install 20 | ``` 21 | Don't forget to load the extension via php.ini or an equivalent mechanism. 22 | 23 | ## Usage Instructions 24 | 25 | Once the extension is installed and enabled in php.ini, the following two new functions are available: 26 | 27 | ``` 28 | string xxhash32(string $data); 29 | string xxhash64(string $data); 30 | ``` 31 | 32 | In both cases a hexadecimal string is returned, representing the digest (hash) of the `$data` input. 33 | 34 | 35 | ## Credits 36 | * Original implementation of [php-xxhash](https://github.com/stuartherbert/php-xxhash) by Stuart Herbert. 37 | * [xxHash](https://github.com/Cyan4973/xxHash) by Yann Collet. 38 | 39 | ## Licence 40 | 41 | BSD 2-clause license. -------------------------------------------------------------------------------- /config.m4: -------------------------------------------------------------------------------- 1 | dnl $Id$ 2 | dnl config.m4 for extension xxhash 3 | 4 | dnl Comments in this file start with the string 'dnl'. 5 | dnl Remove where necessary. This file will not work 6 | dnl without editing. 7 | 8 | dnl If your extension references something external, use with: 9 | 10 | dnl PHP_ARG_WITH(xxhash, for xxhash support, 11 | dnl Make sure that the comment is aligned: 12 | dnl [ --with-xxhash Include xxhash support]) 13 | 14 | dnl Otherwise use enable: 15 | 16 | PHP_ARG_ENABLE(xxhash, whether to enable xxhash support, 17 | dnl Make sure that the comment is aligned: 18 | [ --enable-xxhash Enable xxhash support]) 19 | 20 | if test "$PHP_XXHASH" != "no"; then 21 | dnl Write more examples of tests here...
22 | 23 | PHP_NEW_EXTENSION(xxhash, php_xxhash.c, $ext_shared) 24 | fi 25 | -------------------------------------------------------------------------------- /config.w32: -------------------------------------------------------------------------------- 1 | // $Id$ 2 | // vim:ft=javascript 3 | 4 | ARG_ENABLE("xxhash", "enable xxhash support", "no"); 5 | 6 | if (PHP_XXHASH != "no") { 7 | EXTENSION("xxhash", "php_xxhash.c"); 8 | } 9 | 10 | -------------------------------------------------------------------------------- /php_xxhash.c: -------------------------------------------------------------------------------- 1 | #ifdef HAVE_CONFIG_H 2 | #include "config.h" 3 | #endif 4 | 5 | #include "php.h" 6 | #include "php_ini.h" 7 | #include "ext/standard/info.h" 8 | #include "php_xxhash.h" 9 | 10 | #include "xxhash.c" 11 | 12 | #ifdef COMPILE_DL_XXHASH 13 | ZEND_GET_MODULE(xxhash); 14 | #ifdef ZTS 15 | ZEND_TSRMLS_CACHE_DEFINE(); 16 | #endif 17 | #endif 18 | 19 | PHP_MINFO_FUNCTION(xxhash) 20 | { 21 | php_info_print_table_start(); 22 | php_info_print_table_header(2, "xxhash support", "enabled"); 23 | php_info_print_table_row(2, "extension version", PHP_XXHASH_VERSION); 24 | php_info_print_table_row(2, "xxhash release", "r40"); 25 | php_info_print_table_end(); 26 | } 27 | 28 | PHP_FUNCTION(xxhash32) 29 | { 30 | char *arg = NULL; 31 | size_t arg_len, len; 32 | zend_string *strg; 33 | unsigned int sum; 34 | 35 | if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &arg, &arg_len) == FAILURE || arg_len < 1) { 36 | return; 37 | } 38 | 39 | // compute the checksum 40 | sum = XXH32(arg, arg_len, 0); 41 | 42 | //convert to a hex string 43 | strg = strpprintf(0, "%08x", sum); 44 | 45 | // return the checksum 46 | RETURN_STR(strg); 47 | } 48 | 49 | PHP_FUNCTION(xxhash64) 50 | { 51 | char *arg = NULL; 52 | size_t arg_len, len; 53 | zend_string *strg; 54 | unsigned long long sum; 55 | 56 | if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &arg, &arg_len) == FAILURE || arg_len < 1) { 57 | return; 58 | } 59 | 60 | // compute the checksum 61 | sum = XXH64(arg, arg_len, 0); 62 | 63 | //convert to a hex string 64 | strg = strpprintf(0, "%08x%08x", (U32)(sum >> 32), (U32)sum); 65 | 66 | // return the checksum 67 | RETURN_STR(strg); 68 | } 69 | 70 | const zend_function_entry xxhash_functions[] = { 71 | ZEND_FE(xxhash32, NULL) 72 | ZEND_FE(xxhash64, NULL) 73 | PHP_FE_END 74 | }; 75 | 76 | zend_module_entry xxhash_module_entry = { 77 | STANDARD_MODULE_HEADER, 78 | "xxhash", 79 | xxhash_functions, 80 | NULL, 81 | NULL, 82 | NULL, 83 | NULL, 84 | PHP_MINFO(xxhash), 85 | PHP_XXHASH_VERSION, 86 | STANDARD_MODULE_PROPERTIES 87 | }; 88 | -------------------------------------------------------------------------------- /php_xxhash.h: -------------------------------------------------------------------------------- 1 | #ifndef PHP_XXHASH_H 2 | #define PHP_XXHASH_H 3 | 4 | extern zend_module_entry xxhash_module_entry; 5 | #define phpext_xxhash_ptr &xxhash_module_entry 6 | 7 | #define PHP_XXHASH_VERSION "2.0" 8 | 9 | #ifdef PHP_WIN32 10 | # define PHP_XXHASH_API __declspec(dllexport) 11 | #elif defined(__GNUC__) && __GNUC__ >= 4 12 | # define PHP_XXHASH_API __attribute__ ((visibility("default"))) 13 | #else 14 | # define PHP_XXHASH_API 15 | #endif 16 | 17 | #ifdef ZTS 18 | #include "TSRM.h" 19 | #endif 20 | 21 | 22 | #ifdef ZTS 23 | #define XXHASH_G(v) ZEND_TSRMG(xxhash_globals_id, zend_xxhash_globals *, v) 24 | #ifdef COMPILE_DL_XXHASH 25 | ZEND_TSRMLS_CACHE_EXTERN(); 26 | #endif 27 | #else 28 | #define XXHASH_G(v) (xxhash_globals.v) 29 | 
#endif 30 | 31 | #endif /* PHP_XXHASH_H */ -------------------------------------------------------------------------------- /xxhash.c: -------------------------------------------------------------------------------- 1 | /* 2 | xxHash - Fast Hash algorithm 3 | Copyright (C) 2012-2015, Yann Collet 4 | 5 | BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are 9 | met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above 14 | copyright notice, this list of conditions and the following disclaimer 15 | in the documentation and/or other materials provided with the 16 | distribution. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | 30 | You can contact the author at : 31 | - xxHash source repository : https://github.com/Cyan4973/xxHash 32 | */ 33 | 34 | 35 | /************************************** 36 | * Tuning parameters 37 | **************************************/ 38 | /* Unaligned memory access is automatically enabled for "common" CPU, such as x86. 39 | * For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected. 40 | * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance. 41 | * You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32). 42 | */ 43 | #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) 44 | # define XXH_USE_UNALIGNED_ACCESS 1 45 | #endif 46 | 47 | /* XXH_ACCEPT_NULL_INPUT_POINTER : 48 | * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. 49 | * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. 50 | * By default, this option is disabled. To enable it, uncomment below define : 51 | */ 52 | /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ 53 | 54 | /* XXH_FORCE_NATIVE_FORMAT : 55 | * By default, xxHash library provides endian-independant Hash values, based on little-endian convention. 56 | * Results are therefore identical for little-endian and big-endian CPU. 57 | * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. 58 | * Should endian-independance be of no importance for your application, you may set the #define below to 1. 
59 | * It will improve speed for Big-endian CPU. 60 | * This option has no impact on Little_Endian CPU. 61 | */ 62 | #define XXH_FORCE_NATIVE_FORMAT 0 63 | 64 | 65 | /************************************** 66 | * Compiler Specific Options 67 | ***************************************/ 68 | #ifdef _MSC_VER /* Visual Studio */ 69 | # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ 70 | # define FORCE_INLINE static __forceinline 71 | #else 72 | # if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ 73 | # ifdef __GNUC__ 74 | # define FORCE_INLINE static inline __attribute__((always_inline)) 75 | # else 76 | # define FORCE_INLINE static inline 77 | # endif 78 | # else 79 | # define FORCE_INLINE static 80 | # endif /* __STDC_VERSION__ */ 81 | #endif 82 | 83 | 84 | /************************************** 85 | * Includes & Memory related functions 86 | ***************************************/ 87 | #include "xxhash.h" 88 | /* Modify the local functions below should you wish to use some other memory routines */ 89 | /* for malloc(), free() */ 90 | #include <stdlib.h> 91 | static void* XXH_malloc(size_t s) { return malloc(s); } 92 | static void XXH_free (void* p) { free(p); } 93 | /* for memcpy() */ 94 | #include <string.h> 95 | static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } 96 | 97 | 98 | /************************************** 99 | * Basic Types 100 | ***************************************/ 101 | #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ 102 | # include <stdint.h> 103 | typedef uint8_t BYTE; 104 | typedef uint16_t U16; 105 | typedef uint32_t U32; 106 | typedef int32_t S32; 107 | typedef uint64_t U64; 108 | #else 109 | typedef unsigned char BYTE; 110 | typedef unsigned short U16; 111 | typedef unsigned int U32; 112 | typedef signed int S32; 113 | typedef unsigned long long U64; 114 | #endif 115 | 116 | static U32 XXH_read32(const void* memPtr) 117 | { 118 | U32 val32; 119 | memcpy(&val32, memPtr, 4); 120 | return val32; 121 | } 122 | 123 | static U64 XXH_read64(const void* memPtr) 124 | { 125 | U64 val64; 126 | memcpy(&val64, memPtr, 8); 127 | return val64; 128 | } 129 | 130 | 131 | 132 | /****************************************** 133 | * Compiler-specific Functions and Macros 134 | ******************************************/ 135 | #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) 136 | 137 | /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ 138 | #if defined(_MSC_VER) 139 | # define XXH_rotl32(x,r) _rotl(x,r) 140 | # define XXH_rotl64(x,r) _rotl64(x,r) 141 | #else 142 | # define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) 143 | # define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) 144 | #endif 145 | 146 | #if defined(_MSC_VER) /* Visual Studio */ 147 | # define XXH_swap32 _byteswap_ulong 148 | # define XXH_swap64 _byteswap_uint64 149 | #elif GCC_VERSION >= 403 150 | # define XXH_swap32 __builtin_bswap32 151 | # define XXH_swap64 __builtin_bswap64 152 | #else 153 | static U32 XXH_swap32 (U32 x) 154 | { 155 | return ((x << 24) & 0xff000000 ) | 156 | ((x << 8) & 0x00ff0000 ) | 157 | ((x >> 8) & 0x0000ff00 ) | 158 | ((x >> 24) & 0x000000ff ); 159 | } 160 | static U64 XXH_swap64 (U64 x) 161 | { 162 | return ((x << 56) & 0xff00000000000000ULL) | 163 | ((x << 40) & 0x00ff000000000000ULL) | 164 | ((x << 24) & 0x0000ff0000000000ULL) | 165 | ((x << 8) & 0x000000ff00000000ULL) | 166 | ((x >> 8) & 0x00000000ff000000ULL) | 167 | ((x >> 24) &
0x0000000000ff0000ULL) | 168 | ((x >> 40) & 0x000000000000ff00ULL) | 169 | ((x >> 56) & 0x00000000000000ffULL); 170 | } 171 | #endif 172 | 173 | 174 | /*************************************** 175 | * Architecture Macros 176 | ***************************************/ 177 | typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; 178 | #ifndef XXH_CPU_LITTLE_ENDIAN /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example using a compiler switch */ 179 | static const int one = 1; 180 | # define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&one)) 181 | #endif 182 | 183 | 184 | /***************************** 185 | * Memory reads 186 | *****************************/ 187 | typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; 188 | 189 | FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) 190 | { 191 | if (align==XXH_unaligned) 192 | return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); 193 | else 194 | return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); 195 | } 196 | 197 | FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) 198 | { 199 | return XXH_readLE32_align(ptr, endian, XXH_unaligned); 200 | } 201 | 202 | FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) 203 | { 204 | if (align==XXH_unaligned) 205 | return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); 206 | else 207 | return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); 208 | } 209 | 210 | FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) 211 | { 212 | return XXH_readLE64_align(ptr, endian, XXH_unaligned); 213 | } 214 | 215 | 216 | /*************************************** 217 | * Macros 218 | ***************************************/ 219 | #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } /* use only *after* variable declarations */ 220 | 221 | 222 | /*************************************** 223 | * Constants 224 | ***************************************/ 225 | #define PRIME32_1 2654435761U 226 | #define PRIME32_2 2246822519U 227 | #define PRIME32_3 3266489917U 228 | #define PRIME32_4 668265263U 229 | #define PRIME32_5 374761393U 230 | 231 | #define PRIME64_1 11400714785074694791ULL 232 | #define PRIME64_2 14029467366897019727ULL 233 | #define PRIME64_3 1609587929392839161ULL 234 | #define PRIME64_4 9650029242287828579ULL 235 | #define PRIME64_5 2870177450012600261ULL 236 | 237 | 238 | /***************************** 239 | * Simple Hash Functions 240 | *****************************/ 241 | FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) 242 | { 243 | const BYTE* p = (const BYTE*)input; 244 | const BYTE* bEnd = p + len; 245 | U32 h32; 246 | #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) 247 | 248 | #ifdef XXH_ACCEPT_NULL_INPUT_POINTER 249 | if (p==NULL) 250 | { 251 | len=0; 252 | bEnd=p=(const BYTE*)(size_t)16; 253 | } 254 | #endif 255 | 256 | if (len>=16) 257 | { 258 | const BYTE* const limit = bEnd - 16; 259 | U32 v1 = seed + PRIME32_1 + PRIME32_2; 260 | U32 v2 = seed + PRIME32_2; 261 | U32 v3 = seed + 0; 262 | U32 v4 = seed - PRIME32_1; 263 | 264 | do 265 | { 266 | v1 += XXH_get32bits(p) * PRIME32_2; 267 | v1 = XXH_rotl32(v1, 13); 268 | v1 *= PRIME32_1; 269 | p+=4; 270 | v2 += XXH_get32bits(p) * PRIME32_2; 271 | v2 = XXH_rotl32(v2, 13); 272 | v2 *= PRIME32_1; 273 | p+=4; 274 | v3 
+= XXH_get32bits(p) * PRIME32_2; 275 | v3 = XXH_rotl32(v3, 13); 276 | v3 *= PRIME32_1; 277 | p+=4; 278 | v4 += XXH_get32bits(p) * PRIME32_2; 279 | v4 = XXH_rotl32(v4, 13); 280 | v4 *= PRIME32_1; 281 | p+=4; 282 | } 283 | while (p<=limit); 284 | 285 | h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); 286 | } 287 | else 288 | { 289 | h32 = seed + PRIME32_5; 290 | } 291 | 292 | h32 += (U32) len; 293 | 294 | while (p+4<=bEnd) 295 | { 296 | h32 += XXH_get32bits(p) * PRIME32_3; 297 | h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; 298 | p+=4; 299 | } 300 | 301 | while (p<bEnd) 302 | { 303 | h32 += (*p) * PRIME32_5; 304 | h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; 305 | p++; 306 | } 307 | 308 | h32 ^= h32 >> 15; 309 | h32 *= PRIME32_2; 310 | h32 ^= h32 >> 13; 311 | h32 *= PRIME32_3; 312 | h32 ^= h32 >> 16; 313 | 314 | return h32; 315 | } 316 | 317 | 318 | unsigned XXH32 (const void* input, size_t len, unsigned seed) 319 | { 320 | #if 0 321 | /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ 322 | XXH32_state_t state; 323 | XXH32_reset(&state, seed); 324 | XXH32_update(&state, input, len); 325 | return XXH32_digest(&state); 326 | #else 327 | XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; 328 | 329 | # if !defined(XXH_USE_UNALIGNED_ACCESS) 330 | if ((((size_t)input) & 3) == 0) /* Input is 4-bytes aligned, leverage the speed benefit */ 331 | { 332 | if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) 333 | return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); 334 | else 335 | return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); 336 | } 337 | # endif 338 | 339 | if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) 340 | return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); 341 | else 342 | return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); 343 | #endif 344 | } 345 | 346 | FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) 347 | { 348 | const BYTE* p = (const BYTE*)input; 349 | const BYTE* bEnd = p + len; 350 | U64 h64; 351 | #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) 352 | 353 | #ifdef XXH_ACCEPT_NULL_INPUT_POINTER 354 | if (p==NULL) 355 | { 356 | len=0; 357 | bEnd=p=(const BYTE*)(size_t)32; 358 | } 359 | #endif 360 | 361 | if (len>=32) 362 | { 363 | const BYTE* const limit = bEnd - 32; 364 | U64 v1 = seed + PRIME64_1 + PRIME64_2; 365 | U64 v2 = seed + PRIME64_2; 366 | U64 v3 = seed + 0; 367 | U64 v4 = seed - PRIME64_1; 368 | 369 | do 370 | { 371 | v1 += XXH_get64bits(p) * PRIME64_2; 372 | p+=8; 373 | v1 = XXH_rotl64(v1, 31); 374 | v1 *= PRIME64_1; 375 | v2 += XXH_get64bits(p) * PRIME64_2; 376 | p+=8; 377 | v2 = XXH_rotl64(v2, 31); 378 | v2 *= PRIME64_1; 379 | v3 += XXH_get64bits(p) * PRIME64_2; 380 | p+=8; 381 | v3 = XXH_rotl64(v3, 31); 382 | v3 *= PRIME64_1; 383 | v4 += XXH_get64bits(p) * PRIME64_2; 384 | p+=8; 385 | v4 = XXH_rotl64(v4, 31); 386 | v4 *= PRIME64_1; 387 | } 388 | while (p<=limit); 389 | 390 | h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); 391 | 392 | v1 *= PRIME64_2; 393 | v1 = XXH_rotl64(v1, 31); 394 | v1 *= PRIME64_1; 395 | h64 ^= v1; 396 | h64 = h64 * PRIME64_1 + PRIME64_4; 397 | 398 | v2 *= PRIME64_2; 399 | v2 = XXH_rotl64(v2, 31); 400 | v2 *= PRIME64_1; 401 | h64 ^= v2; 402 | h64 = h64 * PRIME64_1 + PRIME64_4; 403 | 404 | v3 *= PRIME64_2; 405 | v3 = XXH_rotl64(v3, 31); 406 | v3 *= PRIME64_1; 407 | h64 ^= v3; 408 | h64 = h64 * PRIME64_1 + PRIME64_4; 409 | 410 | v4 *= PRIME64_2; 411
| v4 = XXH_rotl64(v4, 31); 412 | v4 *= PRIME64_1; 413 | h64 ^= v4; 414 | h64 = h64 * PRIME64_1 + PRIME64_4; 415 | } 416 | else 417 | { 418 | h64 = seed + PRIME64_5; 419 | } 420 | 421 | h64 += (U64) len; 422 | 423 | while (p+8<=bEnd) 424 | { 425 | U64 k1 = XXH_get64bits(p); 426 | k1 *= PRIME64_2; 427 | k1 = XXH_rotl64(k1,31); 428 | k1 *= PRIME64_1; 429 | h64 ^= k1; 430 | h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; 431 | p+=8; 432 | } 433 | 434 | if (p+4<=bEnd) 435 | { 436 | h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; 437 | h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; 438 | p+=4; 439 | } 440 | 441 | while (p<bEnd) 442 | { 443 | h64 ^= (*p) * PRIME64_5; 444 | h64 = XXH_rotl64(h64, 11) * PRIME64_1; 445 | p++; 446 | } 447 | 448 | h64 ^= h64 >> 33; 449 | h64 *= PRIME64_2; 450 | h64 ^= h64 >> 29; 451 | h64 *= PRIME64_3; 452 | h64 ^= h64 >> 32; 453 | 454 | return h64; 455 | } 456 | 457 | 458 | unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) 459 | { 460 | #if 0 461 | /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ 462 | XXH64_state_t state; 463 | XXH64_reset(&state, seed); 464 | XXH64_update(&state, input, len); 465 | return XXH64_digest(&state); 466 | #else 467 | XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; 468 | 469 | # if !defined(XXH_USE_UNALIGNED_ACCESS) 470 | if ((((size_t)input) & 7)==0) /* Input is aligned, let's leverage the speed advantage */ 471 | { 472 | if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) 473 | return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); 474 | else 475 | return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); 476 | } 477 | # endif 478 | 479 | if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) 480 | return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); 481 | else 482 | return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); 483 | #endif 484 | } 485 | 486 | /**************************************************** 487 | * Advanced Hash Functions 488 | ****************************************************/ 489 | 490 | /*** Allocation ***/ 491 | typedef struct 492 | { 493 | U64 total_len; 494 | U32 seed; 495 | U32 v1; 496 | U32 v2; 497 | U32 v3; 498 | U32 v4; 499 | U32 mem32[4]; /* defined as U32 for alignment */ 500 | U32 memsize; 501 | } XXH_istate32_t; 502 | 503 | typedef struct 504 | { 505 | U64 total_len; 506 | U64 seed; 507 | U64 v1; 508 | U64 v2; 509 | U64 v3; 510 | U64 v4; 511 | U64 mem64[4]; /* defined as U64 for alignment */ 512 | U32 memsize; 513 | } XXH_istate64_t; 514 | 515 | 516 | XXH32_state_t* XXH32_createState(void) 517 | { 518 | XXH_STATIC_ASSERT(sizeof(XXH32_state_t) >= sizeof(XXH_istate32_t)); /* A compilation error here means XXH32_state_t is not large enough */ 519 | return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); 520 | } 521 | XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) 522 | { 523 | XXH_free(statePtr); 524 | return XXH_OK; 525 | } 526 | 527 | XXH64_state_t* XXH64_createState(void) 528 | { 529 | XXH_STATIC_ASSERT(sizeof(XXH64_state_t) >= sizeof(XXH_istate64_t)); /* A compilation error here means XXH64_state_t is not large enough */ 530 | return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); 531 | } 532 | XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) 533 | { 534 | XXH_free(statePtr); 535 | return XXH_OK; 536 | } 537 | 538 | 539 | /*** Hash feed ***/ 540 | 541 | XXH_errorcode XXH32_reset(XXH32_state_t* state_in, U32 seed) 542 | { 543 | XXH_istate32_t* state = (XXH_istate32_t*) state_in; 544 | state->seed = seed; 545 |
state->v1 = seed + PRIME32_1 + PRIME32_2; 546 | state->v2 = seed + PRIME32_2; 547 | state->v3 = seed + 0; 548 | state->v4 = seed - PRIME32_1; 549 | state->total_len = 0; 550 | state->memsize = 0; 551 | return XXH_OK; 552 | } 553 | 554 | XXH_errorcode XXH64_reset(XXH64_state_t* state_in, unsigned long long seed) 555 | { 556 | XXH_istate64_t* state = (XXH_istate64_t*) state_in; 557 | state->seed = seed; 558 | state->v1 = seed + PRIME64_1 + PRIME64_2; 559 | state->v2 = seed + PRIME64_2; 560 | state->v3 = seed + 0; 561 | state->v4 = seed - PRIME64_1; 562 | state->total_len = 0; 563 | state->memsize = 0; 564 | return XXH_OK; 565 | } 566 | 567 | 568 | FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const void* input, size_t len, XXH_endianess endian) 569 | { 570 | XXH_istate32_t* state = (XXH_istate32_t *) state_in; 571 | const BYTE* p = (const BYTE*)input; 572 | const BYTE* const bEnd = p + len; 573 | 574 | #ifdef XXH_ACCEPT_NULL_INPUT_POINTER 575 | if (input==NULL) return XXH_ERROR; 576 | #endif 577 | 578 | state->total_len += len; 579 | 580 | if (state->memsize + len < 16) /* fill in tmp buffer */ 581 | { 582 | XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); 583 | state->memsize += (U32)len; 584 | return XXH_OK; 585 | } 586 | 587 | if (state->memsize) /* some data left from previous update */ 588 | { 589 | XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); 590 | { 591 | const U32* p32 = state->mem32; 592 | state->v1 += XXH_readLE32(p32, endian) * PRIME32_2; 593 | state->v1 = XXH_rotl32(state->v1, 13); 594 | state->v1 *= PRIME32_1; 595 | p32++; 596 | state->v2 += XXH_readLE32(p32, endian) * PRIME32_2; 597 | state->v2 = XXH_rotl32(state->v2, 13); 598 | state->v2 *= PRIME32_1; 599 | p32++; 600 | state->v3 += XXH_readLE32(p32, endian) * PRIME32_2; 601 | state->v3 = XXH_rotl32(state->v3, 13); 602 | state->v3 *= PRIME32_1; 603 | p32++; 604 | state->v4 += XXH_readLE32(p32, endian) * PRIME32_2; 605 | state->v4 = XXH_rotl32(state->v4, 13); 606 | state->v4 *= PRIME32_1; 607 | p32++; 608 | } 609 | p += 16-state->memsize; 610 | state->memsize = 0; 611 | } 612 | 613 | if (p <= bEnd-16) 614 | { 615 | const BYTE* const limit = bEnd - 16; 616 | U32 v1 = state->v1; 617 | U32 v2 = state->v2; 618 | U32 v3 = state->v3; 619 | U32 v4 = state->v4; 620 | 621 | do 622 | { 623 | v1 += XXH_readLE32(p, endian) * PRIME32_2; 624 | v1 = XXH_rotl32(v1, 13); 625 | v1 *= PRIME32_1; 626 | p+=4; 627 | v2 += XXH_readLE32(p, endian) * PRIME32_2; 628 | v2 = XXH_rotl32(v2, 13); 629 | v2 *= PRIME32_1; 630 | p+=4; 631 | v3 += XXH_readLE32(p, endian) * PRIME32_2; 632 | v3 = XXH_rotl32(v3, 13); 633 | v3 *= PRIME32_1; 634 | p+=4; 635 | v4 += XXH_readLE32(p, endian) * PRIME32_2; 636 | v4 = XXH_rotl32(v4, 13); 637 | v4 *= PRIME32_1; 638 | p+=4; 639 | } 640 | while (p<=limit); 641 | 642 | state->v1 = v1; 643 | state->v2 = v2; 644 | state->v3 = v3; 645 | state->v4 = v4; 646 | } 647 | 648 | if (p < bEnd) 649 | { 650 | XXH_memcpy(state->mem32, p, bEnd-p); 651 | state->memsize = (int)(bEnd-p); 652 | } 653 | 654 | return XXH_OK; 655 | } 656 | 657 | XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) 658 | { 659 | XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; 660 | 661 | if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) 662 | return XXH32_update_endian(state_in, input, len, XXH_littleEndian); 663 | else 664 | return XXH32_update_endian(state_in, input, len, XXH_bigEndian); 665 | } 666 | 667 | 668 | 669 | 
FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endianess endian) 670 | { 671 | const XXH_istate32_t* state = (const XXH_istate32_t*) state_in; 672 | const BYTE * p = (const BYTE*)state->mem32; 673 | const BYTE* bEnd = (const BYTE*)(state->mem32) + state->memsize; 674 | U32 h32; 675 | 676 | if (state->total_len >= 16) 677 | { 678 | h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); 679 | } 680 | else 681 | { 682 | h32 = state->seed + PRIME32_5; 683 | } 684 | 685 | h32 += (U32) state->total_len; 686 | 687 | while (p+4<=bEnd) 688 | { 689 | h32 += XXH_readLE32(p, endian) * PRIME32_3; 690 | h32 = XXH_rotl32(h32, 17) * PRIME32_4; 691 | p+=4; 692 | } 693 | 694 | while (p<bEnd) 695 | { 696 | h32 += (*p) * PRIME32_5; 697 | h32 = XXH_rotl32(h32, 11) * PRIME32_1; 698 | p++; 699 | } 700 | 701 | h32 ^= h32 >> 15; 702 | h32 *= PRIME32_2; 703 | h32 ^= h32 >> 13; 704 | h32 *= PRIME32_3; 705 | h32 ^= h32 >> 16; 706 | 707 | return h32; 708 | } 709 | 710 | 711 | U32 XXH32_digest (const XXH32_state_t* state_in) 712 | { 713 | XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; 714 | 715 | if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) 716 | return XXH32_digest_endian(state_in, XXH_littleEndian); 717 | else 718 | return XXH32_digest_endian(state_in, XXH_bigEndian); 719 | } 720 | 721 | 722 | FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const void* input, size_t len, XXH_endianess endian) 723 | { 724 | XXH_istate64_t * state = (XXH_istate64_t *) state_in; 725 | const BYTE* p = (const BYTE*)input; 726 | const BYTE* const bEnd = p + len; 727 | 728 | #ifdef XXH_ACCEPT_NULL_INPUT_POINTER 729 | if (input==NULL) return XXH_ERROR; 730 | #endif 731 | 732 | state->total_len += len; 733 | 734 | if (state->memsize + len < 32) /* fill in tmp buffer */ 735 | { 736 | XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); 737 | state->memsize += (U32)len; 738 | return XXH_OK; 739 | } 740 | 741 | if (state->memsize) /* some data left from previous update */ 742 | { 743 | XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); 744 | { 745 | const U64* p64 = state->mem64; 746 | state->v1 += XXH_readLE64(p64, endian) * PRIME64_2; 747 | state->v1 = XXH_rotl64(state->v1, 31); 748 | state->v1 *= PRIME64_1; 749 | p64++; 750 | state->v2 += XXH_readLE64(p64, endian) * PRIME64_2; 751 | state->v2 = XXH_rotl64(state->v2, 31); 752 | state->v2 *= PRIME64_1; 753 | p64++; 754 | state->v3 += XXH_readLE64(p64, endian) * PRIME64_2; 755 | state->v3 = XXH_rotl64(state->v3, 31); 756 | state->v3 *= PRIME64_1; 757 | p64++; 758 | state->v4 += XXH_readLE64(p64, endian) * PRIME64_2; 759 | state->v4 = XXH_rotl64(state->v4, 31); 760 | state->v4 *= PRIME64_1; 761 | p64++; 762 | } 763 | p += 32-state->memsize; 764 | state->memsize = 0; 765 | } 766 | 767 | if (p+32 <= bEnd) 768 | { 769 | const BYTE* const limit = bEnd - 32; 770 | U64 v1 = state->v1; 771 | U64 v2 = state->v2; 772 | U64 v3 = state->v3; 773 | U64 v4 = state->v4; 774 | 775 | do 776 | { 777 | v1 += XXH_readLE64(p, endian) * PRIME64_2; 778 | v1 = XXH_rotl64(v1, 31); 779 | v1 *= PRIME64_1; 780 | p+=8; 781 | v2 += XXH_readLE64(p, endian) * PRIME64_2; 782 | v2 = XXH_rotl64(v2, 31); 783 | v2 *= PRIME64_1; 784 | p+=8; 785 | v3 += XXH_readLE64(p, endian) * PRIME64_2; 786 | v3 = XXH_rotl64(v3, 31); 787 | v3 *= PRIME64_1; 788 | p+=8; 789 | v4 += XXH_readLE64(p, endian) * PRIME64_2; 790 | v4 = XXH_rotl64(v4, 31); 791 | v4 *= PRIME64_1; 792 | p+=8; 793 | } 794 | while (p<=limit); 795 | 796 | state->v1 = v1; 797 | state->v2 = v2; 798 | state->v3 = v3; 799 |
state->v4 = v4; 800 | } 801 | 802 | if (p < bEnd) 803 | { 804 | XXH_memcpy(state->mem64, p, bEnd-p); 805 | state->memsize = (int)(bEnd-p); 806 | } 807 | 808 | return XXH_OK; 809 | } 810 | 811 | XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) 812 | { 813 | XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; 814 | 815 | if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) 816 | return XXH64_update_endian(state_in, input, len, XXH_littleEndian); 817 | else 818 | return XXH64_update_endian(state_in, input, len, XXH_bigEndian); 819 | } 820 | 821 | 822 | 823 | FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state_in, XXH_endianess endian) 824 | { 825 | const XXH_istate64_t * state = (const XXH_istate64_t *) state_in; 826 | const BYTE * p = (const BYTE*)state->mem64; 827 | const BYTE* bEnd = (const BYTE*)state->mem64 + state->memsize; 828 | U64 h64; 829 | 830 | if (state->total_len >= 32) 831 | { 832 | U64 v1 = state->v1; 833 | U64 v2 = state->v2; 834 | U64 v3 = state->v3; 835 | U64 v4 = state->v4; 836 | 837 | h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); 838 | 839 | v1 *= PRIME64_2; 840 | v1 = XXH_rotl64(v1, 31); 841 | v1 *= PRIME64_1; 842 | h64 ^= v1; 843 | h64 = h64*PRIME64_1 + PRIME64_4; 844 | 845 | v2 *= PRIME64_2; 846 | v2 = XXH_rotl64(v2, 31); 847 | v2 *= PRIME64_1; 848 | h64 ^= v2; 849 | h64 = h64*PRIME64_1 + PRIME64_4; 850 | 851 | v3 *= PRIME64_2; 852 | v3 = XXH_rotl64(v3, 31); 853 | v3 *= PRIME64_1; 854 | h64 ^= v3; 855 | h64 = h64*PRIME64_1 + PRIME64_4; 856 | 857 | v4 *= PRIME64_2; 858 | v4 = XXH_rotl64(v4, 31); 859 | v4 *= PRIME64_1; 860 | h64 ^= v4; 861 | h64 = h64*PRIME64_1 + PRIME64_4; 862 | } 863 | else 864 | { 865 | h64 = state->seed + PRIME64_5; 866 | } 867 | 868 | h64 += (U64) state->total_len; 869 | 870 | while (p+8<=bEnd) 871 | { 872 | U64 k1 = XXH_readLE64(p, endian); 873 | k1 *= PRIME64_2; 874 | k1 = XXH_rotl64(k1,31); 875 | k1 *= PRIME64_1; 876 | h64 ^= k1; 877 | h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; 878 | p+=8; 879 | } 880 | 881 | if (p+4<=bEnd) 882 | { 883 | h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; 884 | h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; 885 | p+=4; 886 | } 887 | 888 | while (p<bEnd) 889 | { 890 | h64 ^= (*p) * PRIME64_5; 891 | h64 = XXH_rotl64(h64, 11) * PRIME64_1; 892 | p++; 893 | } 894 | 895 | h64 ^= h64 >> 33; 896 | h64 *= PRIME64_2; 897 | h64 ^= h64 >> 29; 898 | h64 *= PRIME64_3; 899 | h64 ^= h64 >> 32; 900 | 901 | return h64; 902 | } 903 | 904 | 905 | unsigned long long XXH64_digest (const XXH64_state_t* state_in) 906 | { 907 | XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; 908 | 909 | if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) 910 | return XXH64_digest_endian(state_in, XXH_littleEndian); 911 | else 912 | return XXH64_digest_endian(state_in, XXH_bigEndian); 913 | } 914 | 915 | 916 | -------------------------------------------------------------------------------- /xxhash.h: -------------------------------------------------------------------------------- 1 | /* 2 | xxHash - Extremely Fast Hash algorithm 3 | Header File 4 | Copyright (C) 2012-2015, Yann Collet. 5 | 6 | BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are 10 | met: 11 | 12 | * Redistributions of source code must retain the above copyright 13 | notice, this list of conditions and the following disclaimer
14 | * Redistributions in binary form must reproduce the above 15 | copyright notice, this list of conditions and the following disclaimer 16 | in the documentation and/or other materials provided with the 17 | distribution. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | You can contact the author at : 32 | - xxHash source repository : https://github.com/Cyan4973/xxHash 33 | */ 34 | 35 | /* Notice extracted from xxHash homepage : 36 | 37 | xxHash is an extremely fast Hash algorithm, running at RAM speed limits. 38 | It also successfully passes all tests from the SMHasher suite. 39 | 40 | Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) 41 | 42 | Name Speed Q.Score Author 43 | xxHash 5.4 GB/s 10 44 | CrapWow 3.2 GB/s 2 Andrew 45 | MumurHash 3a 2.7 GB/s 10 Austin Appleby 46 | SpookyHash 2.0 GB/s 10 Bob Jenkins 47 | SBox 1.4 GB/s 9 Bret Mulvey 48 | Lookup3 1.2 GB/s 9 Bob Jenkins 49 | SuperFastHash 1.2 GB/s 1 Paul Hsieh 50 | CityHash64 1.05 GB/s 10 Pike & Alakuijala 51 | FNV 0.55 GB/s 5 Fowler, Noll, Vo 52 | CRC32 0.43 GB/s 9 53 | MD5-32 0.33 GB/s 10 Ronald L. Rivest 54 | SHA1-32 0.28 GB/s 10 55 | 56 | Q.Score is a measure of quality of the hash function. 57 | It depends on successfully passing SMHasher test set. 58 | 10 is a perfect score. 59 | 60 | A 64-bits version, named XXH64, is available since r35. 61 | It offers much better speed, but for 64-bits applications only. 62 | Name Speed on 64 bits Speed on 32 bits 63 | XXH64 13.8 GB/s 1.9 GB/s 64 | XXH32 6.8 GB/s 6.0 GB/s 65 | */ 66 | 67 | #pragma once 68 | 69 | #if defined (__cplusplus) 70 | extern "C" { 71 | #endif 72 | 73 | 74 | /***************************** 75 | * Definitions 76 | *****************************/ 77 | #include <stddef.h> /* size_t */ 78 | typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; 79 | 80 | 81 | /***************************** 82 | * Namespace Emulation 83 | *****************************/ 84 | /* Motivations : 85 | 86 | If you need to include xxHash into your library, 87 | but wish to avoid xxHash symbols to be present on your library interface 88 | in an effort to avoid potential name collision if another library also includes xxHash, 89 | 90 | you can use XXH_NAMESPACE, which will automatically prefix any symbol from xxHash 91 | with the value of XXH_NAMESPACE (so avoid to keep it NULL, and avoid numeric values). 92 | 93 | Note that no change is required within the calling program : 94 | it can still call xxHash functions using their regular name. 95 | They will be automatically translated by this header.
96 | */ 97 | #ifdef XXH_NAMESPACE 98 | # define XXH_CAT(A,B) A##B 99 | # define XXH_NAME2(A,B) XXH_CAT(A,B) 100 | # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) 101 | # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) 102 | # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) 103 | # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) 104 | # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) 105 | # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) 106 | # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) 107 | # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) 108 | # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) 109 | # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) 110 | # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) 111 | # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) 112 | #endif 113 | 114 | 115 | /***************************** 116 | * Simple Hash Functions 117 | *****************************/ 118 | 119 | unsigned int XXH32 (const void* input, size_t length, unsigned seed); 120 | unsigned long long XXH64 (const void* input, size_t length, unsigned long long seed); 121 | 122 | /* 123 | XXH32() : 124 | Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". 125 | The memory between input & input+length must be valid (allocated and read-accessible). 126 | "seed" can be used to alter the result predictably. 127 | This function successfully passes all SMHasher tests. 128 | Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s 129 | XXH64() : 130 | Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". 131 | Faster on 64-bits systems. Slower on 32-bits systems. 132 | */ 133 | 134 | 135 | 136 | /***************************** 137 | * Advanced Hash Functions 138 | *****************************/ 139 | typedef struct { long long ll[ 6]; } XXH32_state_t; 140 | typedef struct { long long ll[11]; } XXH64_state_t; 141 | 142 | /* 143 | These structures allow static allocation of XXH states. 144 | States must then be initialized using XXHnn_reset() before first use. 145 | 146 | If you prefer dynamic allocation, please refer to functions below. 147 | */ 148 | 149 | XXH32_state_t* XXH32_createState(void); 150 | XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); 151 | 152 | XXH64_state_t* XXH64_createState(void); 153 | XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); 154 | 155 | /* 156 | These functions create and release memory for XXH state. 157 | States must then be initialized using XXHnn_reset() before first use. 158 | */ 159 | 160 | 161 | XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned seed); 162 | XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); 163 | unsigned int XXH32_digest (const XXH32_state_t* statePtr); 164 | 165 | XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); 166 | XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); 167 | unsigned long long XXH64_digest (const XXH64_state_t* statePtr); 168 | 169 | /* 170 | These functions calculate the xxHash of an input provided in multiple smaller packets, 171 | as opposed to an input provided as a single block. 172 | 173 | XXH state space must first be allocated, using either static or dynamic method provided above. 174 | 175 | Start a new hash by initializing state with a seed, using XXHnn_reset(). 
176 | 177 | Then, feed the hash state by calling XXHnn_update() as many times as necessary. 178 | Obviously, input must be valid, meaning allocated and read accessible. 179 | The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. 180 | 181 | Finally, you can produce a hash anytime, by using XXHnn_digest(). 182 | This function returns the final nn-bits hash. 183 | You can nonetheless continue feeding the hash state with more input, 184 | and therefore get some new hashes, by calling again XXHnn_digest(). 185 | 186 | When you are done, don't forget to free XXH state space, using typically XXHnn_freeState(). 187 | */ 188 | 189 | 190 | #if defined (__cplusplus) 191 | } 192 | #endif 193 | --------------------------------------------------------------------------------
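As a companion to the one-shot and streaming APIs declared in xxhash.h (XXH32()/XXH64() and XXHnn_createState()/XXHnn_reset()/XXHnn_update()/XXHnn_digest()/XXHnn_freeState()), here is a minimal C sketch of how a caller might use them. It is an illustrative example only, assuming xxhash.c and xxhash.h are compiled into the program; the file name demo.c, the build command, and the sample input are hypothetical and not part of this repository.

```
/* demo.c - illustrative sketch only; e.g. build with: cc demo.c xxhash.c -o demo */
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    const char* msg = "hello world";   /* hypothetical sample input */
    unsigned seed = 0;                 /* same default seed php_xxhash.c passes to XXH32()/XXH64() */

    /* One-shot API: hash the whole buffer in a single call */
    unsigned int h32 = XXH32(msg, strlen(msg), seed);
    unsigned long long h64 = XXH64(msg, strlen(msg), 0);
    printf("XXH32  = %08x\n", h32);
    printf("XXH64  = %016llx\n", h64);

    /* Streaming API: feed the same input in two packets;
       the final digest matches the one-shot result */
    XXH32_state_t* state = XXH32_createState();
    if (state == NULL) return 1;
    XXH32_reset(state, seed);
    XXH32_update(state, msg, 5);                    /* "hello" */
    XXH32_update(state, msg + 5, strlen(msg) - 5);  /* " world" */
    printf("stream = %08x\n", XXH32_digest(state));
    XXH32_freeState(state);
    return 0;
}
```

The streaming path is what the `#if 0` "simple version" inside XXH32()/XXH64() in xxhash.c also demonstrates: reset, update, digest over a single state object, with digest callable at any point without invalidating the state.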