├── .gitattributes
├── .gitignore
├── .travis.yml
├── LICENSE
├── Makefile
├── README.md
├── appveyor.yml
├── cmake_unofficial
├── .gitignore
├── CMakeLists.txt
└── README.md
├── doc
└── xxhash_spec.md
├── xxh3.h
├── xxhash.c
└── xxhash.h
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Set the default behavior
2 | * text eol=lf
3 |
4 | # Explicitly declare source files
5 | *.c text eol=lf
6 | *.h text eol=lf
7 |
8 | # Denote files that should not be modified.
9 | *.odt binary
10 |
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # objects
2 | *.o
3 |
4 | # libraries
5 | libxxhash.*
6 |
7 | # Executables
8 | xxh32sum
9 | xxh64sum
10 | xxhsum
11 | xxhsum.exe
12 | xxhsum32
13 | xxhsum_privateXXH
14 | xxhsum_inlinedXXH
15 | xxhsum_inlinedXXH.exe
16 |
17 | # Mac OS-X artefacts
18 | *.dSYM
19 | .DS_Store
20 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: c
2 |
3 | matrix:
4 | fast_finish: true
5 | include:
6 |
7 | - name: General linux tests (Xenial)
8 | dist: xenial
9 | before_install:
10 | - sudo apt-get update -qq
11 | - sudo apt-get install -qq clang
12 | - sudo apt-get install -qq g++-multilib
13 | - sudo apt-get install -qq gcc-multilib
14 | - sudo apt-get install -qq cppcheck
15 | script:
16 | - make -B test-all
17 |
18 | - name: Check results consistency on x64
19 | script:
20 | - CPPFLAGS=-DXXH_VECTOR=0 make check # Scalar code path
21 | - make clean
22 | - CPPFLAGS=-DXXH_VECTOR=1 make check # SSE2 code path
23 | - make clean
24 | - CPPFLAGS="-mavx2 -DXXH_VECTOR=2" make check # AVX2 code path
25 |
26 | - name: ARM + aarch64 compilation and consistency checks
27 | dist: xenial
28 | install:
29 | - sudo apt-get install -qq
30 | qemu-system-arm
31 | qemu-user-static
32 | gcc-arm-linux-gnueabi
33 | libc6-dev-armel-cross
34 | gcc-aarch64-linux-gnu
35 | libc6-dev-arm64-cross
36 | script:
37 | # arm (32-bit)
38 | - CC=arm-linux-gnueabi-gcc CPPFLAGS=-DXXH_VECTOR=0 LDFLAGS=-static RUN_ENV=qemu-arm-static make check # Scalar code path
39 | - make clean
40 | # Note : the following test (ARM 32-bit + NEON) is disabled for the time being.
41 | # I haven't yet found a way to make it link on Travis CI using gcc cross-compilation.
42 | # NEON code path is fortunately validated through `aarch64` below.
43 | # - CC=arm-linux-gnueabi-gcc CPPFLAGS=-DXXH_VECTOR=3 CFLAGS="-O3 -march=armv7-a -mfloat-abi=hard -mfpu=neon" LDFLAGS=-static RUN_ENV=qemu-arm-static make check # NEON code path
44 | - make clean
45 | # aarch64
46 | - CC=aarch64-linux-gnu-gcc CPPFLAGS=-DXXH_VECTOR=0 LDFLAGS=-static RUN_ENV=qemu-aarch64-static make check # Scalar code path
47 | - make clean
48 | - CC=aarch64-linux-gnu-gcc CPPFLAGS=-DXXH_VECTOR=3 LDFLAGS=-static RUN_ENV=qemu-aarch64-static make check # NEON code path
49 | - make clean
50 |
51 | - name: PowerPC + PPC64 compilation and consistency checks
52 | install:
53 | - sudo apt-get install -qq qemu-system-ppc qemu-user-static gcc-powerpc-linux-gnu
54 | script:
55 | - CC=powerpc-linux-gnu-gcc RUN_ENV=qemu-ppc-static CPPFLAGS=-m32 LDFLAGS=-static make check # Only scalar code path available
56 | - make clean
57 | - CC=powerpc-linux-gnu-gcc RUN_ENV=qemu-ppc64-static CFLAGS="-O3 -m64" LDFLAGS="-static -m64" make check # Only scalar code path available
58 | - make clean
59 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | xxHash Library
2 | Copyright (c) 2012-2014, Yann Collet
3 | All rights reserved.
4 |
5 | Redistribution and use in source and binary forms, with or without modification,
6 | are permitted provided that the following conditions are met:
7 |
8 | * Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 |
11 | * Redistributions in binary form must reproduce the above copyright notice, this
12 | list of conditions and the following disclaimer in the documentation and/or
13 | other materials provided with the distribution.
14 |
15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
19 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
22 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # ################################################################
2 | # xxHash Makefile
3 | # Copyright (C) Yann Collet 2012-2015
4 | #
5 | # GPL v2 License
6 | #
7 | # This program is free software; you can redistribute it and/or modify
8 | # it under the terms of the GNU General Public License as published by
9 | # the Free Software Foundation; either version 2 of the License, or
10 | # (at your option) any later version.
11 | #
12 | # This program is distributed in the hope that it will be useful,
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | # GNU General Public License for more details.
16 | #
17 | # You should have received a copy of the GNU General Public License along
18 | # with this program; if not, write to the Free Software Foundation, Inc.,
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | #
21 | # You can contact the author at :
22 | # - xxHash source repository : http://code.google.com/p/xxhash/
23 | # ################################################################
24 | # xxhsum : provides 32/64 bits hash of one or multiple files, or stdin
25 | # ################################################################
26 |
27 | # Version numbers
28 | LIBVER_MAJOR_SCRIPT:=`sed -n '/define XXH_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < xxhash.h`
29 | LIBVER_MINOR_SCRIPT:=`sed -n '/define XXH_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < xxhash.h`
30 | LIBVER_PATCH_SCRIPT:=`sed -n '/define XXH_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < xxhash.h`
31 | LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT))
32 | LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT))
33 | LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT))
34 | LIBVER := $(LIBVER_MAJOR).$(LIBVER_MINOR).$(LIBVER_PATCH)
35 |
36 | CFLAGS ?= -O3
37 | DEBUGFLAGS+=-Wall -Wextra -Wconversion -Wcast-qual -Wcast-align -Wshadow \
38 | -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
39 | -Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security \
40 | -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
41 | -Wredundant-decls -Wstrict-overflow=5
42 | CFLAGS += $(DEBUGFLAGS)
43 | FLAGS = $(CFLAGS) $(CPPFLAGS) $(MOREFLAGS)
44 | XXHSUM_VERSION = $(LIBVER)
45 | MD2ROFF = ronn
46 | MD2ROFF_FLAGS = --roff --warnings --manual="User Commands" --organization="xxhsum $(XXHSUM_VERSION)"
47 |
48 | # Define *.exe as extension for Windows systems
49 | ifneq (,$(filter Windows%,$(OS)))
50 | EXT =.exe
51 | else
52 | EXT =
53 | endif
54 |
55 | # OS X linker doesn't support -soname, and use different extension
56 | # see : https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryDesignGuidelines.html
57 | ifeq ($(shell uname), Darwin)
58 | SHARED_EXT = dylib
59 | SHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT)
60 | SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT)
61 | SONAME_FLAGS = -install_name $(LIBDIR)/libxxhash.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER)
62 | else
63 | SONAME_FLAGS = -Wl,-soname=libxxhash.$(SHARED_EXT).$(LIBVER_MAJOR)
64 | SHARED_EXT = so
65 | SHARED_EXT_MAJOR = $(SHARED_EXT).$(LIBVER_MAJOR)
66 | SHARED_EXT_VER = $(SHARED_EXT).$(LIBVER)
67 | endif
68 |
69 | LIBXXH = libxxhash.$(SHARED_EXT_VER)
70 |
71 |
72 | .PHONY: default
73 | default: DEBUGFLAGS=
74 | default: lib xxhsum_and_links
75 |
76 | .PHONY: all
77 | all: lib xxhsum xxhsum_inlinedXXH
78 |
79 | xxhsum : xxhash.o xxhsum.o
80 |
81 | xxhsum32: CFLAGS += -m32
82 | xxhsum32: xxhash.c xxhsum.c
83 | $(CC) $(FLAGS) $^ $(LDFLAGS) -o $@$(EXT)
84 |
85 | xxhash.o: xxhash.h xxh3.h
86 |
87 | xxhsum.o: xxhash.h
88 |
89 | .PHONY: xxhsum_and_links
90 | xxhsum_and_links: xxhsum xxh32sum xxh64sum
91 |
92 | xxh32sum xxh64sum: xxhsum
93 | ln -sf $^ $@
94 |
95 | xxhsum_inlinedXXH: CPPFLAGS += -DXXH_INLINE_ALL
96 | xxhsum_inlinedXXH: xxhsum.c
97 | $(CC) $(FLAGS) $^ -o $@$(EXT)
98 |
99 |
100 | # library
101 |
102 | libxxhash.a: ARFLAGS = rcs
103 | libxxhash.a: xxhash.o
104 | $(AR) $(ARFLAGS) $@ $^
105 |
106 | $(LIBXXH): LDFLAGS += -shared
107 | ifeq (,$(filter Windows%,$(OS)))
108 | $(LIBXXH): CFLAGS += -fPIC
109 | endif
110 | $(LIBXXH): xxhash.c
111 | $(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@
112 | ln -sf $@ libxxhash.$(SHARED_EXT_MAJOR)
113 | ln -sf $@ libxxhash.$(SHARED_EXT)
114 |
115 | libxxhash : $(LIBXXH)
116 |
117 | .PHONY: lib
118 | lib: libxxhash.a libxxhash
119 |
120 |
121 | # =================================================
122 | # tests
123 | # =================================================
124 |
125 | # make check can be run with cross-compiled binaries on emulated environments (qemu user mode)
126 | # by setting $(RUN_ENV) to the target emulation environment
127 | .PHONY: check
128 | check: xxhsum
129 | # stdin
130 | $(RUN_ENV) ./xxhsum < xxhash.c
131 | # multiple files
132 | $(RUN_ENV) ./xxhsum xxhash.* xxhsum.*
133 | # internal bench
134 | $(RUN_ENV) ./xxhsum -bi1
135 | # file bench
136 | $(RUN_ENV) ./xxhsum -bi1 xxhash.c
137 |
138 | .PHONY: test-mem
139 | test-mem: xxhsum
140 | # memory tests
141 | valgrind --leak-check=yes --error-exitcode=1 ./xxhsum -bi1 xxhash.c
142 | valgrind --leak-check=yes --error-exitcode=1 ./xxhsum -H0 xxhash.c
143 | valgrind --leak-check=yes --error-exitcode=1 ./xxhsum -H1 xxhash.c
144 |
145 | .PHONY: test32
146 | test32: clean xxhsum32
147 | @echo ---- test 32-bit ----
148 | ./xxhsum32 -bi1 xxhash.c
149 |
150 | test-xxhsum-c: xxhsum
151 | # xxhsum to/from pipe
152 | ./xxhsum lib* | ./xxhsum -c -
153 | ./xxhsum -H0 lib* | ./xxhsum -c -
154 | # xxhsum to/from file, shell redirection
155 | ./xxhsum lib* > .test.xxh64
156 | ./xxhsum -H0 lib* > .test.xxh32
157 | ./xxhsum -c .test.xxh64
158 | ./xxhsum -c .test.xxh32
159 | ./xxhsum -c < .test.xxh64
160 | ./xxhsum -c < .test.xxh32
161 | 	# xxhsum -c warns about improperly formatted lines.
162 | cat .test.xxh64 .test.xxh32 | ./xxhsum -c -
163 | cat .test.xxh32 .test.xxh64 | ./xxhsum -c -
164 | # Expects "FAILED"
165 | echo "0000000000000000 LICENSE" | ./xxhsum -c -; test $$? -eq 1
166 | echo "00000000 LICENSE" | ./xxhsum -c -; test $$? -eq 1
167 | # Expects "FAILED open or read"
168 | echo "0000000000000000 test-expects-file-not-found" | ./xxhsum -c -; test $$? -eq 1
169 | echo "00000000 test-expects-file-not-found" | ./xxhsum -c -; test $$? -eq 1
170 | @$(RM) -f .test.xxh32 .test.xxh64
171 |
172 | armtest: clean
173 | @echo ---- test ARM compilation ----
174 | CC=arm-linux-gnueabi-gcc MOREFLAGS="-Werror -static" $(MAKE) xxhsum
175 |
176 | clangtest: clean
177 | @echo ---- test clang compilation ----
178 | CC=clang MOREFLAGS="-Werror -Wconversion -Wno-sign-conversion" $(MAKE) all
179 |
180 | cxxtest: clean
181 | @echo ---- test C++ compilation ----
182 | CC="$(CXX) -Wno-deprecated" $(MAKE) all CFLAGS="-O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror -fPIC"
183 |
184 | .PHONY: c90test
185 | c90test: CPPFLAGS += -DXXH_NO_LONG_LONG
186 | c90test: CFLAGS += -std=c90 -Werror -pedantic
187 | c90test: xxhash.c
188 | @echo ---- test strict C90 compilation [xxh32 only] ----
189 | $(RM) xxhash.o
190 | $(CC) $(FLAGS) $^ $(LDFLAGS) -c
191 | $(RM) xxhash.o
192 |
193 | usan: CC=clang
194 | usan: clean
195 | @echo ---- check undefined behavior - sanitize ----
196 | $(MAKE) clean test CC=$(CC) MOREFLAGS="-g -fsanitize=undefined -fno-sanitize-recover=all"
197 |
198 | .PHONY: staticAnalyze
199 | staticAnalyze: clean
200 | @echo ---- static analyzer - scan-build ----
201 | CFLAGS="-g -Werror" scan-build --status-bugs -v $(MAKE) all
202 |
203 | .PHONY: cppcheck
204 | cppcheck:
205 | @echo ---- static analyzer - cppcheck ----
206 | cppcheck . --force --enable=warning,portability,performance,style --error-exitcode=1 > /dev/null
207 |
208 | .PHONY: namespaceTest
209 | namespaceTest:
210 | $(CC) -c xxhash.c
211 | $(CC) -DXXH_NAMESPACE=TEST_ -c xxhash.c -o xxhash2.o
212 | $(CC) xxhash.o xxhash2.o xxhsum.c -o xxhsum2 # will fail if one namespace missing (symbol collision)
213 | $(RM) *.o xxhsum2 # clean
214 |
215 | xxhsum.1: xxhsum.1.md
216 | cat $^ | $(MD2ROFF) $(MD2ROFF_FLAGS) | sed -n '/^\.\\\".*/!p' > $@
217 |
218 | .PHONY: man
219 | man: xxhsum.1
220 |
221 | clean-man:
222 | $(RM) xxhsum.1
223 |
224 | preview-man: clean-man man
225 | man ./xxhsum.1
226 |
227 | test: all namespaceTest check test-xxhsum-c c90test
228 |
229 | test-all: CFLAGS += -Werror
230 | test-all: test test32 clangtest cxxtest usan listL120 trailingWhitespace staticAnalyze
231 |
232 | .PHONY: listL120
233 | listL120: # extract lines >= 120 characters in *.{c,h}, by Takayuki Matsuoka (note : $$, for Makefile compatibility)
234 | find . -type f -name '*.c' -o -name '*.h' | while read -r filename; do awk 'length > 120 {print FILENAME "(" FNR "): " $$0}' $$filename; done
235 |
236 | .PHONY: trailingWhitespace
237 | trailingWhitespace:
238 | ! grep -E "`printf '[ \\t]$$'`" *.1 *.c *.h LICENSE Makefile cmake_unofficial/CMakeLists.txt
239 |
240 | .PHONY: clean
241 | clean:
242 | @$(RM) -r *.dSYM # Mac OS-X specific
243 | @$(RM) core *.o libxxhash.*
244 | @$(RM) xxhsum$(EXT) xxhsum32$(EXT) xxhsum_inlinedXXH$(EXT) xxh32sum xxh64sum
245 | @echo cleaning completed
246 |
247 |
248 | #-----------------------------------------------------------------------------
249 | # make install is validated only for the following targets
250 | #-----------------------------------------------------------------------------
251 | ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS))
252 |
253 | .PHONY: list
254 | list:
255 | @$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs
256 |
257 | DESTDIR ?=
258 | # directory variables : GNU conventions prefer lowercase
259 | # see https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html
260 | # support both lower and uppercase (BSD), use uppercase in script
261 | prefix ?= /usr/local
262 | PREFIX ?= $(prefix)
263 | exec_prefix ?= $(PREFIX)
264 | libdir ?= $(exec_prefix)/lib
265 | LIBDIR ?= $(libdir)
266 | includedir ?= $(PREFIX)/include
267 | INCLUDEDIR ?= $(includedir)
268 | bindir ?= $(exec_prefix)/bin
269 | BINDIR ?= $(bindir)
270 | datarootdir ?= $(PREFIX)/share
271 | mandir ?= $(datarootdir)/man
272 | man1dir ?= $(mandir)/man1
273 |
274 | ifneq (,$(filter $(shell uname),OpenBSD FreeBSD NetBSD DragonFly SunOS))
275 | MANDIR ?= $(PREFIX)/man/man1
276 | else
277 | MANDIR ?= $(man1dir)
278 | endif
279 |
280 | ifneq (,$(filter $(shell uname),SunOS))
281 | INSTALL ?= ginstall
282 | else
283 | INSTALL ?= install
284 | endif
285 |
286 | INSTALL_PROGRAM ?= $(INSTALL)
287 | INSTALL_DATA ?= $(INSTALL) -m 644
288 |
289 |
290 | .PHONY: install
291 | install: lib xxhsum
292 | @echo Installing libxxhash
293 | @$(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)
294 | @$(INSTALL_DATA) libxxhash.a $(DESTDIR)$(LIBDIR)
295 | @$(INSTALL_PROGRAM) $(LIBXXH) $(DESTDIR)$(LIBDIR)
296 | @ln -sf $(LIBXXH) $(DESTDIR)$(LIBDIR)/libxxhash.$(SHARED_EXT_MAJOR)
297 | @ln -sf $(LIBXXH) $(DESTDIR)$(LIBDIR)/libxxhash.$(SHARED_EXT)
298 | @$(INSTALL) -d -m 755 $(DESTDIR)$(INCLUDEDIR) # includes
299 | @$(INSTALL_DATA) xxhash.h $(DESTDIR)$(INCLUDEDIR)
300 | @echo Installing xxhsum
301 | @$(INSTALL) -d -m 755 $(DESTDIR)$(BINDIR)/ $(DESTDIR)$(MANDIR)/
302 | @$(INSTALL_PROGRAM) xxhsum $(DESTDIR)$(BINDIR)/xxhsum
303 | @ln -sf xxhsum $(DESTDIR)$(BINDIR)/xxh32sum
304 | @ln -sf xxhsum $(DESTDIR)$(BINDIR)/xxh64sum
305 | @echo Installing man pages
306 | @$(INSTALL_DATA) xxhsum.1 $(DESTDIR)$(MANDIR)/xxhsum.1
307 | @ln -sf xxhsum.1 $(DESTDIR)$(MANDIR)/xxh32sum.1
308 | @ln -sf xxhsum.1 $(DESTDIR)$(MANDIR)/xxh64sum.1
309 | @echo xxhash installation completed
310 |
311 | .PHONY: uninstall
312 | uninstall:
313 | @$(RM) $(DESTDIR)$(LIBDIR)/libxxhash.a
314 | @$(RM) $(DESTDIR)$(LIBDIR)/libxxhash.$(SHARED_EXT)
315 | @$(RM) $(DESTDIR)$(LIBDIR)/libxxhash.$(SHARED_EXT_MAJOR)
316 | @$(RM) $(DESTDIR)$(LIBDIR)/$(LIBXXH)
317 | @$(RM) $(DESTDIR)$(INCLUDEDIR)/xxhash.h
318 | @$(RM) $(DESTDIR)$(BINDIR)/xxh32sum
319 | @$(RM) $(DESTDIR)$(BINDIR)/xxh64sum
320 | @$(RM) $(DESTDIR)$(BINDIR)/xxhsum
321 | @$(RM) $(DESTDIR)$(MANDIR)/xxh32sum.1
322 | @$(RM) $(DESTDIR)$(MANDIR)/xxh64sum.1
323 | @$(RM) $(DESTDIR)$(MANDIR)/xxhsum.1
324 | @echo xxhsum successfully uninstalled
325 |
326 | endif
327 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | xxHash - Extremely fast hash algorithm
2 | ======================================
3 |
4 | xxHash is an Extremely fast Hash algorithm, running at RAM speed limits.
5 | It successfully completes the [SMHasher](http://code.google.com/p/smhasher/wiki/SMHasher) test suite
6 | which evaluates collision, dispersion and randomness qualities of hash functions.
7 | Code is highly portable, and hashes are identical on all platforms (little / big endian).
8 |
9 | |Branch |Status |
10 | |------------|---------|
11 | |master      | [![Build Status](https://travis-ci.org/Cyan4973/xxHash.svg?branch=master)](https://travis-ci.org/Cyan4973/xxHash?branch=master) |
12 | |dev         | [![Build Status](https://travis-ci.org/Cyan4973/xxHash.svg?branch=dev)](https://travis-ci.org/Cyan4973/xxHash?branch=dev) |
13 |
14 |
15 |
16 | Benchmarks
17 | -------------------------
18 |
19 | The benchmark uses SMHasher speed test, compiled with Visual 2010 on a Windows Seven 32-bit box.
20 | The reference system uses a Core 2 Duo @3GHz
21 |
22 |
23 | | Name | Speed | Quality | Author |
24 | |---------------|----------|:-------:|------------------|
25 | | [xxHash] | 5.4 GB/s | 10 | Y.C. |
26 | | MurmurHash 3a | 2.7 GB/s | 10 | Austin Appleby |
27 | | SBox | 1.4 GB/s | 9 | Bret Mulvey |
28 | | Lookup3 | 1.2 GB/s | 9 | Bob Jenkins |
29 | | CityHash64 | 1.05 GB/s| 10 | Pike & Alakuijala|
30 | | FNV | 0.55 GB/s| 5 | Fowler, Noll, Vo |
31 | | CRC32 | 0.43 GB/s| 9 | |
32 | | MD5-32 | 0.33 GB/s| 10 | Ronald L.Rivest |
33 | | SHA1-32 | 0.28 GB/s| 10 | |
34 |
35 | [xxHash]: http://www.xxhash.com
36 |
37 | Q.Score is a measure of quality of the hash function.
38 | It depends on successfully passing SMHasher test set.
39 | 10 is a perfect score.
40 | Algorithms with a score < 5 are not listed on this table.
41 |
42 | A more recent version, XXH64, has been created thanks to [Mathias Westerdahl](https://github.com/JCash),
43 | which offers superior speed and dispersion for 64-bit systems.
44 | Note however that 32-bit applications will still run faster using the 32-bit version.
45 |
46 | SMHasher speed test, compiled using GCC 4.8.2, on Linux Mint 64-bit.
47 | The reference system uses a Core i5-3340M @2.7GHz
48 |
49 | | Version | Speed on 64-bit | Speed on 32-bit |
50 | |------------|------------------|------------------|
51 | | XXH64 | 13.8 GB/s | 1.9 GB/s |
52 | | XXH32 | 6.8 GB/s | 6.0 GB/s |
53 |
54 | This project also includes a command line utility, named `xxhsum`, offering features similar to `md5sum`,
55 | thanks to [Takayuki Matsuoka](https://github.com/t-mat) contributions.
56 |
57 |
58 | ### License
59 |
60 | The library files `xxhash.c` and `xxhash.h` are BSD licensed.
61 | The utility `xxhsum` is GPL licensed.
62 |
63 |
64 | ### Build modifiers
65 |
66 | The following macros can be set at compilation time,
67 | they modify xxhash behavior. They are all disabled by default.
68 |
69 | - `XXH_INLINE_ALL` : Make all functions `inline`, with bodies directly included within `xxhash.h`.
70 | There is no need for an `xxhash.o` module in this case.
71 | Inlining functions is generally beneficial for speed on small keys.
72 | It's especially effective when key length is a compile time constant,
73 |   with observed performance improvement in the +200% range.
74 | See [this article](https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html) for details.
75 | - `XXH_ACCEPT_NULL_INPUT_POINTER` : if set to `1`, when input is a null-pointer,
76 | xxhash result is the same as a zero-length key
77 | (instead of a dereference segfault).
78 | - `XXH_FORCE_MEMORY_ACCESS` : default method `0` uses a portable `memcpy()` notation.
79 | Method `1` uses a gcc-specific `packed` attribute, which can provide better performance for some targets.
80 | Method `2` forces unaligned reads, which is not standard compliant, but might sometimes be the only way to extract better performance.
81 | - `XXH_CPU_LITTLE_ENDIAN` : by default, endianness is determined at compile time.
82 | It's possible to skip auto-detection and force format to little-endian, by setting this macro to 1.
83 | Setting it to 0 forces big-endian.
84 | - `XXH_PRIVATE_API` : same impact as `XXH_INLINE_ALL`.
85 | Name underlines that symbols will not be published on library public interface.
86 | - `XXH_NAMESPACE` : prefix all symbols with the value of `XXH_NAMESPACE`.
87 | Useful to evade symbol naming collisions,
88 | in case of multiple inclusions of xxHash source code.
89 | Client applications can still use regular function name,
90 | symbols are automatically translated through `xxhash.h`.
91 | - `XXH_STATIC_LINKING_ONLY` : gives access to state declaration for static allocation.
92 | Incompatible with dynamic linking, due to risks of ABI changes.
93 | - `XXH_NO_LONG_LONG` : removes support for XXH64,
94 | for targets without 64-bit support.
95 | - `XXH_IMPORT` : should only be defined for dynamic linking, it prevents linkage errors with MSVC.
96 |
97 |
98 | ### Example
99 |
100 | Calling xxhash 64-bit variant from a C program :
101 |
102 | ```C
103 | #include "xxhash.h"
104 |
105 | unsigned long long calcul_hash(const void* buffer, size_t length)
106 | {
107 | unsigned long long const seed = 0; /* or any other value */
108 | unsigned long long const hash = XXH64(buffer, length, seed);
109 | return hash;
110 | }
111 | ```
112 |
113 | Using streaming variant is more involved, but makes it possible to provide data in multiple rounds :
114 | ```C
115 | #include "stdlib.h" /* abort() */
116 | #include "xxhash.h"
117 |
118 |
119 | unsigned long long calcul_hash_streaming(someCustomType handler)
120 | {
121 | /* create a hash state */
122 | XXH64_state_t* const state = XXH64_createState();
123 | if (state==NULL) abort();
124 |
125 | size_t const bufferSize = SOME_SIZE;
126 | void* const buffer = malloc(bufferSize);
127 | if (buffer==NULL) abort();
128 |
129 | /* Initialize state with selected seed */
130 | unsigned long long const seed = 0; /* or any other value */
131 | XXH_errorcode const resetResult = XXH64_reset(state, seed);
132 | if (resetResult == XXH_ERROR) abort();
133 |
134 | /* Feed the state with input data, any size, any number of times */
135 | (...)
136 | while ( /* any condition */ ) {
137 | size_t const length = get_more_data(buffer, bufferSize, handler);
138 | XXH_errorcode const updateResult = XXH64_update(state, buffer, length);
139 | if (updateResult == XXH_ERROR) abort();
140 | (...)
141 | }
142 | (...)
143 |
144 | /* Get the hash */
145 | XXH64_hash_t const hash = XXH64_digest(state);
146 |
147 | /* State can then be re-used; in this example, it is simply freed */
148 | free(buffer);
149 | XXH64_freeState(state);
150 |
151 | return (unsigned long long)hash;
152 | }
153 | ```
154 |
155 | ### New experimental hash algorithm
156 |
157 | Starting with `v0.7.0`, the library includes a new algorithm, named `XXH3`,
158 | able to generate 64 and 128-bits hashes.
159 |
160 | The new algorithm is much faster than its predecessors,
161 | for both long and small inputs,
162 | as can be observed in following graphs :
163 |
164 | 
165 |
166 | 
167 |
168 | The algorithm is currently labelled experimental, as it may change in a future version.
169 | To access it, one needs to unlock its declaration using the macro `XXH_STATIC_LINKING_ONLY`.
170 | It can be used for ephemeral data, and for tests, but avoid storing long-term hash values yet.
171 | `XXH3` will be stabilized in a future version.
172 | This period will be used to collect users' feedback.
173 |
174 |
175 | ### Other programming languages
176 |
177 | Beyond the C reference version,
178 | xxHash is also available on many programming languages,
179 | thanks to great contributors.
180 | They are [listed here](http://www.xxhash.com/#other-languages).
181 |
182 |
183 | ### Branch Policy
184 |
185 | > - The "master" branch is considered stable, at all times.
186 | > - The "dev" branch is the one where all contributions must be merged
187 | before being promoted to master.
188 | > + If you plan to propose a patch, please commit into the "dev" branch,
189 | or its own feature branch.
190 | Direct commit to "master" are not permitted.
191 |
--------------------------------------------------------------------------------
/appveyor.yml:
--------------------------------------------------------------------------------
1 | version: 1.0.{build}
2 | environment:
3 | matrix:
4 | - COMPILER: "visual"
5 | PLATFORM: "visual_x64"
6 | - COMPILER: "visual"
7 | PLATFORM: "visual_x86"
8 | - COMPILER: "gcc"
9 | PLATFORM: "mingw64"
10 | - COMPILER: "gcc"
11 | PLATFORM: "mingw32"
12 |
13 | install:
14 | - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION%
15 | - MKDIR bin
16 | - if [%COMPILER%]==[gcc] SET PATH_ORIGINAL=%PATH%
17 | - if [%COMPILER%]==[gcc] (
18 | SET "PATH_MINGW32=c:\MinGW\bin;c:\MinGW\usr\bin" &&
19 | SET "PATH_MINGW64=c:\msys64\mingw64\bin;c:\msys64\usr\bin" &&
20 | COPY C:\MinGW\bin\mingw32-make.exe C:\MinGW\bin\make.exe &&
21 | COPY C:\MinGW\bin\gcc.exe C:\MinGW\bin\cc.exe
22 | ) else (
23 | IF [%PLATFORM%]==[x64] (SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;")
24 | )
25 |
26 | build_script:
27 | - if [%PLATFORM%]==[mingw32] SET PATH=%PATH_MINGW32%;%PATH_ORIGINAL%
28 | - if [%PLATFORM%]==[mingw64] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL%
29 | - if [%PLATFORM%]==[clang] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL%
30 | - ECHO *** &&
31 | ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION% &&
32 | ECHO ***
33 | - if [%PLATFORM%]==[clang] (clang -v)
34 | - if [%COMPILER%]==[gcc] (gcc -v)
35 | - if [%COMPILER%]==[gcc] (
36 | echo ----- &&
37 | make -v &&
38 | echo ----- &&
39 | if not [%PLATFORM%]==[clang] (
40 | make -B clean test MOREFLAGS=-Werror
41 | ) ELSE (
42 | make -B clean test CC=clang MOREFLAGS="--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion"
43 | )
44 | )
45 | - if "%PLATFORM%"=="visual_x64" (
46 | cd cmake_unofficial &&
47 | cmake . -DCMAKE_BUILD_TYPE=Release -A x64 &&
48 | cmake --build . --config Release
49 | )
50 | - if "%PLATFORM%"=="visual_x86" (
51 | cd cmake_unofficial &&
52 | cmake . -DCMAKE_BUILD_TYPE=Release &&
53 | cmake --build . --config Release
54 | )
55 |
56 | test_script:
57 | - ECHO *** &&
58 | ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION% &&
59 | ECHO ***
60 | - if not [%COMPILER%]==[visual] (
61 | xxhsum -h &&
62 | xxhsum xxhsum.exe &&
63 | xxhsum -bi1 &&
64 | echo ------- xxhsum tested -------
65 | )
66 |
--------------------------------------------------------------------------------
/cmake_unofficial/.gitignore:
--------------------------------------------------------------------------------
1 | # cmake artifacts
2 |
3 | CMakeCache.txt
4 | CMakeFiles
5 | Makefile
6 | cmake_install.cmake
7 |
8 |
9 | # make compilation results
10 |
11 | *.dylib
12 | *.a
13 |
--------------------------------------------------------------------------------
/cmake_unofficial/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # To the extent possible under law, the author(s) have dedicated all
2 | # copyright and related and neighboring rights to this software to
3 | # the public domain worldwide. This software is distributed without
4 | # any warranty.
5 | #
6 | # For details, see <http://creativecommons.org/publicdomain/zero/1.0/>.
7 |
8 | set(XXHASH_DIR "${CMAKE_CURRENT_SOURCE_DIR}/..")
9 |
10 | file(STRINGS "${XXHASH_DIR}/xxhash.h" XXHASH_VERSION_MAJOR REGEX "^#define XXH_VERSION_MAJOR +([0-9]+) *$")
11 | string(REGEX REPLACE "^#define XXH_VERSION_MAJOR +([0-9]+) *$" "\\1" XXHASH_VERSION_MAJOR "${XXHASH_VERSION_MAJOR}")
12 | file(STRINGS "${XXHASH_DIR}/xxhash.h" XXHASH_VERSION_MINOR REGEX "^#define XXH_VERSION_MINOR +([0-9]+) *$")
13 | string(REGEX REPLACE "^#define XXH_VERSION_MINOR +([0-9]+) *$" "\\1" XXHASH_VERSION_MINOR "${XXHASH_VERSION_MINOR}")
14 | file(STRINGS "${XXHASH_DIR}/xxhash.h" XXHASH_VERSION_RELEASE REGEX "^#define XXH_VERSION_RELEASE +([0-9]+) *$")
15 | string(REGEX REPLACE "^#define XXH_VERSION_RELEASE +([0-9]+) *$" "\\1" XXHASH_VERSION_RELEASE "${XXHASH_VERSION_RELEASE}")
16 | set(XXHASH_VERSION_STRING "${XXHASH_VERSION_MAJOR}.${XXHASH_VERSION_MINOR}.${XXHASH_VERSION_RELEASE}")
17 | set(XXHASH_LIB_VERSION ${XXHASH_VERSION_STRING})
18 | set(XXHASH_LIB_SOVERSION "${XXHASH_VERSION_MAJOR}")
19 | mark_as_advanced(XXHASH_VERSION_MAJOR XXHASH_VERSION_MINOR XXHASH_VERSION_RELEASE XXHASH_VERSION_STRING XXHASH_LIB_VERSION XXHASH_LIB_SOVERSION)
20 |
21 | option(BUILD_XXHSUM "Build the xxhsum binary" ON)
22 | option(BUILD_SHARED_LIBS "Build shared library" ON)
23 |
24 | if("${CMAKE_VERSION}" VERSION_LESS "3.0")
25 | project(XXHASH C)
26 | else()
27 | cmake_policy (SET CMP0048 NEW)
28 | project(XXHASH
29 | VERSION ${XXHASH_VERSION_STRING}
30 | LANGUAGES C)
31 | endif()
32 |
33 | cmake_minimum_required (VERSION 2.8.12)
34 |
35 | # If XXHASH is being bundled in another project, we don't want to
36 | # install anything. However, we want to let people override this, so
37 | # we'll use the XXHASH_BUNDLED_MODE variable to let them do that; just
38 | # set it to OFF in your project before you add_subdirectory(xxhash/contrib/cmake_unofficial).
39 | if(CMAKE_CURRENT_SOURCE_DIR STREQUAL "${CMAKE_SOURCE_DIR}")
40 | # Bundled mode hasn't been set one way or the other, set the default
41 | # depending on whether or not we are the top-level project.
42 | if("${XXHASH_PARENT_DIRECTORY}" STREQUAL "")
43 | set(XXHASH_BUNDLED_MODE OFF)
44 | else()
45 | set(XXHASH_BUNDLED_MODE ON)
46 | endif()
47 | endif()
48 | mark_as_advanced(XXHASH_BUNDLED_MODE)
49 |
50 | # Allow people to choose whether to build shared or static libraries
51 | # via the BUILD_SHARED_LIBS option unless we are in bundled mode, in
52 | # which case we always use static libraries.
53 | include(CMakeDependentOption)
54 | CMAKE_DEPENDENT_OPTION(BUILD_SHARED_LIBS "Build shared libraries" ON "NOT XXHASH_BUNDLED_MODE" OFF)
55 |
56 | include_directories("${XXHASH_DIR}")
57 |
58 | # libxxhash
59 | add_library(xxhash "${XXHASH_DIR}/xxhash.c")
60 | if (BUILD_SHARED_LIBS)
61 | target_compile_definitions(xxhash PUBLIC XXH_EXPORT)
62 | endif ()
# Bind the soname to the major version only (libxxhash.so.MAJOR), using
# XXHASH_LIB_SOVERSION computed above.  Previously the full version string
# was used for SOVERSION, which changes the soname on every minor/patch
# release and needlessly breaks ABI consumers.
set_target_properties(xxhash PROPERTIES
    SOVERSION "${XXHASH_LIB_SOVERSION}"
    VERSION "${XXHASH_VERSION_STRING}")
66 |
67 | # xxhsum
68 | add_executable(xxhsum "${XXHASH_DIR}/xxhsum.c")
69 | target_link_libraries(xxhsum xxhash)
70 |
71 | # Extra warning flags
72 | include (CheckCCompilerFlag)
73 | foreach (flag
74 | -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow
75 | -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement
76 | -Wstrict-prototypes -Wundef)
77 | # Because https://gcc.gnu.org/wiki/FAQ#wnowarning
78 | string(REGEX REPLACE "\\-Wno\\-(.+)" "-W\\1" flag_to_test "${flag}")
79 | string(REGEX REPLACE "[^a-zA-Z0-9]+" "_" test_name "CFLAG_${flag_to_test}")
80 |
81 | check_c_compiler_flag("${ADD_COMPILER_FLAGS_PREPEND} ${flag_to_test}" ${test_name})
82 |
83 | if(${test_name})
84 | set(CMAKE_C_FLAGS "${flag} ${CMAKE_C_FLAGS}")
85 | endif()
86 |
87 | unset(test_name)
88 | unset(flag_to_test)
89 | endforeach (flag)
90 |
91 | if(NOT XXHASH_BUNDLED_MODE)
92 | include(GNUInstallDirs)
93 |
94 | install(TARGETS xxhsum
95 | RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}")
96 | install(TARGETS xxhash
97 | LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
98 | ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}")
99 | install(FILES "${XXHASH_DIR}/xxhash.h"
100 | DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
101 | install(FILES "${XXHASH_DIR}/xxhsum.1"
102 | DESTINATION "${CMAKE_INSTALL_MANDIR}/man1")
103 | endif(NOT XXHASH_BUNDLED_MODE)
104 |
--------------------------------------------------------------------------------
/cmake_unofficial/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | The `cmake` script present in this directory offers the following options:
4 |
5 | - `BUILD_XXHSUM` : build the command line binary. ON by default
6 | - `BUILD_SHARED_LIBS` : build dynamic library. ON by default.
7 |
--------------------------------------------------------------------------------
/doc/xxhash_spec.md:
--------------------------------------------------------------------------------
1 | xxHash fast digest algorithm
2 | ======================
3 |
4 | ### Notices
5 |
6 | Copyright (c) Yann Collet
7 |
8 | Permission is granted to copy and distribute this document
9 | for any purpose and without charge,
10 | including translations into other languages
11 | and incorporation into compilations,
12 | provided that the copyright notice and this notice are preserved,
13 | and that any substantive changes or deletions from the original
14 | are clearly marked.
15 | Distribution of this document is unlimited.
16 |
17 | ### Version
18 |
19 | 0.1.1 (10/10/18)
20 |
21 |
22 | Table of Contents
23 | ---------------------
24 | - [Introduction](#introduction)
25 | - [XXH32 algorithm description](#xxh32-algorithm-description)
26 | - [XXH64 algorithm description](#xxh64-algorithm-description)
27 | - [Performance considerations](#performance-considerations)
28 | - [Reference Implementation](#reference-implementation)
29 |
30 |
31 | Introduction
32 | ----------------
33 |
34 | This document describes the xxHash digest algorithm, for both 32 and 64 variants, named `XXH32` and `XXH64`. The algorithm takes as input a message of arbitrary length and an optional seed value, it then produces an output of 32 or 64-bit as "fingerprint" or "digest".
35 |
36 | xxHash is primarily designed for speed. It is labelled non-cryptographic, and is not meant to avoid intentional collisions (same digest for 2 different messages), or to prevent producing a message with predefined digest.
37 |
38 | XXH32 is designed to be fast on 32-bits machines.
39 | XXH64 is designed to be fast on 64-bits machines.
40 | Both variants produce different output.
41 | However, a given variant shall produce exactly the same output, irrespective of the cpu / os used. In particular, the result remains identical whatever the endianness and width of the cpu.
42 |
43 | ### Operation notations
44 |
45 | All operations are performed modulo {32,64} bits. Arithmetic overflows are expected.
46 | `XXH32` uses 32-bit modular operations. `XXH64` uses 64-bit modular operations.
47 |
48 | - `+` : denote modular addition
49 | - `*` : denote modular multiplication
50 | - `X <<< s` : denote the value obtained by circularly shifting (rotating) `X` left by `s` bit positions.
51 | - `X >> s` : denote the value obtained by shifting `X` right by s bit positions. Upper `s` bits become `0`.
52 | - `X xor Y` : denote the bit-wise XOR of `X` and `Y` (same width).
53 |
54 |
55 | XXH32 Algorithm Description
56 | -------------------------------------
57 |
58 | ### Overview
59 |
60 | We begin by supposing that we have a message of any length `L` as input, and that we wish to find its digest. Here `L` is an arbitrary nonnegative integer; `L` may be zero. The following steps are performed to compute the digest of the message.
61 |
62 | The algorithm collects and transforms input in _stripes_ of 16 bytes. The transforms are stored inside 4 "accumulators", each one storing an unsigned 32-bit value. Each accumulator can be processed independently in parallel, speeding up processing for cpu with multiple execution units.
63 |
64 | The algorithm uses 32-bit addition, multiplication, rotate, shift and xor operations. Many operations require some 32-bit prime number constants, all defined below :
65 |
66 | static const u32 PRIME32_1 = 2654435761U; // 0b10011110001101110111100110110001
67 | static const u32 PRIME32_2 = 2246822519U; // 0b10000101111010111100101001110111
68 | static const u32 PRIME32_3 = 3266489917U; // 0b11000010101100101010111000111101
69 | static const u32 PRIME32_4 = 668265263U; // 0b00100111110101001110101100101111
70 | static const u32 PRIME32_5 = 374761393U; // 0b00010110010101100110011110110001
71 |
72 | These constants are prime numbers, and feature a good mix of bits 1 and 0, neither too regular, nor too dissymmetric. These properties help dispersion capabilities.
73 |
74 | ### Step 1. Initialize internal accumulators
75 |
76 | Each accumulator gets an initial value based on optional `seed` input. Since the `seed` is optional, it can be `0`.
77 |
78 | u32 acc1 = seed + PRIME32_1 + PRIME32_2;
79 | u32 acc2 = seed + PRIME32_2;
80 | u32 acc3 = seed + 0;
81 | u32 acc4 = seed - PRIME32_1;
82 |
83 | #### Special case : input is less than 16 bytes
84 |
85 | When input is too small (< 16 bytes), the algorithm will not process any stripe. Consequently, it will not make use of parallel accumulators.
86 |
87 | In which case, a simplified initialization is performed, using a single accumulator :
88 |
89 | u32 acc = seed + PRIME32_5;
90 |
91 | The algorithm then proceeds directly to step 4.
92 |
93 | ### Step 2. Process stripes
94 |
95 | A stripe is a contiguous segment of 16 bytes.
96 | It is evenly divided into 4 _lanes_, of 4 bytes each.
97 | The first lane is used to update accumulator 1, the second lane is used to update accumulator 2, and so on.
98 |
99 | Each lane reads its associated 32-bit value using __little-endian__ convention.
100 |
101 | For each {lane, accumulator}, the update process is called a _round_, and applies the following formula :
102 |
103 | accN = accN + (laneN * PRIME32_2);
104 | accN = accN <<< 13;
105 | accN = accN * PRIME32_1;
106 |
107 | This shuffles the bits so that any bit from input _lane_ impacts several bits in output _accumulator_. All operations are performed modulo 2^32.
108 |
109 | Input is consumed one full stripe at a time. Step 2 is looped as many times as necessary to consume the whole input, except the last remaining bytes which cannot form a stripe (< 16 bytes).
110 | When that happens, move to step 3.
111 |
112 | ### Step 3. Accumulator convergence
113 |
114 | All 4 lane accumulators from previous steps are merged to produce a single remaining accumulator of same width (32-bit). The associated formula is as follows :
115 |
116 | acc = (acc1 <<< 1) + (acc2 <<< 7) + (acc3 <<< 12) + (acc4 <<< 18);
117 |
118 | ### Step 4. Add input length
119 |
120 | The input total length is presumed known at this stage. This step is just about adding the length to accumulator, so that it participates to final mixing.
121 |
122 | acc = acc + (u32)inputLength;
123 |
124 | Note that, if input length is so large that it requires more than 32-bits, only the lower 32-bits are added to the accumulator.
125 |
126 | ### Step 5. Consume remaining input
127 |
128 | There may be up to 15 bytes remaining to consume from the input.
129 | The final stage will digest them according to following pseudo-code :
130 |
131 | while (remainingLength >= 4) {
132 | lane = read_32bit_little_endian(input_ptr);
133 | acc = acc + lane * PRIME32_3;
134 | acc = (acc <<< 17) * PRIME32_4;
135 | input_ptr += 4; remainingLength -= 4;
136 | }
137 |
138 | while (remainingLength >= 1) {
139 | lane = read_byte(input_ptr);
140 | acc = acc + lane * PRIME32_5;
141 | acc = (acc <<< 11) * PRIME32_1;
142 | input_ptr += 1; remainingLength -= 1;
143 | }
144 |
145 | This process ensures that all input bytes are present in the final mix.
146 |
147 | ### Step 6. Final mix (avalanche)
148 |
149 | The final mix ensures that all input bits have a chance to impact any bit in the output digest, resulting in an unbiased distribution. This is also called avalanche effect.
150 |
151 | acc = acc xor (acc >> 15);
152 | acc = acc * PRIME32_2;
153 | acc = acc xor (acc >> 13);
154 | acc = acc * PRIME32_3;
155 | acc = acc xor (acc >> 16);
156 |
157 | ### Step 7. Output
158 |
159 | The `XXH32()` function produces an unsigned 32-bit value as output.
160 |
161 | For systems which require to store and/or display the result in binary or hexadecimal format, the canonical format is defined to reproduce the same value as the natural decimal format, hence follows __big-endian__ convention (most significant byte first).
162 |
163 |
164 | XXH64 Algorithm Description
165 | -------------------------------------
166 |
167 | ### Overview
168 |
169 | `XXH64` algorithm structure is very similar to `XXH32` one. The major difference is that `XXH64` uses 64-bit arithmetic, speeding up memory transfer for 64-bit compliant systems, but also relying on cpu capability to efficiently perform 64-bit operations.
170 |
171 | The algorithm collects and transforms input in _stripes_ of 32 bytes. The transforms are stored inside 4 "accumulators", each one storing an unsigned 64-bit value. Each accumulator can be processed independently in parallel, speeding up processing for cpu with multiple execution units.
172 |
173 | The algorithm uses 64-bit addition, multiplication, rotate, shift and xor operations. Many operations require some 64-bit prime number constants, all defined below :
174 |
175 | static const u64 PRIME64_1 = 11400714785074694791ULL; // 0b1001111000110111011110011011000110000101111010111100101010000111
176 | static const u64 PRIME64_2 = 14029467366897019727ULL; // 0b1100001010110010101011100011110100100111110101001110101101001111
177 | static const u64 PRIME64_3 = 1609587929392839161ULL; // 0b0001011001010110011001111011000110011110001101110111100111111001
178 | static const u64 PRIME64_4 = 9650029242287828579ULL; // 0b1000010111101011110010100111011111000010101100101010111001100011
179 | static const u64 PRIME64_5 = 2870177450012600261ULL; // 0b0010011111010100111010110010111100010110010101100110011111000101
180 |
181 | These constants are prime numbers, and feature a good mix of bits 1 and 0, neither too regular, nor too dissymmetric. These properties help dispersion capabilities.
182 |
183 | ### Step 1. Initialise internal accumulators
184 |
185 | Each accumulator gets an initial value based on optional `seed` input. Since the `seed` is optional, it can be `0`.
186 |
187 | u64 acc1 = seed + PRIME64_1 + PRIME64_2;
188 | u64 acc2 = seed + PRIME64_2;
189 | u64 acc3 = seed + 0;
190 | u64 acc4 = seed - PRIME64_1;
191 |
192 | #### Special case : input is less than 32 bytes
193 |
194 | When input is too small (< 32 bytes), the algorithm will not process any stripe. Consequently, it will not make use of parallel accumulators.
195 |
196 | In which case, a simplified initialization is performed, using a single accumulator :
197 |
198 | u64 acc = seed + PRIME64_5;
199 |
200 | The algorithm then proceeds directly to step 4.
201 |
202 | ### Step 2. Process stripes
203 |
204 | A stripe is a contiguous segment of 32 bytes.
205 | It is evenly divided into 4 _lanes_, of 8 bytes each.
206 | The first lane is used to update accumulator 1, the second lane is used to update accumulator 2, and so on.
207 |
208 | Each lane reads its associated 64-bit value using __little-endian__ convention.
209 |
210 | For each {lane, accumulator}, the update process is called a _round_, and applies the following formula :
211 |
212 | round(accN,laneN):
213 | accN = accN + (laneN * PRIME64_2);
214 | accN = accN <<< 31;
215 | return accN * PRIME64_1;
216 |
217 | This shuffles the bits so that any bit from input _lane_ impacts several bits in output _accumulator_. All operations are performed modulo 2^64.
218 |
219 | Input is consumed one full stripe at a time. Step 2 is looped as many times as necessary to consume the whole input, except the last remaining bytes which cannot form a stripe (< 32 bytes).
220 | When that happens, move to step 3.
221 |
222 | ### Step 3. Accumulator convergence
223 |
224 | All 4 lane accumulators from previous steps are merged to produce a single remaining accumulator of same width (64-bit). The associated formula is as follows.
225 |
226 | Note that accumulator convergence is more complex than 32-bit variant, and requires to define another function called _mergeAccumulator()_ :
227 |
228 | mergeAccumulator(acc,accN):
229 | acc = acc xor round(0, accN);
230 | acc = acc * PRIME64_1
231 | return acc + PRIME64_4;
232 |
233 | which is then used in the convergence formula :
234 |
235 | acc = (acc1 <<< 1) + (acc2 <<< 7) + (acc3 <<< 12) + (acc4 <<< 18);
236 | acc = mergeAccumulator(acc, acc1);
237 | acc = mergeAccumulator(acc, acc2);
238 | acc = mergeAccumulator(acc, acc3);
239 | acc = mergeAccumulator(acc, acc4);
240 |
241 | ### Step 4. Add input length
242 |
243 | The input total length is presumed known at this stage. This step is just about adding the length to accumulator, so that it participates to final mixing.
244 |
245 | acc = acc + inputLength;
246 |
247 | ### Step 5. Consume remaining input
248 |
249 | There may be up to 31 bytes remaining to consume from the input.
250 | The final stage will digest them according to following pseudo-code :
251 |
252 | while (remainingLength >= 8) {
253 | lane = read_64bit_little_endian(input_ptr);
254 | acc = acc xor round(0, lane);
255 | acc = (acc <<< 27) * PRIME64_1;
256 | acc = acc + PRIME64_4;
257 | input_ptr += 8; remainingLength -= 8;
258 | }
259 |
260 | if (remainingLength >= 4) {
261 | lane = read_32bit_little_endian(input_ptr);
262 | acc = acc xor (lane * PRIME64_1);
263 | acc = (acc <<< 23) * PRIME64_2;
264 | acc = acc + PRIME64_3;
265 | input_ptr += 4; remainingLength -= 4;
266 | }
267 |
268 | while (remainingLength >= 1) {
269 | lane = read_byte(input_ptr);
270 | acc = acc xor (lane * PRIME64_5);
271 | acc = (acc <<< 11) * PRIME64_1;
272 | input_ptr += 1; remainingLength -= 1;
273 | }
274 |
275 | This process ensures that all input bytes are present in the final mix.
276 |
277 | ### Step 6. Final mix (avalanche)
278 |
279 | The final mix ensures that all input bits have a chance to impact any bit in the output digest, resulting in an unbiased distribution. This is also called avalanche effect.
280 |
281 | acc = acc xor (acc >> 33);
282 | acc = acc * PRIME64_2;
283 | acc = acc xor (acc >> 29);
284 | acc = acc * PRIME64_3;
285 | acc = acc xor (acc >> 32);
286 |
287 | ### Step 7. Output
288 |
289 | The `XXH64()` function produces an unsigned 64-bit value as output.
290 |
291 | For systems which require to store and/or display the result in binary or hexadecimal format, the canonical format is defined to reproduce the same value as the natural decimal format, hence follows __big-endian__ convention (most significant byte first).
292 |
293 | Performance considerations
294 | ----------------------------------
295 |
296 | The xxHash algorithms are simple and compact to implement. They provide a system independent "fingerprint" or digest of a message of arbitrary length.
297 |
298 | The algorithm allows input to be streamed and processed in multiple steps. In such case, an internal buffer is needed to ensure data is presented to the algorithm in full stripes.
299 |
300 | On 64-bit systems, the 64-bit variant `XXH64` is generally faster to compute, so it is a recommended variant, even when only 32-bit are needed.
301 |
302 | On 32-bit systems though, positions are reversed : `XXH64` performance is reduced, due to its usage of 64-bit arithmetic. `XXH32` becomes a faster variant.
303 |
304 |
305 | Reference Implementation
306 | ----------------------------------------
307 |
308 | A reference library written in C is available at http://www.xxhash.com .
309 | The web page also links to multiple other implementations written in many different languages.
310 | It links to the [github project page](https://github.com/Cyan4973/xxHash) where an [issue board](https://github.com/Cyan4973/xxHash/issues) can be used for further public discussions on the topic.
311 |
312 |
313 | Version changes
314 | --------------------
315 | v0.1.1 : added a note on rationale for selection of constants
316 | v0.1.0 : initial release
317 |
--------------------------------------------------------------------------------
/xxh3.h:
--------------------------------------------------------------------------------
1 | /*
2 | xxHash - Extremely Fast Hash algorithm
3 | Development source file for `xxh3`
4 | Copyright (C) 2019-present, Yann Collet.
5 |
6 | BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 |
8 | Redistribution and use in source and binary forms, with or without
9 | modification, are permitted provided that the following conditions are
10 | met:
11 |
12 | * Redistributions of source code must retain the above copyright
13 | notice, this list of conditions and the following disclaimer.
14 | * Redistributions in binary form must reproduce the above
15 | copyright notice, this list of conditions and the following disclaimer
16 | in the documentation and/or other materials provided with the
17 | distribution.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
31 | You can contact the author at :
32 | - xxHash source repository : https://github.com/Cyan4973/xxHash
33 | */
34 |
35 | /* Note :
36 | This file is separated for development purposes.
37 | It will be integrated into `xxhash.c` when development phase is complete.
38 | */
39 |
40 | #ifndef XXH3_H
41 | #define XXH3_H
42 |
43 |
44 | /* === Dependencies === */
45 |
46 | #undef XXH_INLINE_ALL /* in case it's already defined */
47 | #define XXH_INLINE_ALL
48 | #include "xxhash.h"
49 |
50 | #undef NDEBUG /* avoid redefinition */
51 | #define NDEBUG
52 | #include
53 |
54 |
55 | /* === Compiler versions === */
56 |
57 | #if !(defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) /* C99+ */
58 | # define restrict /* disable */
59 | #endif
60 |
/* Restored the intrinsics header names, which had been lost
 * (only bare `#include` lines remained). */
#if defined(__GNUC__)
# if defined(__SSE2__)
#  include <x86intrin.h>
# elif defined(__ARM_NEON__) || defined(__ARM_NEON)
#  define inline __inline__ /* clang bug */
#  include <arm_neon.h>
#  undef inline
# endif
# define ALIGN(n) __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
# include <intrin.h>
# define ALIGN(n) __declspec(align(n))
#else
# define ALIGN(n) /* disabled */
#endif
76 |
77 |
78 |
79 | /* ==========================================
80 | * Vectorization detection
81 | * ========================================== */
82 | #define XXH_SCALAR 0
83 | #define XXH_SSE2 1
84 | #define XXH_AVX2 2
85 | #define XXH_NEON 3
86 |
87 | #ifndef XXH_VECTOR /* can be defined on command line */
88 | # if defined(__AVX2__)
89 | # define XXH_VECTOR XXH_AVX2
90 | # elif defined(__SSE2__)
91 | # define XXH_VECTOR XXH_SSE2
92 | /* msvc support maybe later */
93 | # elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__ARM_NEON))
94 | # define XXH_VECTOR XXH_NEON
95 | # else
96 | # define XXH_VECTOR XXH_SCALAR
97 | # endif
98 | #endif
99 |
/* U64 XXH_mult32to64(U32 a, U64 b) { return (U64)a * (U64)b; } */
#if defined(_MSC_VER) && !defined(_M_ARM64) && !defined(_M_ARM)
# include <intrin.h>   /* header name was missing; declares __emulu */
/* MSVC doesn't do a good job with the mull detection. */
# define XXH_mult32to64 __emulu
#else
# define XXH_mult32to64(x, y) ((U64)((x) & 0xFFFFFFFF) * (U64)((y) & 0xFFFFFFFF))
#endif
108 |
109 |
110 | /* ==========================================
111 | * XXH3 default settings
112 | * ========================================== */
113 |
114 | #define KEYSET_DEFAULT_SIZE 48 /* minimum 32 */
115 |
116 |
/* Default key ("secret") consumed by the XXH3 mixing steps: 48 pseudo-random
 * 32-bit constants, read both as U32 lanes and (via XXH3_readKey64) as U64
 * pairs.  ALIGN(64) keeps the table cache-line aligned for the vectorized
 * accumulate loop.  NOTE(review): the provenance of these constants is not
 * shown in this file -- do not modify them; hash outputs depend on them. */
ALIGN(64) static const U32 kKey[KEYSET_DEFAULT_SIZE] = {
    0xb8fe6c39,0x23a44bbe,0x7c01812c,0xf721ad1c,
    0xded46de9,0x839097db,0x7240a4a4,0xb7b3671f,
    0xcb79e64e,0xccc0e578,0x825ad07d,0xccff7221,
    0xb8084674,0xf743248e,0xe03590e6,0x813a264c,
    0x3c2852bb,0x91c300cb,0x88d0658b,0x1b532ea3,
    0x71644897,0xa20df94e,0x3819ef46,0xa9deacd8,
    0xa8fa763f,0xe39c343f,0xf9dcbbc7,0xc70b4f1d,
    0x8a51e04b,0xcdb45931,0xc89f7ec9,0xd9787364,

    0xeac5ac83,0x34d3ebc3,0xc581a0ff,0xfa1363eb,
    0x170ddd51,0xb7f0da49,0xd3165526,0x29d4689e,
    0x2b16be58,0x7d47a1fc,0x8ff8b8d1,0x7ad031ce,
    0x45cb3a8f,0x95160428,0xafd7fbca,0xbb4b407e,
};
132 |
133 |
134 | #if defined(__GNUC__) && defined(__i386__)
135 | /* GCC is stupid and tries to vectorize this.
136 | * This tells GCC that it is wrong. */
137 | __attribute__((__target__("no-sse")))
138 | #endif
139 | static U64
140 | XXH3_mul128(U64 ll1, U64 ll2)
141 | {
142 | #if defined(__SIZEOF_INT128__) || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
143 |
144 | __uint128_t lll = (__uint128_t)ll1 * ll2;
145 | return (U64)lll + (U64)(lll >> 64);
146 |
147 | #elif defined(_M_X64) || defined(_M_IA64)
148 |
149 | #ifndef _MSC_VER
150 | # pragma intrinsic(_umul128)
151 | #endif
152 | U64 llhigh;
153 | U64 const lllow = _umul128(ll1, ll2, &llhigh);
154 | return lllow + llhigh;
155 |
156 | #elif defined(__aarch64__) && defined(__GNUC__)
157 |
158 | U64 llow;
159 | U64 llhigh;
160 | __asm__("umulh %0, %1, %2" : "=r" (llhigh) : "r" (ll1), "r" (ll2));
161 | __asm__("madd %0, %1, %2, %3" : "=r" (llow) : "r" (ll1), "r" (ll2), "r" (llhigh));
162 | return lllow;
163 |
164 | /* Do it out manually on 32-bit.
165 | * This is a modified, unrolled, widened, and optimized version of the
166 | * mulqdu routine from Hacker's Delight.
167 | *
168 | * https://www.hackersdelight.org/hdcodetxt/mulqdu.c.txt
169 | *
170 | * This was modified to use U32->U64 multiplication instead
171 | * of U16->U32, to add the high and low values in the end,
172 | * be endian-independent, and I added a partial assembly
173 | * implementation for ARM. */
174 |
175 | /* An easy 128-bit folding multiply on ARMv6T2 and ARMv7-A/R can be done with
176 | * the mighty umaal (Unsigned Multiply Accumulate Accumulate Long) which takes 4 cycles
177 | * or less, doing a long multiply and adding two 32-bit integers:
178 | *
179 | * void umaal(U32 *RdLo, U32 *RdHi, U32 Rn, U32 Rm)
180 | * {
181 | * U64 prodAcc = (U64)Rn * (U64)Rm;
182 | * prodAcc += *RdLo;
183 | * prodAcc += *RdHi;
184 | * *RdLo = prodAcc & 0xFFFFFFFF;
185 | * *RdHi = prodAcc >> 32;
186 | * }
187 | *
188 | * This is compared to umlal which adds to a single 64-bit integer:
189 | *
190 | * void umlal(U32 *RdLo, U32 *RdHi, U32 Rn, U32 Rm)
191 | * {
192 | * U64 prodAcc = (U64)Rn * (U64)Rm;
193 | * prodAcc += (*RdLo | ((U64)*RdHi << 32);
194 | * *RdLo = prodAcc & 0xFFFFFFFF;
195 | * *RdHi = prodAcc >> 32;
196 | * }
197 | *
198 | * Getting the compiler to emit them is like pulling teeth, and checking
199 | * for it is annoying because ARMv7-M lacks this instruction. However, it
200 | * is worth it, because this is an otherwise expensive operation. */
201 |
202 | /* GCC-compatible, ARMv6t2 or ARMv7+, non-M variant, and 32-bit */
203 | #elif defined(__GNUC__) /* GCC-compatible */ \
204 | && defined(__ARM_ARCH) && !defined(__aarch64__) && !defined(__arm64__) /* 32-bit ARM */\
205 | && !defined(__ARM_ARCH_7M__) /* <- Not ARMv7-M vv*/ \
206 | && !(defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM == 0 && __TARGET_ARCH_THUMB == 4) \
207 | && (defined(__ARM_ARCH_6T2__) || __ARM_ARCH > 6) /* ARMv6T2 or later */
208 |
209 | U32 w[4] = { 0 };
210 | U32 u[2] = { (U32)(ll1 >> 32), (U32)ll1 };
211 | U32 v[2] = { (U32)(ll2 >> 32), (U32)ll2 };
212 | U32 k;
213 |
214 | /* U64 t = (U64)u[1] * (U64)v[1];
215 | * w[3] = t & 0xFFFFFFFF;
216 | * k = t >> 32; */
217 | __asm__("umull %0, %1, %2, %3"
218 | : "=r" (w[3]), "=r" (k)
219 | : "r" (u[1]), "r" (v[1]));
220 |
221 | /* t = (U64)u[0] * (U64)v[1] + w[2] + k;
222 | * w[2] = t & 0xFFFFFFFF;
223 | * k = t >> 32; */
224 | __asm__("umaal %0, %1, %2, %3"
225 | : "+r" (w[2]), "+r" (k)
226 | : "r" (u[0]), "r" (v[1]));
227 | w[1] = k;
228 | k = 0;
229 |
230 | /* t = (U64)u[1] * (U64)v[0] + w[2] + k;
231 | * w[2] = t & 0xFFFFFFFF;
232 | * k = t >> 32; */
233 | __asm__("umaal %0, %1, %2, %3"
234 | : "+r" (w[2]), "+r" (k)
235 | : "r" (u[1]), "r" (v[0]));
236 |
237 | /* t = (U64)u[0] * (U64)v[0] + w[1] + k;
238 | * w[1] = t & 0xFFFFFFFF;
239 | * k = t >> 32; */
240 | __asm__("umaal %0, %1, %2, %3"
241 | : "+r" (w[1]), "+r" (k)
242 | : "r" (u[0]), "r" (v[0]));
243 | w[0] = k;
244 |
245 | return (w[1] | ((U64)w[0] << 32)) + (w[3] | ((U64)w[2] << 32));
246 |
247 | #else /* Portable scalar version */
248 |
249 | /* emulate 64x64->128b multiplication, using four 32x32->64 */
250 | U32 const h1 = (U32)(ll1 >> 32);
251 | U32 const h2 = (U32)(ll2 >> 32);
252 | U32 const l1 = (U32)ll1;
253 | U32 const l2 = (U32)ll2;
254 |
255 | U64 const llh = XXH_mult32to64(h1, h2);
256 | U64 const llm1 = XXH_mult32to64(l1, h2);
257 | U64 const llm2 = XXH_mult32to64(h1, l2);
258 | U64 const lll = XXH_mult32to64(l1, l2);
259 |
260 | U64 const t = lll + (llm1 << 32);
261 | U64 const carry1 = t < lll;
262 |
263 | U64 const lllow = t + (llm2 << 32);
264 | U64 const carry2 = lllow < t;
265 | U64 const llhigh = llh + (llm1 >> 32) + (llm2 >> 32) + carry1 + carry2;
266 |
267 | return llhigh + lllow;
268 |
269 | #endif
270 | }
271 |
272 |
273 | static XXH64_hash_t XXH3_avalanche(U64 h64)
274 | {
275 | h64 ^= h64 >> 29;
276 | h64 *= PRIME64_3;
277 | h64 ^= h64 >> 32;
278 | return h64;
279 | }
280 |
281 |
282 | /* ==========================================
283 | * Short keys
284 | * ========================================== */
285 |
286 | XXH_FORCE_INLINE XXH64_hash_t
287 | XXH3_len_1to3_64b(const void* data, size_t len, const void* keyPtr, XXH64_hash_t seed)
288 | {
289 | assert(data != NULL);
290 | assert(len > 0 && len <= 3);
291 | assert(keyPtr != NULL);
292 | { const U32* const key32 = (const U32*) keyPtr;
293 | BYTE const c1 = ((const BYTE*)data)[0];
294 | BYTE const c2 = ((const BYTE*)data)[len >> 1];
295 | BYTE const c3 = ((const BYTE*)data)[len - 1];
296 | U32 const l1 = (U32)(c1) + ((U32)(c2) << 8);
297 | U32 const l2 = (U32)(len) + ((U32)(c3) << 2);
298 | U64 const ll11 = XXH_mult32to64((l1 + seed + key32[0]), (l2 + key32[1]));
299 | return XXH3_avalanche(ll11);
300 | }
301 | }
302 |
303 | XXH_FORCE_INLINE XXH64_hash_t
304 | XXH3_len_4to8_64b(const void* data, size_t len, const void* keyPtr, XXH64_hash_t seed)
305 | {
306 | assert(data != NULL);
307 | assert(len >= 4 && len <= 8);
308 | { const U32* const key32 = (const U32*) keyPtr;
309 | U64 acc = PRIME64_1 * (len + seed);
310 | U32 const l1 = XXH_readLE32(data) + key32[0];
311 | U32 const l2 = XXH_readLE32((const BYTE*)data + len - 4) + key32[1];
312 | acc += XXH_mult32to64(l1, l2);
313 | return XXH3_avalanche(acc);
314 | }
315 | }
316 |
317 | XXH_FORCE_INLINE U64
318 | XXH3_readKey64(const void* ptr)
319 | {
320 | assert(((size_t)ptr & 7) == 0); /* aligned on 8-bytes boundaries */
321 | if (XXH_CPU_LITTLE_ENDIAN) {
322 | return *(const U64*)ptr;
323 | } else {
324 | const U32* const ptr32 = (const U32*)ptr;
325 | return (U64)ptr32[0] + (((U64)ptr32[1]) << 32);
326 | }
327 | }
328 |
329 | XXH_FORCE_INLINE XXH64_hash_t
330 | XXH3_len_9to16_64b(const void* data, size_t len, const void* keyPtr, XXH64_hash_t seed)
331 | {
332 | assert(data != NULL);
333 | assert(key != NULL);
334 | assert(len >= 9 && len <= 16);
335 | { const U64* const key64 = (const U64*) keyPtr;
336 | U64 acc = PRIME64_1 * (len + seed);
337 | U64 const ll1 = XXH_readLE64(data) + XXH3_readKey64(key64);
338 | U64 const ll2 = XXH_readLE64((const BYTE*)data + len - 8) + XXH3_readKey64(key64+1);
339 | acc += XXH3_mul128(ll1, ll2);
340 | return XXH3_avalanche(acc);
341 | }
342 | }
343 |
344 | XXH_FORCE_INLINE XXH64_hash_t
345 | XXH3_len_0to16_64b(const void* data, size_t len, XXH64_hash_t seed)
346 | {
347 | assert(data != NULL);
348 | assert(len <= 16);
349 | { if (len > 8) return XXH3_len_9to16_64b(data, len, kKey, seed);
350 | if (len >= 4) return XXH3_len_4to8_64b(data, len, kKey, seed);
351 | if (len) return XXH3_len_1to3_64b(data, len, kKey, seed);
352 | return seed;
353 | }
354 | }
355 |
356 |
357 | /* === Long Keys === */
358 |
359 | #define STRIPE_LEN 64
360 | #define STRIPE_ELTS (STRIPE_LEN / sizeof(U32))
361 | #define ACC_NB (STRIPE_LEN / sizeof(U64))
362 |
363 | XXH_FORCE_INLINE void
364 | XXH3_accumulate_512(void* acc, const void *restrict data, const void *restrict key)
365 | {
366 | #if (XXH_VECTOR == XXH_AVX2)
367 |
368 | assert(((size_t)acc) & 31 == 0);
369 | { ALIGN(32) __m256i* const xacc = (__m256i *) acc;
370 | const __m256i* const xdata = (const __m256i *) data;
371 | const __m256i* const xkey = (const __m256i *) key;
372 |
373 | size_t i;
374 | for (i=0; i < STRIPE_LEN/sizeof(__m256i); i++) {
375 | __m256i const d = _mm256_loadu_si256 (xdata+i);
376 | __m256i const k = _mm256_loadu_si256 (xkey+i);
377 | __m256i const dk = _mm256_add_epi32 (d,k); /* uint32 dk[8] = {d0+k0, d1+k1, d2+k2, d3+k3, ...} */
378 | __m256i const res = _mm256_mul_epu32 (dk, _mm256_shuffle_epi32 (dk, 0x31)); /* uint64 res[4] = {dk0*dk1, dk2*dk3, ...} */
379 | __m256i const add = _mm256_add_epi64(d, xacc[i]);
380 | xacc[i] = _mm256_add_epi64(res, add);
381 | }
382 | }
383 |
384 | #elif (XXH_VECTOR == XXH_SSE2)
385 |
386 | assert(((size_t)acc) & 15 == 0);
387 | { ALIGN(16) __m128i* const xacc = (__m128i *) acc;
388 | const __m128i* const xdata = (const __m128i *) data;
389 | const __m128i* const xkey = (const __m128i *) key;
390 |
391 | size_t i;
392 | for (i=0; i < STRIPE_LEN/sizeof(__m128i); i++) {
393 | __m128i const d = _mm_loadu_si128 (xdata+i);
394 | __m128i const k = _mm_loadu_si128 (xkey+i);
395 | __m128i const dk = _mm_add_epi32 (d,k); /* uint32 dk[4] = {d0+k0, d1+k1, d2+k2, d3+k3} */
396 | __m128i const res = _mm_mul_epu32 (dk, _mm_shuffle_epi32 (dk, 0x31)); /* uint64 res[2] = {dk0*dk1,dk2*dk3} */
397 | __m128i const add = _mm_add_epi64(d, xacc[i]);
398 | xacc[i] = _mm_add_epi64(res, add);
399 | }
400 | }
401 |
402 | #elif (XXH_VECTOR == XXH_NEON)
403 |
404 | assert(((size_t)acc) & 15 == 0);
405 | { uint64x2_t* const xacc = (uint64x2_t *)acc;
406 | const uint32_t* const xdata = (const uint32_t *)data;
407 | const uint32_t* const xkey = (const uint32_t *)key;
408 |
409 | size_t i;
410 | for (i=0; i < STRIPE_LEN / sizeof(uint64x2_t); i++) {
411 | uint32x4_t const d = vld1q_u32(xdata+i*4); /* U32 d[4] = xdata[i]; */
412 | uint32x4_t const k = vld1q_u32(xkey+i*4); /* U32 k[4] = xkey[i]; */
413 | uint32x4_t dk = vaddq_u32(d, k); /* U32 dk[4] = {d0+k0, d1+k1, d2+k2, d3+k3} */
414 | #if !defined(__aarch64__) && !defined(__arm64__) /* ARM32-specific hack */
415 | /* vzip on ARMv7 Clang generates a lot of vmovs (technically vorrs) without this.
416 | * vzip on 32-bit ARM NEON will overwrite the original register, and I think that Clang
417 | * assumes I don't want to destroy it and tries to make a copy. This slows down the code
418 | * a lot.
419 | * aarch64 not only uses an entirely different syntax, but it requires three
420 | * instructions...
421 | * ext v1.16B, v0.16B, #8 // select high bits because aarch64 can't address them directly
422 | * zip1 v3.2s, v0.2s, v1.2s // first zip
423 | * zip2 v2.2s, v0.2s, v1.2s // second zip
424 | * ...to do what ARM does in one:
425 | * vzip.32 d0, d1 // Interleave high and low bits and overwrite. */
426 | __asm__("vzip.32 %e0, %f0" : "+w" (dk)); /* dk = { dk0, dk2, dk1, dk3 }; */
427 | xacc[i] = vaddq_u64(xacc[i], vreinterpretq_u64_u32(d)); /* xacc[i] += (U64x2)d; */
428 | xacc[i] = vmlal_u32(xacc[i], vget_low_u32(dk), vget_high_u32(dk)); /* xacc[i] += { (U64)dk0*dk1, (U64)dk2*dk3 }; */
429 | #else
430 | /* On aarch64, vshrn/vmovn seems to be equivalent to, if not faster than, the vzip method. */
431 | uint32x2_t dkL = vmovn_u64(vreinterpretq_u64_u32(dk)); /* U32 dkL[2] = dk & 0xFFFFFFFF; */
432 | uint32x2_t dkH = vshrn_n_u64(vreinterpretq_u64_u32(dk), 32); /* U32 dkH[2] = dk >> 32; */
433 | xacc[i] = vaddq_u64(xacc[i], vreinterpretq_u64_u32(d)); /* xacc[i] += (U64x2)d; */
434 | xacc[i] = vmlal_u32(xacc[i], dkL, dkH); /* xacc[i] += (U64x2)dkL*(U64x2)dkH; */
435 | #endif
436 | }
437 | }
438 |
439 | #else /* scalar variant - universal */
440 |
441 | U64* const xacc = (U64*) acc; /* presumed aligned */
442 | const U32* const xdata = (const U32*) data;
443 | const U32* const xkey = (const U32*) key;
444 |
445 | int i;
446 | for (i=0; i < (int)ACC_NB; i++) {
447 | int const left = 2*i;
448 | int const right= 2*i + 1;
449 | U32 const dataLeft = XXH_readLE32(xdata + left);
450 | U32 const dataRight = XXH_readLE32(xdata + right);
451 | xacc[i] += XXH_mult32to64(dataLeft + xkey[left], dataRight + xkey[right]);
452 | xacc[i] += dataLeft + ((U64)dataRight << 32);
453 | }
454 |
455 | #endif
456 | }
457 |
458 | static void XXH3_scrambleAcc(void* acc, const void* key)
459 | {
460 | #if (XXH_VECTOR == XXH_AVX2)
461 |
462 | assert(((size_t)acc) & 31 == 0);
463 | { ALIGN(32) __m256i* const xacc = (__m256i*) acc;
464 | const __m256i* const xkey = (const __m256i *) key;
465 |
466 | size_t i;
467 | for (i=0; i < STRIPE_LEN/sizeof(__m256i); i++) {
468 | __m256i data = xacc[i];
469 | __m256i const shifted = _mm256_srli_epi64(data, 47);
470 | data = _mm256_xor_si256(data, shifted);
471 |
472 | { __m256i const k = _mm256_loadu_si256 (xkey+i);
473 | __m256i const dk = _mm256_mul_epu32 (data,k); /* U32 dk[4] = {d0+k0, d1+k1, d2+k2, d3+k3} */
474 |
475 | __m256i const d2 = _mm256_shuffle_epi32 (data,0x31);
476 | __m256i const k2 = _mm256_shuffle_epi32 (k,0x31);
477 | __m256i const dk2 = _mm256_mul_epu32 (d2,k2); /* U32 dk[4] = {d0+k0, d1+k1, d2+k2, d3+k3} */
478 |
479 | xacc[i] = _mm256_xor_si256(dk, dk2);
480 | } }
481 | }
482 |
483 | #elif (XXH_VECTOR == XXH_SSE2)
484 |
485 | assert(((size_t)acc) & 15 == 0);
486 | { ALIGN(16) __m128i* const xacc = (__m128i*) acc;
487 | const __m128i* const xkey = (const __m128i *) key;
488 |
489 | size_t i;
490 | for (i=0; i < STRIPE_LEN/sizeof(__m128i); i++) {
491 | __m128i data = xacc[i];
492 | __m128i const shifted = _mm_srli_epi64(data, 47);
493 | data = _mm_xor_si128(data, shifted);
494 |
495 | { __m128i const k = _mm_loadu_si128 (xkey+i);
496 | __m128i const dk = _mm_mul_epu32 (data,k);
497 |
498 | __m128i const d2 = _mm_shuffle_epi32 (data, 0x31);
499 | __m128i const k2 = _mm_shuffle_epi32 (k, 0x31);
500 | __m128i const dk2 = _mm_mul_epu32 (d2,k2);
501 |
502 | xacc[i] = _mm_xor_si128(dk, dk2);
503 | } }
504 | }
505 |
506 | #elif (XXH_VECTOR == XXH_NEON)
507 |
508 | assert(((size_t)acc) & 15 == 0);
509 | { uint64x2_t* const xacc = (uint64x2_t*) acc;
510 | const uint32_t* const xkey = (const uint32_t*) key;
511 | size_t i;
512 |
513 | for (i=0; i < STRIPE_LEN/sizeof(uint64x2_t); i++) {
514 | uint64x2_t data = xacc[i];
515 | uint64x2_t const shifted = vshrq_n_u64(data, 47); /* uint64 shifted[2] = data >> 47; */
516 | data = veorq_u64(data, shifted); /* data ^= shifted; */
517 | {
518 | /* shuffle: 0, 1, 2, 3 -> 0, 2, 1, 3 */
519 | uint32x2x2_t const d =
520 | vzip_u32(
521 | vget_low_u32(vreinterpretq_u32_u64(data)),
522 | vget_high_u32(vreinterpretq_u32_u64(data))
523 | );
524 | uint32x2x2_t const k = vld2_u32(xkey+i*4); /* load and swap */
525 | uint64x2_t const dk = vmull_u32(d.val[0],k.val[0]); /* U64 dk[2] = {(U64)d0*k0, (U64)d2*k2} */
526 | uint64x2_t const dk2 = vmull_u32(d.val[1],k.val[1]); /* U64 dk2[2] = {(U64)d1*k1, (U64)d3*k3} */
527 | xacc[i] = veorq_u64(dk, dk2); /* xacc[i] = dk^dk2; */
528 | } }
529 | }
530 |
531 | #else /* scalar variant - universal */
532 |
533 | U64* const xacc = (U64*) acc;
534 | const U32* const xkey = (const U32*) key;
535 |
536 | int i;
537 | for (i=0; i < (int)ACC_NB; i++) {
538 | int const left = 2*i;
539 | int const right= 2*i + 1;
540 | xacc[i] ^= xacc[i] >> 47;
541 |
542 | { U64 const p1 = XXH_mult32to64(xacc[i] & 0xFFFFFFFF, xkey[left]);
543 | U64 const p2 = XXH_mult32to64(xacc[i] >> 32, xkey[right]);
544 | xacc[i] = p1 ^ p2;
545 | } }
546 |
547 | #endif
548 | }
549 |
550 | static void XXH3_accumulate(U64* acc, const void* restrict data, const U32* restrict key, size_t nbStripes)
551 | {
552 | size_t n;
553 | /* Clang doesn't unroll this loop without the pragma. Unrolling can be up to 1.4x faster. */
554 | #if defined(__clang__) && !defined(__OPTIMIZE_SIZE__)
555 | # pragma clang loop unroll(enable)
556 | #endif
557 | for (n = 0; n < nbStripes; n++ ) {
558 | XXH3_accumulate_512(acc, (const BYTE*)data + n*STRIPE_LEN, key);
559 | key += 2;
560 | }
561 | }
562 |
/* Core long-input loop: consumes full blocks of (STRIPE_LEN * NB_KEYS) bytes,
 * scrambling the accumulators after each complete block, then processes the
 * trailing partial block and a final stripe covering the last STRIPE_LEN
 * bytes (which may overlap bytes already consumed; this is intentional).
 * `acc` must hold ACC_NB U64 lanes, suitably aligned by the caller. */
static void
XXH3_hashLong(U64* acc, const void* data, size_t len)
{
#define NB_KEYS ((KEYSET_DEFAULT_SIZE - STRIPE_ELTS) / 2)

    size_t const block_len = STRIPE_LEN * NB_KEYS;
    size_t const nb_blocks = len / block_len;

    size_t n;
    for (n = 0; n < nb_blocks; n++) {
        XXH3_accumulate(acc, (const BYTE*)data + n*block_len, kKey, NB_KEYS);
        /* scramble with the tail of the key set after each full block */
        XXH3_scrambleAcc(acc, kKey + (KEYSET_DEFAULT_SIZE - STRIPE_ELTS));
    }

    /* last partial block */
    assert(len > STRIPE_LEN);   /* only reached for inputs > 128 bytes */
    { size_t const nbStripes = (len % block_len) / STRIPE_LEN;
        assert(nbStripes < NB_KEYS);
        XXH3_accumulate(acc, (const BYTE*)data + nb_blocks*block_len, kKey, nbStripes);

        /* last stripe */
        if (len & (STRIPE_LEN - 1)) {
            const BYTE* const p = (const BYTE*) data + len - STRIPE_LEN;
            XXH3_accumulate_512(acc, p, kKey + nbStripes*2);
    }   }
}
589 |
590 |
591 | XXH_FORCE_INLINE U64 XXH3_mix16B(const void* data, const void* key)
592 | {
593 | const U64* const key64 = (const U64*)key;
594 | return XXH3_mul128(
595 | XXH_readLE64(data) ^ XXH3_readKey64(key64),
596 | XXH_readLE64((const BYTE*)data+8) ^ XXH3_readKey64(key64+1) );
597 | }
598 |
599 | XXH_FORCE_INLINE U64 XXH3_mix2Accs(const U64* acc, const void* key)
600 | {
601 | const U64* const key64 = (const U64*)key;
602 | return XXH3_mul128(
603 | acc[0] ^ XXH3_readKey64(key64),
604 | acc[1] ^ XXH3_readKey64(key64+1) );
605 | }
606 |
607 | static XXH64_hash_t XXH3_mergeAccs(const U64* acc, const U32* key, U64 start)
608 | {
609 | U64 result64 = start;
610 |
611 | result64 += XXH3_mix2Accs(acc+0, key+0);
612 | result64 += XXH3_mix2Accs(acc+2, key+4);
613 | result64 += XXH3_mix2Accs(acc+4, key+8);
614 | result64 += XXH3_mix2Accs(acc+6, key+12);
615 |
616 | return XXH3_avalanche(result64);
617 | }
618 |
/* 64-bit path for inputs > 128 bytes: run the long-input loop then merge. */
XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */
XXH3_hashLong_64b(const void* data, size_t len, XXH64_hash_t seed)
{
    /* lanes mix the seed with the PRIME64 constants; lane 6 uses -seed so a
     * zero seed leaves the initial state symmetric-free */
    ALIGN(64) U64 acc[ACC_NB] = { seed, PRIME64_1, PRIME64_2, PRIME64_3, PRIME64_4, PRIME64_5, (U64)0 - seed, 0 };

    XXH3_hashLong(acc, data, len);

    /* converge into final hash */
    assert(sizeof(acc) == 64);
    return XXH3_mergeAccs(acc, kKey, (U64)len * PRIME64_1);
}
630 |
631 |
632 | /* === Public entry point === */
633 |
634 | XXH_PUBLIC_API XXH64_hash_t
635 | XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed)
636 | {
637 | const BYTE* const p = (const BYTE*)data;
638 | const char* const key = (const char*)kKey;
639 |
640 | if (len <= 16) return XXH3_len_0to16_64b(data, len, seed);
641 |
642 | { U64 acc = PRIME64_1 * (len + seed);
643 | if (len > 32) {
644 | if (len > 64) {
645 | if (len > 96) {
646 | if (len > 128) return XXH3_hashLong_64b(data, len, seed);
647 |
648 | acc += XXH3_mix16B(p+48, key+96);
649 | acc += XXH3_mix16B(p+len-64, key+112);
650 | }
651 |
652 | acc += XXH3_mix16B(p+32, key+64);
653 | acc += XXH3_mix16B(p+len-48, key+80);
654 | }
655 |
656 | acc += XXH3_mix16B(p+16, key+32);
657 | acc += XXH3_mix16B(p+len-32, key+48);
658 |
659 | }
660 |
661 | acc += XXH3_mix16B(p+0, key+0);
662 | acc += XXH3_mix16B(p+len-16, key+16);
663 |
664 | return XXH3_avalanche(acc);
665 | }
666 | }
667 |
668 |
/* Unseeded variant: strictly equivalent to XXH3_64bits_withSeed(data, len, 0). */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len)
{
    return XXH3_64bits_withSeed(data, len, 0);
}
673 |
674 |
675 |
676 | /* ==========================================
677 | * XXH3 128 bits (=> XXH128)
678 | * ========================================== */
679 |
680 | XXH_FORCE_INLINE XXH128_hash_t
681 | XXH3_len_1to3_128b(const void* data, size_t len, const void* keyPtr, XXH64_hash_t seed)
682 | {
683 | assert(data != NULL);
684 | assert(len > 0 && len <= 3);
685 | assert(keyPtr != NULL);
686 | { const U32* const key32 = (const U32*) keyPtr;
687 | BYTE const c1 = ((const BYTE*)data)[0];
688 | BYTE const c2 = ((const BYTE*)data)[len >> 1];
689 | BYTE const c3 = ((const BYTE*)data)[len - 1];
690 | U32 const l1 = (U32)(c1) + ((U32)(c2) << 8);
691 | U32 const l2 = (U32)(len) + ((U32)(c3) << 2);
692 | U64 const ll11 = XXH_mult32to64(l1 + seed + key32[0], l2 + key32[1]);
693 | U64 const ll12 = XXH_mult32to64(l1 + key32[2], l2 - seed + key32[3]);
694 | XXH128_hash_t const h128 = { XXH3_avalanche(ll11), XXH3_avalanche(ll12) };
695 | return h128;
696 | }
697 | }
698 |
699 |
700 | XXH_FORCE_INLINE XXH128_hash_t
701 | XXH3_len_4to8_128b(const void* data, size_t len, const void* keyPtr, XXH64_hash_t seed)
702 | {
703 | assert(data != NULL);
704 | assert(len >= 4 && len <= 8);
705 | { const U32* const key32 = (const U32*) keyPtr;
706 | U64 acc1 = PRIME64_1 * ((U64)len + seed);
707 | U64 acc2 = PRIME64_2 * ((U64)len - seed);
708 | U32 const l1 = XXH_readLE32(data);
709 | U32 const l2 = XXH_readLE32((const BYTE*)data + len - 4);
710 | acc1 += XXH_mult32to64(l1 + key32[0], l2 + key32[1]);
711 | acc2 += XXH_mult32to64(l1 - key32[2], l2 + key32[3]);
712 | { XXH128_hash_t const h128 = { XXH3_avalanche(acc1), XXH3_avalanche(acc2) };
713 | return h128;
714 | }
715 | }
716 | }
717 |
718 | XXH_FORCE_INLINE XXH128_hash_t
719 | XXH3_len_9to16_128b(const void* data, size_t len, const void* keyPtr, XXH64_hash_t seed)
720 | {
721 | assert(data != NULL);
722 | assert(key != NULL);
723 | assert(len >= 9 && len <= 16);
724 | { const U64* const key64 = (const U64*) keyPtr;
725 | U64 acc1 = PRIME64_1 * ((U64)len + seed);
726 | U64 acc2 = PRIME64_2 * ((U64)len - seed);
727 | U64 const ll1 = XXH_readLE64(data);
728 | U64 const ll2 = XXH_readLE64((const BYTE*)data + len - 8);
729 | acc1 += XXH3_mul128(ll1 + XXH3_readKey64(key64+0), ll2 + XXH3_readKey64(key64+1));
730 | acc2 += XXH3_mul128(ll1 + XXH3_readKey64(key64+2), ll2 + XXH3_readKey64(key64+3));
731 | { XXH128_hash_t const h128 = { XXH3_avalanche(acc1), XXH3_avalanche(acc2) };
732 | return h128;
733 | }
734 | }
735 | }
736 |
/* Length dispatch for 0..16-byte inputs; empty input yields a seed-derived pair. */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_0to16_128b(const void* data, size_t len, XXH64_hash_t seed)
{
    assert(data != NULL);
    assert(len <= 16);
    { if (len > 8) return XXH3_len_9to16_128b(data, len, kKey, seed);
      if (len >= 4) return XXH3_len_4to8_128b(data, len, kKey, seed);
      if (len) return XXH3_len_1to3_128b(data, len, kKey, seed);
      /* len == 0 : both lanes depend only on the seed */
      { XXH128_hash_t const h128 = { seed, (XXH64_hash_t)0 - seed };
        return h128;
      }
    }
}
750 |
/* 128-bit path for inputs > 128 bytes: same accumulator loop as the 64-bit
 * variant, merged twice with different key offsets and start values to
 * produce two independent 64-bit halves. */
XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */
XXH3_hashLong_128b(const void* data, size_t len, XXH64_hash_t seed)
{
    ALIGN(64) U64 acc[ACC_NB] = { seed, PRIME64_1, PRIME64_2, PRIME64_3, PRIME64_4, PRIME64_5, (U64)0 - seed, 0 };
    assert(len > 128);

    XXH3_hashLong(acc, data, len);

    /* converge into final hash */
    assert(sizeof(acc) == 64);
    { U64 const low64 = XXH3_mergeAccs(acc, kKey, (U64)len * PRIME64_1);
      U64 const high64 = XXH3_mergeAccs(acc, kKey+16, ((U64)len+1) * PRIME64_2);
      XXH128_hash_t const h128 = { low64, high64 };
      return h128;
    }
}
767 |
768 | XXH_PUBLIC_API XXH128_hash_t
769 | XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed)
770 | {
771 | if (len <= 16) return XXH3_len_0to16_128b(data, len, seed);
772 |
773 | { U64 acc1 = PRIME64_1 * (len + seed);
774 | U64 acc2 = 0;
775 | const BYTE* const p = (const BYTE*)data;
776 | const char* const key = (const char*)kKey;
777 | if (len > 32) {
778 | if (len > 64) {
779 | if (len > 96) {
780 | if (len > 128) return XXH3_hashLong_128b(data, len, seed);
781 |
782 | acc1 += XXH3_mix16B(p+48, key+96);
783 | acc2 += XXH3_mix16B(p+len-64, key+112);
784 | }
785 |
786 | acc1 += XXH3_mix16B(p+32, key+64);
787 | acc2 += XXH3_mix16B(p+len-48, key+80);
788 | }
789 |
790 | acc1 += XXH3_mix16B(p+16, key+32);
791 | acc2 += XXH3_mix16B(p+len-32, key+48);
792 |
793 | }
794 |
795 | acc1 += XXH3_mix16B(p+0, key+0);
796 | acc2 += XXH3_mix16B(p+len-16, key+16);
797 |
798 | { U64 const part1 = acc1 + acc2;
799 | U64 const part2 = (acc1 * PRIME64_3) + (acc2 * PRIME64_4) + ((len - seed) * PRIME64_2);
800 | XXH128_hash_t const h128 = { XXH3_avalanche(part1), (XXH64_hash_t)0 - XXH3_avalanche(part2) };
801 | return h128;
802 | }
803 | }
804 | }
805 |
806 |
/* Unseeded variant: strictly equivalent to XXH3_128bits_withSeed(data, len, 0). */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len)
{
    return XXH3_128bits_withSeed(data, len, 0);
}
811 |
812 |
/* Convenience alias with the historical argument order (data, len, seed). */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(data, len, seed);
}
817 |
818 | #endif /* XXH3_H */
819 |
--------------------------------------------------------------------------------
/xxhash.c:
--------------------------------------------------------------------------------
1 | /*
2 | * xxHash - Fast Hash algorithm
3 | * Copyright (C) 2012-2016, Yann Collet
4 | *
5 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 | *
7 | * Redistribution and use in source and binary forms, with or without
8 | * modification, are permitted provided that the following conditions are
9 | * met:
10 | *
11 | * * Redistributions of source code must retain the above copyright
12 | * notice, this list of conditions and the following disclaimer.
13 | * * Redistributions in binary form must reproduce the above
14 | * copyright notice, this list of conditions and the following disclaimer
15 | * in the documentation and/or other materials provided with the
16 | * distribution.
17 | *
18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 | *
30 | * You can contact the author at :
31 | * - xxHash homepage: http://www.xxhash.com
32 | * - xxHash source repository : https://github.com/Cyan4973/xxHash
33 | */
34 |
35 |
36 | /* *************************************
37 | * Tuning parameters
38 | ***************************************/
39 | /*!XXH_FORCE_MEMORY_ACCESS :
40 | * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
41 | * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
42 | * The below switch allow to select different access method for improved performance.
43 | * Method 0 (default) : use `memcpy()`. Safe and portable.
44 | * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
45 | * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
46 | * Method 2 : direct access. This method doesn't depend on compiler but violate C standard.
47 | * It can generate buggy code on targets which do not support unaligned memory accesses.
48 | * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
49 | * See http://stackoverflow.com/a/32095106/646947 for details.
50 | * Prefer these methods in priority order (0 > 1 > 2)
51 | */
52 | #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
53 | # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
54 | || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
55 | || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
56 | # define XXH_FORCE_MEMORY_ACCESS 2
57 | # elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
58 | (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
59 | || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
60 | || defined(__ARM_ARCH_7S__) ))
61 | # define XXH_FORCE_MEMORY_ACCESS 1
62 | # endif
63 | #endif
64 |
65 | /*!XXH_ACCEPT_NULL_INPUT_POINTER :
66 | * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault.
67 | * When this macro is enabled, xxHash actively checks input for null pointer.
 * If it is, the result for null input pointers is the same as a null-length input.
69 | */
70 | #ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
71 | # define XXH_ACCEPT_NULL_INPUT_POINTER 0
72 | #endif
73 |
74 | /*!XXH_FORCE_ALIGN_CHECK :
75 | * This is a minor performance trick, only useful with lots of very small keys.
76 | * It means : check for aligned/unaligned input.
77 | * The check costs one initial branch per hash;
78 | * set it to 0 when the input is guaranteed to be aligned,
79 | * or when alignment doesn't matter for performance.
80 | */
81 | #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
82 | # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
83 | # define XXH_FORCE_ALIGN_CHECK 0
84 | # else
85 | # define XXH_FORCE_ALIGN_CHECK 1
86 | # endif
87 | #endif
88 |
89 |
90 | /* *************************************
91 | * Includes & Memory related functions
92 | ***************************************/
93 | /*! Modify the local functions below should you wish to use some other memory routines
94 | * for malloc(), free() */
#include <stdlib.h>   /* malloc, free */
96 | static void* XXH_malloc(size_t s) { return malloc(s); }
97 | static void XXH_free (void* p) { free(p); }
98 | /*! and for memcpy() */
#include <string.h>   /* memcpy */
100 | static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
101 |
#include <assert.h>   /* assert */
103 |
104 | #define XXH_STATIC_LINKING_ONLY
105 | #include "xxhash.h"
106 |
107 |
108 | /* *************************************
109 | * Compiler Specific Options
110 | ***************************************/
111 | #ifdef _MSC_VER /* Visual Studio */
112 | # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
113 | # define XXH_FORCE_INLINE static __forceinline
114 | # define XXH_NO_INLINE static __declspec(noinline)
115 | #else
116 | # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
117 | # ifdef __GNUC__
118 | # define XXH_FORCE_INLINE static inline __attribute__((always_inline))
119 | # define XXH_NO_INLINE static __attribute__((noinline))
120 | # else
121 | # define XXH_FORCE_INLINE static inline
122 | # define XXH_NO_INLINE static
123 | # endif
124 | # else
125 | # define XXH_FORCE_INLINE static
126 | # define XXH_NO_INLINE static
127 | # endif /* __STDC_VERSION__ */
128 | #endif
129 |
130 |
131 | /* *************************************
132 | * Basic Types
133 | ***************************************/
134 | #ifndef MEM_MODULE
135 | # if !defined (__VMS) \
136 | && (defined (__cplusplus) \
137 | || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
139 | typedef uint8_t BYTE;
140 | typedef uint16_t U16;
141 | typedef uint32_t U32;
142 | # else
143 | typedef unsigned char BYTE;
144 | typedef unsigned short U16;
145 | typedef unsigned int U32;
146 | # endif
147 | #endif
148 |
149 |
150 | /* === Memory access === */
151 |
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; } __attribute__((packed)) unalign;
static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */
/* Reads 32 bits from a possibly-unaligned address; mainstream compilers
 * optimize the memcpy into a single load. */
static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
177 |
178 |
179 | /* === Endianess === */
180 | typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
181 |
182 | /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
183 | #ifndef XXH_CPU_LITTLE_ENDIAN
184 | static int XXH_isLittleEndian(void)
185 | {
186 | const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
187 | return one.c[0];
188 | }
189 | # define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
190 | #endif
191 |
192 |
193 |
194 |
195 | /* ****************************************
196 | * Compiler-specific Functions and Macros
197 | ******************************************/
198 | #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
199 |
200 | /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
201 | #if defined(_MSC_VER)
202 | # define XXH_rotl32(x,r) _rotl(x,r)
203 | # define XXH_rotl64(x,r) _rotl64(x,r)
204 | #else
205 | # define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
206 | # define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
207 | #endif
208 |
209 | #if defined(_MSC_VER) /* Visual Studio */
210 | # define XXH_swap32 _byteswap_ulong
211 | #elif XXH_GCC_VERSION >= 403
212 | # define XXH_swap32 __builtin_bswap32
213 | #else
214 | static U32 XXH_swap32 (U32 x)
215 | {
216 | return ((x << 24) & 0xff000000 ) |
217 | ((x << 8) & 0x00ff0000 ) |
218 | ((x >> 8) & 0x0000ff00 ) |
219 | ((x >> 24) & 0x000000ff );
220 | }
221 | #endif
222 |
223 |
224 | /* ***************************
225 | * Memory reads
226 | *****************************/
227 | typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
228 |
/* Reads a 32-bit little-endian value, byte-swapping on big-endian targets. */
XXH_FORCE_INLINE U32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}
233 |
/* Reads a 32-bit big-endian value, byte-swapping on little-endian targets. */
static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
238 |
239 | XXH_FORCE_INLINE U32
240 | XXH_readLE32_align(const void* ptr, XXH_alignment align)
241 | {
242 | if (align==XXH_unaligned) {
243 | return XXH_readLE32(ptr);
244 | } else {
245 | return XXH_CPU_LITTLE_ENDIAN ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
246 | }
247 | }
248 |
249 |
250 | /* *************************************
251 | * Macros
252 | ***************************************/
253 | #define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */
254 | XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
255 |
256 |
257 | /* *******************************************************************
258 | * 32-bit hash functions
259 | *********************************************************************/
260 | static const U32 PRIME32_1 = 2654435761U; /* 0b10011110001101110111100110110001 */
261 | static const U32 PRIME32_2 = 2246822519U; /* 0b10000101111010111100101001110111 */
262 | static const U32 PRIME32_3 = 3266489917U; /* 0b11000010101100101010111000111101 */
263 | static const U32 PRIME32_4 = 668265263U; /* 0b00100111110101001110101100101111 */
264 | static const U32 PRIME32_5 = 374761393U; /* 0b00010110010101100110011110110001 */
265 |
/* One XXH32 mixing round: fold `input` into the lane accumulator `acc`. */
static U32 XXH32_round(U32 acc, U32 input)
{
    acc += input * PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= PRIME32_1;
#if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /* UGLY HACK:
     * This inline assembly hack forces acc into a normal register. This is the
     * only thing that prevents GCC and Clang from autovectorizing the XXH32 loop
     * (pragmas and attributes don't work for some reason) without globally
     * disabling SSE4.1.
     *
     * The reason we want to avoid vectorization is because despite working on
     * 4 integers at a time, there are multiple factors slowing XXH32 down on
     * SSE4:
     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on newer chips!)
     *   making it slightly slower to multiply four integers at once compared to four
     *   integers independently. Even when pmulld was fastest, Sandy/Ivy Bridge, it is
     *   still not worth it to go into SSE just to multiply unless doing a long operation.
     *
     * - Four instructions are required to rotate,
     *      movqda tmp,  v // not required with VEX encoding
     *      pslld  tmp, 13 // tmp <<= 13
     *      psrld  v,   19 // x >>= 19
     *      por    v,  tmp // x |= tmp
     *   compared to one for scalar:
     *      roll   v, 13    // reliably fast across the board
     *      shldl  v, v, 13 // Sandy Bridge and later prefer this for some reason
     *
     * - Instruction level parallelism is actually more beneficial here because the
     *   SIMD actually serializes this operation: While v1 is rotating, v2 can load data,
     *   while v3 can multiply. SSE forces them to operate together.
     *
     * How this hack works:
     * __asm__(""       // Declare an assembly block but don't declare any instructions
     *          :       // However, as an Input/Output Operand,
     *          "+r"    // constrain a read/write operand (+) as a general purpose register (r).
     *          (acc)   // and set acc as the operand
     * );
     *
     * Because of the 'r', the compiler has promised that seed will be in a
     * general purpose register and the '+' says that it will be 'read/write',
     * so it has to assume it has changed. It is like volatile without all the
     * loads and stores.
     *
     * Since the argument has to be in a normal register (not an SSE register),
     * each time XXH32_round is called, it is impossible to vectorize. */
    __asm__("" : "+r" (acc));
#endif
    return acc;
}
317 |
318 | /* mix all bits */
319 | static U32 XXH32_avalanche(U32 h32)
320 | {
321 | h32 ^= h32 >> 15;
322 | h32 *= PRIME32_2;
323 | h32 ^= h32 >> 13;
324 | h32 *= PRIME32_3;
325 | h32 ^= h32 >> 16;
326 | return(h32);
327 | }
328 |
329 | #define XXH_get32bits(p) XXH_readLE32_align(p, align)
330 |
/* Consume the last 0..15 bytes of input, then apply the final avalanche.
 * `align` selects the aligned/unaligned read path via XXH_get32bits.
 * The switch encodes the remainder as a count of 4-byte words plus single
 * bytes, with deliberate fallthrough between cases. */
static U32
XXH32_finalize(U32 h32, const void* ptr, size_t len, XXH_alignment align)

{
    const BYTE* p = (const BYTE*)ptr;

/* Mix one trailing byte into the hash. */
#define PROCESS1               \
    h32 += (*p++) * PRIME32_5; \
    h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;

/* Mix one trailing 4-byte word into the hash. */
#define PROCESS4                         \
    h32 += XXH_get32bits(p) * PRIME32_3; \
    p+=4;                                \
    h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;

    switch(len&15)  /* or switch(bEnd - p) */
    {
      case 12:      PROCESS4;
                    /* fallthrough */
      case 8:       PROCESS4;
                    /* fallthrough */
      case 4:       PROCESS4;
                    return XXH32_avalanche(h32);

      case 13:      PROCESS4;
                    /* fallthrough */
      case 9:       PROCESS4;
                    /* fallthrough */
      case 5:       PROCESS4;
                    PROCESS1;
                    return XXH32_avalanche(h32);

      case 14:      PROCESS4;
                    /* fallthrough */
      case 10:      PROCESS4;
                    /* fallthrough */
      case 6:       PROCESS4;
                    PROCESS1;
                    PROCESS1;
                    return XXH32_avalanche(h32);

      case 15:      PROCESS4;
                    /* fallthrough */
      case 11:      PROCESS4;
                    /* fallthrough */
      case 7:       PROCESS4;
                    /* fallthrough */
      case 3:       PROCESS1;
                    /* fallthrough */
      case 2:       PROCESS1;
                    /* fallthrough */
      case 1:       PROCESS1;
                    /* fallthrough */
      case 0:       return XXH32_avalanche(h32);
    }
    assert(0);
    return h32;   /* reaching this point is deemed impossible */
}
389 |
/* Core XXH32 implementation, parameterized on read alignment.
 * Processes 16-byte groups with 4 independent lanes, merges the lanes,
 * then delegates the remaining 0..15 bytes to XXH32_finalize. */
XXH_FORCE_INLINE U32
XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;

#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;   /* non-NULL sentinel; never dereferenced since len==0 */
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 15;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p < limit);

        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + PRIME32_5;
    }

    h32 += (U32)len;

    return XXH32_finalize(h32, p, len&15, align);
}
428 |
429 |
/* Public one-shot XXH32. Optionally takes the aligned fast path when the
 * platform benefits from the check (see XXH_FORCE_ALIGN_CHECK). */
XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, input, len);
    return XXH32_digest(&state);

#else

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            return XXH32_endian_align(input, len, seed, XXH_aligned);
    }   }

    return XXH32_endian_align(input, len, seed, XXH_unaligned);
#endif
}
449 |
450 |
451 |
452 | /*====== Hash streaming ======*/
453 |
454 | XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
455 | {
456 |     /* Allocate a streaming state on the heap. May return NULL on allocation failure;
457 |      * callers must check. The state is zero-filled so its content is deterministic
458 |      * even if it is (incorrectly) used before XXH32_reset(). */
459 |     XXH32_state_t* const state = (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
460 |     if (state != NULL) memset(state, 0, sizeof(*state));
461 |     return state;
462 | }
458 | XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
459 | {
460 |     /* Release a state obtained from XXH32_createState(). Always reports success. */
461 |     XXH_free(statePtr);   /* NOTE(review): presumably wraps free(), so NULL is a harmless no-op — confirm XXH_free's definition */
462 |     return XXH_OK;
463 | }
463 |
464 | XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
465 | {
466 |     /* Duplicate the complete hashing state; plain struct assignment copies every field. */
467 |     *dstState = *srcState;
468 | }
468 |
469 | XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
470 | {   /* (Re-)initialize a streaming state with `seed`. Always returns XXH_OK. */
471 |     XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
472 |     memset(&state, 0, sizeof(state));
473 |     state.v1 = seed + PRIME32_1 + PRIME32_2;
474 |     state.v2 = seed + PRIME32_2;
475 |     state.v3 = seed + 0;   /* v3 also preserves the seed for the short-input path in XXH32_digest() */
476 |     state.v4 = seed - PRIME32_1;
477 |     /* do not write into reserved, planned to be removed in a future version */
478 |     memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));   /* relies on `reserved` being the last struct field */
479 |     return XXH_OK;
480 | }
481 |
482 |
483 | XXH_PUBLIC_API XXH_errorcode
484 | XXH32_update(XXH32_state_t* state, const void* input, size_t len)
485 | {
486 |     /* Feed `len` bytes into the streaming state.
487 |      * Bytes are buffered in state->mem32 until a full 16-byte stripe is available.
488 |      * Returns XXH_ERROR on NULL input unless XXH_ACCEPT_NULL_INPUT_POINTER is set. */
489 |     if (input==NULL)
490 | #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
491 |         return XXH_OK;
492 | #else
493 |         return XXH_ERROR;
494 | #endif
495 | 
496 |     {   const BYTE* p = (const BYTE*)input;
497 |         const BYTE* const bEnd = p + len;
498 | 
499 |         state->total_len_32 += (XXH32_hash_t)len;
500 |         state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
501 | 
502 |         if (state->memsize + len < 16)  {   /* not enough for a stripe: just fill the tmp buffer */
503 |             XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
504 |             state->memsize += (XXH32_hash_t)len;
505 |             return XXH_OK;
506 |         }
507 | 
508 |         if (state->memsize) {   /* some data left from previous update: complete and consume the buffered stripe */
509 |             XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
510 |             {   const U32* p32 = state->mem32;
511 |                 state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
512 |                 state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
513 |                 state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
514 |                 state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
515 |             }
516 |             p += 16-state->memsize;
517 |             state->memsize = 0;
518 |         }
519 | 
520 |         /* Length test rather than `p <= bEnd-16` : computing `bEnd-16` when fewer
521 |          * than 16 bytes remain would form a pointer before the start of the input
522 |          * buffer, which is undefined behavior in C. Also matches XXH64_update's style. */
523 |         if ((size_t)(bEnd - p) >= 16) {
524 |             const BYTE* const limit = bEnd - 16;   /* valid: at least 16 bytes remain */
525 |             U32 v1 = state->v1;
526 |             U32 v2 = state->v2;
527 |             U32 v3 = state->v3;
528 |             U32 v4 = state->v4;
529 | 
530 |             do {
531 |                 v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
532 |                 v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4;
533 |                 v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4;
534 |                 v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4;
535 |             } while (p<=limit);
536 | 
537 |             state->v1 = v1;
538 |             state->v2 = v2;
539 |             state->v3 = v3;
540 |             state->v4 = v4;
541 |         }
542 | 
543 |         if (p < bEnd) {   /* stash the incomplete tail (0..15 bytes) for the next call */
544 |             XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
545 |             state->memsize = (unsigned)(bEnd-p);
546 |         }
547 |     }
548 | 
549 |     return XXH_OK;
550 | }
545 |
546 |
547 | XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state)
548 | {   /* Produce the hash of all input fed so far. Does not modify *state, so more input may follow. */
549 |     U32 h32;
550 | 
551 |     if (state->large_len) {   /* >=16 bytes total: accumulators were used */
552 |         h32 = XXH_rotl32(state->v1, 1)
553 |             + XXH_rotl32(state->v2, 7)
554 |             + XXH_rotl32(state->v3, 12)
555 |             + XXH_rotl32(state->v4, 18);
556 |     } else {
557 |         h32 = state->v3 /* == seed */ + PRIME32_5;
558 |     }
559 | 
560 |     h32 += state->total_len_32;
561 | 
562 |     return XXH32_finalize(h32, state->mem32, state->memsize, XXH_aligned);   /* mem32 is naturally aligned */
563 | }
564 |
565 |
566 | /*====== Canonical representation ======*/
567 |
568 | /*! Default XXH result types are basic unsigned 32 and 64 bits.
569 | * The canonical representation follows human-readable write convention, aka big-endian (large digits first).
570 | * These functions allow transformation of hash result into and from its canonical format.
571 | * This way, hash values can be written into a file or buffer, remaining comparable across different systems.
572 | */
573 |
574 | XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
575 | {   /* Store `hash` into `dst` in canonical (big-endian) byte order. */
576 |     XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
577 |     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);   /* swap so the memcpy below writes big-endian */
578 |     memcpy(dst, &hash, sizeof(*dst));
579 | }
580 |
581 | XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
582 | {   /* Inverse of XXH32_canonicalFromHash(): read a big-endian value back into native order. */
583 |     return XXH_readBE32(src);
584 | }
585 |
586 |
587 | #ifndef XXH_NO_LONG_LONG
588 |
589 | /* *******************************************************************
590 | * 64-bit hash functions
591 | *********************************************************************/
592 |
593 | /*====== Memory access ======*/
594 |
595 | #ifndef MEM_MODULE
596 | # define MEM_MODULE
597 | # if !defined (__VMS) \
598 | && (defined (__cplusplus) \
599 | || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
600 | #   include <stdint.h>
601 | typedef uint64_t U64;
602 | # else
603 | /* if compiler doesn't support unsigned long long, replace by another 64-bit type */
604 | typedef unsigned long long U64;
605 | # endif
606 | #endif
607 |
608 |
609 | #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
610 | 
611 | /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
612 | static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }   /* native-order 64-bit load */
613 | 
614 | #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
615 | 
616 | /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
617 | /* currently only defined for gcc and icc */
618 | typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
619 | static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }   /* packed union forces a safe unaligned load */
620 | 
621 | #else
622 | 
623 | /* portable and safe solution. Generally efficient.
624 |  * see : http://stackoverflow.com/a/32095106/646947
625 |  */
626 | 
627 | static U64 XXH_read64(const void* memPtr)
628 | {
629 |     U64 val;
630 |     memcpy(&val, memPtr, sizeof(val));   /* compilers typically optimize this into a single load instruction */
631 |     return val;
632 | }
633 | 
634 | #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
635 |
636 | #if defined(_MSC_VER)     /* Visual Studio */
637 | #  define XXH_swap64 _byteswap_uint64
638 | #elif XXH_GCC_VERSION >= 403
639 | #  define XXH_swap64 __builtin_bswap64
640 | #else
641 | /* portable fallback: reverse the 8 bytes of x by shift-and-mask */
642 | static U64 XXH_swap64 (U64 x)
643 | {
644 |     return  ((x << 56) & 0xff00000000000000ULL) |
645 |             ((x << 40) & 0x00ff000000000000ULL) |
646 |             ((x << 24) & 0x0000ff0000000000ULL) |
647 |             ((x << 8)  & 0x000000ff00000000ULL) |
648 |             ((x >> 8)  & 0x00000000ff000000ULL) |
649 |             ((x >> 24) & 0x0000000000ff0000ULL) |
650 |             ((x >> 40) & 0x000000000000ff00ULL) |
651 |             ((x >> 56) & 0x00000000000000ffULL);
652 | }
653 | #endif
653 |
654 | XXH_FORCE_INLINE U64 XXH_readLE64(const void* ptr)
655 | {
656 |     /* Read a 64-bit value stored little-endian, byte-swapping on big-endian hosts. */
657 |     U64 const raw = XXH_read64(ptr);
658 |     return XXH_CPU_LITTLE_ENDIAN ? raw : XXH_swap64(raw);
659 | }
658 |
659 | static U64 XXH_readBE64(const void* ptr)
660 | {
661 |     /* Read a 64-bit value stored big-endian, byte-swapping on little-endian hosts. */
662 |     U64 const raw = XXH_read64(ptr);
663 |     if (XXH_CPU_LITTLE_ENDIAN) return XXH_swap64(raw);
664 |     return raw;
665 | }
663 |
664 | XXH_FORCE_INLINE U64
665 | XXH_readLE64_align(const void* ptr, XXH_alignment align)
666 | {
667 |     /* Like XXH_readLE64(), but uses a direct (aligned) load when the caller
668 |      * has already verified that `ptr` is suitably aligned. */
669 |     if (align==XXH_unaligned)
670 |         return XXH_readLE64(ptr);
671 |     {   U64 const v = *(const U64*)ptr;   /* aligned: direct dereference is safe */
672 |         return XXH_CPU_LITTLE_ENDIAN ? v : XXH_swap64(v);
673 |     }
674 | }
672 |
673 |
674 | /*====== xxh64 ======*/
675 |
676 | static const U64 PRIME64_1 = 11400714785074694791ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
677 | static const U64 PRIME64_2 = 14029467366897019727ULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
678 | static const U64 PRIME64_3 = 1609587929392839161ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
679 | static const U64 PRIME64_4 = 9650029242287828579ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
680 | static const U64 PRIME64_5 = 2870177450012600261ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
681 |
682 | static U64 XXH64_round(U64 acc, U64 input)
683 | {
684 |     /* One 8-byte mixing step: acc <- rotl31(acc + input*PRIME64_2) * PRIME64_1 */
685 |     U64 const sum = acc + input * PRIME64_2;
686 |     return XXH_rotl64(sum, 31) * PRIME64_1;
687 | }
689 |
690 | static U64 XXH64_mergeRound(U64 acc, U64 val)
691 | {
692 |     /* Fold one accumulator lane into the merged hash. */
693 |     acc ^= XXH64_round(0, val);
694 |     return acc * PRIME64_1 + PRIME64_4;
695 | }
697 |
698 | static U64 XXH64_avalanche(U64 h64)
699 | {
700 |     /* Final mixing stage: xor-shift / multiply cascade spreads entropy across all 64 bits. */
701 |     h64 ^= h64 >> 33;
702 |     h64 *= PRIME64_2;
703 |     h64 ^= h64 >> 29;
704 |     h64 *= PRIME64_3;
705 |     h64 ^= h64 >> 32;
706 |     return h64;
707 | }
707 |
708 |
709 | #define XXH_get64bits(p) XXH_readLE64_align(p, align)
710 | 
711 | static U64
712 | XXH64_finalize(U64 h64, const void* ptr, size_t len, XXH_alignment align)
713 | {   /* Consume the last len%32 input bytes into h64, then apply the final avalanche. */
714 |     const BYTE* p = (const BYTE*)ptr;
715 | 
716 | #define PROCESS1_64          \
717 |     h64 ^= (*p++) * PRIME64_5; \
718 |     h64 = XXH_rotl64(h64, 11) * PRIME64_1;
719 | 
720 | #define PROCESS4_64          \
721 |     h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
722 |     p+=4;                    \
723 |     h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
724 | 
725 | #define PROCESS8_64 {        \
726 |     U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
727 |     p+=8;                    \
728 |     h64 ^= k1;               \
729 |     h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
730 | }
731 | 
732 |     switch(len&31) {   /* 0..31 remaining bytes; each case chain consumes exactly its count via 8/4/1-byte steps */
733 |        case 24: PROCESS8_64;
734 |                 /* fallthrough */
735 |        case 16: PROCESS8_64;
736 |                 /* fallthrough */
737 |        case  8: PROCESS8_64;
738 |                 return XXH64_avalanche(h64);
739 | 
740 |        case 28: PROCESS8_64;
741 |                 /* fallthrough */
742 |        case 20: PROCESS8_64;
743 |                 /* fallthrough */
744 |        case 12: PROCESS8_64;
745 |                 /* fallthrough */
746 |        case  4: PROCESS4_64;
747 |                 return XXH64_avalanche(h64);
748 | 
749 |        case 25: PROCESS8_64;
750 |                 /* fallthrough */
751 |        case 17: PROCESS8_64;
752 |                 /* fallthrough */
753 |        case  9: PROCESS8_64;
754 |                 PROCESS1_64;
755 |                 return XXH64_avalanche(h64);
756 | 
757 |        case 29: PROCESS8_64;
758 |                 /* fallthrough */
759 |        case 21: PROCESS8_64;
760 |                 /* fallthrough */
761 |        case 13: PROCESS8_64;
762 |                 /* fallthrough */
763 |        case  5: PROCESS4_64;
764 |                 PROCESS1_64;
765 |                 return XXH64_avalanche(h64);
766 | 
767 |        case 26: PROCESS8_64;
768 |                 /* fallthrough */
769 |        case 18: PROCESS8_64;
770 |                 /* fallthrough */
771 |        case 10: PROCESS8_64;
772 |                 PROCESS1_64;
773 |                 PROCESS1_64;
774 |                 return XXH64_avalanche(h64);
775 | 
776 |        case 30: PROCESS8_64;
777 |                 /* fallthrough */
778 |        case 22: PROCESS8_64;
779 |                 /* fallthrough */
780 |        case 14: PROCESS8_64;
781 |                 /* fallthrough */
782 |        case  6: PROCESS4_64;
783 |                 PROCESS1_64;
784 |                 PROCESS1_64;
785 |                 return XXH64_avalanche(h64);
786 | 
787 |        case 27: PROCESS8_64;
788 |                 /* fallthrough */
789 |        case 19: PROCESS8_64;
790 |                 /* fallthrough */
791 |        case 11: PROCESS8_64;
792 |                 PROCESS1_64;
793 |                 PROCESS1_64;
794 |                 PROCESS1_64;
795 |                 return XXH64_avalanche(h64);
796 | 
797 |        case 31: PROCESS8_64;
798 |                 /* fallthrough */
799 |        case 23: PROCESS8_64;
800 |                 /* fallthrough */
801 |        case 15: PROCESS8_64;
802 |                 /* fallthrough */
803 |        case  7: PROCESS4_64;
804 |                 /* fallthrough */
805 |        case  3: PROCESS1_64;
806 |                 /* fallthrough */
807 |        case  2: PROCESS1_64;
808 |                 /* fallthrough */
809 |        case  1: PROCESS1_64;
810 |                 /* fallthrough */
811 |        case  0: return XXH64_avalanche(h64);
812 |     }
813 | 
814 |     /* impossible to reach */
815 |     assert(0);
816 |     return 0;  /* unreachable, but some compilers complain without it */
817 | }
818 |
819 | XXH_FORCE_INLINE U64   /* single-shot XXH64 core: 32-byte stripes into 4 accumulators, merge, then XXH64_finalize() */
820 | XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_alignment align)
821 | {
822 |     const BYTE* p = (const BYTE*)input;
823 |     const BYTE* bEnd = p + len;
824 |     U64 h64;
825 | 
826 | #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
827 |     if (p==NULL) {
828 |         len=0;
829 |         bEnd=p=(const BYTE*)(size_t)32;   /* arbitrary non-NULL value; never dereferenced since len==0 */
830 |     }
831 | #endif
832 | 
833 |     if (len>=32) {
834 |         const BYTE* const limit = bEnd - 32;   /* loop consumes 32 bytes/iteration; iterate while >=32 bytes remain */
835 |         U64 v1 = seed + PRIME64_1 + PRIME64_2;
836 |         U64 v2 = seed + PRIME64_2;
837 |         U64 v3 = seed + 0;
838 |         U64 v4 = seed - PRIME64_1;
839 | 
840 |         do {
841 |             v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
842 |             v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
843 |             v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
844 |             v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
845 |         } while (p<=limit);
846 | 
847 |         h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
848 |         h64 = XXH64_mergeRound(h64, v1);
849 |         h64 = XXH64_mergeRound(h64, v2);
850 |         h64 = XXH64_mergeRound(h64, v3);
851 |         h64 = XXH64_mergeRound(h64, v4);
852 | 
853 |     } else {
854 |         h64  = seed + PRIME64_5;   /* short-input path: accumulator stage is skipped entirely */
855 |     }
856 | 
857 |     h64 += (U64) len;
858 | 
859 |     return XXH64_finalize(h64, p, len, align);   /* finalize only consumes len&31 remaining bytes */
860 | }
861 |
862 |
863 | XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
864 | {   /* Public single-shot 64-bit hash; dispatches on pointer alignment so aligned inputs can use direct loads. */
865 | #if 0
866 |     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
867 |     XXH64_state_t state;
868 |     XXH64_reset(&state, seed);
869 |     XXH64_update(&state, input, len);
870 |     return XXH64_digest(&state);
871 | 
872 | #else
873 | 
874 |     if (XXH_FORCE_ALIGN_CHECK) {   /* configuration macro defined earlier in this file */
875 |         if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
876 |             return XXH64_endian_align(input, len, seed, XXH_aligned);
877 |     }   }
878 | 
879 |     return XXH64_endian_align(input, len, seed, XXH_unaligned);
880 | 
881 | #endif
882 | }
883 |
884 | /*====== Hash Streaming ======*/
885 |
886 | XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
887 | {
888 |     /* Allocate a streaming state on the heap. May return NULL on allocation failure;
889 |      * callers must check. The state is zero-filled so its content is deterministic
890 |      * even if it is (incorrectly) used before XXH64_reset(). */
891 |     XXH64_state_t* const state = (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
892 |     if (state != NULL) memset(state, 0, sizeof(*state));
893 |     return state;
894 | }
890 | XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
891 | {
892 |     /* Release a state obtained from XXH64_createState(). Always reports success. */
893 |     XXH_free(statePtr);   /* NOTE(review): presumably wraps free(), so NULL is a harmless no-op — confirm XXH_free's definition */
894 |     return XXH_OK;
895 | }
895 |
896 | XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
897 | {
898 |     /* Duplicate the complete hashing state; plain struct assignment copies every field. */
899 |     *dstState = *srcState;
900 | }
900 |
901 | XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
902 | {   /* (Re-)initialize a streaming state with `seed`. Always returns XXH_OK. */
903 |     XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
904 |     memset(&state, 0, sizeof(state));
905 |     state.v1 = seed + PRIME64_1 + PRIME64_2;
906 |     state.v2 = seed + PRIME64_2;
907 |     state.v3 = seed + 0;   /* v3 also preserves the seed for the short-input path in XXH64_digest() */
908 |     state.v4 = seed - PRIME64_1;
909 |     /* do not write into reserved, planned to be removed in a future version */
910 |     memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));   /* relies on `reserved` being the last struct field */
911 |     return XXH_OK;
912 | }
913 |
914 | XXH_PUBLIC_API XXH_errorcode
915 | XXH64_update (XXH64_state_t* state, const void* input, size_t len)
916 | {
917 |     /* Feed `len` bytes into the streaming state.
918 |      * Bytes are buffered in state->mem64 until a full 32-byte stripe is available.
919 |      * Returns XXH_ERROR on NULL input unless XXH_ACCEPT_NULL_INPUT_POINTER is set. */
920 |     if (input==NULL)
921 | #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
922 |         return XXH_OK;
923 | #else
924 |         return XXH_ERROR;
925 | #endif
926 | 
927 |     {   const BYTE* p = (const BYTE*)input;
928 |         const BYTE* const bEnd = p + len;
929 | 
930 |         state->total_len += len;
931 | 
932 |         if (state->memsize + len < 32) {  /* not enough for a stripe: just fill the tmp buffer */
933 |             XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
934 |             state->memsize += (U32)len;
935 |             return XXH_OK;
936 |         }
937 | 
938 |         if (state->memsize) {   /* tmp buffer can be completed: consume the buffered stripe */
939 |             XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
940 |             state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
941 |             state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
942 |             state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
943 |             state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
944 |             p += 32-state->memsize;
945 |             state->memsize = 0;
946 |         }
947 | 
948 |         /* Length test rather than `p+32 <= bEnd` : computing p+32 when fewer than
949 |          * 32 bytes remain would form a pointer more than one past the end of the
950 |          * input buffer, which is undefined behavior in C. */
951 |         if ((size_t)(bEnd - p) >= 32) {
952 |             const BYTE* const limit = bEnd - 32;   /* valid: at least 32 bytes remain */
953 |             U64 v1 = state->v1;
954 |             U64 v2 = state->v2;
955 |             U64 v3 = state->v3;
956 |             U64 v4 = state->v4;
957 | 
958 |             do {
959 |                 v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
960 |                 v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
961 |                 v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
962 |                 v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
963 |             } while (p<=limit);
964 | 
965 |             state->v1 = v1;
966 |             state->v2 = v2;
967 |             state->v3 = v3;
968 |             state->v4 = v4;
969 |         }
970 | 
971 |         if (p < bEnd) {   /* stash the incomplete tail (0..31 bytes) for the next call */
972 |             XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
973 |             state->memsize = (unsigned)(bEnd-p);
974 |         }
975 |     }
976 | 
977 |     return XXH_OK;
978 | }
973 |
974 |
975 | XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state)
976 | {   /* Produce the hash of all input fed so far. Does not modify *state, so more input may follow. */
977 |     U64 h64;
978 | 
979 |     if (state->total_len >= 32) {   /* accumulators were used */
980 |         U64 const v1 = state->v1;
981 |         U64 const v2 = state->v2;
982 |         U64 const v3 = state->v3;
983 |         U64 const v4 = state->v4;
984 | 
985 |         h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
986 |         h64 = XXH64_mergeRound(h64, v1);
987 |         h64 = XXH64_mergeRound(h64, v2);
988 |         h64 = XXH64_mergeRound(h64, v3);
989 |         h64 = XXH64_mergeRound(h64, v4);
990 |     } else {
991 |         h64 = state->v3 /*seed*/ + PRIME64_5;
992 |     }
993 | 
994 |     h64 += (U64) state->total_len;
995 | 
996 |     return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, XXH_aligned);   /* finalize only uses total_len&31; mem64 is naturally aligned */
997 | }
998 |
999 |
1000 | /*====== Canonical representation ======*/
1001 |
1002 | XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
1003 | {   /* Store `hash` into `dst` in canonical (big-endian) byte order. */
1004 |     XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
1005 |     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);   /* swap so the memcpy below writes big-endian */
1006 |     memcpy(dst, &hash, sizeof(*dst));
1007 | }
1008 |
1009 | XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
1010 | {   /* Inverse of XXH64_canonicalFromHash(): read a big-endian value back into native order. */
1011 |     return XXH_readBE64(src);
1012 | }
1013 |
1014 |
1015 |
1016 | /* *********************************************************************
1017 | * XXH3
1018 | * New generation hash designed for speed on small keys and vectorization
1019 | ************************************************************************ */
1020 |
1021 | #include "xxh3.h"
1022 |
1023 |
1024 | #endif /* XXH_NO_LONG_LONG */
1025 |
--------------------------------------------------------------------------------
/xxhash.h:
--------------------------------------------------------------------------------
1 | /*
2 | xxHash - Extremely Fast Hash algorithm
3 | Header File
4 | Copyright (C) 2012-2016, Yann Collet.
5 |
6 | BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 |
8 | Redistribution and use in source and binary forms, with or without
9 | modification, are permitted provided that the following conditions are
10 | met:
11 |
12 | * Redistributions of source code must retain the above copyright
13 | notice, this list of conditions and the following disclaimer.
14 | * Redistributions in binary form must reproduce the above
15 | copyright notice, this list of conditions and the following disclaimer
16 | in the documentation and/or other materials provided with the
17 | distribution.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
31 | You can contact the author at :
32 | - xxHash source repository : https://github.com/Cyan4973/xxHash
33 | */
34 |
35 | /* Notice extracted from xxHash homepage :
36 |
37 | xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
38 | It also successfully passes all tests from the SMHasher suite.
39 |
40 | Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
41 |
42 | Name Speed Q.Score Author
43 | xxHash 5.4 GB/s 10
44 | CrapWow 3.2 GB/s 2 Andrew
45 | MurmurHash 3a 2.7 GB/s 10 Austin Appleby
46 | SpookyHash 2.0 GB/s 10 Bob Jenkins
47 | SBox 1.4 GB/s 9 Bret Mulvey
48 | Lookup3 1.2 GB/s 9 Bob Jenkins
49 | SuperFastHash 1.2 GB/s 1 Paul Hsieh
50 | CityHash64 1.05 GB/s 10 Pike & Alakuijala
51 | FNV 0.55 GB/s 5 Fowler, Noll, Vo
52 | CRC32 0.43 GB/s 9
53 | MD5-32 0.33 GB/s 10 Ronald L. Rivest
54 | SHA1-32 0.28 GB/s 10
55 |
56 | Q.Score is a measure of quality of the hash function.
57 | It depends on successfully passing SMHasher test set.
58 | 10 is a perfect score.
59 |
60 | A 64-bit version, named XXH64, is available since r35.
61 | It offers much better speed, but for 64-bit applications only.
62 | Name Speed on 64 bits Speed on 32 bits
63 | XXH64 13.8 GB/s 1.9 GB/s
64 | XXH32 6.8 GB/s 6.0 GB/s
65 | */
66 |
67 | #ifndef XXHASH_H_5627135585666179
68 | #define XXHASH_H_5627135585666179 1
69 |
70 | #if defined (__cplusplus)
71 | extern "C" {
72 | #endif
73 |
74 |
75 | /* ****************************
76 | * Definitions
77 | ******************************/
78 | #include <stddef.h>   /* size_t */
79 | typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
80 |
81 |
82 | /* ****************************
83 | * API modifier
84 | ******************************/
85 | /** XXH_INLINE_ALL (and XXH_PRIVATE_API)
86 | * This is useful to include xxhash functions in `static` mode
87 | * in order to inline them, and remove their symbol from the public list.
88 | * Inlining can offer dramatic performance improvement on small keys.
89 | * Methodology :
90 | * #define XXH_INLINE_ALL
91 | * #include "xxhash.h"
92 | * `xxhash.c` is automatically included.
93 | * It's not useful to compile and link it as a separate module.
94 | */
95 | #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
96 | # ifndef XXH_STATIC_LINKING_ONLY
97 | # define XXH_STATIC_LINKING_ONLY
98 | # endif
99 | # if defined(__GNUC__)
100 | # define XXH_PUBLIC_API static __inline __attribute__((unused))
101 | # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
102 | # define XXH_PUBLIC_API static inline
103 | # elif defined(_MSC_VER)
104 | # define XXH_PUBLIC_API static __inline
105 | # else
106 | /* this version may generate warnings for unused static functions */
107 | # define XXH_PUBLIC_API static
108 | # endif
109 | #else
110 | # if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
111 | # ifdef XXH_EXPORT
112 | # define XXH_PUBLIC_API __declspec(dllexport)
113 | # elif XXH_IMPORT
114 | # define XXH_PUBLIC_API __declspec(dllimport)
115 | # endif
116 | # else
117 | # define XXH_PUBLIC_API /* do nothing */
118 | # endif
119 | #endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
120 |
121 | /*! XXH_NAMESPACE, aka Namespace Emulation :
122 | *
123 | * If you want to include _and expose_ xxHash functions from within your own library,
124 | * but also want to avoid symbol collisions with other libraries which may also include xxHash,
125 | *
126 | * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
127 | * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
128 | *
129 | * Note that no change is required within the calling program as long as it includes `xxhash.h` :
130 | * regular symbol name will be automatically translated by this header.
131 | */
132 | #ifdef XXH_NAMESPACE
133 | # define XXH_CAT(A,B) A##B
134 | # define XXH_NAME2(A,B) XXH_CAT(A,B)
135 | # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
136 | # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
137 | # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
138 | # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
139 | # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
140 | # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
141 | # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
142 | # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
143 | # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
144 | # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
145 | # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
146 | # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
147 | # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
148 | # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
149 | # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
150 | # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
151 | # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
152 | # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
153 | # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
154 | #endif
155 |
156 |
157 | /* *************************************
158 | * Version
159 | ***************************************/
160 | #define XXH_VERSION_MAJOR 0
161 | #define XXH_VERSION_MINOR 7
162 | #define XXH_VERSION_RELEASE 0
163 | #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
164 | XXH_PUBLIC_API unsigned XXH_versionNumber (void);
165 |
166 |
167 | /*-**********************************************************************
168 | * 32-bit hash
169 | ************************************************************************/
170 | typedef unsigned int XXH32_hash_t;
171 |
172 | /*! XXH32() :
173 | Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input".
174 | The memory between input & input+length must be valid (allocated and read-accessible).
175 | "seed" can be used to alter the result predictably.
176 | Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
177 | XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
178 |
179 | /*====== Streaming ======*/
180 | typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
181 | XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
182 | XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
183 | XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
184 |
185 | XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed);
186 | XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
187 | XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
188 |
189 | /*
190 | * Streaming functions generate the xxHash of an input provided in multiple segments.
191 | * Note that, for small input, they are slower than single-call functions, due to state management.
192 | * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
193 | *
194 | * XXH state must first be allocated, using XXH*_createState() .
195 | *
196 | * Start a new hash by initializing state with a seed, using XXH*_reset().
197 | *
198 | * Then, feed the hash state by calling XXH*_update() as many times as necessary.
199 | * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
200 | *
201 | * Finally, a hash value can be produced anytime, by using XXH*_digest().
202 | * This function returns the nn-bits hash as an int or long long.
203 | *
204 | * It's still possible to continue inserting input into the hash state after a digest,
205 | * and generate some new hashes later on, by calling again XXH*_digest().
206 | *
207 | * When done, free XXH state space if it was allocated dynamically.
208 | */
209 |
210 | /*====== Canonical representation ======*/
211 |
212 | typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
213 | XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
214 | XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
215 |
216 | /* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
217 | * The canonical representation uses human-readable write convention, aka big-endian (large digits first).
218 | * These functions allow transformation of hash result into and from its canonical format.
219 | * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
220 | */
221 |
222 |
223 | #ifndef XXH_NO_LONG_LONG
224 | /*-**********************************************************************
225 | * 64-bit hash
226 | ************************************************************************/
227 | typedef unsigned long long XXH64_hash_t;
228 |
229 | /*! XXH64() :
230 | Calculate the 64-bit hash of sequence of length "len" stored at memory address "input".
231 | "seed" can be used to alter the result predictably.
232 | This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
233 | */
234 | XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
235 |
236 | /*====== Streaming ======*/
237 | typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
238 | XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
239 | XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
240 | XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
241 |
242 | XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed);
243 | XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
244 | XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
245 |
246 | /*====== Canonical representation ======*/
247 | typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
248 | XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
249 | XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
250 |
251 |
252 | #endif /* XXH_NO_LONG_LONG */
253 |
254 |
255 |
256 | #ifdef XXH_STATIC_LINKING_ONLY
257 |
258 | /* ================================================================================================
259 | This section contains declarations which are not guaranteed to remain stable.
260 | They may change in future versions, becoming incompatible with a different version of the library.
261 | These declarations should only be used with static linking.
262 | Never use them in association with dynamic linking !
263 | =================================================================================================== */
264 |
265 | /* These definitions are only present to allow
266 | * static allocation of XXH state, on stack or in a struct for example.
267 | * Never **ever** use members directly. */
268 |
269 | #if !defined (__VMS) \
270 | && (defined (__cplusplus) \
271 | || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
272 | #   include <stdint.h>
273 |
274 | struct XXH32_state_s {
275 |    uint32_t total_len_32;   /* total input length modulo 2^32; folded into the final hash */
276 |    uint32_t large_len;      /* becomes non-zero once >=16 bytes have been seen; selects the digest path */
277 |    uint32_t v1;             /* accumulator lanes */
278 |    uint32_t v2;
279 |    uint32_t v3;             /* also keeps the seed for the short-input digest path */
280 |    uint32_t v4;
281 |    uint32_t mem32[4];       /* buffer holding a partial 16-byte stripe */
282 |    uint32_t memsize;        /* number of valid bytes currently in mem32 */
283 |    uint32_t reserved;   /* never read nor write, might be removed in a future version */
284 | };   /* typedef'd to XXH32_state_t */
285 |
286 | struct XXH64_state_s {
287 | uint64_t total_len;
288 | uint64_t v1;
289 | uint64_t v2;
290 | uint64_t v3;
291 | uint64_t v4;
292 | uint64_t mem64[4];
293 | uint32_t memsize;
294 | uint32_t reserved[2]; /* never read nor write, might be removed in a future version */
295 | }; /* typedef'd to XXH64_state_t */
296 |
297 | # else
298 |
299 | struct XXH32_state_s {
300 | XXH32_hash_t total_len_32;
301 | XXH32_hash_t large_len;
302 | XXH32_hash_t v1;
303 | XXH32_hash_t v2;
304 | XXH32_hash_t v3;
305 | XXH32_hash_t v4;
306 | XXH32_hash_t mem32[4];
307 | XXH32_hash_t memsize;
308 | XXH32_hash_t reserved; /* never read nor write, might be removed in a future version */
309 | }; /* typedef'd to XXH32_state_t */
310 |
311 | # ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
312 | struct XXH64_state_s {
313 | XXH64_hash_t total_len;
314 | XXH64_hash_t v1;
315 | XXH64_hash_t v2;
316 | XXH64_hash_t v3;
317 | XXH64_hash_t v4;
318 | XXH64_hash_t mem64[4];
319 | XXH32_hash_t memsize;
320 | XXH32_hash_t reserved[2]; /* never read nor write, might be removed in a future version */
321 | }; /* typedef'd to XXH64_state_t */
322 | # endif
323 |
324 | # endif
325 |
326 |
327 | /*-**********************************************************************
328 | * XXH3
329 | * New experimental hash
330 | ************************************************************************/
331 | #ifndef XXH_NO_LONG_LONG
332 |
333 |
334 | /* ============================================
335 | * XXH3 is a new hash algorithm,
336 | * featuring vastly improved speed performance
337 | * for both small and large inputs.
338 | * A full speed analysis will be published,
339 | * it requires a lot more space than this comment can handle.
340 | * In general, expect XXH3 to run about ~2x faster on large inputs,
 * and >3x faster on small ones, though the exact difference depends on the platform.
342 | *
343 | * The algorithm is portable, will generate the same hash on all platforms.
344 | * It benefits greatly from vectorization units, but does not require it.
345 | *
346 | * XXH3 offers 2 variants, _64bits and _128bits.
347 | * When only 64 bits are needed, prefer calling the _64bits variant :
348 | * it reduces the amount of mixing, resulting in faster speed on small inputs.
349 | * It's also generally simpler to manipulate a scalar type than a struct.
350 | * Note : the low 64-bit field of the _128bits variant is the same as _64bits result.
351 | *
352 | * The XXH3 algorithm is still considered experimental.
353 | * It's possible to use it for ephemeral data, but avoid storing long-term values for later re-use.
354 | * While labelled experimental, the produced result can still change between versions.
355 | *
356 | * The API currently supports one-shot hashing only.
 * The full version will include streaming capability, and canonical representation.
358 | * Long term optional feature may include custom secret keys, and secret key generation.
359 | *
 * There are still a number of open questions that the community can influence during the experimental period.
361 | * I'm trying to list a few of them below, though don't consider this list as complete.
362 | *
363 | * - 128-bits output type : currently defined as a structure of 2 64-bits fields.
364 | * That's because 128-bit values do not exist in C standard.
 * Note that this means that, at the byte level, the result is not identical depending on endianness.
366 | * However, at field level, they are identical on all platforms.
367 | * The canonical representation will solve the issue of identical byte-level representation across platforms,
368 | * which is necessary for serialization.
369 | * Would there be a better representation for a 128-bit hash result ?
370 | * Are the names of the inner 64-bit fields important ? Should they be changed ?
371 | *
372 | * - Canonical representation : for the 64-bit variant, canonical representation is the same as XXH64() (aka big-endian).
373 | * What should it be for the 128-bit variant ?
374 | * Since it's no longer a scalar value, big-endian representation is no longer an obvious choice.
375 | * One possibility : represent it as the concatenation of two 64-bits canonical representation (aka 2x big-endian)
376 | * Another one : represent it in the same order as natural order in the struct for little-endian platforms.
377 | * Less consistent with existing convention for XXH32/XXH64, but may be more natural for little-endian platforms.
378 | *
379 | * - Associated functions for 128-bit hash : simple things, such as checking if 2 hashes are equal, become more difficult with struct.
380 | * Granted, it's not terribly difficult to create a comparator, but it's still a workload.
381 | * Would it be beneficial to declare and define a comparator function for XXH128_hash_t ?
382 | * Are there other operations on XXH128_hash_t which would be desirable ?
383 | *
384 | * - Variant compatibility : The low 64-bit field of the _128bits variant is the same as the result of _64bits.
385 | * This is not a compulsory behavior. It just felt that it "wouldn't hurt", and might even help in some (unidentified) cases.
386 | * But it might influence the design of XXH128_hash_t, in ways which may block other possibilities.
387 | * Good idea, bad idea ?
388 | *
389 | * - Seed type for 128-bits variant : currently, it's a single 64-bit value, like the 64-bit variant.
390 | * It could be argued that it's more logical to offer a 128-bit seed input parameter for a 128-bit hash.
391 | * Although it's also more difficult to use, since it requires to declare and pass a structure instead of a value.
392 | * It would either replace current choice, or add a new one.
393 | * Farmhash, for example, offers both variants (the 128-bits seed variant is called `doubleSeed`).
394 | * If both 64-bit and 128-bit seeds are possible, which variant should be called XXH128 ?
395 | *
396 | * - Result for len==0 : Currently, the result of hashing a zero-length input is the seed.
397 | * This mimics the behavior of a crc : in which case, a seed is effectively an accumulator, so it's not updated if input is empty.
398 | * Consequently, by default, when no seed specified, it returns zero. That part seems okay (it used to be a request for XXH32/XXH64).
399 | * But is it still fine to return the seed when the seed is non-zero ?
 * Are there use cases which would depend on this behavior, or would prefer a mixing of the seed ?
401 | */
402 |
/* When XXH_NAMESPACE is defined, prefix the public XXH3 symbols with it
 * (via XXH_NAME2), so several versions of the library can coexist. */
#ifdef XXH_NAMESPACE
# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#endif
410 |
411 |
/* 128-bit hash result, stored as two 64-bit halves because standard C
 * has no native 128-bit integer type.
 * Beware : the byte-level layout therefore differs across endianness;
 * only the field values are identical on all platforms (see notes above). */
typedef struct {
    XXH64_hash_t low64;   /* low 64 bits; same value as the XXH3_64bits() result */
    XXH64_hash_t high64;  /* high 64 bits */
} XXH128_hash_t;
416 |
/* One-shot 128-bit hash of `len` bytes at `data`, using `seed`.
 * Same result as XXH3_128bits_withSeed(). */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, unsigned long long seed);

/* note : variants without seed produce same result as variant with seed == 0 */
/* One-shot 64-bit hash; prefer this variant when only 64 bits are needed. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, unsigned long long seed);
/* One-shot 128-bit hash; low64 of the result equals the XXH3_64bits() value. */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, unsigned long long seed); /* == XXH128() */
424 |
425 |
426 | #endif /* XXH_NO_LONG_LONG */
427 |
428 |
429 | /*-**********************************************************************
430 | * XXH_INLINE_ALL
431 | ************************************************************************/
432 | #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
433 | # include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */
434 | #endif
435 |
436 |
437 |
438 | #endif /* XXH_STATIC_LINKING_ONLY */
439 |
440 |
441 | #if defined (__cplusplus)
442 | }
443 | #endif
444 |
445 | #endif /* XXHASH_H_5627135585666179 */
446 |
--------------------------------------------------------------------------------