├── .gitignore ├── .travis.yml ├── Makefile ├── README.md ├── _config.yml ├── help.mk ├── jemalloc-5.2.1 ├── .appveyor.yml ├── .autom4te.cfg ├── .cirrus.yml ├── .gitattributes ├── .gitignore ├── .travis.yml ├── COPYING ├── ChangeLog ├── INSTALL.md ├── Makefile.in ├── README ├── TUNING.md ├── VERSION ├── autogen.sh ├── bin │ ├── jemalloc-config.in │ ├── jemalloc.sh.in │ └── jeprof.in ├── build-aux │ ├── config.guess │ ├── config.sub │ └── install-sh ├── config.stamp.in ├── configure.ac ├── doc │ ├── html.xsl.in │ ├── jemalloc.xml.in │ ├── manpages.xsl.in │ └── stylesheet.xsl ├── include │ ├── jemalloc │ │ ├── internal │ │ │ ├── arena_externs.h │ │ │ ├── arena_inlines_a.h │ │ │ ├── arena_inlines_b.h │ │ │ ├── arena_stats.h │ │ │ ├── arena_structs_a.h │ │ │ ├── arena_structs_b.h │ │ │ ├── arena_types.h │ │ │ ├── assert.h │ │ │ ├── atomic.h │ │ │ ├── atomic_c11.h │ │ │ ├── atomic_gcc_atomic.h │ │ │ ├── atomic_gcc_sync.h │ │ │ ├── atomic_msvc.h │ │ │ ├── background_thread_externs.h │ │ │ ├── background_thread_inlines.h │ │ │ ├── background_thread_structs.h │ │ │ ├── base_externs.h │ │ │ ├── base_inlines.h │ │ │ ├── base_structs.h │ │ │ ├── base_types.h │ │ │ ├── bin.h │ │ │ ├── bin_stats.h │ │ │ ├── bin_types.h │ │ │ ├── bit_util.h │ │ │ ├── bitmap.h │ │ │ ├── cache_bin.h │ │ │ ├── ckh.h │ │ │ ├── ctl.h │ │ │ ├── div.h │ │ │ ├── emitter.h │ │ │ ├── extent_dss.h │ │ │ ├── extent_externs.h │ │ │ ├── extent_inlines.h │ │ │ ├── extent_mmap.h │ │ │ ├── extent_structs.h │ │ │ ├── extent_types.h │ │ │ ├── hash.h │ │ │ ├── hook.h │ │ │ ├── jemalloc_internal_decls.h │ │ │ ├── jemalloc_internal_defs.h.in │ │ │ ├── jemalloc_internal_externs.h │ │ │ ├── jemalloc_internal_includes.h │ │ │ ├── jemalloc_internal_inlines_a.h │ │ │ ├── jemalloc_internal_inlines_b.h │ │ │ ├── jemalloc_internal_inlines_c.h │ │ │ ├── jemalloc_internal_macros.h │ │ │ ├── jemalloc_internal_types.h │ │ │ ├── jemalloc_preamble.h.in │ │ │ ├── large_externs.h │ │ │ ├── log.h │ │ │ ├── malloc_io.h │ │ │ ├── 
mutex.h │ │ │ ├── mutex_pool.h │ │ │ ├── mutex_prof.h │ │ │ ├── nstime.h │ │ │ ├── pages.h │ │ │ ├── ph.h │ │ │ ├── private_namespace.sh │ │ │ ├── private_symbols.sh │ │ │ ├── prng.h │ │ │ ├── prof_externs.h │ │ │ ├── prof_inlines_a.h │ │ │ ├── prof_inlines_b.h │ │ │ ├── prof_structs.h │ │ │ ├── prof_types.h │ │ │ ├── public_namespace.sh │ │ │ ├── public_unnamespace.sh │ │ │ ├── ql.h │ │ │ ├── qr.h │ │ │ ├── quantum.h │ │ │ ├── rb.h │ │ │ ├── rtree.h │ │ │ ├── rtree_tsd.h │ │ │ ├── safety_check.h │ │ │ ├── sc.h │ │ │ ├── seq.h │ │ │ ├── smoothstep.h │ │ │ ├── smoothstep.sh │ │ │ ├── spin.h │ │ │ ├── stats.h │ │ │ ├── sz.h │ │ │ ├── tcache_externs.h │ │ │ ├── tcache_inlines.h │ │ │ ├── tcache_structs.h │ │ │ ├── tcache_types.h │ │ │ ├── test_hooks.h │ │ │ ├── ticker.h │ │ │ ├── tsd.h │ │ │ ├── tsd_generic.h │ │ │ ├── tsd_malloc_thread_cleanup.h │ │ │ ├── tsd_tls.h │ │ │ ├── tsd_types.h │ │ │ ├── tsd_win.h │ │ │ ├── util.h │ │ │ └── witness.h │ │ ├── jemalloc.sh │ │ ├── jemalloc_defs.h.in │ │ ├── jemalloc_macros.h.in │ │ ├── jemalloc_mangle.sh │ │ ├── jemalloc_protos.h.in │ │ ├── jemalloc_rename.sh │ │ └── jemalloc_typedefs.h.in │ └── msvc_compat │ │ ├── C99 │ │ ├── stdbool.h │ │ └── stdint.h │ │ ├── strings.h │ │ └── windows_extra.h ├── jemalloc.pc.in ├── m4 │ └── ax_cxx_compile_stdcxx.m4 ├── msvc │ ├── ReadMe.txt │ ├── jemalloc_vc2015.sln │ ├── jemalloc_vc2017.sln │ ├── projects │ │ ├── vc2015 │ │ │ ├── jemalloc │ │ │ │ ├── jemalloc.vcxproj │ │ │ │ └── jemalloc.vcxproj.filters │ │ │ └── test_threads │ │ │ │ ├── test_threads.vcxproj │ │ │ │ └── test_threads.vcxproj.filters │ │ └── vc2017 │ │ │ ├── jemalloc │ │ │ ├── jemalloc.vcxproj │ │ │ └── jemalloc.vcxproj.filters │ │ │ └── test_threads │ │ │ ├── test_threads.vcxproj │ │ │ └── test_threads.vcxproj.filters │ └── test_threads │ │ ├── test_threads.cpp │ │ ├── test_threads.h │ │ └── test_threads_main.cpp ├── run_tests.sh ├── scripts │ ├── gen_run_tests.py │ └── gen_travis.py ├── src │ ├── arena.c │ ├── 
background_thread.c │ ├── base.c │ ├── bin.c │ ├── bitmap.c │ ├── ckh.c │ ├── ctl.c │ ├── div.c │ ├── extent.c │ ├── extent_dss.c │ ├── extent_mmap.c │ ├── hash.c │ ├── hook.c │ ├── jemalloc.c │ ├── jemalloc_cpp.cpp │ ├── large.c │ ├── log.c │ ├── malloc_io.c │ ├── mutex.c │ ├── mutex_pool.c │ ├── nstime.c │ ├── pages.c │ ├── prng.c │ ├── prof.c │ ├── rtree.c │ ├── safety_check.c │ ├── sc.c │ ├── stats.c │ ├── sz.c │ ├── tcache.c │ ├── test_hooks.c │ ├── ticker.c │ ├── tsd.c │ ├── witness.c │ └── zone.c └── test │ ├── include │ └── test │ │ ├── SFMT-alti.h │ │ ├── SFMT-params.h │ │ ├── SFMT-params11213.h │ │ ├── SFMT-params1279.h │ │ ├── SFMT-params132049.h │ │ ├── SFMT-params19937.h │ │ ├── SFMT-params216091.h │ │ ├── SFMT-params2281.h │ │ ├── SFMT-params4253.h │ │ ├── SFMT-params44497.h │ │ ├── SFMT-params607.h │ │ ├── SFMT-params86243.h │ │ ├── SFMT-sse2.h │ │ ├── SFMT.h │ │ ├── btalloc.h │ │ ├── extent_hooks.h │ │ ├── jemalloc_test.h.in │ │ ├── jemalloc_test_defs.h.in │ │ ├── math.h │ │ ├── mq.h │ │ ├── mtx.h │ │ ├── test.h │ │ ├── thd.h │ │ └── timer.h │ ├── integration │ ├── MALLOCX_ARENA.c │ ├── aligned_alloc.c │ ├── allocated.c │ ├── extent.c │ ├── extent.sh │ ├── malloc.c │ ├── mallocx.c │ ├── mallocx.sh │ ├── overflow.c │ ├── posix_memalign.c │ ├── rallocx.c │ ├── sdallocx.c │ ├── slab_sizes.c │ ├── slab_sizes.sh │ ├── smallocx.c │ ├── smallocx.sh │ ├── thread_arena.c │ ├── thread_tcache_enabled.c │ ├── xallocx.c │ └── xallocx.sh │ ├── src │ ├── SFMT.c │ ├── btalloc.c │ ├── btalloc_0.c │ ├── btalloc_1.c │ ├── math.c │ ├── mq.c │ ├── mtx.c │ ├── test.c │ ├── thd.c │ └── timer.c │ ├── stress │ ├── hookbench.c │ └── microbench.c │ ├── test.sh.in │ └── unit │ ├── SFMT.c │ ├── a0.c │ ├── arena_reset.c │ ├── arena_reset_prof.c │ ├── arena_reset_prof.sh │ ├── atomic.c │ ├── background_thread.c │ ├── background_thread_enable.c │ ├── base.c │ ├── binshard.c │ ├── binshard.sh │ ├── bit_util.c │ ├── bitmap.c │ ├── ckh.c │ ├── decay.c │ ├── decay.sh │ ├── div.c │ ├── 
emitter.c │ ├── extent_quantize.c │ ├── extent_util.c │ ├── fork.c │ ├── hash.c │ ├── hook.c │ ├── huge.c │ ├── junk.c │ ├── junk.sh │ ├── junk_alloc.c │ ├── junk_alloc.sh │ ├── junk_free.c │ ├── junk_free.sh │ ├── log.c │ ├── mallctl.c │ ├── malloc_io.c │ ├── math.c │ ├── mq.c │ ├── mtx.c │ ├── nstime.c │ ├── pack.c │ ├── pack.sh │ ├── pages.c │ ├── ph.c │ ├── prng.c │ ├── prof_accum.c │ ├── prof_accum.sh │ ├── prof_active.c │ ├── prof_active.sh │ ├── prof_gdump.c │ ├── prof_gdump.sh │ ├── prof_idump.c │ ├── prof_idump.sh │ ├── prof_log.c │ ├── prof_log.sh │ ├── prof_reset.c │ ├── prof_reset.sh │ ├── prof_tctx.c │ ├── prof_tctx.sh │ ├── prof_thread_name.c │ ├── prof_thread_name.sh │ ├── ql.c │ ├── qr.c │ ├── rb.c │ ├── retained.c │ ├── rtree.c │ ├── safety_check.c │ ├── safety_check.sh │ ├── sc.c │ ├── seq.c │ ├── size_classes.c │ ├── slab.c │ ├── smoothstep.c │ ├── spin.c │ ├── stats.c │ ├── stats_print.c │ ├── test_hooks.c │ ├── ticker.c │ ├── tsd.c │ ├── witness.c │ ├── zero.c │ └── zero.sh ├── jemalloc.go └── jemalloc_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | /lib 2 | /jemalloc 3 | /VERSION 4 | 5 | je_*.c 6 | *.swp 7 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | os: 4 | - linux 5 | - osx 6 | 7 | go: 8 | - 1.15.x 9 | - 1.14.x 10 | - 1.13.x 11 | 12 | script: 13 | - make install 14 | - make test 15 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL = build 2 | 3 | PWD := $(shell pwd) 4 | SRC := jemalloc-5.2.1 5 | 6 | build: 7 | @test -f $(SRC)/Makefile || make config --quiet 8 | 9 | install: build 10 | @go install -x -v ./ 11 | 12 | config: 13 | @cd $(SRC) && ./autogen.sh --with-jemalloc-prefix="je_" \ 14 | 
&& make include/jemalloc/internal/private_namespace.h 15 | @rm -rf jemalloc VERSION 16 | @ln -s $(SRC)/include/jemalloc 17 | @ln -s $(SRC)/VERSION 18 | @make -f help.mk relink 19 | 20 | clean distclean: 21 | @test -f $(SRC)/Makefile && make -C $(SRC) --quiet distclean || true 22 | @rm -rf jemalloc VERSION 23 | @make -f help.mk unlink 24 | 25 | relink unlink: 26 | @make -f help.mk $@ 27 | 28 | test: 29 | @go test -v ./ 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # jemalloc 2 | [![Build Status](https://travis-ci.org/spinlock/jemalloc-go.svg)](https://travis-ci.org/spinlock/jemalloc-go) 3 | 4 | #### How to setup & install 5 | ```bash 6 | $ mkdir -p $GOPATH/src/github.com/spinlock 7 | $ cd $_ 8 | $ git clone https://github.com/spinlock/jemalloc-go.git 9 | $ cd jemalloc-go 10 | $ make install 11 | ``` 12 | 13 | #### How to use it 14 | 15 | ```go 16 | package demo 17 | 18 | // #cgo CPPFLAGS: -I/jemalloc-go 19 | // #cgo darwin LDFLAGS: -Wl,-undefined -Wl,dynamic_lookup 20 | // #cgo !darwin LDFLAGS: -Wl,-unresolved-symbols=ignore-all 21 | // #include 22 | import "C" 23 | 24 | import jemalloc "github.com/spinlock/jemalloc-go" 25 | 26 | func malloc1(n int) unsafe.Pointer { 27 | return C.je_malloc(C.size_t(n)) 28 | } 29 | 30 | func free1(p unsafe.Pointer) { 31 | C.je_free(p) 32 | } 33 | 34 | func malloc2(n int) unsafe.Pointer { 35 | return jemalloc.Malloc(n) 36 | } 37 | 38 | func free2(p unsafe.Pointer) { 39 | jemalloc.Free(p) 40 | } 41 | ``` 42 | 43 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-cayman -------------------------------------------------------------------------------- /help.mk: -------------------------------------------------------------------------------- 1 | 
.DEFAULT_GOAL = relink 2 | 3 | PWD := $(shell pwd) 4 | SRC := jemalloc-5.2.1 5 | 6 | -include $(SRC)/Makefile 7 | 8 | relink: unlink 9 | @for i in $(C_SRCS); do \ 10 | rm -f je_$$(basename $$i); \ 11 | ln -s $(SRC)/$$i je_$$(basename $$i); \ 12 | done 13 | 14 | unlink: 15 | @rm -f je_*.c 16 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/.appveyor.yml: -------------------------------------------------------------------------------- 1 | version: '{build}' 2 | 3 | environment: 4 | matrix: 5 | - MSYSTEM: MINGW64 6 | CPU: x86_64 7 | MSVC: amd64 8 | CONFIG_FLAGS: --enable-debug 9 | - MSYSTEM: MINGW64 10 | CPU: x86_64 11 | CONFIG_FLAGS: --enable-debug 12 | - MSYSTEM: MINGW32 13 | CPU: i686 14 | MSVC: x86 15 | CONFIG_FLAGS: --enable-debug 16 | - MSYSTEM: MINGW32 17 | CPU: i686 18 | CONFIG_FLAGS: --enable-debug 19 | - MSYSTEM: MINGW64 20 | CPU: x86_64 21 | MSVC: amd64 22 | - MSYSTEM: MINGW64 23 | CPU: x86_64 24 | - MSYSTEM: MINGW32 25 | CPU: i686 26 | MSVC: x86 27 | - MSYSTEM: MINGW32 28 | CPU: i686 29 | 30 | install: 31 | - set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH% 32 | - if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC% 33 | - if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc 34 | - pacman --noconfirm -Suy mingw-w64-%CPU%-make 35 | 36 | build_script: 37 | - bash -c "autoconf" 38 | - bash -c "./configure $CONFIG_FLAGS" 39 | - mingw32-make 40 | - file lib/jemalloc.dll 41 | - mingw32-make tests 42 | - mingw32-make -k check 43 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/.autom4te.cfg: -------------------------------------------------------------------------------- 1 | begin-language: "Autoconf-without-aclocal-m4" 2 | args: --no-cache 3 | end-language: "Autoconf-without-aclocal-m4" 4 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/.cirrus.yml: 
-------------------------------------------------------------------------------- 1 | env: 2 | CIRRUS_CLONE_DEPTH: 1 3 | ARCH: amd64 4 | 5 | task: 6 | freebsd_instance: 7 | matrix: 8 | image: freebsd-12-0-release-amd64 9 | image: freebsd-11-2-release-amd64 10 | install_script: 11 | - sed -i.bak -e 's,pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly,pkg+http://pkg.FreeBSD.org/\${ABI}/latest,' /etc/pkg/FreeBSD.conf 12 | - pkg upgrade -y 13 | - pkg install -y autoconf gmake 14 | script: 15 | - autoconf 16 | #- ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS 17 | - ./configure 18 | - export JFLAG=`sysctl -n kern.smp.cpus` 19 | - gmake -j${JFLAG} 20 | - gmake -j${JFLAG} tests 21 | - gmake check 22 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf 2 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/jemalloc-config 2 | /bin/jemalloc.sh 3 | /bin/jeprof 4 | 5 | /config.stamp 6 | /config.log 7 | /config.status 8 | /configure 9 | 10 | /doc/html.xsl 11 | /doc/manpages.xsl 12 | /doc/jemalloc.xml 13 | /doc/jemalloc.html 14 | /doc/jemalloc.3 15 | 16 | /jemalloc.pc 17 | 18 | /lib/ 19 | 20 | /Makefile 21 | 22 | /include/jemalloc/internal/jemalloc_preamble.h 23 | /include/jemalloc/internal/jemalloc_internal_defs.h 24 | /include/jemalloc/internal/private_namespace.gen.h 25 | /include/jemalloc/internal/private_namespace.h 26 | /include/jemalloc/internal/private_namespace_jet.gen.h 27 | /include/jemalloc/internal/private_namespace_jet.h 28 | /include/jemalloc/internal/private_symbols.awk 29 | /include/jemalloc/internal/private_symbols_jet.awk 30 | /include/jemalloc/internal/public_namespace.h 31 | 
/include/jemalloc/internal/public_symbols.txt 32 | /include/jemalloc/internal/public_unnamespace.h 33 | /include/jemalloc/jemalloc.h 34 | /include/jemalloc/jemalloc_defs.h 35 | /include/jemalloc/jemalloc_macros.h 36 | /include/jemalloc/jemalloc_mangle.h 37 | /include/jemalloc/jemalloc_mangle_jet.h 38 | /include/jemalloc/jemalloc_protos.h 39 | /include/jemalloc/jemalloc_protos_jet.h 40 | /include/jemalloc/jemalloc_rename.h 41 | /include/jemalloc/jemalloc_typedefs.h 42 | 43 | /src/*.[od] 44 | /src/*.sym 45 | 46 | /run_tests.out/ 47 | 48 | /test/test.sh 49 | test/include/test/jemalloc_test.h 50 | test/include/test/jemalloc_test_defs.h 51 | 52 | /test/integration/[A-Za-z]* 53 | !/test/integration/[A-Za-z]*.* 54 | /test/integration/*.[od] 55 | /test/integration/*.out 56 | 57 | /test/integration/cpp/[A-Za-z]* 58 | !/test/integration/cpp/[A-Za-z]*.* 59 | /test/integration/cpp/*.[od] 60 | /test/integration/cpp/*.out 61 | 62 | /test/src/*.[od] 63 | 64 | /test/stress/[A-Za-z]* 65 | !/test/stress/[A-Za-z]*.* 66 | /test/stress/*.[od] 67 | /test/stress/*.out 68 | 69 | /test/unit/[A-Za-z]* 70 | !/test/unit/[A-Za-z]*.* 71 | /test/unit/*.[od] 72 | /test/unit/*.out 73 | 74 | /VERSION 75 | 76 | *.pdb 77 | *.sdf 78 | *.opendb 79 | *.VC.db 80 | *.opensdf 81 | *.cachefile 82 | *.suo 83 | *.user 84 | *.sln.docstates 85 | *.tmp 86 | .vs/ 87 | /msvc/Win32/ 88 | /msvc/x64/ 89 | /msvc/projects/*/*/Debug*/ 90 | /msvc/projects/*/*/Release*/ 91 | /msvc/projects/*/*/Win32/ 92 | /msvc/projects/*/*/x64/ 93 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/COPYING: -------------------------------------------------------------------------------- 1 | Unless otherwise specified, files in the jemalloc source distribution are 2 | subject to the following license: 3 | -------------------------------------------------------------------------------- 4 | Copyright (C) 2002-present Jason Evans . 5 | All rights reserved. 
6 | Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. 7 | Copyright (C) 2009-present Facebook, Inc. All rights reserved. 8 | 9 | Redistribution and use in source and binary forms, with or without 10 | modification, are permitted provided that the following conditions are met: 11 | 1. Redistributions of source code must retain the above copyright notice(s), 12 | this list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice(s), 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS 18 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 19 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, 21 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 23 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 24 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 25 | OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 26 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | -------------------------------------------------------------------------------- 28 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/README: -------------------------------------------------------------------------------- 1 | jemalloc is a general purpose malloc(3) implementation that emphasizes 2 | fragmentation avoidance and scalable concurrency support. 
jemalloc first came 3 | into use as the FreeBSD libc allocator in 2005, and since then it has found its 4 | way into numerous applications that rely on its predictable behavior. In 2010 5 | jemalloc development efforts broadened to include developer support features 6 | such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc 7 | releases continue to be integrated back into FreeBSD, and therefore versatility 8 | remains critical. Ongoing development efforts trend toward making jemalloc 9 | among the best allocators for a broad range of demanding applications, and 10 | eliminating/mitigating weaknesses that have practical repercussions for real 11 | world applications. 12 | 13 | The COPYING file contains copyright and licensing information. 14 | 15 | The INSTALL file contains information on how to configure, build, and install 16 | jemalloc. 17 | 18 | The ChangeLog file contains a brief summary of changes for each release. 19 | 20 | URL: http://jemalloc.net/ 21 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/VERSION: -------------------------------------------------------------------------------- 1 | 5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756 2 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/autogen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | for i in autoconf; do 4 | echo "$i" 5 | $i 6 | if [ $? -ne 0 ]; then 7 | echo "Error $? in $i" 8 | exit 1 9 | fi 10 | done 11 | 12 | echo "./configure --enable-autogen $@" 13 | ./configure --enable-autogen $@ 14 | if [ $? -ne 0 ]; then 15 | echo "Error $? 
in ./configure" 16 | exit 1 17 | fi 18 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/bin/jemalloc-config.in: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | usage() { 4 | cat < 7 | Options: 8 | --help | -h : Print usage. 9 | --version : Print jemalloc version. 10 | --revision : Print shared library revision number. 11 | --config : Print configure options used to build jemalloc. 12 | --prefix : Print installation directory prefix. 13 | --bindir : Print binary installation directory. 14 | --datadir : Print data installation directory. 15 | --includedir : Print include installation directory. 16 | --libdir : Print library installation directory. 17 | --mandir : Print manual page installation directory. 18 | --cc : Print compiler used to build jemalloc. 19 | --cflags : Print compiler flags used to build jemalloc. 20 | --cppflags : Print preprocessor flags used to build jemalloc. 21 | --cxxflags : Print C++ compiler flags used to build jemalloc. 22 | --ldflags : Print library flags used to build jemalloc. 23 | --libs : Print libraries jemalloc was linked against. 
24 | EOF 25 | } 26 | 27 | prefix="@prefix@" 28 | exec_prefix="@exec_prefix@" 29 | 30 | case "$1" in 31 | --help | -h) 32 | usage 33 | exit 0 34 | ;; 35 | --version) 36 | echo "@jemalloc_version@" 37 | ;; 38 | --revision) 39 | echo "@rev@" 40 | ;; 41 | --config) 42 | echo "@CONFIG@" 43 | ;; 44 | --prefix) 45 | echo "@PREFIX@" 46 | ;; 47 | --bindir) 48 | echo "@BINDIR@" 49 | ;; 50 | --datadir) 51 | echo "@DATADIR@" 52 | ;; 53 | --includedir) 54 | echo "@INCLUDEDIR@" 55 | ;; 56 | --libdir) 57 | echo "@LIBDIR@" 58 | ;; 59 | --mandir) 60 | echo "@MANDIR@" 61 | ;; 62 | --cc) 63 | echo "@CC@" 64 | ;; 65 | --cflags) 66 | echo "@CFLAGS@" 67 | ;; 68 | --cppflags) 69 | echo "@CPPFLAGS@" 70 | ;; 71 | --cxxflags) 72 | echo "@CXXFLAGS@" 73 | ;; 74 | --ldflags) 75 | echo "@LDFLAGS@ @EXTRA_LDFLAGS@" 76 | ;; 77 | --libs) 78 | echo "@LIBS@" 79 | ;; 80 | *) 81 | usage 82 | exit 1 83 | esac 84 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/bin/jemalloc.sh.in: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | prefix=@prefix@ 4 | exec_prefix=@exec_prefix@ 5 | libdir=@libdir@ 6 | 7 | @LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@ 8 | export @LD_PRELOAD_VAR@ 9 | exec "$@" 10 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/config.stamp.in: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spinlock/jemalloc-go/e81523fb852427b13c54834b7372c0c8a139535b/jemalloc-5.2.1/config.stamp.in -------------------------------------------------------------------------------- /jemalloc-5.2.1/doc/html.xsl.in: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/doc/manpages.xsl.in: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/doc/stylesheet.xsl: -------------------------------------------------------------------------------- 1 | 2 | ansi 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/arena_inlines_a.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H 2 | #define JEMALLOC_INTERNAL_ARENA_INLINES_A_H 3 | 4 | static inline unsigned 5 | arena_ind_get(const arena_t *arena) { 6 | return base_ind_get(arena->base); 7 | } 8 | 9 | static inline void 10 | arena_internal_add(arena_t *arena, size_t size) { 11 | atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED); 12 | } 13 | 14 | static inline void 15 | arena_internal_sub(arena_t *arena, size_t size) { 16 | atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED); 17 | } 18 | 19 | static inline size_t 20 | arena_internal_get(arena_t *arena) { 21 | return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED); 22 | } 23 | 24 | static inline bool 25 | arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) { 26 | cassert(config_prof); 27 | 28 | if (likely(prof_interval == 0 || !prof_active_get_unlocked())) { 29 | return false; 30 | } 31 | 32 | return prof_accum_add(tsdn, &arena->prof_accum, accumbytes); 33 | } 34 | 35 | static inline void 36 | percpu_arena_update(tsd_t *tsd, unsigned cpu) { 37 | assert(have_percpu_arena); 38 | arena_t *oldarena = tsd_arena_get(tsd); 39 | assert(oldarena != NULL); 40 | unsigned oldind = arena_ind_get(oldarena); 41 | 42 | if (oldind != cpu) { 43 | unsigned newind = cpu; 44 | arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true); 45 | assert(newarena != NULL); 46 | 47 | /* Set new arena/tcache 
associations. */ 48 | arena_migrate(tsd, oldind, newind); 49 | tcache_t *tcache = tcache_get(tsd); 50 | if (tcache != NULL) { 51 | tcache_arena_reassociate(tsd_tsdn(tsd), tcache, 52 | newarena); 53 | } 54 | } 55 | } 56 | 57 | #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */ 58 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/arena_structs_a.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H 2 | #define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H 3 | 4 | #include "jemalloc/internal/bitmap.h" 5 | 6 | struct arena_slab_data_s { 7 | /* Per region allocated/deallocated bitmap. */ 8 | bitmap_t bitmap[BITMAP_GROUPS_MAX]; 9 | }; 10 | 11 | #endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */ 12 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/arena_types.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H 2 | #define JEMALLOC_INTERNAL_ARENA_TYPES_H 3 | 4 | #include "jemalloc/internal/sc.h" 5 | 6 | /* Maximum number of regions in one slab. */ 7 | #define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN) 8 | #define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS) 9 | 10 | /* Default decay times in milliseconds. */ 11 | #define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) 12 | #define MUZZY_DECAY_MS_DEFAULT (0) 13 | /* Number of event ticks between time checks. */ 14 | #define DECAY_NTICKS_PER_UPDATE 1000 15 | 16 | typedef struct arena_slab_data_s arena_slab_data_t; 17 | typedef struct arena_decay_s arena_decay_t; 18 | typedef struct arena_s arena_t; 19 | typedef struct arena_tdata_s arena_tdata_t; 20 | typedef struct alloc_ctx_s alloc_ctx_t; 21 | 22 | typedef enum { 23 | percpu_arena_mode_names_base = 0, /* Used for options processing. 
*/ 24 | 25 | /* 26 | * *_uninit are used only during bootstrapping, and must correspond 27 | * to initialized variant plus percpu_arena_mode_enabled_base. 28 | */ 29 | percpu_arena_uninit = 0, 30 | per_phycpu_arena_uninit = 1, 31 | 32 | /* All non-disabled modes must come after percpu_arena_disabled. */ 33 | percpu_arena_disabled = 2, 34 | 35 | percpu_arena_mode_names_limit = 3, /* Used for options processing. */ 36 | percpu_arena_mode_enabled_base = 3, 37 | 38 | percpu_arena = 3, 39 | per_phycpu_arena = 4 /* Hyper threads share arena. */ 40 | } percpu_arena_mode_t; 41 | 42 | #define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base) 43 | #define PERCPU_ARENA_DEFAULT percpu_arena_disabled 44 | 45 | /* 46 | * When allocation_size >= oversize_threshold, use the dedicated huge arena 47 | * (unless have explicitly spicified arena index). 0 disables the feature. 48 | */ 49 | #define OVERSIZE_THRESHOLD_DEFAULT (8 << 20) 50 | 51 | #endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */ 52 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/assert.h: -------------------------------------------------------------------------------- 1 | #include "jemalloc/internal/malloc_io.h" 2 | #include "jemalloc/internal/util.h" 3 | 4 | /* 5 | * Define a custom assert() in order to reduce the chances of deadlock during 6 | * assertion failure. 
7 | */ 8 | #ifndef assert 9 | #define assert(e) do { \ 10 | if (unlikely(config_debug && !(e))) { \ 11 | malloc_printf( \ 12 | ": %s:%d: Failed assertion: \"%s\"\n", \ 13 | __FILE__, __LINE__, #e); \ 14 | abort(); \ 15 | } \ 16 | } while (0) 17 | #endif 18 | 19 | #ifndef not_reached 20 | #define not_reached() do { \ 21 | if (config_debug) { \ 22 | malloc_printf( \ 23 | ": %s:%d: Unreachable code reached\n", \ 24 | __FILE__, __LINE__); \ 25 | abort(); \ 26 | } \ 27 | unreachable(); \ 28 | } while (0) 29 | #endif 30 | 31 | #ifndef not_implemented 32 | #define not_implemented() do { \ 33 | if (config_debug) { \ 34 | malloc_printf(": %s:%d: Not implemented\n", \ 35 | __FILE__, __LINE__); \ 36 | abort(); \ 37 | } \ 38 | } while (0) 39 | #endif 40 | 41 | #ifndef assert_not_implemented 42 | #define assert_not_implemented(e) do { \ 43 | if (unlikely(config_debug && !(e))) { \ 44 | not_implemented(); \ 45 | } \ 46 | } while (0) 47 | #endif 48 | 49 | /* Use to assert a particular configuration, e.g., cassert(config_debug). 
*/ 50 | #ifndef cassert 51 | #define cassert(c) do { \ 52 | if (unlikely(!(c))) { \ 53 | not_reached(); \ 54 | } \ 55 | } while (0) 56 | #endif 57 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/atomic.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_ATOMIC_H 2 | #define JEMALLOC_INTERNAL_ATOMIC_H 3 | 4 | #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE 5 | 6 | #define JEMALLOC_U8_ATOMICS 7 | #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) 8 | # include "jemalloc/internal/atomic_gcc_atomic.h" 9 | # if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS) 10 | # undef JEMALLOC_U8_ATOMICS 11 | # endif 12 | #elif defined(JEMALLOC_GCC_SYNC_ATOMICS) 13 | # include "jemalloc/internal/atomic_gcc_sync.h" 14 | # if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS) 15 | # undef JEMALLOC_U8_ATOMICS 16 | # endif 17 | #elif defined(_MSC_VER) 18 | # include "jemalloc/internal/atomic_msvc.h" 19 | #elif defined(JEMALLOC_C11_ATOMICS) 20 | # include "jemalloc/internal/atomic_c11.h" 21 | #else 22 | # error "Don't have atomics implemented on this platform." 23 | #endif 24 | 25 | /* 26 | * This header gives more or less a backport of C11 atomics. The user can write 27 | * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate 28 | * counterparts of the C11 atomic functions for type, as so: 29 | * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3); 30 | * and then write things like: 31 | * int *some_ptr; 32 | * atomic_pi_t atomic_ptr_to_int; 33 | * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED); 34 | * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL); 35 | * assert(some_ptr == prev_value); 36 | * and expect things to work in the obvious way. 37 | * 38 | * Also included (with naming differences to avoid conflicts with the standard 39 | * library): 40 | * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence). 
41 | * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT). 42 | */ 43 | 44 | /* 45 | * Pure convenience, so that we don't have to type "atomic_memory_order_" 46 | * quite so often. 47 | */ 48 | #define ATOMIC_RELAXED atomic_memory_order_relaxed 49 | #define ATOMIC_ACQUIRE atomic_memory_order_acquire 50 | #define ATOMIC_RELEASE atomic_memory_order_release 51 | #define ATOMIC_ACQ_REL atomic_memory_order_acq_rel 52 | #define ATOMIC_SEQ_CST atomic_memory_order_seq_cst 53 | 54 | /* 55 | * Not all platforms have 64-bit atomics. If we do, this #define exposes that 56 | * fact. 57 | */ 58 | #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) 59 | # define JEMALLOC_ATOMIC_U64 60 | #endif 61 | 62 | JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) 63 | 64 | /* 65 | * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only 66 | * platform that actually needs to know the size, MSVC. 67 | */ 68 | JEMALLOC_GENERATE_ATOMICS(bool, b, 0) 69 | 70 | JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) 71 | 72 | JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) 73 | 74 | JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) 75 | 76 | JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0) 77 | 78 | JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) 79 | 80 | #ifdef JEMALLOC_ATOMIC_U64 81 | JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3) 82 | #endif 83 | 84 | #undef ATOMIC_INLINE 85 | 86 | #endif /* JEMALLOC_INTERNAL_ATOMIC_H */ 87 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/background_thread_externs.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H 2 | #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H 3 | 4 | extern bool opt_background_thread; 5 | extern size_t opt_max_background_threads; 6 | extern malloc_mutex_t background_thread_lock; 7 | extern atomic_b_t background_thread_enabled_state; 8 | 
extern size_t n_background_threads; 9 | extern size_t max_background_threads; 10 | extern background_thread_info_t *background_thread_info; 11 | 12 | bool background_thread_create(tsd_t *tsd, unsigned arena_ind); 13 | bool background_threads_enable(tsd_t *tsd); 14 | bool background_threads_disable(tsd_t *tsd); 15 | void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, 16 | arena_decay_t *decay, size_t npages_new); 17 | void background_thread_prefork0(tsdn_t *tsdn); 18 | void background_thread_prefork1(tsdn_t *tsdn); 19 | void background_thread_postfork_parent(tsdn_t *tsdn); 20 | void background_thread_postfork_child(tsdn_t *tsdn); 21 | bool background_thread_stats_read(tsdn_t *tsdn, 22 | background_thread_stats_t *stats); 23 | void background_thread_ctl_init(tsdn_t *tsdn); 24 | 25 | #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER 26 | extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *, 27 | void *(*)(void *), void *__restrict); 28 | #endif 29 | bool background_thread_boot0(void); 30 | bool background_thread_boot1(tsdn_t *tsdn); 31 | 32 | #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */ 33 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/background_thread_inlines.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H 2 | #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H 3 | 4 | JEMALLOC_ALWAYS_INLINE bool 5 | background_thread_enabled(void) { 6 | return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED); 7 | } 8 | 9 | JEMALLOC_ALWAYS_INLINE void 10 | background_thread_enabled_set(tsdn_t *tsdn, bool state) { 11 | malloc_mutex_assert_owner(tsdn, &background_thread_lock); 12 | atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED); 13 | } 14 | 15 | JEMALLOC_ALWAYS_INLINE background_thread_info_t * 16 | 
arena_background_thread_info_get(arena_t *arena) { 17 | unsigned arena_ind = arena_ind_get(arena); 18 | return &background_thread_info[arena_ind % max_background_threads]; 19 | } 20 | 21 | JEMALLOC_ALWAYS_INLINE background_thread_info_t * 22 | background_thread_info_get(size_t ind) { 23 | return &background_thread_info[ind % max_background_threads]; 24 | } 25 | 26 | JEMALLOC_ALWAYS_INLINE uint64_t 27 | background_thread_wakeup_time_get(background_thread_info_t *info) { 28 | uint64_t next_wakeup = nstime_ns(&info->next_wakeup); 29 | assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) == 30 | (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP)); 31 | return next_wakeup; 32 | } 33 | 34 | JEMALLOC_ALWAYS_INLINE void 35 | background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info, 36 | uint64_t wakeup_time) { 37 | malloc_mutex_assert_owner(tsdn, &info->mtx); 38 | atomic_store_b(&info->indefinite_sleep, 39 | wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE); 40 | nstime_init(&info->next_wakeup, wakeup_time); 41 | } 42 | 43 | JEMALLOC_ALWAYS_INLINE bool 44 | background_thread_indefinite_sleep(background_thread_info_t *info) { 45 | return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE); 46 | } 47 | 48 | JEMALLOC_ALWAYS_INLINE void 49 | arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, 50 | bool is_background_thread) { 51 | if (!background_thread_enabled() || is_background_thread) { 52 | return; 53 | } 54 | background_thread_info_t *info = 55 | arena_background_thread_info_get(arena); 56 | if (background_thread_indefinite_sleep(info)) { 57 | background_thread_interval_check(tsdn, arena, 58 | &arena->decay_dirty, 0); 59 | } 60 | } 61 | 62 | #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */ 63 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/background_thread_structs.h: 
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H

/* This file really combines "structs" and "types", but only transitionally. */

#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
#  define JEMALLOC_PTHREAD_CREATE_WRAPPER
#endif

/* Sentinel wakeup time meaning "no wakeup scheduled". */
#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
#define DEFAULT_NUM_BACKGROUND_THREAD 4

typedef enum {
	background_thread_stopped,
	background_thread_started,
	/* Thread waits on the global lock when paused (for arena_reset). */
	background_thread_paused,
} background_thread_state_t;

/* Per-background-thread bookkeeping. */
struct background_thread_info_s {
#ifdef JEMALLOC_BACKGROUND_THREAD
	/* Background thread is pthread specific. */
	pthread_t thread;
	pthread_cond_t cond;
#endif
	malloc_mutex_t mtx;
	background_thread_state_t state;
	/* When true, it means no wakeup scheduled. */
	atomic_b_t indefinite_sleep;
	/* Next scheduled wakeup time (absolute time in ns). */
	nstime_t next_wakeup;
	/*
	 * Since the last background thread run, newly added number of pages
	 * that need to be purged by the next wakeup. This is adjusted on
	 * epoch advance, and is used to determine whether we should signal the
	 * background thread to wake up earlier.
	 */
	size_t npages_to_purge_new;
	/* Stats: total number of runs since started. */
	uint64_t tot_n_runs;
	/* Stats: total sleep time since started.
	 */
	nstime_t tot_sleep_time;
};
typedef struct background_thread_info_s background_thread_info_t;

/* Aggregate background-thread stats snapshot. */
struct background_thread_stats_s {
	size_t num_threads;
	uint64_t num_runs;
	nstime_t run_interval;
};
typedef struct background_thread_stats_s background_thread_stats_t;

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/base_externs.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H

/* metadata_thp run-time option and its printable mode names. */
extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];

base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
    extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
    size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/base_inlines.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H

/* Index of the arena this base is associated with. */
static inline unsigned
base_ind_get(const base_t *base) {
	return base->ind;
}

/* True when opt_metadata_thp is any mode other than "disabled". */
static inline bool
metadata_thp_enabled(void) {
	return (opt_metadata_thp != metadata_thp_disabled);
}
#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/base_structs.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"

/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
	/* Total size of block's virtual memory mapping. */
	size_t size;

	/* Next block in list of base's blocks. */
	base_block_t *next;

	/* Tracks unused trailing space. */
	extent_t extent;
};

struct base_s {
	/* Associated arena's index within the arenas array. */
	unsigned ind;

	/*
	 * User-configurable extent hook functions. Points to an
	 * extent_hooks_t.
	 */
	atomic_p_t extent_hooks;

	/* Protects base_alloc() and base_stats_get() operations. */
	malloc_mutex_t mtx;

	/* Using THP when true (metadata_thp auto mode). */
	bool auto_thp_switched;
	/*
	 * Most recent size class in the series of increasingly large base
	 * extents. Logarithmic spacing between subsequent allocations ensures
	 * that the total number of distinct mappings remains small.
	 */
	pszind_t pind_last;

	/* Serial number generation state. */
	size_t extent_sn_next;

	/* Chain of all blocks associated with base. */
	base_block_t *blocks;

	/* Heap of extents that track unused trailing space within blocks.
	 */
	extent_heap_t avail[SC_NSIZES];

	/* Stats, only maintained if config_stats. */
	size_t allocated;
	size_t resident;
	size_t mapped;
	/* Number of THP regions touched. */
	size_t n_thp;
};

#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/base_types.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H

typedef struct base_block_s base_block_t;
typedef struct base_s base_t;

#define METADATA_THP_DEFAULT metadata_thp_disabled

/*
 * In auto mode, arenas switch to huge pages for the base allocator on the
 * second base block. a0 switches to thp on the 5th block (after 20 megabytes
 * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
 */

#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5

typedef enum {
	metadata_thp_disabled = 0,
	/*
	 * Lazily enable hugepage for metadata. To avoid high RSS caused by THP
	 * + low usage arena (i.e. THP becomes a significant percentage), the
	 * "auto" option only starts using THP after a base allocator used up
	 * the first THP region. Starting from the second hugepage (in a single
	 * arena), "auto" behaves the same as "always", i.e. madvise hugepage
	 * right away.
	 */
	metadata_thp_auto = 1,
	metadata_thp_always = 2,
	metadata_thp_mode_limit = 3
} metadata_thp_mode_t;

#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/bin_stats.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
#define JEMALLOC_INTERNAL_BIN_STATS_H

#include "jemalloc/internal/mutex_prof.h"

typedef struct bin_stats_s bin_stats_t;
struct bin_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin. Note that tcache may allocate an object, then recycle it
	 * many times, resulting many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t nmalloc;
	uint64_t ndalloc;

	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin. This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t nrequests;

	/*
	 * Current number of regions of this size class, including regions
	 * currently cached by tcache.
	 */
	size_t curregs;

	/* Number of tcache fills from this bin. */
	uint64_t nfills;

	/* Number of tcache flushes to this bin. */
	uint64_t nflushes;

	/* Total number of slabs created for this bin's size class. */
	uint64_t nslabs;

	/*
	 * Total number of slabs reused by extracting them from the slabs heap
	 * for this bin's size class.
	 */
	uint64_t reslabs;

	/* Current number of slabs in this bin. */
	size_t curslabs;

	/* Current size of nonfull slabs heap in this bin.
	 */
	size_t nonfull_slabs;

	mutex_prof_data_t mutex_data;
};

#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/bin_types.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_BIN_TYPES_H
#define JEMALLOC_INTERNAL_BIN_TYPES_H

#include "jemalloc/internal/sc.h"

/* Upper bound on bin shards, limited by the extent binshard bit-field width. */
#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT 1

/* Used in TSD static initializer only. Real init in arena_bind(). */
#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}}

typedef struct tsd_binshards_s tsd_binshards_t;
/* Per-thread bin shard assignment, one entry per small size class. */
struct tsd_binshards_s {
	uint8_t binshard[SC_NBINS];
};

#endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/div.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_DIV_H
#define JEMALLOC_INTERNAL_DIV_H

#include "jemalloc/internal/assert.h"

/*
 * This module does the division that computes the index of a region in a slab,
 * given its offset relative to the base.
 * That is, given a divisor d, an n = i * d (all integers), we'll return i.
 * We do some pre-computation to do this more quickly than a CPU division
 * instruction.
 * We bound n < 2^32, and don't support dividing by one.
 */

typedef struct div_info_s div_info_t;
struct div_info_s {
	/* Precomputed multiplier (set by div_init); see div_compute(). */
	uint32_t magic;
#ifdef JEMALLOC_DEBUG
	/* Original divisor, kept only to assert exact divisibility. */
	size_t d;
#endif
};

/* Precompute div_info for the given divisor. */
void div_init(div_info_t *div_info, size_t divisor);

/* Return n / d for the divisor d that div_info was initialized with. */
static inline size_t
div_compute(div_info_t *div_info, size_t n) {
	assert(n <= (uint32_t)-1);
	/*
	 * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,
	 * the compilers I tried were all smart enough to turn this into the
	 * appropriate "get the high 32 bits of the result of a multiply" (e.g.
	 * mul; mov edx eax; on x86, umull on arm, etc.).
	 */
	size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
#ifdef JEMALLOC_DEBUG
	assert(i * div_info->d == n);
#endif
	return i;
}

#endif /* JEMALLOC_INTERNAL_DIV_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/extent_dss.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
#define JEMALLOC_INTERNAL_EXTENT_DSS_H

/* Precedence of DSS allocation relative to other extent sources (opt_dss). */
typedef enum {
	dss_prec_disabled = 0,
	dss_prec_primary = 1,
	dss_prec_secondary = 2,

	dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"

extern const char *dss_prec_names[];

extern const char *opt_dss;

dss_prec_t extent_dss_prec_get(void);
bool extent_dss_prec_set(dss_prec_t dss_prec);
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit);
bool extent_in_dss(void *addr);
bool extent_dss_mergeable(void *addr_a, void *addr_b);
void extent_dss_boot(void);

#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/extent_mmap.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H

extern bool opt_retain;

void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit);
bool
extent_dalloc_mmap(void *addr, size_t size);

#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/extent_types.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H

typedef struct extent_s extent_t;
typedef struct extents_s extents_t;

typedef struct extent_util_stats_s extent_util_stats_t;
typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t;

#define EXTENT_HOOKS_INITIALIZER NULL

/*
 * When reuse (and split) an active extent, (1U << opt_lg_extent_max_active_fit)
 * is the max ratio between the size of the active extent and the new extent.
 */
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6

typedef enum {
	EXTENT_NOT_HEAD,
	EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain.
	 */
} extent_head_state_t;

#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/jemalloc_internal_decls.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H

/*
 * NOTE(review): the system-header names on the bare "#include" lines in this
 * file were lost when the source was flattened (angle-bracket targets were
 * stripped); restore them from the upstream jemalloc 5.2.1 tree before
 * attempting to compile.
 */
#include
#ifdef _WIN32
#  include
#  include "msvc_compat/windows_extra.h"
#  ifdef _WIN64
#    if LG_VADDR <= 32
#      error Generate the headers using x64 vcargs
#    endif
#  else
#    if LG_VADDR > 32
#      undef LG_VADDR
#      define LG_VADDR 32
#    endif
#  endif
#else
#  include
#  include
#  if !defined(__pnacl__) && !defined(__native_client__)
#    include
#    if !defined(SYS_write) && defined(__NR_write)
#      define SYS_write __NR_write
#    endif
#    if defined(SYS_open) && defined(__aarch64__)
       /* Android headers may define SYS_open to __NR_open even though
	* __NR_open may not exist on AArch64 (superseded by __NR_openat).
	*/
#      undef SYS_open
#    endif
#    include
#  endif
#  include
#  ifdef __FreeBSD__
#    include
#  endif
#  include
#  ifdef JEMALLOC_OS_UNFAIR_LOCK
#    include
#  endif
#  ifdef JEMALLOC_GLIBC_MALLOC_HOOK
#    include
#  endif
#  include
#  include
#  include
#  ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
#    include
#  endif
#endif
#include

#include
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX SIZE_MAX
#endif
#ifndef SSIZE_MAX
#  define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
#endif
#include
#include
#include
#include
#include
#include
#ifndef offsetof
#  define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include
#include
#include
#ifdef _MSC_VER
#  include
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
#  ifdef JEMALLOC_HAS_RESTRICT
#    define restrict __restrict
#  endif
/* Disable warnings about deprecated system functions. */
#  pragma warning(disable: 4996)
#if _MSC_VER < 1800
/* Pre-VS2013 CRT lacks isblank(); provide a minimal replacement. */
static int
isblank(int c) {
	return (c == '\t' || c == ' ');
}
#endif
#else
#  include
#endif
#include

#endif /* JEMALLOC_INTERNAL_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/jemalloc_internal_externs.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTERNS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/tsd_types.h"

/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;

/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern bool opt_confirm_conf;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern unsigned opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;

/* Base index for manual arenas. */
extern unsigned manual_arena_base;

/*
 * Arenas that are used to service external requests. Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern atomic_p_t arenas[];

void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
bool malloc_initialized(void);
void je_sdallocx_noflags(void *ptr, size_t size);

#endif /* JEMALLOC_INTERNAL_EXTERNS_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/jemalloc_internal_inlines_b.h:
--------------------------------------------------------------------------------
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H

#include "jemalloc/internal/rtree.h"

/* Choose an arena based on a per-thread value. */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
	arena_t *ret;

	/* An explicitly supplied arena always wins. */
	if (arena != NULL) {
		return arena;
	}

	/* During reentrancy, arena 0 is the safest bet. */
	if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
		return arena_get(tsd_tsdn(tsd), 0, true);
	}

	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
	if (unlikely(ret == NULL)) {
		/* No arena bound to this thread yet: pick one and (re)bind
		 * the tcache to it. */
		ret = arena_choose_hard(tsd, internal);
		assert(ret);
		if (tcache_available(tsd)) {
			tcache_t *tcache = tcache_get(tsd);
			if (tcache->arena != NULL) {
				/* See comments in tcache_data_init().*/
				assert(tcache->arena ==
				    arena_get(tsd_tsdn(tsd), 0, false));
				if (tcache->arena != ret) {
					tcache_arena_reassociate(tsd_tsdn(tsd),
					    tcache, ret);
				}
			} else {
				tcache_arena_associate(tsd_tsdn(tsd), tcache,
				    ret);
			}
		}
	}

	/*
	 * Note that for percpu arena, if the current arena is outside of the
	 * auto percpu arena range, (i.e. thread is assigned to a manually
	 * managed arena), then percpu arena is skipped.
	 */
	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
	    !internal && (arena_ind_get(ret) <
	    percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
	    tsd_tsdn(tsd))) {
		unsigned ind = percpu_arena_choose();
		if (arena_ind_get(ret) != ind) {
			percpu_arena_update(tsd, ind);
			ret = tsd_arena_get(tsd);
		}
		ret->last_thd = tsd_tsdn(tsd);
	}

	return ret;
}

/* Choose an application (non-internal) arena. */
static inline arena_t *
arena_choose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, false);
}

/* Choose an internal-use arena. */
static inline arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, true);
}

/* True iff arena's index falls below the manual-arena base. */
static inline bool
arena_is_auto(arena_t *arena) {
	assert(narenas_auto > 0);

	return (arena_ind_get(arena) < manual_arena_base);
}

/* Look up the extent containing ptr via the extents rtree. */
JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
}

#endif /* JEMALLOC_INTERNAL_INLINES_B_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/large_externs.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H

#include "jemalloc/internal/hook.h"

void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
    size_t alignment,
    bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args);

/* Junk-fill hooks; JET_MUTABLE presumably allows test override -- confirm. */
typedef void (large_dalloc_junk_t)(void *, size_t);
extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;

typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;

void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);

nstime_t large_prof_alloc_time_get(const extent_t *extent);
void large_prof_alloc_time_set(extent_t *extent, nstime_t time);

#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/malloc_io.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
#define JEMALLOC_INTERNAL_MALLOC_IO_H

/* printf format-specifier macros (inttypes.h-style, spelled out on Windows). */
#ifdef _WIN32
#  ifdef _WIN64
#    define FMT64_PREFIX "ll"
#    define FMTPTR_PREFIX "ll"
#  else
#    define FMT64_PREFIX "ll"
#    define FMTPTR_PREFIX ""
#  endif
#  define FMTd32 "d"
#  define FMTu32 "u"
#  define FMTx32 "x"
#  define FMTd64 FMT64_PREFIX "d"
#  define FMTu64 FMT64_PREFIX "u"
#  define FMTx64 FMT64_PREFIX "x"
#  define FMTdPTR FMTPTR_PREFIX "d"
#  define FMTuPTR FMTPTR_PREFIX "u"
#  define FMTxPTR FMTPTR_PREFIX "x"
#else
   /* NOTE(review): include target lost in extraction (PRI* macros suggest
    * <inttypes.h>); restore from upstream. */
#  include
#  define FMTd32 PRId32
#  define FMTu32 PRIu32
#  define FMTx32 PRIx32
#  define FMTd64 PRId64
#  define FMTu64 PRIu64
#  define FMTx64 PRIx64
#  define FMTdPTR PRIdPTR
#  define FMTuPTR PRIuPTR
#  define FMTxPTR PRIxPTR
#endif

/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64

/*
 * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
 * large enough for all possible uses within jemalloc.
 */
#define MALLOC_PRINTF_BUFSIZE 4096

int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
    int base);
void malloc_write(const char *s);

/*
 * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
 * point math.
 */
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_FORMAT_PRINTF(3, 4);
/*
 * The caller can set write_cb to null to choose to print with the
 * je_malloc_message hook.
 */
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);

/* Write count bytes from buf to fd; raw write/syscall result as ssize_t. */
static inline ssize_t
malloc_write_fd(int fd, const void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
	/*
	 * Use syscall(2) rather than write(2) when possible in order to avoid
	 * the possibility of memory allocation within libc. This is necessary
	 * on FreeBSD; most operating systems do not have this problem though.
	 *
	 * syscall() returns long or int, depending on platform, so capture the
	 * result in the widest plausible type to avoid compiler warnings.
	 */
	long result = syscall(SYS_write, fd, buf, count);
#else
	ssize_t result = (ssize_t)write(fd, buf,
#ifdef _WIN32
	    (unsigned int)
#endif
	    count);
#endif
	return (ssize_t)result;
}

/* Read up to count bytes from fd into buf; same syscall rationale as
 * malloc_write_fd(). */
static inline ssize_t
malloc_read_fd(int fd, void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
	long result = syscall(SYS_read, fd, buf, count);
#else
	ssize_t result = read(fd, buf,
#ifdef _WIN32
	    (unsigned int)
#endif
	    count);
#endif
	return (ssize_t)result;
}

#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */

--------------------------------------------------------------------------------
/jemalloc-5.2.1/include/jemalloc/internal/mutex_pool.h:
--------------------------------------------------------------------------------

#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_H

#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/witness.h"

/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256

typedef struct mutex_pool_s mutex_pool_t;
struct mutex_pool_s {
	malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};

bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);

/* Internal helper - not meant to be called outside this module.
*/ 19 | static inline malloc_mutex_t * 20 | mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) { 21 | size_t hash_result[2]; 22 | hash(&key, sizeof(key), 0xd50dcc1b, hash_result); 23 | return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE]; 24 | } 25 | 26 | static inline void 27 | mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) { 28 | for (int i = 0; i < MUTEX_POOL_SIZE; i++) { 29 | malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]); 30 | } 31 | } 32 | 33 | /* 34 | * Note that a mutex pool doesn't work exactly the way an embdedded mutex would. 35 | * You're not allowed to acquire mutexes in the pool one at a time. You have to 36 | * acquire all the mutexes you'll need in a single function call, and then 37 | * release them all in a single function call. 38 | */ 39 | 40 | static inline void 41 | mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { 42 | mutex_pool_assert_not_held(tsdn, pool); 43 | 44 | malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); 45 | malloc_mutex_lock(tsdn, mutex); 46 | } 47 | 48 | static inline void 49 | mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { 50 | malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); 51 | malloc_mutex_unlock(tsdn, mutex); 52 | 53 | mutex_pool_assert_not_held(tsdn, pool); 54 | } 55 | 56 | static inline void 57 | mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, 58 | uintptr_t key2) { 59 | mutex_pool_assert_not_held(tsdn, pool); 60 | 61 | malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); 62 | malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); 63 | if ((uintptr_t)mutex1 < (uintptr_t)mutex2) { 64 | malloc_mutex_lock(tsdn, mutex1); 65 | malloc_mutex_lock(tsdn, mutex2); 66 | } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) { 67 | malloc_mutex_lock(tsdn, mutex1); 68 | } else { 69 | malloc_mutex_lock(tsdn, mutex2); 70 | malloc_mutex_lock(tsdn, mutex1); 71 | } 72 | } 73 | 74 | static inline void 75 | mutex_pool_unlock2(tsdn_t *tsdn, 
mutex_pool_t *pool, uintptr_t key1, 76 | uintptr_t key2) { 77 | malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); 78 | malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); 79 | if (mutex1 == mutex2) { 80 | malloc_mutex_unlock(tsdn, mutex1); 81 | } else { 82 | malloc_mutex_unlock(tsdn, mutex1); 83 | malloc_mutex_unlock(tsdn, mutex2); 84 | } 85 | 86 | mutex_pool_assert_not_held(tsdn, pool); 87 | } 88 | 89 | static inline void 90 | mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { 91 | malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key)); 92 | } 93 | 94 | #endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */ 95 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/nstime.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_NSTIME_H 2 | #define JEMALLOC_INTERNAL_NSTIME_H 3 | 4 | /* Maximum supported number of seconds (~584 years). */ 5 | #define NSTIME_SEC_MAX KQU(18446744072) 6 | #define NSTIME_ZERO_INITIALIZER {0} 7 | 8 | typedef struct { 9 | uint64_t ns; 10 | } nstime_t; 11 | 12 | void nstime_init(nstime_t *time, uint64_t ns); 13 | void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); 14 | uint64_t nstime_ns(const nstime_t *time); 15 | uint64_t nstime_sec(const nstime_t *time); 16 | uint64_t nstime_msec(const nstime_t *time); 17 | uint64_t nstime_nsec(const nstime_t *time); 18 | void nstime_copy(nstime_t *time, const nstime_t *source); 19 | int nstime_compare(const nstime_t *a, const nstime_t *b); 20 | void nstime_add(nstime_t *time, const nstime_t *addend); 21 | void nstime_iadd(nstime_t *time, uint64_t addend); 22 | void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); 23 | void nstime_isubtract(nstime_t *time, uint64_t subtrahend); 24 | void nstime_imultiply(nstime_t *time, uint64_t multiplier); 25 | void nstime_idivide(nstime_t *time, uint64_t divisor); 26 | uint64_t 
nstime_divide(const nstime_t *time, const nstime_t *divisor); 27 | 28 | typedef bool (nstime_monotonic_t)(void); 29 | extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic; 30 | 31 | typedef bool (nstime_update_t)(nstime_t *); 32 | extern nstime_update_t *JET_MUTABLE nstime_update; 33 | 34 | #endif /* JEMALLOC_INTERNAL_NSTIME_H */ 35 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/pages.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H 2 | #define JEMALLOC_INTERNAL_PAGES_EXTERNS_H 3 | 4 | /* Page size. LG_PAGE is determined by the configure script. */ 5 | #ifdef PAGE_MASK 6 | # undef PAGE_MASK 7 | #endif 8 | #define PAGE ((size_t)(1U << LG_PAGE)) 9 | #define PAGE_MASK ((size_t)(PAGE - 1)) 10 | /* Return the page base address for the page containing address a. */ 11 | #define PAGE_ADDR2BASE(a) \ 12 | ((void *)((uintptr_t)(a) & ~PAGE_MASK)) 13 | /* Return the smallest pagesize multiple that is >= s. */ 14 | #define PAGE_CEILING(s) \ 15 | (((s) + PAGE_MASK) & ~PAGE_MASK) 16 | 17 | /* Huge page size. LG_HUGEPAGE is determined by the configure script. */ 18 | #define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE)) 19 | #define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1)) 20 | /* Return the huge page base address for the huge page containing address a. */ 21 | #define HUGEPAGE_ADDR2BASE(a) \ 22 | ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK)) 23 | /* Return the smallest huge page size multiple that is >= s. */ 24 | #define HUGEPAGE_CEILING(s) \ 25 | (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK) 26 | 27 | /* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */ 28 | #if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE) 29 | # define PAGES_CAN_PURGE_LAZY 30 | #endif 31 | /* 32 | * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
33 | * 34 | * The only supported way to hard-purge on Windows is to decommit and then 35 | * re-commit, but doing so is racy, and if re-commit fails it's a pain to 36 | * propagate the "poisoned" memory state. Since we typically decommit as the 37 | * next step after purging on Windows anyway, there's no point in adding such 38 | * complexity. 39 | */ 40 | #if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ 41 | defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \ 42 | defined(JEMALLOC_MAPS_COALESCE)) 43 | # define PAGES_CAN_PURGE_FORCED 44 | #endif 45 | 46 | static const bool pages_can_purge_lazy = 47 | #ifdef PAGES_CAN_PURGE_LAZY 48 | true 49 | #else 50 | false 51 | #endif 52 | ; 53 | static const bool pages_can_purge_forced = 54 | #ifdef PAGES_CAN_PURGE_FORCED 55 | true 56 | #else 57 | false 58 | #endif 59 | ; 60 | 61 | typedef enum { 62 | thp_mode_default = 0, /* Do not change hugepage settings. */ 63 | thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */ 64 | thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */ 65 | 66 | thp_mode_names_limit = 3, /* Used for option processing. */ 67 | thp_mode_not_supported = 3 /* No THP support detected. */ 68 | } thp_mode_t; 69 | 70 | #define THP_MODE_DEFAULT thp_mode_default 71 | extern thp_mode_t opt_thp; 72 | extern thp_mode_t init_system_thp_mode; /* Initial system wide state. 
*/ 73 | extern const char *thp_mode_names[]; 74 | 75 | void *pages_map(void *addr, size_t size, size_t alignment, bool *commit); 76 | void pages_unmap(void *addr, size_t size); 77 | bool pages_commit(void *addr, size_t size); 78 | bool pages_decommit(void *addr, size_t size); 79 | bool pages_purge_lazy(void *addr, size_t size); 80 | bool pages_purge_forced(void *addr, size_t size); 81 | bool pages_huge(void *addr, size_t size); 82 | bool pages_nohuge(void *addr, size_t size); 83 | bool pages_dontdump(void *addr, size_t size); 84 | bool pages_dodump(void *addr, size_t size); 85 | bool pages_boot(void); 86 | void pages_set_thp_state (void *ptr, size_t size); 87 | 88 | #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */ 89 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/private_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | for symbol in `cat "$@"` ; do 4 | echo "#define ${symbol} JEMALLOC_N(${symbol})" 5 | done 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/private_symbols.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Generate private_symbols[_jet].awk. 4 | # 5 | # Usage: private_symbols.sh * 6 | # 7 | # is typically "" or "_". 8 | 9 | sym_prefix=$1 10 | shift 11 | 12 | cat <' output. 35 | # 36 | # Handle lines like: 37 | # 0000000000000008 D opt_junk 38 | # 0000000000007574 T malloc_initialized 39 | (NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) { 40 | print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix)) 41 | } 42 | 43 | # Process 'dumpbin /SYMBOLS ' output. 
44 | # 45 | # Handle lines like: 46 | # 353 00008098 SECT4 notype External | opt_junk 47 | # 3F1 00000000 SECT7 notype () External | malloc_initialized 48 | ($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) { 49 | print $NF 50 | } 51 | EOF 52 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/prof_inlines_a.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H 2 | #define JEMALLOC_INTERNAL_PROF_INLINES_A_H 3 | 4 | #include "jemalloc/internal/mutex.h" 5 | 6 | static inline bool 7 | prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, 8 | uint64_t accumbytes) { 9 | cassert(config_prof); 10 | 11 | bool overflow; 12 | uint64_t a0, a1; 13 | 14 | /* 15 | * If the application allocates fast enough (and/or if idump is slow 16 | * enough), extreme overflow here (a1 >= prof_interval * 2) can cause 17 | * idump trigger coalescing. This is an intentional mechanism that 18 | * avoids rate-limiting allocation. 
19 | */ 20 | #ifdef JEMALLOC_ATOMIC_U64 21 | a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); 22 | do { 23 | a1 = a0 + accumbytes; 24 | assert(a1 >= a0); 25 | overflow = (a1 >= prof_interval); 26 | if (overflow) { 27 | a1 %= prof_interval; 28 | } 29 | } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, 30 | a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); 31 | #else 32 | malloc_mutex_lock(tsdn, &prof_accum->mtx); 33 | a0 = prof_accum->accumbytes; 34 | a1 = a0 + accumbytes; 35 | overflow = (a1 >= prof_interval); 36 | if (overflow) { 37 | a1 %= prof_interval; 38 | } 39 | prof_accum->accumbytes = a1; 40 | malloc_mutex_unlock(tsdn, &prof_accum->mtx); 41 | #endif 42 | return overflow; 43 | } 44 | 45 | static inline void 46 | prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, 47 | size_t usize) { 48 | cassert(config_prof); 49 | 50 | /* 51 | * Cancel out as much of the excessive prof_accumbytes increase as 52 | * possible without underflowing. Interval-triggered dumps occur 53 | * slightly more often than intended as a result of incomplete 54 | * canceling. 55 | */ 56 | uint64_t a0, a1; 57 | #ifdef JEMALLOC_ATOMIC_U64 58 | a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); 59 | do { 60 | a1 = (a0 >= SC_LARGE_MINCLASS - usize) 61 | ? a0 - (SC_LARGE_MINCLASS - usize) : 0; 62 | } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, 63 | a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); 64 | #else 65 | malloc_mutex_lock(tsdn, &prof_accum->mtx); 66 | a0 = prof_accum->accumbytes; 67 | a1 = (a0 >= SC_LARGE_MINCLASS - usize) 68 | ? a0 - (SC_LARGE_MINCLASS - usize) : 0; 69 | prof_accum->accumbytes = a1; 70 | malloc_mutex_unlock(tsdn, &prof_accum->mtx); 71 | #endif 72 | } 73 | 74 | JEMALLOC_ALWAYS_INLINE bool 75 | prof_active_get_unlocked(void) { 76 | /* 77 | * Even if opt_prof is true, sampling can be temporarily disabled by 78 | * setting prof_active to false. 
No locking is used when reading 79 | * prof_active in the fast path, so there are no guarantees regarding 80 | * how long it will take for all threads to notice state changes. 81 | */ 82 | return prof_active; 83 | } 84 | 85 | #endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */ 86 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/prof_types.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_PROF_TYPES_H 2 | #define JEMALLOC_INTERNAL_PROF_TYPES_H 3 | 4 | typedef struct prof_bt_s prof_bt_t; 5 | typedef struct prof_accum_s prof_accum_t; 6 | typedef struct prof_cnt_s prof_cnt_t; 7 | typedef struct prof_tctx_s prof_tctx_t; 8 | typedef struct prof_gctx_s prof_gctx_t; 9 | typedef struct prof_tdata_s prof_tdata_t; 10 | 11 | /* Option defaults. */ 12 | #ifdef JEMALLOC_PROF 13 | # define PROF_PREFIX_DEFAULT "jeprof" 14 | #else 15 | # define PROF_PREFIX_DEFAULT "" 16 | #endif 17 | #define LG_PROF_SAMPLE_DEFAULT 19 18 | #define LG_PROF_INTERVAL_DEFAULT -1 19 | 20 | /* 21 | * Hard limit on stack backtrace depth. The version of prof_backtrace() that 22 | * is based on __builtin_return_address() necessarily has a hard-coded number 23 | * of backtrace frame handlers, and should be kept in sync with this setting. 24 | */ 25 | #define PROF_BT_MAX 128 26 | 27 | /* Initial hash table size. */ 28 | #define PROF_CKH_MINITEMS 64 29 | 30 | /* Size of memory buffer to use when writing dump files. */ 31 | #define PROF_DUMP_BUFSIZE 65536 32 | 33 | /* Size of stack-allocated buffer used by prof_printf(). */ 34 | #define PROF_PRINTF_BUFSIZE 128 35 | 36 | /* 37 | * Number of mutexes shared among all gctx's. No space is allocated for these 38 | * unless profiling is enabled, so it's okay to over-provision. 39 | */ 40 | #define PROF_NCTX_LOCKS 1024 41 | 42 | /* 43 | * Number of mutexes shared among all tdata's. 
No space is allocated for these 44 | * unless profiling is enabled, so it's okay to over-provision. 45 | */ 46 | #define PROF_NTDATA_LOCKS 256 47 | 48 | /* 49 | * prof_tdata pointers close to NULL are used to encode state information that 50 | * is used for cleaning up during thread shutdown. 51 | */ 52 | #define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) 53 | #define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) 54 | #define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY 55 | 56 | #endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */ 57 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/public_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | for nm in `cat $1` ; do 4 | n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` 5 | echo "#define je_${n} JEMALLOC_N(${n})" 6 | done 7 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/public_unnamespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | for nm in `cat $1` ; do 4 | n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` 5 | echo "#undef je_${n}" 6 | done 7 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/ql.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_QL_H 2 | #define JEMALLOC_INTERNAL_QL_H 3 | 4 | #include "jemalloc/internal/qr.h" 5 | 6 | /* List definitions. */ 7 | #define ql_head(a_type) \ 8 | struct { \ 9 | a_type *qlh_first; \ 10 | } 11 | 12 | #define ql_head_initializer(a_head) {NULL} 13 | 14 | #define ql_elm(a_type) qr(a_type) 15 | 16 | /* List functions. 
*/ 17 | #define ql_new(a_head) do { \ 18 | (a_head)->qlh_first = NULL; \ 19 | } while (0) 20 | 21 | #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) 22 | 23 | #define ql_first(a_head) ((a_head)->qlh_first) 24 | 25 | #define ql_last(a_head, a_field) \ 26 | ((ql_first(a_head) != NULL) \ 27 | ? qr_prev(ql_first(a_head), a_field) : NULL) 28 | 29 | #define ql_next(a_head, a_elm, a_field) \ 30 | ((ql_last(a_head, a_field) != (a_elm)) \ 31 | ? qr_next((a_elm), a_field) : NULL) 32 | 33 | #define ql_prev(a_head, a_elm, a_field) \ 34 | ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ 35 | : NULL) 36 | 37 | #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ 38 | qr_before_insert((a_qlelm), (a_elm), a_field); \ 39 | if (ql_first(a_head) == (a_qlelm)) { \ 40 | ql_first(a_head) = (a_elm); \ 41 | } \ 42 | } while (0) 43 | 44 | #define ql_after_insert(a_qlelm, a_elm, a_field) \ 45 | qr_after_insert((a_qlelm), (a_elm), a_field) 46 | 47 | #define ql_head_insert(a_head, a_elm, a_field) do { \ 48 | if (ql_first(a_head) != NULL) { \ 49 | qr_before_insert(ql_first(a_head), (a_elm), a_field); \ 50 | } \ 51 | ql_first(a_head) = (a_elm); \ 52 | } while (0) 53 | 54 | #define ql_tail_insert(a_head, a_elm, a_field) do { \ 55 | if (ql_first(a_head) != NULL) { \ 56 | qr_before_insert(ql_first(a_head), (a_elm), a_field); \ 57 | } \ 58 | ql_first(a_head) = qr_next((a_elm), a_field); \ 59 | } while (0) 60 | 61 | #define ql_remove(a_head, a_elm, a_field) do { \ 62 | if (ql_first(a_head) == (a_elm)) { \ 63 | ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ 64 | } \ 65 | if (ql_first(a_head) != (a_elm)) { \ 66 | qr_remove((a_elm), a_field); \ 67 | } else { \ 68 | ql_first(a_head) = NULL; \ 69 | } \ 70 | } while (0) 71 | 72 | #define ql_head_remove(a_head, a_type, a_field) do { \ 73 | a_type *t = ql_first(a_head); \ 74 | ql_remove((a_head), t, a_field); \ 75 | } while (0) 76 | 77 | #define ql_tail_remove(a_head, a_type, a_field) do { \ 78 | a_type *t = 
ql_last(a_head, a_field); \ 79 | ql_remove((a_head), t, a_field); \ 80 | } while (0) 81 | 82 | #define ql_foreach(a_var, a_head, a_field) \ 83 | qr_foreach((a_var), ql_first(a_head), a_field) 84 | 85 | #define ql_reverse_foreach(a_var, a_head, a_field) \ 86 | qr_reverse_foreach((a_var), ql_first(a_head), a_field) 87 | 88 | #endif /* JEMALLOC_INTERNAL_QL_H */ 89 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/qr.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_QR_H 2 | #define JEMALLOC_INTERNAL_QR_H 3 | 4 | /* Ring definitions. */ 5 | #define qr(a_type) \ 6 | struct { \ 7 | a_type *qre_next; \ 8 | a_type *qre_prev; \ 9 | } 10 | 11 | /* Ring functions. */ 12 | #define qr_new(a_qr, a_field) do { \ 13 | (a_qr)->a_field.qre_next = (a_qr); \ 14 | (a_qr)->a_field.qre_prev = (a_qr); \ 15 | } while (0) 16 | 17 | #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) 18 | 19 | #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) 20 | 21 | #define qr_before_insert(a_qrelm, a_qr, a_field) do { \ 22 | (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ 23 | (a_qr)->a_field.qre_next = (a_qrelm); \ 24 | (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ 25 | (a_qrelm)->a_field.qre_prev = (a_qr); \ 26 | } while (0) 27 | 28 | #define qr_after_insert(a_qrelm, a_qr, a_field) do { \ 29 | (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ 30 | (a_qr)->a_field.qre_prev = (a_qrelm); \ 31 | (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ 32 | (a_qrelm)->a_field.qre_next = (a_qr); \ 33 | } while (0) 34 | 35 | #define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \ 36 | a_type *t; \ 37 | (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ 38 | (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ 39 | t = (a_qr_a)->a_field.qre_prev; \ 40 | (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ 41 | 
(a_qr_b)->a_field.qre_prev = t; \ 42 | } while (0) 43 | 44 | /* 45 | * qr_meld() and qr_split() are functionally equivalent, so there's no need to 46 | * have two copies of the code. 47 | */ 48 | #define qr_split(a_qr_a, a_qr_b, a_type, a_field) \ 49 | qr_meld((a_qr_a), (a_qr_b), a_type, a_field) 50 | 51 | #define qr_remove(a_qr, a_field) do { \ 52 | (a_qr)->a_field.qre_prev->a_field.qre_next \ 53 | = (a_qr)->a_field.qre_next; \ 54 | (a_qr)->a_field.qre_next->a_field.qre_prev \ 55 | = (a_qr)->a_field.qre_prev; \ 56 | (a_qr)->a_field.qre_next = (a_qr); \ 57 | (a_qr)->a_field.qre_prev = (a_qr); \ 58 | } while (0) 59 | 60 | #define qr_foreach(var, a_qr, a_field) \ 61 | for ((var) = (a_qr); \ 62 | (var) != NULL; \ 63 | (var) = (((var)->a_field.qre_next != (a_qr)) \ 64 | ? (var)->a_field.qre_next : NULL)) 65 | 66 | #define qr_reverse_foreach(var, a_qr, a_field) \ 67 | for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ 68 | (var) != NULL; \ 69 | (var) = (((var) != (a_qr)) \ 70 | ? (var)->a_field.qre_prev : NULL)) 71 | 72 | #endif /* JEMALLOC_INTERNAL_QR_H */ 73 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/quantum.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_QUANTUM_H 2 | #define JEMALLOC_INTERNAL_QUANTUM_H 3 | 4 | /* 5 | * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size 6 | * classes). 
7 | */ 8 | #ifndef LG_QUANTUM 9 | # if (defined(__i386__) || defined(_M_IX86)) 10 | # define LG_QUANTUM 4 11 | # endif 12 | # ifdef __ia64__ 13 | # define LG_QUANTUM 4 14 | # endif 15 | # ifdef __alpha__ 16 | # define LG_QUANTUM 4 17 | # endif 18 | # if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) 19 | # define LG_QUANTUM 4 20 | # endif 21 | # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) 22 | # define LG_QUANTUM 4 23 | # endif 24 | # ifdef __arm__ 25 | # define LG_QUANTUM 3 26 | # endif 27 | # ifdef __aarch64__ 28 | # define LG_QUANTUM 4 29 | # endif 30 | # ifdef __hppa__ 31 | # define LG_QUANTUM 4 32 | # endif 33 | # ifdef __m68k__ 34 | # define LG_QUANTUM 3 35 | # endif 36 | # ifdef __mips__ 37 | # define LG_QUANTUM 3 38 | # endif 39 | # ifdef __nios2__ 40 | # define LG_QUANTUM 3 41 | # endif 42 | # ifdef __or1k__ 43 | # define LG_QUANTUM 3 44 | # endif 45 | # ifdef __powerpc__ 46 | # define LG_QUANTUM 4 47 | # endif 48 | # if defined(__riscv) || defined(__riscv__) 49 | # define LG_QUANTUM 4 50 | # endif 51 | # ifdef __s390__ 52 | # define LG_QUANTUM 4 53 | # endif 54 | # if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \ 55 | defined(__SH4_SINGLE_ONLY__)) 56 | # define LG_QUANTUM 4 57 | # endif 58 | # ifdef __tile__ 59 | # define LG_QUANTUM 4 60 | # endif 61 | # ifdef __le32__ 62 | # define LG_QUANTUM 4 63 | # endif 64 | # ifndef LG_QUANTUM 65 | # error "Unknown minimum alignment for architecture; specify via " 66 | "--with-lg-quantum" 67 | # endif 68 | #endif 69 | 70 | #define QUANTUM ((size_t)(1U << LG_QUANTUM)) 71 | #define QUANTUM_MASK (QUANTUM - 1) 72 | 73 | /* Return the smallest quantum multiple that is >= a. 
*/ 74 | #define QUANTUM_CEILING(a) \ 75 | (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) 76 | 77 | #endif /* JEMALLOC_INTERNAL_QUANTUM_H */ 78 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/rtree_tsd.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_RTREE_CTX_H 2 | #define JEMALLOC_INTERNAL_RTREE_CTX_H 3 | 4 | /* 5 | * Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. Each 6 | * entry supports an entire leaf, so the cache hit rate is typically high even 7 | * with a small number of entries. In rare cases extent activity will straddle 8 | * the boundary between two leaf nodes. Furthermore, an arena may use a 9 | * combination of dss and mmap. Note that as memory usage grows past the amount 10 | * that this cache can directly cover, the cache will become less effective if 11 | * locality of reference is low, but the consequence is merely cache misses 12 | * while traversing the tree nodes. 13 | * 14 | * The L1 direct mapped cache offers consistent and low cost on cache hit. 15 | * However collision could affect hit rate negatively. This is resolved by 16 | * combining with a L2 LRU cache, which requires linear search and re-ordering 17 | * on access but suffers no collision. Note that, the cache will itself suffer 18 | * cache misses if made overly large, plus the cost of linear search in the LRU 19 | * cache. 20 | */ 21 | #define RTREE_CTX_LG_NCACHE 4 22 | #define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE) 23 | #define RTREE_CTX_NCACHE_L2 8 24 | 25 | /* 26 | * Zero initializer required for tsd initialization only. Proper initialization 27 | * done via rtree_ctx_data_init(). 
28 | */ 29 | #define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}} 30 | 31 | 32 | typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; 33 | 34 | typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t; 35 | struct rtree_ctx_cache_elm_s { 36 | uintptr_t leafkey; 37 | rtree_leaf_elm_t *leaf; 38 | }; 39 | 40 | typedef struct rtree_ctx_s rtree_ctx_t; 41 | struct rtree_ctx_s { 42 | /* Direct mapped cache. */ 43 | rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE]; 44 | /* L2 LRU cache. */ 45 | rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2]; 46 | }; 47 | 48 | void rtree_ctx_data_init(rtree_ctx_t *ctx); 49 | 50 | #endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */ 51 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/safety_check.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H 2 | #define JEMALLOC_INTERNAL_SAFETY_CHECK_H 3 | 4 | void safety_check_fail(const char *format, ...); 5 | /* Can set to NULL for a default. 
*/ 6 | void safety_check_set_abort(void (*abort_fn)()); 7 | 8 | JEMALLOC_ALWAYS_INLINE void 9 | safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) { 10 | assert(usize < bumped_usize); 11 | for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { 12 | *((unsigned char *)ptr + i) = 0xBC; 13 | } 14 | } 15 | 16 | JEMALLOC_ALWAYS_INLINE void 17 | safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize) 18 | { 19 | for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { 20 | if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) { 21 | safety_check_fail("Use after free error\n"); 22 | } 23 | } 24 | } 25 | 26 | #endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */ 27 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/seq.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_SEQ_H 2 | #define JEMALLOC_INTERNAL_SEQ_H 3 | 4 | #include "jemalloc/internal/atomic.h" 5 | 6 | /* 7 | * A simple seqlock implementation. 8 | */ 9 | 10 | #define seq_define(type, short_type) \ 11 | typedef struct { \ 12 | atomic_zu_t seq; \ 13 | atomic_zu_t data[ \ 14 | (sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t)]; \ 15 | } seq_##short_type##_t; \ 16 | \ 17 | /* \ 18 | * No internal synchronization -- the caller must ensure that there's \ 19 | * only a single writer at a time. 
\ 20 | */ \ 21 | static inline void \ 22 | seq_store_##short_type(seq_##short_type##_t *dst, type *src) { \ 23 | size_t buf[sizeof(dst->data) / sizeof(size_t)]; \ 24 | buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \ 25 | memcpy(buf, src, sizeof(type)); \ 26 | size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \ 27 | atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \ 28 | atomic_fence(ATOMIC_RELEASE); \ 29 | for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \ 30 | atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \ 31 | } \ 32 | atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \ 33 | } \ 34 | \ 35 | /* Returns whether or not the read was consistent. */ \ 36 | static inline bool \ 37 | seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \ 38 | size_t buf[sizeof(src->data) / sizeof(size_t)]; \ 39 | size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \ 40 | if (seq1 % 2 != 0) { \ 41 | return false; \ 42 | } \ 43 | for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \ 44 | buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \ 45 | } \ 46 | atomic_fence(ATOMIC_ACQUIRE); \ 47 | size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \ 48 | if (seq1 != seq2) { \ 49 | return false; \ 50 | } \ 51 | memcpy(dst, buf, sizeof(type)); \ 52 | return true; \ 53 | } 54 | 55 | #endif /* JEMALLOC_INTERNAL_SEQ_H */ 56 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/smoothstep.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Generate a discrete lookup table for a sigmoid function in the smoothstep 4 | # family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table 5 | # entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode 6 | # the entries using a binary fixed point representation. 
7 | # 8 | # Usage: smoothstep.sh 9 | # 10 | # is in {smooth, smoother, smoothest}. 11 | # must be greater than zero. 12 | # must be in [0..62]; reasonable values are roughly [10..30]. 13 | # is x decimal precision. 14 | # is y decimal precision. 15 | 16 | #set -x 17 | 18 | cmd="sh smoothstep.sh $*" 19 | variant=$1 20 | nsteps=$2 21 | bfp=$3 22 | xprec=$4 23 | yprec=$5 24 | 25 | case "${variant}" in 26 | smooth) 27 | ;; 28 | smoother) 29 | ;; 30 | smoothest) 31 | ;; 32 | *) 33 | echo "Unsupported variant" 34 | exit 1 35 | ;; 36 | esac 37 | 38 | smooth() { 39 | step=$1 40 | y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` 41 | h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` 42 | } 43 | 44 | smoother() { 45 | step=$1 46 | y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` 47 | h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` 48 | } 49 | 50 | smoothest() { 51 | step=$1 52 | y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` 53 | h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' 
' ' | awk '{print $1}' ` 54 | } 55 | 56 | cat <iteration < 5) { 25 | for (i = 0; i < (1U << spin->iteration); i++) { 26 | spin_cpu_spinwait(); 27 | } 28 | spin->iteration++; 29 | } else { 30 | #ifdef _WIN32 31 | SwitchToThread(); 32 | #else 33 | sched_yield(); 34 | #endif 35 | } 36 | } 37 | 38 | #undef SPIN_INLINE 39 | 40 | #endif /* JEMALLOC_INTERNAL_SPIN_H */ 41 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/stats.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_STATS_H 2 | #define JEMALLOC_INTERNAL_STATS_H 3 | 4 | /* OPTION(opt, var_name, default, set_value_to) */ 5 | #define STATS_PRINT_OPTIONS \ 6 | OPTION('J', json, false, true) \ 7 | OPTION('g', general, true, false) \ 8 | OPTION('m', merged, config_stats, false) \ 9 | OPTION('d', destroyed, config_stats, false) \ 10 | OPTION('a', unmerged, config_stats, false) \ 11 | OPTION('b', bins, true, false) \ 12 | OPTION('l', large, true, false) \ 13 | OPTION('x', mutex, true, false) \ 14 | OPTION('e', extents, true, false) 15 | 16 | enum { 17 | #define OPTION(o, v, d, s) stats_print_option_num_##v, 18 | STATS_PRINT_OPTIONS 19 | #undef OPTION 20 | stats_print_tot_num_options 21 | }; 22 | 23 | /* Options for stats_print. */ 24 | extern bool opt_stats_print; 25 | extern char opt_stats_print_opts[stats_print_tot_num_options+1]; 26 | 27 | /* Implements je_malloc_stats_print. 
*/ 28 | void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, 29 | const char *opts); 30 | 31 | #endif /* JEMALLOC_INTERNAL_STATS_H */ 32 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/tcache_externs.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H 2 | #define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H 3 | 4 | extern bool opt_tcache; 5 | extern ssize_t opt_lg_tcache_max; 6 | 7 | extern cache_bin_info_t *tcache_bin_info; 8 | 9 | /* 10 | * Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more 11 | * large-object bins. 12 | */ 13 | extern unsigned nhbins; 14 | 15 | /* Maximum cached size class. */ 16 | extern size_t tcache_maxclass; 17 | 18 | /* 19 | * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and 20 | * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are 21 | * completely disjoint from this data structure. tcaches starts off as a sparse 22 | * array, so it has no physical memory footprint until individual pages are 23 | * touched. This allows the entire array to be allocated the first time an 24 | * explicit tcache is created without a disproportionate impact on memory usage. 
25 | */ 26 | extern tcaches_t *tcaches; 27 | 28 | size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); 29 | void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); 30 | void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, 31 | cache_bin_t *tbin, szind_t binind, bool *tcache_success); 32 | void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, 33 | szind_t binind, unsigned rem); 34 | void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, 35 | unsigned rem, tcache_t *tcache); 36 | void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, 37 | arena_t *arena); 38 | tcache_t *tcache_create_explicit(tsd_t *tsd); 39 | void tcache_cleanup(tsd_t *tsd); 40 | void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); 41 | bool tcaches_create(tsd_t *tsd, unsigned *r_ind); 42 | void tcaches_flush(tsd_t *tsd, unsigned ind); 43 | void tcaches_destroy(tsd_t *tsd, unsigned ind); 44 | bool tcache_boot(tsdn_t *tsdn); 45 | void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); 46 | void tcache_prefork(tsdn_t *tsdn); 47 | void tcache_postfork_parent(tsdn_t *tsdn); 48 | void tcache_postfork_child(tsdn_t *tsdn); 49 | void tcache_flush(tsd_t *tsd); 50 | bool tsd_tcache_data_init(tsd_t *tsd); 51 | bool tsd_tcache_enabled_data_init(tsd_t *tsd); 52 | 53 | #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */ 54 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/tcache_structs.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H 2 | #define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H 3 | 4 | #include "jemalloc/internal/cache_bin.h" 5 | #include "jemalloc/internal/ql.h" 6 | #include "jemalloc/internal/sc.h" 7 | #include "jemalloc/internal/ticker.h" 8 | #include "jemalloc/internal/tsd_types.h" 9 | 10 | /* Various uses of this struct need it to be a named 
type. */ 11 | typedef ql_elm(tsd_t) tsd_link_t; 12 | 13 | struct tcache_s { 14 | /* 15 | * To minimize our cache-footprint, we put the frequently accessed data 16 | * together at the start of this struct. 17 | */ 18 | 19 | /* Cleared after arena_prof_accum(). */ 20 | uint64_t prof_accumbytes; 21 | /* Drives incremental GC. */ 22 | ticker_t gc_ticker; 23 | /* 24 | * The pointer stacks associated with bins follow as a contiguous array. 25 | * During tcache initialization, the avail pointer in each element of 26 | * tbins is initialized to point to the proper offset within this array. 27 | */ 28 | cache_bin_t bins_small[SC_NBINS]; 29 | 30 | /* 31 | * This data is less hot; we can be a little less careful with our 32 | * footprint here. 33 | */ 34 | /* Lets us track all the tcaches in an arena. */ 35 | ql_elm(tcache_t) link; 36 | 37 | /* Logically scoped to tsd, but put here for cache layout reasons. */ 38 | ql_elm(tsd_t) tsd_link; 39 | bool in_hook; 40 | 41 | /* 42 | * The descriptor lets the arena find our cache bins without seeing the 43 | * tcache definition. This enables arenas to aggregate stats across 44 | * tcaches without having a tcache dependency. 45 | */ 46 | cache_bin_array_descriptor_t cache_bin_array_descriptor; 47 | 48 | /* The arena this tcache is associated with. */ 49 | arena_t *arena; 50 | /* Next bin to GC. */ 51 | szind_t next_gc_bin; 52 | /* For small bins, fill (ncached_max >> lg_fill_div). */ 53 | uint8_t lg_fill_div[SC_NBINS]; 54 | /* 55 | * We put the cache bins for large size classes at the end of the 56 | * struct, since some of them might not get used. This might end up 57 | * letting us avoid touching an extra page if we don't have to. 58 | */ 59 | cache_bin_t bins_large[SC_NSIZES-SC_NBINS]; 60 | }; 61 | 62 | /* Linkage for list of available (previously used) explicit tcache IDs. 
*/ 63 | struct tcaches_s { 64 | union { 65 | tcache_t *tcache; 66 | tcaches_t *next; 67 | }; 68 | }; 69 | 70 | #endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */ 71 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/tcache_types.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H 2 | #define JEMALLOC_INTERNAL_TCACHE_TYPES_H 3 | 4 | #include "jemalloc/internal/sc.h" 5 | 6 | typedef struct tcache_s tcache_t; 7 | typedef struct tcaches_s tcaches_t; 8 | 9 | /* 10 | * tcache pointers close to NULL are used to encode state information that is 11 | * used for two purposes: preventing thread caching on a per thread basis and 12 | * cleaning up during thread shutdown. 13 | */ 14 | #define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) 15 | #define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) 16 | #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) 17 | #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY 18 | 19 | /* 20 | * Absolute minimum number of cache slots for each small bin. 21 | */ 22 | #define TCACHE_NSLOTS_SMALL_MIN 20 23 | 24 | /* 25 | * Absolute maximum number of cache slots for each small bin in the thread 26 | * cache. This is an additional constraint beyond that imposed as: twice the 27 | * number of regions per slab for this size class. 28 | * 29 | * This constant must be an even number. 30 | */ 31 | #define TCACHE_NSLOTS_SMALL_MAX 200 32 | 33 | /* Number of cache slots for large size classes. */ 34 | #define TCACHE_NSLOTS_LARGE 20 35 | 36 | /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ 37 | #define LG_TCACHE_MAXCLASS_DEFAULT 15 38 | 39 | /* 40 | * TCACHE_GC_SWEEP is the approximate number of allocation events between 41 | * full GC sweeps. Integer rounding may cause the actual number to be 42 | * slightly higher, since GC is performed incrementally. 
43 | */ 44 | #define TCACHE_GC_SWEEP 8192 45 | 46 | /* Number of tcache allocation/deallocation events between incremental GCs. */ 47 | #define TCACHE_GC_INCR \ 48 | ((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1)) 49 | 50 | /* Used in TSD static initializer only. Real init in tcache_data_init(). */ 51 | #define TCACHE_ZERO_INITIALIZER {0} 52 | 53 | /* Used in TSD static initializer only. Will be initialized to opt_tcache. */ 54 | #define TCACHE_ENABLED_ZERO_INITIALIZER false 55 | 56 | /* Used for explicit tcache only. Means flushed but not destroyed. */ 57 | #define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1) 58 | 59 | #endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */ 60 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/test_hooks.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H 2 | #define JEMALLOC_INTERNAL_TEST_HOOKS_H 3 | 4 | extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)(); 5 | extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)(); 6 | 7 | #define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) 8 | 9 | #define open JEMALLOC_HOOK(open, test_hooks_libc_hook) 10 | #define read JEMALLOC_HOOK(read, test_hooks_libc_hook) 11 | #define write JEMALLOC_HOOK(write, test_hooks_libc_hook) 12 | #define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook) 13 | #define close JEMALLOC_HOOK(close, test_hooks_libc_hook) 14 | #define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook) 15 | #define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook) 16 | /* Note that this is undef'd and re-define'd in src/prof.c. 
*/ 17 | #define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) 18 | 19 | #endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */ 20 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/ticker.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_TICKER_H 2 | #define JEMALLOC_INTERNAL_TICKER_H 3 | 4 | #include "jemalloc/internal/util.h" 5 | 6 | /** 7 | * A ticker makes it easy to count-down events until some limit. You 8 | * ticker_init the ticker to trigger every nticks events. You then notify it 9 | * that an event has occurred with calls to ticker_tick (or that nticks events 10 | * have occurred with a call to ticker_ticks), which will return true (and reset 11 | * the counter) if the countdown hit zero. 12 | */ 13 | 14 | typedef struct { 15 | int32_t tick; 16 | int32_t nticks; 17 | } ticker_t; 18 | 19 | static inline void 20 | ticker_init(ticker_t *ticker, int32_t nticks) { 21 | ticker->tick = nticks; 22 | ticker->nticks = nticks; 23 | } 24 | 25 | static inline void 26 | ticker_copy(ticker_t *ticker, const ticker_t *other) { 27 | *ticker = *other; 28 | } 29 | 30 | static inline int32_t 31 | ticker_read(const ticker_t *ticker) { 32 | return ticker->tick; 33 | } 34 | 35 | /* 36 | * Not intended to be a public API. Unfortunately, on x86, neither gcc nor 37 | * clang seems smart enough to turn 38 | * ticker->tick -= nticks; 39 | * if (unlikely(ticker->tick < 0)) { 40 | * fixup ticker 41 | * return true; 42 | * } 43 | * return false; 44 | * into 45 | * subq %nticks_reg, (%ticker_reg) 46 | * js fixup ticker 47 | * 48 | * unless we force "fixup ticker" out of line. In that case, gcc gets it right, 49 | * but clang now does worse than before. So, on x86 with gcc, we force it out 50 | * of line, but otherwise let the inlining occur. 
Ordinarily this wouldn't be 51 | * worth the hassle, but this is on the fast path of both malloc and free (via 52 | * tcache_event). 53 | */ 54 | #if defined(__GNUC__) && !defined(__clang__) \ 55 | && (defined(__x86_64__) || defined(__i386__)) 56 | JEMALLOC_NOINLINE 57 | #endif 58 | static bool 59 | ticker_fixup(ticker_t *ticker) { 60 | ticker->tick = ticker->nticks; 61 | return true; 62 | } 63 | 64 | static inline bool 65 | ticker_ticks(ticker_t *ticker, int32_t nticks) { 66 | ticker->tick -= nticks; 67 | if (unlikely(ticker->tick < 0)) { 68 | return ticker_fixup(ticker); 69 | } 70 | return false; 71 | } 72 | 73 | static inline bool 74 | ticker_tick(ticker_t *ticker) { 75 | return ticker_ticks(ticker, 1); 76 | } 77 | 78 | /* 79 | * Try to tick. If ticker would fire, return true, but rely on 80 | * slowpath to reset ticker. 81 | */ 82 | static inline bool 83 | ticker_trytick(ticker_t *ticker) { 84 | --ticker->tick; 85 | if (unlikely(ticker->tick < 0)) { 86 | return true; 87 | } 88 | return false; 89 | } 90 | 91 | #endif /* JEMALLOC_INTERNAL_TICKER_H */ 92 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/tsd_malloc_thread_cleanup.h: -------------------------------------------------------------------------------- 1 | #ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H 2 | #error This file should be included only once, by tsd.h. 3 | #endif 4 | #define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H 5 | 6 | #define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL 7 | 8 | extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls; 9 | extern JEMALLOC_TSD_TYPE_ATTR(bool) tsd_initialized; 10 | extern bool tsd_booted; 11 | 12 | /* Initialization/cleanup. 
*/ 13 | JEMALLOC_ALWAYS_INLINE bool 14 | tsd_cleanup_wrapper(void) { 15 | if (tsd_initialized) { 16 | tsd_initialized = false; 17 | tsd_cleanup(&tsd_tls); 18 | } 19 | return tsd_initialized; 20 | } 21 | 22 | JEMALLOC_ALWAYS_INLINE bool 23 | tsd_boot0(void) { 24 | malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); 25 | tsd_booted = true; 26 | return false; 27 | } 28 | 29 | JEMALLOC_ALWAYS_INLINE void 30 | tsd_boot1(void) { 31 | /* Do nothing. */ 32 | } 33 | 34 | JEMALLOC_ALWAYS_INLINE bool 35 | tsd_boot(void) { 36 | return tsd_boot0(); 37 | } 38 | 39 | JEMALLOC_ALWAYS_INLINE bool 40 | tsd_booted_get(void) { 41 | return tsd_booted; 42 | } 43 | 44 | JEMALLOC_ALWAYS_INLINE bool 45 | tsd_get_allocates(void) { 46 | return false; 47 | } 48 | 49 | /* Get/set. */ 50 | JEMALLOC_ALWAYS_INLINE tsd_t * 51 | tsd_get(bool init) { 52 | return &tsd_tls; 53 | } 54 | JEMALLOC_ALWAYS_INLINE void 55 | tsd_set(tsd_t *val) { 56 | assert(tsd_booted); 57 | if (likely(&tsd_tls != val)) { 58 | tsd_tls = (*val); 59 | } 60 | tsd_initialized = true; 61 | } 62 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/tsd_tls.h: -------------------------------------------------------------------------------- 1 | #ifdef JEMALLOC_INTERNAL_TSD_TLS_H 2 | #error This file should be included only once, by tsd.h. 3 | #endif 4 | #define JEMALLOC_INTERNAL_TSD_TLS_H 5 | 6 | #define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL 7 | 8 | extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls; 9 | extern pthread_key_t tsd_tsd; 10 | extern bool tsd_booted; 11 | 12 | /* Initialization/cleanup. */ 13 | JEMALLOC_ALWAYS_INLINE bool 14 | tsd_boot0(void) { 15 | if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) { 16 | return true; 17 | } 18 | tsd_booted = true; 19 | return false; 20 | } 21 | 22 | JEMALLOC_ALWAYS_INLINE void 23 | tsd_boot1(void) { 24 | /* Do nothing. 
*/ 25 | } 26 | 27 | JEMALLOC_ALWAYS_INLINE bool 28 | tsd_boot(void) { 29 | return tsd_boot0(); 30 | } 31 | 32 | JEMALLOC_ALWAYS_INLINE bool 33 | tsd_booted_get(void) { 34 | return tsd_booted; 35 | } 36 | 37 | JEMALLOC_ALWAYS_INLINE bool 38 | tsd_get_allocates(void) { 39 | return false; 40 | } 41 | 42 | /* Get/set. */ 43 | JEMALLOC_ALWAYS_INLINE tsd_t * 44 | tsd_get(bool init) { 45 | return &tsd_tls; 46 | } 47 | 48 | JEMALLOC_ALWAYS_INLINE void 49 | tsd_set(tsd_t *val) { 50 | assert(tsd_booted); 51 | if (likely(&tsd_tls != val)) { 52 | tsd_tls = (*val); 53 | } 54 | if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) { 55 | malloc_write(": Error setting tsd.\n"); 56 | if (opt_abort) { 57 | abort(); 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/tsd_types.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_TSD_TYPES_H 2 | #define JEMALLOC_INTERNAL_TSD_TYPES_H 3 | 4 | #define MALLOC_TSD_CLEANUPS_MAX 2 5 | 6 | typedef struct tsd_s tsd_t; 7 | typedef struct tsdn_s tsdn_t; 8 | typedef bool (*malloc_tsd_cleanup_t)(void); 9 | 10 | #endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */ 11 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/internal/util.h: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_INTERNAL_UTIL_H 2 | #define JEMALLOC_INTERNAL_UTIL_H 3 | 4 | #define UTIL_INLINE static inline 5 | 6 | /* Junk fill patterns. */ 7 | #ifndef JEMALLOC_ALLOC_JUNK 8 | # define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) 9 | #endif 10 | #ifndef JEMALLOC_FREE_JUNK 11 | # define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) 12 | #endif 13 | 14 | /* 15 | * Wrap a cpp argument that contains commas such that it isn't broken up into 16 | * multiple arguments. 17 | */ 18 | #define JEMALLOC_ARG_CONCAT(...) 
__VA_ARGS__ 19 | 20 | /* cpp macro definition stringification. */ 21 | #define STRINGIFY_HELPER(x) #x 22 | #define STRINGIFY(x) STRINGIFY_HELPER(x) 23 | 24 | /* 25 | * Silence compiler warnings due to uninitialized values. This is used 26 | * wherever the compiler fails to recognize that the variable is never used 27 | * uninitialized. 28 | */ 29 | #define JEMALLOC_CC_SILENCE_INIT(v) = v 30 | 31 | #ifdef __GNUC__ 32 | # define likely(x) __builtin_expect(!!(x), 1) 33 | # define unlikely(x) __builtin_expect(!!(x), 0) 34 | #else 35 | # define likely(x) !!(x) 36 | # define unlikely(x) !!(x) 37 | #endif 38 | 39 | #if !defined(JEMALLOC_INTERNAL_UNREACHABLE) 40 | # error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure 41 | #endif 42 | 43 | #define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() 44 | 45 | /* Set error code. */ 46 | UTIL_INLINE void 47 | set_errno(int errnum) { 48 | #ifdef _WIN32 49 | SetLastError(errnum); 50 | #else 51 | errno = errnum; 52 | #endif 53 | } 54 | 55 | /* Get last error code. */ 56 | UTIL_INLINE int 57 | get_errno(void) { 58 | #ifdef _WIN32 59 | return GetLastError(); 60 | #else 61 | return errno; 62 | #endif 63 | } 64 | 65 | #undef UTIL_INLINE 66 | 67 | #endif /* JEMALLOC_INTERNAL_UTIL_H */ 68 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/jemalloc/jemalloc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | objroot=$1 4 | 5 | cat < 5 | 6 | /* MSVC doesn't define _Bool or bool in C, but does have BOOL */ 7 | /* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ 8 | /* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as 9 | * a built-in type. 
*/ 10 | #ifndef __clang__ 11 | typedef BOOL _Bool; 12 | #endif 13 | 14 | #define bool _Bool 15 | #define true 1 16 | #define false 0 17 | 18 | #define __bool_true_false_are_defined 1 19 | 20 | #endif /* stdbool_h */ 21 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/msvc_compat/strings.h: -------------------------------------------------------------------------------- 1 | #ifndef strings_h 2 | #define strings_h 3 | 4 | /* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided 5 | * for both */ 6 | #ifdef _MSC_VER 7 | # include 8 | # pragma intrinsic(_BitScanForward) 9 | static __forceinline int ffsl(long x) { 10 | unsigned long i; 11 | 12 | if (_BitScanForward(&i, x)) { 13 | return i + 1; 14 | } 15 | return 0; 16 | } 17 | 18 | static __forceinline int ffs(int x) { 19 | return ffsl(x); 20 | } 21 | 22 | # ifdef _M_X64 23 | # pragma intrinsic(_BitScanForward64) 24 | # endif 25 | 26 | static __forceinline int ffsll(unsigned __int64 x) { 27 | unsigned long i; 28 | #ifdef _M_X64 29 | if (_BitScanForward64(&i, x)) { 30 | return i + 1; 31 | } 32 | return 0; 33 | #else 34 | // Fallback for 32-bit build where 64-bit version not available 35 | // assuming little endian 36 | union { 37 | unsigned __int64 ll; 38 | unsigned long l[2]; 39 | } s; 40 | 41 | s.ll = x; 42 | 43 | if (_BitScanForward(&i, s.l[0])) { 44 | return i + 1; 45 | } else if(_BitScanForward(&i, s.l[1])) { 46 | return i + 33; 47 | } 48 | return 0; 49 | #endif 50 | } 51 | 52 | #else 53 | # define ffsll(x) __builtin_ffsll(x) 54 | # define ffsl(x) __builtin_ffsl(x) 55 | # define ffs(x) __builtin_ffs(x) 56 | #endif 57 | 58 | #endif /* strings_h */ 59 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/include/msvc_compat/windows_extra.h: -------------------------------------------------------------------------------- 1 | #ifndef MSVC_COMPAT_WINDOWS_EXTRA_H 2 | #define MSVC_COMPAT_WINDOWS_EXTRA_H 3 
#include <errno.h> 5 | 6 | #endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
Now the project can be opened and built in Visual Studio: 23 | msvc\jemalloc_vc2017.sln 24 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF} 6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx 7 | 8 | 9 | {93995380-89BD-4b04-88EB-625FBE52EBFB} 10 | h;hh;hpp;hxx;hm;inl;inc;xsd 11 | 12 | 13 | 14 | 15 | Source Files 16 | 17 | 18 | Source Files 19 | 20 | 21 | 22 | 23 | Header Files 24 | 25 | 26 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF} 6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx 7 | 8 | 9 | {93995380-89BD-4b04-88EB-625FBE52EBFB} 10 | h;hh;hpp;hxx;hm;inl;inc;xsd 11 | 12 | 13 | 14 | 15 | Source Files 16 | 17 | 18 | Source Files 19 | 20 | 21 | 22 | 23 | Header Files 24 | 25 | 26 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/msvc/test_threads/test_threads.cpp: -------------------------------------------------------------------------------- 1 | // jemalloc C++ threaded test 2 | // Author: Rustam Abdullaev 3 | // Public Domain 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | using std::vector; 15 | using std::thread; 16 | using std::uniform_int_distribution; 17 | using std::minstd_rand; 18 | 19 | int test_threads() { 20 | je_malloc_conf = "narenas:3"; 21 | int narenas = 0; 22 | size_t sz = sizeof(narenas); 23 | je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0); 24 | if (narenas != 3) { 25 | printf("Error: unexpected number of arenas: %d\n", 
narenas); 26 | return 1; 27 | } 28 | static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 }; 29 | static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0])); 30 | vector workers; 31 | static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50; 32 | je_malloc_stats_print(NULL, NULL, NULL); 33 | size_t allocated1; 34 | size_t sz1 = sizeof(allocated1); 35 | je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0); 36 | printf("\nPress Enter to start threads...\n"); 37 | getchar(); 38 | printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2); 39 | for (int i = 0; i < numThreads; i++) { 40 | workers.emplace_back([tid=i]() { 41 | uniform_int_distribution sizeDist(0, numSizes - 1); 42 | minstd_rand rnd(tid * 17); 43 | uint8_t* ptrs[numAllocsMax]; 44 | int ptrsz[numAllocsMax]; 45 | for (int i = 0; i < numIter1; ++i) { 46 | thread t([&]() { 47 | for (int i = 0; i < numIter2; ++i) { 48 | const int numAllocs = numAllocsMax - sizeDist(rnd); 49 | for (int j = 0; j < numAllocs; j += 64) { 50 | const int x = sizeDist(rnd); 51 | const int sz = sizes[x]; 52 | ptrsz[j] = sz; 53 | ptrs[j] = (uint8_t*)je_malloc(sz); 54 | if (!ptrs[j]) { 55 | printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. 
%d\n", sz, tid, i, j, x); 56 | exit(1); 57 | } 58 | for (int k = 0; k < sz; k++) 59 | ptrs[j][k] = tid + k; 60 | } 61 | for (int j = 0; j < numAllocs; j += 64) { 62 | for (int k = 0, sz = ptrsz[j]; k < sz; k++) 63 | if (ptrs[j][k] != (uint8_t)(tid + k)) { 64 | printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k)); 65 | exit(1); 66 | } 67 | je_free(ptrs[j]); 68 | } 69 | } 70 | }); 71 | t.join(); 72 | } 73 | }); 74 | } 75 | for (thread& t : workers) { 76 | t.join(); 77 | } 78 | je_malloc_stats_print(NULL, NULL, NULL); 79 | size_t allocated2; 80 | je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0); 81 | size_t leaked = allocated2 - allocated1; 82 | printf("\nDone. Leaked: %zd bytes\n", leaked); 83 | bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet) 84 | printf("\nTest %s!\n", (failed ? "FAILED" : "successful")); 85 | printf("\nPress Enter to continue...\n"); 86 | getchar(); 87 | return failed ? 
1 : 0; 88 | } 89 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/msvc/test_threads/test_threads.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | int test_threads(); 4 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/msvc/test_threads/test_threads_main.cpp: -------------------------------------------------------------------------------- 1 | #include "test_threads.h" 2 | #include 3 | #include 4 | #include 5 | 6 | using namespace std::chrono_literals; 7 | 8 | int main(int argc, char** argv) { 9 | int rc = test_threads(); 10 | return rc; 11 | } 12 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/run_tests.sh: -------------------------------------------------------------------------------- 1 | $(dirname "$)")/scripts/gen_run_tests.py | bash 2 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/bin.c: -------------------------------------------------------------------------------- 1 | #include "jemalloc/internal/jemalloc_preamble.h" 2 | #include "jemalloc/internal/jemalloc_internal_includes.h" 3 | 4 | #include "jemalloc/internal/assert.h" 5 | #include "jemalloc/internal/bin.h" 6 | #include "jemalloc/internal/sc.h" 7 | #include "jemalloc/internal/witness.h" 8 | 9 | bin_info_t bin_infos[SC_NBINS]; 10 | 11 | static void 12 | bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], 13 | bin_info_t bin_infos[SC_NBINS]) { 14 | for (unsigned i = 0; i < SC_NBINS; i++) { 15 | bin_info_t *bin_info = &bin_infos[i]; 16 | sc_t *sc = &sc_data->sc[i]; 17 | bin_info->reg_size = ((size_t)1U << sc->lg_base) 18 | + ((size_t)sc->ndelta << sc->lg_delta); 19 | bin_info->slab_size = (sc->pgs << LG_PAGE); 20 | bin_info->nregs = 21 | (uint32_t)(bin_info->slab_size / bin_info->reg_size); 22 | bin_info->n_shards = 
bin_shard_sizes[i]; 23 | bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER( 24 | bin_info->nregs); 25 | bin_info->bitmap_info = bitmap_info; 26 | } 27 | } 28 | 29 | bool 30 | bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size, 31 | size_t end_size, size_t nshards) { 32 | if (nshards > BIN_SHARDS_MAX || nshards == 0) { 33 | return true; 34 | } 35 | 36 | if (start_size > SC_SMALL_MAXCLASS) { 37 | return false; 38 | } 39 | if (end_size > SC_SMALL_MAXCLASS) { 40 | end_size = SC_SMALL_MAXCLASS; 41 | } 42 | 43 | /* Compute the index since this may happen before sz init. */ 44 | szind_t ind1 = sz_size2index_compute(start_size); 45 | szind_t ind2 = sz_size2index_compute(end_size); 46 | for (unsigned i = ind1; i <= ind2; i++) { 47 | bin_shard_sizes[i] = (unsigned)nshards; 48 | } 49 | 50 | return false; 51 | } 52 | 53 | void 54 | bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) { 55 | /* Load the default number of shards. */ 56 | for (unsigned i = 0; i < SC_NBINS; i++) { 57 | bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT; 58 | } 59 | } 60 | 61 | void 62 | bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { 63 | assert(sc_data->initialized); 64 | bin_infos_init(sc_data, bin_shard_sizes, bin_infos); 65 | } 66 | 67 | bool 68 | bin_init(bin_t *bin) { 69 | if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN, 70 | malloc_mutex_rank_exclusive)) { 71 | return true; 72 | } 73 | bin->slabcur = NULL; 74 | extent_heap_new(&bin->slabs_nonfull); 75 | extent_list_init(&bin->slabs_full); 76 | if (config_stats) { 77 | memset(&bin->stats, 0, sizeof(bin_stats_t)); 78 | } 79 | return false; 80 | } 81 | 82 | void 83 | bin_prefork(tsdn_t *tsdn, bin_t *bin) { 84 | malloc_mutex_prefork(tsdn, &bin->lock); 85 | } 86 | 87 | void 88 | bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) { 89 | malloc_mutex_postfork_parent(tsdn, &bin->lock); 90 | } 91 | 92 | void 93 | bin_postfork_child(tsdn_t *tsdn, bin_t *bin) { 94 | malloc_mutex_postfork_child(tsdn, 
&bin->lock); 95 | } 96 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/div.c: -------------------------------------------------------------------------------- 1 | #include "jemalloc/internal/jemalloc_preamble.h" 2 | 3 | #include "jemalloc/internal/div.h" 4 | 5 | #include "jemalloc/internal/assert.h" 6 | 7 | /* 8 | * Suppose we have n = q * d, all integers. We know n and d, and want q = n / d. 9 | * 10 | * For any k, we have (here, all division is exact; not C-style rounding): 11 | * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where 12 | * r = (-2^k) mod d. 13 | * 14 | * Expanding this out: 15 | * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k) 16 | * = floor(n / d + (r / d) * (n / 2^k)). 17 | * 18 | * The fractional part of n / d is 0 (because of the assumption that d divides n 19 | * exactly), so we have: 20 | * ... = n / d + floor((r / d) * (n / 2^k)) 21 | * 22 | * So that our initial expression is equal to the quantity we seek, so long as 23 | * (r / d) * (n / 2^k) < 1. 24 | * 25 | * r is a remainder mod d, so r < d and r / d < 1 always. We can make 26 | * n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works. 27 | */ 28 | 29 | void 30 | div_init(div_info_t *div_info, size_t d) { 31 | /* Nonsensical. */ 32 | assert(d != 0); 33 | /* 34 | * This would make the value of magic too high to fit into a uint32_t 35 | * (we would want magic = 2^32 exactly). This would mess with code gen 36 | * on 32-bit machines. 37 | */ 38 | assert(d != 1); 39 | 40 | uint64_t two_to_k = ((uint64_t)1 << 32); 41 | uint32_t magic = (uint32_t)(two_to_k / d); 42 | 43 | /* 44 | * We want magic = ceil(2^k / d), but C gives us floor. We have to 45 | * increment it unless the result was exact (i.e. unless d is a power of 46 | * two). 
47 | */ 48 | if (two_to_k % d != 0) { 49 | magic++; 50 | } 51 | div_info->magic = magic; 52 | #ifdef JEMALLOC_DEBUG 53 | div_info->d = d; 54 | #endif 55 | } 56 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/extent_mmap.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_EXTENT_MMAP_C_ 2 | #include "jemalloc/internal/jemalloc_preamble.h" 3 | #include "jemalloc/internal/jemalloc_internal_includes.h" 4 | 5 | #include "jemalloc/internal/assert.h" 6 | #include "jemalloc/internal/extent_mmap.h" 7 | 8 | /******************************************************************************/ 9 | /* Data. */ 10 | 11 | bool opt_retain = 12 | #ifdef JEMALLOC_RETAIN 13 | true 14 | #else 15 | false 16 | #endif 17 | ; 18 | 19 | /******************************************************************************/ 20 | 21 | void * 22 | extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, 23 | bool *commit) { 24 | assert(alignment == ALIGNMENT_CEILING(alignment, PAGE)); 25 | void *ret = pages_map(new_addr, size, alignment, commit); 26 | if (ret == NULL) { 27 | return NULL; 28 | } 29 | assert(ret != NULL); 30 | if (*commit) { 31 | *zero = true; 32 | } 33 | return ret; 34 | } 35 | 36 | bool 37 | extent_dalloc_mmap(void *addr, size_t size) { 38 | if (!opt_retain) { 39 | pages_unmap(addr, size); 40 | } 41 | return opt_retain; 42 | } 43 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/hash.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_HASH_C_ 2 | #include "jemalloc/internal/jemalloc_preamble.h" 3 | #include "jemalloc/internal/jemalloc_internal_includes.h" 4 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/log.c: -------------------------------------------------------------------------------- 1 | #include 
"jemalloc/internal/jemalloc_preamble.h" 2 | #include "jemalloc/internal/jemalloc_internal_includes.h" 3 | 4 | #include "jemalloc/internal/log.h" 5 | 6 | char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; 7 | atomic_b_t log_init_done = ATOMIC_INIT(false); 8 | 9 | /* 10 | * Returns true if we were able to pick out a segment. Fills in r_segment_end 11 | * with a pointer to the first character after the end of the string. 12 | */ 13 | static const char * 14 | log_var_extract_segment(const char* segment_begin) { 15 | const char *end; 16 | for (end = segment_begin; *end != '\0' && *end != '|'; end++) { 17 | } 18 | return end; 19 | } 20 | 21 | static bool 22 | log_var_matches_segment(const char *segment_begin, const char *segment_end, 23 | const char *log_var_begin, const char *log_var_end) { 24 | assert(segment_begin <= segment_end); 25 | assert(log_var_begin < log_var_end); 26 | 27 | ptrdiff_t segment_len = segment_end - segment_begin; 28 | ptrdiff_t log_var_len = log_var_end - log_var_begin; 29 | /* The special '.' segment matches everything. */ 30 | if (segment_len == 1 && *segment_begin == '.') { 31 | return true; 32 | } 33 | if (segment_len == log_var_len) { 34 | return strncmp(segment_begin, log_var_begin, segment_len) == 0; 35 | } else if (segment_len < log_var_len) { 36 | return strncmp(segment_begin, log_var_begin, segment_len) == 0 37 | && log_var_begin[segment_len] == '.'; 38 | } else { 39 | return false; 40 | } 41 | } 42 | 43 | unsigned 44 | log_var_update_state(log_var_t *log_var) { 45 | const char *log_var_begin = log_var->name; 46 | const char *log_var_end = log_var->name + strlen(log_var->name); 47 | 48 | /* Pointer to one before the beginning of the current segment. */ 49 | const char *segment_begin = log_var_names; 50 | 51 | /* 52 | * If log_init done is false, we haven't parsed the malloc conf yet. To 53 | * avoid log-spew, we default to not displaying anything. 
54 | */ 55 | if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) { 56 | return LOG_INITIALIZED_NOT_ENABLED; 57 | } 58 | 59 | while (true) { 60 | const char *segment_end = log_var_extract_segment( 61 | segment_begin); 62 | assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE); 63 | if (log_var_matches_segment(segment_begin, segment_end, 64 | log_var_begin, log_var_end)) { 65 | atomic_store_u(&log_var->state, LOG_ENABLED, 66 | ATOMIC_RELAXED); 67 | return LOG_ENABLED; 68 | } 69 | if (*segment_end == '\0') { 70 | /* Hit the end of the segment string with no match. */ 71 | atomic_store_u(&log_var->state, 72 | LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED); 73 | return LOG_INITIALIZED_NOT_ENABLED; 74 | } 75 | /* Otherwise, skip the delimiter and continue. */ 76 | segment_begin = segment_end + 1; 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/mutex_pool.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MUTEX_POOL_C_ 2 | 3 | #include "jemalloc/internal/jemalloc_preamble.h" 4 | #include "jemalloc/internal/jemalloc_internal_includes.h" 5 | 6 | #include "jemalloc/internal/mutex.h" 7 | #include "jemalloc/internal/mutex_pool.h" 8 | 9 | bool 10 | mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) { 11 | for (int i = 0; i < MUTEX_POOL_SIZE; ++i) { 12 | if (malloc_mutex_init(&pool->mutexes[i], name, rank, 13 | malloc_mutex_address_ordered)) { 14 | return true; 15 | } 16 | } 17 | return false; 18 | } 19 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/prng.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_PRNG_C_ 2 | #include "jemalloc/internal/jemalloc_preamble.h" 3 | #include "jemalloc/internal/jemalloc_internal_includes.h" 4 | -------------------------------------------------------------------------------- 
/jemalloc-5.2.1/src/safety_check.c: -------------------------------------------------------------------------------- 1 | #include "jemalloc/internal/jemalloc_preamble.h" 2 | #include "jemalloc/internal/jemalloc_internal_includes.h" 3 | 4 | static void (*safety_check_abort)(const char *message); 5 | 6 | void safety_check_set_abort(void (*abort_fn)(const char *)) { 7 | safety_check_abort = abort_fn; 8 | } 9 | 10 | void safety_check_fail(const char *format, ...) { 11 | char buf[MALLOC_PRINTF_BUFSIZE]; 12 | 13 | va_list ap; 14 | va_start(ap, format); 15 | malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap); 16 | va_end(ap); 17 | 18 | if (safety_check_abort == NULL) { 19 | malloc_write(buf); 20 | abort(); 21 | } else { 22 | safety_check_abort(buf); 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/sz.c: -------------------------------------------------------------------------------- 1 | #include "jemalloc/internal/jemalloc_preamble.h" 2 | #include "jemalloc/internal/sz.h" 3 | 4 | JEMALLOC_ALIGNED(CACHELINE) 5 | size_t sz_pind2sz_tab[SC_NPSIZES+1]; 6 | 7 | static void 8 | sz_boot_pind2sz_tab(const sc_data_t *sc_data) { 9 | int pind = 0; 10 | for (unsigned i = 0; i < SC_NSIZES; i++) { 11 | const sc_t *sc = &sc_data->sc[i]; 12 | if (sc->psz) { 13 | sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base) 14 | + (ZU(sc->ndelta) << sc->lg_delta); 15 | pind++; 16 | } 17 | } 18 | for (int i = pind; i <= (int)SC_NPSIZES; i++) { 19 | sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE; 20 | } 21 | } 22 | 23 | JEMALLOC_ALIGNED(CACHELINE) 24 | size_t sz_index2size_tab[SC_NSIZES]; 25 | 26 | static void 27 | sz_boot_index2size_tab(const sc_data_t *sc_data) { 28 | for (unsigned i = 0; i < SC_NSIZES; i++) { 29 | const sc_t *sc = &sc_data->sc[i]; 30 | sz_index2size_tab[i] = (ZU(1) << sc->lg_base) 31 | + (ZU(sc->ndelta) << (sc->lg_delta)); 32 | } 33 | } 34 | 35 | /* 36 | * To keep this table small, we divide sizes by the 
tiny min size, which gives 37 | * the smallest interval for which the result can change. 38 | */ 39 | JEMALLOC_ALIGNED(CACHELINE) 40 | uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1]; 41 | 42 | static void 43 | sz_boot_size2index_tab(const sc_data_t *sc_data) { 44 | size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1; 45 | size_t dst_ind = 0; 46 | for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max; 47 | sc_ind++) { 48 | const sc_t *sc = &sc_data->sc[sc_ind]; 49 | size_t sz = (ZU(1) << sc->lg_base) 50 | + (ZU(sc->ndelta) << sc->lg_delta); 51 | size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1) 52 | >> SC_LG_TINY_MIN); 53 | for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) { 54 | sz_size2index_tab[dst_ind] = sc_ind; 55 | } 56 | } 57 | } 58 | 59 | void 60 | sz_boot(const sc_data_t *sc_data) { 61 | sz_boot_pind2sz_tab(sc_data); 62 | sz_boot_index2size_tab(sc_data); 63 | sz_boot_size2index_tab(sc_data); 64 | } 65 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/test_hooks.c: -------------------------------------------------------------------------------- 1 | #include "jemalloc/internal/jemalloc_preamble.h" 2 | 3 | /* 4 | * The hooks are a little bit screwy -- they're not genuinely exported in the 5 | * sense that we want them available to end-users, but we do want them visible 6 | * from outside the generated library, so that we can use them in test code. 
7 | */ 8 | JEMALLOC_EXPORT 9 | void (*test_hooks_arena_new_hook)() = NULL; 10 | 11 | JEMALLOC_EXPORT 12 | void (*test_hooks_libc_hook)() = NULL; 13 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/ticker.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_TICKER_C_ 2 | #include "jemalloc/internal/jemalloc_preamble.h" 3 | #include "jemalloc/internal/jemalloc_internal_includes.h" 4 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/src/witness.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_WITNESS_C_ 2 | #include "jemalloc/internal/jemalloc_preamble.h" 3 | #include "jemalloc/internal/jemalloc_internal_includes.h" 4 | 5 | #include "jemalloc/internal/assert.h" 6 | #include "jemalloc/internal/malloc_io.h" 7 | 8 | void 9 | witness_init(witness_t *witness, const char *name, witness_rank_t rank, 10 | witness_comp_t *comp, void *opaque) { 11 | witness->name = name; 12 | witness->rank = rank; 13 | witness->comp = comp; 14 | witness->opaque = opaque; 15 | } 16 | 17 | static void 18 | witness_lock_error_impl(const witness_list_t *witnesses, 19 | const witness_t *witness) { 20 | witness_t *w; 21 | 22 | malloc_printf(": Lock rank order reversal:"); 23 | ql_foreach(w, witnesses, link) { 24 | malloc_printf(" %s(%u)", w->name, w->rank); 25 | } 26 | malloc_printf(" %s(%u)\n", witness->name, witness->rank); 27 | abort(); 28 | } 29 | witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl; 30 | 31 | static void 32 | witness_owner_error_impl(const witness_t *witness) { 33 | malloc_printf(": Should own %s(%u)\n", witness->name, 34 | witness->rank); 35 | abort(); 36 | } 37 | witness_owner_error_t *JET_MUTABLE witness_owner_error = 38 | witness_owner_error_impl; 39 | 40 | static void 41 | witness_not_owner_error_impl(const witness_t *witness) { 42 | 
malloc_printf(": Should not own %s(%u)\n", witness->name, 43 | witness->rank); 44 | abort(); 45 | } 46 | witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error = 47 | witness_not_owner_error_impl; 48 | 49 | static void 50 | witness_depth_error_impl(const witness_list_t *witnesses, 51 | witness_rank_t rank_inclusive, unsigned depth) { 52 | witness_t *w; 53 | 54 | malloc_printf(": Should own %u lock%s of rank >= %u:", depth, 55 | (depth != 1) ? "s" : "", rank_inclusive); 56 | ql_foreach(w, witnesses, link) { 57 | malloc_printf(" %s(%u)", w->name, w->rank); 58 | } 59 | malloc_printf("\n"); 60 | abort(); 61 | } 62 | witness_depth_error_t *JET_MUTABLE witness_depth_error = 63 | witness_depth_error_impl; 64 | 65 | void 66 | witnesses_cleanup(witness_tsd_t *witness_tsd) { 67 | witness_assert_lockless(witness_tsd_tsdn(witness_tsd)); 68 | 69 | /* Do nothing. */ 70 | } 71 | 72 | void 73 | witness_prefork(witness_tsd_t *witness_tsd) { 74 | if (!config_debug) { 75 | return; 76 | } 77 | witness_tsd->forking = true; 78 | } 79 | 80 | void 81 | witness_postfork_parent(witness_tsd_t *witness_tsd) { 82 | if (!config_debug) { 83 | return; 84 | } 85 | witness_tsd->forking = false; 86 | } 87 | 88 | void 89 | witness_postfork_child(witness_tsd_t *witness_tsd) { 90 | if (!config_debug) { 91 | return; 92 | } 93 | #ifndef JEMALLOC_MUTEX_INIT_CB 94 | witness_list_t *witnesses; 95 | 96 | witnesses = &witness_tsd->witnesses; 97 | ql_new(witnesses); 98 | #endif 99 | witness_tsd->forking = false; 100 | } 101 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/include/test/btalloc.h: -------------------------------------------------------------------------------- 1 | /* btalloc() provides a mechanism for allocating via permuted backtraces. 
*/ 2 | void *btalloc(size_t size, unsigned bits); 3 | 4 | #define btalloc_n_proto(n) \ 5 | void *btalloc_##n(size_t size, unsigned bits); 6 | btalloc_n_proto(0) 7 | btalloc_n_proto(1) 8 | 9 | #define btalloc_n_gen(n) \ 10 | void * \ 11 | btalloc_##n(size_t size, unsigned bits) { \ 12 | void *p; \ 13 | \ 14 | if (bits == 0) { \ 15 | p = mallocx(size, 0); \ 16 | } else { \ 17 | switch (bits & 0x1U) { \ 18 | case 0: \ 19 | p = (btalloc_0(size, bits >> 1)); \ 20 | break; \ 21 | case 1: \ 22 | p = (btalloc_1(size, bits >> 1)); \ 23 | break; \ 24 | default: not_reached(); \ 25 | } \ 26 | } \ 27 | /* Intentionally sabotage tail call optimization. */ \ 28 | assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ 29 | return p; \ 30 | } 31 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/include/test/jemalloc_test_defs.h.in: -------------------------------------------------------------------------------- 1 | #include "jemalloc/internal/jemalloc_internal_defs.h" 2 | #include "jemalloc/internal/jemalloc_internal_decls.h" 3 | 4 | /* 5 | * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its 6 | * dependencies are notoriously unportable in practice. 7 | */ 8 | #undef HAVE_SSE2 9 | #undef HAVE_ALTIVEC 10 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/include/test/mq.h: -------------------------------------------------------------------------------- 1 | void mq_nanosleep(unsigned ns); 2 | 3 | /* 4 | * Simple templated message queue implementation that relies on only mutexes for 5 | * synchronization (which reduces portability issues). 
Given the following 6 | * setup: 7 | * 8 | * typedef struct mq_msg_s mq_msg_t; 9 | * struct mq_msg_s { 10 | * mq_msg(mq_msg_t) link; 11 | * [message data] 12 | * }; 13 | * mq_gen(, mq_, mq_t, mq_msg_t, link) 14 | * 15 | * The API is as follows: 16 | * 17 | * bool mq_init(mq_t *mq); 18 | * void mq_fini(mq_t *mq); 19 | * unsigned mq_count(mq_t *mq); 20 | * mq_msg_t *mq_tryget(mq_t *mq); 21 | * mq_msg_t *mq_get(mq_t *mq); 22 | * void mq_put(mq_t *mq, mq_msg_t *msg); 23 | * 24 | * The message queue linkage embedded in each message is to be treated as 25 | * externally opaque (no need to initialize or clean up externally). mq_fini() 26 | * does not perform any cleanup of messages, since it knows nothing of their 27 | * payloads. 28 | */ 29 | #define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) 30 | 31 | #define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ 32 | typedef struct { \ 33 | mtx_t lock; \ 34 | ql_head(a_mq_msg_type) msgs; \ 35 | unsigned count; \ 36 | } a_mq_type; \ 37 | a_attr bool \ 38 | a_prefix##init(a_mq_type *mq) { \ 39 | \ 40 | if (mtx_init(&mq->lock)) { \ 41 | return true; \ 42 | } \ 43 | ql_new(&mq->msgs); \ 44 | mq->count = 0; \ 45 | return false; \ 46 | } \ 47 | a_attr void \ 48 | a_prefix##fini(a_mq_type *mq) { \ 49 | mtx_fini(&mq->lock); \ 50 | } \ 51 | a_attr unsigned \ 52 | a_prefix##count(a_mq_type *mq) { \ 53 | unsigned count; \ 54 | \ 55 | mtx_lock(&mq->lock); \ 56 | count = mq->count; \ 57 | mtx_unlock(&mq->lock); \ 58 | return count; \ 59 | } \ 60 | a_attr a_mq_msg_type * \ 61 | a_prefix##tryget(a_mq_type *mq) { \ 62 | a_mq_msg_type *msg; \ 63 | \ 64 | mtx_lock(&mq->lock); \ 65 | msg = ql_first(&mq->msgs); \ 66 | if (msg != NULL) { \ 67 | ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ 68 | mq->count--; \ 69 | } \ 70 | mtx_unlock(&mq->lock); \ 71 | return msg; \ 72 | } \ 73 | a_attr a_mq_msg_type * \ 74 | a_prefix##get(a_mq_type *mq) { \ 75 | a_mq_msg_type *msg; \ 76 | unsigned ns; \ 77 | \ 78 | msg = 
a_prefix##tryget(mq); \ 79 | if (msg != NULL) { \ 80 | return msg; \ 81 | } \ 82 | \ 83 | ns = 1; \ 84 | while (true) { \ 85 | mq_nanosleep(ns); \ 86 | msg = a_prefix##tryget(mq); \ 87 | if (msg != NULL) { \ 88 | return msg; \ 89 | } \ 90 | if (ns < 1000*1000*1000) { \ 91 | /* Double sleep time, up to max 1 second. */ \ 92 | ns <<= 1; \ 93 | if (ns > 1000*1000*1000) { \ 94 | ns = 1000*1000*1000; \ 95 | } \ 96 | } \ 97 | } \ 98 | } \ 99 | a_attr void \ 100 | a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \ 101 | \ 102 | mtx_lock(&mq->lock); \ 103 | ql_elm_new(msg, a_field); \ 104 | ql_tail_insert(&mq->msgs, msg, a_field); \ 105 | mq->count++; \ 106 | mtx_unlock(&mq->lock); \ 107 | } 108 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/include/test/mtx.h: -------------------------------------------------------------------------------- 1 | /* 2 | * mtx is a slightly simplified version of malloc_mutex. This code duplication 3 | * is unfortunate, but there are allocator bootstrapping considerations that 4 | * would leak into the test infrastructure if malloc_mutex were used directly 5 | * in tests. 6 | */ 7 | 8 | typedef struct { 9 | #ifdef _WIN32 10 | CRITICAL_SECTION lock; 11 | #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) 12 | os_unfair_lock lock; 13 | #else 14 | pthread_mutex_t lock; 15 | #endif 16 | } mtx_t; 17 | 18 | bool mtx_init(mtx_t *mtx); 19 | void mtx_fini(mtx_t *mtx); 20 | void mtx_lock(mtx_t *mtx); 21 | void mtx_unlock(mtx_t *mtx); 22 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/include/test/thd.h: -------------------------------------------------------------------------------- 1 | /* Abstraction layer for threading in tests. 
*/ 2 | #ifdef _WIN32 3 | typedef HANDLE thd_t; 4 | #else 5 | typedef pthread_t thd_t; 6 | #endif 7 | 8 | void thd_create(thd_t *thd, void *(*proc)(void *), void *arg); 9 | void thd_join(thd_t thd, void **ret); 10 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/include/test/timer.h: -------------------------------------------------------------------------------- 1 | /* Simple timer, for use in benchmark reporting. */ 2 | 3 | typedef struct { 4 | nstime_t t0; 5 | nstime_t t1; 6 | } timedelta_t; 7 | 8 | void timer_start(timedelta_t *timer); 9 | void timer_stop(timedelta_t *timer); 10 | uint64_t timer_usec(const timedelta_t *timer); 11 | void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); 12 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/MALLOCX_ARENA.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #define NTHREADS 10 4 | 5 | static bool have_dss = 6 | #ifdef JEMALLOC_DSS 7 | true 8 | #else 9 | false 10 | #endif 11 | ; 12 | 13 | void * 14 | thd_start(void *arg) { 15 | unsigned thread_ind = (unsigned)(uintptr_t)arg; 16 | unsigned arena_ind; 17 | void *p; 18 | size_t sz; 19 | 20 | sz = sizeof(arena_ind); 21 | assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 22 | 0, "Error in arenas.create"); 23 | 24 | if (thread_ind % 4 != 3) { 25 | size_t mib[3]; 26 | size_t miblen = sizeof(mib) / sizeof(size_t); 27 | const char *dss_precs[] = {"disabled", "primary", "secondary"}; 28 | unsigned prec_ind = thread_ind % 29 | (sizeof(dss_precs)/sizeof(char*)); 30 | const char *dss = dss_precs[prec_ind]; 31 | int expected_err = (have_dss || prec_ind == 0) ? 
0 : EFAULT; 32 | assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, 33 | "Error in mallctlnametomib()"); 34 | mib[1] = arena_ind; 35 | assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, 36 | sizeof(const char *)), expected_err, 37 | "Error in mallctlbymib()"); 38 | } 39 | 40 | p = mallocx(1, MALLOCX_ARENA(arena_ind)); 41 | assert_ptr_not_null(p, "Unexpected mallocx() error"); 42 | dallocx(p, 0); 43 | 44 | return NULL; 45 | } 46 | 47 | TEST_BEGIN(test_MALLOCX_ARENA) { 48 | thd_t thds[NTHREADS]; 49 | unsigned i; 50 | 51 | for (i = 0; i < NTHREADS; i++) { 52 | thd_create(&thds[i], thd_start, 53 | (void *)(uintptr_t)i); 54 | } 55 | 56 | for (i = 0; i < NTHREADS; i++) { 57 | thd_join(thds[i], NULL); 58 | } 59 | } 60 | TEST_END 61 | 62 | int 63 | main(void) { 64 | return test( 65 | test_MALLOCX_ARENA); 66 | } 67 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/allocated.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | static const bool config_stats = 4 | #ifdef JEMALLOC_STATS 5 | true 6 | #else 7 | false 8 | #endif 9 | ; 10 | 11 | void * 12 | thd_start(void *arg) { 13 | int err; 14 | void *p; 15 | uint64_t a0, a1, d0, d1; 16 | uint64_t *ap0, *ap1, *dp0, *dp1; 17 | size_t sz, usize; 18 | 19 | sz = sizeof(a0); 20 | if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { 21 | if (err == ENOENT) { 22 | goto label_ENOENT; 23 | } 24 | test_fail("%s(): Error in mallctl(): %s", __func__, 25 | strerror(err)); 26 | } 27 | sz = sizeof(ap0); 28 | if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) { 29 | if (err == ENOENT) { 30 | goto label_ENOENT; 31 | } 32 | test_fail("%s(): Error in mallctl(): %s", __func__, 33 | strerror(err)); 34 | } 35 | assert_u64_eq(*ap0, a0, 36 | "\"thread.allocatedp\" should provide a pointer to internal " 37 | "storage"); 38 | 39 | sz = sizeof(d0); 40 | if 
((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) { 41 | if (err == ENOENT) { 42 | goto label_ENOENT; 43 | } 44 | test_fail("%s(): Error in mallctl(): %s", __func__, 45 | strerror(err)); 46 | } 47 | sz = sizeof(dp0); 48 | if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, 49 | 0))) { 50 | if (err == ENOENT) { 51 | goto label_ENOENT; 52 | } 53 | test_fail("%s(): Error in mallctl(): %s", __func__, 54 | strerror(err)); 55 | } 56 | assert_u64_eq(*dp0, d0, 57 | "\"thread.deallocatedp\" should provide a pointer to internal " 58 | "storage"); 59 | 60 | p = malloc(1); 61 | assert_ptr_not_null(p, "Unexpected malloc() error"); 62 | 63 | sz = sizeof(a1); 64 | mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0); 65 | sz = sizeof(ap1); 66 | mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0); 67 | assert_u64_eq(*ap1, a1, 68 | "Dereferenced \"thread.allocatedp\" value should equal " 69 | "\"thread.allocated\" value"); 70 | assert_ptr_eq(ap0, ap1, 71 | "Pointer returned by \"thread.allocatedp\" should not change"); 72 | 73 | usize = malloc_usable_size(p); 74 | assert_u64_le(a0 + usize, a1, 75 | "Allocated memory counter should increase by at least the amount " 76 | "explicitly allocated"); 77 | 78 | free(p); 79 | 80 | sz = sizeof(d1); 81 | mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0); 82 | sz = sizeof(dp1); 83 | mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0); 84 | assert_u64_eq(*dp1, d1, 85 | "Dereferenced \"thread.deallocatedp\" value should equal " 86 | "\"thread.deallocated\" value"); 87 | assert_ptr_eq(dp0, dp1, 88 | "Pointer returned by \"thread.deallocatedp\" should not change"); 89 | 90 | assert_u64_le(d0 + usize, d1, 91 | "Deallocated memory counter should increase by at least the amount " 92 | "explicitly deallocated"); 93 | 94 | return NULL; 95 | label_ENOENT: 96 | assert_false(config_stats, 97 | "ENOENT should only be returned if stats are disabled"); 98 | test_skip("\"thread.allocated\" mallctl not 
available"); 99 | return NULL; 100 | } 101 | 102 | TEST_BEGIN(test_main_thread) { 103 | thd_start(NULL); 104 | } 105 | TEST_END 106 | 107 | TEST_BEGIN(test_subthread) { 108 | thd_t thd; 109 | 110 | thd_create(&thd, thd_start, NULL); 111 | thd_join(thd, NULL); 112 | } 113 | TEST_END 114 | 115 | int 116 | main(void) { 117 | /* Run tests multiple times to check for bad interactions. */ 118 | return test( 119 | test_main_thread, 120 | test_subthread, 121 | test_main_thread, 122 | test_subthread, 123 | test_main_thread); 124 | } 125 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/extent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_fill}" = "x1" ] ; then 4 | export MALLOC_CONF="junk:false" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/malloc.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | TEST_BEGIN(test_zero_alloc) { 4 | void *res = malloc(0); 5 | assert(res); 6 | size_t usable = malloc_usable_size(res); 7 | assert(usable > 0); 8 | free(res); 9 | } 10 | TEST_END 11 | 12 | int 13 | main(void) { 14 | return test( 15 | test_zero_alloc); 16 | } 17 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/mallocx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_fill}" = "x1" ] ; then 4 | export MALLOC_CONF="junk:false" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/overflow.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | /* 4 | * GCC "-Walloc-size-larger-than" warning detects when one of the 
memory 5 | * allocation functions is called with a size larger than the maximum size that 6 | * they support. Here we want to explicitly test that the allocation functions 7 | * do indeed fail properly when this is the case, which triggers the warning. 8 | * Therefore we disable the warning for these tests. 9 | */ 10 | JEMALLOC_DIAGNOSTIC_PUSH 11 | JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN 12 | 13 | TEST_BEGIN(test_overflow) { 14 | unsigned nlextents; 15 | size_t mib[4]; 16 | size_t sz, miblen, max_size_class; 17 | void *p; 18 | 19 | sz = sizeof(unsigned); 20 | assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 21 | 0), 0, "Unexpected mallctl() error"); 22 | 23 | miblen = sizeof(mib) / sizeof(size_t); 24 | assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, 25 | "Unexpected mallctlnametomib() error"); 26 | mib[2] = nlextents - 1; 27 | 28 | sz = sizeof(size_t); 29 | assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, 30 | NULL, 0), 0, "Unexpected mallctlbymib() error"); 31 | 32 | assert_ptr_null(malloc(max_size_class + 1), 33 | "Expected OOM due to over-sized allocation request"); 34 | assert_ptr_null(malloc(SIZE_T_MAX), 35 | "Expected OOM due to over-sized allocation request"); 36 | 37 | assert_ptr_null(calloc(1, max_size_class + 1), 38 | "Expected OOM due to over-sized allocation request"); 39 | assert_ptr_null(calloc(1, SIZE_T_MAX), 40 | "Expected OOM due to over-sized allocation request"); 41 | 42 | p = malloc(1); 43 | assert_ptr_not_null(p, "Unexpected malloc() OOM"); 44 | assert_ptr_null(realloc(p, max_size_class + 1), 45 | "Expected OOM due to over-sized allocation request"); 46 | assert_ptr_null(realloc(p, SIZE_T_MAX), 47 | "Expected OOM due to over-sized allocation request"); 48 | free(p); 49 | } 50 | TEST_END 51 | 52 | /* Re-enable the "-Walloc-size-larger-than=" warning */ 53 | JEMALLOC_DIAGNOSTIC_POP 54 | 55 | int 56 | main(void) { 57 | return test( 58 | test_overflow); 59 | } 60 | 
-------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/posix_memalign.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #define MAXALIGN (((size_t)1) << 23) 4 | 5 | /* 6 | * On systems which can't merge extents, tests that call this function generate 7 | * a lot of dirty memory very quickly. Purging between cycles mitigates 8 | * potential OOM on e.g. 32-bit Windows. 9 | */ 10 | static void 11 | purge(void) { 12 | assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, 13 | "Unexpected mallctl error"); 14 | } 15 | 16 | TEST_BEGIN(test_alignment_errors) { 17 | size_t alignment; 18 | void *p; 19 | 20 | for (alignment = 0; alignment < sizeof(void *); alignment++) { 21 | assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, 22 | "Expected error for invalid alignment %zu", 23 | alignment); 24 | } 25 | 26 | for (alignment = sizeof(size_t); alignment < MAXALIGN; 27 | alignment <<= 1) { 28 | assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, 29 | "Expected error for invalid alignment %zu", 30 | alignment + 1); 31 | } 32 | } 33 | TEST_END 34 | 35 | TEST_BEGIN(test_oom_errors) { 36 | size_t alignment, size; 37 | void *p; 38 | 39 | #if LG_SIZEOF_PTR == 3 40 | alignment = UINT64_C(0x8000000000000000); 41 | size = UINT64_C(0x8000000000000000); 42 | #else 43 | alignment = 0x80000000LU; 44 | size = 0x80000000LU; 45 | #endif 46 | assert_d_ne(posix_memalign(&p, alignment, size), 0, 47 | "Expected error for posix_memalign(&p, %zu, %zu)", 48 | alignment, size); 49 | 50 | #if LG_SIZEOF_PTR == 3 51 | alignment = UINT64_C(0x4000000000000000); 52 | size = UINT64_C(0xc000000000000001); 53 | #else 54 | alignment = 0x40000000LU; 55 | size = 0xc0000001LU; 56 | #endif 57 | assert_d_ne(posix_memalign(&p, alignment, size), 0, 58 | "Expected error for posix_memalign(&p, %zu, %zu)", 59 | alignment, size); 60 | 61 | alignment = 0x10LU; 62 | #if 
LG_SIZEOF_PTR == 3 63 | size = UINT64_C(0xfffffffffffffff0); 64 | #else 65 | size = 0xfffffff0LU; 66 | #endif 67 | assert_d_ne(posix_memalign(&p, alignment, size), 0, 68 | "Expected error for posix_memalign(&p, %zu, %zu)", 69 | alignment, size); 70 | } 71 | TEST_END 72 | 73 | TEST_BEGIN(test_alignment_and_size) { 74 | #define NITER 4 75 | size_t alignment, size, total; 76 | unsigned i; 77 | int err; 78 | void *ps[NITER]; 79 | 80 | for (i = 0; i < NITER; i++) { 81 | ps[i] = NULL; 82 | } 83 | 84 | for (alignment = 8; 85 | alignment <= MAXALIGN; 86 | alignment <<= 1) { 87 | total = 0; 88 | for (size = 0; 89 | size < 3 * alignment && size < (1U << 31); 90 | size += ((size == 0) ? 1 : 91 | (alignment >> (LG_SIZEOF_PTR-1)) - 1)) { 92 | for (i = 0; i < NITER; i++) { 93 | err = posix_memalign(&ps[i], 94 | alignment, size); 95 | if (err) { 96 | char buf[BUFERROR_BUF]; 97 | 98 | buferror(get_errno(), buf, sizeof(buf)); 99 | test_fail( 100 | "Error for alignment=%zu, " 101 | "size=%zu (%#zx): %s", 102 | alignment, size, size, buf); 103 | } 104 | total += malloc_usable_size(ps[i]); 105 | if (total >= (MAXALIGN << 1)) { 106 | break; 107 | } 108 | } 109 | for (i = 0; i < NITER; i++) { 110 | if (ps[i] != NULL) { 111 | free(ps[i]); 112 | ps[i] = NULL; 113 | } 114 | } 115 | } 116 | purge(); 117 | } 118 | #undef NITER 119 | } 120 | TEST_END 121 | 122 | int 123 | main(void) { 124 | return test( 125 | test_alignment_errors, 126 | test_oom_errors, 127 | test_alignment_and_size); 128 | } 129 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/sdallocx.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #define MAXALIGN (((size_t)1) << 22) 4 | #define NITER 3 5 | 6 | TEST_BEGIN(test_basic) { 7 | void *ptr = mallocx(64, 0); 8 | sdallocx(ptr, 64, 0); 9 | } 10 | TEST_END 11 | 12 | TEST_BEGIN(test_alignment_and_size) { 13 | size_t nsz, sz, alignment, 
total; 14 | unsigned i; 15 | void *ps[NITER]; 16 | 17 | for (i = 0; i < NITER; i++) { 18 | ps[i] = NULL; 19 | } 20 | 21 | for (alignment = 8; 22 | alignment <= MAXALIGN; 23 | alignment <<= 1) { 24 | total = 0; 25 | for (sz = 1; 26 | sz < 3 * alignment && sz < (1U << 31); 27 | sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { 28 | for (i = 0; i < NITER; i++) { 29 | nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | 30 | MALLOCX_ZERO); 31 | ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | 32 | MALLOCX_ZERO); 33 | total += nsz; 34 | if (total >= (MAXALIGN << 1)) { 35 | break; 36 | } 37 | } 38 | for (i = 0; i < NITER; i++) { 39 | if (ps[i] != NULL) { 40 | sdallocx(ps[i], sz, 41 | MALLOCX_ALIGN(alignment)); 42 | ps[i] = NULL; 43 | } 44 | } 45 | } 46 | } 47 | } 48 | TEST_END 49 | 50 | int 51 | main(void) { 52 | return test_no_reentrancy( 53 | test_basic, 54 | test_alignment_and_size); 55 | } 56 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/slab_sizes.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | /* Note that this test relies on the unusual slab sizes set in slab_sizes.sh. 
*/ 4 | 5 | TEST_BEGIN(test_slab_sizes) { 6 | unsigned nbins; 7 | size_t page; 8 | size_t sizemib[4]; 9 | size_t slabmib[4]; 10 | size_t len; 11 | 12 | len = sizeof(nbins); 13 | assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, 14 | "nbins mallctl failure"); 15 | 16 | len = sizeof(page); 17 | assert_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0, 18 | "page mallctl failure"); 19 | 20 | len = 4; 21 | assert_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0, 22 | "bin size mallctlnametomib failure"); 23 | 24 | len = 4; 25 | assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len), 26 | 0, "slab size mallctlnametomib failure"); 27 | 28 | size_t biggest_slab_seen = 0; 29 | 30 | for (unsigned i = 0; i < nbins; i++) { 31 | size_t bin_size; 32 | size_t slab_size; 33 | len = sizeof(size_t); 34 | sizemib[2] = i; 35 | slabmib[2] = i; 36 | assert_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len, 37 | NULL, 0), 0, "bin size mallctlbymib failure"); 38 | 39 | len = sizeof(size_t); 40 | assert_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len, 41 | NULL, 0), 0, "slab size mallctlbymib failure"); 42 | 43 | if (bin_size < 100) { 44 | /* 45 | * Then we should be as close to 17 as possible. Since 46 | * not all page sizes are valid (because of bitmap 47 | * limitations on the number of items in a slab), we 48 | * should at least make sure that the number of pages 49 | * goes up. 
50 | */ 51 | assert_zu_ge(slab_size, biggest_slab_seen, 52 | "Slab sizes should go up"); 53 | biggest_slab_seen = slab_size; 54 | } else if ( 55 | (100 <= bin_size && bin_size < 128) 56 | || (128 < bin_size && bin_size <= 200)) { 57 | assert_zu_eq(slab_size, page, 58 | "Forced-small slabs should be small"); 59 | } else if (bin_size == 128) { 60 | assert_zu_eq(slab_size, 2 * page, 61 | "Forced-2-page slab should be 2 pages"); 62 | } else if (200 < bin_size && bin_size <= 4096) { 63 | assert_zu_ge(slab_size, biggest_slab_seen, 64 | "Slab sizes should go up"); 65 | biggest_slab_seen = slab_size; 66 | } 67 | } 68 | /* 69 | * For any reasonable configuration, 17 pages should be a valid slab 70 | * size for 4096-byte items. 71 | */ 72 | assert_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target"); 73 | } 74 | TEST_END 75 | 76 | int 77 | main(void) { 78 | return test( 79 | test_slab_sizes); 80 | } 81 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/slab_sizes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Some screwy-looking slab sizes. 
4 | export MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2" 5 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/smallocx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_fill}" = "x1" ] ; then 4 | export MALLOC_CONF="junk:false" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/thread_arena.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #define NTHREADS 10 4 | 5 | void * 6 | thd_start(void *arg) { 7 | unsigned main_arena_ind = *(unsigned *)arg; 8 | void *p; 9 | unsigned arena_ind; 10 | size_t size; 11 | int err; 12 | 13 | p = malloc(1); 14 | assert_ptr_not_null(p, "Error in malloc()"); 15 | free(p); 16 | 17 | size = sizeof(arena_ind); 18 | if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, 19 | (void *)&main_arena_ind, sizeof(main_arena_ind)))) { 20 | char buf[BUFERROR_BUF]; 21 | 22 | buferror(err, buf, sizeof(buf)); 23 | test_fail("Error in mallctl(): %s", buf); 24 | } 25 | 26 | size = sizeof(arena_ind); 27 | if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, 28 | 0))) { 29 | char buf[BUFERROR_BUF]; 30 | 31 | buferror(err, buf, sizeof(buf)); 32 | test_fail("Error in mallctl(): %s", buf); 33 | } 34 | assert_u_eq(arena_ind, main_arena_ind, 35 | "Arena index should be same as for main thread"); 36 | 37 | return NULL; 38 | } 39 | 40 | static void 41 | mallctl_failure(int err) { 42 | char buf[BUFERROR_BUF]; 43 | 44 | buferror(err, buf, sizeof(buf)); 45 | test_fail("Error in mallctl(): %s", buf); 46 | } 47 | 48 | TEST_BEGIN(test_thread_arena) { 49 | void *p; 50 | int err; 51 | thd_t thds[NTHREADS]; 52 | unsigned i; 53 | 54 | p = malloc(1); 55 | assert_ptr_not_null(p, "Error in malloc()"); 56 | 57 | unsigned arena_ind, old_arena_ind; 58 | size_t 
sz = sizeof(unsigned); 59 | assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 60 | 0, "Arena creation failure"); 61 | 62 | size_t size = sizeof(arena_ind); 63 | if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size, 64 | (void *)&arena_ind, sizeof(arena_ind))) != 0) { 65 | mallctl_failure(err); 66 | } 67 | 68 | for (i = 0; i < NTHREADS; i++) { 69 | thd_create(&thds[i], thd_start, 70 | (void *)&arena_ind); 71 | } 72 | 73 | for (i = 0; i < NTHREADS; i++) { 74 | intptr_t join_ret; 75 | thd_join(thds[i], (void *)&join_ret); 76 | assert_zd_eq(join_ret, 0, "Unexpected thread join error"); 77 | } 78 | free(p); 79 | } 80 | TEST_END 81 | 82 | int 83 | main(void) { 84 | return test( 85 | test_thread_arena); 86 | } 87 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/thread_tcache_enabled.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | void * 4 | thd_start(void *arg) { 5 | bool e0, e1; 6 | size_t sz = sizeof(bool); 7 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, 8 | 0), 0, "Unexpected mallctl failure"); 9 | 10 | if (e0) { 11 | e1 = false; 12 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, 13 | (void *)&e1, sz), 0, "Unexpected mallctl() error"); 14 | assert_true(e0, "tcache should be enabled"); 15 | } 16 | 17 | e1 = true; 18 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, 19 | (void *)&e1, sz), 0, "Unexpected mallctl() error"); 20 | assert_false(e0, "tcache should be disabled"); 21 | 22 | e1 = true; 23 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, 24 | (void *)&e1, sz), 0, "Unexpected mallctl() error"); 25 | assert_true(e0, "tcache should be enabled"); 26 | 27 | e1 = false; 28 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, 29 | (void *)&e1, sz), 0, "Unexpected mallctl() error"); 30 | assert_true(e0, "tcache 
should be enabled"); 31 | 32 | e1 = false; 33 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, 34 | (void *)&e1, sz), 0, "Unexpected mallctl() error"); 35 | assert_false(e0, "tcache should be disabled"); 36 | 37 | free(malloc(1)); 38 | e1 = true; 39 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, 40 | (void *)&e1, sz), 0, "Unexpected mallctl() error"); 41 | assert_false(e0, "tcache should be disabled"); 42 | 43 | free(malloc(1)); 44 | e1 = true; 45 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, 46 | (void *)&e1, sz), 0, "Unexpected mallctl() error"); 47 | assert_true(e0, "tcache should be enabled"); 48 | 49 | free(malloc(1)); 50 | e1 = false; 51 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, 52 | (void *)&e1, sz), 0, "Unexpected mallctl() error"); 53 | assert_true(e0, "tcache should be enabled"); 54 | 55 | free(malloc(1)); 56 | e1 = false; 57 | assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, 58 | (void *)&e1, sz), 0, "Unexpected mallctl() error"); 59 | assert_false(e0, "tcache should be disabled"); 60 | 61 | free(malloc(1)); 62 | return NULL; 63 | } 64 | 65 | TEST_BEGIN(test_main_thread) { 66 | thd_start(NULL); 67 | } 68 | TEST_END 69 | 70 | TEST_BEGIN(test_subthread) { 71 | thd_t thd; 72 | 73 | thd_create(&thd, thd_start, NULL); 74 | thd_join(thd, NULL); 75 | } 76 | TEST_END 77 | 78 | int 79 | main(void) { 80 | /* Run tests multiple times to check for bad interactions. 
*/ 81 | return test( 82 | test_main_thread, 83 | test_subthread, 84 | test_main_thread, 85 | test_subthread, 86 | test_main_thread); 87 | } 88 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/integration/xallocx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_fill}" = "x1" ] ; then 4 | export MALLOC_CONF="junk:false" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/src/btalloc.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | void * 4 | btalloc(size_t size, unsigned bits) { 5 | return btalloc_0(size, bits); 6 | } 7 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/src/btalloc_0.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | btalloc_n_gen(0) 4 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/src/btalloc_1.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | btalloc_n_gen(1) 4 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/src/math.c: -------------------------------------------------------------------------------- 1 | #define MATH_C_ 2 | #include "test/jemalloc_test.h" 3 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/src/mq.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | /* 4 | * Sleep for approximately ns nanoseconds. No lower *nor* upper bound on sleep 5 | * time is guaranteed. 
6 | */ 7 | void 8 | mq_nanosleep(unsigned ns) { 9 | assert(ns <= 1000*1000*1000); 10 | 11 | #ifdef _WIN32 12 | Sleep(ns / 1000); 13 | #else 14 | { 15 | struct timespec timeout; 16 | 17 | if (ns < 1000*1000*1000) { 18 | timeout.tv_sec = 0; 19 | timeout.tv_nsec = ns; 20 | } else { 21 | timeout.tv_sec = 1; 22 | timeout.tv_nsec = 0; 23 | } 24 | nanosleep(&timeout, NULL); 25 | } 26 | #endif 27 | } 28 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/src/mtx.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #ifndef _CRT_SPINCOUNT 4 | #define _CRT_SPINCOUNT 4000 5 | #endif 6 | 7 | bool 8 | mtx_init(mtx_t *mtx) { 9 | #ifdef _WIN32 10 | if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, 11 | _CRT_SPINCOUNT)) { 12 | return true; 13 | } 14 | #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) 15 | mtx->lock = OS_UNFAIR_LOCK_INIT; 16 | #else 17 | pthread_mutexattr_t attr; 18 | 19 | if (pthread_mutexattr_init(&attr) != 0) { 20 | return true; 21 | } 22 | pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); 23 | if (pthread_mutex_init(&mtx->lock, &attr) != 0) { 24 | pthread_mutexattr_destroy(&attr); 25 | return true; 26 | } 27 | pthread_mutexattr_destroy(&attr); 28 | #endif 29 | return false; 30 | } 31 | 32 | void 33 | mtx_fini(mtx_t *mtx) { 34 | #ifdef _WIN32 35 | #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) 36 | #else 37 | pthread_mutex_destroy(&mtx->lock); 38 | #endif 39 | } 40 | 41 | void 42 | mtx_lock(mtx_t *mtx) { 43 | #ifdef _WIN32 44 | EnterCriticalSection(&mtx->lock); 45 | #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) 46 | os_unfair_lock_lock(&mtx->lock); 47 | #else 48 | pthread_mutex_lock(&mtx->lock); 49 | #endif 50 | } 51 | 52 | void 53 | mtx_unlock(mtx_t *mtx) { 54 | #ifdef _WIN32 55 | LeaveCriticalSection(&mtx->lock); 56 | #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) 57 | os_unfair_lock_unlock(&mtx->lock); 58 | #else 59 | 
pthread_mutex_unlock(&mtx->lock); 60 | #endif 61 | } 62 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/src/thd.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #ifdef _WIN32 4 | void 5 | thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { 6 | LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; 7 | *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); 8 | if (*thd == NULL) { 9 | test_fail("Error in CreateThread()\n"); 10 | } 11 | } 12 | 13 | void 14 | thd_join(thd_t thd, void **ret) { 15 | if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { 16 | DWORD exit_code; 17 | GetExitCodeThread(thd, (LPDWORD) &exit_code); 18 | *ret = (void *)(uintptr_t)exit_code; 19 | } 20 | } 21 | 22 | #else 23 | void 24 | thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { 25 | if (pthread_create(thd, NULL, proc, arg) != 0) { 26 | test_fail("Error in pthread_create()\n"); 27 | } 28 | } 29 | 30 | void 31 | thd_join(thd_t thd, void **ret) { 32 | pthread_join(thd, ret); 33 | } 34 | #endif 35 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/src/timer.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | void 4 | timer_start(timedelta_t *timer) { 5 | nstime_init(&timer->t0, 0); 6 | nstime_update(&timer->t0); 7 | } 8 | 9 | void 10 | timer_stop(timedelta_t *timer) { 11 | nstime_copy(&timer->t1, &timer->t0); 12 | nstime_update(&timer->t1); 13 | } 14 | 15 | uint64_t 16 | timer_usec(const timedelta_t *timer) { 17 | nstime_t delta; 18 | 19 | nstime_copy(&delta, &timer->t1); 20 | nstime_subtract(&delta, &timer->t0); 21 | return nstime_ns(&delta) / 1000; 22 | } 23 | 24 | void 25 | timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { 26 | uint64_t t0 = timer_usec(a); 27 | uint64_t t1 = 
timer_usec(b); 28 | uint64_t mult; 29 | size_t i = 0; 30 | size_t j, n; 31 | 32 | /* Whole. */ 33 | n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); 34 | i += n; 35 | if (i >= buflen) { 36 | return; 37 | } 38 | mult = 1; 39 | for (j = 0; j < n; j++) { 40 | mult *= 10; 41 | } 42 | 43 | /* Decimal. */ 44 | n = malloc_snprintf(&buf[i], buflen-i, "."); 45 | i += n; 46 | 47 | /* Fraction. */ 48 | while (i < buflen-1) { 49 | uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 50 | >= 5)) ? 1 : 0; 51 | n = malloc_snprintf(&buf[i], buflen-i, 52 | "%"FMTu64, (t0 * mult / t1) % 10 + round); 53 | i += n; 54 | mult *= 10; 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/stress/hookbench.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | static void 4 | noop_alloc_hook(void *extra, hook_alloc_t type, void *result, 5 | uintptr_t result_raw, uintptr_t args_raw[3]) { 6 | } 7 | 8 | static void 9 | noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address, 10 | uintptr_t args_raw[3]) { 11 | } 12 | 13 | static void 14 | noop_expand_hook(void *extra, hook_expand_t type, void *address, 15 | size_t old_usize, size_t new_usize, uintptr_t result_raw, 16 | uintptr_t args_raw[4]) { 17 | } 18 | 19 | static void 20 | malloc_free_loop(int iters) { 21 | for (int i = 0; i < iters; i++) { 22 | void *p = mallocx(1, 0); 23 | free(p); 24 | } 25 | } 26 | 27 | static void 28 | test_hooked(int iters) { 29 | hooks_t hooks = {&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook, 30 | NULL}; 31 | 32 | int err; 33 | void *handles[HOOK_MAX]; 34 | size_t sz = sizeof(handles[0]); 35 | 36 | for (int i = 0; i < HOOK_MAX; i++) { 37 | err = mallctl("experimental.hooks.install", &handles[i], 38 | &sz, &hooks, sizeof(hooks)); 39 | assert(err == 0); 40 | 41 | timedelta_t timer; 42 | timer_start(&timer); 43 | malloc_free_loop(iters); 44 | 
timer_stop(&timer); 45 | malloc_printf("With %d hook%s: %"FMTu64"us\n", i + 1, 46 | i + 1 == 1 ? "" : "s", timer_usec(&timer)); 47 | } 48 | for (int i = 0; i < HOOK_MAX; i++) { 49 | err = mallctl("experimental.hooks.remove", NULL, NULL, 50 | &handles[i], sizeof(handles[i])); 51 | assert(err == 0); 52 | } 53 | } 54 | 55 | static void 56 | test_unhooked(int iters) { 57 | timedelta_t timer; 58 | timer_start(&timer); 59 | malloc_free_loop(iters); 60 | timer_stop(&timer); 61 | 62 | malloc_printf("Without hooks: %"FMTu64"us\n", timer_usec(&timer)); 63 | } 64 | 65 | int 66 | main(void) { 67 | /* Initialize */ 68 | free(mallocx(1, 0)); 69 | int iters = 10 * 1000 * 1000; 70 | malloc_printf("Benchmarking hooks with %d iterations:\n", iters); 71 | test_hooked(iters); 72 | test_unhooked(iters); 73 | } 74 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/test.sh.in: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | case @abi@ in 4 | macho) 5 | export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" 6 | ;; 7 | pecoff) 8 | export PATH="${PATH}:@objroot@lib" 9 | ;; 10 | *) 11 | ;; 12 | esac 13 | 14 | # Make a copy of the @JEMALLOC_CPREFIX@MALLOC_CONF passed in to this script, so 15 | # it can be repeatedly concatenated with per test settings. 16 | export MALLOC_CONF_ALL=${@JEMALLOC_CPREFIX@MALLOC_CONF} 17 | # Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL. 18 | export_malloc_conf() { 19 | if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then 20 | export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}" 21 | else 22 | export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}" 23 | fi 24 | } 25 | 26 | # Corresponds to test_status_t. 
27 | pass_code=0 28 | skip_code=1 29 | fail_code=2 30 | 31 | pass_count=0 32 | skip_count=0 33 | fail_count=0 34 | for t in $@; do 35 | if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then 36 | echo 37 | fi 38 | echo "=== ${t} ===" 39 | if [ -e "@srcroot@${t}.sh" ] ; then 40 | # Source the shell script corresponding to the test in a subshell and 41 | # execute the test. This allows the shell script to set MALLOC_CONF, which 42 | # is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the 43 | # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail). 44 | enable_fill=@enable_fill@ \ 45 | enable_prof=@enable_prof@ \ 46 | . @srcroot@${t}.sh && \ 47 | export_malloc_conf && \ 48 | $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ 49 | else 50 | export MALLOC_CONF= && \ 51 | export_malloc_conf && \ 52 | $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ 53 | fi 54 | result_code=$? 55 | case ${result_code} in 56 | ${pass_code}) 57 | pass_count=$((pass_count+1)) 58 | ;; 59 | ${skip_code}) 60 | skip_count=$((skip_count+1)) 61 | ;; 62 | ${fail_code}) 63 | fail_count=$((fail_count+1)) 64 | ;; 65 | *) 66 | echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2 67 | echo "Use prefix to debug, e.g. 
JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2 68 | exit 1 69 | esac 70 | done 71 | 72 | total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` 73 | echo 74 | echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" 75 | 76 | if [ ${fail_count} -eq 0 ] ; then 77 | exit 0 78 | else 79 | exit 1 80 | fi 81 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/a0.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | TEST_BEGIN(test_a0) { 4 | void *p; 5 | 6 | p = a0malloc(1); 7 | assert_ptr_not_null(p, "Unexpected a0malloc() error"); 8 | a0dalloc(p); 9 | } 10 | TEST_END 11 | 12 | int 13 | main(void) { 14 | return test_no_malloc_init( 15 | test_a0); 16 | } 17 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/arena_reset_prof.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | #define ARENA_RESET_PROF_C_ 3 | 4 | #include "arena_reset.c" 5 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/arena_reset_prof.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | export MALLOC_CONF="prof:true,lg_prof_sample:0" 4 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/background_thread_enable.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20"; 4 | 5 | TEST_BEGIN(test_deferred) { 6 | test_skip_if(!have_background_thread); 7 | 8 | unsigned id; 9 | size_t sz_u = sizeof(unsigned); 10 | 11 | /* 12 | * 10 
here is somewhat arbitrary, except insofar as we want to ensure 13 | * that the number of background threads is smaller than the number of 14 | * arenas. I'll ragequit long before we have to spin up 10 threads per 15 | * cpu to handle background purging, so this is a conservative 16 | * approximation. 17 | */ 18 | for (unsigned i = 0; i < 10 * ncpus; i++) { 19 | assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, 20 | "Failed to create arena"); 21 | } 22 | 23 | bool enable = true; 24 | size_t sz_b = sizeof(bool); 25 | assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, 26 | "Failed to enable background threads"); 27 | enable = false; 28 | assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, 29 | "Failed to disable background threads"); 30 | } 31 | TEST_END 32 | 33 | TEST_BEGIN(test_max_background_threads) { 34 | test_skip_if(!have_background_thread); 35 | 36 | size_t max_n_thds; 37 | size_t opt_max_n_thds; 38 | size_t sz_m = sizeof(max_n_thds); 39 | assert_d_eq(mallctl("opt.max_background_threads", 40 | &opt_max_n_thds, &sz_m, NULL, 0), 0, 41 | "Failed to get opt.max_background_threads"); 42 | assert_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL, 43 | 0), 0, "Failed to get max background threads"); 44 | assert_zu_eq(opt_max_n_thds, max_n_thds, 45 | "max_background_threads and " 46 | "opt.max_background_threads should match"); 47 | assert_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds, 48 | sz_m), 0, "Failed to set max background threads"); 49 | 50 | unsigned id; 51 | size_t sz_u = sizeof(unsigned); 52 | 53 | for (unsigned i = 0; i < 10 * ncpus; i++) { 54 | assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, 55 | "Failed to create arena"); 56 | } 57 | 58 | bool enable = true; 59 | size_t sz_b = sizeof(bool); 60 | assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, 61 | "Failed to enable background threads"); 62 | assert_zu_eq(n_background_threads, 
max_n_thds, 63 | "Number of background threads should not change.\n"); 64 | size_t new_max_thds = max_n_thds - 1; 65 | if (new_max_thds > 0) { 66 | assert_d_eq(mallctl("max_background_threads", NULL, NULL, 67 | &new_max_thds, sz_m), 0, 68 | "Failed to set max background threads"); 69 | assert_zu_eq(n_background_threads, new_max_thds, 70 | "Number of background threads should decrease by 1.\n"); 71 | } 72 | new_max_thds = 1; 73 | assert_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds, 74 | sz_m), 0, "Failed to set max background threads"); 75 | assert_zu_eq(n_background_threads, new_max_thds, 76 | "Number of background threads should be 1.\n"); 77 | } 78 | TEST_END 79 | 80 | int 81 | main(void) { 82 | return test_no_reentrancy( 83 | test_deferred, 84 | test_max_background_threads); 85 | } 86 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/binshard.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | export MALLOC_CONF="narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" 4 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/bit_util.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #include "jemalloc/internal/bit_util.h" 4 | 5 | #define TEST_POW2_CEIL(t, suf, pri) do { \ 6 | unsigned i, pow2; \ 7 | t x; \ 8 | \ 9 | assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ 10 | \ 11 | for (i = 0; i < sizeof(t) * 8; i++) { \ 12 | assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ 13 | << i, "Unexpected result"); \ 14 | } \ 15 | \ 16 | for (i = 2; i < sizeof(t) * 8; i++) { \ 17 | assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ 18 | ((t)1) << i, "Unexpected result"); \ 19 | } \ 20 | \ 21 | for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ 22 | assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 
1), \ 23 | ((t)1) << (i+1), "Unexpected result"); \ 24 | } \ 25 | \ 26 | for (pow2 = 1; pow2 < 25; pow2++) { \ 27 | for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ 28 | x++) { \ 29 | assert_##suf##_eq(pow2_ceil_##suf(x), \ 30 | ((t)1) << pow2, \ 31 | "Unexpected result, x=%"pri, x); \ 32 | } \ 33 | } \ 34 | } while (0) 35 | 36 | TEST_BEGIN(test_pow2_ceil_u64) { 37 | TEST_POW2_CEIL(uint64_t, u64, FMTu64); 38 | } 39 | TEST_END 40 | 41 | TEST_BEGIN(test_pow2_ceil_u32) { 42 | TEST_POW2_CEIL(uint32_t, u32, FMTu32); 43 | } 44 | TEST_END 45 | 46 | TEST_BEGIN(test_pow2_ceil_zu) { 47 | TEST_POW2_CEIL(size_t, zu, "zu"); 48 | } 49 | TEST_END 50 | 51 | void 52 | assert_lg_ceil_range(size_t input, unsigned answer) { 53 | if (input == 1) { 54 | assert_u_eq(0, answer, "Got %u as lg_ceil of 1", answer); 55 | return; 56 | } 57 | assert_zu_le(input, (ZU(1) << answer), 58 | "Got %u as lg_ceil of %zu", answer, input); 59 | assert_zu_gt(input, (ZU(1) << (answer - 1)), 60 | "Got %u as lg_ceil of %zu", answer, input); 61 | } 62 | 63 | void 64 | assert_lg_floor_range(size_t input, unsigned answer) { 65 | if (input == 1) { 66 | assert_u_eq(0, answer, "Got %u as lg_floor of 1", answer); 67 | return; 68 | } 69 | assert_zu_ge(input, (ZU(1) << answer), 70 | "Got %u as lg_floor of %zu", answer, input); 71 | assert_zu_lt(input, (ZU(1) << (answer + 1)), 72 | "Got %u as lg_floor of %zu", answer, input); 73 | } 74 | 75 | TEST_BEGIN(test_lg_ceil_floor) { 76 | for (size_t i = 1; i < 10 * 1000 * 1000; i++) { 77 | assert_lg_ceil_range(i, lg_ceil(i)); 78 | assert_lg_ceil_range(i, LG_CEIL(i)); 79 | assert_lg_floor_range(i, lg_floor(i)); 80 | assert_lg_floor_range(i, LG_FLOOR(i)); 81 | } 82 | for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) { 83 | for (size_t j = 0; j < (1 << 4); j++) { 84 | size_t num1 = ((size_t)1 << i) 85 | - j * ((size_t)1 << (i - 4)); 86 | size_t num2 = ((size_t)1 << i) 87 | + j * ((size_t)1 << (i - 4)); 88 | assert_zu_ne(num1, 0, "Invalid lg argument"); 89 | 
assert_zu_ne(num2, 0, "Invalid lg argument"); 90 | assert_lg_ceil_range(num1, lg_ceil(num1)); 91 | assert_lg_ceil_range(num1, LG_CEIL(num1)); 92 | assert_lg_ceil_range(num2, lg_ceil(num2)); 93 | assert_lg_ceil_range(num2, LG_CEIL(num2)); 94 | 95 | assert_lg_floor_range(num1, lg_floor(num1)); 96 | assert_lg_floor_range(num1, LG_FLOOR(num1)); 97 | assert_lg_floor_range(num2, lg_floor(num2)); 98 | assert_lg_floor_range(num2, LG_FLOOR(num2)); 99 | } 100 | } 101 | } 102 | TEST_END 103 | 104 | int 105 | main(void) { 106 | return test( 107 | test_pow2_ceil_u64, 108 | test_pow2_ceil_u32, 109 | test_pow2_ceil_zu, 110 | test_lg_ceil_floor); 111 | } 112 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/decay.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,lg_tcache_max:0" 4 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/div.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #include "jemalloc/internal/div.h" 4 | 5 | TEST_BEGIN(test_div_exhaustive) { 6 | for (size_t divisor = 2; divisor < 1000 * 1000; ++divisor) { 7 | div_info_t div_info; 8 | div_init(&div_info, divisor); 9 | size_t max = 1000 * divisor; 10 | if (max < 1000 * 1000) { 11 | max = 1000 * 1000; 12 | } 13 | for (size_t dividend = 0; dividend < 1000 * divisor; 14 | dividend += divisor) { 15 | size_t quotient = div_compute( 16 | &div_info, dividend); 17 | assert_zu_eq(dividend, quotient * divisor, 18 | "With divisor = %zu, dividend = %zu, " 19 | "got quotient %zu", divisor, dividend, quotient); 20 | } 21 | } 22 | } 23 | TEST_END 24 | 25 | int 26 | main(void) { 27 | return test_no_reentrancy( 28 | test_div_exhaustive); 29 | } 30 | 
-------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/junk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_fill}" = "x1" ] ; then 4 | export MALLOC_CONF="abort:false,zero:false,junk:true" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/junk_alloc.c: -------------------------------------------------------------------------------- 1 | #include "junk.c" 2 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/junk_alloc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_fill}" = "x1" ] ; then 4 | export MALLOC_CONF="abort:false,zero:false,junk:alloc" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/junk_free.c: -------------------------------------------------------------------------------- 1 | #include "junk.c" 2 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/junk_free.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_fill}" = "x1" ] ; then 4 | export MALLOC_CONF="abort:false,zero:false,junk:free" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/mq.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #define NSENDERS 3 4 | #define NMSGS 100000 5 | 6 | typedef struct mq_msg_s mq_msg_t; 7 | struct mq_msg_s { 8 | mq_msg(mq_msg_t) link; 9 | }; 10 | mq_gen(static, mq_, mq_t, mq_msg_t, link) 11 | 12 | TEST_BEGIN(test_mq_basic) { 13 | mq_t mq; 14 | mq_msg_t msg; 15 | 16 | assert_false(mq_init(&mq), "Unexpected 
mq_init() failure"); 17 | assert_u_eq(mq_count(&mq), 0, "mq should be empty"); 18 | assert_ptr_null(mq_tryget(&mq), 19 | "mq_tryget() should fail when the queue is empty"); 20 | 21 | mq_put(&mq, &msg); 22 | assert_u_eq(mq_count(&mq), 1, "mq should contain one message"); 23 | assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg"); 24 | 25 | mq_put(&mq, &msg); 26 | assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg"); 27 | 28 | mq_fini(&mq); 29 | } 30 | TEST_END 31 | 32 | static void * 33 | thd_receiver_start(void *arg) { 34 | mq_t *mq = (mq_t *)arg; 35 | unsigned i; 36 | 37 | for (i = 0; i < (NSENDERS * NMSGS); i++) { 38 | mq_msg_t *msg = mq_get(mq); 39 | assert_ptr_not_null(msg, "mq_get() should never return NULL"); 40 | dallocx(msg, 0); 41 | } 42 | return NULL; 43 | } 44 | 45 | static void * 46 | thd_sender_start(void *arg) { 47 | mq_t *mq = (mq_t *)arg; 48 | unsigned i; 49 | 50 | for (i = 0; i < NMSGS; i++) { 51 | mq_msg_t *msg; 52 | void *p; 53 | p = mallocx(sizeof(mq_msg_t), 0); 54 | assert_ptr_not_null(p, "Unexpected mallocx() failure"); 55 | msg = (mq_msg_t *)p; 56 | mq_put(mq, msg); 57 | } 58 | return NULL; 59 | } 60 | 61 | TEST_BEGIN(test_mq_threaded) { 62 | mq_t mq; 63 | thd_t receiver; 64 | thd_t senders[NSENDERS]; 65 | unsigned i; 66 | 67 | assert_false(mq_init(&mq), "Unexpected mq_init() failure"); 68 | 69 | thd_create(&receiver, thd_receiver_start, (void *)&mq); 70 | for (i = 0; i < NSENDERS; i++) { 71 | thd_create(&senders[i], thd_sender_start, (void *)&mq); 72 | } 73 | 74 | thd_join(receiver, NULL); 75 | for (i = 0; i < NSENDERS; i++) { 76 | thd_join(senders[i], NULL); 77 | } 78 | 79 | mq_fini(&mq); 80 | } 81 | TEST_END 82 | 83 | int 84 | main(void) { 85 | return test( 86 | test_mq_basic, 87 | test_mq_threaded); 88 | } 89 | 90 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/mtx.c: -------------------------------------------------------------------------------- 1 | 
#include "test/jemalloc_test.h" 2 | 3 | #define NTHREADS 2 4 | #define NINCRS 2000000 5 | 6 | TEST_BEGIN(test_mtx_basic) { 7 | mtx_t mtx; 8 | 9 | assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); 10 | mtx_lock(&mtx); 11 | mtx_unlock(&mtx); 12 | mtx_fini(&mtx); 13 | } 14 | TEST_END 15 | 16 | typedef struct { 17 | mtx_t mtx; 18 | unsigned x; 19 | } thd_start_arg_t; 20 | 21 | static void * 22 | thd_start(void *varg) { 23 | thd_start_arg_t *arg = (thd_start_arg_t *)varg; 24 | unsigned i; 25 | 26 | for (i = 0; i < NINCRS; i++) { 27 | mtx_lock(&arg->mtx); 28 | arg->x++; 29 | mtx_unlock(&arg->mtx); 30 | } 31 | return NULL; 32 | } 33 | 34 | TEST_BEGIN(test_mtx_race) { 35 | thd_start_arg_t arg; 36 | thd_t thds[NTHREADS]; 37 | unsigned i; 38 | 39 | assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); 40 | arg.x = 0; 41 | for (i = 0; i < NTHREADS; i++) { 42 | thd_create(&thds[i], thd_start, (void *)&arg); 43 | } 44 | for (i = 0; i < NTHREADS; i++) { 45 | thd_join(thds[i], NULL); 46 | } 47 | assert_u_eq(arg.x, NTHREADS * NINCRS, 48 | "Race-related counter corruption"); 49 | } 50 | TEST_END 51 | 52 | int 53 | main(void) { 54 | return test( 55 | test_mtx_basic, 56 | test_mtx_race); 57 | } 58 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/pack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Immediately purge to minimize fragmentation. 
4 | export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0" 5 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/pages.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | TEST_BEGIN(test_pages_huge) { 4 | size_t alloc_size; 5 | bool commit; 6 | void *pages, *hugepage; 7 | 8 | alloc_size = HUGEPAGE * 2 - PAGE; 9 | commit = true; 10 | pages = pages_map(NULL, alloc_size, PAGE, &commit); 11 | assert_ptr_not_null(pages, "Unexpected pages_map() error"); 12 | 13 | if (init_system_thp_mode == thp_mode_default) { 14 | hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE)); 15 | assert_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge, 16 | "Unexpected pages_huge() result"); 17 | assert_false(pages_nohuge(hugepage, HUGEPAGE), 18 | "Unexpected pages_nohuge() result"); 19 | } 20 | 21 | pages_unmap(pages, alloc_size); 22 | } 23 | TEST_END 24 | 25 | int 26 | main(void) { 27 | return test( 28 | test_pages_huge); 29 | } 30 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_accum.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #define NTHREADS 4 4 | #define NALLOCS_PER_THREAD 50 5 | #define DUMP_INTERVAL 1 6 | #define BT_COUNT_CHECK_INTERVAL 5 7 | 8 | static int 9 | prof_dump_open_intercept(bool propagate_err, const char *filename) { 10 | int fd; 11 | 12 | fd = open("/dev/null", O_WRONLY); 13 | assert_d_ne(fd, -1, "Unexpected open() failure"); 14 | 15 | return fd; 16 | } 17 | 18 | static void * 19 | alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) { 20 | return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration); 21 | } 22 | 23 | static void * 24 | thd_start(void *varg) { 25 | unsigned thd_ind = *(unsigned *)varg; 26 | size_t bt_count_prev, bt_count; 27 | unsigned i_prev, i; 
28 | 29 | i_prev = 0; 30 | bt_count_prev = 0; 31 | for (i = 0; i < NALLOCS_PER_THREAD; i++) { 32 | void *p = alloc_from_permuted_backtrace(thd_ind, i); 33 | dallocx(p, 0); 34 | if (i % DUMP_INTERVAL == 0) { 35 | assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 36 | 0, "Unexpected error while dumping heap profile"); 37 | } 38 | 39 | if (i % BT_COUNT_CHECK_INTERVAL == 0 || 40 | i+1 == NALLOCS_PER_THREAD) { 41 | bt_count = prof_bt_count(); 42 | assert_zu_le(bt_count_prev+(i-i_prev), bt_count, 43 | "Expected larger backtrace count increase"); 44 | i_prev = i; 45 | bt_count_prev = bt_count; 46 | } 47 | } 48 | 49 | return NULL; 50 | } 51 | 52 | TEST_BEGIN(test_idump) { 53 | bool active; 54 | thd_t thds[NTHREADS]; 55 | unsigned thd_args[NTHREADS]; 56 | unsigned i; 57 | 58 | test_skip_if(!config_prof); 59 | 60 | active = true; 61 | assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, 62 | sizeof(active)), 0, 63 | "Unexpected mallctl failure while activating profiling"); 64 | 65 | prof_dump_open = prof_dump_open_intercept; 66 | 67 | for (i = 0; i < NTHREADS; i++) { 68 | thd_args[i] = i; 69 | thd_create(&thds[i], thd_start, (void *)&thd_args[i]); 70 | } 71 | for (i = 0; i < NTHREADS; i++) { 72 | thd_join(thds[i], NULL); 73 | } 74 | } 75 | TEST_END 76 | 77 | int 78 | main(void) { 79 | return test_no_reentrancy( 80 | test_idump); 81 | } 82 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_accum.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_prof}" = "x1" ] ; then 4 | export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_active.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_prof}" = "x1" ] ; then 4 | 
export MALLOC_CONF="prof:true,prof_thread_active_init:false,lg_prof_sample:0" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_gdump.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | static bool did_prof_dump_open; 4 | 5 | static int 6 | prof_dump_open_intercept(bool propagate_err, const char *filename) { 7 | int fd; 8 | 9 | did_prof_dump_open = true; 10 | 11 | fd = open("/dev/null", O_WRONLY); 12 | assert_d_ne(fd, -1, "Unexpected open() failure"); 13 | 14 | return fd; 15 | } 16 | 17 | TEST_BEGIN(test_gdump) { 18 | bool active, gdump, gdump_old; 19 | void *p, *q, *r, *s; 20 | size_t sz; 21 | 22 | test_skip_if(!config_prof); 23 | 24 | active = true; 25 | assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, 26 | sizeof(active)), 0, 27 | "Unexpected mallctl failure while activating profiling"); 28 | 29 | prof_dump_open = prof_dump_open_intercept; 30 | 31 | did_prof_dump_open = false; 32 | p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); 33 | assert_ptr_not_null(p, "Unexpected mallocx() failure"); 34 | assert_true(did_prof_dump_open, "Expected a profile dump"); 35 | 36 | did_prof_dump_open = false; 37 | q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); 38 | assert_ptr_not_null(q, "Unexpected mallocx() failure"); 39 | assert_true(did_prof_dump_open, "Expected a profile dump"); 40 | 41 | gdump = false; 42 | sz = sizeof(gdump_old); 43 | assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, 44 | (void *)&gdump, sizeof(gdump)), 0, 45 | "Unexpected mallctl failure while disabling prof.gdump"); 46 | assert(gdump_old); 47 | did_prof_dump_open = false; 48 | r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); 49 | /* Fix: check the allocation just made (r), not q (copy-paste bug). */ assert_ptr_not_null(r, "Unexpected mallocx() failure"); 50 | assert_false(did_prof_dump_open, "Unexpected profile dump"); 51 | 52 | gdump = true; 53 | sz = sizeof(gdump_old); 54 | assert_d_eq(mallctl("prof.gdump", 
(void *)&gdump_old, &sz, 55 | (void *)&gdump, sizeof(gdump)), 0, 56 | "Unexpected mallctl failure while enabling prof.gdump"); 57 | assert(!gdump_old); 58 | did_prof_dump_open = false; 59 | s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); 60 | /* Fix: check the allocation just made (s), not q (copy-paste bug). */ assert_ptr_not_null(s, "Unexpected mallocx() failure"); 61 | assert_true(did_prof_dump_open, "Expected a profile dump"); 62 | 63 | dallocx(p, 0); 64 | dallocx(q, 0); 65 | dallocx(r, 0); 66 | dallocx(s, 0); 67 | } 68 | TEST_END 69 | 70 | int 71 | main(void) { 72 | return test_no_reentrancy( 73 | test_gdump); 74 | } 75 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_gdump.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_prof}" = "x1" ] ; then 4 | export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true" 5 | fi 6 | 7 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_idump.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | static bool did_prof_dump_open; 4 | 5 | static int 6 | prof_dump_open_intercept(bool propagate_err, const char *filename) { 7 | int fd; 8 | 9 | did_prof_dump_open = true; 10 | 11 | fd = open("/dev/null", O_WRONLY); 12 | assert_d_ne(fd, -1, "Unexpected open() failure"); 13 | 14 | return fd; 15 | } 16 | 17 | TEST_BEGIN(test_idump) { 18 | bool active; 19 | void *p; 20 | 21 | test_skip_if(!config_prof); 22 | 23 | active = true; 24 | assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, 25 | sizeof(active)), 0, 26 | "Unexpected mallctl failure while activating profiling"); 27 | 28 | prof_dump_open = prof_dump_open_intercept; 29 | 30 | did_prof_dump_open = false; 31 | p = mallocx(1, 0); 32 | assert_ptr_not_null(p, "Unexpected mallocx() failure"); 33 | dallocx(p, 0); 34 | assert_true(did_prof_dump_open, "Expected a profile 
dump"); 35 | } 36 | TEST_END 37 | 38 | int 39 | main(void) { 40 | return test( 41 | test_idump); 42 | } 43 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_idump.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | export MALLOC_CONF="tcache:false" 4 | if [ "x${enable_prof}" = "x1" ] ; then 5 | export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0" 6 | fi 7 | 8 | 9 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_log.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_prof}" = "x1" ] ; then 4 | export MALLOC_CONF="prof:true,lg_prof_sample:0" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_reset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_prof}" = "x1" ] ; then 4 | export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_tctx.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | TEST_BEGIN(test_prof_realloc) { 4 | tsdn_t *tsdn; 5 | int flags; 6 | void *p, *q; 7 | prof_tctx_t *tctx_p, *tctx_q; 8 | uint64_t curobjs_0, curobjs_1, curobjs_2, curobjs_3; 9 | 10 | test_skip_if(!config_prof); 11 | 12 | tsdn = tsdn_fetch(); 13 | flags = MALLOCX_TCACHE_NONE; 14 | 15 | prof_cnt_all(&curobjs_0, NULL, NULL, NULL); 16 | p = mallocx(1024, flags); 17 | assert_ptr_not_null(p, "Unexpected mallocx() failure"); 18 | tctx_p = prof_tctx_get(tsdn, p, NULL); 19 | assert_ptr_ne(tctx_p, (prof_tctx_t *)(uintptr_t)1U, 20 | 
"Expected valid tctx"); 21 | prof_cnt_all(&curobjs_1, NULL, NULL, NULL); 22 | assert_u64_eq(curobjs_0 + 1, curobjs_1, 23 | "Allocation should have increased sample size"); 24 | 25 | q = rallocx(p, 2048, flags); 26 | assert_ptr_ne(p, q, "Expected move"); 27 | /* Fix: validate the rallocx() result q, not the stale p (p is always non-NULL here and is invalid after a move); also correct the "rmallocx" typo in the message. */ assert_ptr_not_null(q, "Unexpected rallocx() failure"); 28 | tctx_q = prof_tctx_get(tsdn, q, NULL); 29 | assert_ptr_ne(tctx_q, (prof_tctx_t *)(uintptr_t)1U, 30 | "Expected valid tctx"); 31 | prof_cnt_all(&curobjs_2, NULL, NULL, NULL); 32 | assert_u64_eq(curobjs_1, curobjs_2, 33 | "Reallocation should not have changed sample size"); 34 | 35 | dallocx(q, flags); 36 | prof_cnt_all(&curobjs_3, NULL, NULL, NULL); 37 | assert_u64_eq(curobjs_0, curobjs_3, 38 | "Sample size should have returned to base level"); 39 | } 40 | TEST_END 41 | 42 | int 43 | main(void) { 44 | return test_no_reentrancy( 45 | test_prof_realloc); 46 | } 47 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_tctx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_prof}" = "x1" ] ; then 4 | export MALLOC_CONF="prof:true,lg_prof_sample:0" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/prof_thread_name.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_prof}" = "x1" ] ; then 4 | export MALLOC_CONF="prof:true,prof_active:false" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/safety_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_prof}" = "x1" ] ; then 4 | export MALLOC_CONF="prof:true,lg_prof_sample:0" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/sc.c: 
-------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | TEST_BEGIN(test_update_slab_size) { 4 | sc_data_t data; 5 | memset(&data, 0, sizeof(data)); 6 | sc_data_init(&data); 7 | sc_t *tiny = &data.sc[0]; 8 | size_t tiny_size = (ZU(1) << tiny->lg_base) 9 | + (ZU(tiny->ndelta) << tiny->lg_delta); 10 | size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1; 11 | sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big); 12 | assert_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages"); 13 | 14 | sc_data_update_slab_size(&data, 1, 10 * PAGE, 1); 15 | for (int i = 0; i < data.nbins; i++) { 16 | sc_t *sc = &data.sc[i]; 17 | size_t reg_size = (ZU(1) << sc->lg_base) 18 | + (ZU(sc->ndelta) << sc->lg_delta); 19 | if (reg_size <= PAGE) { 20 | assert_d_eq(sc->pgs, 1, "Ignored valid page size hint"); 21 | } else { 22 | assert_d_gt(sc->pgs, 1, 23 | "Allowed invalid page size hint"); 24 | } 25 | } 26 | } 27 | TEST_END 28 | 29 | int 30 | main(void) { 31 | return test( 32 | test_update_slab_size); 33 | } 34 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/seq.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #include "jemalloc/internal/seq.h" 4 | 5 | typedef struct data_s data_t; 6 | struct data_s { 7 | int arr[10]; 8 | }; 9 | 10 | static void 11 | set_data(data_t *data, int num) { 12 | for (int i = 0; i < 10; i++) { 13 | data->arr[i] = num; 14 | } 15 | } 16 | 17 | static void 18 | assert_data(data_t *data) { 19 | int num = data->arr[0]; 20 | for (int i = 0; i < 10; i++) { 21 | assert_d_eq(num, data->arr[i], "Data consistency error"); 22 | } 23 | } 24 | 25 | seq_define(data_t, data) 26 | 27 | typedef struct thd_data_s thd_data_t; 28 | struct thd_data_s { 29 | seq_data_t data; 30 | }; 31 | 32 | static void * 33 | seq_reader_thd(void *arg) 
{ 34 | thd_data_t *thd_data = (thd_data_t *)arg; 35 | int iter = 0; 36 | data_t local_data; 37 | while (iter < 1000 * 1000 - 1) { 38 | bool success = seq_try_load_data(&local_data, &thd_data->data); 39 | if (success) { 40 | assert_data(&local_data); 41 | assert_d_le(iter, local_data.arr[0], 42 | "Seq read went back in time."); 43 | iter = local_data.arr[0]; 44 | } 45 | } 46 | return NULL; 47 | } 48 | 49 | static void * 50 | seq_writer_thd(void *arg) { 51 | thd_data_t *thd_data = (thd_data_t *)arg; 52 | data_t local_data; 53 | memset(&local_data, 0, sizeof(local_data)); 54 | for (int i = 0; i < 1000 * 1000; i++) { 55 | set_data(&local_data, i); 56 | seq_store_data(&thd_data->data, &local_data); 57 | } 58 | return NULL; 59 | } 60 | 61 | TEST_BEGIN(test_seq_threaded) { 62 | thd_data_t thd_data; 63 | memset(&thd_data, 0, sizeof(thd_data)); 64 | 65 | thd_t reader; 66 | thd_t writer; 67 | 68 | thd_create(&reader, seq_reader_thd, &thd_data); 69 | thd_create(&writer, seq_writer_thd, &thd_data); 70 | 71 | thd_join(reader, NULL); 72 | thd_join(writer, NULL); 73 | } 74 | TEST_END 75 | 76 | TEST_BEGIN(test_seq_simple) { 77 | data_t data; 78 | seq_data_t seq; 79 | memset(&seq, 0, sizeof(seq)); 80 | for (int i = 0; i < 1000 * 1000; i++) { 81 | set_data(&data, i); 82 | seq_store_data(&seq, &data); 83 | set_data(&data, 0); 84 | bool success = seq_try_load_data(&data, &seq); 85 | assert_b_eq(success, true, "Failed non-racing read"); 86 | assert_data(&data); 87 | } 88 | } 89 | TEST_END 90 | 91 | int main(void) { 92 | return test_no_reentrancy( 93 | test_seq_simple, 94 | test_seq_threaded); 95 | } 96 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/slab.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | TEST_BEGIN(test_arena_slab_regind) { 4 | szind_t binind; 5 | 6 | for (binind = 0; binind < SC_NBINS; binind++) { 7 | size_t regind; 8 | extent_t 
slab; 9 | const bin_info_t *bin_info = &bin_infos[binind]; 10 | extent_init(&slab, NULL, mallocx(bin_info->slab_size, 11 | MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true, 12 | binind, 0, extent_state_active, false, true, true, 13 | EXTENT_NOT_HEAD); 14 | assert_ptr_not_null(extent_addr_get(&slab), 15 | "Unexpected malloc() failure"); 16 | for (regind = 0; regind < bin_info->nregs; regind++) { 17 | void *reg = (void *)((uintptr_t)extent_addr_get(&slab) + 18 | (bin_info->reg_size * regind)); 19 | assert_zu_eq(arena_slab_regind(&slab, binind, reg), 20 | regind, 21 | "Incorrect region index computed for size %zu", 22 | bin_info->reg_size); 23 | } 24 | free(extent_addr_get(&slab)); 25 | } 26 | } 27 | TEST_END 28 | 29 | int 30 | main(void) { 31 | return test( 32 | test_arena_slab_regind); 33 | } 34 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/smoothstep.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | static const uint64_t smoothstep_tab[] = { 4 | #define STEP(step, h, x, y) \ 5 | h, 6 | SMOOTHSTEP 7 | #undef STEP 8 | }; 9 | 10 | TEST_BEGIN(test_smoothstep_integral) { 11 | uint64_t sum, min, max; 12 | unsigned i; 13 | 14 | /* 15 | * The integral of smoothstep in the [0..1] range equals 1/2. Verify 16 | * that the fixed point representation's integral is no more than 17 | * rounding error distant from 1/2. Regarding rounding, each table 18 | * element is rounded down to the nearest fixed point value, so the 19 | * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps. 
20 | */ 21 | sum = 0; 22 | for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { 23 | sum += smoothstep_tab[i]; 24 | } 25 | 26 | max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1); 27 | min = max - SMOOTHSTEP_NSTEPS; 28 | 29 | assert_u64_ge(sum, min, 30 | "Integral too small, even accounting for truncation"); 31 | assert_u64_le(sum, max, "Integral exceeds 1/2"); 32 | if (false) { 33 | malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n", 34 | max - sum, SMOOTHSTEP_NSTEPS); 35 | } 36 | } 37 | TEST_END 38 | 39 | TEST_BEGIN(test_smoothstep_monotonic) { 40 | uint64_t prev_h; 41 | unsigned i; 42 | 43 | /* 44 | * The smoothstep function is monotonic in [0..1], i.e. its slope is 45 | * non-negative. In practice we want to parametrize table generation 46 | * such that piecewise slope is greater than zero, but do not require 47 | * that here. 48 | */ 49 | prev_h = 0; 50 | for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { 51 | uint64_t h = smoothstep_tab[i]; 52 | assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i); 53 | prev_h = h; 54 | } 55 | assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1], 56 | (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1"); 57 | } 58 | TEST_END 59 | 60 | TEST_BEGIN(test_smoothstep_slope) { 61 | uint64_t prev_h, prev_delta; 62 | unsigned i; 63 | 64 | /* 65 | * The smoothstep slope strictly increases until x=0.5, and then 66 | * strictly decreases until x=1.0. Verify the slightly weaker 67 | * requirement of monotonicity, so that inadequate table precision does 68 | * not cause false test failures. 
69 | */ 70 | prev_h = 0; 71 | prev_delta = 0; 72 | for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) { 73 | uint64_t h = smoothstep_tab[i]; 74 | uint64_t delta = h - prev_h; 75 | assert_u64_ge(delta, prev_delta, 76 | "Slope must monotonically increase in 0.0 <= x <= 0.5, " 77 | "i=%u", i); 78 | prev_h = h; 79 | prev_delta = delta; 80 | } 81 | 82 | prev_h = KQU(1) << SMOOTHSTEP_BFP; 83 | prev_delta = 0; 84 | for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) { 85 | uint64_t h = smoothstep_tab[i]; 86 | uint64_t delta = prev_h - h; 87 | assert_u64_ge(delta, prev_delta, 88 | "Slope must monotonically decrease in 0.5 <= x <= 1.0, " 89 | "i=%u", i); 90 | prev_h = h; 91 | prev_delta = delta; 92 | } 93 | } 94 | TEST_END 95 | 96 | int 97 | main(void) { 98 | return test( 99 | test_smoothstep_integral, 100 | test_smoothstep_monotonic, 101 | test_smoothstep_slope); 102 | } 103 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/spin.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #include "jemalloc/internal/spin.h" 4 | 5 | TEST_BEGIN(test_spin) { 6 | spin_t spinner = SPIN_INITIALIZER; 7 | 8 | for (unsigned i = 0; i < 100; i++) { 9 | spin_adaptive(&spinner); 10 | } 11 | } 12 | TEST_END 13 | 14 | int 15 | main(void) { 16 | return test( 17 | test_spin); 18 | } 19 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/test_hooks.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | static bool hook_called = false; 4 | 5 | static void 6 | hook() { 7 | hook_called = true; 8 | } 9 | 10 | static int 11 | func_to_hook(int arg1, int arg2) { 12 | return arg1 + arg2; 13 | } 14 | 15 | #define func_to_hook JEMALLOC_HOOK(func_to_hook, test_hooks_libc_hook) 16 | 17 | TEST_BEGIN(unhooked_call) { 18 | 
test_hooks_libc_hook = NULL; 19 | hook_called = false; 20 | assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); 21 | assert_false(hook_called, "Nulling out hook didn't take."); 22 | } 23 | TEST_END 24 | 25 | TEST_BEGIN(hooked_call) { 26 | test_hooks_libc_hook = &hook; 27 | hook_called = false; 28 | assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); 29 | assert_true(hook_called, "Hook should have executed."); 30 | } 31 | TEST_END 32 | 33 | int 34 | main(void) { 35 | return test( 36 | unhooked_call, 37 | hooked_call); 38 | } 39 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/ticker.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | #include "jemalloc/internal/ticker.h" 4 | 5 | TEST_BEGIN(test_ticker_tick) { 6 | #define NREPS 2 7 | #define NTICKS 3 8 | ticker_t ticker; 9 | int32_t i, j; 10 | 11 | ticker_init(&ticker, NTICKS); 12 | for (i = 0; i < NREPS; i++) { 13 | for (j = 0; j < NTICKS; j++) { 14 | assert_u_eq(ticker_read(&ticker), NTICKS - j, 15 | "Unexpected ticker value (i=%d, j=%d)", i, j); 16 | assert_false(ticker_tick(&ticker), 17 | "Unexpected ticker fire (i=%d, j=%d)", i, j); 18 | } 19 | assert_u32_eq(ticker_read(&ticker), 0, 20 | "Expected ticker depletion"); 21 | assert_true(ticker_tick(&ticker), 22 | "Expected ticker fire (i=%d)", i); 23 | assert_u32_eq(ticker_read(&ticker), NTICKS, 24 | "Expected ticker reset"); 25 | } 26 | #undef NTICKS 27 | } 28 | TEST_END 29 | 30 | TEST_BEGIN(test_ticker_ticks) { 31 | #define NTICKS 3 32 | ticker_t ticker; 33 | 34 | ticker_init(&ticker, NTICKS); 35 | 36 | assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); 37 | assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire"); 38 | assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value"); 39 | assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire"); 40 | 
assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); 41 | 42 | assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire"); 43 | assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); 44 | #undef NTICKS 45 | } 46 | TEST_END 47 | 48 | TEST_BEGIN(test_ticker_copy) { 49 | #define NTICKS 3 50 | ticker_t ta, tb; 51 | 52 | ticker_init(&ta, NTICKS); 53 | ticker_copy(&tb, &ta); 54 | assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); 55 | assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire"); 56 | assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); 57 | 58 | ticker_tick(&ta); 59 | ticker_copy(&tb, &ta); 60 | assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value"); 61 | assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire"); 62 | assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); 63 | #undef NTICKS 64 | } 65 | TEST_END 66 | 67 | int 68 | main(void) { 69 | return test( 70 | test_ticker_tick, 71 | test_ticker_ticks, 72 | test_ticker_copy); 73 | } 74 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/zero.c: -------------------------------------------------------------------------------- 1 | #include "test/jemalloc_test.h" 2 | 3 | static void 4 | test_zero(size_t sz_min, size_t sz_max) { 5 | uint8_t *s; 6 | size_t sz_prev, sz, i; 7 | #define MAGIC ((uint8_t)0x61) 8 | 9 | sz_prev = 0; 10 | s = (uint8_t *)mallocx(sz_min, 0); 11 | assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); 12 | 13 | for (sz = sallocx(s, 0); sz <= sz_max; 14 | sz_prev = sz, sz = sallocx(s, 0)) { 15 | if (sz_prev > 0) { 16 | assert_u_eq(s[0], MAGIC, 17 | "Previously allocated byte %zu/%zu is corrupted", 18 | ZU(0), sz_prev); 19 | assert_u_eq(s[sz_prev-1], MAGIC, 20 | "Previously allocated byte %zu/%zu is corrupted", 21 | sz_prev-1, sz_prev); 22 | } 23 | 24 | for (i = sz_prev; i < sz; i++) { 25 | assert_u_eq(s[i], 0x0, 
26 | "Newly allocated byte %zu/%zu isn't zero-filled", 27 | i, sz); 28 | s[i] = MAGIC; 29 | } 30 | 31 | if (xallocx(s, sz+1, 0, 0) == sz) { 32 | s = (uint8_t *)rallocx(s, sz+1, 0); 33 | assert_ptr_not_null((void *)s, 34 | "Unexpected rallocx() failure"); 35 | } 36 | } 37 | 38 | dallocx(s, 0); 39 | #undef MAGIC 40 | } 41 | 42 | TEST_BEGIN(test_zero_small) { 43 | test_skip_if(!config_fill); 44 | test_zero(1, SC_SMALL_MAXCLASS - 1); 45 | } 46 | TEST_END 47 | 48 | TEST_BEGIN(test_zero_large) { 49 | test_skip_if(!config_fill); 50 | test_zero(SC_SMALL_MAXCLASS + 1, 1U << (SC_LG_LARGE_MINCLASS + 1)); 51 | } 52 | TEST_END 53 | 54 | int 55 | main(void) { 56 | return test( 57 | test_zero_small, 58 | test_zero_large); 59 | } 60 | -------------------------------------------------------------------------------- /jemalloc-5.2.1/test/unit/zero.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "x${enable_fill}" = "x1" ] ; then 4 | export MALLOC_CONF="abort:false,junk:false,zero:true" 5 | fi 6 | -------------------------------------------------------------------------------- /jemalloc.go: -------------------------------------------------------------------------------- 1 | package jemalloc 2 | 3 | // #cgo CFLAGS: -I. 
-std=gnu99 4 | // #cgo CPPFLAGS: -D_REENTRANT 5 | // #cgo linux CPPFLAGS: -D_GNU_SOURCE 6 | // #cgo LDFLAGS: -lm 7 | // #cgo linux LDFLAGS: -lrt -ldl 8 | // #include 9 | import "C" 10 | 11 | import "unsafe" 12 | 13 | func Calloc(count, size int) unsafe.Pointer { 14 | return C.je_calloc(C.size_t(count), C.size_t(size)) 15 | } 16 | 17 | func Malloc(size int) unsafe.Pointer { 18 | return C.je_malloc(C.size_t(size)) 19 | } 20 | 21 | func Valloc(size int) unsafe.Pointer { 22 | return C.je_valloc(C.size_t(size)) 23 | } 24 | 25 | func Realloc(ptr unsafe.Pointer, size int) unsafe.Pointer { 26 | return C.je_realloc(ptr, C.size_t(size)) 27 | } 28 | 29 | func Free(ptr unsafe.Pointer) { 30 | C.je_free(ptr) 31 | } 32 | -------------------------------------------------------------------------------- /jemalloc_test.go: -------------------------------------------------------------------------------- 1 | package jemalloc_test 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | "unsafe" 7 | 8 | jemalloc "github.com/spinlock/jemalloc-go" 9 | ) 10 | 11 | func toBytes(ptr unsafe.Pointer, size int) []byte { 12 | p := &reflect.SliceHeader{} 13 | p.Data = uintptr(ptr) 14 | p.Len = size 15 | p.Cap = size 16 | return *(*[]byte)(unsafe.Pointer(p)) 17 | } 18 | 19 | func TestMalloc(t *testing.T) { 20 | p1 := jemalloc.Malloc(100) 21 | if p1 == nil { 22 | t.Fatalf("malloc failed") 23 | } 24 | b1 := toBytes(p1, 100) 25 | for i := 0; i < 100; i++ { 26 | b1[i] = byte(i) 27 | } 28 | 29 | p2 := jemalloc.Realloc(p1, 200) 30 | if p2 == nil { 31 | t.Fatalf("realloc failed") 32 | } 33 | b2 := toBytes(p2, 200) 34 | for i := 0; i < 100; i++ { 35 | if b2[i] != byte(i) { 36 | t.Fatalf("realloc failed") 37 | } 38 | } 39 | jemalloc.Free(p2) 40 | } 41 | --------------------------------------------------------------------------------