├── .gitignore
├── COPYING
├── ChangeLog
├── INSTALL
├── Makefile.in
├── README
├── VERSION
├── autogen.sh
├── bin
│   ├── jemalloc.sh
│   ├── jemalloc.sh.in
│   └── pprof
├── config.guess
├── config.stamp.in
├── config.sub
├── configure
├── configure.ac
├── doc
│   ├── html.xsl.in
│   ├── jemalloc.3
│   ├── jemalloc.html
│   ├── jemalloc.xml.in
│   ├── manpages.xsl.in
│   └── stylesheet.xsl
├── include
│   ├── jemalloc
│   │   ├── internal
│   │   │   ├── arena.h
│   │   │   ├── atomic.h
│   │   │   ├── base.h
│   │   │   ├── bitmap.h
│   │   │   ├── chunk.h
│   │   │   ├── chunk_dss.h
│   │   │   ├── chunk_mmap.h
│   │   │   ├── ckh.h
│   │   │   ├── ctl.h
│   │   │   ├── extent.h
│   │   │   ├── hash.h
│   │   │   ├── huge.h
│   │   │   ├── jemalloc_internal.h.in
│   │   │   ├── mb.h
│   │   │   ├── mutex.h
│   │   │   ├── private_namespace.h
│   │   │   ├── prng.h
│   │   │   ├── prof.h
│   │   │   ├── ql.h
│   │   │   ├── qr.h
│   │   │   ├── quarantine.h
│   │   │   ├── rb.h
│   │   │   ├── rtree.h
│   │   │   ├── size_classes.sh
│   │   │   ├── stats.h
│   │   │   ├── tcache.h
│   │   │   ├── tsd.h
│   │   │   └── util.h
│   │   ├── jemalloc.h.in
│   │   └── jemalloc_defs.h.in
│   └── msvc_compat
│       ├── inttypes.h
│       ├── stdbool.h
│       ├── stdint.h
│       └── strings.h
├── install-sh
├── src
│   ├── arena.c
│   ├── atomic.c
│   ├── base.c
│   ├── bitmap.c
│   ├── chunk.c
│   ├── chunk_dss.c
│   ├── chunk_mmap.c
│   ├── ckh.c
│   ├── ctl.c
│   ├── extent.c
│   ├── hash.c
│   ├── huge.c
│   ├── jemalloc.c
│   ├── mb.c
│   ├── mutex.c
│   ├── prof.c
│   ├── quarantine.c
│   ├── rtree.c
│   ├── stats.c
│   ├── tcache.c
│   ├── tsd.c
│   ├── util.c
│   └── zone.c
└── test
    ├── ALLOCM_ARENA.c
    ├── ALLOCM_ARENA.exp
    ├── aligned_alloc.c
    ├── aligned_alloc.exp
    ├── allocated.c
    ├── allocated.exp
    ├── allocm.c
    ├── allocm.exp
    ├── bitmap.c
    ├── bitmap.exp
    ├── jemalloc_test.h.in
    ├── mremap.c
    ├── mremap.exp
    ├── posix_memalign.c
    ├── posix_memalign.exp
    ├── rallocm.c
    ├── rallocm.exp
    ├── thread_arena.c
    ├── thread_arena.exp
    ├── thread_tcache_enabled.c
    └── thread_tcache_enabled.exp

/.gitignore:
--------------------------------------------------------------------------------
 1 | /autom4te.cache/
 2 | /config.stamp
 3 | /config.log
 4 | /config.status
 5 | /configure
 6 | /doc/html.xsl
 7 | /doc/manpages.xsl
 8 | /doc/jemalloc.xml
 9 | /doc/jemalloc.html
10 | /doc/jemalloc.3
11 | /lib/
12 | /Makefile
13 | /include/jemalloc/internal/jemalloc_internal\.h
14 | /include/jemalloc/internal/size_classes\.h
15 | /include/jemalloc/jemalloc\.h
16 | /include/jemalloc/jemalloc_defs\.h
17 | /test/jemalloc_test\.h
18 | /src/*.[od]
19 | /test/*.[od]
20 | /test/*.out
21 | /test/[a-zA-Z_]*
22 | !test/*.c
23 | !test/*.exp
24 | /VERSION
25 | /bin/jemalloc.sh
26 | 
--------------------------------------------------------------------------------
/COPYING:
--------------------------------------------------------------------------------
 1 | Unless otherwise specified, files in the jemalloc source distribution are
 2 | subject to the following license:
 3 | --------------------------------------------------------------------------------
 4 | Copyright (C) 2002-2013 Jason Evans <jasone@canonware.com>.
 5 | All rights reserved.
 6 | Copyright (C) 2007-2012 Mozilla Foundation.  All rights reserved.
 7 | Copyright (C) 2009-2013 Facebook, Inc.  All rights reserved.
 8 | 
 9 | Redistribution and use in source and binary forms, with or without
10 | modification, are permitted provided that the following conditions are met:
11 | 1. Redistributions of source code must retain the above copyright notice(s),
12 |    this list of conditions and the following disclaimer.
13 | 2. Redistributions in binary form must reproduce the above copyright notice(s),
14 |    this list of conditions and the following disclaimer in the documentation
15 |    and/or other materials provided with the distribution.
16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS 18 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 19 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, 21 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 23 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 24 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 25 | OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 26 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | -------------------------------------------------------------------------------- 28 | -------------------------------------------------------------------------------- /Makefile.in: -------------------------------------------------------------------------------- 1 | # Clear out all vpaths, then set just one (default vpath) for the main build 2 | # directory. 3 | vpath 4 | vpath % . 5 | 6 | # Clear the default suffixes, so that built-in rules are not used. 7 | .SUFFIXES : 8 | 9 | SHELL := /bin/sh 10 | 11 | CC := @CC@ 12 | 13 | # Configuration parameters. 14 | DESTDIR = 15 | BINDIR := $(DESTDIR)@BINDIR@ 16 | INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@ 17 | LIBDIR := $(DESTDIR)@LIBDIR@ 18 | DATADIR := $(DESTDIR)@DATADIR@ 19 | MANDIR := $(DESTDIR)@MANDIR@ 20 | srcroot := @srcroot@ 21 | objroot := @objroot@ 22 | abs_srcroot := @abs_srcroot@ 23 | abs_objroot := @abs_objroot@ 24 | 25 | # Build parameters. 26 | CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include 27 | CFLAGS := @CFLAGS@ 28 | LDFLAGS := @LDFLAGS@ 29 | EXTRA_LDFLAGS := @EXTRA_LDFLAGS@ 30 | LIBS := @LIBS@ 31 | RPATH_EXTRA := @RPATH_EXTRA@ 32 | SO := @so@ 33 | IMPORTLIB := @importlib@ 34 | O := @o@ 35 | A := @a@ 36 | EXE := @exe@ 37 | LIBPREFIX := @libprefix@ 38 | REV := @rev@ 39 | install_suffix := @install_suffix@ 40 | ABI := @abi@ 41 | XSLTPROC := @XSLTPROC@ 42 | AUTOCONF := @AUTOCONF@ 43 | _RPATH = @RPATH@ 44 | RPATH = $(if $(1),$(call _RPATH,$(1))) 45 | cfghdrs_in := @cfghdrs_in@ 46 | cfghdrs_out := @cfghdrs_out@ 47 | cfgoutputs_in := @cfgoutputs_in@ 48 | cfgoutputs_out := @cfgoutputs_out@ 49 | enable_autogen := @enable_autogen@ 50 | enable_experimental := @enable_experimental@ 51 | enable_zone_allocator := @enable_zone_allocator@ 52 | DSO_LDFLAGS = @DSO_LDFLAGS@ 53 | SOREV = @SOREV@ 54 | PIC_CFLAGS = @PIC_CFLAGS@ 55 | CTARGET = @CTARGET@ 56 | LDTARGET = @LDTARGET@ 57 | MKLIB = @MKLIB@ 58 | CC_MM = @CC_MM@ 59 | 60 | ifeq (macho, $(ABI)) 61 | TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" 62 | else 63 | ifeq (pecoff, $(ABI)) 64 | TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib" 65 | else 66 | TEST_LIBRARY_PATH := 67 | endif 68 | endif 69 | 70 | LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) 71 | 72 | # Lists of files. 
73 | BINS := $(srcroot)bin/pprof $(objroot)bin/jemalloc.sh 74 | CHDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h \ 75 | $(objroot)include/jemalloc/jemalloc_defs$(install_suffix).h 76 | CSRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c $(srcroot)src/atomic.c \ 77 | $(srcroot)src/base.c $(srcroot)src/bitmap.c $(srcroot)src/chunk.c \ 78 | $(srcroot)src/chunk_dss.c $(srcroot)src/chunk_mmap.c \ 79 | $(srcroot)src/ckh.c $(srcroot)src/ctl.c $(srcroot)src/extent.c \ 80 | $(srcroot)src/hash.c $(srcroot)src/huge.c $(srcroot)src/mb.c \ 81 | $(srcroot)src/mutex.c $(srcroot)src/prof.c $(srcroot)src/quarantine.c \ 82 | $(srcroot)src/rtree.c $(srcroot)src/stats.c $(srcroot)src/tcache.c \ 83 | $(srcroot)src/util.c $(srcroot)src/tsd.c 84 | ifeq ($(enable_zone_allocator), 1) 85 | CSRCS += $(srcroot)src/zone.c 86 | endif 87 | ifeq ($(IMPORTLIB),$(SO)) 88 | STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) 89 | endif 90 | ifdef PIC_CFLAGS 91 | STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A) 92 | else 93 | STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A) 94 | endif 95 | DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) 96 | ifneq ($(SOREV),$(SO)) 97 | DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) 98 | endif 99 | MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 100 | DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml 101 | DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html) 102 | DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3) 103 | DOCS := $(DOCS_HTML) $(DOCS_MAN3) 104 | CTESTS := $(srcroot)test/aligned_alloc.c $(srcroot)test/allocated.c \ 105 | $(srcroot)test/ALLOCM_ARENA.c $(srcroot)test/bitmap.c \ 106 | $(srcroot)test/mremap.c $(srcroot)test/posix_memalign.c \ 107 | $(srcroot)test/thread_arena.c $(srcroot)test/thread_tcache_enabled.c 108 | ifeq ($(enable_experimental), 1) 109 | CTESTS += $(srcroot)test/allocm.c $(srcroot)test/rallocm.c 110 | endif 111 | 112 | COBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.$(O)) 113 | CPICOBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) 114 | CTESTOBJS := $(CTESTS:$(srcroot)%.c=$(objroot)%.$(O)) 115 | 116 | .PHONY: all dist build_doc_html build_doc_man build_doc 117 | .PHONY: install_bin install_include install_lib 118 | .PHONY: install_doc_html install_doc_man install_doc install 119 | .PHONY: tests check clean distclean relclean 120 | 121 | .SECONDARY : $(CTESTOBJS) 122 | 123 | # Default target. 124 | all: build 125 | 126 | dist: build_doc 127 | 128 | $(srcroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl 129 | $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $< 130 | 131 | $(srcroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl 132 | $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $< 133 | 134 | build_doc_html: $(DOCS_HTML) 135 | build_doc_man: $(DOCS_MAN3) 136 | build_doc: $(DOCS) 137 | 138 | # 139 | # Include generated dependency files. 
140 | # 141 | ifdef CC_MM 142 | -include $(COBJS:%.$(O)=%.d) 143 | -include $(CPICOBJS:%.$(O)=%.d) 144 | -include $(CTESTOBJS:%.$(O)=%.d) 145 | endif 146 | 147 | $(COBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c 148 | $(CPICOBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c 149 | $(CPICOBJS): CFLAGS += $(PIC_CFLAGS) 150 | $(CTESTOBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c 151 | $(CTESTOBJS): CPPFLAGS += -I$(objroot)test 152 | ifneq ($(IMPORTLIB),$(SO)) 153 | $(COBJS): CPPFLAGS += -DDLLEXPORT 154 | endif 155 | 156 | ifndef CC_MM 157 | # Dependencies 158 | HEADER_DIRS = $(srcroot)include/jemalloc/internal \ 159 | $(objroot)include/jemalloc $(objroot)include/jemalloc/internal 160 | HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)) 161 | $(COBJS) $(CPICOBJS) $(CTESTOBJS): $(HEADERS) 162 | $(CTESTOBJS): $(objroot)test/jemalloc_test.h 163 | endif 164 | 165 | $(COBJS) $(CPICOBJS) $(CTESTOBJS): %.$(O): 166 | @mkdir -p $(@D) 167 | $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< 168 | ifdef CC_MM 169 | @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< 170 | endif 171 | 172 | ifneq ($(SOREV),$(SO)) 173 | %.$(SO) : %.$(SOREV) 174 | @mkdir -p $(@D) 175 | ln -sf $( $(objroot)$${t}.out 2>&1; \ 260 | if test -e "$(srcroot)$${t}.exp"; then \ 261 | diff -w -u $(srcroot)$${t}.exp \ 262 | $(objroot)$${t}.out >/dev/null 2>&1; \ 263 | fail=$$?; \ 264 | if test "$${fail}" -eq "1" ; then \ 265 | failures=`expr $${failures} + 1`; \ 266 | echo "*** FAIL ***"; \ 267 | else \ 268 | echo "pass"; \ 269 | fi; \ 270 | else \ 271 | echo "*** FAIL *** (.exp file is missing)"; \ 272 | failures=`expr $${failures} + 1`; \ 273 | fi; \ 274 | done; \ 275 | echo "========================================="; \ 276 | echo "Failures: $${failures}/$${total}"' 277 | 278 | clean: 279 | rm -f $(COBJS) 280 | rm -f $(CPICOBJS) 281 | rm -f $(COBJS:%.$(O)=%.d) 282 | rm -f $(CPICOBJS:%.$(O)=%.d) 283 | rm -f $(CTESTOBJS:%.$(O)=%$(EXE)) 284 | rm -f $(CTESTOBJS) 285 | rm -f $(CTESTOBJS:%.$(O)=%.d) 286 | rm -f $(CTESTOBJS:%.$(O)=%.out) 287 | rm -f $(DSOS) $(STATIC_LIBS) 288 | 289 | distclean: clean 290 | rm -rf $(objroot)autom4te.cache 291 | rm -f $(objroot)config.log 292 | rm -f $(objroot)config.status 293 | rm -f $(objroot)config.stamp 294 | rm -f $(cfghdrs_out) 295 | rm -f $(cfgoutputs_out) 296 | 297 | relclean: distclean 298 | rm -f $(objroot)configure 299 | rm -f $(srcroot)VERSION 300 | rm -f $(DOCS_HTML) 301 | rm -f $(DOCS_MAN3) 302 | 303 | #=============================================================================== 304 | # Re-configuration rules. 305 | 306 | ifeq ($(enable_autogen), 1) 307 | $(srcroot)configure : $(srcroot)configure.ac 308 | cd ./$(srcroot) && $(AUTOCONF) 309 | 310 | $(objroot)config.status : $(srcroot)configure 311 | ./$(objroot)config.status --recheck 312 | 313 | $(srcroot)config.stamp.in : $(srcroot)configure.ac 314 | echo stamp > $(srcroot)config.stamp.in 315 | 316 | $(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure 317 | ./$(objroot)config.status 318 | @touch $@ 319 | 320 | # There must be some action in order for make to re-read Makefile when it is 321 | # out of date. 322 | $(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp 323 | @true 324 | endif 325 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | jemalloc is a general-purpose scalable concurrent malloc(3) implementation. 
2 | This distribution is a "portable" implementation that currently targets 3 | FreeBSD, Linux, Apple OS X, and MinGW. jemalloc is included as the default 4 | allocator in the FreeBSD and NetBSD operating systems, and it is used by the 5 | Mozilla Firefox web browser on Microsoft Windows-related platforms. Depending 6 | on your needs, one of the other divergent versions may suit your needs better 7 | than this distribution. 8 | 9 | The COPYING file contains copyright and licensing information. 10 | 11 | The INSTALL file contains information on how to configure, build, and install 12 | jemalloc. 13 | 14 | The ChangeLog file contains a brief summary of changes for each release. 15 | 16 | URL: http://www.canonware.com/jemalloc/ 17 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784 2 | -------------------------------------------------------------------------------- /autogen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | for i in autoconf; do 4 | echo "$i" 5 | $i 6 | if [ $? -ne 0 ]; then 7 | echo "Error $? in $i" 8 | exit 1 9 | fi 10 | done 11 | 12 | echo "./configure --enable-autogen $@" 13 | ./configure --enable-autogen $@ 14 | if [ $? -ne 0 ]; then 15 | echo "Error $? in ./configure" 16 | exit 1 17 | fi 18 | -------------------------------------------------------------------------------- /bin/jemalloc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | prefix=/usr/local 4 | exec_prefix=/usr/local 5 | libdir=${exec_prefix}/lib 6 | 7 | LD_PRELOAD=${libdir}/libjemalloc.so.1 8 | export LD_PRELOAD 9 | exec "$@" 10 | -------------------------------------------------------------------------------- /bin/jemalloc.sh.in: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | prefix=@prefix@ 4 | exec_prefix=@exec_prefix@ 5 | libdir=@libdir@ 6 | 7 | @LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@ 8 | export @LD_PRELOAD_VAR@ 9 | exec "$@" 10 | -------------------------------------------------------------------------------- /config.stamp.in: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/percona/jemalloc/041bf7010e27940851ab7efd07028416762ea370/config.stamp.in -------------------------------------------------------------------------------- /doc/html.xsl.in: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /doc/jemalloc.html: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/percona/jemalloc/041bf7010e27940851ab7efd07028416762ea370/doc/jemalloc.html -------------------------------------------------------------------------------- /doc/manpages.xsl.in: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /doc/stylesheet.xsl: -------------------------------------------------------------------------------- 1 | 2 | ansi 3 | 4 | 5 | "" 6 | 7 | 8 | -------------------------------------------------------------------------------- /include/jemalloc/internal/atomic.h: 
-------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | #endif /* JEMALLOC_H_TYPES */ 5 | /******************************************************************************/ 6 | #ifdef JEMALLOC_H_STRUCTS 7 | 8 | #endif /* JEMALLOC_H_STRUCTS */ 9 | /******************************************************************************/ 10 | #ifdef JEMALLOC_H_EXTERNS 11 | 12 | #define atomic_read_uint64(p) atomic_add_uint64(p, 0) 13 | #define atomic_read_uint32(p) atomic_add_uint32(p, 0) 14 | #define atomic_read_z(p) atomic_add_z(p, 0) 15 | #define atomic_read_u(p) atomic_add_u(p, 0) 16 | 17 | #endif /* JEMALLOC_H_EXTERNS */ 18 | /******************************************************************************/ 19 | #ifdef JEMALLOC_H_INLINES 20 | 21 | #ifndef JEMALLOC_ENABLE_INLINE 22 | uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); 23 | uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); 24 | uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); 25 | uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); 26 | size_t atomic_add_z(size_t *p, size_t x); 27 | size_t atomic_sub_z(size_t *p, size_t x); 28 | unsigned atomic_add_u(unsigned *p, unsigned x); 29 | unsigned atomic_sub_u(unsigned *p, unsigned x); 30 | #endif 31 | 32 | #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) 33 | /******************************************************************************/ 34 | /* 64-bit operations. */ 35 | #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) 36 | # ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 37 | JEMALLOC_INLINE uint64_t 38 | atomic_add_uint64(uint64_t *p, uint64_t x) 39 | { 40 | 41 | return (__sync_add_and_fetch(p, x)); 42 | } 43 | 44 | JEMALLOC_INLINE uint64_t 45 | atomic_sub_uint64(uint64_t *p, uint64_t x) 46 | { 47 | 48 | return (__sync_sub_and_fetch(p, x)); 49 | } 50 | #elif (defined(_MSC_VER)) 51 | JEMALLOC_INLINE uint64_t 52 | atomic_add_uint64(uint64_t *p, uint64_t x) 53 | { 54 | 55 | return (InterlockedExchangeAdd64(p, x)); 56 | } 57 | 58 | JEMALLOC_INLINE uint64_t 59 | atomic_sub_uint64(uint64_t *p, uint64_t x) 60 | { 61 | 62 | return (InterlockedExchangeAdd64(p, -((int64_t)x))); 63 | } 64 | #elif (defined(JEMALLOC_OSATOMIC)) 65 | JEMALLOC_INLINE uint64_t 66 | atomic_add_uint64(uint64_t *p, uint64_t x) 67 | { 68 | 69 | return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); 70 | } 71 | 72 | JEMALLOC_INLINE uint64_t 73 | atomic_sub_uint64(uint64_t *p, uint64_t x) 74 | { 75 | 76 | return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); 77 | } 78 | # elif (defined(__amd64__) || defined(__x86_64__)) 79 | JEMALLOC_INLINE uint64_t 80 | atomic_add_uint64(uint64_t *p, uint64_t x) 81 | { 82 | 83 | asm volatile ( 84 | "lock; xaddq %0, %1;" 85 | : "+r" (x), "=m" (*p) /* Outputs. */ 86 | : "m" (*p) /* Inputs. */ 87 | ); 88 | 89 | return (x); 90 | } 91 | 92 | JEMALLOC_INLINE uint64_t 93 | atomic_sub_uint64(uint64_t *p, uint64_t x) 94 | { 95 | 96 | x = (uint64_t)(-(int64_t)x); 97 | asm volatile ( 98 | "lock; xaddq %0, %1;" 99 | : "+r" (x), "=m" (*p) /* Outputs. */ 100 | : "m" (*p) /* Inputs. */ 101 | ); 102 | 103 | return (x); 104 | } 105 | # elif (defined(JEMALLOC_ATOMIC9)) 106 | JEMALLOC_INLINE uint64_t 107 | atomic_add_uint64(uint64_t *p, uint64_t x) 108 | { 109 | 110 | /* 111 | * atomic_fetchadd_64() doesn't exist, but we only ever use this 112 | * function on LP64 systems, so atomic_fetchadd_long() will do. 
113 | */ 114 | assert(sizeof(uint64_t) == sizeof(unsigned long)); 115 | 116 | return (atomic_fetchadd_long(p, (unsigned long)x) + x); 117 | } 118 | 119 | JEMALLOC_INLINE uint64_t 120 | atomic_sub_uint64(uint64_t *p, uint64_t x) 121 | { 122 | 123 | assert(sizeof(uint64_t) == sizeof(unsigned long)); 124 | 125 | return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); 126 | } 127 | # elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) 128 | JEMALLOC_INLINE uint64_t 129 | atomic_add_uint64(uint64_t *p, uint64_t x) 130 | { 131 | 132 | return (__sync_add_and_fetch(p, x)); 133 | } 134 | 135 | JEMALLOC_INLINE uint64_t 136 | atomic_sub_uint64(uint64_t *p, uint64_t x) 137 | { 138 | 139 | return (__sync_sub_and_fetch(p, x)); 140 | } 141 | # else 142 | # error "Missing implementation for 64-bit atomic operations" 143 | # endif 144 | #endif 145 | 146 | /******************************************************************************/ 147 | /* 32-bit operations. */ 148 | #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 149 | JEMALLOC_INLINE uint32_t 150 | atomic_add_uint32(uint32_t *p, uint32_t x) 151 | { 152 | 153 | return (__sync_add_and_fetch(p, x)); 154 | } 155 | 156 | JEMALLOC_INLINE uint32_t 157 | atomic_sub_uint32(uint32_t *p, uint32_t x) 158 | { 159 | 160 | return (__sync_sub_and_fetch(p, x)); 161 | } 162 | #elif (defined(_MSC_VER)) 163 | JEMALLOC_INLINE uint32_t 164 | atomic_add_uint32(uint32_t *p, uint32_t x) 165 | { 166 | 167 | return (InterlockedExchangeAdd(p, x)); 168 | } 169 | 170 | JEMALLOC_INLINE uint32_t 171 | atomic_sub_uint32(uint32_t *p, uint32_t x) 172 | { 173 | 174 | return (InterlockedExchangeAdd(p, -((int32_t)x))); 175 | } 176 | #elif (defined(JEMALLOC_OSATOMIC)) 177 | JEMALLOC_INLINE uint32_t 178 | atomic_add_uint32(uint32_t *p, uint32_t x) 179 | { 180 | 181 | return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); 182 | } 183 | 184 | JEMALLOC_INLINE uint32_t 185 | atomic_sub_uint32(uint32_t *p, uint32_t x) 186 | { 187 | 188 | return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); 189 | } 190 | #elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) 191 | JEMALLOC_INLINE uint32_t 192 | atomic_add_uint32(uint32_t *p, uint32_t x) 193 | { 194 | 195 | asm volatile ( 196 | "lock; xaddl %0, %1;" 197 | : "+r" (x), "=m" (*p) /* Outputs. */ 198 | : "m" (*p) /* Inputs. */ 199 | ); 200 | 201 | return (x); 202 | } 203 | 204 | JEMALLOC_INLINE uint32_t 205 | atomic_sub_uint32(uint32_t *p, uint32_t x) 206 | { 207 | 208 | x = (uint32_t)(-(int32_t)x); 209 | asm volatile ( 210 | "lock; xaddl %0, %1;" 211 | : "+r" (x), "=m" (*p) /* Outputs. */ 212 | : "m" (*p) /* Inputs. 
*/ 213 | ); 214 | 215 | return (x); 216 | } 217 | #elif (defined(JEMALLOC_ATOMIC9)) 218 | JEMALLOC_INLINE uint32_t 219 | atomic_add_uint32(uint32_t *p, uint32_t x) 220 | { 221 | 222 | return (atomic_fetchadd_32(p, x) + x); 223 | } 224 | 225 | JEMALLOC_INLINE uint32_t 226 | atomic_sub_uint32(uint32_t *p, uint32_t x) 227 | { 228 | 229 | return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); 230 | } 231 | #elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) 232 | JEMALLOC_INLINE uint32_t 233 | atomic_add_uint32(uint32_t *p, uint32_t x) 234 | { 235 | 236 | return (__sync_add_and_fetch(p, x)); 237 | } 238 | 239 | JEMALLOC_INLINE uint32_t 240 | atomic_sub_uint32(uint32_t *p, uint32_t x) 241 | { 242 | 243 | return (__sync_sub_and_fetch(p, x)); 244 | } 245 | #else 246 | # error "Missing implementation for 32-bit atomic operations" 247 | #endif 248 | 249 | /******************************************************************************/ 250 | /* size_t operations. */ 251 | JEMALLOC_INLINE size_t 252 | atomic_add_z(size_t *p, size_t x) 253 | { 254 | 255 | #if (LG_SIZEOF_PTR == 3) 256 | return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); 257 | #elif (LG_SIZEOF_PTR == 2) 258 | return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); 259 | #endif 260 | } 261 | 262 | JEMALLOC_INLINE size_t 263 | atomic_sub_z(size_t *p, size_t x) 264 | { 265 | 266 | #if (LG_SIZEOF_PTR == 3) 267 | return ((size_t)atomic_add_uint64((uint64_t *)p, 268 | (uint64_t)-((int64_t)x))); 269 | #elif (LG_SIZEOF_PTR == 2) 270 | return ((size_t)atomic_add_uint32((uint32_t *)p, 271 | (uint32_t)-((int32_t)x))); 272 | #endif 273 | } 274 | 275 | /******************************************************************************/ 276 | /* unsigned operations. */ 277 | JEMALLOC_INLINE unsigned 278 | atomic_add_u(unsigned *p, unsigned x) 279 | { 280 | 281 | #if (LG_SIZEOF_INT == 3) 282 | return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); 283 | #elif (LG_SIZEOF_INT == 2) 284 | return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); 285 | #endif 286 | } 287 | 288 | JEMALLOC_INLINE unsigned 289 | atomic_sub_u(unsigned *p, unsigned x) 290 | { 291 | 292 | #if (LG_SIZEOF_INT == 3) 293 | return ((unsigned)atomic_add_uint64((uint64_t *)p, 294 | (uint64_t)-((int64_t)x))); 295 | #elif (LG_SIZEOF_INT == 2) 296 | return ((unsigned)atomic_add_uint32((uint32_t *)p, 297 | (uint32_t)-((int32_t)x))); 298 | #endif 299 | } 300 | /******************************************************************************/ 301 | #endif 302 | 303 | #endif /* JEMALLOC_H_INLINES */ 304 | /******************************************************************************/ 305 | -------------------------------------------------------------------------------- /include/jemalloc/internal/base.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | #endif /* JEMALLOC_H_TYPES */ 5 | /******************************************************************************/ 6 | #ifdef JEMALLOC_H_STRUCTS 7 | 8 | #endif /* JEMALLOC_H_STRUCTS */ 9 | /******************************************************************************/ 10 | #ifdef JEMALLOC_H_EXTERNS 11 | 12 | void *base_alloc(size_t size); 13 | void *base_calloc(size_t number, size_t size); 14 | extent_node_t *base_node_alloc(void); 15 | void base_node_dealloc(extent_node_t *node); 16 | bool base_boot(void); 17 | void base_prefork(void); 18 | void 
base_postfork_parent(void); 19 | void base_postfork_child(void); 20 | 21 | #endif /* JEMALLOC_H_EXTERNS */ 22 | /******************************************************************************/ 23 | #ifdef JEMALLOC_H_INLINES 24 | 25 | #endif /* JEMALLOC_H_INLINES */ 26 | /******************************************************************************/ 27 | -------------------------------------------------------------------------------- /include/jemalloc/internal/bitmap.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ 5 | #define LG_BITMAP_MAXBITS LG_RUN_MAXREGS 6 | 7 | typedef struct bitmap_level_s bitmap_level_t; 8 | typedef struct bitmap_info_s bitmap_info_t; 9 | typedef unsigned long bitmap_t; 10 | #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG 11 | 12 | /* Number of bits per group. */ 13 | #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) 14 | #define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) 15 | #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) 16 | 17 | /* Maximum number of levels possible. */ 18 | #define BITMAP_MAX_LEVELS \ 19 | (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ 20 | + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) 21 | 22 | #endif /* JEMALLOC_H_TYPES */ 23 | /******************************************************************************/ 24 | #ifdef JEMALLOC_H_STRUCTS 25 | 26 | struct bitmap_level_s { 27 | /* Offset of this level's groups within the array of groups. */ 28 | size_t group_offset; 29 | }; 30 | 31 | struct bitmap_info_s { 32 | /* Logical number of bits in bitmap (stored at bottom level). */ 33 | size_t nbits; 34 | 35 | /* Number of levels necessary for nbits. */ 36 | unsigned nlevels; 37 | 38 | /* 39 | * Only the first (nlevels+1) elements are used, and levels are ordered 40 | * bottom to top (e.g. the bottom level is stored in levels[0]). 41 | */ 42 | bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; 43 | }; 44 | 45 | #endif /* JEMALLOC_H_STRUCTS */ 46 | /******************************************************************************/ 47 | #ifdef JEMALLOC_H_EXTERNS 48 | 49 | void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); 50 | size_t bitmap_info_ngroups(const bitmap_info_t *binfo); 51 | size_t bitmap_size(size_t nbits); 52 | void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); 53 | 54 | #endif /* JEMALLOC_H_EXTERNS */ 55 | /******************************************************************************/ 56 | #ifdef JEMALLOC_H_INLINES 57 | 58 | #ifndef JEMALLOC_ENABLE_INLINE 59 | bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); 60 | bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); 61 | void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); 62 | size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); 63 | void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); 64 | #endif 65 | 66 | #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) 67 | JEMALLOC_INLINE bool 68 | bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) 69 | { 70 | unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1; 71 | bitmap_t rg = bitmap[rgoff]; 72 | /* The bitmap is full iff the root group is 0. 
*/ 73 | return (rg == 0); 74 | } 75 | 76 | JEMALLOC_INLINE bool 77 | bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) 78 | { 79 | size_t goff; 80 | bitmap_t g; 81 | 82 | assert(bit < binfo->nbits); 83 | goff = bit >> LG_BITMAP_GROUP_NBITS; 84 | g = bitmap[goff]; 85 | return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); 86 | } 87 | 88 | JEMALLOC_INLINE void 89 | bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) 90 | { 91 | size_t goff; 92 | bitmap_t *gp; 93 | bitmap_t g; 94 | 95 | assert(bit < binfo->nbits); 96 | assert(bitmap_get(bitmap, binfo, bit) == false); 97 | goff = bit >> LG_BITMAP_GROUP_NBITS; 98 | gp = &bitmap[goff]; 99 | g = *gp; 100 | assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); 101 | g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); 102 | *gp = g; 103 | assert(bitmap_get(bitmap, binfo, bit)); 104 | /* Propagate group state transitions up the tree. */ 105 | if (g == 0) { 106 | unsigned i; 107 | for (i = 1; i < binfo->nlevels; i++) { 108 | bit = goff; 109 | goff = bit >> LG_BITMAP_GROUP_NBITS; 110 | gp = &bitmap[binfo->levels[i].group_offset + goff]; 111 | g = *gp; 112 | assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); 113 | g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); 114 | *gp = g; 115 | if (g != 0) 116 | break; 117 | } 118 | } 119 | } 120 | 121 | /* sfu: set first unset. */ 122 | JEMALLOC_INLINE size_t 123 | bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) 124 | { 125 | size_t bit; 126 | bitmap_t g; 127 | unsigned i; 128 | 129 | assert(bitmap_full(bitmap, binfo) == false); 130 | 131 | i = binfo->nlevels - 1; 132 | g = bitmap[binfo->levels[i].group_offset]; 133 | bit = ffsl(g) - 1; 134 | while (i > 0) { 135 | i--; 136 | g = bitmap[binfo->levels[i].group_offset + bit]; 137 | bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1); 138 | } 139 | 140 | bitmap_set(bitmap, binfo, bit); 141 | return (bit); 142 | } 143 | 144 | JEMALLOC_INLINE void 145 | bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) 146 | { 147 | size_t goff; 148 | bitmap_t *gp; 149 | bitmap_t g; 150 | bool propagate; 151 | 152 | assert(bit < binfo->nbits); 153 | assert(bitmap_get(bitmap, binfo, bit)); 154 | goff = bit >> LG_BITMAP_GROUP_NBITS; 155 | gp = &bitmap[goff]; 156 | g = *gp; 157 | propagate = (g == 0); 158 | assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); 159 | g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); 160 | *gp = g; 161 | assert(bitmap_get(bitmap, binfo, bit) == false); 162 | /* Propagate group state transitions up the tree. 
 */
163 |     if (propagate) {
164 |         unsigned i;
165 |         for (i = 1; i < binfo->nlevels; i++) {
166 |             bit = goff;
167 |             goff = bit >> LG_BITMAP_GROUP_NBITS;
168 |             gp = &bitmap[binfo->levels[i].group_offset + goff];
169 |             g = *gp;
170 |             propagate = (g == 0);
171 |             assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
172 |                 == 0);
173 |             g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
174 |             *gp = g;
175 |             if (propagate == false)
176 |                 break;
177 |         }
178 |     }
179 | }
180 | 
181 | #endif
182 | 
183 | #endif /* JEMALLOC_H_INLINES */
184 | /******************************************************************************/
185 | 
--------------------------------------------------------------------------------
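bitmap_sfu() above ("set first unset") finds the lowest free bit by walking the group hierarchy root-down, applying ffsl() to one word per level, so a lookup touches O(log n) words. A flat, single-level sketch of the same first-fit scan, standalone and not jemalloc's code; it keeps bitmap.h's inverted convention (1 = free, 0 = in use) so that ffsl() finds the first *available* bit directly, and it assumes ffsl() is available (a GNU/BSD extension, which jemalloc itself relies on):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>    /* memset(), and ffsl() on glibc. */

#define NGROUPS     4
#define GROUP_NBITS (sizeof(long) * 8)

static unsigned long groups[NGROUPS];

static void
bitmap_sketch_init(void)
{
    memset(groups, 0xff, sizeof(groups));    /* All bits free. */
}

/* Claim and return the lowest free bit, or -1 if the bitmap is full. */
static long
bitmap_sketch_sfu(void)
{
    size_t i;

    for (i = 0; i < NGROUPS; i++) {
        int bit = ffsl((long)groups[i]);    /* 0 if no bit set in group. */
        if (bit != 0) {
            groups[i] &= ~(1UL << (bit - 1));    /* Mark in use. */
            return ((long)(i * GROUP_NBITS + (bit - 1)));
        }
    }
    return (-1);
}

int
main(void)
{
    bitmap_sketch_init();
    printf("%ld\n", bitmap_sketch_sfu());    /* 0 */
    printf("%ld\n", bitmap_sketch_sfu());    /* 1 */
    return (0);
}

The real bitmap.h avoids this linear scan over groups: each level summarizes the one below it, so the root word always says which subtree still has a free bit.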
/include/jemalloc/internal/chunk.h:
--------------------------------------------------------------------------------
 1 | /******************************************************************************/
 2 | #ifdef JEMALLOC_H_TYPES
 3 | 
 4 | /*
 5 |  * Size and alignment of memory chunks that are allocated by the OS's virtual
 6 |  * memory system.
 7 |  */
 8 | #define LG_CHUNK_DEFAULT 22
 9 | 
10 | /* Return the chunk address for allocation address a. */
11 | #define CHUNK_ADDR2BASE(a) \
12 |     ((void *)((uintptr_t)(a) & ~chunksize_mask))
13 | 
14 | /* Return the chunk offset of address a. */
15 | #define CHUNK_ADDR2OFFSET(a) \
16 |     ((size_t)((uintptr_t)(a) & chunksize_mask))
17 | 
18 | /* Return the smallest chunk multiple that is >= s. */
19 | #define CHUNK_CEILING(s) \
20 |     (((s) + chunksize_mask) & ~chunksize_mask)
21 | 
22 | #endif /* JEMALLOC_H_TYPES */
23 | /******************************************************************************/
24 | #ifdef JEMALLOC_H_STRUCTS
25 | 
26 | #endif /* JEMALLOC_H_STRUCTS */
27 | /******************************************************************************/
28 | #ifdef JEMALLOC_H_EXTERNS
29 | 
30 | extern size_t opt_lg_chunk;
31 | extern const char *opt_dss;
32 | 
33 | /* Protects stats_chunks; currently not used for any other purpose. */
34 | extern malloc_mutex_t chunks_mtx;
35 | /* Chunk statistics. */
36 | extern chunk_stats_t stats_chunks;
37 | 
38 | extern rtree_t *chunks_rtree;
39 | 
40 | extern size_t chunksize;
41 | extern size_t chunksize_mask; /* (chunksize - 1). */
42 | extern size_t chunk_npages;
43 | extern size_t map_bias; /* Number of arena chunk header pages. */
44 | extern size_t arena_maxclass; /* Max size class for arenas. */
45 | 
46 | void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
47 |     dss_prec_t dss_prec);
48 | void chunk_unmap(void *chunk, size_t size);
49 | void chunk_dealloc(void *chunk, size_t size, bool unmap);
50 | bool chunk_boot(void);
51 | void chunk_prefork(void);
52 | void chunk_postfork_parent(void);
53 | void chunk_postfork_child(void);
54 | 
55 | #endif /* JEMALLOC_H_EXTERNS */
56 | /******************************************************************************/
57 | #ifdef JEMALLOC_H_INLINES
58 | 
59 | #endif /* JEMALLOC_H_INLINES */
60 | /******************************************************************************/
61 | 
62 | #include "jemalloc/internal/chunk_dss.h"
63 | #include "jemalloc/internal/chunk_mmap.h"
64 | 
--------------------------------------------------------------------------------
/include/jemalloc/internal/chunk_dss.h:
--------------------------------------------------------------------------------
 1 | /******************************************************************************/
 2 | #ifdef JEMALLOC_H_TYPES
 3 | 
 4 | typedef enum {
 5 |     dss_prec_disabled = 0,
 6 |     dss_prec_primary = 1,
 7 |     dss_prec_secondary = 2,
 8 | 
 9 |     dss_prec_limit = 3
10 | } dss_prec_t ;
11 | #define DSS_PREC_DEFAULT dss_prec_secondary
12 | #define DSS_DEFAULT "secondary"
13 | 
14 | #endif /* JEMALLOC_H_TYPES */
15 | /******************************************************************************/
16 | #ifdef JEMALLOC_H_STRUCTS
17 | 
18 | extern const char *dss_prec_names[];
19 | 
20 | #endif /* JEMALLOC_H_STRUCTS */
21 | /******************************************************************************/
22 | #ifdef JEMALLOC_H_EXTERNS
23 | 
24 | dss_prec_t chunk_dss_prec_get(void);
25 | bool chunk_dss_prec_set(dss_prec_t dss_prec);
26 | void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
27 | bool chunk_in_dss(void *chunk);
28 | bool chunk_dss_boot(void);
29 | void chunk_dss_prefork(void);
30 | void chunk_dss_postfork_parent(void);
31 | void chunk_dss_postfork_child(void);
32 | 
33 | #endif /* JEMALLOC_H_EXTERNS */
34 | /******************************************************************************/
35 | #ifdef JEMALLOC_H_INLINES
36 | 
37 | #endif /* JEMALLOC_H_INLINES */
38 | /******************************************************************************/
39 | 
--------------------------------------------------------------------------------
/include/jemalloc/internal/chunk_mmap.h:
--------------------------------------------------------------------------------
 1 | /******************************************************************************/
 2 | #ifdef JEMALLOC_H_TYPES
 3 | 
 4 | #endif /* JEMALLOC_H_TYPES */
 5 | /******************************************************************************/
 6 | #ifdef JEMALLOC_H_STRUCTS
 7 | 
 8 | #endif /* JEMALLOC_H_STRUCTS */
 9 | /******************************************************************************/
10 | #ifdef JEMALLOC_H_EXTERNS
11 | 
12 | bool pages_purge(void *addr, size_t length);
13 | 
14 | void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
15 | bool chunk_dealloc_mmap(void *chunk, size_t size);
16 | 
17 | #endif /* JEMALLOC_H_EXTERNS */
18 | /******************************************************************************/
19 | #ifdef JEMALLOC_H_INLINES
20 | 
21 | #endif /* JEMALLOC_H_INLINES */
22 | /******************************************************************************/
23 | 
--------------------------------------------------------------------------------
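The CHUNK_ADDR2BASE/CHUNK_ADDR2OFFSET/CHUNK_CEILING macros in chunk.h above are plain power-of-two mask arithmetic against chunksize_mask, which jemalloc derives at boot from opt_lg_chunk. A minimal standalone sketch of the same technique, hard-coding the 4 MiB LG_CHUNK_DEFAULT instead of reading the runtime variables (all names below are local to this example, not jemalloc's):

#include <stdio.h>
#include <stdint.h>

#define LG_CHUNK       22    /* LG_CHUNK_DEFAULT above: 4 MiB chunks. */
#define CHUNKSIZE      ((size_t)1 << LG_CHUNK)
#define CHUNKSIZE_MASK (CHUNKSIZE - 1)

/* Chunk base address for allocation address a (clear the low bits). */
#define ADDR2BASE(a)   ((void *)((uintptr_t)(a) & ~CHUNKSIZE_MASK))
/* Offset of address a within its chunk (keep the low bits). */
#define ADDR2OFFSET(a) ((size_t)((uintptr_t)(a) & CHUNKSIZE_MASK))
/* Smallest chunk multiple that is >= s. */
#define CEILING(s)     (((s) + CHUNKSIZE_MASK) & ~CHUNKSIZE_MASK)

int
main(void)
{
    uintptr_t a = 0x12345678;

    printf("base:   %p\n", ADDR2BASE(a));               /* 0x12000000 */
    printf("offset: %#zx\n", ADDR2OFFSET(a));           /* 0x345678 */
    printf("ceil:   %#zx\n", CEILING((size_t)5000000)); /* 0x800000 */
    return (0);
}

Because chunks are both chunk-sized and chunk-aligned, this is what lets the allocator map any interior pointer to its chunk header with a single AND.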
/include/jemalloc/internal/ckh.h:
--------------------------------------------------------------------------------
 1 | /******************************************************************************/
 2 | #ifdef JEMALLOC_H_TYPES
 3 | 
 4 | typedef struct ckh_s ckh_t;
 5 | typedef struct ckhc_s ckhc_t;
 6 | 
 7 | /* Typedefs to allow easy function pointer passing. */
 8 | typedef void ckh_hash_t (const void *, size_t[2]);
 9 | typedef bool ckh_keycomp_t (const void *, const void *);
10 | 
11 | /* Maintain counters used to get an idea of performance. */
12 | /* #define CKH_COUNT */
13 | /* Print counter values in ckh_delete() (requires CKH_COUNT). */
14 | /* #define CKH_VERBOSE */
15 | 
16 | /*
17 |  * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
18 |  * one bucket per L1 cache line.
19 |  */
20 | #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
21 | 
22 | #endif /* JEMALLOC_H_TYPES */
23 | /******************************************************************************/
24 | #ifdef JEMALLOC_H_STRUCTS
25 | 
26 | /* Hash table cell. */
27 | struct ckhc_s {
28 |     const void *key;
29 |     const void *data;
30 | };
31 | 
32 | struct ckh_s {
33 | #ifdef CKH_COUNT
34 |     /* Counters used to get an idea of performance. */
35 |     uint64_t ngrows;
36 |     uint64_t nshrinks;
37 |     uint64_t nshrinkfails;
38 |     uint64_t ninserts;
39 |     uint64_t nrelocs;
40 | #endif
41 | 
42 |     /* Used for pseudo-random number generation. */
43 | #define CKH_A 1103515241
44 | #define CKH_C 12347
45 |     uint32_t prng_state;
46 | 
47 |     /* Total number of items. */
48 |     size_t count;
49 | 
50 |     /*
51 |      * Minimum and current number of hash table buckets.  There are
52 |      * 2^LG_CKH_BUCKET_CELLS cells per bucket.
53 |      */
54 |     unsigned lg_minbuckets;
55 |     unsigned lg_curbuckets;
56 | 
57 |     /* Hash and comparison functions. */
58 |     ckh_hash_t *hash;
59 |     ckh_keycomp_t *keycomp;
60 | 
61 |     /* Hash table with 2^lg_curbuckets buckets.
*/ 62 | ckhc_t *tab; 63 | }; 64 | 65 | #endif /* JEMALLOC_H_STRUCTS */ 66 | /******************************************************************************/ 67 | #ifdef JEMALLOC_H_EXTERNS 68 | 69 | bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, 70 | ckh_keycomp_t *keycomp); 71 | void ckh_delete(ckh_t *ckh); 72 | size_t ckh_count(ckh_t *ckh); 73 | bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); 74 | bool ckh_insert(ckh_t *ckh, const void *key, const void *data); 75 | bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key, 76 | void **data); 77 | bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data); 78 | void ckh_string_hash(const void *key, size_t r_hash[2]); 79 | bool ckh_string_keycomp(const void *k1, const void *k2); 80 | void ckh_pointer_hash(const void *key, size_t r_hash[2]); 81 | bool ckh_pointer_keycomp(const void *k1, const void *k2); 82 | 83 | #endif /* JEMALLOC_H_EXTERNS */ 84 | /******************************************************************************/ 85 | #ifdef JEMALLOC_H_INLINES 86 | 87 | #endif /* JEMALLOC_H_INLINES */ 88 | /******************************************************************************/ 89 | -------------------------------------------------------------------------------- /include/jemalloc/internal/ctl.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | typedef struct ctl_node_s ctl_node_t; 5 | typedef struct ctl_named_node_s ctl_named_node_t; 6 | typedef struct ctl_indexed_node_s ctl_indexed_node_t; 7 | typedef struct ctl_arena_stats_s ctl_arena_stats_t; 8 | typedef struct ctl_stats_s ctl_stats_t; 9 | 10 | #endif /* JEMALLOC_H_TYPES */ 11 | /******************************************************************************/ 12 | #ifdef JEMALLOC_H_STRUCTS 13 | 14 | struct ctl_node_s { 15 | bool named; 16 | }; 17 | 18 | struct ctl_named_node_s { 19 | struct ctl_node_s node; 20 | const char *name; 21 | /* If (nchildren == 0), this is a terminal node. */ 22 | unsigned nchildren; 23 | const ctl_node_t *children; 24 | int (*ctl)(const size_t *, size_t, void *, size_t *, 25 | void *, size_t); 26 | }; 27 | 28 | struct ctl_indexed_node_s { 29 | struct ctl_node_s node; 30 | const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); 31 | }; 32 | 33 | struct ctl_arena_stats_s { 34 | bool initialized; 35 | unsigned nthreads; 36 | const char *dss; 37 | size_t pactive; 38 | size_t pdirty; 39 | arena_stats_t astats; 40 | 41 | /* Aggregate stats for small size classes, based on bin stats. */ 42 | size_t allocated_small; 43 | uint64_t nmalloc_small; 44 | uint64_t ndalloc_small; 45 | uint64_t nrequests_small; 46 | 47 | malloc_bin_stats_t bstats[NBINS]; 48 | malloc_large_stats_t *lstats; /* nlclasses elements. */ 49 | }; 50 | 51 | struct ctl_stats_s { 52 | size_t allocated; 53 | size_t active; 54 | size_t mapped; 55 | struct { 56 | size_t current; /* stats_chunks.curchunks */ 57 | uint64_t total; /* stats_chunks.nchunks */ 58 | size_t high; /* stats_chunks.highchunks */ 59 | } chunks; 60 | struct { 61 | size_t allocated; /* huge_allocated */ 62 | uint64_t nmalloc; /* huge_nmalloc */ 63 | uint64_t ndalloc; /* huge_ndalloc */ 64 | } huge; 65 | unsigned narenas; 66 | ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
 */
 67 | };
 68 | 
 69 | #endif /* JEMALLOC_H_STRUCTS */
 70 | /******************************************************************************/
 71 | #ifdef JEMALLOC_H_EXTERNS
 72 | 
 73 | int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
 74 |     size_t newlen);
 75 | int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
 76 | 
 77 | int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 78 |     void *newp, size_t newlen);
 79 | bool ctl_boot(void);
 80 | void ctl_prefork(void);
 81 | void ctl_postfork_parent(void);
 82 | void ctl_postfork_child(void);
 83 | 
 84 | #define xmallctl(name, oldp, oldlenp, newp, newlen) do {            \
 85 |     if (je_mallctl(name, oldp, oldlenp, newp, newlen)               \
 86 |         != 0) {                                                     \
 87 |         malloc_printf(                                              \
 88 |             "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n",       \
 89 |             name);                                                  \
 90 |         abort();                                                    \
 91 |     }                                                               \
 92 | } while (0)
 93 | 
 94 | #define xmallctlnametomib(name, mibp, miblenp) do {                 \
 95 |     if (je_mallctlnametomib(name, mibp, miblenp) != 0) {            \
 96 |         malloc_printf("<jemalloc>: Failure in "                     \
 97 |             "xmallctlnametomib(\"%s\", ...)\n", name);              \
 98 |         abort();                                                    \
 99 |     }                                                               \
100 | } while (0)
101 | 
102 | #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
103 |     if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp,           \
104 |         newlen) != 0) {                                             \
105 |         malloc_write(                                               \
106 |             "<jemalloc>: Failure in xmallctlbymib()\n");            \
107 |         abort();                                                    \
108 |     }                                                               \
109 | } while (0)
110 | 
111 | #endif /* JEMALLOC_H_EXTERNS */
112 | /******************************************************************************/
113 | #ifdef JEMALLOC_H_INLINES
114 | 
115 | #endif /* JEMALLOC_H_INLINES */
116 | /******************************************************************************/
117 | 
118 | 
--------------------------------------------------------------------------------
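The xmallctl* wrappers above are abort-on-failure shims over the MIB tree that ctl.c builds; the same tree backs jemalloc's public mallctl*() entry points. A hedged sketch of the equivalent calls through the public API, assuming an unprefixed, stats-enabled build (both defaults on Linux; with --with-jemalloc-prefix the names gain that prefix):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    uint64_t epoch = 1;
    size_t allocated, sz = sizeof(size_t);
    size_t mib[2], miblen = 2;

    /* Refresh the cached statistics, then read by name, as xmallctl()
     * does internally. */
    if (mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)) != 0 ||
        mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
        fprintf(stderr, "mallctl failure\n");
        abort();
    }
    printf("allocated: %zu\n", allocated);

    /* Translate the name once, then query by MIB on the hot path, which
     * is what the xmallctlnametomib()/xmallctlbymib() pair is for. */
    if (mallctlnametomib("stats.allocated", mib, &miblen) != 0 ||
        mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) != 0)
        abort();
    printf("allocated (by MIB): %zu\n", allocated);
    return (0);
}

Compile with cc test.c -ljemalloc; the by-MIB form skips the string parse and tree walk on every call after the first.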
/include/jemalloc/internal/extent.h:
--------------------------------------------------------------------------------
 1 | /******************************************************************************/
 2 | #ifdef JEMALLOC_H_TYPES
 3 | 
 4 | typedef struct extent_node_s extent_node_t;
 5 | 
 6 | #endif /* JEMALLOC_H_TYPES */
 7 | /******************************************************************************/
 8 | #ifdef JEMALLOC_H_STRUCTS
 9 | 
10 | /* Tree of extents. */
11 | struct extent_node_s {
12 |     /* Linkage for the size/address-ordered tree. */
13 |     rb_node(extent_node_t) link_szad;
14 | 
15 |     /* Linkage for the address-ordered tree. */
16 |     rb_node(extent_node_t) link_ad;
17 | 
18 |     /* Profile counters, used for huge objects. */
19 |     prof_ctx_t *prof_ctx;
20 | 
21 |     /* Pointer to the extent that this tree node is responsible for. */
22 |     void *addr;
23 | 
24 |     /* Total region size. */
25 |     size_t size;
26 | 
27 |     /* True if zero-filled; used by chunk recycling code. */
28 |     bool zeroed;
29 | };
30 | typedef rb_tree(extent_node_t) extent_tree_t;
31 | 
32 | #endif /* JEMALLOC_H_STRUCTS */
33 | /******************************************************************************/
34 | #ifdef JEMALLOC_H_EXTERNS
35 | 
36 | rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
37 | 
38 | rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
39 | 
40 | #endif /* JEMALLOC_H_EXTERNS */
41 | /******************************************************************************/
42 | #ifdef JEMALLOC_H_INLINES
43 | 
44 | #endif /* JEMALLOC_H_INLINES */
45 | /******************************************************************************/
46 | 
47 | 
--------------------------------------------------------------------------------
/include/jemalloc/internal/hash.h:
--------------------------------------------------------------------------------
 1 | /*
 2 |  * The following hash function is based on MurmurHash3, placed into the public
 3 |  * domain by Austin Appleby.  See http://code.google.com/p/smhasher/ for
 4 |  * details.
 5 |  */
 6 | /******************************************************************************/
 7 | #ifdef JEMALLOC_H_TYPES
 8 | 
 9 | #endif /* JEMALLOC_H_TYPES */
10 | /******************************************************************************/
11 | #ifdef JEMALLOC_H_STRUCTS
12 | 
13 | #endif /* JEMALLOC_H_STRUCTS */
14 | /******************************************************************************/
15 | #ifdef JEMALLOC_H_EXTERNS
16 | 
17 | #endif /* JEMALLOC_H_EXTERNS */
18 | /******************************************************************************/
19 | #ifdef JEMALLOC_H_INLINES
20 | 
21 | #ifndef JEMALLOC_ENABLE_INLINE
22 | void hash(const void *key, size_t len, const uint32_t seed,
23 |     size_t r_hash[2]);
24 | #endif
25 | 
26 | #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
27 | /******************************************************************************/
28 | /* Internal implementation.
*/ 29 | JEMALLOC_INLINE uint32_t 30 | hash_rotl_32(uint32_t x, int8_t r) 31 | { 32 | 33 | return (x << r) | (x >> (32 - r)); 34 | } 35 | 36 | JEMALLOC_INLINE uint64_t 37 | hash_rotl_64(uint64_t x, int8_t r) 38 | { 39 | return (x << r) | (x >> (64 - r)); 40 | } 41 | 42 | JEMALLOC_INLINE uint32_t 43 | hash_get_block_32(const uint32_t *p, int i) 44 | { 45 | 46 | return p[i]; 47 | } 48 | 49 | JEMALLOC_INLINE uint64_t 50 | hash_get_block_64(const uint64_t *p, int i) 51 | { 52 | 53 | return p[i]; 54 | } 55 | 56 | JEMALLOC_INLINE uint32_t 57 | hash_fmix_32(uint32_t h) 58 | { 59 | 60 | h ^= h >> 16; 61 | h *= 0x85ebca6b; 62 | h ^= h >> 13; 63 | h *= 0xc2b2ae35; 64 | h ^= h >> 16; 65 | 66 | return h; 67 | } 68 | 69 | JEMALLOC_INLINE uint64_t 70 | hash_fmix_64(uint64_t k) 71 | { 72 | 73 | k ^= k >> 33; 74 | k *= QU(0xff51afd7ed558ccdLLU); 75 | k ^= k >> 33; 76 | k *= QU(0xc4ceb9fe1a85ec53LLU); 77 | k ^= k >> 33; 78 | 79 | return k; 80 | } 81 | 82 | JEMALLOC_INLINE uint32_t 83 | hash_x86_32(const void *key, int len, uint32_t seed) 84 | { 85 | const uint8_t *data = (const uint8_t *) key; 86 | const int nblocks = len / 4; 87 | 88 | uint32_t h1 = seed; 89 | 90 | const uint32_t c1 = 0xcc9e2d51; 91 | const uint32_t c2 = 0x1b873593; 92 | 93 | /* body */ 94 | { 95 | const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); 96 | int i; 97 | 98 | for (i = -nblocks; i; i++) { 99 | uint32_t k1 = hash_get_block_32(blocks, i); 100 | 101 | k1 *= c1; 102 | k1 = hash_rotl_32(k1, 15); 103 | k1 *= c2; 104 | 105 | h1 ^= k1; 106 | h1 = hash_rotl_32(h1, 13); 107 | h1 = h1*5 + 0xe6546b64; 108 | } 109 | } 110 | 111 | /* tail */ 112 | { 113 | const uint8_t *tail = (const uint8_t *) (data + nblocks*4); 114 | 115 | uint32_t k1 = 0; 116 | 117 | switch (len & 3) { 118 | case 3: k1 ^= tail[2] << 16; 119 | case 2: k1 ^= tail[1] << 8; 120 | case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); 121 | k1 *= c2; h1 ^= k1; 122 | } 123 | } 124 | 125 | /* finalization */ 126 | h1 ^= len; 127 | 128 | h1 = hash_fmix_32(h1); 129 | 130 | return h1; 131 | } 132 | 133 | UNUSED JEMALLOC_INLINE void 134 | hash_x86_128(const void *key, const int len, uint32_t seed, 135 | uint64_t r_out[2]) 136 | { 137 | const uint8_t * data = (const uint8_t *) key; 138 | const int nblocks = len / 16; 139 | 140 | uint32_t h1 = seed; 141 | uint32_t h2 = seed; 142 | uint32_t h3 = seed; 143 | uint32_t h4 = seed; 144 | 145 | const uint32_t c1 = 0x239b961b; 146 | const uint32_t c2 = 0xab0e9789; 147 | const uint32_t c3 = 0x38b34ae5; 148 | const uint32_t c4 = 0xa1e38b93; 149 | 150 | /* body */ 151 | { 152 | const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); 153 | int i; 154 | 155 | for (i = -nblocks; i; i++) { 156 | uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); 157 | uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); 158 | uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); 159 | uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); 160 | 161 | k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; 162 | 163 | h1 = hash_rotl_32(h1, 19); h1 += h2; 164 | h1 = h1*5 + 0x561ccd1b; 165 | 166 | k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; 167 | 168 | h2 = hash_rotl_32(h2, 17); h2 += h3; 169 | h2 = h2*5 + 0x0bcaa747; 170 | 171 | k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; 172 | 173 | h3 = hash_rotl_32(h3, 15); h3 += h4; 174 | h3 = h3*5 + 0x96cd1c35; 175 | 176 | k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; 177 | 178 | h4 = hash_rotl_32(h4, 13); h4 += h1; 179 | h4 = h4*5 + 0x32ac3b17; 180 | } 181 | } 182 | 183 | /* 
tail */ 184 | { 185 | const uint8_t *tail = (const uint8_t *) (data + nblocks*16); 186 | uint32_t k1 = 0; 187 | uint32_t k2 = 0; 188 | uint32_t k3 = 0; 189 | uint32_t k4 = 0; 190 | 191 | switch (len & 15) { 192 | case 15: k4 ^= tail[14] << 16; 193 | case 14: k4 ^= tail[13] << 8; 194 | case 13: k4 ^= tail[12] << 0; 195 | k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; 196 | 197 | case 12: k3 ^= tail[11] << 24; 198 | case 11: k3 ^= tail[10] << 16; 199 | case 10: k3 ^= tail[ 9] << 8; 200 | case 9: k3 ^= tail[ 8] << 0; 201 | k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; 202 | 203 | case 8: k2 ^= tail[ 7] << 24; 204 | case 7: k2 ^= tail[ 6] << 16; 205 | case 6: k2 ^= tail[ 5] << 8; 206 | case 5: k2 ^= tail[ 4] << 0; 207 | k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; 208 | 209 | case 4: k1 ^= tail[ 3] << 24; 210 | case 3: k1 ^= tail[ 2] << 16; 211 | case 2: k1 ^= tail[ 1] << 8; 212 | case 1: k1 ^= tail[ 0] << 0; 213 | k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; 214 | } 215 | } 216 | 217 | /* finalization */ 218 | h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; 219 | 220 | h1 += h2; h1 += h3; h1 += h4; 221 | h2 += h1; h3 += h1; h4 += h1; 222 | 223 | h1 = hash_fmix_32(h1); 224 | h2 = hash_fmix_32(h2); 225 | h3 = hash_fmix_32(h3); 226 | h4 = hash_fmix_32(h4); 227 | 228 | h1 += h2; h1 += h3; h1 += h4; 229 | h2 += h1; h3 += h1; h4 += h1; 230 | 231 | r_out[0] = (((uint64_t) h2) << 32) | h1; 232 | r_out[1] = (((uint64_t) h4) << 32) | h3; 233 | } 234 | 235 | UNUSED JEMALLOC_INLINE void 236 | hash_x64_128(const void *key, const int len, const uint32_t seed, 237 | uint64_t r_out[2]) 238 | { 239 | const uint8_t *data = (const uint8_t *) key; 240 | const int nblocks = len / 16; 241 | 242 | uint64_t h1 = seed; 243 | uint64_t h2 = seed; 244 | 245 | const uint64_t c1 = QU(0x87c37b91114253d5LLU); 246 | const uint64_t c2 = QU(0x4cf5ad432745937fLLU); 247 | 248 | /* body */ 249 | { 250 | const uint64_t *blocks = (const uint64_t *) (data); 251 | int i; 252 | 253 | for (i = 0; i < nblocks; i++) { 254 | uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); 255 | uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); 256 | 257 | k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; 258 | 259 | h1 = hash_rotl_64(h1, 27); h1 += h2; 260 | h1 = h1*5 + 0x52dce729; 261 | 262 | k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; 263 | 264 | h2 = hash_rotl_64(h2, 31); h2 += h1; 265 | h2 = h2*5 + 0x38495ab5; 266 | } 267 | } 268 | 269 | /* tail */ 270 | { 271 | const uint8_t *tail = (const uint8_t*)(data + nblocks*16); 272 | uint64_t k1 = 0; 273 | uint64_t k2 = 0; 274 | 275 | switch (len & 15) { 276 | case 15: k2 ^= ((uint64_t)(tail[14])) << 48; 277 | case 14: k2 ^= ((uint64_t)(tail[13])) << 40; 278 | case 13: k2 ^= ((uint64_t)(tail[12])) << 32; 279 | case 12: k2 ^= ((uint64_t)(tail[11])) << 24; 280 | case 11: k2 ^= ((uint64_t)(tail[10])) << 16; 281 | case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; 282 | case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; 283 | k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; 284 | 285 | case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; 286 | case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; 287 | case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; 288 | case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; 289 | case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; 290 | case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; 291 | case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; 292 | case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; 293 | k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; 294 | } 295 | } 296 | 297 
| /* finalization */ 298 | h1 ^= len; h2 ^= len; 299 | 300 | h1 += h2; 301 | h2 += h1; 302 | 303 | h1 = hash_fmix_64(h1); 304 | h2 = hash_fmix_64(h2); 305 | 306 | h1 += h2; 307 | h2 += h1; 308 | 309 | r_out[0] = h1; 310 | r_out[1] = h2; 311 | } 312 | 313 | 314 | /******************************************************************************/ 315 | /* API. */ 316 | JEMALLOC_INLINE void 317 | hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) 318 | { 319 | #if (LG_SIZEOF_PTR == 3) 320 | hash_x64_128(key, len, seed, (uint64_t *)r_hash); 321 | #else 322 | uint64_t hashes[2]; 323 | hash_x86_128(key, len, seed, hashes); 324 | r_hash[0] = (size_t)hashes[0]; 325 | r_hash[1] = (size_t)hashes[1]; 326 | #endif 327 | } 328 | #endif 329 | 330 | #endif /* JEMALLOC_H_INLINES */ 331 | /******************************************************************************/ 332 | -------------------------------------------------------------------------------- /include/jemalloc/internal/huge.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | #endif /* JEMALLOC_H_TYPES */ 5 | /******************************************************************************/ 6 | #ifdef JEMALLOC_H_STRUCTS 7 | 8 | #endif /* JEMALLOC_H_STRUCTS */ 9 | /******************************************************************************/ 10 | #ifdef JEMALLOC_H_EXTERNS 11 | 12 | /* Huge allocation statistics. */ 13 | extern uint64_t huge_nmalloc; 14 | extern uint64_t huge_ndalloc; 15 | extern size_t huge_allocated; 16 | 17 | /* Protects chunk-related data structures. */ 18 | extern malloc_mutex_t huge_mtx; 19 | 20 | void *huge_malloc(size_t size, bool zero); 21 | void *huge_palloc(size_t size, size_t alignment, bool zero); 22 | void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, 23 | size_t extra); 24 | void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, 25 | size_t alignment, bool zero, bool try_tcache_dalloc); 26 | void huge_dalloc(void *ptr, bool unmap); 27 | size_t huge_salloc(const void *ptr); 28 | prof_ctx_t *huge_prof_ctx_get(const void *ptr); 29 | void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); 30 | bool huge_boot(void); 31 | void huge_prefork(void); 32 | void huge_postfork_parent(void); 33 | void huge_postfork_child(void); 34 | 35 | #endif /* JEMALLOC_H_EXTERNS */ 36 | /******************************************************************************/ 37 | #ifdef JEMALLOC_H_INLINES 38 | 39 | #endif /* JEMALLOC_H_INLINES */ 40 | /******************************************************************************/ 41 | -------------------------------------------------------------------------------- /include/jemalloc/internal/mb.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | #endif /* JEMALLOC_H_TYPES */ 5 | /******************************************************************************/ 6 | #ifdef JEMALLOC_H_STRUCTS 7 | 8 | #endif /* JEMALLOC_H_STRUCTS */ 9 | /******************************************************************************/ 10 | #ifdef JEMALLOC_H_EXTERNS 11 | 12 | #endif /* JEMALLOC_H_EXTERNS */ 13 | /******************************************************************************/ 14 | #ifdef JEMALLOC_H_INLINES 15 | 16 | #ifndef JEMALLOC_ENABLE_INLINE 17 | void 
mb_write(void); 18 | #endif 19 | 20 | #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) 21 | #ifdef __i386__ 22 | /* 23 | * According to the Intel Architecture Software Developer's Manual, current 24 | * processors execute instructions in order from the perspective of other 25 | * processors in a multiprocessor system, but 1) Intel reserves the right to 26 | * change that, and 2) the compiler's optimizer could re-order instructions if 27 | * there weren't some form of barrier. Therefore, even if running on an 28 | * architecture that does not need memory barriers (everything through at least 29 | * i686), an "optimizer barrier" is necessary. 30 | */ 31 | JEMALLOC_INLINE void 32 | mb_write(void) 33 | { 34 | 35 | # if 0 36 | /* This is a true memory barrier. */ 37 | asm volatile ("pusha;" 38 | "xor %%eax,%%eax;" 39 | "cpuid;" 40 | "popa;" 41 | : /* Outputs. */ 42 | : /* Inputs. */ 43 | : "memory" /* Clobbers. */ 44 | ); 45 | #else 46 | /* 47 | * This is hopefully enough to keep the compiler from reordering 48 | * instructions around this one. 49 | */ 50 | asm volatile ("nop;" 51 | : /* Outputs. */ 52 | : /* Inputs. */ 53 | : "memory" /* Clobbers. */ 54 | ); 55 | #endif 56 | } 57 | #elif (defined(__amd64__) || defined(__x86_64__)) 58 | JEMALLOC_INLINE void 59 | mb_write(void) 60 | { 61 | 62 | asm volatile ("sfence" 63 | : /* Outputs. */ 64 | : /* Inputs. */ 65 | : "memory" /* Clobbers. */ 66 | ); 67 | } 68 | #elif defined(__powerpc__) 69 | JEMALLOC_INLINE void 70 | mb_write(void) 71 | { 72 | 73 | asm volatile ("eieio" 74 | : /* Outputs. */ 75 | : /* Inputs. */ 76 | : "memory" /* Clobbers. */ 77 | ); 78 | } 79 | #elif defined(__sparc64__) 80 | JEMALLOC_INLINE void 81 | mb_write(void) 82 | { 83 | 84 | asm volatile ("membar #StoreStore" 85 | : /* Outputs. */ 86 | : /* Inputs. */ 87 | : "memory" /* Clobbers. */ 88 | ); 89 | } 90 | #elif defined(__tile__) 91 | JEMALLOC_INLINE void 92 | mb_write(void) 93 | { 94 | 95 | __sync_synchronize(); 96 | } 97 | #else 98 | /* 99 | * This is much slower than a simple memory barrier, but the semantics of mutex 100 | * unlock make this work. 
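 * (Acquiring and then releasing a mutex implies acquire/release memory
 * ordering, which subsumes the store ordering that mb_write() must provide.)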
101 | */ 102 | JEMALLOC_INLINE void 103 | mb_write(void) 104 | { 105 | malloc_mutex_t mtx; 106 | 107 | malloc_mutex_init(&mtx); 108 | malloc_mutex_lock(&mtx); 109 | malloc_mutex_unlock(&mtx); 110 | } 111 | #endif 112 | #endif 113 | 114 | #endif /* JEMALLOC_H_INLINES */ 115 | /******************************************************************************/ 116 | -------------------------------------------------------------------------------- /include/jemalloc/internal/mutex.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | typedef struct malloc_mutex_s malloc_mutex_t; 5 | 6 | #ifdef _WIN32 7 | # define MALLOC_MUTEX_INITIALIZER 8 | #elif (defined(JEMALLOC_OSSPIN)) 9 | # define MALLOC_MUTEX_INITIALIZER {0} 10 | #elif (defined(JEMALLOC_MUTEX_INIT_CB)) 11 | # define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} 12 | #else 13 | # if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ 14 | defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) 15 | # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP 16 | # define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} 17 | # else 18 | # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT 19 | # define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} 20 | # endif 21 | #endif 22 | 23 | #endif /* JEMALLOC_H_TYPES */ 24 | /******************************************************************************/ 25 | #ifdef JEMALLOC_H_STRUCTS 26 | 27 | struct malloc_mutex_s { 28 | #ifdef _WIN32 29 | CRITICAL_SECTION lock; 30 | #elif (defined(JEMALLOC_OSSPIN)) 31 | OSSpinLock lock; 32 | #elif (defined(JEMALLOC_MUTEX_INIT_CB)) 33 | pthread_mutex_t lock; 34 | malloc_mutex_t *postponed_next; 35 | #else 36 | pthread_mutex_t lock; 37 | #endif 38 | }; 39 | 40 | #endif /* JEMALLOC_H_STRUCTS */ 41 | /******************************************************************************/ 42 | #ifdef JEMALLOC_H_EXTERNS 43 | 44 | #ifdef JEMALLOC_LAZY_LOCK 45 | extern bool isthreaded; 46 | #else 47 | # undef isthreaded /* Undo private_namespace.h definition. 
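Hard-coding isthreaded to true below lets the compiler elide the runtime branch in malloc_mutex_lock() and malloc_mutex_unlock().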
*/ 48 | # define isthreaded true 49 | #endif 50 | 51 | bool malloc_mutex_init(malloc_mutex_t *mutex); 52 | void malloc_mutex_prefork(malloc_mutex_t *mutex); 53 | void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); 54 | void malloc_mutex_postfork_child(malloc_mutex_t *mutex); 55 | bool mutex_boot(void); 56 | 57 | #endif /* JEMALLOC_H_EXTERNS */ 58 | /******************************************************************************/ 59 | #ifdef JEMALLOC_H_INLINES 60 | 61 | #ifndef JEMALLOC_ENABLE_INLINE 62 | void malloc_mutex_lock(malloc_mutex_t *mutex); 63 | void malloc_mutex_unlock(malloc_mutex_t *mutex); 64 | #endif 65 | 66 | #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) 67 | JEMALLOC_INLINE void 68 | malloc_mutex_lock(malloc_mutex_t *mutex) 69 | { 70 | 71 | if (isthreaded) { 72 | #ifdef _WIN32 73 | EnterCriticalSection(&mutex->lock); 74 | #elif (defined(JEMALLOC_OSSPIN)) 75 | OSSpinLockLock(&mutex->lock); 76 | #else 77 | pthread_mutex_lock(&mutex->lock); 78 | #endif 79 | } 80 | } 81 | 82 | JEMALLOC_INLINE void 83 | malloc_mutex_unlock(malloc_mutex_t *mutex) 84 | { 85 | 86 | if (isthreaded) { 87 | #ifdef _WIN32 88 | LeaveCriticalSection(&mutex->lock); 89 | #elif (defined(JEMALLOC_OSSPIN)) 90 | OSSpinLockUnlock(&mutex->lock); 91 | #else 92 | pthread_mutex_unlock(&mutex->lock); 93 | #endif 94 | } 95 | } 96 | #endif 97 | 98 | #endif /* JEMALLOC_H_INLINES */ 99 | /******************************************************************************/ 100 | -------------------------------------------------------------------------------- /include/jemalloc/internal/prng.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | /* 5 | * Simple linear congruential pseudo-random number generator: 6 | * 7 | * prng(x) = (a*x + c) % m 8 | * 9 | * where the following constants ensure maximal period: 10 | * 11 | * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. 12 | * c == Odd number (relatively prime to 2^n). 13 | * m == 2^32 14 | * 15 | * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. 16 | * 17 | * This choice of m has the disadvantage that the quality of the bits is 18 | * proportional to bit position. For example, the lowest bit has a cycle of 2, 19 | * the next has a cycle of 4, etc. For this reason, we prefer to use the upper 20 | * bits. 21 | * 22 | * Macro parameters: 23 | * uint32_t r : Result. 24 | * unsigned lg_range : (0..32], number of least significant bits to return. 25 | * uint32_t state : Seed value. 26 | * const uint32_t a, c : See above discussion. 27 | */ 28 | #define prng32(r, lg_range, state, a, c) do { \ 29 | assert(lg_range > 0); \ 30 | assert(lg_range <= 32); \ 31 | \ 32 | r = (state * (a)) + (c); \ 33 | state = r; \ 34 | r >>= (32 - lg_range); \ 35 | } while (false) 36 | 37 | /* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t.
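For example, after "uint64_t r, state = seed; prng64(r, 16, state, a, c);", r holds the 16 high-order bits of the updated state.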
*/ 38 | #define prng64(r, lg_range, state, a, c) do { \ 39 | assert(lg_range > 0); \ 40 | assert(lg_range <= 64); \ 41 | \ 42 | r = (state * (a)) + (c); \ 43 | state = r; \ 44 | r >>= (64 - lg_range); \ 45 | } while (false) 46 | 47 | #endif /* JEMALLOC_H_TYPES */ 48 | /******************************************************************************/ 49 | #ifdef JEMALLOC_H_STRUCTS 50 | 51 | #endif /* JEMALLOC_H_STRUCTS */ 52 | /******************************************************************************/ 53 | #ifdef JEMALLOC_H_EXTERNS 54 | 55 | #endif /* JEMALLOC_H_EXTERNS */ 56 | /******************************************************************************/ 57 | #ifdef JEMALLOC_H_INLINES 58 | 59 | #endif /* JEMALLOC_H_INLINES */ 60 | /******************************************************************************/ 61 | -------------------------------------------------------------------------------- /include/jemalloc/internal/ql.h: -------------------------------------------------------------------------------- 1 | /* 2 | * List definitions. 3 | */ 4 | #define ql_head(a_type) \ 5 | struct { \ 6 | a_type *qlh_first; \ 7 | } 8 | 9 | #define ql_head_initializer(a_head) {NULL} 10 | 11 | #define ql_elm(a_type) qr(a_type) 12 | 13 | /* List functions. */ 14 | #define ql_new(a_head) do { \ 15 | (a_head)->qlh_first = NULL; \ 16 | } while (0) 17 | 18 | #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) 19 | 20 | #define ql_first(a_head) ((a_head)->qlh_first) 21 | 22 | #define ql_last(a_head, a_field) \ 23 | ((ql_first(a_head) != NULL) \ 24 | ? qr_prev(ql_first(a_head), a_field) : NULL) 25 | 26 | #define ql_next(a_head, a_elm, a_field) \ 27 | ((ql_last(a_head, a_field) != (a_elm)) \ 28 | ? qr_next((a_elm), a_field) : NULL) 29 | 30 | #define ql_prev(a_head, a_elm, a_field) \ 31 | ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ 32 | : NULL) 33 | 34 | #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ 35 | qr_before_insert((a_qlelm), (a_elm), a_field); \ 36 | if (ql_first(a_head) == (a_qlelm)) { \ 37 | ql_first(a_head) = (a_elm); \ 38 | } \ 39 | } while (0) 40 | 41 | #define ql_after_insert(a_qlelm, a_elm, a_field) \ 42 | qr_after_insert((a_qlelm), (a_elm), a_field) 43 | 44 | #define ql_head_insert(a_head, a_elm, a_field) do { \ 45 | if (ql_first(a_head) != NULL) { \ 46 | qr_before_insert(ql_first(a_head), (a_elm), a_field); \ 47 | } \ 48 | ql_first(a_head) = (a_elm); \ 49 | } while (0) 50 | 51 | #define ql_tail_insert(a_head, a_elm, a_field) do { \ 52 | if (ql_first(a_head) != NULL) { \ 53 | qr_before_insert(ql_first(a_head), (a_elm), a_field); \ 54 | } \ 55 | ql_first(a_head) = qr_next((a_elm), a_field); \ 56 | } while (0) 57 | 58 | #define ql_remove(a_head, a_elm, a_field) do { \ 59 | if (ql_first(a_head) == (a_elm)) { \ 60 | ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ 61 | } \ 62 | if (ql_first(a_head) != (a_elm)) { \ 63 | qr_remove((a_elm), a_field); \ 64 | } else { \ 65 | ql_first(a_head) = NULL; \ 66 | } \ 67 | } while (0) 68 | 69 | #define ql_head_remove(a_head, a_type, a_field) do { \ 70 | a_type *t = ql_first(a_head); \ 71 | ql_remove((a_head), t, a_field); \ 72 | } while (0) 73 | 74 | #define ql_tail_remove(a_head, a_type, a_field) do { \ 75 | a_type *t = ql_last(a_head, a_field); \ 76 | ql_remove((a_head), t, a_field); \ 77 | } while (0) 78 | 79 | #define ql_foreach(a_var, a_head, a_field) \ 80 | qr_foreach((a_var), ql_first(a_head), a_field) 81 | 82 | #define ql_reverse_foreach(a_var, a_head, a_field) \ 83 | qr_reverse_foreach((a_var), ql_first(a_head), a_field) 84 | -------------------------------------------------------------------------------- /include/jemalloc/internal/qr.h: -------------------------------------------------------------------------------- 1 | /* Ring definitions. */ 2 | #define qr(a_type) \ 3 | struct { \ 4 | a_type *qre_next; \ 5 | a_type *qre_prev; \ 6 | } 7 | 8 | /* Ring functions. */ 9 | #define qr_new(a_qr, a_field) do { \ 10 | (a_qr)->a_field.qre_next = (a_qr); \ 11 | (a_qr)->a_field.qre_prev = (a_qr); \ 12 | } while (0) 13 | 14 | #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) 15 | 16 | #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) 17 | 18 | #define qr_before_insert(a_qrelm, a_qr, a_field) do { \ 19 | (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ 20 | (a_qr)->a_field.qre_next = (a_qrelm); \ 21 | (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ 22 | (a_qrelm)->a_field.qre_prev = (a_qr); \ 23 | } while (0) 24 | 25 | #define qr_after_insert(a_qrelm, a_qr, a_field) \ 26 | do \ 27 | { \ 28 | (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ 29 | (a_qr)->a_field.qre_prev = (a_qrelm); \ 30 | (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ 31 | (a_qrelm)->a_field.qre_next = (a_qr); \ 32 | } while (0) 33 | 34 | #define qr_meld(a_qr_a, a_qr_b, a_field) do { \ 35 | void *t; \ 36 | (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ 37 | (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ 38 | t = (a_qr_a)->a_field.qre_prev; \ 39 | (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ 40 | (a_qr_b)->a_field.qre_prev = t; \ 41 | } while (0) 42 | 43 | /* qr_meld() and qr_split() are functionally equivalent, so there's no need to 44 | * have two copies of the code. 
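 * (Applying the pointer swaps to elements of two separate rings joins them
 * into one ring; applying them to two elements of the same ring splits it in
 * two, which is why a single implementation serves both operations.)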
*/ 45 | #define qr_split(a_qr_a, a_qr_b, a_field) \ 46 | qr_meld((a_qr_a), (a_qr_b), a_field) 47 | 48 | #define qr_remove(a_qr, a_field) do { \ 49 | (a_qr)->a_field.qre_prev->a_field.qre_next \ 50 | = (a_qr)->a_field.qre_next; \ 51 | (a_qr)->a_field.qre_next->a_field.qre_prev \ 52 | = (a_qr)->a_field.qre_prev; \ 53 | (a_qr)->a_field.qre_next = (a_qr); \ 54 | (a_qr)->a_field.qre_prev = (a_qr); \ 55 | } while (0) 56 | 57 | #define qr_foreach(var, a_qr, a_field) \ 58 | for ((var) = (a_qr); \ 59 | (var) != NULL; \ 60 | (var) = (((var)->a_field.qre_next != (a_qr)) \ 61 | ? (var)->a_field.qre_next : NULL)) 62 | 63 | #define qr_reverse_foreach(var, a_qr, a_field) \ 64 | for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ 65 | (var) != NULL; \ 66 | (var) = (((var) != (a_qr)) \ 67 | ? (var)->a_field.qre_prev : NULL)) 68 | -------------------------------------------------------------------------------- /include/jemalloc/internal/quarantine.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | typedef struct quarantine_obj_s quarantine_obj_t; 5 | typedef struct quarantine_s quarantine_t; 6 | 7 | /* Default per thread quarantine size if valgrind is enabled. */ 8 | #define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) 9 | 10 | #endif /* JEMALLOC_H_TYPES */ 11 | /******************************************************************************/ 12 | #ifdef JEMALLOC_H_STRUCTS 13 | 14 | struct quarantine_obj_s { 15 | void *ptr; 16 | size_t usize; 17 | }; 18 | 19 | struct quarantine_s { 20 | size_t curbytes; 21 | size_t curobjs; 22 | size_t first; 23 | #define LG_MAXOBJS_INIT 10 24 | size_t lg_maxobjs; 25 | quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */ 26 | }; 27 | 28 | #endif /* JEMALLOC_H_STRUCTS */ 29 | /******************************************************************************/ 30 | #ifdef JEMALLOC_H_EXTERNS 31 | 32 | quarantine_t *quarantine_init(size_t lg_maxobjs); 33 | void quarantine(void *ptr); 34 | void quarantine_cleanup(void *arg); 35 | bool quarantine_boot(void); 36 | 37 | #endif /* JEMALLOC_H_EXTERNS */ 38 | /******************************************************************************/ 39 | #ifdef JEMALLOC_H_INLINES 40 | 41 | #ifndef JEMALLOC_ENABLE_INLINE 42 | malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *) 43 | 44 | void quarantine_alloc_hook(void); 45 | #endif 46 | 47 | #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) 48 | malloc_tsd_externs(quarantine, quarantine_t *) 49 | malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL, 50 | quarantine_cleanup) 51 | 52 | JEMALLOC_ALWAYS_INLINE void 53 | quarantine_alloc_hook(void) 54 | { 55 | quarantine_t *quarantine; 56 | 57 | assert(config_fill && opt_quarantine); 58 | 59 | quarantine = *quarantine_tsd_get(); 60 | if (quarantine == NULL) 61 | quarantine_init(LG_MAXOBJS_INIT); 62 | } 63 | #endif 64 | 65 | #endif /* JEMALLOC_H_INLINES */ 66 | /******************************************************************************/ 67 | 68 | -------------------------------------------------------------------------------- /include/jemalloc/internal/rtree.h: -------------------------------------------------------------------------------- 1 | /* 2 | * This radix tree implementation is tailored to the singular purpose of 3 | * tracking which chunks are currently owned by jemalloc. 
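 * The tree maps chunk addresses to ownership information, so a lookup can
 * answer whether an arbitrary pointer lies within a jemalloc-owned chunk.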
This functionality 4 | * is mandatory for OS X, where jemalloc must be able to respond to object 5 | * ownership queries. 6 | * 7 | ******************************************************************************* 8 | */ 9 | #ifdef JEMALLOC_H_TYPES 10 | 11 | typedef struct rtree_s rtree_t; 12 | 13 | /* 14 | * Size of each radix tree node (must be a power of 2). This impacts tree 15 | * depth. 16 | */ 17 | #if (LG_SIZEOF_PTR == 2) 18 | # define RTREE_NODESIZE (1U << 14) 19 | #else 20 | # define RTREE_NODESIZE CACHELINE 21 | #endif 22 | 23 | #endif /* JEMALLOC_H_TYPES */ 24 | /******************************************************************************/ 25 | #ifdef JEMALLOC_H_STRUCTS 26 | 27 | struct rtree_s { 28 | malloc_mutex_t mutex; 29 | void **root; 30 | unsigned height; 31 | unsigned level2bits[1]; /* Dynamically sized. */ 32 | }; 33 | 34 | #endif /* JEMALLOC_H_STRUCTS */ 35 | /******************************************************************************/ 36 | #ifdef JEMALLOC_H_EXTERNS 37 | 38 | rtree_t *rtree_new(unsigned bits); 39 | void rtree_prefork(rtree_t *rtree); 40 | void rtree_postfork_parent(rtree_t *rtree); 41 | void rtree_postfork_child(rtree_t *rtree); 42 | 43 | #endif /* JEMALLOC_H_EXTERNS */ 44 | /******************************************************************************/ 45 | #ifdef JEMALLOC_H_INLINES 46 | 47 | #ifndef JEMALLOC_ENABLE_INLINE 48 | #ifndef JEMALLOC_DEBUG 49 | void *rtree_get_locked(rtree_t *rtree, uintptr_t key); 50 | #endif 51 | void *rtree_get(rtree_t *rtree, uintptr_t key); 52 | bool rtree_set(rtree_t *rtree, uintptr_t key, void *val); 53 | #endif 54 | 55 | #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) 56 | #define RTREE_GET_GENERATE(f) \ 57 | /* The least significant bits of the key are ignored. */ \ 58 | JEMALLOC_INLINE void * \ 59 | f(rtree_t *rtree, uintptr_t key) \ 60 | { \ 61 | void *ret; \ 62 | uintptr_t subkey; \ 63 | unsigned i, lshift, height, bits; \ 64 | void **node, **child; \ 65 | \ 66 | RTREE_LOCK(&rtree->mutex); \ 67 | for (i = lshift = 0, height = rtree->height, node = rtree->root;\ 68 | i < height - 1; \ 69 | i++, lshift += bits, node = child) { \ 70 | bits = rtree->level2bits[i]; \ 71 | subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \ 72 | 3)) - bits); \ 73 | child = (void**)node[subkey]; \ 74 | if (child == NULL) { \ 75 | RTREE_UNLOCK(&rtree->mutex); \ 76 | return (NULL); \ 77 | } \ 78 | } \ 79 | \ 80 | /* \ 81 | * node is a leaf, so it contains values rather than node \ 82 | * pointers. \ 83 | */ \ 84 | bits = rtree->level2bits[i]; \ 85 | subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \ 86 | bits); \ 87 | ret = node[subkey]; \ 88 | RTREE_UNLOCK(&rtree->mutex); \ 89 | \ 90 | RTREE_GET_VALIDATE \ 91 | return (ret); \ 92 | } 93 | 94 | #ifdef JEMALLOC_DEBUG 95 | # define RTREE_LOCK(l) malloc_mutex_lock(l) 96 | # define RTREE_UNLOCK(l) malloc_mutex_unlock(l) 97 | # define RTREE_GET_VALIDATE 98 | RTREE_GET_GENERATE(rtree_get_locked) 99 | # undef RTREE_LOCK 100 | # undef RTREE_UNLOCK 101 | # undef RTREE_GET_VALIDATE 102 | #endif 103 | 104 | #define RTREE_LOCK(l) 105 | #define RTREE_UNLOCK(l) 106 | #ifdef JEMALLOC_DEBUG 107 | /* 108 | * Suppose that it were possible for a jemalloc-allocated chunk to be 109 | * munmap()ped, followed by a different allocator in another thread re-using 110 | * overlapping virtual memory, all without invalidating the cached rtree 111 | * value. 
The result would be a false positive (the rtree would claim that 112 | * jemalloc owns memory that it had actually discarded). This scenario 113 | * seems impossible, but the following assertion is a prudent sanity check. 114 | */ 115 | # define RTREE_GET_VALIDATE \ 116 | assert(rtree_get_locked(rtree, key) == ret); 117 | #else 118 | # define RTREE_GET_VALIDATE 119 | #endif 120 | RTREE_GET_GENERATE(rtree_get) 121 | #undef RTREE_LOCK 122 | #undef RTREE_UNLOCK 123 | #undef RTREE_GET_VALIDATE 124 | 125 | JEMALLOC_INLINE bool 126 | rtree_set(rtree_t *rtree, uintptr_t key, void *val) 127 | { 128 | uintptr_t subkey; 129 | unsigned i, lshift, height, bits; 130 | void **node, **child; 131 | 132 | malloc_mutex_lock(&rtree->mutex); 133 | for (i = lshift = 0, height = rtree->height, node = rtree->root; 134 | i < height - 1; 135 | i++, lshift += bits, node = child) { 136 | bits = rtree->level2bits[i]; 137 | subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - 138 | bits); 139 | child = (void**)node[subkey]; 140 | if (child == NULL) { 141 | child = (void**)base_alloc(sizeof(void *) << 142 | rtree->level2bits[i+1]); 143 | if (child == NULL) { 144 | malloc_mutex_unlock(&rtree->mutex); 145 | return (true); 146 | } 147 | memset(child, 0, sizeof(void *) << 148 | rtree->level2bits[i+1]); 149 | node[subkey] = child; 150 | } 151 | } 152 | 153 | /* node is a leaf, so it contains values rather than node pointers. */ 154 | bits = rtree->level2bits[i]; 155 | subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits); 156 | node[subkey] = val; 157 | malloc_mutex_unlock(&rtree->mutex); 158 | 159 | return (false); 160 | } 161 | #endif 162 | 163 | #endif /* JEMALLOC_H_INLINES */ 164 | /******************************************************************************/ 165 | -------------------------------------------------------------------------------- /include/jemalloc/internal/size_classes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # The following limits are chosen such that they cover all supported platforms. 4 | 5 | # Range of quanta. 6 | lg_qmin=3 7 | lg_qmax=4 8 | 9 | # The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)]. 10 | lg_tmin=3 11 | 12 | # Range of page sizes. 
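# (That is, 2^12 = 4 KiB through 2^16 = 64 KiB.)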
13 | lg_pmin=12 14 | lg_pmax=16 15 | 16 | pow2() { 17 | e=$1 18 | pow2_result=1 19 | while [ ${e} -gt 0 ] ; do 20 | pow2_result=$((${pow2_result} + ${pow2_result})) 21 | e=$((${e} - 1)) 22 | done 23 | } 24 | 25 | cat <<EOF 101 | #if (NBINS > 255) 102 | # error "Too many small size classes" 103 | #endif 104 | 105 | #endif /* JEMALLOC_H_TYPES */ 106 | /******************************************************************************/ 107 | #ifdef JEMALLOC_H_STRUCTS 108 | 109 | 110 | #endif /* JEMALLOC_H_STRUCTS */ 111 | /******************************************************************************/ 112 | #ifdef JEMALLOC_H_EXTERNS 113 | 114 | 115 | #endif /* JEMALLOC_H_EXTERNS */ 116 | /******************************************************************************/ 117 | #ifdef JEMALLOC_H_INLINES 118 | 119 | 120 | #endif /* JEMALLOC_H_INLINES */ 121 | /******************************************************************************/ 122 | EOF 123 | -------------------------------------------------------------------------------- /include/jemalloc/internal/stats.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | typedef struct tcache_bin_stats_s tcache_bin_stats_t; 5 | typedef struct malloc_bin_stats_s malloc_bin_stats_t; 6 | typedef struct malloc_large_stats_s malloc_large_stats_t; 7 | typedef struct arena_stats_s arena_stats_t; 8 | typedef struct chunk_stats_s chunk_stats_t; 9 | 10 | #endif /* JEMALLOC_H_TYPES */ 11 | /******************************************************************************/ 12 | #ifdef JEMALLOC_H_STRUCTS 13 | 14 | struct tcache_bin_stats_s { 15 | /* 16 | * Number of allocation requests that corresponded to the size of this 17 | * bin. 18 | */ 19 | uint64_t nrequests; 20 | }; 21 | 22 | struct malloc_bin_stats_s { 23 | /* 24 | * Current number of bytes allocated, including objects currently 25 | * cached by tcache. 26 | */ 27 | size_t allocated; 28 | 29 | /* 30 | * Total number of allocation/deallocation requests served directly by 31 | * the bin. Note that tcache may allocate an object, then recycle it 32 | * many times, resulting in many increments to nrequests, but only one 33 | * each to nmalloc and ndalloc. 34 | */ 35 | uint64_t nmalloc; 36 | uint64_t ndalloc; 37 | 38 | /* 39 | * Number of allocation requests that correspond to the size of this 40 | * bin. This includes requests served by tcache, though tcache only 41 | * periodically merges into this counter. 42 | */ 43 | uint64_t nrequests; 44 | 45 | /* Number of tcache fills from this bin. */ 46 | uint64_t nfills; 47 | 48 | /* Number of tcache flushes to this bin. */ 49 | uint64_t nflushes; 50 | 51 | /* Total number of runs created for this bin's size class. */ 52 | uint64_t nruns; 53 | 54 | /* 55 | * Total number of runs reused by extracting them from the runs tree for 56 | * this bin's size class. 57 | */ 58 | uint64_t reruns; 59 | 60 | /* Current number of runs in this bin. */ 61 | size_t curruns; 62 | }; 63 | 64 | struct malloc_large_stats_s { 65 | /* 66 | * Total number of allocation/deallocation requests served directly by 67 | * the arena. Note that tcache may allocate an object, then recycle it 68 | * many times, resulting in many increments to nrequests, but only one 69 | * each to nmalloc and ndalloc. 70 | */ 71 | uint64_t nmalloc; 72 | uint64_t ndalloc; 73 | 74 | /* 75 | * Number of allocation requests that correspond to this size class.
76 | * This includes requests served by tcache, though tcache only 77 | * periodically merges into this counter. 78 | */ 79 | uint64_t nrequests; 80 | 81 | /* Current number of runs of this size class. */ 82 | size_t curruns; 83 | }; 84 | 85 | struct arena_stats_s { 86 | /* Number of bytes currently mapped. */ 87 | size_t mapped; 88 | 89 | /* 90 | * Total number of purge sweeps, total number of madvise calls made, 91 | * and total pages purged in order to keep dirty unused memory under 92 | * control. 93 | */ 94 | uint64_t npurge; 95 | uint64_t nmadvise; 96 | uint64_t purged; 97 | 98 | /* Per-size-category statistics. */ 99 | size_t allocated_large; 100 | uint64_t nmalloc_large; 101 | uint64_t ndalloc_large; 102 | uint64_t nrequests_large; 103 | 104 | /* 105 | * One element for each possible size class, including sizes that 106 | * overlap with bin size classes. This is necessary because ipalloc() 107 | * sometimes has to use such large objects in order to assure proper 108 | * alignment. 109 | */ 110 | malloc_large_stats_t *lstats; 111 | }; 112 | 113 | struct chunk_stats_s { 114 | /* Number of chunks that were allocated. */ 115 | uint64_t nchunks; 116 | 117 | /* High-water mark for number of chunks allocated. */ 118 | size_t highchunks; 119 | 120 | /* 121 | * Current number of chunks allocated. This value isn't maintained for 122 | * any other purpose, so keep track of it in order to be able to set 123 | * highchunks. 124 | */ 125 | size_t curchunks; 126 | }; 127 | 128 | #endif /* JEMALLOC_H_STRUCTS */ 129 | /******************************************************************************/ 130 | #ifdef JEMALLOC_H_EXTERNS 131 | 132 | extern bool opt_stats_print; 133 | 134 | extern size_t stats_cactive; 135 | 136 | void stats_print(void (*write)(void *, const char *), void *cbopaque, 137 | const char *opts); 138 | 139 | #endif /* JEMALLOC_H_EXTERNS */ 140 | /******************************************************************************/ 141 | #ifdef JEMALLOC_H_INLINES 142 | 143 | #ifndef JEMALLOC_ENABLE_INLINE 144 | size_t stats_cactive_get(void); 145 | void stats_cactive_add(size_t size); 146 | void stats_cactive_sub(size_t size); 147 | #endif 148 | 149 | #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) 150 | JEMALLOC_INLINE size_t 151 | stats_cactive_get(void) 152 | { 153 | 154 | return (atomic_read_z(&stats_cactive)); 155 | } 156 | 157 | JEMALLOC_INLINE void 158 | stats_cactive_add(size_t size) 159 | { 160 | 161 | atomic_add_z(&stats_cactive, size); 162 | } 163 | 164 | JEMALLOC_INLINE void 165 | stats_cactive_sub(size_t size) 166 | { 167 | 168 | atomic_sub_z(&stats_cactive, size); 169 | } 170 | #endif 171 | 172 | #endif /* JEMALLOC_H_INLINES */ 173 | /******************************************************************************/ 174 | -------------------------------------------------------------------------------- /include/jemalloc/internal/util.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************/ 2 | #ifdef JEMALLOC_H_TYPES 3 | 4 | /* Size of stack-allocated buffer passed to buferror(). */ 5 | #define BUFERROR_BUF 64 6 | 7 | /* 8 | * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be 9 | * large enough for all possible uses within jemalloc. 10 | */ 11 | #define MALLOC_PRINTF_BUFSIZE 4096 12 | 13 | /* 14 | * Wrap a cpp argument that contains commas such that it isn't broken up into 15 | * multiple arguments. 
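 * For example, JEMALLOC_CONCAT({0, NULL}) expands back to {0, NULL}, so a
 * braced initializer containing a comma can pass through a single macro
 * parameter.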
16 | */ 17 | #define JEMALLOC_CONCAT(...) __VA_ARGS__ 18 | 19 | /* 20 | * Silence compiler warnings due to uninitialized values. This is used 21 | * wherever the compiler fails to recognize that the variable is never used 22 | * uninitialized. 23 | */ 24 | #ifdef JEMALLOC_CC_SILENCE 25 | # define JEMALLOC_CC_SILENCE_INIT(v) = v 26 | #else 27 | # define JEMALLOC_CC_SILENCE_INIT(v) 28 | #endif 29 | 30 | /* 31 | * Define a custom assert() in order to reduce the chances of deadlock during 32 | * assertion failure. 33 | */ 34 | #ifndef assert 35 | #define assert(e) do { \ 36 | if (config_debug && !(e)) { \ 37 | malloc_printf( \ 38 | "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ 39 | __FILE__, __LINE__, #e); \ 40 | abort(); \ 41 | } \ 42 | } while (0) 43 | #endif 44 | 45 | /* Use to assert a particular configuration, e.g., cassert(config_debug). */ 46 | #define cassert(c) do { \ 47 | if ((c) == false) \ 48 | assert(false); \ 49 | } while (0) 50 | 51 | #ifndef not_reached 52 | #define not_reached() do { \ 53 | if (config_debug) { \ 54 | malloc_printf( \ 55 | "<jemalloc>: %s:%d: Unreachable code reached\n", \ 56 | __FILE__, __LINE__); \ 57 | abort(); \ 58 | } \ 59 | } while (0) 60 | #endif 61 | 62 | #ifndef not_implemented 63 | #define not_implemented() do { \ 64 | if (config_debug) { \ 65 | malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ 66 | __FILE__, __LINE__); \ 67 | abort(); \ 68 | } \ 69 | } while (0) 70 | #endif 71 | 72 | #define assert_not_implemented(e) do { \ 73 | if (config_debug && !(e)) \ 74 | not_implemented(); \ 75 | } while (0) 76 | 77 | #endif /* JEMALLOC_H_TYPES */ 78 | /******************************************************************************/ 79 | #ifdef JEMALLOC_H_STRUCTS 80 | 81 | #endif /* JEMALLOC_H_STRUCTS */ 82 | /******************************************************************************/ 83 | #ifdef JEMALLOC_H_EXTERNS 84 | 85 | int buferror(char *buf, size_t buflen); 86 | uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base); 87 | void malloc_write(const char *s); 88 | 89 | /* 90 | * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating 91 | * point math. 92 | */ 93 | int malloc_vsnprintf(char *str, size_t size, const char *format, 94 | va_list ap); 95 | int malloc_snprintf(char *str, size_t size, const char *format, ...) 96 | JEMALLOC_ATTR(format(printf, 3, 4)); 97 | void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, 98 | const char *format, va_list ap); 99 | void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, 100 | const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); 101 | void malloc_printf(const char *format, ...) 102 | JEMALLOC_ATTR(format(printf, 1, 2)); 103 | 104 | #endif /* JEMALLOC_H_EXTERNS */ 105 | /******************************************************************************/ 106 | #ifdef JEMALLOC_H_INLINES 107 | 108 | #ifndef JEMALLOC_ENABLE_INLINE 109 | size_t pow2_ceil(size_t x); 110 | void malloc_write(const char *s); 111 | void set_errno(int errnum); 112 | int get_errno(void); 113 | #endif 114 | 115 | #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) 116 | /* Compute the smallest power of 2 that is >= x.
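For example, pow2_ceil(5) == 8 and pow2_ceil(8) == 8.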
*/ 117 | JEMALLOC_INLINE size_t 118 | pow2_ceil(size_t x) 119 | { 120 | 121 | x--; 122 | x |= x >> 1; 123 | x |= x >> 2; 124 | x |= x >> 4; 125 | x |= x >> 8; 126 | x |= x >> 16; 127 | #if (LG_SIZEOF_PTR == 3) 128 | x |= x >> 32; 129 | #endif 130 | x++; 131 | return (x); 132 | } 133 | 134 | /* Sets error code */ 135 | JEMALLOC_INLINE void 136 | set_errno(int errnum) 137 | { 138 | 139 | #ifdef _WIN32 140 | SetLastError(errnum); 141 | #else 142 | errno = errnum; 143 | #endif 144 | } 145 | 146 | /* Get last error code */ 147 | JEMALLOC_INLINE int 148 | get_errno(void) 149 | { 150 | 151 | #ifdef _WIN32 152 | return (GetLastError()); 153 | #else 154 | return (errno); 155 | #endif 156 | } 157 | #endif 158 | 159 | #endif /* JEMALLOC_H_INLINES */ 160 | /******************************************************************************/ 161 | -------------------------------------------------------------------------------- /include/jemalloc/jemalloc.h.in: -------------------------------------------------------------------------------- 1 | #ifndef JEMALLOC_H_ 2 | #define JEMALLOC_H_ 3 | #ifdef __cplusplus 4 | extern "C" { 5 | #endif 6 | 7 | #include <limits.h> 8 | #include <strings.h> 9 | 10 | #define JEMALLOC_VERSION "@jemalloc_version@" 11 | #define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ 12 | #define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ 13 | #define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ 14 | #define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ 15 | #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" 16 | 17 | #include "jemalloc_defs@install_suffix@.h" 18 | 19 | #ifdef JEMALLOC_EXPERIMENTAL 20 | #define ALLOCM_LG_ALIGN(la) (la) 21 | #if LG_SIZEOF_PTR == 2 22 | #define ALLOCM_ALIGN(a) (ffs(a)-1) 23 | #else 24 | #define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) 25 | #endif 26 | #define ALLOCM_ZERO ((int)0x40) 27 | #define ALLOCM_NO_MOVE ((int)0x80) 28 | /* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */ 29 | #define ALLOCM_ARENA(a) ((int)(((a)+1) << 8)) 30 | 31 | #define ALLOCM_SUCCESS 0 32 | #define ALLOCM_ERR_OOM 1 33 | #define ALLOCM_ERR_NOT_MOVED 2 34 | #endif 35 | 36 | /* 37 | * The je_ prefix on the following public symbol declarations is an artifact of 38 | * namespace management, and should be omitted in application code unless 39 | * JEMALLOC_NO_DEMANGLE is defined (see below).
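 * (With default configure settings, jemalloc_defs.h defines each je_* name to
 * its unprefixed equivalent, so these declarations resolve to the public
 * malloc() family.)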
40 | */ 41 | extern JEMALLOC_EXPORT const char *je_malloc_conf; 42 | extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, 43 | const char *s); 44 | 45 | JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); 46 | JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size) 47 | JEMALLOC_ATTR(malloc); 48 | JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment, 49 | size_t size) JEMALLOC_ATTR(nonnull(1)); 50 | JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size) 51 | JEMALLOC_ATTR(malloc); 52 | JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size); 53 | JEMALLOC_EXPORT void je_free(void *ptr); 54 | 55 | #ifdef JEMALLOC_OVERRIDE_MEMALIGN 56 | JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size) 57 | JEMALLOC_ATTR(malloc); 58 | #endif 59 | 60 | #ifdef JEMALLOC_OVERRIDE_VALLOC 61 | JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc); 62 | #endif 63 | 64 | JEMALLOC_EXPORT size_t je_malloc_usable_size( 65 | JEMALLOC_USABLE_SIZE_CONST void *ptr); 66 | JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *, 67 | const char *), void *je_cbopaque, const char *opts); 68 | JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp, 69 | size_t *oldlenp, void *newp, size_t newlen); 70 | JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp, 71 | size_t *miblenp); 72 | JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen, 73 | void *oldp, size_t *oldlenp, void *newp, size_t newlen); 74 | 75 | #ifdef JEMALLOC_EXPERIMENTAL 76 | JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size, 77 | int flags) JEMALLOC_ATTR(nonnull(1)); 78 | JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size, 79 | size_t extra, int flags) JEMALLOC_ATTR(nonnull(1)); 80 | JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags) 81 | JEMALLOC_ATTR(nonnull(1)); 82 | JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags) 83 | JEMALLOC_ATTR(nonnull(1)); 84 | JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); 85 | #endif 86 | 87 | /* 88 | * By default application code must explicitly refer to mangled symbol names, 89 | * so that it is possible to use jemalloc in conjunction with another allocator 90 | * in the same application. Define JEMALLOC_MANGLE in order to cause automatic 91 | * name mangling that matches the API prefixing that happened as a result of 92 | * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
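 * For example, configuring with --with-jemalloc-prefix=foo_ causes malloc to
 * expand to je_malloc, which the generated jemalloc_defs.h in turn maps to
 * foo_malloc.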
93 | */ 94 | #ifdef JEMALLOC_MANGLE 95 | #ifndef JEMALLOC_NO_DEMANGLE 96 | #define JEMALLOC_NO_DEMANGLE 97 | #endif 98 | #define malloc_conf je_malloc_conf 99 | #define malloc_message je_malloc_message 100 | #define malloc je_malloc 101 | #define calloc je_calloc 102 | #define posix_memalign je_posix_memalign 103 | #define aligned_alloc je_aligned_alloc 104 | #define realloc je_realloc 105 | #define free je_free 106 | #define malloc_usable_size je_malloc_usable_size 107 | #define malloc_stats_print je_malloc_stats_print 108 | #define mallctl je_mallctl 109 | #define mallctlnametomib je_mallctlnametomib 110 | #define mallctlbymib je_mallctlbymib 111 | #define memalign je_memalign 112 | #define valloc je_valloc 113 | #ifdef JEMALLOC_EXPERIMENTAL 114 | #define allocm je_allocm 115 | #define rallocm je_rallocm 116 | #define sallocm je_sallocm 117 | #define dallocm je_dallocm 118 | #define nallocm je_nallocm 119 | #endif 120 | #endif 121 | 122 | /* 123 | * The je_* macros can be used as stable alternative names for the public 124 | * jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant 125 | * for use in jemalloc itself, but it can be used by application code to 126 | * provide isolation from the name mangling specified via --with-mangling 127 | * and/or --with-jemalloc-prefix. 128 | */ 129 | #ifndef JEMALLOC_NO_DEMANGLE 130 | #undef je_malloc_conf 131 | #undef je_malloc_message 132 | #undef je_malloc 133 | #undef je_calloc 134 | #undef je_posix_memalign 135 | #undef je_aligned_alloc 136 | #undef je_realloc 137 | #undef je_free 138 | #undef je_malloc_usable_size 139 | #undef je_malloc_stats_print 140 | #undef je_mallctl 141 | #undef je_mallctlnametomib 142 | #undef je_mallctlbymib 143 | #undef je_memalign 144 | #undef je_valloc 145 | #ifdef JEMALLOC_EXPERIMENTAL 146 | #undef je_allocm 147 | #undef je_rallocm 148 | #undef je_sallocm 149 | #undef je_dallocm 150 | #undef je_nallocm 151 | #endif 152 | #endif 153 | 154 | #ifdef __cplusplus 155 | }; 156 | #endif 157 | #endif /* JEMALLOC_H_ */ 158 | -------------------------------------------------------------------------------- /include/jemalloc/jemalloc_defs.h.in: -------------------------------------------------------------------------------- 1 | /* 2 | * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all 3 | * public APIs to be prefixed. This makes it possible, with some care, to use 4 | * multiple allocators simultaneously. 5 | */ 6 | #undef JEMALLOC_PREFIX 7 | #undef JEMALLOC_CPREFIX 8 | 9 | /* 10 | * Name mangling for public symbols is controlled by --with-mangling and 11 | * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by 12 | * these macro definitions. 13 | */ 14 | #undef je_malloc_conf 15 | #undef je_malloc_message 16 | #undef je_malloc 17 | #undef je_calloc 18 | #undef je_posix_memalign 19 | #undef je_aligned_alloc 20 | #undef je_realloc 21 | #undef je_free 22 | #undef je_malloc_usable_size 23 | #undef je_malloc_stats_print 24 | #undef je_mallctl 25 | #undef je_mallctlnametomib 26 | #undef je_mallctlbymib 27 | #undef je_memalign 28 | #undef je_valloc 29 | #undef je_allocm 30 | #undef je_rallocm 31 | #undef je_sallocm 32 | #undef je_dallocm 33 | #undef je_nallocm 34 | 35 | /* 36 | * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. 37 | * For shared libraries, symbol visibility mechanisms prevent these symbols 38 | * from being exported, but for static libraries, naming collisions are a real 39 | * possibility. 
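 * (private_namespace.h defines each internal symbol as JEMALLOC_N(symbol),
 * which prepends this prefix.)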
40 | */ 41 | #undef JEMALLOC_PRIVATE_NAMESPACE 42 | #undef JEMALLOC_N 43 | 44 | /* 45 | * Hyper-threaded CPUs may need a special instruction inside spin loops in 46 | * order to yield to another virtual CPU. 47 | */ 48 | #undef CPU_SPINWAIT 49 | 50 | /* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ 51 | #undef JEMALLOC_ATOMIC9 52 | 53 | /* 54 | * Defined if OSAtomic*() functions are available, as provided by Darwin, and 55 | * documented in the atomic(3) manual page. 56 | */ 57 | #undef JEMALLOC_OSATOMIC 58 | 59 | /* 60 | * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and 61 | * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite 62 | * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the 63 | * functions are defined in libgcc instead of being inlines) 64 | */ 65 | #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 66 | 67 | /* 68 | * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and 69 | * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite 70 | * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the 71 | * functions are defined in libgcc instead of being inlines) 72 | */ 73 | #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 74 | 75 | /* 76 | * Defined if OSSpin*() functions are available, as provided by Darwin, and 77 | * documented in the spinlock(3) manual page. 78 | */ 79 | #undef JEMALLOC_OSSPIN 80 | 81 | /* 82 | * Defined if _malloc_thread_cleanup() exists. At least in the case of 83 | * FreeBSD, pthread_key_create() allocates, which if used during malloc 84 | * bootstrapping will cause recursion into the pthreads library. Therefore, if 85 | * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in 86 | * malloc_tsd. 87 | */ 88 | #undef JEMALLOC_MALLOC_THREAD_CLEANUP 89 | 90 | /* 91 | * Defined if threaded initialization is known to be safe on this platform. 92 | * Among other things, it must be possible to initialize a mutex without 93 | * triggering allocation in order for threaded allocation to be safe. 94 | */ 95 | #undef JEMALLOC_THREADED_INIT 96 | 97 | /* 98 | * Defined if the pthreads implementation defines 99 | * _pthread_mutex_init_calloc_cb(), in which case the function is used in order 100 | * to avoid recursive allocation during mutex initialization. 101 | */ 102 | #undef JEMALLOC_MUTEX_INIT_CB 103 | 104 | /* Defined if __attribute__((...)) syntax is supported. */ 105 | #undef JEMALLOC_HAVE_ATTR 106 | #ifdef JEMALLOC_HAVE_ATTR 107 | # define JEMALLOC_ATTR(s) __attribute__((s)) 108 | # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) 109 | # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) 110 | # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) 111 | # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) 112 | #elif _MSC_VER 113 | # define JEMALLOC_ATTR(s) 114 | # ifdef DLLEXPORT 115 | # define JEMALLOC_EXPORT __declspec(dllexport) 116 | # else 117 | # define JEMALLOC_EXPORT __declspec(dllimport) 118 | # endif 119 | # define JEMALLOC_ALIGNED(s) __declspec(align(s)) 120 | # define JEMALLOC_SECTION(s) __declspec(allocate(s)) 121 | # define JEMALLOC_NOINLINE __declspec(noinline) 122 | #else 123 | # define JEMALLOC_ATTR(s) 124 | # define JEMALLOC_EXPORT 125 | # define JEMALLOC_ALIGNED(s) 126 | # define JEMALLOC_SECTION(s) 127 | # define JEMALLOC_NOINLINE 128 | #endif 129 | 130 | /* Defined if sbrk() is supported. */ 131 | #undef JEMALLOC_HAVE_SBRK 132 | 133 | /* Non-empty if the tls_model attribute is supported. 
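Typically __attribute__((tls_model("initial-exec"))), which avoids dynamic TLS lookups.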
*/ 134 | #undef JEMALLOC_TLS_MODEL 135 | 136 | /* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ 137 | #undef JEMALLOC_CC_SILENCE 138 | 139 | /* 140 | * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables 141 | * inline functions. 142 | */ 143 | #undef JEMALLOC_DEBUG 144 | 145 | /* JEMALLOC_STATS enables statistics calculation. */ 146 | #undef JEMALLOC_STATS 147 | 148 | /* JEMALLOC_PROF enables allocation profiling. */ 149 | #undef JEMALLOC_PROF 150 | 151 | /* Use libunwind for profile backtracing if defined. */ 152 | #undef JEMALLOC_PROF_LIBUNWIND 153 | 154 | /* Use libgcc for profile backtracing if defined. */ 155 | #undef JEMALLOC_PROF_LIBGCC 156 | 157 | /* Use gcc intrinsics for profile backtracing if defined. */ 158 | #undef JEMALLOC_PROF_GCC 159 | 160 | /* 161 | * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. 162 | * This makes it possible to allocate/deallocate objects without any locking 163 | * when the cache is in the steady state. 164 | */ 165 | #undef JEMALLOC_TCACHE 166 | 167 | /* 168 | * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage 169 | * segment (DSS). 170 | */ 171 | #undef JEMALLOC_DSS 172 | 173 | /* Support memory filling (junk/zero/quarantine/redzone). */ 174 | #undef JEMALLOC_FILL 175 | 176 | /* Support the experimental API. */ 177 | #undef JEMALLOC_EXPERIMENTAL 178 | 179 | /* Support utrace(2)-based tracing. */ 180 | #undef JEMALLOC_UTRACE 181 | 182 | /* Support Valgrind. */ 183 | #undef JEMALLOC_VALGRIND 184 | 185 | /* Support optional abort() on OOM. */ 186 | #undef JEMALLOC_XMALLOC 187 | 188 | /* Support lazy locking (avoid locking unless a second thread is launched). */ 189 | #undef JEMALLOC_LAZY_LOCK 190 | 191 | /* One page is 2^STATIC_PAGE_SHIFT bytes. */ 192 | #undef STATIC_PAGE_SHIFT 193 | 194 | /* 195 | * If defined, use munmap() to unmap freed chunks, rather than storing them for 196 | * later reuse. This is disabled by default on Linux because common sequences 197 | * of mmap()/munmap() calls will cause virtual memory map holes. 198 | */ 199 | #undef JEMALLOC_MUNMAP 200 | 201 | /* 202 | * If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is 203 | * disabled by default because it is Linux-specific and it will cause virtual 204 | * memory map holes, much like munmap(2) does. 205 | */ 206 | #undef JEMALLOC_MREMAP 207 | 208 | /* TLS is used to map arenas and magazine caches to threads. */ 209 | #undef JEMALLOC_TLS 210 | 211 | /* 212 | * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside 213 | * within jemalloc-owned chunks before dereferencing them. 214 | */ 215 | #undef JEMALLOC_IVSALLOC 216 | 217 | /* 218 | * Define overrides for non-standard allocator-related functions if they 219 | * are present on the system. 220 | */ 221 | #undef JEMALLOC_OVERRIDE_MEMALIGN 222 | #undef JEMALLOC_OVERRIDE_VALLOC 223 | 224 | /* 225 | * At least Linux omits the "const" in: 226 | * 227 | * size_t malloc_usable_size(const void *ptr); 228 | * 229 | * Match the operating system's prototype. 230 | */ 231 | #undef JEMALLOC_USABLE_SIZE_CONST 232 | 233 | /* 234 | * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. 235 | */ 236 | #undef JEMALLOC_ZONE 237 | #undef JEMALLOC_ZONE_VERSION 238 | 239 | /* 240 | * Methods for purging unused pages differ between operating systems. 
241 | * 242 | * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, 243 | * such that new pages will be demand-zeroed if 244 | * the address region is later touched. 245 | * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being 246 | * unused, such that they will be discarded rather 247 | * than swapped out. 248 | */ 249 | #undef JEMALLOC_PURGE_MADVISE_DONTNEED 250 | #undef JEMALLOC_PURGE_MADVISE_FREE 251 | 252 | /* 253 | * Define if operating system has alloca.h header. 254 | */ 255 | #undef JEMALLOC_HAS_ALLOCA_H 256 | 257 | /* sizeof(void *) == 2^LG_SIZEOF_PTR. */ 258 | #undef LG_SIZEOF_PTR 259 | 260 | /* sizeof(int) == 2^LG_SIZEOF_INT. */ 261 | #undef LG_SIZEOF_INT 262 | 263 | /* sizeof(long) == 2^LG_SIZEOF_LONG. */ 264 | #undef LG_SIZEOF_LONG 265 | 266 | /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ 267 | #undef LG_SIZEOF_INTMAX_T 268 | -------------------------------------------------------------------------------- /include/msvc_compat/inttypes.h: -------------------------------------------------------------------------------- 1 | // ISO C9x compliant inttypes.h for Microsoft Visual Studio 2 | // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 3 | // 4 | // Copyright (c) 2006 Alexander Chemeris 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright notice, 10 | // this list of conditions and the following disclaimer. 11 | // 12 | // 2. Redistributions in binary form must reproduce the above copyright 13 | // notice, this list of conditions and the following disclaimer in the 14 | // documentation and/or other materials provided with the distribution. 15 | // 16 | // 3. The name of the author may be used to endorse or promote products 17 | // derived from this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 20 | // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 21 | // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 22 | // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 24 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 25 | // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 26 | // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 27 | // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 28 | // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | // 30 | /////////////////////////////////////////////////////////////////////////////// 31 | 32 | #ifndef _MSC_VER // [ 33 | #error "Use this header only with Microsoft Visual C++ compilers!" 
34 | #endif // _MSC_VER ] 35 | 36 | #ifndef _MSC_INTTYPES_H_ // [ 37 | #define _MSC_INTTYPES_H_ 38 | 39 | #if _MSC_VER > 1000 40 | #pragma once 41 | #endif 42 | 43 | #include "stdint.h" 44 | 45 | // 7.8 Format conversion of integer types 46 | 47 | typedef struct { 48 | intmax_t quot; 49 | intmax_t rem; 50 | } imaxdiv_t; 51 | 52 | // 7.8.1 Macros for format specifiers 53 | 54 | #if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 55 | 56 | #ifdef _WIN64 57 | # define __PRI64_PREFIX "l" 58 | # define __PRIPTR_PREFIX "l" 59 | #else 60 | # define __PRI64_PREFIX "ll" 61 | # define __PRIPTR_PREFIX 62 | #endif 63 | 64 | // The fprintf macros for signed integers are: 65 | #define PRId8 "d" 66 | #define PRIi8 "i" 67 | #define PRIdLEAST8 "d" 68 | #define PRIiLEAST8 "i" 69 | #define PRIdFAST8 "d" 70 | #define PRIiFAST8 "i" 71 | 72 | #define PRId16 "hd" 73 | #define PRIi16 "hi" 74 | #define PRIdLEAST16 "hd" 75 | #define PRIiLEAST16 "hi" 76 | #define PRIdFAST16 "hd" 77 | #define PRIiFAST16 "hi" 78 | 79 | #define PRId32 "d" 80 | #define PRIi32 "i" 81 | #define PRIdLEAST32 "d" 82 | #define PRIiLEAST32 "i" 83 | #define PRIdFAST32 "d" 84 | #define PRIiFAST32 "i" 85 | 86 | #define PRId64 __PRI64_PREFIX "d" 87 | #define PRIi64 __PRI64_PREFIX "i" 88 | #define PRIdLEAST64 __PRI64_PREFIX "d" 89 | #define PRIiLEAST64 __PRI64_PREFIX "i" 90 | #define PRIdFAST64 __PRI64_PREFIX "d" 91 | #define PRIiFAST64 __PRI64_PREFIX "i" 92 | 93 | #define PRIdMAX __PRI64_PREFIX "d" 94 | #define PRIiMAX __PRI64_PREFIX "i" 95 | 96 | #define PRIdPTR __PRIPTR_PREFIX "d" 97 | #define PRIiPTR __PRIPTR_PREFIX "i" 98 | 99 | // The fprintf macros for unsigned integers are: 100 | #define PRIo8 "o" 101 | #define PRIu8 "u" 102 | #define PRIx8 "x" 103 | #define PRIX8 "X" 104 | #define PRIoLEAST8 "o" 105 | #define PRIuLEAST8 "u" 106 | #define PRIxLEAST8 "x" 107 | #define PRIXLEAST8 "X" 108 | #define PRIoFAST8 "o" 109 | #define PRIuFAST8 "u" 110 | #define PRIxFAST8 "x" 111 | #define PRIXFAST8 "X" 112 | 113 | #define PRIo16 "ho" 114 | #define PRIu16 "hu" 115 | #define PRIx16 "hx" 116 | #define PRIX16 "hX" 117 | #define PRIoLEAST16 "ho" 118 | #define PRIuLEAST16 "hu" 119 | #define PRIxLEAST16 "hx" 120 | #define PRIXLEAST16 "hX" 121 | #define PRIoFAST16 "ho" 122 | #define PRIuFAST16 "hu" 123 | #define PRIxFAST16 "hx" 124 | #define PRIXFAST16 "hX" 125 | 126 | #define PRIo32 "o" 127 | #define PRIu32 "u" 128 | #define PRIx32 "x" 129 | #define PRIX32 "X" 130 | #define PRIoLEAST32 "o" 131 | #define PRIuLEAST32 "u" 132 | #define PRIxLEAST32 "x" 133 | #define PRIXLEAST32 "X" 134 | #define PRIoFAST32 "o" 135 | #define PRIuFAST32 "u" 136 | #define PRIxFAST32 "x" 137 | #define PRIXFAST32 "X" 138 | 139 | #define PRIo64 __PRI64_PREFIX "o" 140 | #define PRIu64 __PRI64_PREFIX "u" 141 | #define PRIx64 __PRI64_PREFIX "x" 142 | #define PRIX64 __PRI64_PREFIX "X" 143 | #define PRIoLEAST64 __PRI64_PREFIX "o" 144 | #define PRIuLEAST64 __PRI64_PREFIX "u" 145 | #define PRIxLEAST64 __PRI64_PREFIX "x" 146 | #define PRIXLEAST64 __PRI64_PREFIX "X" 147 | #define PRIoFAST64 __PRI64_PREFIX "o" 148 | #define PRIuFAST64 __PRI64_PREFIX "u" 149 | #define PRIxFAST64 __PRI64_PREFIX "x" 150 | #define PRIXFAST64 __PRI64_PREFIX "X" 151 | 152 | #define PRIoMAX __PRI64_PREFIX "o" 153 | #define PRIuMAX __PRI64_PREFIX "u" 154 | #define PRIxMAX __PRI64_PREFIX "x" 155 | #define PRIXMAX __PRI64_PREFIX "X" 156 | 157 | #define PRIoPTR __PRIPTR_PREFIX "o" 158 | #define PRIuPTR __PRIPTR_PREFIX "u" 159 | #define PRIxPTR __PRIPTR_PREFIX "x" 160 | #define 
PRIXPTR __PRIPTR_PREFIX "X" 161 | 162 | // The fscanf macros for signed integers are: 163 | #define SCNd8 "d" 164 | #define SCNi8 "i" 165 | #define SCNdLEAST8 "d" 166 | #define SCNiLEAST8 "i" 167 | #define SCNdFAST8 "d" 168 | #define SCNiFAST8 "i" 169 | 170 | #define SCNd16 "hd" 171 | #define SCNi16 "hi" 172 | #define SCNdLEAST16 "hd" 173 | #define SCNiLEAST16 "hi" 174 | #define SCNdFAST16 "hd" 175 | #define SCNiFAST16 "hi" 176 | 177 | #define SCNd32 "ld" 178 | #define SCNi32 "li" 179 | #define SCNdLEAST32 "ld" 180 | #define SCNiLEAST32 "li" 181 | #define SCNdFAST32 "ld" 182 | #define SCNiFAST32 "li" 183 | 184 | #define SCNd64 "I64d" 185 | #define SCNi64 "I64i" 186 | #define SCNdLEAST64 "I64d" 187 | #define SCNiLEAST64 "I64i" 188 | #define SCNdFAST64 "I64d" 189 | #define SCNiFAST64 "I64i" 190 | 191 | #define SCNdMAX "I64d" 192 | #define SCNiMAX "I64i" 193 | 194 | #ifdef _WIN64 // [ 195 | # define SCNdPTR "I64d" 196 | # define SCNiPTR "I64i" 197 | #else // _WIN64 ][ 198 | # define SCNdPTR "ld" 199 | # define SCNiPTR "li" 200 | #endif // _WIN64 ] 201 | 202 | // The fscanf macros for unsigned integers are: 203 | #define SCNo8 "o" 204 | #define SCNu8 "u" 205 | #define SCNx8 "x" 206 | #define SCNX8 "X" 207 | #define SCNoLEAST8 "o" 208 | #define SCNuLEAST8 "u" 209 | #define SCNxLEAST8 "x" 210 | #define SCNXLEAST8 "X" 211 | #define SCNoFAST8 "o" 212 | #define SCNuFAST8 "u" 213 | #define SCNxFAST8 "x" 214 | #define SCNXFAST8 "X" 215 | 216 | #define SCNo16 "ho" 217 | #define SCNu16 "hu" 218 | #define SCNx16 "hx" 219 | #define SCNX16 "hX" 220 | #define SCNoLEAST16 "ho" 221 | #define SCNuLEAST16 "hu" 222 | #define SCNxLEAST16 "hx" 223 | #define SCNXLEAST16 "hX" 224 | #define SCNoFAST16 "ho" 225 | #define SCNuFAST16 "hu" 226 | #define SCNxFAST16 "hx" 227 | #define SCNXFAST16 "hX" 228 | 229 | #define SCNo32 "lo" 230 | #define SCNu32 "lu" 231 | #define SCNx32 "lx" 232 | #define SCNX32 "lX" 233 | #define SCNoLEAST32 "lo" 234 | #define SCNuLEAST32 "lu" 235 | #define SCNxLEAST32 "lx" 236 | #define SCNXLEAST32 "lX" 237 | #define SCNoFAST32 "lo" 238 | #define SCNuFAST32 "lu" 239 | #define SCNxFAST32 "lx" 240 | #define SCNXFAST32 "lX" 241 | 242 | #define SCNo64 "I64o" 243 | #define SCNu64 "I64u" 244 | #define SCNx64 "I64x" 245 | #define SCNX64 "I64X" 246 | #define SCNoLEAST64 "I64o" 247 | #define SCNuLEAST64 "I64u" 248 | #define SCNxLEAST64 "I64x" 249 | #define SCNXLEAST64 "I64X" 250 | #define SCNoFAST64 "I64o" 251 | #define SCNuFAST64 "I64u" 252 | #define SCNxFAST64 "I64x" 253 | #define SCNXFAST64 "I64X" 254 | 255 | #define SCNoMAX "I64o" 256 | #define SCNuMAX "I64u" 257 | #define SCNxMAX "I64x" 258 | #define SCNXMAX "I64X" 259 | 260 | #ifdef _WIN64 // [ 261 | # define SCNoPTR "I64o" 262 | # define SCNuPTR "I64u" 263 | # define SCNxPTR "I64x" 264 | # define SCNXPTR "I64X" 265 | #else // _WIN64 ][ 266 | # define SCNoPTR "lo" 267 | # define SCNuPTR "lu" 268 | # define SCNxPTR "lx" 269 | # define SCNXPTR "lX" 270 | #endif // _WIN64 ] 271 | 272 | #endif // __STDC_FORMAT_MACROS ] 273 | 274 | // 7.8.2 Functions for greatest-width integer types 275 | 276 | // 7.8.2.1 The imaxabs function 277 | #define imaxabs _abs64 278 | 279 | // 7.8.2.2 The imaxdiv function 280 | 281 | // This is modified version of div() function from Microsoft's div.c found 282 | // in %MSVC.NET%\crt\src\div.c 283 | #ifdef STATIC_IMAXDIV // [ 284 | static 285 | #else // STATIC_IMAXDIV ][ 286 | _inline 287 | #endif // STATIC_IMAXDIV ] 288 | imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) 289 | { 290 | imaxdiv_t result; 291 | 292 | 
result.quot = numer / denom; 293 | result.rem = numer % denom; 294 | 295 | if (numer < 0 && result.rem > 0) { 296 | // did division wrong; must fix up 297 | ++result.quot; 298 | result.rem -= denom; 299 | } 300 | 301 | return result; 302 | } 303 | 304 | // 7.8.2.3 The strtoimax and strtoumax functions 305 | #define strtoimax _strtoi64 306 | #define strtoumax _strtoui64 307 | 308 | // 7.8.2.4 The wcstoimax and wcstoumax functions 309 | #define wcstoimax _wcstoi64 310 | #define wcstoumax _wcstoui64 311 | 312 | 313 | #endif // _MSC_INTTYPES_H_ ] 314 | -------------------------------------------------------------------------------- /include/msvc_compat/stdbool.h: -------------------------------------------------------------------------------- 1 | #ifndef stdbool_h 2 | #define stdbool_h 3 | 4 | #include <wtypes.h> 5 | 6 | /* MSVC doesn't define _Bool or bool in C, but does have BOOL */ 7 | /* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ 8 | typedef BOOL _Bool; 9 | 10 | #define bool _Bool 11 | #define true 1 12 | #define false 0 13 | 14 | #define __bool_true_false_are_defined 1 15 | 16 | #endif /* stdbool_h */ 17 | -------------------------------------------------------------------------------- /include/msvc_compat/stdint.h: -------------------------------------------------------------------------------- 1 | // ISO C9x compliant stdint.h for Microsoft Visual Studio 2 | // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 3 | // 4 | // Copyright (c) 2006-2008 Alexander Chemeris 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright notice, 10 | // this list of conditions and the following disclaimer. 11 | // 12 | // 2. Redistributions in binary form must reproduce the above copyright 13 | // notice, this list of conditions and the following disclaimer in the 14 | // documentation and/or other materials provided with the distribution. 15 | // 16 | // 3. The name of the author may be used to endorse or promote products 17 | // derived from this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 20 | // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 21 | // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 22 | // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 24 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 25 | // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 26 | // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 27 | // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 28 | // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | // 30 | /////////////////////////////////////////////////////////////////////////////// 31 | 32 | #ifndef _MSC_VER // [ 33 | #error "Use this header only with Microsoft Visual C++ compilers!"
34 | #endif // _MSC_VER ] 35 | 36 | #ifndef _MSC_STDINT_H_ // [ 37 | #define _MSC_STDINT_H_ 38 | 39 | #if _MSC_VER > 1000 40 | #pragma once 41 | #endif 42 | 43 | #include <limits.h> 44 | 45 | // For Visual Studio 6 in C++ mode and for many Visual Studio versions when 46 | // compiling for ARM we should wrap the <wchar.h> include with 'extern "C++" {}' 47 | // or the compiler gives many errors like this: 48 | // error C2733: second C linkage of overloaded function 'wmemchr' not allowed 49 | #ifdef __cplusplus 50 | extern "C" { 51 | #endif 52 | # include <wchar.h> 53 | #ifdef __cplusplus 54 | } 55 | #endif 56 | 57 | // Define _W64 macros to mark types changing their size, like intptr_t. 58 | #ifndef _W64 59 | # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 60 | # define _W64 __w64 61 | # else 62 | # define _W64 63 | # endif 64 | #endif 65 | 66 | 67 | // 7.18.1 Integer types 68 | 69 | // 7.18.1.1 Exact-width integer types 70 | 71 | // Visual Studio 6 and Embedded Visual C++ 4 don't 72 | // realize that, e.g., char has the same size as __int8, 73 | // so we give up on __intX for them. 74 | #if (_MSC_VER < 1300) 75 | typedef signed char int8_t; 76 | typedef signed short int16_t; 77 | typedef signed int int32_t; 78 | typedef unsigned char uint8_t; 79 | typedef unsigned short uint16_t; 80 | typedef unsigned int uint32_t; 81 | #else 82 | typedef signed __int8 int8_t; 83 | typedef signed __int16 int16_t; 84 | typedef signed __int32 int32_t; 85 | typedef unsigned __int8 uint8_t; 86 | typedef unsigned __int16 uint16_t; 87 | typedef unsigned __int32 uint32_t; 88 | #endif 89 | typedef signed __int64 int64_t; 90 | typedef unsigned __int64 uint64_t; 91 | 92 | 93 | // 7.18.1.2 Minimum-width integer types 94 | typedef int8_t int_least8_t; 95 | typedef int16_t int_least16_t; 96 | typedef int32_t int_least32_t; 97 | typedef int64_t int_least64_t; 98 | typedef uint8_t uint_least8_t; 99 | typedef uint16_t uint_least16_t; 100 | typedef uint32_t uint_least32_t; 101 | typedef uint64_t uint_least64_t; 102 | 103 | // 7.18.1.3 Fastest minimum-width integer types 104 | typedef int8_t int_fast8_t; 105 | typedef int16_t int_fast16_t; 106 | typedef int32_t int_fast32_t; 107 | typedef int64_t int_fast64_t; 108 | typedef uint8_t uint_fast8_t; 109 | typedef uint16_t uint_fast16_t; 110 | typedef uint32_t uint_fast32_t; 111 | typedef uint64_t uint_fast64_t; 112 | 113 | // 7.18.1.4 Integer types capable of holding object pointers 114 | #ifdef _WIN64 // [ 115 | typedef signed __int64 intptr_t; 116 | typedef unsigned __int64 uintptr_t; 117 | #else // _WIN64 ][ 118 | typedef _W64 signed int intptr_t; 119 | typedef _W64 unsigned int uintptr_t; 120 | #endif // _WIN64 ] 121 | 122 | // 7.18.1.5 Greatest-width integer types 123 | typedef int64_t intmax_t; 124 | typedef uint64_t uintmax_t; 125 | 126 | 127 | // 7.18.2 Limits of specified-width integer types 128 | 129 | #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 130 | 131 | // 7.18.2.1 Limits of exact-width integer types 132 | #define INT8_MIN ((int8_t)_I8_MIN) 133 | #define INT8_MAX _I8_MAX 134 | #define INT16_MIN ((int16_t)_I16_MIN) 135 | #define INT16_MAX _I16_MAX 136 | #define INT32_MIN ((int32_t)_I32_MIN) 137 | #define INT32_MAX _I32_MAX 138 | #define INT64_MIN ((int64_t)_I64_MIN) 139 | #define INT64_MAX _I64_MAX 140 | #define UINT8_MAX _UI8_MAX 141 | #define UINT16_MAX _UI16_MAX 142 | #define UINT32_MAX _UI32_MAX 143 | #define UINT64_MAX _UI64_MAX 144 | 145 | // 7.18.2.2 Limits of minimum-width integer types 
146 | #define INT_LEAST8_MIN INT8_MIN 147 | #define INT_LEAST8_MAX INT8_MAX 148 | #define INT_LEAST16_MIN INT16_MIN 149 | #define INT_LEAST16_MAX INT16_MAX 150 | #define INT_LEAST32_MIN INT32_MIN 151 | #define INT_LEAST32_MAX INT32_MAX 152 | #define INT_LEAST64_MIN INT64_MIN 153 | #define INT_LEAST64_MAX INT64_MAX 154 | #define UINT_LEAST8_MAX UINT8_MAX 155 | #define UINT_LEAST16_MAX UINT16_MAX 156 | #define UINT_LEAST32_MAX UINT32_MAX 157 | #define UINT_LEAST64_MAX UINT64_MAX 158 | 159 | // 7.18.2.3 Limits of fastest minimum-width integer types 160 | #define INT_FAST8_MIN INT8_MIN 161 | #define INT_FAST8_MAX INT8_MAX 162 | #define INT_FAST16_MIN INT16_MIN 163 | #define INT_FAST16_MAX INT16_MAX 164 | #define INT_FAST32_MIN INT32_MIN 165 | #define INT_FAST32_MAX INT32_MAX 166 | #define INT_FAST64_MIN INT64_MIN 167 | #define INT_FAST64_MAX INT64_MAX 168 | #define UINT_FAST8_MAX UINT8_MAX 169 | #define UINT_FAST16_MAX UINT16_MAX 170 | #define UINT_FAST32_MAX UINT32_MAX 171 | #define UINT_FAST64_MAX UINT64_MAX 172 | 173 | // 7.18.2.4 Limits of integer types capable of holding object pointers 174 | #ifdef _WIN64 // [ 175 | # define INTPTR_MIN INT64_MIN 176 | # define INTPTR_MAX INT64_MAX 177 | # define UINTPTR_MAX UINT64_MAX 178 | #else // _WIN64 ][ 179 | # define INTPTR_MIN INT32_MIN 180 | # define INTPTR_MAX INT32_MAX 181 | # define UINTPTR_MAX UINT32_MAX 182 | #endif // _WIN64 ] 183 | 184 | // 7.18.2.5 Limits of greatest-width integer types 185 | #define INTMAX_MIN INT64_MIN 186 | #define INTMAX_MAX INT64_MAX 187 | #define UINTMAX_MAX UINT64_MAX 188 | 189 | // 7.18.3 Limits of other integer types 190 | 191 | #ifdef _WIN64 // [ 192 | # define PTRDIFF_MIN _I64_MIN 193 | # define PTRDIFF_MAX _I64_MAX 194 | #else // _WIN64 ][ 195 | # define PTRDIFF_MIN _I32_MIN 196 | # define PTRDIFF_MAX _I32_MAX 197 | #endif // _WIN64 ] 198 | 199 | #define SIG_ATOMIC_MIN INT_MIN 200 | #define SIG_ATOMIC_MAX INT_MAX 201 | 202 | #ifndef SIZE_MAX // [ 203 | # ifdef _WIN64 // [ 204 | # define SIZE_MAX _UI64_MAX 205 | # else // _WIN64 ][ 206 | # define SIZE_MAX _UI32_MAX 207 | # endif // _WIN64 ] 208 | #endif // SIZE_MAX ] 209 | 210 | // WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> 211 | #ifndef WCHAR_MIN // [ 212 | # define WCHAR_MIN 0 213 | #endif // WCHAR_MIN ] 214 | #ifndef WCHAR_MAX // [ 215 | # define WCHAR_MAX _UI16_MAX 216 | #endif // WCHAR_MAX ] 217 | 218 | #define WINT_MIN 0 219 | #define WINT_MAX _UI16_MAX 220 | 221 | #endif // __STDC_LIMIT_MACROS ] 222 | 223 | 224 | // 7.18.4 Macros for integer constants 225 | 226 | #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 227 | 228 | // 7.18.4.1 Macros for minimum-width integer constants 229 | 230 | #define INT8_C(val) val##i8 231 | #define INT16_C(val) val##i16 232 | #define INT32_C(val) val##i32 233 | #define INT64_C(val) val##i64 234 | 235 | #define UINT8_C(val) val##ui8 236 | #define UINT16_C(val) val##ui16 237 | #define UINT32_C(val) val##ui32 238 | #define UINT64_C(val) val##ui64 239 | 240 | // 7.18.4.2 Macros for greatest-width integer constants 241 | #define INTMAX_C INT64_C 242 | #define UINTMAX_C UINT64_C 243 | 244 | #endif // __STDC_CONSTANT_MACROS ] 245 | 246 | 247 | #endif // _MSC_STDINT_H_ ] 248 | -------------------------------------------------------------------------------- /include/msvc_compat/strings.h: -------------------------------------------------------------------------------- 1 | #ifndef strings_h 2 | #define strings_h 3 | 4 | /* MSVC doesn't define ffs/ffsl. 
This dummy strings.h header is provided 5 | * for both */ 6 | #include <intrin.h> 7 | #pragma intrinsic(_BitScanForward) 8 | static __forceinline int ffsl(long x) 9 | { 10 | unsigned long i; 11 | 12 | if (_BitScanForward(&i, x)) 13 | return (i + 1); 14 | return (0); 15 | } 16 | 17 | static __forceinline int ffs(int x) 18 | { 19 | 20 | return (ffsl(x)); 21 | } 22 | 23 | #endif 24 | -------------------------------------------------------------------------------- /install-sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | # install - install a program, script, or datafile 4 | # This comes from X11R5 (mit/util/scripts/install.sh). 5 | # 6 | # Copyright 1991 by the Massachusetts Institute of Technology 7 | # 8 | # Permission to use, copy, modify, distribute, and sell this software and its 9 | # documentation for any purpose is hereby granted without fee, provided that 10 | # the above copyright notice appear in all copies and that both that 11 | # copyright notice and this permission notice appear in supporting 12 | # documentation, and that the name of M.I.T. not be used in advertising or 13 | # publicity pertaining to distribution of the software without specific, 14 | # written prior permission. M.I.T. makes no representations about the 15 | # suitability of this software for any purpose. It is provided "as is" 16 | # without express or implied warranty. 17 | # 18 | # Calling this script install-sh is preferred over install.sh, to prevent 19 | # `make' implicit rules from creating a file called install from it 20 | # when there is no Makefile. 21 | # 22 | # This script is compatible with the BSD install script, but was written 23 | # from scratch. It can only install one file at a time, a restriction 24 | # shared with many OS's install programs. 25 | 26 | 27 | # set DOITPROG to echo to test this script 28 | 29 | # Don't use :- since 4.3BSD and earlier shells don't like it. 30 | doit="${DOITPROG-}" 31 | 32 | 33 | # put in absolute paths if you don't have them in your path; or use env. vars.
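# For example (a hypothetical invocation, not part of this script), the
# program overrides and options below can be combined like so:
#   CPPROG=/bin/cp sh ./install-sh -c -m 0644 doc/jemalloc.3 /usr/local/share/man/man3/jemalloc.3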
34 | 35 | mvprog="${MVPROG-mv}" 36 | cpprog="${CPPROG-cp}" 37 | chmodprog="${CHMODPROG-chmod}" 38 | chownprog="${CHOWNPROG-chown}" 39 | chgrpprog="${CHGRPPROG-chgrp}" 40 | stripprog="${STRIPPROG-strip}" 41 | rmprog="${RMPROG-rm}" 42 | mkdirprog="${MKDIRPROG-mkdir}" 43 | 44 | transformbasename="" 45 | transform_arg="" 46 | instcmd="$mvprog" 47 | chmodcmd="$chmodprog 0755" 48 | chowncmd="" 49 | chgrpcmd="" 50 | stripcmd="" 51 | rmcmd="$rmprog -f" 52 | mvcmd="$mvprog" 53 | src="" 54 | dst="" 55 | dir_arg="" 56 | 57 | while [ x"$1" != x ]; do 58 | case $1 in 59 | -c) instcmd="$cpprog" 60 | shift 61 | continue;; 62 | 63 | -d) dir_arg=true 64 | shift 65 | continue;; 66 | 67 | -m) chmodcmd="$chmodprog $2" 68 | shift 69 | shift 70 | continue;; 71 | 72 | -o) chowncmd="$chownprog $2" 73 | shift 74 | shift 75 | continue;; 76 | 77 | -g) chgrpcmd="$chgrpprog $2" 78 | shift 79 | shift 80 | continue;; 81 | 82 | -s) stripcmd="$stripprog" 83 | shift 84 | continue;; 85 | 86 | -t=*) transformarg=`echo $1 | sed 's/-t=//'` 87 | shift 88 | continue;; 89 | 90 | -b=*) transformbasename=`echo $1 | sed 's/-b=//'` 91 | shift 92 | continue;; 93 | 94 | *) if [ x"$src" = x ] 95 | then 96 | src=$1 97 | else 98 | # this colon is to work around a 386BSD /bin/sh bug 99 | : 100 | dst=$1 101 | fi 102 | shift 103 | continue;; 104 | esac 105 | done 106 | 107 | if [ x"$src" = x ] 108 | then 109 | echo "install: no input file specified" 110 | exit 1 111 | else 112 | true 113 | fi 114 | 115 | if [ x"$dir_arg" != x ]; then 116 | dst=$src 117 | src="" 118 | 119 | if [ -d $dst ]; then 120 | instcmd=: 121 | else 122 | instcmd=mkdir 123 | fi 124 | else 125 | 126 | # Waiting for this to be detected by the "$instcmd $src $dsttmp" command 127 | # might cause directories to be created, which would be especially bad 128 | # if $src (and thus $dsttmp) contains '*'. 129 | 130 | if [ -f $src -o -d $src ] 131 | then 132 | true 133 | else 134 | echo "install: $src does not exist" 135 | exit 1 136 | fi 137 | 138 | if [ x"$dst" = x ] 139 | then 140 | echo "install: no destination specified" 141 | exit 1 142 | else 143 | true 144 | fi 145 | 146 | # If destination is a directory, append the input filename; if your system 147 | # does not like double slashes in filenames, you may need to add some logic 148 | 149 | if [ -d $dst ] 150 | then 151 | dst="$dst"/`basename $src` 152 | else 153 | true 154 | fi 155 | fi 156 | 157 | ## this sed command emulates the dirname command 158 | dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` 159 | 160 | # Make sure that the destination directory exists. 161 | # this part is taken from Noah Friedman's mkinstalldirs script 162 | 163 | # Skip lots of stat calls in the usual case. 164 | if [ ! -d "$dstdir" ]; then 165 | defaultIFS=' 166 | ' 167 | IFS="${IFS-${defaultIFS}}" 168 | 169 | oIFS="${IFS}" 170 | # Some sh's can't handle IFS=/ for some reason. 171 | IFS='%' 172 | set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` 173 | IFS="${oIFS}" 174 | 175 | pathcomp='' 176 | 177 | while [ $# -ne 0 ] ; do 178 | pathcomp="${pathcomp}${1}" 179 | shift 180 | 181 | if [ ! 
-d "${pathcomp}" ] ; 182 | then 183 | $mkdirprog "${pathcomp}" 184 | else 185 | true 186 | fi 187 | 188 | pathcomp="${pathcomp}/" 189 | done 190 | fi 191 | 192 | if [ x"$dir_arg" != x ] 193 | then 194 | $doit $instcmd $dst && 195 | 196 | if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && 197 | if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && 198 | if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && 199 | if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi 200 | else 201 | 202 | # If we're going to rename the final executable, determine the name now. 203 | 204 | if [ x"$transformarg" = x ] 205 | then 206 | dstfile=`basename $dst` 207 | else 208 | dstfile=`basename $dst $transformbasename | 209 | sed $transformarg`$transformbasename 210 | fi 211 | 212 | # don't allow the sed command to completely eliminate the filename 213 | 214 | if [ x"$dstfile" = x ] 215 | then 216 | dstfile=`basename $dst` 217 | else 218 | true 219 | fi 220 | 221 | # Make a temp file name in the proper directory. 222 | 223 | dsttmp=$dstdir/#inst.$$# 224 | 225 | # Move or copy the file name to the temp name 226 | 227 | $doit $instcmd $src $dsttmp && 228 | 229 | trap "rm -f ${dsttmp}" 0 && 230 | 231 | # and set any options; do chmod last to preserve setuid bits 232 | 233 | # If any of these fail, we abort the whole thing. If we want to 234 | # ignore errors from any of these, just make sure not to ignore 235 | # errors from the above "$doit $instcmd $src $dsttmp" command. 236 | 237 | if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && 238 | if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && 239 | if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && 240 | if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && 241 | 242 | # Now rename the file to the real destination. 243 | 244 | $doit $rmcmd -f $dstdir/$dstfile && 245 | $doit $mvcmd $dsttmp $dstdir/$dstfile 246 | 247 | fi && 248 | 249 | 250 | exit 0 251 | -------------------------------------------------------------------------------- /src/atomic.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_ATOMIC_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | -------------------------------------------------------------------------------- /src/base.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_BASE_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | 4 | /******************************************************************************/ 5 | /* Data. */ 6 | 7 | static malloc_mutex_t base_mtx; 8 | 9 | /* 10 | * Current pages that are being used for internal memory allocations. These 11 | * pages are carved up in cacheline-size quanta, so that there is no chance of 12 | * false cache line sharing. 13 | */ 14 | static void *base_pages; 15 | static void *base_next_addr; 16 | static void *base_past_addr; /* Addr immediately past base_pages. */ 17 | static extent_node_t *base_nodes; 18 | 19 | /******************************************************************************/ 20 | /* Function prototypes for non-inline static functions. 
*/ 21 | 22 | static bool base_pages_alloc(size_t minsize); 23 | 24 | /******************************************************************************/ 25 | 26 | static bool 27 | base_pages_alloc(size_t minsize) 28 | { 29 | size_t csize; 30 | bool zero; 31 | 32 | assert(minsize != 0); 33 | csize = CHUNK_CEILING(minsize); 34 | zero = false; 35 | base_pages = chunk_alloc(csize, chunksize, true, &zero, 36 | chunk_dss_prec_get()); 37 | if (base_pages == NULL) 38 | return (true); 39 | base_next_addr = base_pages; 40 | base_past_addr = (void *)((uintptr_t)base_pages + csize); 41 | 42 | return (false); 43 | } 44 | 45 | void * 46 | base_alloc(size_t size) 47 | { 48 | void *ret; 49 | size_t csize; 50 | 51 | /* Round size up to nearest multiple of the cacheline size. */ 52 | csize = CACHELINE_CEILING(size); 53 | 54 | malloc_mutex_lock(&base_mtx); 55 | /* Make sure there's enough space for the allocation. */ 56 | if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { 57 | if (base_pages_alloc(csize)) { 58 | malloc_mutex_unlock(&base_mtx); 59 | return (NULL); 60 | } 61 | } 62 | /* Allocate. */ 63 | ret = base_next_addr; 64 | base_next_addr = (void *)((uintptr_t)base_next_addr + csize); 65 | malloc_mutex_unlock(&base_mtx); 66 | VALGRIND_MAKE_MEM_UNDEFINED(ret, csize); 67 | 68 | return (ret); 69 | } 70 | 71 | void * 72 | base_calloc(size_t number, size_t size) 73 | { 74 | void *ret = base_alloc(number * size); 75 | 76 | if (ret != NULL) 77 | memset(ret, 0, number * size); 78 | 79 | return (ret); 80 | } 81 | 82 | extent_node_t * 83 | base_node_alloc(void) 84 | { 85 | extent_node_t *ret; 86 | 87 | malloc_mutex_lock(&base_mtx); 88 | if (base_nodes != NULL) { 89 | ret = base_nodes; 90 | base_nodes = *(extent_node_t **)ret; 91 | malloc_mutex_unlock(&base_mtx); 92 | VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t)); 93 | } else { 94 | malloc_mutex_unlock(&base_mtx); 95 | ret = (extent_node_t *)base_alloc(sizeof(extent_node_t)); 96 | } 97 | 98 | return (ret); 99 | } 100 | 101 | void 102 | base_node_dealloc(extent_node_t *node) 103 | { 104 | 105 | VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); 106 | malloc_mutex_lock(&base_mtx); 107 | *(extent_node_t **)node = base_nodes; 108 | base_nodes = node; 109 | malloc_mutex_unlock(&base_mtx); 110 | } 111 | 112 | bool 113 | base_boot(void) 114 | { 115 | 116 | base_nodes = NULL; 117 | if (malloc_mutex_init(&base_mtx)) 118 | return (true); 119 | 120 | return (false); 121 | } 122 | 123 | void 124 | base_prefork(void) 125 | { 126 | 127 | malloc_mutex_prefork(&base_mtx); 128 | } 129 | 130 | void 131 | base_postfork_parent(void) 132 | { 133 | 134 | malloc_mutex_postfork_parent(&base_mtx); 135 | } 136 | 137 | void 138 | base_postfork_child(void) 139 | { 140 | 141 | malloc_mutex_postfork_child(&base_mtx); 142 | } 143 | -------------------------------------------------------------------------------- /src/bitmap.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_BITMAP_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | 4 | /******************************************************************************/ 5 | /* Function prototypes for non-inline static functions. 
*/ 6 | 7 | static size_t bits2groups(size_t nbits); 8 | 9 | /******************************************************************************/ 10 | 11 | static size_t 12 | bits2groups(size_t nbits) 13 | { 14 | 15 | return ((nbits >> LG_BITMAP_GROUP_NBITS) + 16 | !!(nbits & BITMAP_GROUP_NBITS_MASK)); 17 | } 18 | 19 | void 20 | bitmap_info_init(bitmap_info_t *binfo, size_t nbits) 21 | { 22 | unsigned i; 23 | size_t group_count; 24 | 25 | assert(nbits > 0); 26 | assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); 27 | 28 | /* 29 | * Compute the number of groups necessary to store nbits bits, and 30 | * progressively work upward through the levels until reaching a level 31 | * that requires only one group. 32 | */ 33 | binfo->levels[0].group_offset = 0; 34 | group_count = bits2groups(nbits); 35 | for (i = 1; group_count > 1; i++) { 36 | assert(i < BITMAP_MAX_LEVELS); 37 | binfo->levels[i].group_offset = binfo->levels[i-1].group_offset 38 | + group_count; 39 | group_count = bits2groups(group_count); 40 | } 41 | binfo->levels[i].group_offset = binfo->levels[i-1].group_offset 42 | + group_count; 43 | binfo->nlevels = i; 44 | binfo->nbits = nbits; 45 | } 46 | 47 | size_t 48 | bitmap_info_ngroups(const bitmap_info_t *binfo) 49 | { 50 | 51 | return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); 52 | } 53 | 54 | size_t 55 | bitmap_size(size_t nbits) 56 | { 57 | bitmap_info_t binfo; 58 | 59 | bitmap_info_init(&binfo, nbits); 60 | return (bitmap_info_ngroups(&binfo)); 61 | } 62 | 63 | void 64 | bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) 65 | { 66 | size_t extra; 67 | unsigned i; 68 | 69 | /* 70 | * Bits are actually inverted with regard to the external bitmap 71 | * interface, so the bitmap starts out with all 1 bits, except for 72 | * trailing unused bits (if any). Note that each group uses bit 0 to 73 | * correspond to the first logical bit in the group, so extra bits 74 | * are the most significant bits of the last group. 75 | */ 76 | memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << 77 | LG_SIZEOF_BITMAP); 78 | extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) 79 | & BITMAP_GROUP_NBITS_MASK; 80 | if (extra != 0) 81 | bitmap[binfo->levels[1].group_offset - 1] >>= extra; 82 | for (i = 1; i < binfo->nlevels; i++) { 83 | size_t group_count = binfo->levels[i].group_offset - 84 | binfo->levels[i-1].group_offset; 85 | extra = (BITMAP_GROUP_NBITS - (group_count & 86 | BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; 87 | if (extra != 0) 88 | bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/chunk_dss.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_CHUNK_DSS_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | /******************************************************************************/ 4 | /* Data. */ 5 | 6 | const char *dss_prec_names[] = { 7 | "disabled", 8 | "primary", 9 | "secondary", 10 | "N/A" 11 | }; 12 | 13 | /* Current dss precedence default, used when creating new arenas. */ 14 | static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT; 15 | 16 | /* 17 | * Protects sbrk() calls. This avoids malloc races among threads, though it 18 | * does not protect against races with threads that call sbrk() directly. 19 | */ 20 | static malloc_mutex_t dss_mtx; 21 | 22 | /* Base address of the DSS. 
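* (Recorded via sbrk(0) at boot time; see chunk_dss_boot() below.)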
*/ 23 | static void *dss_base; 24 | /* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ 25 | static void *dss_prev; 26 | /* Current upper limit on DSS addresses. */ 27 | static void *dss_max; 28 | 29 | /******************************************************************************/ 30 | 31 | #ifndef JEMALLOC_HAVE_SBRK 32 | static void * 33 | sbrk(intptr_t increment) 34 | { 35 | 36 | not_implemented(); 37 | 38 | return (NULL); 39 | } 40 | #endif 41 | 42 | dss_prec_t 43 | chunk_dss_prec_get(void) 44 | { 45 | dss_prec_t ret; 46 | 47 | if (config_dss == false) 48 | return (dss_prec_disabled); 49 | malloc_mutex_lock(&dss_mtx); 50 | ret = dss_prec_default; 51 | malloc_mutex_unlock(&dss_mtx); 52 | return (ret); 53 | } 54 | 55 | bool 56 | chunk_dss_prec_set(dss_prec_t dss_prec) 57 | { 58 | 59 | if (config_dss == false) 60 | return (true); 61 | malloc_mutex_lock(&dss_mtx); 62 | dss_prec_default = dss_prec; 63 | malloc_mutex_unlock(&dss_mtx); 64 | return (false); 65 | } 66 | 67 | void * 68 | chunk_alloc_dss(size_t size, size_t alignment, bool *zero) 69 | { 70 | void *ret; 71 | 72 | cassert(config_dss); 73 | assert(size > 0 && (size & chunksize_mask) == 0); 74 | assert(alignment > 0 && (alignment & chunksize_mask) == 0); 75 | 76 | /* 77 | * sbrk() uses a signed increment argument, so take care not to 78 | * interpret a huge allocation request as a negative increment. 79 | */ 80 | if ((intptr_t)size < 0) 81 | return (NULL); 82 | 83 | malloc_mutex_lock(&dss_mtx); 84 | if (dss_prev != (void *)-1) { 85 | size_t gap_size, cpad_size; 86 | void *cpad, *dss_next; 87 | intptr_t incr; 88 | 89 | /* 90 | * The loop is necessary to recover from races with other 91 | * threads that are using the DSS for something other than 92 | * malloc. 93 | */ 94 | do { 95 | /* Get the current end of the DSS. */ 96 | dss_max = sbrk(0); 97 | /* 98 | * Calculate how much padding is necessary to 99 | * chunk-align the end of the DSS. 100 | */ 101 | gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) & 102 | chunksize_mask; 103 | /* 104 | * Compute how much chunk-aligned pad space (if any) is 105 | * necessary to satisfy alignment. This space can be 106 | * recycled for later use. 107 | */ 108 | cpad = (void *)((uintptr_t)dss_max + gap_size); 109 | ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max, 110 | alignment); 111 | cpad_size = (uintptr_t)ret - (uintptr_t)cpad; 112 | dss_next = (void *)((uintptr_t)ret + size); 113 | if ((uintptr_t)ret < (uintptr_t)dss_max || 114 | (uintptr_t)dss_next < (uintptr_t)dss_max) { 115 | /* Wrap-around. */ 116 | malloc_mutex_unlock(&dss_mtx); 117 | return (NULL); 118 | } 119 | incr = gap_size + cpad_size + size; 120 | dss_prev = sbrk(incr); 121 | if (dss_prev == dss_max) { 122 | /* Success. 
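* sbrk() returned the old break (== dss_max), so no other thread
* grew the DSS concurrently; the range just past the old break now
* belongs to this call, with ret chunk-aligned inside it and any
* leading pad recyclable via chunk_unmap() below.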
*/ 123 | dss_max = dss_next; 124 | malloc_mutex_unlock(&dss_mtx); 125 | if (cpad_size != 0) 126 | chunk_unmap(cpad, cpad_size); 127 | if (*zero) { 128 | VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 129 | memset(ret, 0, size); 130 | } 131 | return (ret); 132 | } 133 | } while (dss_prev != (void *)-1); 134 | } 135 | malloc_mutex_unlock(&dss_mtx); 136 | 137 | return (NULL); 138 | } 139 | 140 | bool 141 | chunk_in_dss(void *chunk) 142 | { 143 | bool ret; 144 | 145 | cassert(config_dss); 146 | 147 | malloc_mutex_lock(&dss_mtx); 148 | if ((uintptr_t)chunk >= (uintptr_t)dss_base 149 | && (uintptr_t)chunk < (uintptr_t)dss_max) 150 | ret = true; 151 | else 152 | ret = false; 153 | malloc_mutex_unlock(&dss_mtx); 154 | 155 | return (ret); 156 | } 157 | 158 | bool 159 | chunk_dss_boot(void) 160 | { 161 | 162 | cassert(config_dss); 163 | 164 | if (malloc_mutex_init(&dss_mtx)) 165 | return (true); 166 | dss_base = sbrk(0); 167 | dss_prev = dss_base; 168 | dss_max = dss_base; 169 | 170 | return (false); 171 | } 172 | 173 | void 174 | chunk_dss_prefork(void) 175 | { 176 | 177 | if (config_dss) 178 | malloc_mutex_prefork(&dss_mtx); 179 | } 180 | 181 | void 182 | chunk_dss_postfork_parent(void) 183 | { 184 | 185 | if (config_dss) 186 | malloc_mutex_postfork_parent(&dss_mtx); 187 | } 188 | 189 | void 190 | chunk_dss_postfork_child(void) 191 | { 192 | 193 | if (config_dss) 194 | malloc_mutex_postfork_child(&dss_mtx); 195 | } 196 | 197 | /******************************************************************************/ 198 | -------------------------------------------------------------------------------- /src/chunk_mmap.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_CHUNK_MMAP_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | 4 | /******************************************************************************/ 5 | /* Function prototypes for non-inline static functions. */ 6 | 7 | static void *pages_map(void *addr, size_t size); 8 | static void pages_unmap(void *addr, size_t size); 9 | static void *chunk_alloc_mmap_slow(size_t size, size_t alignment, 10 | bool *zero); 11 | 12 | /******************************************************************************/ 13 | 14 | static void * 15 | pages_map(void *addr, size_t size) 16 | { 17 | void *ret; 18 | 19 | assert(size != 0); 20 | 21 | #ifdef _WIN32 22 | /* 23 | * If VirtualAlloc can't allocate at the given address when one is 24 | * given, it fails and returns NULL. 25 | */ 26 | ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, 27 | PAGE_READWRITE); 28 | #else 29 | /* 30 | * We don't use MAP_FIXED here, because it can cause the *replacement* 31 | * of existing mappings, and we only want to create new mappings. 32 | */ 33 | ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 34 | -1, 0); 35 | assert(ret != NULL); 36 | 37 | if (ret == MAP_FAILED) 38 | ret = NULL; 39 | else if (addr != NULL && ret != addr) { 40 | /* 41 | * We succeeded in mapping memory, but not in the right place. 
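* Callers that pass a non-NULL addr rely on an all-or-nothing
* contract (see the assertion below), so release the stray
* mapping and report failure instead of keeping it.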
42 | */ 43 | if (munmap(ret, size) == -1) { 44 | char buf[BUFERROR_BUF]; 45 | 46 | buferror(buf, sizeof(buf)); 47 | malloc_printf("<jemalloc>: Error in munmap(): %s\n", 48 | buf); 49 | if (opt_abort) 50 | abort(); 51 | } 52 | ret = NULL; 53 | } 54 | #endif 55 | assert(ret == NULL || (addr == NULL && ret != addr) 56 | || (addr != NULL && ret == addr)); 57 | return (ret); 58 | } 59 | 60 | static void 61 | pages_unmap(void *addr, size_t size) 62 | { 63 | 64 | #ifdef _WIN32 65 | if (VirtualFree(addr, 0, MEM_RELEASE) == 0) 66 | #else 67 | if (munmap(addr, size) == -1) 68 | #endif 69 | { 70 | char buf[BUFERROR_BUF]; 71 | 72 | buferror(buf, sizeof(buf)); 73 | malloc_printf("<jemalloc>: Error in " 74 | #ifdef _WIN32 75 | "VirtualFree" 76 | #else 77 | "munmap" 78 | #endif 79 | "(): %s\n", buf); 80 | if (opt_abort) 81 | abort(); 82 | } 83 | } 84 | 85 | static void * 86 | pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) 87 | { 88 | void *ret = (void *)((uintptr_t)addr + leadsize); 89 | 90 | assert(alloc_size >= leadsize + size); 91 | #ifdef _WIN32 92 | { 93 | void *new_addr; 94 | 95 | pages_unmap(addr, alloc_size); 96 | new_addr = pages_map(ret, size); 97 | if (new_addr == ret) 98 | return (ret); 99 | if (new_addr) 100 | pages_unmap(new_addr, size); 101 | return (NULL); 102 | } 103 | #else 104 | { 105 | size_t trailsize = alloc_size - leadsize - size; 106 | 107 | if (leadsize != 0) 108 | pages_unmap(addr, leadsize); 109 | if (trailsize != 0) 110 | pages_unmap((void *)((uintptr_t)ret + size), trailsize); 111 | return (ret); 112 | } 113 | #endif 114 | } 115 | 116 | bool 117 | pages_purge(void *addr, size_t length) 118 | { 119 | bool unzeroed; 120 | 121 | #ifdef _WIN32 122 | VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE); 123 | unzeroed = true; 124 | #else 125 | # ifdef JEMALLOC_PURGE_MADVISE_DONTNEED 126 | # define JEMALLOC_MADV_PURGE MADV_DONTNEED 127 | # define JEMALLOC_MADV_ZEROS true 128 | # elif defined(JEMALLOC_PURGE_MADVISE_FREE) 129 | # define JEMALLOC_MADV_PURGE MADV_FREE 130 | # define JEMALLOC_MADV_ZEROS false 131 | # else 132 | # error "No method defined for purging unused dirty pages." 133 | # endif 134 | int err = madvise(addr, length, JEMALLOC_MADV_PURGE); 135 | unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0); 136 | # undef JEMALLOC_MADV_PURGE 137 | # undef JEMALLOC_MADV_ZEROS 138 | #endif 139 | return (unzeroed); 140 | } 141 | 142 | static void * 143 | chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero) 144 | { 145 | void *ret, *pages; 146 | size_t alloc_size, leadsize; 147 | 148 | alloc_size = size + alignment - PAGE; 149 | /* Beware size_t wrap-around. */ 150 | if (alloc_size < size) 151 | return (NULL); 152 | do { 153 | pages = pages_map(NULL, alloc_size); 154 | if (pages == NULL) 155 | return (NULL); 156 | leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - 157 | (uintptr_t)pages; 158 | ret = pages_trim(pages, alloc_size, leadsize, size); 159 | } while (ret == NULL); 160 | 161 | assert(ret != NULL); 162 | *zero = true; 163 | return (ret); 164 | } 165 | 166 | void * 167 | chunk_alloc_mmap(size_t size, size_t alignment, bool *zero) 168 | { 169 | void *ret; 170 | size_t offset; 171 | 172 | /* 173 | * Ideally, there would be a way to specify alignment to mmap() (like 174 | * NetBSD has), but in the absence of such a feature, we have to work 175 | * hard to efficiently create aligned mappings. The reliable, but 176 | * slow method is to create a mapping that is over-sized, then trim the 177 | * excess. However, that always results in one or two calls to 178 | * pages_unmap(). 179 | * 180 | * Optimistically try mapping precisely the right amount before falling 181 | * back to the slow method, with the expectation that the optimistic 182 | * approach works most of the time. 
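* For example, with 4 MiB chunks (the default here), an anonymous
* mapping of exactly 4 MiB often comes back suitably aligned
* already (offset == 0 below), in which case no remapping or
* trimming system calls are needed at all.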
183 | */ 184 | 185 | assert(alignment != 0); 186 | assert((alignment & chunksize_mask) == 0); 187 | 188 | ret = pages_map(NULL, size); 189 | if (ret == NULL) 190 | return (NULL); 191 | offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); 192 | if (offset != 0) { 193 | pages_unmap(ret, size); 194 | return (chunk_alloc_mmap_slow(size, alignment, zero)); 195 | } 196 | 197 | assert(ret != NULL); 198 | *zero = true; 199 | return (ret); 200 | } 201 | 202 | bool 203 | chunk_dealloc_mmap(void *chunk, size_t size) 204 | { 205 | 206 | if (config_munmap) 207 | pages_unmap(chunk, size); 208 | 209 | return (config_munmap == false); 210 | } 211 | -------------------------------------------------------------------------------- /src/extent.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_EXTENT_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | 4 | /******************************************************************************/ 5 | 6 | static inline int 7 | extent_szad_comp(extent_node_t *a, extent_node_t *b) 8 | { 9 | int ret; 10 | size_t a_size = a->size; 11 | size_t b_size = b->size; 12 | 13 | ret = (a_size > b_size) - (a_size < b_size); 14 | if (ret == 0) { 15 | uintptr_t a_addr = (uintptr_t)a->addr; 16 | uintptr_t b_addr = (uintptr_t)b->addr; 17 | 18 | ret = (a_addr > b_addr) - (a_addr < b_addr); 19 | } 20 | 21 | return (ret); 22 | } 23 | 24 | /* Generate red-black tree functions. */ 25 | rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad, 26 | extent_szad_comp) 27 | 28 | static inline int 29 | extent_ad_comp(extent_node_t *a, extent_node_t *b) 30 | { 31 | uintptr_t a_addr = (uintptr_t)a->addr; 32 | uintptr_t b_addr = (uintptr_t)b->addr; 33 | 34 | return ((a_addr > b_addr) - (a_addr < b_addr)); 35 | } 36 | 37 | /* Generate red-black tree functions. */ 38 | rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, 39 | extent_ad_comp) 40 | -------------------------------------------------------------------------------- /src/hash.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_HASH_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | -------------------------------------------------------------------------------- /src/huge.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_HUGE_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | 4 | /******************************************************************************/ 5 | /* Data. */ 6 | 7 | uint64_t huge_nmalloc; 8 | uint64_t huge_ndalloc; 9 | size_t huge_allocated; 10 | 11 | malloc_mutex_t huge_mtx; 12 | 13 | /******************************************************************************/ 14 | 15 | /* Tree of chunks that are stand-alone huge allocations. */ 16 | static extent_tree_t huge; 17 | 18 | void * 19 | huge_malloc(size_t size, bool zero) 20 | { 21 | 22 | return (huge_palloc(size, chunksize, zero)); 23 | } 24 | 25 | void * 26 | huge_palloc(size_t size, size_t alignment, bool zero) 27 | { 28 | void *ret; 29 | size_t csize; 30 | extent_node_t *node; 31 | bool is_zeroed; 32 | 33 | /* Allocate one or more contiguous chunks for this request. */ 34 | 35 | csize = CHUNK_CEILING(size); 36 | if (csize == 0) { 37 | /* size is large enough to cause size_t wrap-around. */ 38 | return (NULL); 39 | } 40 | 41 | /* Allocate an extent node with which to track the chunk. 
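* The node is acquired before chunk_alloc() so that a metadata
* failure cannot strand an already-allocated chunk; if
* chunk_alloc() itself fails instead, the node is handed back
* via base_node_dealloc() below.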
*/ 42 | node = base_node_alloc(); 43 | if (node == NULL) 44 | return (NULL); 45 | 46 | /* 47 | * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that 48 | * it is possible to make correct junk/zero fill decisions below. 49 | */ 50 | is_zeroed = zero; 51 | ret = chunk_alloc(csize, alignment, false, &is_zeroed, 52 | chunk_dss_prec_get()); 53 | if (ret == NULL) { 54 | base_node_dealloc(node); 55 | return (NULL); 56 | } 57 | 58 | /* Insert node into huge. */ 59 | node->addr = ret; 60 | node->size = csize; 61 | 62 | malloc_mutex_lock(&huge_mtx); 63 | extent_tree_ad_insert(&huge, node); 64 | if (config_stats) { 65 | stats_cactive_add(csize); 66 | huge_nmalloc++; 67 | huge_allocated += csize; 68 | } 69 | malloc_mutex_unlock(&huge_mtx); 70 | 71 | if (config_fill && zero == false) { 72 | if (opt_junk) 73 | memset(ret, 0xa5, csize); 74 | else if (opt_zero && is_zeroed == false) 75 | memset(ret, 0, csize); 76 | } 77 | 78 | return (ret); 79 | } 80 | 81 | void * 82 | huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) 83 | { 84 | 85 | /* 86 | * Avoid moving the allocation if the size class can be left the same. 87 | */ 88 | if (oldsize > arena_maxclass 89 | && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size) 90 | && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { 91 | assert(CHUNK_CEILING(oldsize) == oldsize); 92 | if (config_fill && opt_junk && size < oldsize) { 93 | memset((void *)((uintptr_t)ptr + size), 0x5a, 94 | oldsize - size); 95 | } 96 | return (ptr); 97 | } 98 | 99 | /* Reallocation would require a move. */ 100 | return (NULL); 101 | } 102 | 103 | void * 104 | huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, 105 | size_t alignment, bool zero, bool try_tcache_dalloc) 106 | { 107 | void *ret; 108 | size_t copysize; 109 | 110 | /* Try to avoid moving the allocation. */ 111 | ret = huge_ralloc_no_move(ptr, oldsize, size, extra); 112 | if (ret != NULL) 113 | return (ret); 114 | 115 | /* 116 | * size and oldsize are different enough that we need to use a 117 | * different size class. In that case, fall back to allocating new 118 | * space and copying. 119 | */ 120 | if (alignment > chunksize) 121 | ret = huge_palloc(size + extra, alignment, zero); 122 | else 123 | ret = huge_malloc(size + extra, zero); 124 | 125 | if (ret == NULL) { 126 | if (extra == 0) 127 | return (NULL); 128 | /* Try again, this time without extra. */ 129 | if (alignment > chunksize) 130 | ret = huge_palloc(size, alignment, zero); 131 | else 132 | ret = huge_malloc(size, zero); 133 | 134 | if (ret == NULL) 135 | return (NULL); 136 | } 137 | 138 | /* 139 | * Copy at most size bytes (not size+extra), since the caller has no 140 | * expectation that the extra bytes will be reliably preserved. 141 | */ 142 | copysize = (size < oldsize) ? size : oldsize; 143 | 144 | #ifdef JEMALLOC_MREMAP 145 | /* 146 | * Use mremap(2) if this is a huge-->huge reallocation, and neither the 147 | * source nor the destination are in dss. 148 | */ 149 | if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr) 150 | == false && chunk_in_dss(ret) == false))) { 151 | size_t newsize = huge_salloc(ret); 152 | 153 | /* 154 | * Remove ptr from the tree of huge allocations before 155 | * performing the remap operation, in order to avoid the 156 | * possibility of another thread acquiring that mapping before 157 | * this one removes it from the tree. 
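* Were the order reversed, the old address range could be
* remapped by another thread and re-inserted as a new huge
* allocation while still present in the tree, leaving two nodes
* claiming overlapping addresses.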
158 | */ 159 | huge_dalloc(ptr, false); 160 | if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED, 161 | ret) == MAP_FAILED) { 162 | /* 163 | * Assuming no chunk management bugs in the allocator, 164 | * the only documented way an error can occur here is 165 | * if the application changed the map type for a 166 | * portion of the old allocation. This is firmly in 167 | * undefined behavior territory, so write a diagnostic 168 | * message, and optionally abort. 169 | */ 170 | char buf[BUFERROR_BUF]; 171 | 172 | buferror(buf, sizeof(buf)); 173 | malloc_printf(": Error in mremap(): %s\n", 174 | buf); 175 | if (opt_abort) 176 | abort(); 177 | memcpy(ret, ptr, copysize); 178 | chunk_dealloc_mmap(ptr, oldsize); 179 | } 180 | } else 181 | #endif 182 | { 183 | memcpy(ret, ptr, copysize); 184 | iqallocx(ptr, try_tcache_dalloc); 185 | } 186 | return (ret); 187 | } 188 | 189 | void 190 | huge_dalloc(void *ptr, bool unmap) 191 | { 192 | extent_node_t *node, key; 193 | 194 | malloc_mutex_lock(&huge_mtx); 195 | 196 | /* Extract from tree of huge allocations. */ 197 | key.addr = ptr; 198 | node = extent_tree_ad_search(&huge, &key); 199 | assert(node != NULL); 200 | assert(node->addr == ptr); 201 | extent_tree_ad_remove(&huge, node); 202 | 203 | if (config_stats) { 204 | stats_cactive_sub(node->size); 205 | huge_ndalloc++; 206 | huge_allocated -= node->size; 207 | } 208 | 209 | malloc_mutex_unlock(&huge_mtx); 210 | 211 | if (unmap && config_fill && config_dss && opt_junk) 212 | memset(node->addr, 0x5a, node->size); 213 | 214 | chunk_dealloc(node->addr, node->size, unmap); 215 | 216 | base_node_dealloc(node); 217 | } 218 | 219 | size_t 220 | huge_salloc(const void *ptr) 221 | { 222 | size_t ret; 223 | extent_node_t *node, key; 224 | 225 | malloc_mutex_lock(&huge_mtx); 226 | 227 | /* Extract from tree of huge allocations. */ 228 | key.addr = __DECONST(void *, ptr); 229 | node = extent_tree_ad_search(&huge, &key); 230 | assert(node != NULL); 231 | 232 | ret = node->size; 233 | 234 | malloc_mutex_unlock(&huge_mtx); 235 | 236 | return (ret); 237 | } 238 | 239 | prof_ctx_t * 240 | huge_prof_ctx_get(const void *ptr) 241 | { 242 | prof_ctx_t *ret; 243 | extent_node_t *node, key; 244 | 245 | malloc_mutex_lock(&huge_mtx); 246 | 247 | /* Extract from tree of huge allocations. */ 248 | key.addr = __DECONST(void *, ptr); 249 | node = extent_tree_ad_search(&huge, &key); 250 | assert(node != NULL); 251 | 252 | ret = node->prof_ctx; 253 | 254 | malloc_mutex_unlock(&huge_mtx); 255 | 256 | return (ret); 257 | } 258 | 259 | void 260 | huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) 261 | { 262 | extent_node_t *node, key; 263 | 264 | malloc_mutex_lock(&huge_mtx); 265 | 266 | /* Extract from tree of huge allocations. */ 267 | key.addr = __DECONST(void *, ptr); 268 | node = extent_tree_ad_search(&huge, &key); 269 | assert(node != NULL); 270 | 271 | node->prof_ctx = ctx; 272 | 273 | malloc_mutex_unlock(&huge_mtx); 274 | } 275 | 276 | bool 277 | huge_boot(void) 278 | { 279 | 280 | /* Initialize chunks data. 
*/ 281 | if (malloc_mutex_init(&huge_mtx)) 282 | return (true); 283 | extent_tree_ad_new(&huge); 284 | 285 | if (config_stats) { 286 | huge_nmalloc = 0; 287 | huge_ndalloc = 0; 288 | huge_allocated = 0; 289 | } 290 | 291 | return (false); 292 | } 293 | 294 | void 295 | huge_prefork(void) 296 | { 297 | 298 | malloc_mutex_prefork(&huge_mtx); 299 | } 300 | 301 | void 302 | huge_postfork_parent(void) 303 | { 304 | 305 | malloc_mutex_postfork_parent(&huge_mtx); 306 | } 307 | 308 | void 309 | huge_postfork_child(void) 310 | { 311 | 312 | malloc_mutex_postfork_child(&huge_mtx); 313 | } 314 | -------------------------------------------------------------------------------- /src/mb.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MB_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | -------------------------------------------------------------------------------- /src/mutex.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MUTEX_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | 4 | #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) 5 | #include <dlfcn.h> 6 | #endif 7 | 8 | #ifndef _CRT_SPINCOUNT 9 | #define _CRT_SPINCOUNT 4000 10 | #endif 11 | 12 | /******************************************************************************/ 13 | /* Data. */ 14 | 15 | #ifdef JEMALLOC_LAZY_LOCK 16 | bool isthreaded = false; 17 | #endif 18 | #ifdef JEMALLOC_MUTEX_INIT_CB 19 | static bool postpone_init = true; 20 | static malloc_mutex_t *postponed_mutexes = NULL; 21 | #endif 22 | 23 | #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) 24 | static void pthread_create_once(void); 25 | #endif 26 | 27 | /******************************************************************************/ 28 | /* 29 | * We intercept pthread_create() calls in order to toggle isthreaded if the 30 | * process goes multi-threaded. 
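* The interposing wrapper below looks up the real function with
* dlsym(RTLD_NEXT, "pthread_create") on first use; once any thread
* has been created, isthreaded remains true for the life of the
* process and lock elision stays off.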
31 | */ 32 | 33 | #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) 34 | static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, 35 | void *(*)(void *), void *__restrict); 36 | 37 | static void 38 | pthread_create_once(void) 39 | { 40 | 41 | pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); 42 | if (pthread_create_fptr == NULL) { 43 | malloc_write(": Error in dlsym(RTLD_NEXT, " 44 | "\"pthread_create\")\n"); 45 | abort(); 46 | } 47 | 48 | isthreaded = true; 49 | } 50 | 51 | JEMALLOC_EXPORT int 52 | pthread_create(pthread_t *__restrict thread, 53 | const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), 54 | void *__restrict arg) 55 | { 56 | static pthread_once_t once_control = PTHREAD_ONCE_INIT; 57 | 58 | pthread_once(&once_control, pthread_create_once); 59 | 60 | return (pthread_create_fptr(thread, attr, start_routine, arg)); 61 | } 62 | #endif 63 | 64 | /******************************************************************************/ 65 | 66 | #ifdef JEMALLOC_MUTEX_INIT_CB 67 | JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, 68 | void *(calloc_cb)(size_t, size_t)); 69 | #endif 70 | 71 | bool 72 | malloc_mutex_init(malloc_mutex_t *mutex) 73 | { 74 | 75 | #ifdef _WIN32 76 | if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, 77 | _CRT_SPINCOUNT)) 78 | return (true); 79 | #elif (defined(JEMALLOC_OSSPIN)) 80 | mutex->lock = 0; 81 | #elif (defined(JEMALLOC_MUTEX_INIT_CB)) 82 | if (postpone_init) { 83 | mutex->postponed_next = postponed_mutexes; 84 | postponed_mutexes = mutex; 85 | } else { 86 | if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) != 87 | 0) 88 | return (true); 89 | } 90 | #else 91 | pthread_mutexattr_t attr; 92 | 93 | if (pthread_mutexattr_init(&attr) != 0) 94 | return (true); 95 | pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); 96 | if (pthread_mutex_init(&mutex->lock, &attr) != 0) { 97 | pthread_mutexattr_destroy(&attr); 98 | return (true); 99 | } 100 | pthread_mutexattr_destroy(&attr); 101 | #endif 102 | return (false); 103 | } 104 | 105 | void 106 | malloc_mutex_prefork(malloc_mutex_t *mutex) 107 | { 108 | 109 | malloc_mutex_lock(mutex); 110 | } 111 | 112 | void 113 | malloc_mutex_postfork_parent(malloc_mutex_t *mutex) 114 | { 115 | 116 | malloc_mutex_unlock(mutex); 117 | } 118 | 119 | void 120 | malloc_mutex_postfork_child(malloc_mutex_t *mutex) 121 | { 122 | 123 | #ifdef JEMALLOC_MUTEX_INIT_CB 124 | malloc_mutex_unlock(mutex); 125 | #else 126 | if (malloc_mutex_init(mutex)) { 127 | malloc_printf(": Error re-initializing mutex in " 128 | "child\n"); 129 | if (opt_abort) 130 | abort(); 131 | } 132 | #endif 133 | } 134 | 135 | bool 136 | mutex_boot(void) 137 | { 138 | 139 | #ifdef JEMALLOC_MUTEX_INIT_CB 140 | postpone_init = false; 141 | while (postponed_mutexes != NULL) { 142 | if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, 143 | base_calloc) != 0) 144 | return (true); 145 | postponed_mutexes = postponed_mutexes->postponed_next; 146 | } 147 | #endif 148 | return (false); 149 | } 150 | -------------------------------------------------------------------------------- /src/quarantine.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_QUARANTINE_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | 4 | /* 5 | * quarantine pointers close to NULL are used to encode state information that 6 | * is used for cleaning up during thread shutdown. 
7 | */ 8 | #define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) 9 | #define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) 10 | #define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY 11 | 12 | /******************************************************************************/ 13 | /* Data. */ 14 | 15 | malloc_tsd_data(, quarantine, quarantine_t *, NULL) 16 | 17 | /******************************************************************************/ 18 | /* Function prototypes for non-inline static functions. */ 19 | 20 | static quarantine_t *quarantine_grow(quarantine_t *quarantine); 21 | static void quarantine_drain_one(quarantine_t *quarantine); 22 | static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound); 23 | 24 | /******************************************************************************/ 25 | 26 | quarantine_t * 27 | quarantine_init(size_t lg_maxobjs) 28 | { 29 | quarantine_t *quarantine; 30 | 31 | quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) + 32 | ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t))); 33 | if (quarantine == NULL) 34 | return (NULL); 35 | quarantine->curbytes = 0; 36 | quarantine->curobjs = 0; 37 | quarantine->first = 0; 38 | quarantine->lg_maxobjs = lg_maxobjs; 39 | 40 | quarantine_tsd_set(&quarantine); 41 | 42 | return (quarantine); 43 | } 44 | 45 | static quarantine_t * 46 | quarantine_grow(quarantine_t *quarantine) 47 | { 48 | quarantine_t *ret; 49 | 50 | ret = quarantine_init(quarantine->lg_maxobjs + 1); 51 | if (ret == NULL) { 52 | quarantine_drain_one(quarantine); 53 | return (quarantine); 54 | } 55 | 56 | ret->curbytes = quarantine->curbytes; 57 | ret->curobjs = quarantine->curobjs; 58 | if (quarantine->first + quarantine->curobjs <= (ZU(1) << 59 | quarantine->lg_maxobjs)) { 60 | /* objs ring buffer data are contiguous. */ 61 | memcpy(ret->objs, &quarantine->objs[quarantine->first], 62 | quarantine->curobjs * sizeof(quarantine_obj_t)); 63 | } else { 64 | /* objs ring buffer data wrap around. */ 65 | size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - 66 | quarantine->first; 67 | size_t ncopy_b = quarantine->curobjs - ncopy_a; 68 | 69 | memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a 70 | * sizeof(quarantine_obj_t)); 71 | memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * 72 | sizeof(quarantine_obj_t)); 73 | } 74 | idalloc(quarantine); 75 | 76 | return (ret); 77 | } 78 | 79 | static void 80 | quarantine_drain_one(quarantine_t *quarantine) 81 | { 82 | quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; 83 | assert(obj->usize == isalloc(obj->ptr, config_prof)); 84 | idalloc(obj->ptr); 85 | quarantine->curbytes -= obj->usize; 86 | quarantine->curobjs--; 87 | quarantine->first = (quarantine->first + 1) & ((ZU(1) << 88 | quarantine->lg_maxobjs) - 1); 89 | } 90 | 91 | static void 92 | quarantine_drain(quarantine_t *quarantine, size_t upper_bound) 93 | { 94 | 95 | while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) 96 | quarantine_drain_one(quarantine); 97 | } 98 | 99 | void 100 | quarantine(void *ptr) 101 | { 102 | quarantine_t *quarantine; 103 | size_t usize = isalloc(ptr, config_prof); 104 | 105 | cassert(config_fill); 106 | assert(opt_quarantine); 107 | 108 | quarantine = *quarantine_tsd_get(); 109 | if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) { 110 | if (quarantine == QUARANTINE_STATE_PURGATORY) { 111 | /* 112 | * Make a note that quarantine() was called after 113 | * quarantine_cleanup() was called. 
114 | */ 115 | quarantine = QUARANTINE_STATE_REINCARNATED; 116 | quarantine_tsd_set(&quarantine); 117 | } 118 | idalloc(ptr); 119 | return; 120 | } 121 | /* 122 | * Drain one or more objects if the quarantine size limit would be 123 | * exceeded by appending ptr. 124 | */ 125 | if (quarantine->curbytes + usize > opt_quarantine) { 126 | size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine 127 | - usize : 0; 128 | quarantine_drain(quarantine, upper_bound); 129 | } 130 | /* Grow the quarantine ring buffer if it's full. */ 131 | if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) 132 | quarantine = quarantine_grow(quarantine); 133 | /* quarantine_grow() must free a slot if it fails to grow. */ 134 | assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); 135 | /* Append ptr if its size doesn't exceed the quarantine size. */ 136 | if (quarantine->curbytes + usize <= opt_quarantine) { 137 | size_t offset = (quarantine->first + quarantine->curobjs) & 138 | ((ZU(1) << quarantine->lg_maxobjs) - 1); 139 | quarantine_obj_t *obj = &quarantine->objs[offset]; 140 | obj->ptr = ptr; 141 | obj->usize = usize; 142 | quarantine->curbytes += usize; 143 | quarantine->curobjs++; 144 | if (opt_junk) 145 | memset(ptr, 0x5a, usize); 146 | } else { 147 | assert(quarantine->curbytes == 0); 148 | idalloc(ptr); 149 | } 150 | } 151 | 152 | void 153 | quarantine_cleanup(void *arg) 154 | { 155 | quarantine_t *quarantine = *(quarantine_t **)arg; 156 | 157 | if (quarantine == QUARANTINE_STATE_REINCARNATED) { 158 | /* 159 | * Another destructor deallocated memory after this destructor 160 | * was called. Reset quarantine to QUARANTINE_STATE_PURGATORY 161 | * in order to receive another callback. 162 | */ 163 | quarantine = QUARANTINE_STATE_PURGATORY; 164 | quarantine_tsd_set(&quarantine); 165 | } else if (quarantine == QUARANTINE_STATE_PURGATORY) { 166 | /* 167 | * The previous time this destructor was called, we set the key 168 | * to QUARANTINE_STATE_PURGATORY so that other destructors 169 | * wouldn't cause re-creation of the quarantine. This time, do 170 | * nothing, so that the destructor will not be called again. 171 | */ 172 | } else if (quarantine != NULL) { 173 | quarantine_drain(quarantine, 0); 174 | idalloc(quarantine); 175 | quarantine = QUARANTINE_STATE_PURGATORY; 176 | quarantine_tsd_set(&quarantine); 177 | } 178 | } 179 | 180 | bool 181 | quarantine_boot(void) 182 | { 183 | 184 | cassert(config_fill); 185 | 186 | if (quarantine_tsd_boot()) 187 | return (true); 188 | 189 | return (false); 190 | } 191 | -------------------------------------------------------------------------------- /src/rtree.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_RTREE_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | 4 | rtree_t * 5 | rtree_new(unsigned bits) 6 | { 7 | rtree_t *ret; 8 | unsigned bits_per_level, height, i; 9 | 10 | bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1; 11 | height = bits / bits_per_level; 12 | if (height * bits_per_level != bits) 13 | height++; 14 | assert(height * bits_per_level >= bits); 15 | 16 | ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) + 17 | (sizeof(unsigned) * height)); 18 | if (ret == NULL) 19 | return (NULL); 20 | memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) * 21 | height)); 22 | 23 | if (malloc_mutex_init(&ret->mutex)) { 24 | /* Leak the rtree. 
*/ 25 | return (NULL); 26 | } 27 | ret->height = height; 28 | if (bits_per_level * height > bits) 29 | ret->level2bits[0] = bits % bits_per_level; 30 | else 31 | ret->level2bits[0] = bits_per_level; 32 | for (i = 1; i < height; i++) 33 | ret->level2bits[i] = bits_per_level; 34 | 35 | ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]); 36 | if (ret->root == NULL) { 37 | /* 38 | * We leak the rtree here, since there's no generic base 39 | * deallocation. 40 | */ 41 | return (NULL); 42 | } 43 | memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]); 44 | 45 | return (ret); 46 | } 47 | 48 | void 49 | rtree_prefork(rtree_t *rtree) 50 | { 51 | 52 | malloc_mutex_prefork(&rtree->mutex); 53 | } 54 | 55 | void 56 | rtree_postfork_parent(rtree_t *rtree) 57 | { 58 | 59 | malloc_mutex_postfork_parent(&rtree->mutex); 60 | } 61 | 62 | void 63 | rtree_postfork_child(rtree_t *rtree) 64 | { 65 | 66 | malloc_mutex_postfork_child(&rtree->mutex); 67 | } 68 | -------------------------------------------------------------------------------- /src/tsd.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_TSD_C_ 2 | #include "jemalloc/internal/jemalloc_internal.h" 3 | 4 | /******************************************************************************/ 5 | /* Data. */ 6 | 7 | static unsigned ncleanups; 8 | static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; 9 | 10 | /******************************************************************************/ 11 | 12 | void * 13 | malloc_tsd_malloc(size_t size) 14 | { 15 | 16 | /* Avoid choose_arena() in order to dodge bootstrapping issues. */ 17 | return (arena_malloc(arenas[0], size, false, false)); 18 | } 19 | 20 | void 21 | malloc_tsd_dalloc(void *wrapper) 22 | { 23 | 24 | idalloc(wrapper); 25 | } 26 | 27 | void 28 | malloc_tsd_no_cleanup(void *arg) 29 | { 30 | 31 | not_reached(); 32 | } 33 | 34 | #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) 35 | #ifndef _WIN32 36 | JEMALLOC_EXPORT 37 | #endif 38 | void 39 | _malloc_thread_cleanup(void) 40 | { 41 | bool pending[MALLOC_TSD_CLEANUPS_MAX], again; 42 | unsigned i; 43 | 44 | for (i = 0; i < ncleanups; i++) 45 | pending[i] = true; 46 | 47 | do { 48 | again = false; 49 | for (i = 0; i < ncleanups; i++) { 50 | if (pending[i]) { 51 | pending[i] = cleanups[i](); 52 | if (pending[i]) 53 | again = true; 54 | } 55 | } 56 | } while (again); 57 | } 58 | #endif 59 | 60 | void 61 | malloc_tsd_cleanup_register(bool (*f)(void)) 62 | { 63 | 64 | assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); 65 | cleanups[ncleanups] = f; 66 | ncleanups++; 67 | } 68 | 69 | void 70 | malloc_tsd_boot(void) 71 | { 72 | 73 | ncleanups = 0; 74 | } 75 | 76 | #ifdef _WIN32 77 | static BOOL WINAPI 78 | _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) 79 | { 80 | 81 | switch (fdwReason) { 82 | #ifdef JEMALLOC_LAZY_LOCK 83 | case DLL_THREAD_ATTACH: 84 | isthreaded = true; 85 | break; 86 | #endif 87 | case DLL_THREAD_DETACH: 88 | _malloc_thread_cleanup(); 89 | break; 90 | default: 91 | break; 92 | } 93 | return (true); 94 | } 95 | 96 | #ifdef _MSC_VER 97 | # ifdef _M_IX86 98 | # pragma comment(linker, "/INCLUDE:__tls_used") 99 | # else 100 | # pragma comment(linker, "/INCLUDE:_tls_used") 101 | # endif 102 | # pragma section(".CRT$XLY",long,read) 103 | #endif 104 | JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) 105 | static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL, 106 | DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; 107 | #endif 108 | 
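The cleanup loop in _malloc_thread_cleanup() above re-runs the registered callbacks until none reports further work: a callback returns true to stay pending for another pass, which matters when one cleanup re-creates per-thread state that another must then tear down. A minimal sketch of a conforming callback follows; my_cleanup and the my_tsd_* helpers are hypothetical and not part of jemalloc.

static bool
my_cleanup(void)
{

	/* Hypothetical per-thread teardown; my_tsd_* are illustrative. */
	if (my_tsd_dirty()) {
		my_tsd_reset();
		return (true);	/* Still pending; run again next pass. */
	}
	return (false);	/* Finished; do not reschedule. */
}

/* Registered once during bootstrap: malloc_tsd_cleanup_register(&my_cleanup); */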
-------------------------------------------------------------------------------- /src/zone.c: -------------------------------------------------------------------------------- 1 | #include "jemalloc/internal/jemalloc_internal.h" 2 | #ifndef JEMALLOC_ZONE 3 | # error "This source file is for zones on Darwin (OS X)." 4 | #endif 5 | 6 | /* 7 | * The malloc_default_purgeable_zone function is only available on >= 10.6. 8 | * We need to check whether it is present at runtime, thus the weak_import. 9 | */ 10 | extern malloc_zone_t *malloc_default_purgeable_zone(void) 11 | JEMALLOC_ATTR(weak_import); 12 | 13 | /******************************************************************************/ 14 | /* Data. */ 15 | 16 | static malloc_zone_t zone; 17 | static struct malloc_introspection_t zone_introspect; 18 | 19 | /******************************************************************************/ 20 | /* Function prototypes for non-inline static functions. */ 21 | 22 | static size_t zone_size(malloc_zone_t *zone, void *ptr); 23 | static void *zone_malloc(malloc_zone_t *zone, size_t size); 24 | static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); 25 | static void *zone_valloc(malloc_zone_t *zone, size_t size); 26 | static void zone_free(malloc_zone_t *zone, void *ptr); 27 | static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); 28 | #if (JEMALLOC_ZONE_VERSION >= 5) 29 | static void *zone_memalign(malloc_zone_t *zone, size_t alignment, 30 | size_t size); 31 | #endif 32 | #if (JEMALLOC_ZONE_VERSION >= 6) 33 | static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, 34 | size_t size); 35 | #endif 36 | static void *zone_destroy(malloc_zone_t *zone); 37 | static size_t zone_good_size(malloc_zone_t *zone, size_t size); 38 | static void zone_force_lock(malloc_zone_t *zone); 39 | static void zone_force_unlock(malloc_zone_t *zone); 40 | 41 | /******************************************************************************/ 42 | /* 43 | * Functions. 44 | */ 45 | 46 | static size_t 47 | zone_size(malloc_zone_t *zone, void *ptr) 48 | { 49 | 50 | /* 51 | * There appear to be places within Darwin (such as setenv(3)) that 52 | * cause calls to this function with pointers that *no* zone owns. If 53 | * we knew that all pointers were owned by *some* zone, we could split 54 | * our zone into two parts, and use one as the default allocator and 55 | * the other as the default deallocator/reallocator. Since that will 56 | * not work in practice, we must check all pointers to assure that they 57 | * reside within a mapped chunk before determining size. 58 | */ 59 | return (ivsalloc(ptr, config_prof)); 60 | } 61 | 62 | static void * 63 | zone_malloc(malloc_zone_t *zone, size_t size) 64 | { 65 | 66 | return (je_malloc(size)); 67 | } 68 | 69 | static void * 70 | zone_calloc(malloc_zone_t *zone, size_t num, size_t size) 71 | { 72 | 73 | return (je_calloc(num, size)); 74 | } 75 | 76 | static void * 77 | zone_valloc(malloc_zone_t *zone, size_t size) 78 | { 79 | void *ret = NULL; /* Assignment avoids useless compiler warning. 
*/ 80 | 81 | je_posix_memalign(&ret, PAGE, size); 82 | 83 | return (ret); 84 | } 85 | 86 | static void 87 | zone_free(malloc_zone_t *zone, void *ptr) 88 | { 89 | 90 | if (ivsalloc(ptr, config_prof) != 0) { 91 | je_free(ptr); 92 | return; 93 | } 94 | 95 | free(ptr); 96 | } 97 | 98 | static void * 99 | zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) 100 | { 101 | 102 | if (ivsalloc(ptr, config_prof) != 0) 103 | return (je_realloc(ptr, size)); 104 | 105 | return (realloc(ptr, size)); 106 | } 107 | 108 | #if (JEMALLOC_ZONE_VERSION >= 5) 109 | static void * 110 | zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) 111 | { 112 | void *ret = NULL; /* Assignment avoids useless compiler warning. */ 113 | 114 | je_posix_memalign(&ret, alignment, size); 115 | 116 | return (ret); 117 | } 118 | #endif 119 | 120 | #if (JEMALLOC_ZONE_VERSION >= 6) 121 | static void 122 | zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) 123 | { 124 | 125 | if (ivsalloc(ptr, config_prof) != 0) { 126 | assert(ivsalloc(ptr, config_prof) == size); 127 | je_free(ptr); 128 | return; 129 | } 130 | 131 | free(ptr); 132 | } 133 | #endif 134 | 135 | static void * 136 | zone_destroy(malloc_zone_t *zone) 137 | { 138 | 139 | /* This function should never be called. */ 140 | assert(false); 141 | return (NULL); 142 | } 143 | 144 | static size_t 145 | zone_good_size(malloc_zone_t *zone, size_t size) 146 | { 147 | 148 | if (size == 0) 149 | size = 1; 150 | return (s2u(size)); 151 | } 152 | 153 | static void 154 | zone_force_lock(malloc_zone_t *zone) 155 | { 156 | 157 | if (isthreaded) 158 | jemalloc_prefork(); 159 | } 160 | 161 | static void 162 | zone_force_unlock(malloc_zone_t *zone) 163 | { 164 | 165 | if (isthreaded) 166 | jemalloc_postfork_parent(); 167 | } 168 | 169 | JEMALLOC_ATTR(constructor) 170 | void 171 | register_zone(void) 172 | { 173 | 174 | /* 175 | * If something else replaced the system default zone allocator, don't 176 | * register jemalloc's. 
177 | */ 178 | malloc_zone_t *default_zone = malloc_default_zone(); 179 | if (!default_zone->zone_name || 180 | strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { 181 | return; 182 | } 183 | 184 | zone.size = (void *)zone_size; 185 | zone.malloc = (void *)zone_malloc; 186 | zone.calloc = (void *)zone_calloc; 187 | zone.valloc = (void *)zone_valloc; 188 | zone.free = (void *)zone_free; 189 | zone.realloc = (void *)zone_realloc; 190 | zone.destroy = (void *)zone_destroy; 191 | zone.zone_name = "jemalloc_zone"; 192 | zone.batch_malloc = NULL; 193 | zone.batch_free = NULL; 194 | zone.introspect = &zone_introspect; 195 | zone.version = JEMALLOC_ZONE_VERSION; 196 | #if (JEMALLOC_ZONE_VERSION >= 5) 197 | zone.memalign = zone_memalign; 198 | #endif 199 | #if (JEMALLOC_ZONE_VERSION >= 6) 200 | zone.free_definite_size = zone_free_definite_size; 201 | #endif 202 | #if (JEMALLOC_ZONE_VERSION >= 8) 203 | zone.pressure_relief = NULL; 204 | #endif 205 | 206 | zone_introspect.enumerator = NULL; 207 | zone_introspect.good_size = (void *)zone_good_size; 208 | zone_introspect.check = NULL; 209 | zone_introspect.print = NULL; 210 | zone_introspect.log = NULL; 211 | zone_introspect.force_lock = (void *)zone_force_lock; 212 | zone_introspect.force_unlock = (void *)zone_force_unlock; 213 | zone_introspect.statistics = NULL; 214 | #if (JEMALLOC_ZONE_VERSION >= 6) 215 | zone_introspect.zone_locked = NULL; 216 | #endif 217 | #if (JEMALLOC_ZONE_VERSION >= 7) 218 | zone_introspect.enable_discharge_checking = NULL; 219 | zone_introspect.disable_discharge_checking = NULL; 220 | zone_introspect.discharge = NULL; 221 | #ifdef __BLOCKS__ 222 | zone_introspect.enumerate_discharged_pointers = NULL; 223 | #else 224 | zone_introspect.enumerate_unavailable_without_blocks = NULL; 225 | #endif 226 | #endif 227 | 228 | /* 229 | * The default purgeable zone is created lazily by OSX's libc. It uses 230 | * the default zone when it is created for "small" allocations 231 | * (< 15 KiB), but assumes the default zone is a scalable_zone. This 232 | * obviously fails when the default zone is the jemalloc zone, so 233 | * malloc_default_purgeable_zone is called beforehand so that the 234 | * default purgeable zone is created when the default zone is still 235 | * a scalable_zone. As purgeable zones only exist on >= 10.6, we need 236 | * to check for the existence of malloc_default_purgeable_zone() at 237 | * run time. 238 | */ 239 | if (malloc_default_purgeable_zone != NULL) 240 | malloc_default_purgeable_zone(); 241 | 242 | /* Register the custom zone. At this point it won't be the default. */ 243 | malloc_zone_register(&zone); 244 | 245 | /* 246 | * Unregister and reregister the default zone. On OSX >= 10.6, 247 | * unregistering takes the last registered zone and places it at the 248 | * location of the specified zone. Unregistering the default zone thus 249 | * makes the last registered one the default. On OSX < 10.6, 250 | * unregistering shifts all registered zones. The first registered zone 251 | * then becomes the default. 
252 | */ 253 | do { 254 | default_zone = malloc_default_zone(); 255 | malloc_zone_unregister(default_zone); 256 | malloc_zone_register(default_zone); 257 | } while (malloc_default_zone() != &zone); 258 | } 259 | -------------------------------------------------------------------------------- /test/ALLOCM_ARENA.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | #include "jemalloc_test.h" 3 | 4 | #define NTHREADS 10 5 | 6 | void * 7 | je_thread_start(void *arg) 8 | { 9 | unsigned thread_ind = (unsigned)(uintptr_t)arg; 10 | unsigned arena_ind; 11 | int r; 12 | void *p; 13 | size_t rsz, sz; 14 | 15 | sz = sizeof(arena_ind); 16 | if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) 17 | != 0) { 18 | malloc_printf("Error in arenas.extend\n"); 19 | abort(); 20 | } 21 | 22 | if (thread_ind % 4 != 3) { 23 | size_t mib[3]; 24 | size_t miblen = sizeof(mib) / sizeof(size_t); 25 | const char *dss_precs[] = {"disabled", "primary", "secondary"}; 26 | const char *dss = dss_precs[thread_ind % 4]; 27 | if (mallctlnametomib("arena.0.dss", mib, &miblen) != 0) { 28 | malloc_printf("Error in mallctlnametomib()\n"); 29 | abort(); 30 | } 31 | mib[1] = arena_ind; 32 | if (mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, 33 | sizeof(const char *))) { 34 | malloc_printf("Error in mallctlbymib()\n"); 35 | abort(); 36 | } 37 | } 38 | 39 | r = allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind)); 40 | if (r != ALLOCM_SUCCESS) { 41 | malloc_printf("Unexpected allocm() error\n"); 42 | abort(); 43 | } 44 | dallocm(p, 0); 45 | 46 | return (NULL); 47 | } 48 | 49 | int 50 | main(void) 51 | { 52 | je_thread_t threads[NTHREADS]; 53 | unsigned i; 54 | 55 | malloc_printf("Test begin\n"); 56 | 57 | for (i = 0; i < NTHREADS; i++) { 58 | je_thread_create(&threads[i], je_thread_start, 59 | (void *)(uintptr_t)i); 60 | } 61 | 62 | for (i = 0; i < NTHREADS; i++) 63 | je_thread_join(threads[i], NULL); 64 | 65 | malloc_printf("Test end\n"); 66 | return (0); 67 | } 68 | -------------------------------------------------------------------------------- /test/ALLOCM_ARENA.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Test end 3 | -------------------------------------------------------------------------------- /test/aligned_alloc.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | #include "jemalloc_test.h" 3 | 4 | #define CHUNK 0x400000 5 | /* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ 6 | #define MAXALIGN ((size_t)0x2000000LU) 7 | #define NITER 4 8 | 9 | int 10 | main(void) 11 | { 12 | size_t alignment, size, total; 13 | unsigned i; 14 | void *p, *ps[NITER]; 15 | 16 | malloc_printf("Test begin\n"); 17 | 18 | /* Test error conditions. 
*/ 19 | alignment = 0; 20 | set_errno(0); 21 | p = aligned_alloc(alignment, 1); 22 | if (p != NULL || get_errno() != EINVAL) { 23 | malloc_printf( 24 | "Expected error for invalid alignment %zu\n", alignment); 25 | } 26 | 27 | for (alignment = sizeof(size_t); alignment < MAXALIGN; 28 | alignment <<= 1) { 29 | set_errno(0); 30 | p = aligned_alloc(alignment + 1, 1); 31 | if (p != NULL || get_errno() != EINVAL) { 32 | malloc_printf( 33 | "Expected error for invalid alignment %zu\n", 34 | alignment + 1); 35 | } 36 | } 37 | 38 | #if LG_SIZEOF_PTR == 3 39 | alignment = UINT64_C(0x8000000000000000); 40 | size = UINT64_C(0x8000000000000000); 41 | #else 42 | alignment = 0x80000000LU; 43 | size = 0x80000000LU; 44 | #endif 45 | set_errno(0); 46 | p = aligned_alloc(alignment, size); 47 | if (p != NULL || get_errno() != ENOMEM) { 48 | malloc_printf( 49 | "Expected error for aligned_alloc(%zu, %zu)\n", 50 | alignment, size); 51 | } 52 | 53 | #if LG_SIZEOF_PTR == 3 54 | alignment = UINT64_C(0x4000000000000000); 55 | size = UINT64_C(0x8400000000000001); 56 | #else 57 | alignment = 0x40000000LU; 58 | size = 0x84000001LU; 59 | #endif 60 | set_errno(0); 61 | p = aligned_alloc(alignment, size); 62 | if (p != NULL || get_errno() != ENOMEM) { 63 | malloc_printf( 64 | "Expected error for aligned_alloc(%zu, %zu)\n", 65 | alignment, size); 66 | } 67 | 68 | alignment = 0x10LU; 69 | #if LG_SIZEOF_PTR == 3 70 | size = UINT64_C(0xfffffffffffffff0); 71 | #else 72 | size = 0xfffffff0LU; 73 | #endif 74 | set_errno(0); 75 | p = aligned_alloc(alignment, size); 76 | if (p != NULL || get_errno() != ENOMEM) { 77 | malloc_printf( 78 | "Expected error for aligned_alloc(%zu, %zu)\n", 79 | alignment, size); 80 | } 81 | 82 | for (i = 0; i < NITER; i++) 83 | ps[i] = NULL; 84 | 85 | for (alignment = 8; 86 | alignment <= MAXALIGN; 87 | alignment <<= 1) { 88 | total = 0; 89 | malloc_printf("Alignment: %zu\n", alignment); 90 | for (size = 1; 91 | size < 3 * alignment && size < (1U << 31); 92 | size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { 93 | for (i = 0; i < NITER; i++) { 94 | ps[i] = aligned_alloc(alignment, size); 95 | if (ps[i] == NULL) { 96 | char buf[BUFERROR_BUF]; 97 | 98 | buferror(buf, sizeof(buf)); 99 | malloc_printf( 100 | "Error for size %zu (%#zx): %s\n", 101 | size, size, buf); 102 | exit(1); 103 | } 104 | total += malloc_usable_size(ps[i]); 105 | if (total >= (MAXALIGN << 1)) 106 | break; 107 | } 108 | for (i = 0; i < NITER; i++) { 109 | if (ps[i] != NULL) { 110 | free(ps[i]); 111 | ps[i] = NULL; 112 | } 113 | } 114 | } 115 | } 116 | 117 | malloc_printf("Test end\n"); 118 | return (0); 119 | } 120 | -------------------------------------------------------------------------------- /test/aligned_alloc.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Alignment: 8 3 | Alignment: 16 4 | Alignment: 32 5 | Alignment: 64 6 | Alignment: 128 7 | Alignment: 256 8 | Alignment: 512 9 | Alignment: 1024 10 | Alignment: 2048 11 | Alignment: 4096 12 | Alignment: 8192 13 | Alignment: 16384 14 | Alignment: 32768 15 | Alignment: 65536 16 | Alignment: 131072 17 | Alignment: 262144 18 | Alignment: 524288 19 | Alignment: 1048576 20 | Alignment: 2097152 21 | Alignment: 4194304 22 | Alignment: 8388608 23 | Alignment: 16777216 24 | Alignment: 33554432 25 | Test end 26 | -------------------------------------------------------------------------------- /test/allocated.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | 
#include "jemalloc_test.h" 3 | 4 | void * 5 | je_thread_start(void *arg) 6 | { 7 | int err; 8 | void *p; 9 | uint64_t a0, a1, d0, d1; 10 | uint64_t *ap0, *ap1, *dp0, *dp1; 11 | size_t sz, usize; 12 | 13 | sz = sizeof(a0); 14 | if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { 15 | if (err == ENOENT) { 16 | #ifdef JEMALLOC_STATS 17 | assert(false); 18 | #endif 19 | goto label_return; 20 | } 21 | malloc_printf("%s(): Error in mallctl(): %s\n", __func__, 22 | strerror(err)); 23 | exit(1); 24 | } 25 | sz = sizeof(ap0); 26 | if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { 27 | if (err == ENOENT) { 28 | #ifdef JEMALLOC_STATS 29 | assert(false); 30 | #endif 31 | goto label_return; 32 | } 33 | malloc_printf("%s(): Error in mallctl(): %s\n", __func__, 34 | strerror(err)); 35 | exit(1); 36 | } 37 | assert(*ap0 == a0); 38 | 39 | sz = sizeof(d0); 40 | if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { 41 | if (err == ENOENT) { 42 | #ifdef JEMALLOC_STATS 43 | assert(false); 44 | #endif 45 | goto label_return; 46 | } 47 | malloc_printf("%s(): Error in mallctl(): %s\n", __func__, 48 | strerror(err)); 49 | exit(1); 50 | } 51 | sz = sizeof(dp0); 52 | if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { 53 | if (err == ENOENT) { 54 | #ifdef JEMALLOC_STATS 55 | assert(false); 56 | #endif 57 | goto label_return; 58 | } 59 | malloc_printf("%s(): Error in mallctl(): %s\n", __func__, 60 | strerror(err)); 61 | exit(1); 62 | } 63 | assert(*dp0 == d0); 64 | 65 | p = malloc(1); 66 | if (p == NULL) { 67 | malloc_printf("%s(): Error in malloc()\n", __func__); 68 | exit(1); 69 | } 70 | 71 | sz = sizeof(a1); 72 | mallctl("thread.allocated", &a1, &sz, NULL, 0); 73 | sz = sizeof(ap1); 74 | mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); 75 | assert(*ap1 == a1); 76 | assert(ap0 == ap1); 77 | 78 | usize = malloc_usable_size(p); 79 | assert(a0 + usize <= a1); 80 | 81 | free(p); 82 | 83 | sz = sizeof(d1); 84 | mallctl("thread.deallocated", &d1, &sz, NULL, 0); 85 | sz = sizeof(dp1); 86 | mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); 87 | assert(*dp1 == d1); 88 | assert(dp0 == dp1); 89 | 90 | assert(d0 + usize <= d1); 91 | 92 | label_return: 93 | return (NULL); 94 | } 95 | 96 | int 97 | main(void) 98 | { 99 | int ret = 0; 100 | je_thread_t thread; 101 | 102 | malloc_printf("Test begin\n"); 103 | 104 | je_thread_start(NULL); 105 | 106 | je_thread_create(&thread, je_thread_start, NULL); 107 | je_thread_join(thread, (void *)&ret); 108 | 109 | je_thread_start(NULL); 110 | 111 | je_thread_create(&thread, je_thread_start, NULL); 112 | je_thread_join(thread, (void *)&ret); 113 | 114 | je_thread_start(NULL); 115 | 116 | malloc_printf("Test end\n"); 117 | return (ret); 118 | } 119 | -------------------------------------------------------------------------------- /test/allocated.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Test end 3 | -------------------------------------------------------------------------------- /test/allocm.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | #include "jemalloc_test.h" 3 | 4 | #define CHUNK 0x400000 5 | /* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ 6 | #define MAXALIGN ((size_t)0x2000000LU) 7 | #define NITER 4 8 | 9 | int 10 | main(void) 11 | { 12 | int r; 13 | void *p; 14 | size_t nsz, rsz, sz, alignment, total; 15 | unsigned i; 16 | void *ps[NITER]; 17 | 18 | malloc_printf("Test begin\n"); 19 | 20 | sz = 
42; 21 | nsz = 0; 22 | r = nallocm(&nsz, sz, 0); 23 | if (r != ALLOCM_SUCCESS) { 24 | malloc_printf("Unexpected nallocm() error\n"); 25 | abort(); 26 | } 27 | rsz = 0; 28 | r = allocm(&p, &rsz, sz, 0); 29 | if (r != ALLOCM_SUCCESS) { 30 | malloc_printf("Unexpected allocm() error\n"); 31 | abort(); 32 | } 33 | if (rsz < sz) 34 | malloc_printf("Real size smaller than expected\n"); 35 | if (nsz != rsz) 36 | malloc_printf("nallocm()/allocm() rsize mismatch\n"); 37 | if (dallocm(p, 0) != ALLOCM_SUCCESS) 38 | malloc_printf("Unexpected dallocm() error\n"); 39 | 40 | r = allocm(&p, NULL, sz, 0); 41 | if (r != ALLOCM_SUCCESS) { 42 | malloc_printf("Unexpected allocm() error\n"); 43 | abort(); 44 | } 45 | if (dallocm(p, 0) != ALLOCM_SUCCESS) 46 | malloc_printf("Unexpected dallocm() error\n"); 47 | 48 | nsz = 0; 49 | r = nallocm(&nsz, sz, ALLOCM_ZERO); 50 | if (r != ALLOCM_SUCCESS) { 51 | malloc_printf("Unexpected nallocm() error\n"); 52 | abort(); 53 | } 54 | rsz = 0; 55 | r = allocm(&p, &rsz, sz, ALLOCM_ZERO); 56 | if (r != ALLOCM_SUCCESS) { 57 | malloc_printf("Unexpected allocm() error\n"); 58 | abort(); 59 | } 60 | if (nsz != rsz) 61 | malloc_printf("nallocm()/allocm() rsize mismatch\n"); 62 | if (dallocm(p, 0) != ALLOCM_SUCCESS) 63 | malloc_printf("Unexpected dallocm() error\n"); 64 | 65 | #if LG_SIZEOF_PTR == 3 66 | alignment = UINT64_C(0x8000000000000000); 67 | sz = UINT64_C(0x8000000000000000); 68 | #else 69 | alignment = 0x80000000LU; 70 | sz = 0x80000000LU; 71 | #endif 72 | nsz = 0; 73 | r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)); 74 | if (r == ALLOCM_SUCCESS) { 75 | malloc_printf( 76 | "Expected error for nallocm(&nsz, %zu, %#x)\n", 77 | sz, ALLOCM_ALIGN(alignment)); 78 | } 79 | rsz = 0; 80 | r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)); 81 | if (r == ALLOCM_SUCCESS) { 82 | malloc_printf( 83 | "Expected error for allocm(&p, %zu, %#x)\n", 84 | sz, ALLOCM_ALIGN(alignment)); 85 | } 86 | if (nsz != rsz) 87 | malloc_printf("nallocm()/allocm() rsize mismatch\n"); 88 | 89 | #if LG_SIZEOF_PTR == 3 90 | alignment = UINT64_C(0x4000000000000000); 91 | sz = UINT64_C(0x8400000000000001); 92 | #else 93 | alignment = 0x40000000LU; 94 | sz = 0x84000001LU; 95 | #endif 96 | nsz = 0; 97 | r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)); 98 | if (r != ALLOCM_SUCCESS) 99 | malloc_printf("Unexpected nallocm() error\n"); 100 | rsz = 0; 101 | r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)); 102 | if (r == ALLOCM_SUCCESS) { 103 | malloc_printf( 104 | "Expected error for allocm(&p, %zu, %#x)\n", 105 | sz, ALLOCM_ALIGN(alignment)); 106 | } 107 | 108 | alignment = 0x10LU; 109 | #if LG_SIZEOF_PTR == 3 110 | sz = UINT64_C(0xfffffffffffffff0); 111 | #else 112 | sz = 0xfffffff0LU; 113 | #endif 114 | nsz = 0; 115 | r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)); 116 | if (r == ALLOCM_SUCCESS) { 117 | malloc_printf( 118 | "Expected error for nallocm(&nsz, %zu, %#x)\n", 119 | sz, ALLOCM_ALIGN(alignment)); 120 | } 121 | rsz = 0; 122 | r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)); 123 | if (r == ALLOCM_SUCCESS) { 124 | malloc_printf( 125 | "Expected error for allocm(&p, %zu, %#x)\n", 126 | sz, ALLOCM_ALIGN(alignment)); 127 | } 128 | if (nsz != rsz) 129 | malloc_printf("nallocm()/allocm() rsize mismatch\n"); 130 | 131 | for (i = 0; i < NITER; i++) 132 | ps[i] = NULL; 133 | 134 | for (alignment = 8; 135 | alignment <= MAXALIGN; 136 | alignment <<= 1) { 137 | total = 0; 138 | malloc_printf("Alignment: %zu\n", alignment); 139 | for (sz = 1; 140 | sz < 3 * alignment && sz < (1U << 31); 141 | sz += (alignment >> 
(LG_SIZEOF_PTR-1)) - 1) { 142 | for (i = 0; i < NITER; i++) { 143 | nsz = 0; 144 | r = nallocm(&nsz, sz, 145 | ALLOCM_ALIGN(alignment) | ALLOCM_ZERO); 146 | if (r != ALLOCM_SUCCESS) { 147 | malloc_printf( 148 | "nallocm() error for size %zu" 149 | " (%#zx): %d\n", 150 | sz, sz, r); 151 | exit(1); 152 | } 153 | rsz = 0; 154 | r = allocm(&ps[i], &rsz, sz, 155 | ALLOCM_ALIGN(alignment) | ALLOCM_ZERO); 156 | if (r != ALLOCM_SUCCESS) { 157 | malloc_printf( 158 | "allocm() error for size %zu" 159 | " (%#zx): %d\n", 160 | sz, sz, r); 161 | exit(1); 162 | } 163 | if (rsz < sz) { 164 | malloc_printf( 165 | "Real size smaller than" 166 | " expected\n"); 167 | } 168 | if (nsz != rsz) { 169 | malloc_printf( 170 | "nallocm()/allocm() rsize" 171 | " mismatch\n"); 172 | } 173 | if ((uintptr_t)ps[i] & (alignment-1)) { 174 | malloc_printf( 175 | "%p inadequately aligned for" 176 | " alignment: %zu\n", ps[i], alignment); 177 | } 178 | sallocm(ps[i], &rsz, 0); 179 | total += rsz; 180 | if (total >= (MAXALIGN << 1)) 181 | break; 182 | } 183 | for (i = 0; i < NITER; i++) { 184 | if (ps[i] != NULL) { 185 | dallocm(ps[i], 0); 186 | ps[i] = NULL; 187 | } 188 | } 189 | } 190 | } 191 | 192 | malloc_printf("Test end\n"); 193 | return (0); 194 | } 195 | -------------------------------------------------------------------------------- /test/allocm.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Alignment: 8 3 | Alignment: 16 4 | Alignment: 32 5 | Alignment: 64 6 | Alignment: 128 7 | Alignment: 256 8 | Alignment: 512 9 | Alignment: 1024 10 | Alignment: 2048 11 | Alignment: 4096 12 | Alignment: 8192 13 | Alignment: 16384 14 | Alignment: 32768 15 | Alignment: 65536 16 | Alignment: 131072 17 | Alignment: 262144 18 | Alignment: 524288 19 | Alignment: 1048576 20 | Alignment: 2097152 21 | Alignment: 4194304 22 | Alignment: 8388608 23 | Alignment: 16777216 24 | Alignment: 33554432 25 | Test end 26 | -------------------------------------------------------------------------------- /test/bitmap.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | #include "jemalloc_test.h" 3 | 4 | #if (LG_BITMAP_MAXBITS > 12) 5 | # define MAXBITS 4500 6 | #else 7 | # define MAXBITS (1U << LG_BITMAP_MAXBITS) 8 | #endif 9 | 10 | static void 11 | test_bitmap_size(void) 12 | { 13 | size_t i, prev_size; 14 | 15 | prev_size = 0; 16 | for (i = 1; i <= MAXBITS; i++) { 17 | size_t size = bitmap_size(i); 18 | assert(size >= prev_size); 19 | prev_size = size; 20 | } 21 | } 22 | 23 | static void 24 | test_bitmap_init(void) 25 | { 26 | size_t i; 27 | 28 | for (i = 1; i <= MAXBITS; i++) { 29 | bitmap_info_t binfo; 30 | bitmap_info_init(&binfo, i); 31 | { 32 | size_t j; 33 | bitmap_t *bitmap = malloc(sizeof(bitmap_t) * 34 | bitmap_info_ngroups(&binfo)); 35 | bitmap_init(bitmap, &binfo); 36 | 37 | for (j = 0; j < i; j++) 38 | assert(bitmap_get(bitmap, &binfo, j) == false); 39 | free(bitmap); 40 | 41 | } 42 | } 43 | } 44 | 45 | static void 46 | test_bitmap_set(void) 47 | { 48 | size_t i; 49 | 50 | for (i = 1; i <= MAXBITS; i++) { 51 | bitmap_info_t binfo; 52 | bitmap_info_init(&binfo, i); 53 | { 54 | size_t j; 55 | bitmap_t *bitmap = malloc(sizeof(bitmap_t) * 56 | bitmap_info_ngroups(&binfo)); 57 | bitmap_init(bitmap, &binfo); 58 | 59 | for (j = 0; j < i; j++) 60 | bitmap_set(bitmap, &binfo, j); 61 | assert(bitmap_full(bitmap, &binfo)); 62 | free(bitmap); 63 | } 64 | } 65 | } 66 | 67 | static void 68 | test_bitmap_unset(void) 69 | { 70 | size_t i; 71 | 
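	/*
	 * For each bitmap size, set every bit, verify fullness, unset every
	 * bit, then set them all again; the second bitmap_full() assertion
	 * confirms that unset bits became available for reuse.
	 */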
72 | for (i = 1; i <= MAXBITS; i++) { 73 | bitmap_info_t binfo; 74 | bitmap_info_init(&binfo, i); 75 | { 76 | size_t j; 77 | bitmap_t *bitmap = malloc(sizeof(bitmap_t) * 78 | bitmap_info_ngroups(&binfo)); 79 | bitmap_init(bitmap, &binfo); 80 | 81 | for (j = 0; j < i; j++) 82 | bitmap_set(bitmap, &binfo, j); 83 | assert(bitmap_full(bitmap, &binfo)); 84 | for (j = 0; j < i; j++) 85 | bitmap_unset(bitmap, &binfo, j); 86 | for (j = 0; j < i; j++) 87 | bitmap_set(bitmap, &binfo, j); 88 | assert(bitmap_full(bitmap, &binfo)); 89 | free(bitmap); 90 | } 91 | } 92 | } 93 | 94 | static void 95 | test_bitmap_sfu(void) 96 | { 97 | size_t i; 98 | 99 | for (i = 1; i <= MAXBITS; i++) { 100 | bitmap_info_t binfo; 101 | bitmap_info_init(&binfo, i); 102 | { 103 | ssize_t j; 104 | bitmap_t *bitmap = malloc(sizeof(bitmap_t) * 105 | bitmap_info_ngroups(&binfo)); 106 | bitmap_init(bitmap, &binfo); 107 | 108 | /* Iteratively set bits starting at the beginning. */ 109 | for (j = 0; j < i; j++) 110 | assert(bitmap_sfu(bitmap, &binfo) == j); 111 | assert(bitmap_full(bitmap, &binfo)); 112 | 113 | /* 114 | * Iteratively unset bits starting at the end, and 115 | * verify that bitmap_sfu() reaches the unset bits. 116 | */ 117 | for (j = i - 1; j >= 0; j--) { 118 | bitmap_unset(bitmap, &binfo, j); 119 | assert(bitmap_sfu(bitmap, &binfo) == j); 120 | bitmap_unset(bitmap, &binfo, j); 121 | } 122 | assert(bitmap_get(bitmap, &binfo, 0) == false); 123 | 124 | /* 125 | * Iteratively set bits starting at the beginning, and 126 | * verify that bitmap_sfu() looks past them. 127 | */ 128 | for (j = 1; j < i; j++) { 129 | bitmap_set(bitmap, &binfo, j - 1); 130 | assert(bitmap_sfu(bitmap, &binfo) == j); 131 | bitmap_unset(bitmap, &binfo, j); 132 | } 133 | assert(bitmap_sfu(bitmap, &binfo) == i - 1); 134 | assert(bitmap_full(bitmap, &binfo)); 135 | free(bitmap); 136 | } 137 | } 138 | } 139 | 140 | int 141 | main(void) 142 | { 143 | malloc_printf("Test begin\n"); 144 | 145 | test_bitmap_size(); 146 | test_bitmap_init(); 147 | test_bitmap_set(); 148 | test_bitmap_unset(); 149 | test_bitmap_sfu(); 150 | 151 | malloc_printf("Test end\n"); 152 | return (0); 153 | } 154 | -------------------------------------------------------------------------------- /test/bitmap.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Test end 3 | -------------------------------------------------------------------------------- /test/jemalloc_test.h.in: -------------------------------------------------------------------------------- 1 | /* 2 | * This header should be included by tests, rather than directly including 3 | * jemalloc/jemalloc.h, because --with-install-suffix may cause the header to 4 | * have a different name. 
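 * For example, assuming configure was run with --with-install-suffix=x, the
 * installed header is jemalloc/jemallocx.h; the @install_suffix@
 * substitution on the include line below resolves this automatically.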
5 | */ 6 | #include "jemalloc/jemalloc@install_suffix@.h" 7 | #include "jemalloc/internal/jemalloc_internal.h" 8 | 9 | /* Abstraction layer for threading in tests */ 10 | #ifdef _WIN32 11 | #include <windows.h> 12 | 13 | typedef HANDLE je_thread_t; 14 | 15 | void 16 | je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg) 17 | { 18 | LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; 19 | *thread = CreateThread(NULL, 0, routine, arg, 0, NULL); 20 | if (*thread == NULL) { 21 | malloc_printf("Error in CreateThread()\n"); 22 | exit(1); 23 | } 24 | } 25 | 26 | void 27 | je_thread_join(je_thread_t thread, void **ret) 28 | { 29 | WaitForSingleObject(thread, INFINITE); 30 | } 31 | 32 | #else 33 | #include <pthread.h> 34 | 35 | typedef pthread_t je_thread_t; 36 | 37 | void 38 | je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg) 39 | { 40 | 41 | if (pthread_create(thread, NULL, proc, arg) != 0) { 42 | malloc_printf("Error in pthread_create()\n"); 43 | exit(1); 44 | } 45 | } 46 | 47 | void 48 | je_thread_join(je_thread_t thread, void **ret) 49 | { 50 | 51 | pthread_join(thread, ret); 52 | } 53 | #endif 54 | -------------------------------------------------------------------------------- /test/mremap.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | #include "jemalloc_test.h" 3 | 4 | int 5 | main(void) 6 | { 7 | int ret, err; 8 | size_t sz, lg_chunk, chunksize, i; 9 | char *p, *q; 10 | 11 | malloc_printf("Test begin\n"); 12 | 13 | sz = sizeof(lg_chunk); 14 | if ((err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0))) { 15 | assert(err != ENOENT); 16 | malloc_printf("%s(): Error in mallctl(): %s\n", __func__, 17 | strerror(err)); 18 | ret = 1; 19 | goto label_return; 20 | } 21 | chunksize = ((size_t)1U) << lg_chunk; 22 | 23 | p = (char *)malloc(chunksize); 24 | if (p == NULL) { 25 | malloc_printf("malloc(%zu) --> %p\n", chunksize, p); 26 | ret = 1; 27 | goto label_return; 28 | } 29 | memset(p, 'a', chunksize); 30 | 31 | q = (char *)realloc(p, chunksize * 2); 32 | if (q == NULL) { 33 | malloc_printf("realloc(%p, %zu) --> %p\n", p, chunksize * 2, 34 | q); 35 | ret = 1; 36 | goto label_return; 37 | } 38 | for (i = 0; i < chunksize; i++) { 39 | assert(q[i] == 'a'); 40 | } 41 | 42 | p = q; 43 | 44 | q = (char *)realloc(p, chunksize); 45 | if (q == NULL) { 46 | malloc_printf("realloc(%p, %zu) --> %p\n", p, chunksize, q); 47 | ret = 1; 48 | goto label_return; 49 | } 50 | for (i = 0; i < chunksize; i++) { 51 | assert(q[i] == 'a'); 52 | } 53 | 54 | free(q); 55 | 56 | ret = 0; 57 | label_return: 58 | malloc_printf("Test end\n"); 59 | return (ret); 60 | } 61 | -------------------------------------------------------------------------------- /test/mremap.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Test end 3 | -------------------------------------------------------------------------------- /test/posix_memalign.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | #include "jemalloc_test.h" 3 | 4 | #define CHUNK 0x400000 5 | /* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ 6 | #define MAXALIGN ((size_t)0x2000000LU) 7 | #define NITER 4 8 | 9 | int 10 | main(void) 11 | { 12 | size_t alignment, size, total; 13 | unsigned i; 14 | int err; 15 | void *p, *ps[NITER]; 16 | 17 | malloc_printf("Test begin\n"); 18 | 19 | /* Test error conditions. 
*/ 20 | for (alignment = 0; alignment < sizeof(void *); alignment++) { 21 | err = posix_memalign(&p, alignment, 1); 22 | if (err != EINVAL) { 23 | malloc_printf( 24 | "Expected error for invalid alignment %zu\n", 25 | alignment); 26 | } 27 | } 28 | 29 | for (alignment = sizeof(size_t); alignment < MAXALIGN; 30 | alignment <<= 1) { 31 | err = posix_memalign(&p, alignment + 1, 1); 32 | if (err == 0) { 33 | malloc_printf( 34 | "Expected error for invalid alignment %zu\n", 35 | alignment + 1); 36 | } 37 | } 38 | 39 | #if LG_SIZEOF_PTR == 3 40 | alignment = UINT64_C(0x8000000000000000); 41 | size = UINT64_C(0x8000000000000000); 42 | #else 43 | alignment = 0x80000000LU; 44 | size = 0x80000000LU; 45 | #endif 46 | err = posix_memalign(&p, alignment, size); 47 | if (err == 0) { 48 | malloc_printf( 49 | "Expected error for posix_memalign(&p, %zu, %zu)\n", 50 | alignment, size); 51 | } 52 | 53 | #if LG_SIZEOF_PTR == 3 54 | alignment = UINT64_C(0x4000000000000000); 55 | size = UINT64_C(0x8400000000000001); 56 | #else 57 | alignment = 0x40000000LU; 58 | size = 0x84000001LU; 59 | #endif 60 | err = posix_memalign(&p, alignment, size); 61 | if (err == 0) { 62 | malloc_printf( 63 | "Expected error for posix_memalign(&p, %zu, %zu)\n", 64 | alignment, size); 65 | } 66 | 67 | alignment = 0x10LU; 68 | #if LG_SIZEOF_PTR == 3 69 | size = UINT64_C(0xfffffffffffffff0); 70 | #else 71 | size = 0xfffffff0LU; 72 | #endif 73 | err = posix_memalign(&p, alignment, size); 74 | if (err == 0) { 75 | malloc_printf( 76 | "Expected error for posix_memalign(&p, %zu, %zu)\n", 77 | alignment, size); 78 | } 79 | 80 | for (i = 0; i < NITER; i++) 81 | ps[i] = NULL; 82 | 83 | for (alignment = 8; 84 | alignment <= MAXALIGN; 85 | alignment <<= 1) { 86 | total = 0; 87 | malloc_printf("Alignment: %zu\n", alignment); 88 | for (size = 1; 89 | size < 3 * alignment && size < (1U << 31); 90 | size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { 91 | for (i = 0; i < NITER; i++) { 92 | err = posix_memalign(&ps[i], 93 | alignment, size); 94 | if (err) { 95 | malloc_printf( 96 | "Error for size %zu (%#zx): %s\n", 97 | size, size, strerror(err)); 98 | exit(1); 99 | } 100 | total += malloc_usable_size(ps[i]); 101 | if (total >= (MAXALIGN << 1)) 102 | break; 103 | } 104 | for (i = 0; i < NITER; i++) { 105 | if (ps[i] != NULL) { 106 | free(ps[i]); 107 | ps[i] = NULL; 108 | } 109 | } 110 | } 111 | } 112 | 113 | malloc_printf("Test end\n"); 114 | return (0); 115 | } 116 | -------------------------------------------------------------------------------- /test/posix_memalign.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Alignment: 8 3 | Alignment: 16 4 | Alignment: 32 5 | Alignment: 64 6 | Alignment: 128 7 | Alignment: 256 8 | Alignment: 512 9 | Alignment: 1024 10 | Alignment: 2048 11 | Alignment: 4096 12 | Alignment: 8192 13 | Alignment: 16384 14 | Alignment: 32768 15 | Alignment: 65536 16 | Alignment: 131072 17 | Alignment: 262144 18 | Alignment: 524288 19 | Alignment: 1048576 20 | Alignment: 2097152 21 | Alignment: 4194304 22 | Alignment: 8388608 23 | Alignment: 16777216 24 | Alignment: 33554432 25 | Test end 26 | -------------------------------------------------------------------------------- /test/rallocm.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | #include "jemalloc_test.h" 3 | 4 | int 5 | main(void) 6 | { 7 | size_t pagesize; 8 | void *p, *q; 9 | size_t sz, tsz; 10 | int r; 11 | 12 | malloc_printf("Test begin\n"); 13 | 14 | 
/* Get page size. */ 15 | { 16 | #ifdef _WIN32 17 | SYSTEM_INFO si; 18 | GetSystemInfo(&si); 19 | pagesize = (size_t)si.dwPageSize; 20 | #else 21 | long result = sysconf(_SC_PAGESIZE); 22 | assert(result != -1); 23 | pagesize = (size_t)result; 24 | #endif 25 | } 26 | 27 | r = allocm(&p, &sz, 42, 0); 28 | if (r != ALLOCM_SUCCESS) { 29 | malloc_printf("Unexpected allocm() error\n"); 30 | abort(); 31 | } 32 | 33 | q = p; 34 | r = rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE); 35 | if (r != ALLOCM_SUCCESS) 36 | malloc_printf("Unexpected rallocm() error\n"); 37 | if (q != p) 38 | malloc_printf("Unexpected object move\n"); 39 | if (tsz != sz) { 40 | malloc_printf("Unexpected size change: %zu --> %zu\n", 41 | sz, tsz); 42 | } 43 | 44 | q = p; 45 | r = rallocm(&q, &tsz, sz, 5, ALLOCM_NO_MOVE); 46 | if (r != ALLOCM_SUCCESS) 47 | malloc_printf("Unexpected rallocm() error\n"); 48 | if (q != p) 49 | malloc_printf("Unexpected object move\n"); 50 | if (tsz != sz) { 51 | malloc_printf("Unexpected size change: %zu --> %zu\n", 52 | sz, tsz); 53 | } 54 | 55 | q = p; 56 | r = rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE); 57 | if (r != ALLOCM_ERR_NOT_MOVED) 58 | malloc_printf("Unexpected rallocm() result\n"); 59 | if (q != p) 60 | malloc_printf("Unexpected object move\n"); 61 | if (tsz != sz) { 62 | malloc_printf("Unexpected size change: %zu --> %zu\n", 63 | sz, tsz); 64 | } 65 | 66 | q = p; 67 | r = rallocm(&q, &tsz, sz + 5, 0, 0); 68 | if (r != ALLOCM_SUCCESS) 69 | malloc_printf("Unexpected rallocm() error\n"); 70 | if (q == p) 71 | malloc_printf("Expected object move\n"); 72 | if (tsz == sz) { 73 | malloc_printf("Expected size change: %zu --> %zu\n", 74 | sz, tsz); 75 | } 76 | p = q; 77 | sz = tsz; 78 | 79 | r = rallocm(&q, &tsz, pagesize*2, 0, 0); 80 | if (r != ALLOCM_SUCCESS) 81 | malloc_printf("Unexpected rallocm() error\n"); 82 | if (q == p) 83 | malloc_printf("Expected object move\n"); 84 | if (tsz == sz) { 85 | malloc_printf("Expected size change: %zu --> %zu\n", 86 | sz, tsz); 87 | } 88 | p = q; 89 | sz = tsz; 90 | 91 | r = rallocm(&q, &tsz, pagesize*4, 0, 0); 92 | if (r != ALLOCM_SUCCESS) 93 | malloc_printf("Unexpected rallocm() error\n"); 94 | if (tsz == sz) { 95 | malloc_printf("Expected size change: %zu --> %zu\n", 96 | sz, tsz); 97 | } 98 | p = q; 99 | sz = tsz; 100 | 101 | r = rallocm(&q, &tsz, pagesize*2, 0, ALLOCM_NO_MOVE); 102 | if (r != ALLOCM_SUCCESS) 103 | malloc_printf("Unexpected rallocm() error\n"); 104 | if (q != p) 105 | malloc_printf("Unexpected object move\n"); 106 | if (tsz == sz) { 107 | malloc_printf("Expected size change: %zu --> %zu\n", 108 | sz, tsz); 109 | } 110 | sz = tsz; 111 | 112 | r = rallocm(&q, &tsz, pagesize*4, 0, ALLOCM_NO_MOVE); 113 | if (r != ALLOCM_SUCCESS) 114 | malloc_printf("Unexpected rallocm() error\n"); 115 | if (q != p) 116 | malloc_printf("Unexpected object move\n"); 117 | if (tsz == sz) { 118 | malloc_printf("Expected size change: %zu --> %zu\n", 119 | sz, tsz); 120 | } 121 | sz = tsz; 122 | 123 | dallocm(p, 0); 124 | 125 | malloc_printf("Test end\n"); 126 | return (0); 127 | } 128 | -------------------------------------------------------------------------------- /test/rallocm.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Test end 3 | -------------------------------------------------------------------------------- /test/thread_arena.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | #include "jemalloc_test.h" 3 | 4 | #define NTHREADS 10 
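/*
 * Each spawned thread (see je_thread_start() below) receives the main
 * thread's arena index, installs it with the "thread.arena" mallctl, then
 * reads the setting back and asserts that it took effect, so all NTHREADS
 * threads end up allocating from the main thread's arena.
 */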
5 | 6 | void * 7 | je_thread_start(void *arg) 8 | { 9 | unsigned main_arena_ind = *(unsigned *)arg; 10 | void *p; 11 | unsigned arena_ind; 12 | size_t size; 13 | int err; 14 | 15 | p = malloc(1); 16 | if (p == NULL) { 17 | malloc_printf("%s(): Error in malloc()\n", __func__); 18 | return (void *)1; 19 | } 20 | free(p); 21 | 22 | size = sizeof(arena_ind); 23 | if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, 24 | sizeof(main_arena_ind)))) { 25 | malloc_printf("%s(): Error in mallctl(): %s\n", __func__, 26 | strerror(err)); 27 | return (void *)1; 28 | } 29 | 30 | size = sizeof(arena_ind); 31 | if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 32 | 0))) { 33 | malloc_printf("%s(): Error in mallctl(): %s\n", __func__, 34 | strerror(err)); 35 | return (void *)1; 36 | } 37 | assert(arena_ind == main_arena_ind); 38 | 39 | return (NULL); 40 | } 41 | 42 | int 43 | main(void) 44 | { 45 | int ret = 0; 46 | void *p; 47 | unsigned arena_ind; 48 | size_t size; 49 | int err; 50 | je_thread_t threads[NTHREADS]; 51 | unsigned i; 52 | 53 | malloc_printf("Test begin\n"); 54 | 55 | p = malloc(1); 56 | if (p == NULL) { 57 | malloc_printf("%s(): Error in malloc()\n", __func__); 58 | ret = 1; 59 | goto label_return; 60 | } 61 | 62 | size = sizeof(arena_ind); 63 | if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { 64 | malloc_printf("%s(): Error in mallctl(): %s\n", __func__, 65 | strerror(err)); 66 | ret = 1; 67 | goto label_return; 68 | } 69 | 70 | for (i = 0; i < NTHREADS; i++) { 71 | je_thread_create(&threads[i], je_thread_start, 72 | (void *)&arena_ind); 73 | } 74 | 75 | for (i = 0; i < NTHREADS; i++) 76 | je_thread_join(threads[i], (void *)&ret); 77 | 78 | label_return: 79 | malloc_printf("Test end\n"); 80 | return (ret); 81 | } 82 | -------------------------------------------------------------------------------- /test/thread_arena.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Test end 3 | -------------------------------------------------------------------------------- /test/thread_tcache_enabled.c: -------------------------------------------------------------------------------- 1 | #define JEMALLOC_MANGLE 2 | #include "jemalloc_test.h" 3 | 4 | void * 5 | je_thread_start(void *arg) 6 | { 7 | int err; 8 | size_t sz; 9 | bool e0, e1; 10 | 11 | sz = sizeof(bool); 12 | if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) { 13 | if (err == ENOENT) { 14 | #ifdef JEMALLOC_TCACHE 15 | assert(false); 16 | #endif 17 | } 18 | goto label_return; 19 | } 20 | 21 | if (e0) { 22 | e1 = false; 23 | assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) 24 | == 0); 25 | assert(e0); 26 | } 27 | 28 | e1 = true; 29 | assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); 30 | assert(e0 == false); 31 | 32 | e1 = true; 33 | assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); 34 | assert(e0); 35 | 36 | e1 = false; 37 | assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); 38 | assert(e0); 39 | 40 | e1 = false; 41 | assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); 42 | assert(e0 == false); 43 | 44 | free(malloc(1)); 45 | e1 = true; 46 | assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); 47 | assert(e0 == false); 48 | 49 | free(malloc(1)); 50 | e1 = true; 51 | assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); 52 | assert(e0); 53 | 54 | free(malloc(1)); 55 | e1 = false; 56 | assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, 
sz) == 0); 57 | assert(e0); 58 | 59 | free(malloc(1)); 60 | e1 = false; 61 | assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); 62 | assert(e0 == false); 63 | 64 | free(malloc(1)); 65 | label_return: 66 | return (NULL); 67 | } 68 | 69 | int 70 | main(void) 71 | { 72 | int ret = 0; 73 | je_thread_t thread; 74 | 75 | malloc_printf("Test begin\n"); 76 | 77 | je_thread_start(NULL); 78 | 79 | je_thread_create(&thread, je_thread_start, NULL); 80 | je_thread_join(thread, (void *)&ret); 81 | 82 | je_thread_start(NULL); 83 | 84 | je_thread_create(&thread, je_thread_start, NULL); 85 | je_thread_join(thread, (void *)&ret); 86 | 87 | je_thread_start(NULL); 88 | 89 | malloc_printf("Test end\n"); 90 | return (ret); 91 | } 92 | -------------------------------------------------------------------------------- /test/thread_tcache_enabled.exp: -------------------------------------------------------------------------------- 1 | Test begin 2 | Test end 3 | --------------------------------------------------------------------------------
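One mallctl() idiom recurs throughout the tests above: supplying both an oldp buffer and a newp buffer reads the previous value and installs a replacement in a single call. A minimal sketch of that read-write pattern against "thread.tcache.enabled" (variable names are illustrative; error handling is reduced to abort()):

	bool oldval, newval = false;
	size_t sz = sizeof(oldval);

	/* Fetch the current setting while simultaneously disabling the tcache. */
	if (mallctl("thread.tcache.enabled", &oldval, &sz, &newval,
	    sizeof(newval)) != 0)
		abort();

Passing NULL for newp (with newlen 0) makes the call a pure read, which is what the first mallctl() in each test does.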