├── AddressSpaceRandomization.cpp ├── AddressSpaceRandomization.h ├── Assertions.cpp ├── Assertions.h ├── Atomics.h ├── BitwiseOperations.h ├── ByteSwap.h ├── CPU.h ├── Compiler.h ├── Makefile ├── PageAllocator.cpp ├── PageAllocator.h ├── PartitionAlloc.cpp ├── PartitionAlloc.h ├── README.md ├── SpinLock.h ├── WTFExport.h ├── config.h └── tests ├── linear_overflow.cpp ├── pa_test.cpp └── pointer_check.cpp /AddressSpaceRandomization.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2014 The Chromium Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. 4 | 5 | #include "config.h" 6 | #include "AddressSpaceRandomization.h" 7 | 8 | #include "PageAllocator.h" 9 | #include "SpinLock.h" 10 | 11 | #if OS(WIN) 12 | #include 13 | #else 14 | #include 15 | #include 16 | #endif 17 | 18 | namespace WTF { 19 | 20 | namespace { 21 | 22 | // This is the same PRNG as used by tcmalloc for mapping address randomness; 23 | // see http://burtleburtle.net/bob/rand/smallprng.html 24 | struct ranctx { 25 | int lock; 26 | bool initialized; 27 | uint32_t a; 28 | uint32_t b; 29 | uint32_t c; 30 | uint32_t d; 31 | }; 32 | 33 | #define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k)))) 34 | 35 | uint32_t ranvalInternal(ranctx* x) 36 | { 37 | uint32_t e = x->a - rot(x->b, 27); 38 | x->a = x->b ^ rot(x->c, 17); 39 | x->b = x->c + x->d; 40 | x->c = x->d + e; 41 | x->d = e + x->a; 42 | return x->d; 43 | } 44 | 45 | #undef rot 46 | 47 | uint32_t ranval(ranctx* x) 48 | { 49 | spinLockLock(&x->lock); 50 | if (UNLIKELY(!x->initialized)) { 51 | x->initialized = true; 52 | char c; 53 | uint32_t seed = static_cast(reinterpret_cast(&c)); 54 | uint32_t pid; 55 | uint32_t usec; 56 | #if OS(WIN) 57 | pid = GetCurrentProcessId(); 58 | SYSTEMTIME st; 59 | GetSystemTime(&st); 60 | usec = static_cast(st.wMilliseconds * 1000); 61 | #else 62 | pid = static_cast(getpid()); 63 | struct timeval tv; 64 | gettimeofday(&tv, 0); 65 | usec = static_cast(tv.tv_usec); 66 | #endif 67 | seed ^= pid; 68 | seed ^= usec; 69 | x->a = 0xf1ea5eed; 70 | x->b = x->c = x->d = seed; 71 | for (int i = 0; i < 20; ++i) { 72 | (void) ranvalInternal(x); 73 | } 74 | } 75 | uint32_t ret = ranvalInternal(x); 76 | spinLockUnlock(&x->lock); 77 | return ret; 78 | } 79 | 80 | static struct ranctx s_ranctx; 81 | 82 | } 83 | 84 | // Calculates a random preferred mapping address. In calculating an 85 | // address, we balance good ASLR against not fragmenting the address 86 | // space too badly. 87 | void* getRandomPageBase() 88 | { 89 | uintptr_t random; 90 | random = static_cast(ranval(&s_ranctx)); 91 | #if CPU(X86_64) 92 | random <<= 32UL; 93 | random |= static_cast(ranval(&s_ranctx)); 94 | // This address mask gives a low liklihood of address space collisions. 95 | // We handle the situation gracefully if there is a collision. 96 | #if OS(WIN) 97 | // 64-bit Windows has a bizarrely small 8TB user address space. 98 | // Allocates in the 1-5TB region. 99 | // TODO(cevans): I think Win 8.1 has 47-bits like Linux. 100 | random &= 0x3ffffffffffUL; 101 | random += 0x10000000000UL; 102 | #else 103 | // Linux and OS X support the full 47-bit user space of x64 processors. 104 | random &= 0x3fffffffffffUL; 105 | #endif 106 | #elif CPU(ARM64) 107 | // ARM64 on Linux has 39-bit user space. 
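// (Annotation added for illustration, not in the original source: the mask
// below keeps the low 38 bits and the added 0x1000000000UL offsets the base
// by 64 GiB, so candidate addresses land in roughly the 64 GiB - 320 GiB
// window of that 39-bit space before the granularity mask is applied.)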
108 | random &= 0x3fffffffffUL; 109 | random += 0x1000000000UL; 110 | #else // !CPU(X86_64) && !CPU(ARM64) 111 | // This is a good range on Windows, Linux and Mac. 112 | // Allocates in the 0.5-1.5GB region. 113 | random &= 0x3fffffff; 114 | random += 0x20000000; 115 | #endif // CPU(X86_64) 116 | random &= kPageAllocationGranularityBaseMask; 117 | return reinterpret_cast(random); 118 | } 119 | 120 | } 121 | -------------------------------------------------------------------------------- /AddressSpaceRandomization.h: -------------------------------------------------------------------------------- 1 | // Copyright 2014 The Chromium Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. 4 | 5 | #ifndef WTF_AddressSpaceRandomization_h 6 | #define WTF_AddressSpaceRandomization_h 7 | 8 | #include "WTFExport.h" 9 | 10 | namespace WTF { 11 | 12 | // Calculates a random preferred mapping address. In calculating an 13 | // address, we balance good ASLR against not fragmenting the address 14 | // space too badly. 15 | WTF_EXPORT void* getRandomPageBase(); 16 | 17 | } 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /Assertions.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2003, 2006, 2007 Apple Inc. All rights reserved. 3 | * Copyright (C) 2007-2009 Torch Mobile, Inc. 4 | * Copyright (C) 2011 University of Szeged. All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions 8 | * are met: 9 | * 1. Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * 2. Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in the 13 | * documentation and/or other materials provided with the distribution. 14 | * 15 | * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY 16 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 18 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR 19 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 20 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 21 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 22 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 23 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | */ 27 | 28 | // If the vprintf_stderr_common function triggers an error in your 29 | // Mac build, then try adding this pragma. 30 | // According to http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Diagnostic-Pragmas.html#Diagnostic-Pragmas 31 | // we need to place this directive before any data or functions are defined. 
32 | //#pragma GCC diagnostic ignored "-Wmissing-format-attribute" 33 | 34 | #include "config.h" 35 | #include "Assertions.h" 36 | 37 | #include "Compiler.h" 38 | 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | 45 | #if USE(CF) 46 | #include 47 | #include 48 | #if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080 49 | #define WTF_USE_APPLE_SYSTEM_LOG 1 50 | #include 51 | #endif 52 | #endif // USE(CF) 53 | 54 | #if COMPILER(MSVC) 55 | #include 56 | #endif 57 | 58 | #if OS(WIN) 59 | #include 60 | #define HAVE_ISDEBUGGERPRESENT 1 61 | #endif 62 | 63 | #if OS(MACOSX) || (OS(LINUX) && !defined(__UCLIBC__)) 64 | #include 65 | #include 66 | #include 67 | #include 68 | #endif 69 | 70 | #if OS(LINUX) 71 | #include 72 | #endif 73 | 74 | #if OS(ANDROID) 75 | #include 76 | #endif 77 | 78 | WTF_ATTRIBUTE_PRINTF(1, 0) 79 | static void vprintf_stderr_common(const char* format, va_list args) 80 | { 81 | #if USE(CF) && !OS(WIN) 82 | if (strstr(format, "%@")) { 83 | CFStringRef cfFormat = CFStringCreateWithCString(NULL, format, kCFStringEncodingUTF8); 84 | 85 | #if COMPILER(CLANG) 86 | #pragma clang diagnostic push 87 | #pragma clang diagnostic ignored "-Wformat-nonliteral" 88 | #endif 89 | CFStringRef str = CFStringCreateWithFormatAndArguments(NULL, NULL, cfFormat, args); 90 | #if COMPILER(CLANG) 91 | #pragma clang diagnostic pop 92 | #endif 93 | CFIndex length = CFStringGetMaximumSizeForEncoding(CFStringGetLength(str), kCFStringEncodingUTF8); 94 | char* buffer = (char*)malloc(length + 1); 95 | 96 | CFStringGetCString(str, buffer, length, kCFStringEncodingUTF8); 97 | 98 | #if USE(APPLE_SYSTEM_LOG) 99 | os_log(OS_LOG_DEFAULT, "%s", buffer); 100 | #endif 101 | fputs(buffer, stderr); 102 | 103 | #if HPA_DEBUG 104 | fprintf(stdout, "%s", buffer); 105 | #endif 106 | 107 | free(buffer); 108 | CFRelease(str); 109 | CFRelease(cfFormat); 110 | return; 111 | } 112 | 113 | #if USE(APPLE_SYSTEM_LOG) 114 | va_list copyOfArgs; 115 | va_copy(copyOfArgs, args); 116 | 117 | size_t size = 1024; 118 | char *buffer = (char *) malloc(size); 119 | 120 | vsnprintf(buffer, size, format, args); 121 | os_log(OS_LOG_DEFAULT, "%s", (const char *) buffer); 122 | 123 | #if HPA_DEBUG 124 | fprintf(stdout, "%s", buffer); 125 | #endif 126 | 127 | free(buffer); 128 | 129 | va_end(copyOfArgs); 130 | #endif 131 | 132 | // Fall through to write to stderr in the same manner as other platforms. 
133 | 134 | #elif OS(ANDROID) 135 | __android_log_vprint(ANDROID_LOG_WARN, "WebKit", format, args); 136 | #elif HAVE(ISDEBUGGERPRESENT) 137 | if (IsDebuggerPresent()) { 138 | size_t size = 1024; 139 | 140 | do { 141 | char *buffer = (char *) malloc(size); 142 | 143 | if (buffer == NULL) 144 | break; 145 | 146 | if (_vsnprintf(buffer, size, format, args) != -1) { 147 | OutputDebugStringA(buffer); 148 | free(buffer); 149 | break; 150 | } 151 | 152 | #if HPA_DEBUG 153 | fprintf(stdout, "%s", buffer); 154 | #endif 155 | free(buffer); 156 | size *= 2; 157 | } while (size > 1024); 158 | } 159 | #endif 160 | } 161 | 162 | #if COMPILER(CLANG) || (COMPILER(GCC) && GCC_VERSION_AT_LEAST(4, 6, 0)) 163 | #pragma GCC diagnostic push 164 | #pragma GCC diagnostic ignored "-Wformat-nonliteral" 165 | #endif 166 | 167 | static void vprintf_stderr_with_prefix(const char* prefix, const char* format, va_list args) 168 | { 169 | size_t prefixLength = strlen(prefix); 170 | size_t formatLength = strlen(format); 171 | //OwnPtr formatWithPrefix = adoptArrayPtr(new char[prefixLength + formatLength + 1]); 172 | std::unique_ptr formatWithPrefix (new char[prefixLength + formatLength + 1]); 173 | memcpy(formatWithPrefix.get(), prefix, prefixLength); 174 | memcpy(formatWithPrefix.get() + prefixLength, format, formatLength); 175 | formatWithPrefix[prefixLength + formatLength] = 0; 176 | 177 | vprintf_stderr_common(formatWithPrefix.get(), args); 178 | } 179 | 180 | static void vprintf_stderr_with_trailing_newline(const char* format, va_list args) 181 | { 182 | size_t formatLength = strlen(format); 183 | if (formatLength && format[formatLength - 1] == '\n') { 184 | vprintf_stderr_common(format, args); 185 | return; 186 | } 187 | 188 | //OwnPtr formatWithNewline = adoptArrayPtr(new char[formatLength + 2]); 189 | std::unique_ptr formatWithNewline (new char[formatLength + 2]); 190 | memcpy(formatWithNewline.get(), format, formatLength); 191 | formatWithNewline[formatLength] = '\n'; 192 | formatWithNewline[formatLength + 1] = 0; 193 | 194 | vprintf_stderr_common(formatWithNewline.get(), args); 195 | } 196 | 197 | #if COMPILER(CLANG) || (COMPILER(GCC) && GCC_VERSION_AT_LEAST(4, 6, 0)) 198 | #pragma GCC diagnostic pop 199 | #endif 200 | 201 | WTF_ATTRIBUTE_PRINTF(1, 2) 202 | static void printf_stderr_common(const char* format, ...) 203 | { 204 | va_list args; 205 | va_start(args, format); 206 | vprintf_stderr_common(format, args); 207 | va_end(args); 208 | } 209 | 210 | static void printCallSite(const char* file, int line, const char* function) 211 | { 212 | #if OS(WIN) && defined(_DEBUG) 213 | _CrtDbgReport(_CRT_WARN, file, line, NULL, "%s\n", function); 214 | #else 215 | // By using this format, which matches the format used by MSVC for compiler errors, developers 216 | // using Visual Studio can double-click the file/line number in the Output Window to have the 217 | // editor navigate to that line of code. It seems fine for other developers, too. 218 | printf_stderr_common("%s(%d) : %s\n", file, line, function); 219 | #endif 220 | } 221 | 222 | void WTFReportAssertionFailure(const char* file, int line, const char* function, const char* assertion) 223 | { 224 | if (assertion) 225 | printf_stderr_common("ASSERTION FAILED: %s\n", assertion); 226 | else 227 | printf_stderr_common("SHOULD NEVER BE REACHED\n"); 228 | printCallSite(file, line, function); 229 | } 230 | 231 | void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...) 
232 | { 233 | va_list args; 234 | va_start(args, format); 235 | vprintf_stderr_with_prefix("ASSERTION FAILED: ", format, args); 236 | va_end(args); 237 | printf_stderr_common("\n%s\n", assertion); 238 | printCallSite(file, line, function); 239 | } 240 | 241 | void WTFReportArgumentAssertionFailure(const char* file, int line, const char* function, const char* argName, const char* assertion) 242 | { 243 | printf_stderr_common("ARGUMENT BAD: %s, %s\n", argName, assertion); 244 | printCallSite(file, line, function); 245 | } 246 | 247 | void WTFGetBacktrace(void** stack, int* size) 248 | { 249 | #if OS(MACOSX) || (OS(LINUX) && !defined(__UCLIBC__)) 250 | *size = backtrace(stack, *size); 251 | #elif OS(WIN) 252 | // The CaptureStackBackTrace function is available in XP, but it is not defined 253 | // in the Windows Server 2003 R2 Platform SDK. So, we'll grab the function 254 | // through GetProcAddress. 255 | typedef WORD (NTAPI* RtlCaptureStackBackTraceFunc)(DWORD, DWORD, PVOID*, PDWORD); 256 | HMODULE kernel32 = ::GetModuleHandleW(L"Kernel32.dll"); 257 | if (!kernel32) { 258 | *size = 0; 259 | return; 260 | } 261 | RtlCaptureStackBackTraceFunc captureStackBackTraceFunc = reinterpret_cast( 262 | ::GetProcAddress(kernel32, "RtlCaptureStackBackTrace")); 263 | if (captureStackBackTraceFunc) 264 | *size = captureStackBackTraceFunc(0, *size, stack, 0); 265 | else 266 | *size = 0; 267 | #else 268 | *size = 0; 269 | #endif 270 | } 271 | 272 | void WTFReportBacktrace(int framesToShow) 273 | { 274 | static const int framesToSkip = 2; 275 | // Use alloca to allocate on the stack since this function is used in OOM situations. 276 | void** samples = static_cast(alloca((framesToShow + framesToSkip) * sizeof(void *))); 277 | int frames = framesToShow + framesToSkip; 278 | 279 | WTFGetBacktrace(samples, &frames); 280 | WTFPrintBacktrace(samples + framesToSkip, frames - framesToSkip); 281 | } 282 | 283 | FrameToNameScope::FrameToNameScope(void* addr) 284 | : m_name(0) 285 | , m_cxaDemangled(0) 286 | { 287 | #if OS(MACOSX) || (OS(LINUX) && !defined(__UCLIBC__)) 288 | Dl_info info; 289 | if (!dladdr(addr, &info) || !info.dli_sname) 290 | return; 291 | const char* mangledName = info.dli_sname; 292 | if ((m_cxaDemangled = abi::__cxa_demangle(mangledName, 0, 0, 0))) 293 | m_name = m_cxaDemangled; 294 | else 295 | m_name = mangledName; 296 | #else 297 | (void)addr; 298 | #endif 299 | } 300 | 301 | FrameToNameScope::~FrameToNameScope() 302 | { 303 | free(m_cxaDemangled); 304 | } 305 | 306 | void WTFPrintBacktrace(void** stack, int size) 307 | { 308 | for (int i = 0; i < size; ++i) { 309 | FrameToNameScope frameToName(stack[i]); 310 | const int frameNumber = i + 1; 311 | if (frameToName.nullableName()) 312 | printf_stderr_common("%-3d %p %s\n", frameNumber, stack[i], frameToName.nullableName()); 313 | else 314 | printf_stderr_common("%-3d %p\n", frameNumber, stack[i]); 315 | } 316 | } 317 | 318 | void WTFReportFatalError(const char* file, int line, const char* function, const char* format, ...) 319 | { 320 | va_list args; 321 | va_start(args, format); 322 | vprintf_stderr_with_prefix("FATAL ERROR: ", format, args); 323 | va_end(args); 324 | printf_stderr_common("\n"); 325 | printCallSite(file, line, function); 326 | } 327 | 328 | void WTFReportError(const char* file, int line, const char* function, const char* format, ...) 
329 | { 330 | va_list args; 331 | va_start(args, format); 332 | vprintf_stderr_with_prefix("ERROR: ", format, args); 333 | va_end(args); 334 | printf_stderr_common("\n"); 335 | printCallSite(file, line, function); 336 | } 337 | 338 | void WTFLog(WTFLogChannel* channel, const char* format, ...) 339 | { 340 | if (channel->state != WTFLogChannelOn) 341 | return; 342 | 343 | va_list args; 344 | va_start(args, format); 345 | vprintf_stderr_with_trailing_newline(format, args); 346 | va_end(args); 347 | } 348 | 349 | void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel* channel, const char* format, ...) 350 | { 351 | if (channel->state != WTFLogChannelOn) 352 | return; 353 | 354 | va_list args; 355 | va_start(args, format); 356 | vprintf_stderr_with_trailing_newline(format, args); 357 | va_end(args); 358 | 359 | printCallSite(file, line, function); 360 | } 361 | 362 | void WTFLogAlways(const char* format, ...) 363 | { 364 | va_list args; 365 | va_start(args, format); 366 | vprintf_stderr_with_trailing_newline(format, args); 367 | va_end(args); 368 | } 369 | 370 | -------------------------------------------------------------------------------- /Assertions.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2003, 2006, 2007 Apple Inc. All rights reserved. 3 | * Copyright (C) 2013 Google Inc. All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY 15 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 17 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR 18 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 19 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 20 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 22 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | #ifndef WTF_Assertions_h 28 | #define WTF_Assertions_h 29 | 30 | /* 31 | No namespaces because this file has to be includable from C and Objective-C. 32 | 33 | Note, this file uses many GCC extensions, but it should be compatible with 34 | C, Objective C, C++, and Objective C++. 35 | 36 | For non-debug builds, everything is disabled by default, except for the 37 | RELEASE_ASSERT family of macros. 38 | 39 | Defining any of the symbols explicitly prevents this from having any effect. 40 | 41 | */ 42 | 43 | #include "Compiler.h" 44 | #include "WTFExport.h" 45 | 46 | // Users must test "#if ENABLE(ASSERT)", which helps ensure that code 47 | // testing this macro has included this header. 
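// (Illustrative usage, not part of the original header: debug-only checks are
// guarded with the function-macro form, e.g.
//     #if ENABLE(ASSERT)
//         checkInvariants();
//     #endif
// rather than "#ifdef ENABLE_ASSERT", so a missing include shows up as a
// preprocessor error instead of the check silently compiling out.)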
48 | #ifndef ENABLE_ASSERT 49 | #if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON) 50 | /* Disable ASSERT* macros in release mode by default. */ 51 | #define ENABLE_ASSERT 0 52 | #else 53 | #define ENABLE_ASSERT 1 54 | #endif /* defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON) */ 55 | #endif 56 | 57 | #ifndef BACKTRACE_DISABLED 58 | #define BACKTRACE_DISABLED !ENABLE(ASSERT) 59 | #endif 60 | 61 | #ifndef ASSERT_MSG_DISABLED 62 | #define ASSERT_MSG_DISABLED !ENABLE(ASSERT) 63 | #endif 64 | 65 | #ifndef ASSERT_ARG_DISABLED 66 | #define ASSERT_ARG_DISABLED !ENABLE(ASSERT) 67 | #endif 68 | 69 | #ifndef FATAL_DISABLED 70 | #define FATAL_DISABLED !ENABLE(ASSERT) 71 | #endif 72 | 73 | #ifndef ERROR_DISABLED 74 | #define ERROR_DISABLED !ENABLE(ASSERT) 75 | #endif 76 | 77 | #ifndef LOG_DISABLED 78 | #define LOG_DISABLED !ENABLE(ASSERT) 79 | #endif 80 | 81 | /* WTF logging functions can process %@ in the format string to log a NSObject* but the printf format attribute 82 | emits a warning when %@ is used in the format string. Until is resolved we can't include 83 | the attribute when being used from Objective-C code in case it decides to use %@. */ 84 | #if COMPILER(GCC) && !defined(__OBJC__) 85 | #define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(printf, formatStringArgument, extraArguments))) 86 | #else 87 | #define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments) 88 | #endif 89 | 90 | /* These helper functions are always declared, but not necessarily always defined if the corresponding function is disabled. */ 91 | 92 | typedef enum { WTFLogChannelOff, WTFLogChannelOn } WTFLogChannelState; 93 | 94 | typedef struct { 95 | WTFLogChannelState state; 96 | } WTFLogChannel; 97 | 98 | WTF_EXPORT void WTFReportAssertionFailure(const char* file, int line, const char* function, const char* assertion); 99 | WTF_EXPORT void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6); 100 | WTF_EXPORT void WTFReportArgumentAssertionFailure(const char* file, int line, const char* function, const char* argName, const char* assertion); 101 | WTF_EXPORT void WTFReportFatalError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5); 102 | WTF_EXPORT void WTFReportError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5); 103 | WTF_EXPORT void WTFLog(WTFLogChannel*, const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3); 104 | WTF_EXPORT void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel*, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6); 105 | WTF_EXPORT void WTFLogAlways(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2); 106 | 107 | WTF_EXPORT void WTFGetBacktrace(void** stack, int* size); 108 | WTF_EXPORT void WTFReportBacktrace(int framesToShow = 31); 109 | WTF_EXPORT void WTFPrintBacktrace(void** stack, int size); 110 | 111 | namespace WTF { 112 | 113 | class WTF_EXPORT FrameToNameScope { 114 | public: 115 | explicit FrameToNameScope(void*); 116 | ~FrameToNameScope(); 117 | const char* nullableName() { return m_name; } 118 | 119 | private: 120 | const char* m_name; 121 | char* m_cxaDemangled; 122 | }; 123 | 124 | } // namespace WTF 125 | 126 | using WTF::FrameToNameScope; 127 | 128 | /* IMMEDIATE_CRASH() - Like CRASH() below but crashes in the fastest, simplest possible way with no attempt at logging. 
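   (Added comparison, derived from the definitions that follow: on GCC/Clang
   IMMEDIATE_CRASH() is a bare __builtin_trap(), whereas CRASH() first calls
   WTFReportBacktrace() and writes through the recognisable 0xfbadbeef address
   before trapping.)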
*/ 129 | #ifndef IMMEDIATE_CRASH 130 | #if COMPILER(GCC) || COMPILER(CLANG) 131 | #define IMMEDIATE_CRASH() __builtin_trap() 132 | #else 133 | #define IMMEDIATE_CRASH() ((void)(*(volatile char*)0 = 0)) 134 | #endif 135 | #endif 136 | 137 | /* CRASH() - Raises a fatal error resulting in program termination and triggering either the debugger or the crash reporter. 138 | 139 | Use CRASH() in response to known, unrecoverable errors like out-of-memory. 140 | Macro is enabled in both debug and release mode. 141 | To test for unknown errors and verify assumptions, use ASSERT instead, to avoid impacting performance in release builds. 142 | 143 | Signals are ignored by the crash reporter on OS X so we must do better. 144 | */ 145 | #ifndef CRASH 146 | #if COMPILER(MSVC) 147 | #define CRASH() (__debugbreak(), IMMEDIATE_CRASH()) 148 | #else 149 | #define CRASH() \ 150 | (WTFReportBacktrace(), \ 151 | (*(int*)0xfbadbeef = 0), \ 152 | IMMEDIATE_CRASH()) 153 | #endif 154 | #endif 155 | 156 | #if COMPILER(CLANG) 157 | #define NO_RETURN_DUE_TO_CRASH NO_RETURN 158 | #else 159 | #define NO_RETURN_DUE_TO_CRASH 160 | #endif 161 | 162 | /* BACKTRACE 163 | 164 | Print a backtrace to the same location as ASSERT messages. 165 | */ 166 | #if BACKTRACE_DISABLED 167 | 168 | #define BACKTRACE() ((void)0) 169 | 170 | #else 171 | 172 | #define BACKTRACE() do { \ 173 | WTFReportBacktrace(); \ 174 | } while(false) 175 | 176 | #endif 177 | 178 | /* ASSERT, ASSERT_NOT_REACHED, ASSERT_UNUSED 179 | 180 | These macros are compiled out of release builds. 181 | Expressions inside them are evaluated in debug builds only. 182 | */ 183 | #if OS(WIN) 184 | /* FIXME: Change to use something other than ASSERT to avoid this conflict with the underlying platform */ 185 | #undef ASSERT 186 | #endif 187 | 188 | #if ENABLE(ASSERT) 189 | 190 | #define ASSERT(assertion) \ 191 | (!(assertion) ? \ 192 | (WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion), \ 193 | CRASH()) : \ 194 | (void)0) 195 | 196 | #define ASSERT_AT(assertion, file, line, function) \ 197 | (!(assertion) ? \ 198 | (WTFReportAssertionFailure(file, line, function, #assertion), \ 199 | CRASH()) : \ 200 | (void)0) 201 | 202 | #define ASSERT_NOT_REACHED() do { \ 203 | WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, 0); \ 204 | CRASH(); \ 205 | } while (0) 206 | 207 | #define ASSERT_UNUSED(variable, assertion) ASSERT(assertion) 208 | 209 | #define NO_RETURN_DUE_TO_ASSERT NO_RETURN_DUE_TO_CRASH 210 | 211 | #else 212 | 213 | #define ASSERT(assertion) ((void)0) 214 | #define ASSERT_AT(assertion, file, line, function) ((void)0) 215 | #define ASSERT_NOT_REACHED() ((void)0) 216 | #define NO_RETURN_DUE_TO_ASSERT 217 | 218 | #define ASSERT_UNUSED(variable, assertion) ((void)variable) 219 | 220 | #endif 221 | 222 | /* ASSERT_WITH_SECURITY_IMPLICATION / RELEASE_ASSERT_WITH_SECURITY_IMPLICATION 223 | 224 | Use in places where failure of the assertion indicates a possible security 225 | vulnerability. Classes of these vulnerabilities include bad casts, out of 226 | bounds accesses, use-after-frees, etc. Please be sure to file bugs for these 227 | failures using the security template: 228 | http://code.google.com/p/chromium/issues/entry?template=Security%20Bug 229 | */ 230 | #ifdef ADDRESS_SANITIZER 231 | 232 | #define ASSERT_WITH_SECURITY_IMPLICATION(assertion) \ 233 | (!(assertion) ? 
\ 234 | (WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion), \ 235 | CRASH()) : \ 236 | (void)0) 237 | 238 | #define RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(assertion) ASSERT_WITH_SECURITY_IMPLICATION(assertion) 239 | 240 | #else 241 | 242 | #define ASSERT_WITH_SECURITY_IMPLICATION(assertion) ASSERT(assertion) 243 | #define RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(assertion) RELEASE_ASSERT(assertion) 244 | 245 | #endif 246 | 247 | // Users must test "#if ENABLE(SECURITY_ASSERT)", which helps ensure 248 | // that code testing this macro has included this header. 249 | #if defined(ADDRESS_SANITIZER) || ENABLE(ASSERT) 250 | #define ENABLE_SECURITY_ASSERT 1 251 | #else 252 | #define ENABLE_SECURITY_ASSERT 0 253 | #endif 254 | 255 | /* ASSERT_WITH_MESSAGE */ 256 | 257 | #if ASSERT_MSG_DISABLED 258 | #define ASSERT_WITH_MESSAGE(assertion, ...) ((void)0) 259 | #else 260 | #define ASSERT_WITH_MESSAGE(assertion, ...) do \ 261 | if (!(assertion)) { \ 262 | WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \ 263 | CRASH(); \ 264 | } \ 265 | while (0) 266 | #endif 267 | 268 | /* ASSERT_WITH_MESSAGE_UNUSED */ 269 | 270 | #if ASSERT_MSG_DISABLED 271 | #define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) ((void)variable) 272 | #else 273 | #define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) do \ 274 | if (!(assertion)) { \ 275 | WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \ 276 | CRASH(); \ 277 | } \ 278 | while (0) 279 | #endif 280 | 281 | /* ASSERT_ARG */ 282 | 283 | #if ASSERT_ARG_DISABLED 284 | 285 | #define ASSERT_ARG(argName, assertion) ((void)0) 286 | 287 | #else 288 | 289 | #define ASSERT_ARG(argName, assertion) do \ 290 | if (!(assertion)) { \ 291 | WTFReportArgumentAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #argName, #assertion); \ 292 | CRASH(); \ 293 | } \ 294 | while (0) 295 | 296 | #endif 297 | 298 | /* FATAL */ 299 | 300 | #if FATAL_DISABLED 301 | #define FATAL(...) ((void)0) 302 | #else 303 | #define FATAL(...) do { \ 304 | WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__); \ 305 | CRASH(); \ 306 | } while (0) 307 | #endif 308 | 309 | /* WTF_LOG_ERROR */ 310 | 311 | #if ERROR_DISABLED 312 | #define WTF_LOG_ERROR(...) ((void)0) 313 | #else 314 | #define WTF_LOG_ERROR(...) WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__) 315 | #endif 316 | 317 | /* WTF_LOG */ 318 | 319 | #if LOG_DISABLED 320 | #define WTF_LOG(channel, ...) ((void)0) 321 | #else 322 | #define WTF_LOG(channel, ...) WTFLog(&JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__) 323 | #define JOIN_LOG_CHANNEL_WITH_PREFIX(prefix, channel) JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel) 324 | #define JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel) prefix ## channel 325 | #endif 326 | 327 | /* UNREACHABLE_FOR_PLATFORM */ 328 | 329 | #if COMPILER(CLANG) 330 | /* This would be a macro except that its use of #pragma works best around 331 | a function. Hence it uses macro naming convention. 
*/ 332 | #pragma clang diagnostic push 333 | #pragma clang diagnostic ignored "-Wmissing-noreturn" 334 | static inline void UNREACHABLE_FOR_PLATFORM() 335 | { 336 | ASSERT_NOT_REACHED(); 337 | } 338 | #pragma clang diagnostic pop 339 | #else 340 | #define UNREACHABLE_FOR_PLATFORM() ASSERT_NOT_REACHED() 341 | #endif 342 | 343 | /* RELEASE_ASSERT 344 | 345 | Use in places where failure of an assertion indicates a definite security 346 | vulnerability from which execution must not continue even in a release build. 347 | Please sure to file bugs for these failures using the security template: 348 | http://code.google.com/p/chromium/issues/entry?template=Security%20Bug 349 | */ 350 | 351 | #if ENABLE(ASSERT) 352 | #define RELEASE_ASSERT(assertion) ASSERT(assertion) 353 | #define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) ASSERT_WITH_MESSAGE(assertion, __VA_ARGS__) 354 | #define RELEASE_ASSERT_NOT_REACHED() ASSERT_NOT_REACHED() 355 | #else 356 | #define RELEASE_ASSERT(assertion) (UNLIKELY(!(assertion)) ? (IMMEDIATE_CRASH()) : (void)0) 357 | #define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) RELEASE_ASSERT(assertion) 358 | #define RELEASE_ASSERT_NOT_REACHED() IMMEDIATE_CRASH() 359 | #endif 360 | 361 | /* DEFINE_COMPARISON_OPERATORS_WITH_REFERENCES */ 362 | 363 | // Allow equality comparisons of Objects by reference or pointer, interchangeably. 364 | // This can be only used on types whose equality makes no other sense than pointer equality. 365 | #define DEFINE_COMPARISON_OPERATORS_WITH_REFERENCES(thisType) \ 366 | inline bool operator==(const thisType& a, const thisType& b) { return &a == &b; } \ 367 | inline bool operator==(const thisType& a, const thisType* b) { return &a == b; } \ 368 | inline bool operator==(const thisType* a, const thisType& b) { return a == &b; } \ 369 | inline bool operator!=(const thisType& a, const thisType& b) { return !(a == b); } \ 370 | inline bool operator!=(const thisType& a, const thisType* b) { return !(a == b); } \ 371 | inline bool operator!=(const thisType* a, const thisType& b) { return !(a == b); } 372 | 373 | #define DEFINE_COMPARISON_OPERATORS_WITH_REFERENCES_REFCOUNTED(thisType) \ 374 | DEFINE_COMPARISON_OPERATORS_WITH_REFERENCES(thisType) \ 375 | inline bool operator==(const PassRefPtr& a, const thisType& b) { return a.get() == &b; } \ 376 | inline bool operator==(const thisType& a, const PassRefPtr& b) { return &a == b.get(); } \ 377 | inline bool operator!=(const PassRefPtr& a, const thisType& b) { return !(a == b); } \ 378 | inline bool operator!=(const thisType& a, const PassRefPtr& b) { return !(a == b); } 379 | 380 | /* DEFINE_TYPE_CASTS */ 381 | 382 | #define DEFINE_TYPE_CASTS(thisType, argumentType, argumentName, pointerPredicate, referencePredicate) \ 383 | inline thisType* to##thisType(argumentType* argumentName) \ 384 | { \ 385 | ASSERT_WITH_SECURITY_IMPLICATION(!argumentName || (pointerPredicate)); \ 386 | return static_cast(argumentName); \ 387 | } \ 388 | inline const thisType* to##thisType(const argumentType* argumentName) \ 389 | { \ 390 | ASSERT_WITH_SECURITY_IMPLICATION(!argumentName || (pointerPredicate)); \ 391 | return static_cast(argumentName); \ 392 | } \ 393 | inline thisType& to##thisType(argumentType& argumentName) \ 394 | { \ 395 | ASSERT_WITH_SECURITY_IMPLICATION(referencePredicate); \ 396 | return static_cast(argumentName); \ 397 | } \ 398 | inline const thisType& to##thisType(const argumentType& argumentName) \ 399 | { \ 400 | ASSERT_WITH_SECURITY_IMPLICATION(referencePredicate); \ 401 | return static_cast(argumentName); 
\ 402 | } \ 403 | void to##thisType(const thisType*); \ 404 | void to##thisType(const thisType&) 405 | 406 | #endif /* WTF_Assertions_h */ 407 | -------------------------------------------------------------------------------- /Atomics.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. 3 | * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * 2. Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in the 13 | * documentation and/or other materials provided with the distribution. 14 | * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of 15 | * its contributors may be used to endorse or promote products derived 16 | * from this software without specific prior written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY 19 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY 22 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | */ 29 | 30 | #ifndef Atomics_h 31 | #define Atomics_h 32 | 33 | #include "Assertions.h" 34 | 35 | #include 36 | 37 | #if COMPILER(MSVC) 38 | #include 39 | #endif 40 | 41 | namespace WTF { 42 | 43 | #if COMPILER(MSVC) 44 | 45 | // atomicAdd returns the result of the addition. 46 | ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) 47 | { 48 | return InterlockedExchangeAdd(reinterpret_cast(addend), static_cast(increment)) + increment; 49 | } 50 | ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) 51 | { 52 | return InterlockedExchangeAdd(reinterpret_cast(addend), static_cast(increment)) + increment; 53 | } 54 | #if defined(_WIN64) 55 | ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend, unsigned long long increment) 56 | { 57 | return InterlockedExchangeAdd64(reinterpret_cast(addend), static_cast(increment)) + increment; 58 | } 59 | #endif 60 | 61 | // atomicSubtract returns the result of the subtraction. 
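// (Illustrative usage, not from the original file: both the MSVC and GCC
// families return the updated value, so a reference-count release can be
// written as
//     if (!atomicSubtract(&m_refCount, 1))
//         delete this;
// and behaves identically on either path.)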
62 | ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) 63 | { 64 | return InterlockedExchangeAdd(reinterpret_cast(addend), static_cast(-decrement)) - decrement; 65 | } 66 | ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) 67 | { 68 | return InterlockedExchangeAdd(reinterpret_cast(addend), -static_cast(decrement)) - decrement; 69 | } 70 | #if defined(_WIN64) 71 | ALWAYS_INLINE unsigned long long atomicSubtract(unsigned long long volatile* addend, unsigned long long decrement) 72 | { 73 | return InterlockedExchangeAdd64(reinterpret_cast(addend), -static_cast(decrement)) - decrement; 74 | } 75 | #endif 76 | 77 | ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return InterlockedIncrement(reinterpret_cast(addend)); } 78 | ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast(addend)); } 79 | 80 | ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return InterlockedIncrement64(reinterpret_cast(addend)); } 81 | ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return InterlockedDecrement64(reinterpret_cast(addend)); } 82 | 83 | ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) 84 | { 85 | int ret = InterlockedExchange(reinterpret_cast(ptr), 1); 86 | ASSERT(!ret || ret == 1); 87 | return ret; 88 | } 89 | 90 | ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) 91 | { 92 | ASSERT(*ptr == 1); 93 | InterlockedExchange(reinterpret_cast(ptr), 0); 94 | } 95 | 96 | #else 97 | 98 | // atomicAdd returns the result of the addition. 99 | ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { return __sync_add_and_fetch(addend, increment); } 100 | ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) { return __sync_add_and_fetch(addend, increment); } 101 | ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend, unsigned long increment) { return __sync_add_and_fetch(addend, increment); } 102 | // atomicSubtract returns the result of the subtraction. 103 | ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { return __sync_sub_and_fetch(addend, decrement); } 104 | ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) { return __sync_sub_and_fetch(addend, decrement); } 105 | ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend, unsigned long decrement) { return __sync_sub_and_fetch(addend, decrement); } 106 | 107 | ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return atomicAdd(addend, 1); } 108 | ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return atomicSubtract(addend, 1); } 109 | 110 | ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return __sync_add_and_fetch(addend, 1); } 111 | ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return __sync_sub_and_fetch(addend, 1); } 112 | 113 | ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) 114 | { 115 | int ret = __sync_lock_test_and_set(ptr, 1); 116 | ASSERT(!ret || ret == 1); 117 | return ret; 118 | } 119 | 120 | ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) 121 | { 122 | ASSERT(*ptr == 1); 123 | __sync_lock_release(ptr); 124 | } 125 | #endif 126 | 127 | #if defined(THREAD_SANITIZER) 128 | // The definitions below assume an LP64 data model. This is fine because 129 | // TSan is only supported on x86_64 Linux. 
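// (Clarifying note added here: LP64 means int stays 32 bits while long,
// long long and pointers are 64 bits, which is why the int overloads below
// map onto __tsan_atomic32 and the long/pointer overloads onto
// __tsan_atomic64.)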
130 | #if CPU(64BIT) && OS(LINUX) 131 | ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) 132 | { 133 | __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); 134 | } 135 | ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) 136 | { 137 | __tsan_atomic32_store(reinterpret_cast(ptr), static_cast(value), __tsan_memory_order_release); 138 | } 139 | ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) 140 | { 141 | __tsan_atomic64_store(reinterpret_cast(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); 142 | } 143 | ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value) 144 | { 145 | __tsan_atomic64_store(reinterpret_cast(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); 146 | } 147 | ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value) 148 | { 149 | __tsan_atomic64_store(reinterpret_cast(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); 150 | } 151 | ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) 152 | { 153 | __tsan_atomic64_store(reinterpret_cast(ptr), reinterpret_cast<__tsan_atomic64>(value), __tsan_memory_order_release); 154 | } 155 | 156 | ALWAYS_INLINE int acquireLoad(volatile const int* ptr) 157 | { 158 | return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); 159 | } 160 | ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) 161 | { 162 | return static_cast(__tsan_atomic32_load(reinterpret_cast(ptr), __tsan_memory_order_acquire)); 163 | } 164 | ALWAYS_INLINE long acquireLoad(volatile const long* ptr) 165 | { 166 | return static_cast(__tsan_atomic64_load(reinterpret_cast(ptr), __tsan_memory_order_acquire)); 167 | } 168 | ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) 169 | { 170 | return static_cast(__tsan_atomic64_load(reinterpret_cast(ptr), __tsan_memory_order_acquire)); 171 | } 172 | ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) 173 | { 174 | return reinterpret_cast(__tsan_atomic64_load(reinterpret_cast(ptr), __tsan_memory_order_acquire)); 175 | } 176 | #endif 177 | 178 | #else // defined(THREAD_SANITIZER) 179 | 180 | #if CPU(X86) || CPU(X86_64) 181 | // Only compiler barrier is needed. 182 | #if COMPILER(MSVC) 183 | // Starting from Visual Studio 2005 compiler guarantees acquire and release 184 | // semantics for operations on volatile variables. See MSDN entry for 185 | // MemoryBarrier macro. 186 | #define MEMORY_BARRIER() 187 | #else 188 | #define MEMORY_BARRIER() __asm__ __volatile__("" : : : "memory") 189 | #endif 190 | #elif CPU(ARM) && (OS(LINUX) || OS(ANDROID)) 191 | // On ARM __sync_synchronize generates dmb which is very expensive on single 192 | // core devices which don't actually need it. Avoid the cost by calling into 193 | // kuser_memory_barrier helper. 194 | inline void memoryBarrier() 195 | { 196 | // Note: This is a function call, which is also an implicit compiler barrier. 197 | typedef void (*KernelMemoryBarrierFunc)(); 198 | ((KernelMemoryBarrierFunc)0xffff0fa0)(); 199 | } 200 | #define MEMORY_BARRIER() memoryBarrier() 201 | #else 202 | // Fallback to the compiler intrinsic on all other platforms. 
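// (Illustrative note, not in the original source: __sync_synchronize() emits
// a full hardware fence, and the releaseStore()/acquireLoad() helpers below
// pair it with a plain store or load, e.g. a producer fills a buffer and then
// calls releaseStore(&flag, 1); a consumer that observes
// acquireLoad(&flag) == 1 may safely read that buffer.)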
203 | #define MEMORY_BARRIER() __sync_synchronize() 204 | #endif 205 | 206 | ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) 207 | { 208 | MEMORY_BARRIER(); 209 | *ptr = value; 210 | } 211 | ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) 212 | { 213 | MEMORY_BARRIER(); 214 | *ptr = value; 215 | } 216 | ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) 217 | { 218 | MEMORY_BARRIER(); 219 | *ptr = value; 220 | } 221 | ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value) 222 | { 223 | MEMORY_BARRIER(); 224 | *ptr = value; 225 | } 226 | #if CPU(64BIT) 227 | ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value) 228 | { 229 | MEMORY_BARRIER(); 230 | *ptr = value; 231 | } 232 | #endif 233 | ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) 234 | { 235 | MEMORY_BARRIER(); 236 | *ptr = value; 237 | } 238 | 239 | ALWAYS_INLINE int acquireLoad(volatile const int* ptr) 240 | { 241 | int value = *ptr; 242 | MEMORY_BARRIER(); 243 | return value; 244 | } 245 | ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) 246 | { 247 | unsigned value = *ptr; 248 | MEMORY_BARRIER(); 249 | return value; 250 | } 251 | ALWAYS_INLINE long acquireLoad(volatile const long* ptr) 252 | { 253 | long value = *ptr; 254 | MEMORY_BARRIER(); 255 | return value; 256 | } 257 | ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) 258 | { 259 | unsigned long value = *ptr; 260 | MEMORY_BARRIER(); 261 | return value; 262 | } 263 | #if CPU(64BIT) 264 | ALWAYS_INLINE unsigned long long acquireLoad(volatile const unsigned long long* ptr) 265 | { 266 | unsigned long long value = *ptr; 267 | MEMORY_BARRIER(); 268 | return value; 269 | } 270 | #endif 271 | ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) 272 | { 273 | void* value = *ptr; 274 | MEMORY_BARRIER(); 275 | return value; 276 | } 277 | 278 | #if defined(ADDRESS_SANITIZER) 279 | 280 | NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value) 281 | { 282 | MEMORY_BARRIER(); 283 | *ptr = value; 284 | } 285 | 286 | NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) 287 | { 288 | unsigned value = *ptr; 289 | MEMORY_BARRIER(); 290 | return value; 291 | } 292 | 293 | #endif // defined(ADDRESS_SANITIZER) 294 | 295 | #undef MEMORY_BARRIER 296 | 297 | #endif 298 | 299 | #if !defined(ADDRESS_SANITIZER) 300 | 301 | ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value) 302 | { 303 | releaseStore(ptr, value); 304 | } 305 | 306 | ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) 307 | { 308 | return acquireLoad(ptr); 309 | } 310 | 311 | #endif 312 | 313 | } // namespace WTF 314 | 315 | using WTF::atomicAdd; 316 | using WTF::atomicSubtract; 317 | using WTF::atomicDecrement; 318 | using WTF::atomicIncrement; 319 | using WTF::atomicTestAndSetToOne; 320 | using WTF::atomicSetOneToZero; 321 | using WTF::acquireLoad; 322 | using WTF::releaseStore; 323 | 324 | // These methods allow loading from and storing to poisoned memory. Only 325 | // use these methods if you know what you are doing since they will 326 | // silence use-after-poison errors from ASan. 
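// (Added note: in builds without ADDRESS_SANITIZER the asanUnsafe* variants
// above are thin ALWAYS_INLINE wrappers around acquireLoad()/releaseStore(),
// so they behave identically to the ordinary helpers there.)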
327 | using WTF::asanUnsafeAcquireLoad; 328 | using WTF::asanUnsafeReleaseStore; 329 | 330 | #endif // Atomics_h 331 | -------------------------------------------------------------------------------- /BitwiseOperations.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013 Google Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * Neither the name of Google Inc. nor the names of its 15 | * contributors may be used to endorse or promote products derived from 16 | * this software without specific prior written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | 31 | #ifndef WTF_BitwiseOperations_h 32 | #define WTF_BitwiseOperations_h 33 | 34 | // DESCRIPTION 35 | // countLeadingZeros() is a bitwise operation that counts the number of leading 36 | // zeros in a binary value, starting with the most significant bit. C does not 37 | // have an operator to do this, but fortunately the various compilers have 38 | // built-ins that map to fast underlying processor instructions. 39 | 40 | #include "CPU.h" 41 | #include "Compiler.h" 42 | 43 | #include 44 | 45 | #if COMPILER(MSVC) 46 | #include 47 | #endif 48 | 49 | namespace WTF { 50 | 51 | #if COMPILER(MSVC) 52 | 53 | ALWAYS_INLINE uint32_t countLeadingZeros32(uint32_t x) 54 | { 55 | unsigned long index; 56 | return LIKELY(_BitScanReverse(&index, x)) ? (31 - index) : 32; 57 | } 58 | 59 | #if CPU(64BIT) 60 | 61 | // MSVC only supplies _BitScanForward64 when building for a 64-bit target. 62 | ALWAYS_INLINE uint64_t countLeadingZeros64(uint64_t x) 63 | { 64 | unsigned long index; 65 | return LIKELY(_BitScanReverse64(&index, x)) ? (63 - index) : 64; 66 | } 67 | 68 | #endif 69 | 70 | #elif COMPILER(GCC) 71 | 72 | // This is very annoying. __builtin_clz has undefined behaviour for an input of 73 | // 0, even though these's clearly a return value that makes sense, and even 74 | // though nascent processor clz instructions have defined behaviour for 0. 75 | // We could drop to raw __asm__ to do better, but we'll avoid doing that unless 76 | // we see proof that we need to. 77 | ALWAYS_INLINE uint32_t countLeadingZeros32(uint32_t x) 78 | { 79 | return LIKELY(x) ? 
__builtin_clz(x) : 32; 80 | } 81 | 82 | ALWAYS_INLINE uint64_t countLeadingZeros64(uint64_t x) 83 | { 84 | return LIKELY(x) ? __builtin_clzll(x) : 64; 85 | } 86 | 87 | #endif 88 | 89 | #if CPU(64BIT) 90 | 91 | ALWAYS_INLINE size_t countLeadingZerosSizet(size_t x) { return countLeadingZeros64(x); } 92 | 93 | #else 94 | 95 | ALWAYS_INLINE size_t countLeadingZerosSizet(size_t x) { return countLeadingZeros32(x); } 96 | 97 | #endif 98 | 99 | } // namespace WTF 100 | 101 | #endif // WTF_BitwiseOperations_h 102 | -------------------------------------------------------------------------------- /ByteSwap.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013 Google Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * Neither the name of Google Inc. nor the names of its 15 | * contributors may be used to endorse or promote products derived from 16 | * this software without specific prior written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | 31 | #ifndef WTF_ByteSwap_h 32 | #define WTF_ByteSwap_h 33 | 34 | #include "CPU.h" 35 | #include "Compiler.h" 36 | 37 | #include 38 | 39 | #if COMPILER(MSVC) 40 | #include 41 | #endif 42 | 43 | namespace WTF { 44 | 45 | inline uint32_t wswap32(uint32_t x) { return ((x & 0xffff0000) >> 16) | ((x & 0x0000ffff) << 16); } 46 | 47 | #if COMPILER(MSVC) 48 | 49 | ALWAYS_INLINE uint64_t bswap64(uint64_t x) { return _byteswap_uint64(x); } 50 | ALWAYS_INLINE uint32_t bswap32(uint32_t x) { return _byteswap_ulong(x); } 51 | ALWAYS_INLINE uint16_t bswap16(uint16_t x) { return _byteswap_ushort(x); } 52 | 53 | #else 54 | 55 | ALWAYS_INLINE uint64_t bswap64(uint64_t x) { return __builtin_bswap64(x); } 56 | ALWAYS_INLINE uint32_t bswap32(uint32_t x) { return __builtin_bswap32(x); } 57 | ALWAYS_INLINE uint16_t bswap16(uint16_t x) { return __builtin_bswap16(x); } 58 | 59 | #endif 60 | 61 | #if CPU(64BIT) 62 | 63 | ALWAYS_INLINE size_t bswapuintptrt(size_t x) { return bswap64(x); } 64 | 65 | #else 66 | 67 | ALWAYS_INLINE size_t bswapuintptrt(size_t x) { return bswap32(x); } 68 | 69 | #endif 70 | 71 | } // namespace WTF 72 | 73 | #endif // WTF_ByteSwap_h 74 | -------------------------------------------------------------------------------- /CPU.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2006, 2007, 2008, 2009, 2013 Apple Inc. All rights reserved. 3 | * Copyright (C) 2007-2009 Torch Mobile, Inc. 4 | * Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved. 5 | * Copyright (C) 2013 Samsung Electronics. All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without 8 | * modification, are permitted provided that the following conditions 9 | * are met: 10 | * 1. Redistributions of source code must retain the above copyright 11 | * notice, this list of conditions and the following disclaimer. 12 | * 2. Redistributions in binary form must reproduce the above copyright 13 | * notice, this list of conditions and the following disclaimer in the 14 | * documentation and/or other materials provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR 20 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 21 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 23 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 24 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | */ 28 | 29 | #ifndef WTF_CPU_h 30 | #define WTF_CPU_h 31 | 32 | #include "Compiler.h" 33 | 34 | /* CPU() - the target CPU architecture */ 35 | #define CPU(WTF_FEATURE) (defined WTF_CPU_##WTF_FEATURE && WTF_CPU_##WTF_FEATURE) 36 | 37 | /* ==== CPU() - the target CPU architecture ==== */ 38 | 39 | /* This defines CPU(BIG_ENDIAN) or nothing, as appropriate. */ 40 | /* This defines CPU(32BIT) or CPU(64BIT), as appropriate. 
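   (Usage example added for illustration: client code tests the target with
   the function-macro form, e.g. "#if CPU(X86_64)", which expands to a check
   of the WTF_CPU_X86_64 define set further down in this header.)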
*/ 41 | 42 | /* CPU(X86) - i386 / x86 32-bit */ 43 | #if defined(__i386__) \ 44 | || defined(i386) \ 45 | || defined(_M_IX86) \ 46 | || defined(_X86_) \ 47 | || defined(__THW_INTEL) 48 | #define WTF_CPU_X86 1 49 | #endif 50 | 51 | /* CPU(X86_64) - AMD64 / Intel64 / x86_64 64-bit */ 52 | #if defined(__x86_64__) \ 53 | || defined(_M_X64) 54 | #define WTF_CPU_X86_64 1 55 | #define WTF_CPU_64BIT 1 56 | #endif 57 | 58 | /* CPU(ARM) - ARM, any version*/ 59 | #define WTF_ARM_ARCH_AT_LEAST(N) (CPU(ARM) && defined(WTF_ARM_ARCH_VERSION) && WTF_ARM_ARCH_VERSION >= N) 60 | 61 | #if defined(arm) \ 62 | || defined(__arm__) \ 63 | || defined(ARM) \ 64 | || defined(_ARM_) 65 | #define WTF_CPU_ARM 1 66 | 67 | #if defined(__ARMEB__) 68 | #define WTF_CPU_BIG_ENDIAN 1 69 | 70 | #elif !defined(__ARM_EABI__) \ 71 | && !defined(__EABI__) \ 72 | && !defined(__VFP_FP__) \ 73 | && !defined(_WIN32_WCE) \ 74 | && !defined(ANDROID) 75 | #define WTF_CPU_MIDDLE_ENDIAN 1 76 | 77 | #endif 78 | 79 | /* Set WTF_ARM_ARCH_VERSION */ 80 | #if defined(__ARM_ARCH_4__) \ 81 | || defined(__ARM_ARCH_4T__) \ 82 | || defined(__MARM_ARMV4__) 83 | #define WTF_ARM_ARCH_VERSION 4 84 | 85 | #elif defined(__ARM_ARCH_5__) \ 86 | || defined(__ARM_ARCH_5T__) \ 87 | || defined(__MARM_ARMV5__) 88 | #define WTF_ARM_ARCH_VERSION 5 89 | 90 | #elif defined(__ARM_ARCH_5E__) \ 91 | || defined(__ARM_ARCH_5TE__) \ 92 | || defined(__ARM_ARCH_5TEJ__) 93 | #define WTF_ARM_ARCH_VERSION 5 94 | 95 | #elif defined(__ARM_ARCH_6__) \ 96 | || defined(__ARM_ARCH_6J__) \ 97 | || defined(__ARM_ARCH_6K__) \ 98 | || defined(__ARM_ARCH_6Z__) \ 99 | || defined(__ARM_ARCH_6ZK__) \ 100 | || defined(__ARM_ARCH_6T2__) \ 101 | || defined(__ARMV6__) 102 | #define WTF_ARM_ARCH_VERSION 6 103 | 104 | #elif defined(__ARM_ARCH_7A__) \ 105 | || defined(__ARM_ARCH_7R__) \ 106 | || defined(__ARM_ARCH_7S__) 107 | #define WTF_ARM_ARCH_VERSION 7 108 | 109 | /* MSVC sets _M_ARM */ 110 | #elif defined(_M_ARM) 111 | #define WTF_ARM_ARCH_VERSION _M_ARM 112 | #else 113 | #define WTF_ARM_ARCH_VERSION 0 114 | 115 | #endif 116 | 117 | /* Set WTF_THUMB_ARCH_VERSION */ 118 | #if defined(__ARM_ARCH_4T__) 119 | #define WTF_THUMB_ARCH_VERSION 1 120 | 121 | #elif defined(__ARM_ARCH_5T__) \ 122 | || defined(__ARM_ARCH_5TE__) \ 123 | || defined(__ARM_ARCH_5TEJ__) 124 | #define WTF_THUMB_ARCH_VERSION 2 125 | 126 | #elif defined(__ARM_ARCH_6J__) \ 127 | || defined(__ARM_ARCH_6K__) \ 128 | || defined(__ARM_ARCH_6Z__) \ 129 | || defined(__ARM_ARCH_6ZK__) \ 130 | || defined(__ARM_ARCH_6M__) 131 | #define WTF_THUMB_ARCH_VERSION 3 132 | 133 | #elif defined(__ARM_ARCH_6T2__) \ 134 | || defined(__ARM_ARCH_7__) \ 135 | || defined(__ARM_ARCH_7A__) \ 136 | || defined(__ARM_ARCH_7M__) \ 137 | || defined(__ARM_ARCH_7R__) \ 138 | || defined(__ARM_ARCH_7S__) 139 | #define WTF_THUMB_ARCH_VERSION 4 140 | 141 | #else 142 | #define WTF_THUMB_ARCH_VERSION 0 143 | #endif 144 | 145 | 146 | /* CPU(ARM_THUMB2) - Thumb2 instruction set is available */ 147 | #if !defined(WTF_CPU_ARM_THUMB2) 148 | # if defined(thumb2) || defined(__thumb2__) \ 149 | || ((defined(__thumb) || defined(__thumb__)) && WTF_THUMB_ARCH_VERSION == 4) 150 | # define WTF_CPU_ARM_THUMB2 1 151 | # elif WTF_ARM_ARCH_AT_LEAST(4) 152 | # define WTF_CPU_ARM_THUMB2 0 153 | # else 154 | # error "Unsupported ARM architecture" 155 | # endif 156 | #endif /* !defined(WTF_CPU_ARM_THUMB2) */ 157 | 158 | #if defined(__ARM_NEON__) && !defined(WTF_CPU_ARM_NEON) 159 | #define WTF_CPU_ARM_NEON 1 160 | #endif 161 | 162 | #if CPU(ARM_NEON) && (!COMPILER(GCC) || 
GCC_VERSION_AT_LEAST(4, 7, 0)) 163 | // All NEON intrinsics usage can be disabled by this macro. 164 | #define HAVE_ARM_NEON_INTRINSICS 1 165 | #endif 166 | 167 | #if defined(__ARM_ARCH_7S__) 168 | #define WTF_CPU_APPLE_ARMV7S 1 169 | #endif 170 | 171 | #endif /* ARM */ 172 | 173 | /* CPU(ARM64) - AArch64 64-bit */ 174 | #if defined(__aarch64__) 175 | #define WTF_CPU_ARM64 1 176 | #define WTF_CPU_64BIT 1 177 | #endif 178 | 179 | /* This defines CPU(64BIT). */ 180 | #if defined(__mips__) && (_MIPS_SIM == _ABI64) 181 | #define WTF_CPU_64BIT 1 182 | #endif 183 | 184 | #if !defined(WTF_CPU_64BIT) 185 | #define WTF_CPU_32BIT 1 186 | #endif 187 | 188 | #endif /* WTF_CPU_h */ 189 | -------------------------------------------------------------------------------- /Compiler.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2011, 2012 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #ifndef WTF_Compiler_h 27 | #define WTF_Compiler_h 28 | 29 | /* COMPILER() - the compiler being used to build the project */ 30 | #define COMPILER(WTF_FEATURE) (defined WTF_COMPILER_##WTF_FEATURE && WTF_COMPILER_##WTF_FEATURE) 31 | 32 | /* COMPILER_SUPPORTS() - whether the compiler being used to build the project supports the given feature. */ 33 | #define COMPILER_SUPPORTS(WTF_COMPILER_FEATURE) (defined WTF_COMPILER_SUPPORTS_##WTF_COMPILER_FEATURE && WTF_COMPILER_SUPPORTS_##WTF_COMPILER_FEATURE) 34 | 35 | /* COMPILER_QUIRK() - whether the compiler being used to build the project requires a given quirk. 
*/ 36 | #define COMPILER_QUIRK(WTF_COMPILER_QUIRK) (defined WTF_COMPILER_QUIRK_##WTF_COMPILER_QUIRK && WTF_COMPILER_QUIRK_##WTF_COMPILER_QUIRK) 37 | 38 | /* ==== COMPILER() - the compiler being used to build the project ==== */ 39 | 40 | /* COMPILER(CLANG) - Clang */ 41 | #if defined(__clang__) 42 | #define WTF_COMPILER_CLANG 1 43 | #endif 44 | 45 | /* COMPILER(MSVC) - Microsoft Visual C++ */ 46 | #if defined(_MSC_VER) 47 | #define WTF_COMPILER_MSVC 1 48 | #endif 49 | 50 | /* COMPILER(GCC) - GNU Compiler Collection */ 51 | #if defined(__GNUC__) 52 | #define WTF_COMPILER_GCC 1 53 | #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) 54 | #define GCC_VERSION_AT_LEAST(major, minor, patch) (GCC_VERSION >= (major * 10000 + minor * 100 + patch)) 55 | #else 56 | /* Define this for !GCC compilers, just so we can write things like GCC_VERSION_AT_LEAST(4, 1, 0). */ 57 | #define GCC_VERSION_AT_LEAST(major, minor, patch) 0 58 | #endif 59 | 60 | /* ==== Compiler features ==== */ 61 | 62 | 63 | /* ALWAYS_INLINE */ 64 | 65 | #ifndef ALWAYS_INLINE 66 | #if COMPILER(GCC) && defined(NDEBUG) && !COMPILER(MINGW) 67 | #define ALWAYS_INLINE inline __attribute__((__always_inline__)) 68 | #elif COMPILER(MSVC) && defined(NDEBUG) 69 | #define ALWAYS_INLINE __forceinline 70 | #else 71 | #define ALWAYS_INLINE inline 72 | #endif 73 | #endif 74 | 75 | 76 | /* NEVER_INLINE */ 77 | 78 | #ifndef NEVER_INLINE 79 | #if COMPILER(GCC) 80 | #define NEVER_INLINE __attribute__((__noinline__)) 81 | #elif COMPILER(MSVC) 82 | #define NEVER_INLINE __declspec(noinline) 83 | #else 84 | #define NEVER_INLINE 85 | #endif 86 | #endif 87 | 88 | 89 | /* UNLIKELY */ 90 | 91 | #ifndef UNLIKELY 92 | #if COMPILER(GCC) 93 | #define UNLIKELY(x) __builtin_expect((x), 0) 94 | #else 95 | #define UNLIKELY(x) (x) 96 | #endif 97 | #endif 98 | 99 | 100 | /* LIKELY */ 101 | 102 | #ifndef LIKELY 103 | #if COMPILER(GCC) 104 | #define LIKELY(x) __builtin_expect((x), 1) 105 | #else 106 | #define LIKELY(x) (x) 107 | #endif 108 | #endif 109 | 110 | 111 | /* NO_RETURN */ 112 | 113 | #ifndef NO_RETURN 114 | #if COMPILER(GCC) 115 | #define NO_RETURN __attribute((__noreturn__)) 116 | #elif COMPILER(MSVC) 117 | #define NO_RETURN __declspec(noreturn) 118 | #else 119 | #define NO_RETURN 120 | #endif 121 | #endif 122 | 123 | 124 | /* WARN_UNUSED_RETURN */ 125 | 126 | #if COMPILER(GCC) 127 | #define WARN_UNUSED_RETURN __attribute__ ((warn_unused_result)) 128 | #else 129 | #define WARN_UNUSED_RETURN 130 | #endif 131 | 132 | 133 | /* ALLOW_UNUSED_LOCAL */ 134 | 135 | #define ALLOW_UNUSED_LOCAL(x) false ? (void)x : (void)0 136 | 137 | 138 | /* OBJC_CLASS */ 139 | 140 | #ifndef OBJC_CLASS 141 | #ifdef __OBJC__ 142 | #define OBJC_CLASS @class 143 | #else 144 | #define OBJC_CLASS class 145 | #endif 146 | #endif 147 | 148 | 149 | /* WTF_PRETTY_FUNCTION */ 150 | 151 | #if COMPILER(GCC) 152 | #define WTF_COMPILER_SUPPORTS_PRETTY_FUNCTION 1 153 | #define WTF_PRETTY_FUNCTION __PRETTY_FUNCTION__ 154 | #elif COMPILER(MSVC) 155 | #define WTF_COMPILER_SUPPORTS_PRETTY_FUNCTION 1 156 | #define WTF_PRETTY_FUNCTION __FUNCSIG__ 157 | #else 158 | #define WTF_PRETTY_FUNCTION __FUNCTION__ 159 | #endif 160 | 161 | 162 | /* NO_SANITIZE_UNRELATED_CAST - Disable runtime checks related to casts between 163 | * unrelated objects (-fsanitize=cfi-unrelated-cast or -fsanitize=vptr). 
*/ 164 | 165 | #if COMPILER(CLANG) 166 | #define NO_SANITIZE_UNRELATED_CAST __attribute__((no_sanitize("cfi-unrelated-cast", "vptr"))) 167 | #else 168 | #define NO_SANITIZE_UNRELATED_CAST 169 | #endif 170 | 171 | #endif /* WTF_Compiler_h */ 172 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ## Hardened Partition Alloc Makefile 2 | ## chris.rohlf@gmail.com - 2018 3 | 4 | UNAME := $(shell uname) 5 | 6 | CXX = clang++ 7 | DEBUG = -ggdb 8 | HPA_DEBUG = -DHPA_DEBUG=1 9 | ASAN = -fsanitize=address 10 | LINUX_LDFLAGS = -ldl 11 | MACOS_FLAGS = -framework CoreFoundation 12 | CFLAGS = -std=c++11 -Wall -pedantic -D_FORTIFY_SOURCE=2 -fstack-protector-all -DENABLE_ASSERT=1 13 | 14 | ifeq ($(UNAME), Darwin) 15 | CXXFLAGS = $(CFLAGS) -fPIC $(MACOS_FLAGS) 16 | endif 17 | 18 | ifeq ($(UNAME), Linux) 19 | CXXFLAGS = $(CFLAGS) -fPIC -fPIE $(LINUX_LDFLAGS) 20 | endif 21 | 22 | library: 23 | @mkdir -p build 24 | $(CXX) $(CXXFLAGS) AddressSpaceRandomization.cpp Assertions.cpp PageAllocator.cpp \ 25 | PartitionAlloc.cpp -shared $(LDFLAGS) -o build/partitionalloc.so 26 | 27 | debug_library: 28 | @mkdir -p build 29 | $(CXX) $(CXXFLAGS) $(DEBUG) $(HPA_DEBUG) AddressSpaceRandomization.cpp Assertions.cpp PageAllocator.cpp \ 30 | PartitionAlloc.cpp -shared $(LDFLAGS) -o build/partitionalloc.so 31 | 32 | tests: debug_library 33 | $(CXX) $(CXXFLAGS) $(DEBUG) tests/pa_test.cpp build/partitionalloc.so -o build/pa_test 34 | $(CXX) $(CXXFLAGS) $(DEBUG) tests/pointer_check.cpp build/partitionalloc.so -o build/pointer_check 35 | $(CXX) $(CXXFLAGS) $(DEBUG) tests/linear_overflow.cpp build/partitionalloc.so -o build/linear_overflow 36 | 37 | clean: 38 | rm -rf */*.o build/* -------------------------------------------------------------------------------- /PageAllocator.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013 Google Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * Neither the name of Google Inc. nor the names of its 15 | * contributors may be used to endorse or promote products derived from 16 | * this software without specific prior written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | 31 | #include "config.h" 32 | #include "PageAllocator.h" 33 | 34 | #include "AddressSpaceRandomization.h" 35 | #include "Assertions.h" 36 | 37 | #include 38 | 39 | #if OS(POSIX) 40 | 41 | #include 42 | 43 | #ifndef MADV_FREE 44 | #define MADV_FREE MADV_DONTNEED 45 | #endif 46 | 47 | #ifndef MAP_ANONYMOUS 48 | #define MAP_ANONYMOUS MAP_ANON 49 | #endif 50 | 51 | #elif OS(WIN) 52 | 53 | #include 54 | 55 | #else 56 | #error Unknown OS 57 | #endif // OS(POSIX) 58 | 59 | namespace WTF { 60 | 61 | // This simple internal function wraps the OS-specific page allocation call so 62 | // that it behaves consistently: the address is a hint and if it cannot be used, 63 | // the allocation will be placed elsewhere. 64 | static void* systemAllocPages(void* addr, size_t len, PageAccessibilityConfiguration pageAccessibility) 65 | { 66 | ASSERT(!(len & kPageAllocationGranularityOffsetMask)); 67 | ASSERT(!(reinterpret_cast(addr) & kPageAllocationGranularityOffsetMask)); 68 | void* ret; 69 | #if OS(WIN) 70 | int accessFlag = pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; 71 | ret = VirtualAlloc(addr, len, MEM_RESERVE | MEM_COMMIT, accessFlag); 72 | if (!ret) 73 | ret = VirtualAlloc(0, len, MEM_RESERVE | MEM_COMMIT, accessFlag); 74 | #else 75 | int accessFlag = pageAccessibility == PageAccessible ? (PROT_READ | PROT_WRITE) : PROT_NONE; 76 | ret = mmap(addr, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 77 | if (ret == MAP_FAILED) 78 | ret = 0; 79 | #endif 80 | return ret; 81 | } 82 | 83 | static bool trimMapping(void* baseAddr, size_t baseLen, void* trimAddr, size_t trimLen) 84 | { 85 | #if OS(WIN) 86 | return false; 87 | #else 88 | char* basePtr = static_cast(baseAddr); 89 | char* trimPtr = static_cast(trimAddr); 90 | ASSERT(trimPtr >= basePtr); 91 | ASSERT(trimPtr + trimLen <= basePtr + baseLen); 92 | size_t preLen = trimPtr - basePtr; 93 | if (preLen) { 94 | int ret = munmap(basePtr, preLen); 95 | RELEASE_ASSERT(!ret); 96 | } 97 | size_t postLen = (basePtr + baseLen) - (trimPtr + trimLen); 98 | if (postLen) { 99 | int ret = munmap(trimPtr + trimLen, postLen); 100 | RELEASE_ASSERT(!ret); 101 | } 102 | return true; 103 | #endif 104 | } 105 | 106 | void* allocPages(void* addr, size_t len, size_t align, PageAccessibilityConfiguration pageAccessibility) 107 | { 108 | ASSERT(len >= kPageAllocationGranularity); 109 | ASSERT(!(len & kPageAllocationGranularityOffsetMask)); 110 | ASSERT(align >= kPageAllocationGranularity); 111 | ASSERT(!(align & kPageAllocationGranularityOffsetMask)); 112 | ASSERT(!(reinterpret_cast(addr) & kPageAllocationGranularityOffsetMask)); 113 | size_t alignOffsetMask = align - 1; 114 | size_t alignBaseMask = ~alignOffsetMask; 115 | ASSERT(!(reinterpret_cast(addr) & alignOffsetMask)); 116 | // If the client passed null as the address, choose a good one. 
117 | if (!addr) { 118 | addr = getRandomPageBase(); 119 | addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); 120 | } 121 | 122 | // The common case, which is also the least work we can do, is that the 123 | // address and length are suitable. Just try it. 124 | void* ret = systemAllocPages(addr, len, pageAccessibility); 125 | // If the alignment is to our liking, we're done. 126 | if (!ret || !(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)) 127 | return ret; 128 | 129 | // Annoying. Unmap and map a larger range to be sure to succeed on the 130 | // second, slower attempt. 131 | freePages(ret, len); 132 | 133 | size_t tryLen = len + (align - kPageAllocationGranularity); 134 | RELEASE_ASSERT(tryLen > len); 135 | 136 | // We loop to cater for the unlikely case where another thread maps on top 137 | // of the aligned location we choose. 138 | int count = 0; 139 | while (count++ < 100) { 140 | ret = systemAllocPages(addr, tryLen, pageAccessibility); 141 | if (!ret) 142 | return 0; 143 | // We can now try and trim out a subset of the mapping. 144 | addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + alignOffsetMask) & alignBaseMask); 145 | 146 | // On POSIX systems, we can trim the oversized mapping to fit exactly. 147 | // This will always work on POSIX systems. 148 | if (trimMapping(ret, tryLen, addr, len)) 149 | return addr; 150 | 151 | // On Windows, you can't trim an existing mapping so we unmap and remap 152 | // a subset. We used to do this for all platforms, but OSX 10.8 has a 153 | // broken mmap() that ignores address hints for valid, unused addresses. 154 | freePages(ret, tryLen); 155 | ret = systemAllocPages(addr, len, pageAccessibility); 156 | if (ret == addr || !ret) 157 | return ret; 158 | 159 | // Unlikely race / collision. Do the simple thing and just start again.
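The retry path above deliberately over-allocates by (align - kPageAllocationGranularity) bytes so that an aligned sub-range of len bytes is guaranteed to exist somewhere inside the oversized mapping, and the alignOffsetMask / alignBaseMask pair rounds the returned address up to that boundary. A minimal standalone sketch of the arithmetic, using illustrative constants and a made-up mmap() result rather than anything from the allocator:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Illustrative values: a 64KB alignment request on top of 4KB granularity.
        const uint64_t align = 0x10000;                       // 64KB
        const uint64_t granularity = 0x1000;                  // 4KB
        const uint64_t alignOffsetMask = align - 1;
        const uint64_t alignBaseMask = ~alignOffsetMask;

        const uint64_t len = 0x20000;                         // requested length (128KB)
        const uint64_t tryLen = len + (align - granularity);  // oversized second attempt
        const uint64_t ret = 0x7f0000003000;                  // made-up, misaligned mapping address

        // Round up to the next align boundary inside the oversized mapping.
        const uint64_t aligned = (ret + alignOffsetMask) & alignBaseMask;
        assert(!(aligned & alignOffsetMask));   // now 64KB-aligned
        assert(aligned + len <= ret + tryLen);  // the aligned sub-range always fits

        // The slack on either side of [aligned, aligned + len) is what trimMapping()
        // gives back to the OS with munmap() on POSIX.
        printf("leading slack %#llx, trailing slack %#llx\n",
               (unsigned long long)(aligned - ret),
               (unsigned long long)(ret + tryLen - (aligned + len)));
        return 0;
    }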
160 | freePages(ret, len); 161 | addr = getRandomPageBase(); 162 | addr = reinterpret_cast(reinterpret_cast(addr) & alignBaseMask); 163 | } 164 | IMMEDIATE_CRASH(); 165 | return 0; 166 | } 167 | 168 | void freePages(void* addr, size_t len) 169 | { 170 | ASSERT(!(reinterpret_cast(addr) & kPageAllocationGranularityOffsetMask)); 171 | ASSERT(!(len & kPageAllocationGranularityOffsetMask)); 172 | #if OS(POSIX) 173 | int ret = munmap(addr, len); 174 | RELEASE_ASSERT(!ret); 175 | #else 176 | BOOL ret = VirtualFree(addr, 0, MEM_RELEASE); 177 | RELEASE_ASSERT(ret); 178 | #endif 179 | } 180 | 181 | void setSystemPagesInaccessible(void* addr, size_t len) 182 | { 183 | ASSERT(!(len & kSystemPageOffsetMask)); 184 | #if OS(POSIX) 185 | int ret = mprotect(addr, len, PROT_NONE); 186 | RELEASE_ASSERT(!ret); 187 | #else 188 | BOOL ret = VirtualFree(addr, len, MEM_DECOMMIT); 189 | RELEASE_ASSERT(ret); 190 | #endif 191 | } 192 | 193 | bool setSystemPagesAccessible(void* addr, size_t len) 194 | { 195 | ASSERT(!(len & kSystemPageOffsetMask)); 196 | #if OS(POSIX) 197 | return !mprotect(addr, len, PROT_READ | PROT_WRITE); 198 | #else 199 | return !!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_READWRITE); 200 | #endif 201 | } 202 | 203 | void decommitSystemPages(void* addr, size_t len) 204 | { 205 | ASSERT(!(len & kSystemPageOffsetMask)); 206 | #if OS(POSIX) 207 | int ret = madvise(addr, len, MADV_FREE); 208 | RELEASE_ASSERT(!ret); 209 | #else 210 | setSystemPagesInaccessible(addr, len); 211 | #endif 212 | } 213 | 214 | void recommitSystemPages(void* addr, size_t len) 215 | { 216 | ASSERT(!(len & kSystemPageOffsetMask)); 217 | #if OS(POSIX) 218 | (void) addr; 219 | #else 220 | RELEASE_ASSERT(setSystemPagesAccessible(addr, len)); 221 | #endif 222 | } 223 | 224 | void discardSystemPages(void* addr, size_t len) 225 | { 226 | ASSERT(!(len & kSystemPageOffsetMask)); 227 | #if OS(POSIX) 228 | // On POSIX, the implementation detail is that discard and decommit are the 229 | // same, and lead to pages that are returned to the system immediately and 230 | // get replaced with zeroed pages when touched. So we just call 231 | // decommitSystemPages() here to avoid code duplication. 232 | decommitSystemPages(addr, len); 233 | #else 234 | (void) addr; 235 | (void) len; 236 | // TODO(cevans): implement this using MEM_RESET for Windows, once we've 237 | // decided that the semantics are a match. 238 | #endif 239 | } 240 | 241 | } // namespace WTF 242 | 243 | -------------------------------------------------------------------------------- /PageAllocator.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013 Google Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * Neither the name of Google Inc. nor the names of its 15 | * contributors may be used to endorse or promote products derived from 16 | * this software without specific prior written permission. 
17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | 31 | #ifndef WTF_PageAllocator_h 32 | #define WTF_PageAllocator_h 33 | 34 | #include "Assertions.h" 35 | #include "CPU.h" 36 | #include "WTFExport.h" 37 | #include 38 | 39 | namespace WTF { 40 | 41 | #if OS(WIN) 42 | static const size_t kPageAllocationGranularityShift = 16; // 64KB 43 | #else 44 | static const size_t kPageAllocationGranularityShift = 12; // 4KB 45 | #endif 46 | static const size_t kPageAllocationGranularity = 1 << kPageAllocationGranularityShift; 47 | static const size_t kPageAllocationGranularityOffsetMask = kPageAllocationGranularity - 1; 48 | static const size_t kPageAllocationGranularityBaseMask = ~kPageAllocationGranularityOffsetMask; 49 | 50 | // All Blink-supported systems have 4096 sized system pages and can handle 51 | // permissions and commit / decommit at this granularity. 52 | static const size_t kSystemPageSize = 4096; 53 | static const size_t kSystemPageOffsetMask = kSystemPageSize - 1; 54 | static const size_t kSystemPageBaseMask = ~kSystemPageOffsetMask; 55 | 56 | enum PageAccessibilityConfiguration { 57 | PageAccessible, 58 | PageInaccessible, 59 | }; 60 | 61 | // Allocate one or more pages. 62 | // The requested address is just a hint; the actual address returned may 63 | // differ. The returned address will be aligned at least to align bytes. 64 | // len is in bytes, and must be a multiple of kPageAllocationGranularity. 65 | // align is in bytes, and must be a power-of-two multiple of 66 | // kPageAllocationGranularity. 67 | // If addr is null, then a suitable and randomized address will be chosen 68 | // automatically. 69 | // PageAccessibilityConfiguration controls the permission of the 70 | // allocated pages. 71 | // This call will return null if the allocation cannot be satisfied. 72 | WTF_EXPORT void* allocPages(void* addr, size_t len, size_t align, PageAccessibilityConfiguration); 73 | 74 | // Free one or more pages. 75 | // addr and len must match a previous call to allocPages(). 76 | WTF_EXPORT void freePages(void* addr, size_t len); 77 | 78 | // Mark one or more system pages as being inaccessible. 79 | // Subsequently accessing any address in the range will fault, and the 80 | // addresses will not be re-used by future allocations. 81 | // len must be a multiple of kSystemPageSize bytes. 82 | WTF_EXPORT void setSystemPagesInaccessible(void* addr, size_t len); 83 | 84 | // Mark one or more system pages as being accessible. 85 | // The pages will be readable and writeable. 86 | // len must be a multiple of kSystemPageSize bytes. 87 | // The result bool value indicates whether the permission 88 | // change succeeded or not. 
You must check the result 89 | // (in most cases you need to RELEASE_ASSERT that it is 90 | // true). 91 | WTF_EXPORT WARN_UNUSED_RETURN bool setSystemPagesAccessible(void* addr, size_t len); 92 | 93 | // Decommit one or more system pages. Decommitted means that the physical memory 94 | // is released to the system, but the virtual address space remains reserved. 95 | // System pages are re-committed by calling recommitSystemPages(). Touching 96 | // a decommitted page _may_ fault. 97 | // Clients should not make any assumptions about the contents of decommitted 98 | // system pages, before or after they write to the page. The only guarantee 99 | // provided is that the contents of the system page will be deterministic again 100 | // after recommitting and writing to it. In particlar note that system pages are// not guaranteed to be zero-filled upon re-commit. 101 | // len must be a multiple of kSystemPageSize bytes. 102 | WTF_EXPORT void decommitSystemPages(void* addr, size_t len); 103 | 104 | // Recommit one or more system pages. Decommitted system pages must be 105 | // recommitted before they are read are written again. 106 | // Note that this operation may be a no-op on some platforms. 107 | // len must be a multiple of kSystemPageSize bytes. 108 | WTF_EXPORT void recommitSystemPages(void* addr, size_t len); 109 | 110 | // Discard one or more system pages. Discarding is a hint to the system that 111 | // the page is no longer required. The hint may: 112 | // - Do nothing. 113 | // - Discard the page immediately, freeing up physical pages. 114 | // - Discard the page at some time in the future in response to memory pressure. 115 | // Only committed pages should be discarded. Discarding a page does not 116 | // decommit it, and it is valid to discard an already-discarded page. 117 | // A read or write to a discarded page will not fault. 118 | // Reading from a discarded page may return the original page content, or a 119 | // page full of zeroes. 120 | // Writing to a discarded page is the only guaranteed way to tell the system 121 | // that the page is required again. Once written to, the content of the page is // guaranteed stable once more. After being written to, the page content may be 122 | // based on the original page content, or a page of zeroes. 123 | // len must be a multiple of kSystemPageSize bytes. 124 | WTF_EXPORT void discardSystemPages(void* addr, size_t len); 125 | 126 | } // namespace WTF 127 | 128 | #endif // WTF_PageAllocator_h 129 | -------------------------------------------------------------------------------- /PartitionAlloc.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013 Google Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * Neither the name of Google Inc. nor the names of its 15 | * contributors may be used to endorse or promote products derived from 16 | * this software without specific prior written permission. 
17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | 31 | #include "config.h" 32 | #include "PartitionAlloc.h" 33 | 34 | #include 35 | #include 36 | 37 | #ifndef NDEBUG 38 | #include 39 | #endif 40 | 41 | // Two partition pages are used as guard / metadata page so make sure the super 42 | // page size is bigger. 43 | static_assert(WTF::kPartitionPageSize * 4 <= WTF::kSuperPageSize, "ok super page size"); 44 | static_assert(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), "ok super page multiple"); 45 | // Four system pages gives us room to hack out a still-guard-paged piece 46 | // of metadata in the middle of a guard partition page. 47 | static_assert(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, "ok partition page size"); 48 | static_assert(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), "ok partition page multiple"); 49 | static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, "PartitionPage should not be too big"); 50 | static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, "PartitionBucket should not be too big"); 51 | static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, "PartitionSuperPageExtentEntry should not be too big"); 52 | static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, "page metadata fits in hole"); 53 | // Check that some of our zanier calculations worked out as expected. 54 | static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket"); 55 | static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed"); 56 | 57 | namespace WTF { 58 | 59 | int PartitionRootBase::gInitializedLock = 0; 60 | bool PartitionRootBase::gInitialized = false; 61 | PartitionPage PartitionRootBase::gSeedPage; 62 | PartitionBucket PartitionRootBase::gPagedBucket; 63 | 64 | static uint16_t partitionBucketNumSystemPages(size_t size) 65 | { 66 | // This works out reasonably for the current bucket sizes of the generic 67 | // allocator, and the current values of partition page size and constants. 68 | // Specifically, we have enough room to always pack the slots perfectly into 69 | // some number of system pages. The only waste is the waste associated with 70 | // unfaulted pages (i.e. wasted address space). 71 | // TODO: we end up using a lot of system pages for very small sizes. For 72 | // example, we'll use 12 system pages for slot size 24. The slot size is 73 | // so small that the waste would be tiny with just 4, or 1, system pages. 74 | // Later, we can investigate whether there are anti-fragmentation benefits 75 | // to using fewer system pages. 
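As a concrete instance of the trade-off described above, assume 4 KB system pages: a 384-byte slot leaves 256 bytes unused in a single page (about 6.3% waste) but packs exactly 32 slots into a three-page span with no waste, so the scan that follows settles on three system pages per slot span. A simplified standalone model of that calculation (it omits the unfaulted-page penalty the real function also applies):

    #include <cstddef>
    #include <cstdio>

    // Simplified model: for each candidate span of 1..kMaxPages system pages,
    // compute the bytes left over after packing as many slots as possible.
    int main()
    {
        const size_t kSystemPageSize = 4096; // assumed 4KB system pages
        const size_t kMaxPages = 4;
        const size_t slotSize = 384;         // illustrative bucket size

        for (size_t pages = 1; pages <= kMaxPages; ++pages) {
            size_t spanSize = pages * kSystemPageSize;
            size_t numSlots = spanSize / slotSize;
            size_t waste = spanSize - numSlots * slotSize;
            printf("%zu page(s): %zu slots, %zu bytes wasted (%.2f%%)\n",
                   pages, numSlots, waste, 100.0 * waste / spanSize);
        }
        return 0;
    }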
76 | double bestWasteRatio = 1.0f; 77 | uint16_t bestPages = 0; 78 | if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { 79 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!(size % kSystemPageSize)); 80 | return static_cast(size / kSystemPageSize); 81 | } 82 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); 83 | for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; i <= kMaxSystemPagesPerSlotSpan; ++i) { 84 | size_t pageSize = kSystemPageSize * i; 85 | size_t numSlots = pageSize / size; 86 | size_t waste = pageSize - (numSlots * size); 87 | // Leaving a page unfaulted is not free; the page will occupy an empty page table entry. 88 | // Make a simple attempt to account for that. 89 | size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); 90 | size_t numUnfaultedPages = numRemainderPages ? (kNumSystemPagesPerPartitionPage - numRemainderPages) : 0; 91 | waste += sizeof(void*) * numUnfaultedPages; 92 | double wasteRatio = (double) waste / (double) pageSize; 93 | if (wasteRatio < bestWasteRatio) { 94 | bestWasteRatio = wasteRatio; 95 | bestPages = i; 96 | } 97 | } 98 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bestPages > 0); 99 | return bestPages; 100 | } 101 | 102 | static void parititonAllocBaseInit(PartitionRootBase* root) 103 | { 104 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!root->initialized); 105 | 106 | // Reseed the RNG using time() with some bits 107 | // from the current random pool 108 | srand(time(NULL) + (rand() % 100000) + (uint64_t) &root); 109 | 110 | // We don't want the delayed freelist to be too large 111 | root->delayed_free_list_max_sz = 16; 112 | 113 | spinLockLock(&PartitionRootBase::gInitializedLock); 114 | if (!PartitionRootBase::gInitialized) { 115 | PartitionRootBase::gInitialized = true; 116 | // We mark the seed page as free to make sure it is skipped by our 117 | // logic to find a new active page. 118 | PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage; 119 | } 120 | spinLockUnlock(&PartitionRootBase::gInitializedLock); 121 | 122 | root->initialized = true; 123 | root->totalSizeOfCommittedPages = 0; 124 | root->totalSizeOfSuperPages = 0; 125 | root->totalSizeOfDirectMappedPages = 0; 126 | root->nextSuperPage = 0; 127 | root->nextPartitionPage = 0; 128 | root->nextPartitionPageEnd = 0; 129 | root->firstExtent = 0; 130 | root->currentExtent = 0; 131 | root->directMapList = 0; 132 | 133 | memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing)); 134 | root->globalEmptyPageRingIndex = 0; 135 | 136 | for(int i = 0; i < kCookieSize; i++) { 137 | root->kCookieValue[i] = (unsigned char) _rand(255); 138 | } 139 | 140 | // This is a "magic" value so we can test if a root pointer is valid. 
141 | root->invertedSelf = ~reinterpret_cast(root); 142 | } 143 | 144 | static void partitionBucketInitBase(PartitionBucket* bucket, PartitionRootBase* root) 145 | { 146 | bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; 147 | bucket->emptyPagesHead = 0; 148 | bucket->decommittedPagesHead = 0; 149 | bucket->numFullPages = 0; 150 | bucket->numSystemPagesPerSlotSpan = partitionBucketNumSystemPages(bucket->slotSize); 151 | } 152 | 153 | void partitionAllocInit(PartitionRoot* root, size_t numBuckets, size_t maxAllocation) 154 | { 155 | parititonAllocBaseInit(root); 156 | root->numBuckets = numBuckets; 157 | root->maxAllocation = maxAllocation; 158 | size_t i; 159 | for (i = 0; i < root->numBuckets; ++i) { 160 | PartitionBucket* bucket = &root->buckets()[i]; 161 | if (!i) 162 | bucket->slotSize = kAllocationGranularity; 163 | else 164 | bucket->slotSize = i << kBucketShift; 165 | partitionBucketInitBase(bucket, root); 166 | } 167 | } 168 | 169 | void partitionAllocGenericInit(PartitionRootGeneric* root) 170 | { 171 | parititonAllocBaseInit(root); 172 | 173 | root->lock = 0; 174 | 175 | // Precalculate some shift and mask constants used in the hot path. 176 | // Example: malloc(41) == 101001 binary. 177 | // Order is 6 (1 << 6-1)==32 is highest bit set. 178 | // orderIndex is the next three MSB == 010 == 2. 179 | // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex). 180 | size_t order; 181 | for (order = 0; order <= kBitsPerSizet; ++order) { 182 | size_t orderIndexShift; 183 | if (order < kGenericNumBucketsPerOrderBits + 1) 184 | orderIndexShift = 0; 185 | else 186 | orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1); 187 | root->orderIndexShifts[order] = orderIndexShift; 188 | size_t subOrderIndexMask; 189 | if (order == kBitsPerSizet) { 190 | // This avoids invoking undefined behavior for an excessive shift. 191 | subOrderIndexMask = static_cast(-1) >> (kGenericNumBucketsPerOrderBits + 1); 192 | } else { 193 | subOrderIndexMask = ((static_cast(1) << order) - 1) >> (kGenericNumBucketsPerOrderBits + 1); 194 | } 195 | root->orderSubIndexMasks[order] = subOrderIndexMask; 196 | } 197 | 198 | // Set up the actual usable buckets first. 199 | // Note that typical values (i.e. min allocation size of 8) will result in 200 | // pseudo buckets (size==9 etc. or more generally, size is not a multiple 201 | // of the smallest allocation granularity). 202 | // We avoid them in the bucket lookup map, but we tolerate them to keep the 203 | // code simpler and the structures more generic. 204 | size_t i, j; 205 | size_t currentSize = kGenericSmallestBucket; 206 | size_t currentIncrement = kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits; 207 | PartitionBucket* bucket = &root->buckets[0]; 208 | for (i = 0; i < kGenericNumBucketedOrders; ++i) { 209 | for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { 210 | bucket->slotSize = currentSize; 211 | partitionBucketInitBase(bucket, root); 212 | // Disable psuedo buckets so that touching them faults. 213 | if (currentSize % kGenericSmallestBucket) 214 | bucket->activePagesHead = 0; 215 | currentSize += currentIncrement; 216 | ++bucket; 217 | } 218 | currentIncrement <<= 1; 219 | } 220 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(currentSize == 1 << kGenericMaxBucketedOrder); 221 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucket == &root->buckets[0] + kGenericNumBuckets); 222 | 223 | // Then set up the fast size -> bucket lookup table. 
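To make the precalculated shifts and masks concrete, the sketch below reproduces the decomposition for the malloc(41) walkthrough in the comment a few lines up, assuming kGenericNumBucketsPerOrderBits is 3 (eight buckets per order) as that walkthrough implies; the table lookup that consumes these values lives in the header and is not reproduced in this listing:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const size_t kGenericNumBucketsPerOrderBits = 3; // assumed, matching the comment
        const size_t size = 41;                          // 0b101001

        // Order: position of the highest set bit, counted from 1.
        size_t order = 0;
        for (size_t s = size; s; s >>= 1)
            ++order;                                     // 41 -> order 6

        // Same per-order shift and mask the initialization loop precalculates.
        size_t orderIndexShift = (order < kGenericNumBucketsPerOrderBits + 1)
            ? 0 : order - (kGenericNumBucketsPerOrderBits + 1);
        size_t subOrderIndexMask =
            ((static_cast<size_t>(1) << order) - 1) >> (kGenericNumBucketsPerOrderBits + 1);

        size_t orderIndex = (size >> orderIndexShift) & ((1 << kGenericNumBucketsPerOrderBits) - 1);
        size_t subOrderIndex = size & subOrderIndexMask;

        assert(order == 6 && orderIndex == 2 && subOrderIndex == 1);
        printf("size %zu: order %zu, orderIndex %zu, subOrderIndex %zu\n",
               size, order, orderIndex, subOrderIndex);
        return 0;
    }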
224 | bucket = &root->buckets[0]; 225 | PartitionBucket** bucketPtr = &root->bucketLookups[0]; 226 | for (order = 0; order <= kBitsPerSizet; ++order) { 227 | for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { 228 | if (order < kGenericMinBucketedOrder) { 229 | // Use the bucket of the finest granularity for malloc(0) etc. 230 | *bucketPtr++ = &root->buckets[0]; 231 | } else if (order > kGenericMaxBucketedOrder) { 232 | *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; 233 | } else { 234 | PartitionBucket* validBucket = bucket; 235 | // Skip over invalid buckets. 236 | while (validBucket->slotSize % kGenericSmallestBucket) 237 | validBucket++; 238 | *bucketPtr++ = validBucket; 239 | bucket++; 240 | } 241 | } 242 | } 243 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucket == &root->buckets[0] + kGenericNumBuckets); 244 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); 245 | // And there's one last bucket lookup that will be hit for e.g. malloc(-1), 246 | // which tries to overflow to a non-existant order. 247 | *bucketPtr = &PartitionRootGeneric::gPagedBucket; 248 | } 249 | 250 | static bool partitionAllocShutdownBucket(PartitionBucket* bucket) 251 | { 252 | // Failure here indicates a memory leak. 253 | bool foundLeak = bucket->numFullPages; 254 | for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) 255 | foundLeak |= (page->numAllocatedSlots > 0); 256 | return foundLeak; 257 | } 258 | 259 | static bool partitionAllocBaseShutdown(PartitionRootBase* root) 260 | { 261 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->initialized); 262 | root->initialized = false; 263 | 264 | // Now that we've examined all partition pages in all buckets, it's safe 265 | // to free all our super pages. Since the super page extent entries are 266 | // stored in the super pages, we need to be careful not to access them 267 | // after we've released the corresponding super page. 
268 | PartitionSuperPageExtentEntry* entry = root->firstExtent; 269 | while (entry) { 270 | PartitionSuperPageExtentEntry* nextEntry = entry->next; 271 | char* superPage = entry->superPageBase; 272 | char* superPagesEnd = entry->superPagesEnd; 273 | while (superPage < superPagesEnd) { 274 | freePages(superPage, kSuperPageSize); 275 | superPage += kSuperPageSize; 276 | } 277 | entry = nextEntry; 278 | } 279 | return root->directMapList; 280 | } 281 | 282 | bool partitionAllocShutdown(PartitionRoot* root) 283 | { 284 | bool foundLeak = false; 285 | size_t i; 286 | 287 | for(auto p : root->delayed_free_list) { 288 | partitionFreeWithPage(p, partitionPointerToPage(p), false); 289 | } 290 | 291 | for (i = 0; i < root->numBuckets; ++i) { 292 | PartitionBucket* bucket = &root->buckets()[i]; 293 | foundLeak |= partitionAllocShutdownBucket(bucket); 294 | } 295 | foundLeak |= partitionAllocBaseShutdown(root); 296 | return !foundLeak; 297 | } 298 | 299 | bool partitionAllocGenericShutdown(PartitionRootGeneric* root) 300 | { 301 | bool foundLeak = false; 302 | size_t i; 303 | 304 | for(auto p : root->delayed_free_list) { 305 | partitionFreeWithPage(p, partitionPointerToPage(p), false); 306 | } 307 | 308 | for (i = 0; i < kGenericNumBuckets; ++i) { 309 | PartitionBucket* bucket = &root->buckets[i]; 310 | foundLeak |= partitionAllocShutdownBucket(bucket); 311 | } 312 | foundLeak |= partitionAllocBaseShutdown(root); 313 | return !foundLeak; 314 | } 315 | 316 | #if !CPU(64BIT) 317 | static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() 318 | { 319 | IMMEDIATE_CRASH(); 320 | } 321 | #endif 322 | 323 | static NEVER_INLINE void partitionOutOfMemory(const PartitionRootBase* root) 324 | { 325 | #if !CPU(64BIT) 326 | // Check whether this OOM is due to a lot of super pages that are allocated 327 | // but not committed, probably due to http://crbug.com/421387. 328 | if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - root->totalSizeOfCommittedPages > kReasonableSizeOfUnusedPages) { 329 | partitionOutOfMemoryWithLotsOfUncommitedPages(); 330 | } 331 | #endif 332 | IMMEDIATE_CRASH(); 333 | } 334 | 335 | static NEVER_INLINE void partitionExcessiveAllocationSize() 336 | { 337 | IMMEDIATE_CRASH(); 338 | } 339 | 340 | static NEVER_INLINE void partitionBucketFull() 341 | { 342 | IMMEDIATE_CRASH(); 343 | } 344 | 345 | // partitionPageStateIs* 346 | // Note that it's only valid to call these functions on pages found on one of 347 | // the page lists. Specifically, you can't call these functions on full pages 348 | // that were detached from the active list. 
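The four page states referred to above can be summarized by the same three fields the predicates that follow inspect. This is a condensed stand-in model, not the real PartitionPage layout:

    #include <cstdio>

    // Stand-in for the three PartitionPage fields the state predicates look at.
    struct PageModel {
        int numAllocatedSlots;      // the bucket scan later negates this to tag detached full pages
        bool hasFreelistHead;
        int numUnprovisionedSlots;
    };

    const char* classify(const PageModel& p, int slotsPerSpan)
    {
        if (p.numAllocatedSlots == slotsPerSpan)
            return "full";          // no freelist, nothing left to provision
        if (p.numAllocatedSlots > 0 && (p.hasFreelistHead || p.numUnprovisionedSlots))
            return "active";        // can still hand out slots
        if (p.numAllocatedSlots == 0 && p.hasFreelistHead)
            return "empty";         // all slots free, memory still committed
        if (p.numAllocatedSlots == 0 && !p.hasFreelistHead)
            return "decommitted";   // all slots free, backing pages released
        return "unknown";
    }

    int main()
    {
        printf("%s\n", classify({3, true, 0}, 16));   // active
        printf("%s\n", classify({16, false, 0}, 16)); // full
        printf("%s\n", classify({0, true, 0}, 16));   // empty
        printf("%s\n", classify({0, false, 0}, 16));  // decommitted
        return 0;
    }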
349 | static bool ALWAYS_INLINE partitionPageStateIsActive(const PartitionPage* page) 350 | { 351 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != &PartitionRootGeneric::gSeedPage); 352 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->pageOffset); 353 | return (page->numAllocatedSlots > 0 && (page->freelistHead || page->numUnprovisionedSlots)); 354 | } 355 | 356 | static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) 357 | { 358 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != &PartitionRootGeneric::gSeedPage); 359 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->pageOffset); 360 | bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); 361 | if (ret) { 362 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->freelistHead); 363 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->numUnprovisionedSlots); 364 | } 365 | return ret; 366 | } 367 | 368 | static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) 369 | { 370 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != &PartitionRootGeneric::gSeedPage); 371 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->pageOffset); 372 | return (!page->numAllocatedSlots && page->freelistHead); 373 | } 374 | 375 | static bool ALWAYS_INLINE partitionPageStateIsDecommitted(const PartitionPage* page) 376 | { 377 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != &PartitionRootGeneric::gSeedPage); 378 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->pageOffset); 379 | bool ret = (!page->numAllocatedSlots && !page->freelistHead); 380 | if (ret) { 381 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->numUnprovisionedSlots); 382 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->emptyCacheIndex == -1); 383 | } 384 | return ret; 385 | } 386 | 387 | static void partitionIncreaseCommittedPages(PartitionRootBase* root, size_t len) 388 | { 389 | root->totalSizeOfCommittedPages += len; 390 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->totalSizeOfCommittedPages <= root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); 391 | } 392 | 393 | static void partitionDecreaseCommittedPages(PartitionRootBase* root, size_t len) 394 | { 395 | root->totalSizeOfCommittedPages -= len; 396 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->totalSizeOfCommittedPages <= root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); 397 | } 398 | 399 | static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, void* addr, size_t len) 400 | { 401 | decommitSystemPages(addr, len); 402 | partitionDecreaseCommittedPages(root, len); 403 | } 404 | 405 | static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, void* addr, size_t len) 406 | { 407 | recommitSystemPages(addr, len); 408 | partitionIncreaseCommittedPages(root, len); 409 | } 410 | 411 | static ALWAYS_INLINE void* partitionAllocPartitionPages(PartitionRootBase* root, int flags, uint16_t numPartitionPages) 412 | { 413 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!(reinterpret_cast(root->nextPartitionPage) % kPartitionPageSize)); 414 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!(reinterpret_cast(root->nextPartitionPageEnd) % kPartitionPageSize)); 415 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(numPartitionPages <= kNumPartitionPagesPerSuperPage); 416 | size_t totalSize = kPartitionPageSize * numPartitionPages; 417 | size_t numPartitionPagesLeft = (root->nextPartitionPageEnd - root->nextPartitionPage) >> kPartitionPageShift; 418 | if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { 419 | // In this case, we can 
still hand out pages from the current super page 420 | // allocation. 421 | char* ret = root->nextPartitionPage; 422 | root->nextPartitionPage += totalSize; 423 | partitionIncreaseCommittedPages(root, totalSize); 424 | return ret; 425 | } 426 | 427 | // Need a new super page. We want to allocate super pages in a continguous 428 | // address region as much as possible. This is important for not causing 429 | // page table bloat and not fragmenting address spaces in 32 bit architectures. 430 | char* requestedAddress = root->nextSuperPage; 431 | char* superPage = reinterpret_cast(allocPages(requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible)); 432 | if (UNLIKELY(!superPage)) 433 | return 0; 434 | 435 | root->totalSizeOfSuperPages += kSuperPageSize; 436 | partitionIncreaseCommittedPages(root, totalSize); 437 | 438 | root->nextSuperPage = superPage + kSuperPageSize; 439 | char* ret = superPage + kPartitionPageSize; 440 | root->nextPartitionPage = ret + totalSize; 441 | root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize; 442 | // Make the first partition page in the super page a guard page, but leave a 443 | // hole in the middle. 444 | // This is where we put page metadata and also a tiny amount of extent 445 | // metadata. 446 | setSystemPagesInaccessible(superPage, kSystemPageSize); 447 | setSystemPagesInaccessible(superPage + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2)); 448 | // Also make the last partition page a guard page. 449 | setSystemPagesInaccessible(superPage + (kSuperPageSize - kPartitionPageSize), kPartitionPageSize); 450 | 451 | // If we were after a specific address, but didn't get it, assume that 452 | // the system chose a lousy address. Here most OS'es have a default 453 | // algorithm that isn't randomized. For example, most Linux 454 | // distributions will allocate the mapping directly before the last 455 | // successful mapping, which is far from random. So we just get fresh 456 | // randomness for the next mapping attempt. 457 | if (requestedAddress && requestedAddress != superPage) 458 | root->nextSuperPage = 0; 459 | 460 | // We allocated a new super page so update super page metadata. 461 | // First check if this is a new extent or not. 462 | PartitionSuperPageExtentEntry* latestExtent = reinterpret_cast(partitionSuperPageToMetadataArea(superPage)); 463 | // By storing the root in every extent metadata object, we have a fast way 464 | // to go from a pointer within the partition to the root object. 465 | latestExtent->root = root; 466 | // Most new extents will be part of a larger extent, and these three fields 467 | // are unused, but we initialize them to 0 so that we get a clear signal 468 | // in case they are accidentally used. 
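The guard-page calls a few lines up carve every super page into a leading guard partition page with a single accessible metadata system page punched into it, a run of usable partition pages, and a trailing guard partition page. A small sketch of the resulting layout, assuming the conventional constants (2 MB super pages, 16 KB partition pages, 4 KB system pages) since PartitionAlloc.h is not reproduced in this listing:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Assumed values; the real constants live in PartitionAlloc.h.
        const uint64_t kSystemPageSize = 4 * 1024;
        const uint64_t kPartitionPageSize = 16 * 1024;
        const uint64_t kSuperPageSize = 2 * 1024 * 1024;
        const uint64_t base = 0x200000000; // hypothetical super-page-aligned address

        struct Region { const char* what; uint64_t begin; uint64_t end; } layout[] = {
            { "guard (no access)",      base,                                       base + kSystemPageSize },
            { "metadata hole",          base + kSystemPageSize,                     base + 2 * kSystemPageSize },
            { "guard (no access)",      base + 2 * kSystemPageSize,                 base + kPartitionPageSize },
            { "usable partition pages", base + kPartitionPageSize,                  base + kSuperPageSize - kPartitionPageSize },
            { "guard (no access)",      base + kSuperPageSize - kPartitionPageSize, base + kSuperPageSize },
        };
        for (const Region& r : layout)
            printf("%-24s [%#llx, %#llx)\n", r.what,
                   (unsigned long long)r.begin, (unsigned long long)r.end);
        return 0;
    }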
469 | latestExtent->superPageBase = 0; 470 | latestExtent->superPagesEnd = 0; 471 | latestExtent->next = 0; 472 | 473 | PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; 474 | bool isNewExtent = (superPage != requestedAddress); 475 | if (UNLIKELY(isNewExtent)) { 476 | if (UNLIKELY(!currentExtent)) { 477 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!root->firstExtent); 478 | root->firstExtent = latestExtent; 479 | } else { 480 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(currentExtent->superPageBase); 481 | currentExtent->next = latestExtent; 482 | } 483 | root->currentExtent = latestExtent; 484 | latestExtent->superPageBase = superPage; 485 | latestExtent->superPagesEnd = superPage + kSuperPageSize; 486 | } else { 487 | // We allocated next to an existing extent so just nudge the size up a little. 488 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(currentExtent->superPagesEnd); 489 | currentExtent->superPagesEnd += kSuperPageSize; 490 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd); 491 | } 492 | return ret; 493 | } 494 | 495 | static ALWAYS_INLINE uint16_t partitionBucketPartitionPages(const PartitionBucket* bucket) 496 | { 497 | return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage; 498 | } 499 | 500 | static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) 501 | { 502 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsDecommitted(page)); 503 | 504 | page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); 505 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numUnprovisionedSlots); 506 | 507 | page->nextPage = nullptr; 508 | } 509 | 510 | static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, PartitionBucket* bucket) 511 | { 512 | // The bucket never changes. We set it up once. 513 | page->bucket = bucket; 514 | page->emptyCacheIndex = -1; 515 | 516 | partitionPageReset(page); 517 | 518 | // If this page has just a single slot, do not set up page offsets for any 519 | // page metadata other than the first one. This ensures that attempts to 520 | // touch invalid page metadata fail. 521 | if (page->numUnprovisionedSlots == 1) 522 | return; 523 | 524 | uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); 525 | char* pageCharPtr = reinterpret_cast(page); 526 | for (uint16_t i = 1; i < numPartitionPages; ++i) { 527 | pageCharPtr += kPageMetadataSize; 528 | PartitionPage* secondaryPage = reinterpret_cast(pageCharPtr); 529 | secondaryPage->pageOffset = i; 530 | } 531 | } 532 | 533 | static ALWAYS_INLINE size_t partitionRoundUpToSystemPage(size_t size) 534 | { 535 | return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; 536 | } 537 | 538 | static ALWAYS_INLINE size_t partitionRoundDownToSystemPage(size_t size) 539 | { 540 | return size & kSystemPageBaseMask; 541 | } 542 | 543 | static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page) 544 | { 545 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != &PartitionRootGeneric::gSeedPage); 546 | uint16_t numSlots = page->numUnprovisionedSlots; 547 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(numSlots); 548 | PartitionBucket* bucket = page->bucket; 549 | // We should only get here when _every_ slot is either used or unprovisioned. 550 | // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.) 
551 | // Not a security concern, this will happen often with size specific partitions 552 | //RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); 553 | // Similarly, make explicitly sure that the freelist is empty. 554 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->freelistHead); 555 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots >= 0); 556 | 557 | size_t size = bucket->slotSize; 558 | char* base = reinterpret_cast(partitionPageToPointer(page)); 559 | char* returnObject = base + (size * page->numAllocatedSlots); 560 | char* firstFreelistPointer = returnObject + size; 561 | char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*); 562 | // Our goal is to fault as few system pages as possible. We calculate the 563 | // page containing the "end" of the returned slot, and then allow freelist 564 | // pointers to be written up to the end of that page. 565 | char* subPageLimit = reinterpret_cast(partitionRoundUpToSystemPage(reinterpret_cast(firstFreelistPointer))); 566 | char* slotsLimit = returnObject + (size * numSlots); 567 | char* freelistLimit = subPageLimit; 568 | if (UNLIKELY(slotsLimit < freelistLimit)) 569 | freelistLimit = slotsLimit; 570 | 571 | uint16_t numNewFreelistEntries = 0; 572 | if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { 573 | // Only consider used space in the slot span. If we consider wasted 574 | // space, we may get an off-by-one when a freelist pointer fits in the 575 | // wasted space, but a slot does not. 576 | // We know we can fit at least one freelist pointer. 577 | numNewFreelistEntries = 1; 578 | // Any further entries require space for the whole slot span. 579 | numNewFreelistEntries += static_cast((freelistLimit - firstFreelistPointerExtent) / size); 580 | } 581 | 582 | // We always return an object slot -- that's the +1 below. 583 | // We do not neccessarily create any new freelist entries, because we cross sub page boundaries frequently for large bucket sizes. 584 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(numNewFreelistEntries + 1 <= numSlots); 585 | numSlots -= (numNewFreelistEntries + 1); 586 | page->numUnprovisionedSlots = numSlots; 587 | page->numAllocatedSlots++; 588 | 589 | std::vector vfreelist; 590 | 591 | if (LIKELY(numNewFreelistEntries)) { 592 | char* freelistPointer = firstFreelistPointer; 593 | PartitionFreelistEntry* entry = reinterpret_cast(freelistPointer); 594 | vfreelist.push_back(entry); 595 | 596 | while (--numNewFreelistEntries) { 597 | freelistPointer += size; 598 | vfreelist.push_back(reinterpret_cast(freelistPointer)); 599 | } 600 | 601 | std::random_shuffle(vfreelist.begin(), vfreelist.end(), _rand); 602 | 603 | entry = vfreelist[vfreelist.size()-1]; 604 | vfreelist.pop_back(); 605 | page->freelistHead = entry; 606 | 607 | returnObject = reinterpret_cast(vfreelist[vfreelist.size()-1]); 608 | vfreelist.pop_back(); 609 | 610 | for(int i=0;inext = partitionFreelistMask(nextEntry); 613 | entry = nextEntry; 614 | } 615 | 616 | entry->next = partitionFreelistMask(0); 617 | } else { 618 | page->freelistHead = 0; 619 | } 620 | 621 | return returnObject; 622 | } 623 | 624 | // This helper function scans a bucket's active page list for a suitable new 625 | // active page. 626 | // When it finds a suitable new active page (one that has free slots and is not 627 | // empty), it is set as the new active page. If there is no suitable new 628 | // active page, the current active page is set to the seed page. 
629 | // As potential pages are scanned, they are tidied up according to their state. 630 | // Empty pages are swept on to the empty page list, decommitted pages on to the 631 | // decommitted page list and full pages are unlinked from any list. 632 | static bool partitionSetNewActivePage(PartitionBucket* bucket) 633 | { 634 | PartitionPage* page = bucket->activePagesHead; 635 | if (page == &PartitionRootBase::gSeedPage) 636 | return false; 637 | 638 | PartitionPage* nextPage; 639 | 640 | for (; page; page = nextPage) { 641 | nextPage = page->nextPage; 642 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->bucket == bucket); 643 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != bucket->emptyPagesHead); 644 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != bucket->decommittedPagesHead); 645 | 646 | // Deal with empty and decommitted pages. 647 | if (LIKELY(partitionPageStateIsActive(page))) { 648 | // This page is usable because it has freelist entries, or has 649 | // unprovisioned slots we can create freelist entries from. 650 | bucket->activePagesHead = page; 651 | return true; 652 | } 653 | if (LIKELY(partitionPageStateIsEmpty(page))) { 654 | page->nextPage = bucket->emptyPagesHead; 655 | bucket->emptyPagesHead = page; 656 | } else if (LIKELY(partitionPageStateIsDecommitted(page))) { 657 | page->nextPage = bucket->decommittedPagesHead; 658 | bucket->decommittedPagesHead = page; 659 | } else { 660 | // Not a security concern. We will easily fill up pages 661 | // with size specific partitions, and we don't want to halt 662 | //RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsFull(page)); 663 | // If we get here, we found a full page. Skip over it too, and also 664 | // tag it as full (via a negative value). We need it tagged so that 665 | // free'ing can tell, and move it back into the active page list. 666 | page->numAllocatedSlots = -page->numAllocatedSlots; 667 | ++bucket->numFullPages; 668 | // numFullPages is a uint16_t for efficient packing so guard against 669 | // overflow to be safe. 670 | if (UNLIKELY(!bucket->numFullPages)) 671 | partitionBucketFull(); 672 | // Not necessary but might help stop accidents. 673 | page->nextPage = 0; 674 | } 675 | } 676 | 677 | bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; 678 | return false; 679 | } 680 | 681 | static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(PartitionPage* page) 682 | { 683 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionBucketIsDirectMapped(page->bucket)); 684 | return reinterpret_cast(reinterpret_cast(page) + 3 * kPageMetadataSize); 685 | } 686 | 687 | static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, size_t size) 688 | { 689 | size_t* rawSizePtr = partitionPageGetRawSizePtr(page); 690 | if (UNLIKELY(rawSizePtr != nullptr)) 691 | *rawSizePtr = size; 692 | } 693 | 694 | static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, int flags, size_t rawSize) 695 | { 696 | size_t size = partitionDirectMapSize(rawSize); 697 | 698 | // Because we need to fake looking like a super page, we need to allocate 699 | // a bunch of system pages more than "size": 700 | // - The first few system pages are the partition page in which the super 701 | // page metadata is stored. We fault just one system page out of a partition 702 | // page sized clump. 703 | // - We add a trailing guard page on 32-bit (on 64-bit we rely on the 704 | // massive address space plus randomization instead). 
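For a rough sense of the sizing just described: a direct-mapped request reserves its system-page-rounded size plus one partition page for metadata and guard space, plus a trailing guard system page on 32-bit, all rounded up to the allocation granularity; only the payload plus one metadata system page is committed. A standalone sketch with illustrative constants, modelling partitionDirectMapSize() as a plain round-up since that helper is not shown in this listing:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Illustrative constants; real values come from PageAllocator.h / PartitionAlloc.h.
        const uint64_t kSystemPageSize = 4 * 1024;
        const uint64_t kPartitionPageSize = 16 * 1024;
        const uint64_t kPageAllocationGranularity = 4 * 1024; // 64KB on Windows, 4KB elsewhere
        const bool is32Bit = false;

        uint64_t rawSize = 1000000; // a ~1MB direct-mapped allocation
        uint64_t size = (rawSize + kSystemPageSize - 1) & ~(kSystemPageSize - 1);

        uint64_t mapSize = size + kPartitionPageSize;     // metadata + guard partition page
        if (is32Bit)
            mapSize += kSystemPageSize;                   // trailing guard page
        mapSize = (mapSize + kPageAllocationGranularity - 1) & ~(kPageAllocationGranularity - 1);

        printf("raw %llu -> reserved %llu, committed %llu\n",
               (unsigned long long)rawSize,
               (unsigned long long)mapSize,
               (unsigned long long)(size + kSystemPageSize));
        return 0;
    }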
705 | size_t mapSize = size + kPartitionPageSize; 706 | #if !CPU(64BIT) 707 | mapSize += kSystemPageSize; 708 | #endif 709 | // Round up to the allocation granularity. 710 | mapSize += kPageAllocationGranularityOffsetMask; 711 | mapSize &= kPageAllocationGranularityBaseMask; 712 | 713 | // TODO: these pages will be zero-filled. Consider internalizing an 714 | // allocZeroed() API so we can avoid a memset() entirely in this case. 715 | char* ptr = reinterpret_cast(allocPages(0, mapSize, kSuperPageSize, PageAccessible)); 716 | if (UNLIKELY(!ptr)) 717 | return nullptr; 718 | 719 | size_t committedPageSize = size + kSystemPageSize; 720 | root->totalSizeOfDirectMappedPages += committedPageSize; 721 | partitionIncreaseCommittedPages(root, committedPageSize); 722 | 723 | char* slot = ptr + kPartitionPageSize; 724 | setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2)); 725 | #if !CPU(64BIT) 726 | setSystemPagesInaccessible(ptr, kSystemPageSize); 727 | setSystemPagesInaccessible(slot + size, kSystemPageSize); 728 | #endif 729 | 730 | PartitionSuperPageExtentEntry* extent = reinterpret_cast(partitionSuperPageToMetadataArea(ptr)); 731 | extent->root = root; 732 | // The new structures are all located inside a fresh system page so they 733 | // will all be zeroed out. These ASSERTs are for documentation. 734 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!extent->superPageBase); 735 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!extent->superPagesEnd); 736 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!extent->next); 737 | PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot); 738 | PartitionBucket* bucket = reinterpret_cast(reinterpret_cast(page) + (kPageMetadataSize * 2)); 739 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->nextPage); 740 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->numAllocatedSlots); 741 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->numUnprovisionedSlots); 742 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->pageOffset); 743 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->emptyCacheIndex); 744 | page->bucket = bucket; 745 | page->freelistHead = reinterpret_cast(slot); 746 | PartitionFreelistEntry* nextEntry = reinterpret_cast(slot); 747 | nextEntry->next = partitionFreelistMask(0); 748 | 749 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!bucket->activePagesHead); 750 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!bucket->emptyPagesHead); 751 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!bucket->decommittedPagesHead); 752 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!bucket->numSystemPagesPerSlotSpan); 753 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!bucket->numFullPages); 754 | bucket->slotSize = size; 755 | 756 | PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); 757 | mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; 758 | mapExtent->bucket = bucket; 759 | 760 | // Maintain the doubly-linked list of all direct mappings. 
761 | mapExtent->nextExtent = root->directMapList; 762 | if (mapExtent->nextExtent) 763 | mapExtent->nextExtent->prevExtent = mapExtent; 764 | mapExtent->prevExtent = nullptr; 765 | root->directMapList = mapExtent; 766 | 767 | return page; 768 | } 769 | 770 | static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) 771 | { 772 | PartitionRootBase* root = partitionPageToRoot(page); 773 | const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page); 774 | size_t unmapSize = extent->mapSize; 775 | 776 | // Maintain the doubly-linked list of all direct mappings. 777 | if (extent->prevExtent) { 778 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(extent->prevExtent->nextExtent == extent); 779 | extent->prevExtent->nextExtent = extent->nextExtent; 780 | } else { 781 | root->directMapList = extent->nextExtent; 782 | } 783 | if (extent->nextExtent) { 784 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(extent->nextExtent->prevExtent == extent); 785 | extent->nextExtent->prevExtent = extent->prevExtent; 786 | } 787 | 788 | // Add on the size of the trailing guard page and preceeding partition 789 | // page. 790 | unmapSize += kPartitionPageSize + kSystemPageSize; 791 | 792 | size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize; 793 | partitionDecreaseCommittedPages(root, uncommittedPageSize); 794 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->totalSizeOfDirectMappedPages >= uncommittedPageSize); 795 | root->totalSizeOfDirectMappedPages -= uncommittedPageSize; 796 | 797 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!(unmapSize & kPageAllocationGranularityOffsetMask)); 798 | 799 | char* ptr = reinterpret_cast(partitionPageToPointer(page)); 800 | // Account for the mapping starting a partition page before the actual 801 | // allocation address. 802 | ptr -= kPartitionPageSize; 803 | 804 | freePages(ptr, unmapSize); 805 | } 806 | 807 | void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) 808 | { 809 | // The slow path is called when the freelist is empty. 810 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!bucket->activePagesHead->freelistHead); 811 | 812 | PartitionPage* newPage = nullptr; 813 | 814 | // For the partitionAllocGeneric API, we have a bunch of buckets marked 815 | // as special cases. We bounce them through to the slow path so that we 816 | // can still have a blazing fast hot path due to lack of corner-case 817 | // branches. 818 | bool returnNull = flags & PartitionAllocReturnNull; 819 | if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { 820 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size > kGenericMaxBucketed); 821 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucket == &PartitionRootBase::gPagedBucket); 822 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); 823 | if (size > kGenericMaxDirectMapped) { 824 | if (returnNull) 825 | return nullptr; 826 | partitionExcessiveAllocationSize(); 827 | } 828 | newPage = partitionDirectMap(root, flags, size); 829 | } else if (LIKELY(partitionSetNewActivePage(bucket))) { 830 | // First, did we find an active page in the active pages list? 831 | newPage = bucket->activePagesHead; 832 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsActive(newPage)); 833 | } else if (LIKELY(bucket->emptyPagesHead != nullptr) || LIKELY(bucket->decommittedPagesHead != nullptr)) { 834 | // Second, look in our lists of empty and decommitted pages. 
835 | // Check empty pages first, which are preferred, but beware that an
836 | // empty page might have been decommitted.
837 | while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) {
838 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(newPage->bucket == bucket);
839 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsEmpty(newPage) || partitionPageStateIsDecommitted(newPage));
840 | bucket->emptyPagesHead = newPage->nextPage;
841 | // Accept the empty page unless it got decommitted.
842 | if (newPage->freelistHead) {
843 | newPage->nextPage = nullptr;
844 | break;
845 | }
846 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsDecommitted(newPage));
847 | newPage->nextPage = bucket->decommittedPagesHead;
848 | bucket->decommittedPagesHead = newPage;
849 | }
850 | if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) {
851 | newPage = bucket->decommittedPagesHead;
852 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(newPage->bucket == bucket);
853 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsDecommitted(newPage));
854 | bucket->decommittedPagesHead = newPage->nextPage;
855 | void* addr = partitionPageToPointer(newPage);
856 | partitionRecommitSystemPages(root, addr, partitionBucketBytes(newPage->bucket));
857 | partitionPageReset(newPage);
858 | }
859 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(newPage);
860 | } else {
861 | // Third, if we get here, we need a brand new page.
862 | uint16_t numPartitionPages = partitionBucketPartitionPages(bucket);
863 | void* rawPages = partitionAllocPartitionPages(root, flags, numPartitionPages);
864 | if (LIKELY(rawPages != nullptr)) {
865 | newPage = partitionPointerToPageNoAlignmentCheck(rawPages);
866 | partitionPageSetup(newPage, bucket);
867 | }
868 | }
869 |
870 | // Bail if we had a memory allocation failure.
871 | if (UNLIKELY(!newPage)) {
872 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage);
873 | if (returnNull)
874 | return nullptr;
875 | partitionOutOfMemory(root);
876 | }
877 |
878 | bucket = newPage->bucket;
879 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucket != &PartitionRootBase::gPagedBucket);
880 | bucket->activePagesHead = newPage;
881 | partitionPageSetRawSize(newPage, size);
882 |
883 | // If we found an active page with free slots, or an empty page, we have a
884 | // usable freelist head.
885 | if (LIKELY(newPage->freelistHead != nullptr)) {
886 | PartitionFreelistEntry* entry = newPage->freelistHead;
887 | PartitionFreelistEntry* z = entry;
888 | // Walk the freelist, stopping at a pseudo-random entry (roughly 1-in-10 per step).
889 | while (entry) { // "z" remembers the previously visited entry.
890 | if ((rand() % 10) == 1) {
891 | break;
892 | }
893 |
894 | z = entry;
895 | entry = partitionFreelistMask(entry->next);
896 | }
897 | // If the walk ran off the end of the list, fall back to the head.
898 | if (entry == nullptr) {
899 | entry = newPage->freelistHead;
900 | }
901 | // Unlink the chosen entry: either advance the head, or splice it out after "z".
902 | if (entry == newPage->freelistHead) {
903 | PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next);
904 | newPage->freelistHead = newHead;
905 | } else {
906 | z->next = entry->next;
907 | }
908 | // Hand out the chosen (randomized) entry as this allocation.
909 | newPage->numAllocatedSlots++;
910 | return entry;
911 | }
912 |
913 | // Otherwise, we need to build the freelist.
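// The fill that follows is done lazily. Per the "never-used freelist entries"
// comment in PartitionAlloc.h, entries are provisioned in units of the system
// page size rather than for the whole slot span at once, which is why a fresh
// page can still have numUnprovisionedSlots != 0 (asserted on the next line).
// As a rough illustration, assuming 4KB system pages and a 16KB slot span of
// 128-byte slots: the span holds 128 slots in total, but only the first
// 4096 / 128 = 32 would receive freelist entries up front, leaving 96
// unprovisioned until the freelist next runs dry.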
914 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(newPage->numUnprovisionedSlots); 915 | return partitionPageAllocAndFillFreelist(newPage); 916 | } 917 | 918 | static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, PartitionPage* page) 919 | { 920 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsEmpty(page)); 921 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!partitionBucketIsDirectMapped(page->bucket)); 922 | void* addr = partitionPageToPointer(page); 923 | partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); 924 | 925 | // We actually leave the decommitted page in the active list. We'll sweep 926 | // it on to the decommitted page list when we next walk the active page 927 | // list. 928 | // Pulling this trick enables us to use a singly-linked page list for all 929 | // cases, which is critical in keeping the page metadata structure down to 930 | // 32 bytes in size. 931 | page->freelistHead = 0; 932 | page->numUnprovisionedSlots = 0; 933 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsDecommitted(page)); 934 | } 935 | 936 | static void partitionDecommitPageIfPossible(PartitionRootBase* root, PartitionPage* page) 937 | { 938 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->emptyCacheIndex >= 0); 939 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(static_cast(page->emptyCacheIndex) < kMaxFreeableSpans); 940 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page == root->globalEmptyPageRing[page->emptyCacheIndex]); 941 | page->emptyCacheIndex = -1; 942 | if (partitionPageStateIsEmpty(page)) 943 | partitionDecommitPage(root, page); 944 | } 945 | 946 | static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) 947 | { 948 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsEmpty(page)); 949 | PartitionRootBase* root = partitionPageToRoot(page); 950 | 951 | // If the page is already registered as empty, give it another life. 952 | if (page->emptyCacheIndex != -1) { 953 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->emptyCacheIndex >= 0); 954 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(static_cast(page->emptyCacheIndex) < kMaxFreeableSpans); 955 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->globalEmptyPageRing[page->emptyCacheIndex] == page); 956 | root->globalEmptyPageRing[page->emptyCacheIndex] = 0; 957 | } 958 | 959 | int16_t currentIndex = root->globalEmptyPageRingIndex; 960 | PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; 961 | // The page might well have been re-activated, filled up, etc. before we get 962 | // around to looking at it here. 963 | if (pageToDecommit) 964 | partitionDecommitPageIfPossible(root, pageToDecommit); 965 | 966 | // We put the empty slot span on our global list of "pages that were once 967 | // empty". thus providing it a bit of breathing room to get re-used before 968 | // we really free it. This improves performance, particularly on Mac OS X 969 | // which has subpar memory management performance. 
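// A short trace of the ring update below (kMaxFreeableSpans = 16). The ring is
// effectively a fixed-size FIFO of recently emptied pages:
//
//   register page A -> ring[0] = A, ring index advances to 1
//   register page B -> ring[1] = B, ring index advances to 2
//   ...
//   the 17th registration wraps back to slot 0: whatever still sits there
//   (page A, say) is decommitted first, if it is still empty, and then the
//   slot is overwritten with the newly emptied page.
//
// Pages that were re-activated in the meantime are simply skipped, because
// partitionDecommitPageIfPossible() only decommits pages that are still empty.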
970 | root->globalEmptyPageRing[currentIndex] = page; 971 | page->emptyCacheIndex = currentIndex; 972 | ++currentIndex; 973 | if (currentIndex == kMaxFreeableSpans) 974 | currentIndex = 0; 975 | root->globalEmptyPageRingIndex = currentIndex; 976 | } 977 | 978 | static void partitionDecommitEmptyPages(PartitionRootBase* root) 979 | { 980 | for (size_t i = 0; i < kMaxFreeableSpans; ++i) { 981 | PartitionPage* page = root->globalEmptyPageRing[i]; 982 | if (page) 983 | partitionDecommitPageIfPossible(root, page); 984 | root->globalEmptyPageRing[i] = nullptr; 985 | } 986 | } 987 | 988 | void partitionFreeSlowPath(PartitionPage* page) 989 | { 990 | PartitionBucket* bucket = page->bucket; 991 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != &PartitionRootGeneric::gSeedPage); 992 | if (LIKELY(page->numAllocatedSlots == 0)) { 993 | // Page became fully unused. 994 | if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { 995 | partitionDirectUnmap(page); 996 | return; 997 | } 998 | // If it's the current active page, change it. We bounce the page to 999 | // the empty list as a force towards defragmentation. 1000 | if (LIKELY(page == bucket->activePagesHead)) 1001 | (void) partitionSetNewActivePage(bucket); 1002 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucket->activePagesHead != page); 1003 | 1004 | partitionPageSetRawSize(page, 0); 1005 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!partitionPageGetRawSize(page)); 1006 | 1007 | partitionRegisterEmptyPage(page); 1008 | } else { 1009 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!partitionBucketIsDirectMapped(bucket)); 1010 | // Ensure that the page is full. That's the only valid case if we 1011 | // arrive here. 1012 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots < 0); 1013 | // A transition of numAllocatedSlots from 0 to -1 is not legal, and 1014 | // likely indicates a double-free. 1015 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots != -1); 1016 | page->numAllocatedSlots = -page->numAllocatedSlots - 2; 1017 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); 1018 | // Fully used page became partially used. It must be put back on the 1019 | // non-full page list. Also make it the current page to increase the 1020 | // chances of it being filled up again. The old current page will be 1021 | // the next page. 1022 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!page->nextPage); 1023 | if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) 1024 | page->nextPage = bucket->activePagesHead; 1025 | bucket->activePagesHead = page; 1026 | --bucket->numFullPages; 1027 | // Special case: for a partition page with just a single slot, it may 1028 | // now be empty and we want to run it through the empty logic. 1029 | if (UNLIKELY(page->numAllocatedSlots == 0)) 1030 | partitionFreeSlowPath(page); 1031 | } 1032 | } 1033 | 1034 | bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPage* page, size_t rawSize) 1035 | { 1036 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionBucketIsDirectMapped(page->bucket)); 1037 | 1038 | rawSize = partitionCookieSizeAdjustAdd(rawSize); 1039 | 1040 | // Note that the new size might be a bucketed size; this function is called 1041 | // whenever we're reallocating a direct mapped allocation. 1042 | size_t newSize = partitionDirectMapSize(rawSize); 1043 | if (newSize < kGenericMinDirectMappedDownsize) 1044 | return false; 1045 | 1046 | // bucket->slotSize is the current size of the allocation. 
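// A worked example of the in-place shrink threshold tested below, assuming
// kSystemPageSize = 4KB. Shrinking in place is refused when the new size would
// fall below 80% of the existing mapping:
//
//   mapSize = 1,048,576 bytes = 256 system pages
//   newSize =   786,432 bytes = 192 pages: 192 * 5 = 960  <  256 * 4 = 1024 -> return false (realloc copies instead)
//   newSize =   917,504 bytes = 224 pages: 224 * 5 = 1120 >= 1024           -> decommit the tail in place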
1047 | size_t currentSize = page->bucket->slotSize; 1048 | if (newSize == currentSize) 1049 | return true; 1050 | 1051 | char* charPtr = static_cast(partitionPageToPointer(page)); 1052 | 1053 | if (newSize < currentSize) { 1054 | size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize; 1055 | 1056 | // Don't reallocate in-place if new size is less than 80 % of the full 1057 | // map size, to avoid holding on to too much unused address space. 1058 | if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4) 1059 | return false; 1060 | 1061 | // Shrink by decommitting unneeded pages and making them inaccessible. 1062 | size_t decommitSize = currentSize - newSize; 1063 | partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); 1064 | setSystemPagesInaccessible(charPtr + newSize, decommitSize); 1065 | } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { 1066 | // Grow within the actually allocated memory. Just need to make the 1067 | // pages accessible again. 1068 | size_t recommitSize = newSize - currentSize; 1069 | bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); 1070 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(ret); 1071 | partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); 1072 | 1073 | #if ENABLE(ASSERT) 1074 | memset(charPtr + currentSize, kUninitializedByte, recommitSize); 1075 | #endif 1076 | } else { 1077 | // We can't perform the realloc in-place. 1078 | // TODO: support this too when possible. 1079 | return false; 1080 | } 1081 | 1082 | #if ENABLE(ASSERT) 1083 | // Write a new trailing cookie. 1084 | partitionCookieWriteValue(charPtr + rawSize - kCookieSize, page); 1085 | #endif 1086 | 1087 | partitionPageSetRawSize(page, rawSize); 1088 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageGetRawSize(page) == rawSize); 1089 | 1090 | page->bucket->slotSize = newSize; 1091 | return true; 1092 | } 1093 | 1094 | void* partitionReallocGeneric(PartitionRootGeneric* root, void* ptr, size_t newSize) 1095 | { 1096 | #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 1097 | return realloc(ptr, newSize); 1098 | #else 1099 | if (UNLIKELY(!ptr)) 1100 | return partitionAllocGeneric(root, newSize); 1101 | if (UNLIKELY(!newSize)) { 1102 | partitionFreeGeneric(root, ptr); 1103 | return 0; 1104 | } 1105 | 1106 | if (newSize > kGenericMaxDirectMapped) 1107 | partitionExcessiveAllocationSize(); 1108 | 1109 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); 1110 | 1111 | PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); 1112 | 1113 | if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { 1114 | // We may be able to perform the realloc in place by changing the 1115 | // accessibility of memory pages and, if reducing the size, decommitting 1116 | // them. 1117 | if (partitionReallocDirectMappedInPlace(root, page, newSize)) 1118 | return ptr; 1119 | } 1120 | 1121 | size_t actualNewSize = partitionAllocActualSize(root, newSize); 1122 | size_t actualOldSize = partitionAllocGetSize(ptr); 1123 | 1124 | // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the 1125 | // new size is a significant percentage smaller. We could do the same if we 1126 | // determine it is a win. 1127 | if (actualNewSize == actualOldSize) { 1128 | // Trying to allocate a block of size newSize would give us a block of 1129 | // the same size as the one we've already got, so no point in doing 1130 | // anything here. 
1131 | return ptr; 1132 | } 1133 | 1134 | // This realloc cannot be resized in-place. Sadness. 1135 | void* ret = partitionAllocGeneric(root, newSize); 1136 | size_t copySize = actualOldSize; 1137 | if (newSize < copySize) 1138 | copySize = newSize; 1139 | 1140 | memcpy(ret, ptr, copySize); 1141 | partitionFreeGeneric(root, ptr); 1142 | return ret; 1143 | #endif 1144 | } 1145 | 1146 | static size_t partitionPurgePage(PartitionPage* page, bool discard) 1147 | { 1148 | const PartitionBucket* bucket = page->bucket; 1149 | size_t slotSize = bucket->slotSize; 1150 | if (slotSize < kSystemPageSize || !page->numAllocatedSlots) 1151 | return 0; 1152 | 1153 | size_t bucketNumSlots = partitionBucketSlots(bucket); 1154 | size_t discardableBytes = 0; 1155 | 1156 | size_t rawSize = partitionPageGetRawSize(const_cast(page)); 1157 | if (rawSize) { 1158 | uint32_t usedBytes = static_cast(partitionRoundUpToSystemPage(rawSize)); 1159 | discardableBytes = bucket->slotSize - usedBytes; 1160 | if (discardableBytes && discard) { 1161 | char* ptr = reinterpret_cast(partitionPageToPointer(page)); 1162 | ptr += usedBytes; 1163 | discardSystemPages(ptr, discardableBytes); 1164 | } 1165 | return discardableBytes; 1166 | } 1167 | 1168 | const size_t maxSlotCount = (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; 1169 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucketNumSlots <= maxSlotCount); 1170 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numUnprovisionedSlots < bucketNumSlots); 1171 | size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; 1172 | char slotUsage[maxSlotCount]; 1173 | size_t lastSlot = static_cast(-1); 1174 | memset(slotUsage, 1, numSlots); 1175 | char* ptr = reinterpret_cast(partitionPageToPointer(page)); 1176 | PartitionFreelistEntry* entry = page->freelistHead; 1177 | // First, walk the freelist for this page and make a bitmap of which slots 1178 | // are not in use. 1179 | while (entry) { 1180 | size_t slotIndex = (reinterpret_cast(entry) - ptr) / slotSize; 1181 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(slotIndex < numSlots); 1182 | slotUsage[slotIndex] = 0; 1183 | entry = partitionFreelistMask(entry->next); 1184 | // If we have a slot where the masked freelist entry is 0, we can 1185 | // actually discard that freelist entry because touching a discarded 1186 | // page is guaranteed to return original content or 0. 1187 | // (Note that this optimization won't fire on big endian machines 1188 | // because the masking function is negation.) 1189 | if (!partitionFreelistMask(entry)) 1190 | lastSlot = slotIndex; 1191 | } 1192 | 1193 | // If the slot(s) at the end of the slot span are not in used, we can 1194 | // truncate them entirely and rewrite the freelist. 1195 | size_t truncatedSlots = 0; 1196 | while (!slotUsage[numSlots - 1]) { 1197 | truncatedSlots++; 1198 | numSlots--; 1199 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(numSlots); 1200 | } 1201 | // First, do the work of calculating the discardable bytes. Don't actually 1202 | // discard anything unless the discard flag was passed in. 1203 | char* beginPtr = nullptr; 1204 | char* endPtr = nullptr; 1205 | size_t unprovisionedBytes = 0; 1206 | if (truncatedSlots) { 1207 | beginPtr = ptr + (numSlots * slotSize); 1208 | endPtr = beginPtr + (slotSize * truncatedSlots); 1209 | beginPtr = reinterpret_cast(partitionRoundUpToSystemPage(reinterpret_cast(beginPtr))); 1210 | // We round the end pointer here up and not down because we're at the 1211 | // end of a slot span, so we "own" all the way up the page boundary. 
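// A worked example of the rounding below, assuming kSystemPageSize = 4KB and a
// hypothetical span of 16 slots of 3,072 bytes. If the last 3 slots are unused,
// the loop above leaves numSlots = 13 and truncatedSlots = 3, so:
//
//   beginPtr = ptr + 13 * 3,072 = ptr + 39,936, rounded up to ptr + 40,960
//   endPtr   = ptr + 16 * 3,072 = ptr + 49,152, already page aligned
//
// giving 8,192 discardable bytes. The end pointer is rounded up (not down)
// because the slot span owns the memory all the way to the next page boundary.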
1212 | endPtr = reinterpret_cast(partitionRoundUpToSystemPage(reinterpret_cast(endPtr))); 1213 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(endPtr <= ptr + partitionBucketBytes(bucket)); 1214 | if (beginPtr < endPtr) { 1215 | unprovisionedBytes = endPtr - beginPtr; 1216 | discardableBytes += unprovisionedBytes; 1217 | } 1218 | } 1219 | if (unprovisionedBytes && discard) { 1220 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(truncatedSlots > 0); 1221 | size_t numNewEntries = 0; 1222 | page->numUnprovisionedSlots += truncatedSlots; 1223 | // Rewrite the freelist. 1224 | PartitionFreelistEntry** entryPtr = &page->freelistHead; 1225 | for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { 1226 | if (slotUsage[slotIndex]) 1227 | continue; 1228 | PartitionFreelistEntry* entry = reinterpret_cast(ptr + (slotSize * slotIndex)); 1229 | *entryPtr = partitionFreelistMask(entry); 1230 | entryPtr = reinterpret_cast(entry); 1231 | numNewEntries++; 1232 | } 1233 | // Terminate the freelist chain. 1234 | *entryPtr = nullptr; 1235 | // The freelist head is stored unmasked. 1236 | page->freelistHead = partitionFreelistMask(page->freelistHead); 1237 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(numNewEntries == numSlots - page->numAllocatedSlots); 1238 | // Discard the memory. 1239 | discardSystemPages(beginPtr, unprovisionedBytes); 1240 | } 1241 | 1242 | // Next, walk the slots and for any not in use, consider where the system 1243 | // page boundaries occur. We can release any system pages back to the 1244 | // system as long as we don't interfere with a freelist pointer or an 1245 | // adjacent slot. 1246 | for (size_t i = 0; i < numSlots; ++i) { 1247 | if (slotUsage[i]) 1248 | continue; 1249 | // The first address we can safely discard is just after the freelist 1250 | // pointer. There's one quirk: if the freelist pointer is actually a 1251 | // null, we can discard that pointer value too. 1252 | char* beginPtr = ptr + (i * slotSize); 1253 | char* endPtr = beginPtr + slotSize; 1254 | if (i != lastSlot) 1255 | beginPtr += sizeof(PartitionFreelistEntry); 1256 | beginPtr = reinterpret_cast(partitionRoundUpToSystemPage(reinterpret_cast(beginPtr))); 1257 | endPtr = reinterpret_cast(partitionRoundDownToSystemPage(reinterpret_cast(endPtr))); 1258 | if (beginPtr < endPtr) { 1259 | size_t partialSlotBytes = endPtr - beginPtr; 1260 | discardableBytes += partialSlotBytes; 1261 | if (discard) 1262 | discardSystemPages(beginPtr, partialSlotBytes); 1263 | } 1264 | } 1265 | return discardableBytes; 1266 | } 1267 | 1268 | static void partitionPurgeBucket(PartitionBucket* bucket) 1269 | { 1270 | if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { 1271 | for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) { 1272 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != &PartitionRootGeneric::gSeedPage); 1273 | (void) partitionPurgePage(page, true); 1274 | } 1275 | } 1276 | } 1277 | 1278 | void partitionPurgeMemory(PartitionRoot* root, int flags) 1279 | { 1280 | if (flags & PartitionPurgeDecommitEmptyPages) 1281 | partitionDecommitEmptyPages(root); 1282 | // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages 1283 | // here because that flag is only useful for allocations >= system page 1284 | // size. We only have allocations that large inside generic partitions 1285 | // at the moment. 
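// For generic partitions, by contrast, both purge flags are useful; a caller
// wanting maximum reclaim might do something like (illustrative only, "root"
// being the caller's PartitionRootGeneric*):
//
//   partitionPurgeMemoryGeneric(root,
//       PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages);
//
// which is handled by partitionPurgeMemoryGeneric() just below.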
1286 | } 1287 | 1288 | void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) 1289 | { 1290 | spinLockLock(&root->lock); 1291 | if (flags & PartitionPurgeDecommitEmptyPages) 1292 | partitionDecommitEmptyPages(root); 1293 | if (flags & PartitionPurgeDiscardUnusedSystemPages) { 1294 | for (size_t i = 0; i < kGenericNumBuckets; ++i) { 1295 | PartitionBucket* bucket = &root->buckets[i]; 1296 | if (bucket->slotSize >= kSystemPageSize) 1297 | partitionPurgeBucket(bucket); 1298 | } 1299 | } 1300 | spinLockUnlock(&root->lock); 1301 | } 1302 | 1303 | static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page) 1304 | { 1305 | uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); 1306 | 1307 | if (partitionPageStateIsDecommitted(page)) { 1308 | ++statsOut->numDecommittedPages; 1309 | return; 1310 | } 1311 | 1312 | statsOut->discardableBytes += partitionPurgePage(const_cast(page), false); 1313 | 1314 | size_t rawSize = partitionPageGetRawSize(const_cast(page)); 1315 | if (rawSize) 1316 | statsOut->activeBytes += static_cast(rawSize); 1317 | else 1318 | statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlotSize); 1319 | 1320 | size_t pageBytesResident = partitionRoundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * statsOut->bucketSlotSize); 1321 | statsOut->residentBytes += pageBytesResident; 1322 | if (partitionPageStateIsEmpty(page)) { 1323 | statsOut->decommittableBytes += pageBytesResident; 1324 | ++statsOut->numEmptyPages; 1325 | } else if (partitionPageStateIsFull(page)) { 1326 | ++statsOut->numFullPages; 1327 | } else { 1328 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsActive(page)); 1329 | ++statsOut->numActivePages; 1330 | } 1331 | } 1332 | 1333 | static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, const PartitionBucket* bucket) 1334 | { 1335 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!partitionBucketIsDirectMapped(bucket)); 1336 | statsOut->isValid = false; 1337 | // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), 1338 | // the bucket might still need to be reported if it has a list of empty, 1339 | // decommitted or full pages. 
1340 | if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && !bucket->emptyPagesHead && !bucket->decommittedPagesHead && !bucket->numFullPages) 1341 | return; 1342 | 1343 | memset(statsOut, '\0', sizeof(*statsOut)); 1344 | statsOut->isValid = true; 1345 | statsOut->isDirectMap = false; 1346 | statsOut->numFullPages = static_cast(bucket->numFullPages); 1347 | statsOut->bucketSlotSize = bucket->slotSize; 1348 | uint16_t bucketNumSlots = partitionBucketSlots(bucket); 1349 | size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; 1350 | statsOut->allocatedPageSize = partitionBucketBytes(bucket); 1351 | statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; 1352 | statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; 1353 | 1354 | for (const PartitionPage* page = bucket->emptyPagesHead; page; page = page->nextPage) { 1355 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsEmpty(page) || partitionPageStateIsDecommitted(page)); 1356 | partitionDumpPageStats(statsOut, page); 1357 | } 1358 | for (const PartitionPage* page = bucket->decommittedPagesHead; page; page = page->nextPage) { 1359 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageStateIsDecommitted(page)); 1360 | partitionDumpPageStats(statsOut, page); 1361 | } 1362 | 1363 | if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { 1364 | for (const PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) { 1365 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page != &PartitionRootGeneric::gSeedPage); 1366 | partitionDumpPageStats(statsOut, page); 1367 | } 1368 | } 1369 | } 1370 | 1371 | void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper) 1372 | { 1373 | PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; 1374 | static const size_t kMaxReportableDirectMaps = 4096; 1375 | uint32_t directMapLengths[kMaxReportableDirectMaps]; 1376 | size_t numDirectMappedAllocations = 0; 1377 | 1378 | spinLockLock(&partition->lock); 1379 | 1380 | for (size_t i = 0; i < kGenericNumBuckets; ++i) { 1381 | const PartitionBucket* bucket = &partition->buckets[i]; 1382 | // Don't report the pseudo buckets that the generic allocator sets up in 1383 | // order to preserve a fast size->bucket map (see 1384 | // partitionAllocGenericInit for details). 1385 | if (!bucket->activePagesHead) 1386 | bucketStats[i].isValid = false; 1387 | else 1388 | partitionDumpBucketStats(&bucketStats[i], bucket); 1389 | } 1390 | 1391 | for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) { 1392 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!extent->nextExtent || extent->nextExtent->prevExtent == extent); 1393 | directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; 1394 | ++numDirectMappedAllocations; 1395 | if (numDirectMappedAllocations == kMaxReportableDirectMaps) 1396 | break; 1397 | } 1398 | 1399 | spinLockUnlock(&partition->lock); 1400 | 1401 | // partitionsDumpBucketStats is called after collecting stats because it 1402 | // can try to allocate using PartitionAllocGeneric and it can't obtain the 1403 | // lock. 
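// A minimal sketch of the consumer side (illustrative only; PrintingStatsDumper
// and root are hypothetical, but the callback signatures are the pure virtuals
// declared on PartitionStatsDumper in PartitionAlloc.h):
//
//   class PrintingStatsDumper : public PartitionStatsDumper {
//   public:
//       void partitionDumpTotals(const char* name, const PartitionMemoryStats* stats) override
//       {
//           printf("%s: %zu active / %zu resident / %zu committed\n", name,
//               stats->totalActiveBytes, stats->totalResidentBytes, stats->totalCommittedBytes);
//       }
//       void partitionsDumpBucketStats(const char* name, const PartitionBucketMemoryStats* stats) override
//       {
//           printf("  bucket %u: %u active bytes\n", stats->bucketSlotSize, stats->activeBytes);
//       }
//   };
//
//   PrintingStatsDumper dumper;
//   partitionDumpStatsGeneric(root, "example-partition", false /* isLightDump */, &dumper);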
1404 | PartitionMemoryStats partitionStats = { 0 }; 1405 | partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + partition->totalSizeOfDirectMappedPages; 1406 | partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; 1407 | for (size_t i = 0; i < kGenericNumBuckets; ++i) { 1408 | if (bucketStats[i].isValid) { 1409 | partitionStats.totalResidentBytes += bucketStats[i].residentBytes; 1410 | partitionStats.totalActiveBytes += bucketStats[i].activeBytes; 1411 | partitionStats.totalDecommittableBytes += bucketStats[i].decommittableBytes; 1412 | partitionStats.totalDiscardableBytes += bucketStats[i].discardableBytes; 1413 | if (!isLightDump) 1414 | partitionStatsDumper->partitionsDumpBucketStats(partitionName, &bucketStats[i]); 1415 | } 1416 | } 1417 | 1418 | size_t directMappedAllocationsTotalSize = 0; 1419 | for (size_t i = 0; i < numDirectMappedAllocations; ++i) { 1420 | PartitionBucketMemoryStats stats; 1421 | memset(&stats, '\0', sizeof(stats)); 1422 | stats.isValid = true; 1423 | stats.isDirectMap = true; 1424 | stats.numFullPages = 1; 1425 | uint32_t size = directMapLengths[i]; 1426 | stats.allocatedPageSize = size; 1427 | stats.bucketSlotSize = size; 1428 | stats.activeBytes = size; 1429 | stats.residentBytes = size; 1430 | directMappedAllocationsTotalSize += size; 1431 | partitionStatsDumper->partitionsDumpBucketStats(partitionName, &stats); 1432 | } 1433 | partitionStats.totalResidentBytes += directMappedAllocationsTotalSize; 1434 | partitionStats.totalActiveBytes += directMappedAllocationsTotalSize; 1435 | partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); 1436 | } 1437 | 1438 | void partitionDumpStats(PartitionRoot* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper) 1439 | { 1440 | static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); 1441 | PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; 1442 | const size_t partitionNumBuckets = partition->numBuckets; 1443 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionNumBuckets <= kMaxReportableBuckets); 1444 | 1445 | for (size_t i = 0; i < partitionNumBuckets; ++i) 1446 | partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); 1447 | 1448 | // partitionsDumpBucketStats is called after collecting stats because it 1449 | // can use PartitionAlloc to allocate and this can affect the statistics. 
1450 | PartitionMemoryStats partitionStats = { 0 }; 1451 | partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; 1452 | partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; 1453 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!partition->totalSizeOfDirectMappedPages); 1454 | for (size_t i = 0; i < partitionNumBuckets; ++i) { 1455 | if (memoryStats[i].isValid) { 1456 | partitionStats.totalResidentBytes += memoryStats[i].residentBytes; 1457 | partitionStats.totalActiveBytes += memoryStats[i].activeBytes; 1458 | partitionStats.totalDecommittableBytes += memoryStats[i].decommittableBytes; 1459 | partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; 1460 | if (!isLightDump) 1461 | partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); 1462 | } 1463 | } 1464 | partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); 1465 | } 1466 | 1467 | } // namespace WTF 1468 | 1469 | -------------------------------------------------------------------------------- /PartitionAlloc.h: -------------------------------------------------------------------------------- 1 | #include "config.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | /* 8 | * Copyright (C) 2013 Google Inc. All rights reserved. 9 | * 10 | * Redistribution and use in source and binary forms, with or without 11 | * modification, are permitted provided that the following conditions are 12 | * met: 13 | * 14 | * * Redistributions of source code must retain the above copyright 15 | * notice, this list of conditions and the following disclaimer. 16 | * * Redistributions in binary form must reproduce the above 17 | * copyright notice, this list of conditions and the following disclaimer 18 | * in the documentation and/or other materials provided with the 19 | * distribution. 20 | * * Neither the name of Google Inc. nor the names of its 21 | * contributors may be used to endorse or promote products derived from 22 | * this software without specific prior written permission. 23 | * 24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 27 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 28 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 29 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 30 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 31 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 32 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 33 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 34 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 | */ 36 | 37 | #ifndef WTF_PartitionAlloc_h 38 | #define WTF_PartitionAlloc_h 39 | 40 | // DESCRIPTION 41 | // partitionAlloc() / partitionAllocGeneric() and partitionFree() / 42 | // partitionFreeGeneric() are approximately analagous to malloc() and free(). 43 | // 44 | // The main difference is that a PartitionRoot / PartitionRootGeneric object 45 | // must be supplied to these functions, representing a specific "heap partition" 46 | // that will be used to satisfy the allocation. Different partitions are 47 | // guaranteed to exist in separate address spaces, including being separate from 48 | // the main system heap. 
If the contained objects are all freed, physical memory 49 | // is returned to the system but the address space remains reserved. 50 | // 51 | // THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE 52 | // SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To 53 | // minimize the instruction count to the fullest extent possible, the 54 | // PartitionRoot is really just a header adjacent to other data areas provided 55 | // by the allocator class. 56 | // 57 | // The partitionAlloc() variant of the API has the following caveats: 58 | // - Allocations and frees against a single partition must be single threaded. 59 | // - Allocations must not exceed a max size, chosen at compile-time via a 60 | // templated parameter to PartitionAllocator. 61 | // - Allocation sizes must be aligned to the system pointer size. 62 | // - Allocations are bucketed exactly according to size. 63 | // 64 | // And for partitionAllocGeneric(): 65 | // - Multi-threaded use against a single partition is ok; locking is handled. 66 | // - Allocations of any arbitrary size can be handled (subject to a limit of 67 | // INT_MAX bytes for security reasons). 68 | // - Bucketing is by approximate size, for example an allocation of 4000 bytes 69 | // might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and 70 | // keep worst-case waste to ~10%. 71 | // 72 | // The allocators are designed to be extremely fast, thanks to the following 73 | // properties and design: 74 | // - Just a single (reasonably predicatable) branch in the hot / fast path for 75 | // both allocating and (significantly) freeing. 76 | // - A minimal number of operations in the hot / fast path, with the slow paths 77 | // in separate functions, leading to the possibility of inlining. 78 | // - Each partition page (which is usually multiple physical pages) has a 79 | // metadata structure which allows fast mapping of free() address to an 80 | // underlying bucket. 81 | // - Supports a lock-free API for fast performance in single-threaded cases. 82 | // - The freelist for a given bucket is split across a number of partition 83 | // pages, enabling various simple tricks to try and minimize fragmentation. 84 | // - Fine-grained bucket sizes leading to less waste and better packing. 85 | // 86 | // The following security properties are provided at this time: 87 | // - Linear overflows cannot corrupt into the partition. 88 | // - Linear overflows cannot corrupt out of the partition. 89 | // - Freed pages will only be re-used within the partition. 90 | // (exception: large allocations > ~1MB) 91 | // - Freed pages will only hold same-sized objects when re-used. 92 | // - Dereference of freelist pointer should fault. 93 | // - Out-of-line main metadata: linear over or underflow cannot corrupt it. 94 | // - Partial pointer overwrite of freelist pointer should fault. 95 | // - Rudimentary double-free detection. 96 | // - Large allocations (> ~1MB) are guard-paged at the beginning and end. 97 | // 98 | // The following security properties could be investigated in the future: 99 | // - Per-object bucketing (instead of per-size) is mostly available at the API, 100 | // but not used yet. 101 | // - No randomness of freelist entries or bucket position. 102 | // - Better checking for wild pointers in free(). 103 | // - Better freelist masking function to guarantee fault on 32-bit. 
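// A minimal usage sketch of the generic API described above (illustrative only:
// it assumes the PartitionAllocatorGeneric wrapper mentioned earlier exposes the
// conventional init()/root()/shutdown() members; partitionAllocGeneric() and
// partitionFreeGeneric() are the entry points this header provides):
//
//   PartitionAllocatorGeneric allocator;
//   allocator.init();
//   void* p = partitionAllocGeneric(allocator.root(), 4000);
//   // ... use up to 4000 bytes at p ...
//   partitionFreeGeneric(allocator.root(), p);
//   allocator.shutdown();
//
// The 4000-byte request above lands in a 4096-byte bucket, matching the example
// given earlier: 4000 has order 12 (its most significant bit is the 12th),
// buckets in that order step by 2048 / 8 = 256 bytes (2048, 2304, ..., 3840),
// and 4000 lies past the last step, so it is served from the first bucket of
// the next order.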
104 |
105 | #include "Assertions.h"
106 | #include "BitwiseOperations.h"
107 | #include "ByteSwap.h"
108 | #include "PageAllocator.h"
109 | #include "SpinLock.h"
110 |
111 | #include
112 |
113 | #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
114 | #include
115 | #endif
116 |
117 | #if ENABLE(ASSERT)
118 | #include
119 | #endif
120 |
121 | namespace WTF {
122 |
123 | // Allocation granularity of sizeof(void*) bytes.
124 | static const size_t kAllocationGranularity = sizeof(void*);
125 | static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
126 | static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
127 |
128 | // Underlying partition storage pages are a power-of-two size. It is typical
129 | // for a partition page to be based on multiple system pages. Most references to
130 | // "page" refer to partition pages.
131 | // We also have the concept of "super pages" -- these are the underlying system
132 | // allocations we make. Super pages contain multiple partition pages inside them
133 | // and include space for a small amount of metadata per partition page.
134 | // Inside super pages, we store "slot spans". A slot span is a contiguous range
135 | // of one or more partition pages that stores allocations of the same size.
136 | // Slot span sizes are adjusted depending on the allocation size, to make sure
137 | // the packing does not lead to unused (wasted) space at the end of the last
138 | // system page of the span. For our current max slot span size of 64k and other
139 | // constant values, we pack _all_ partitionAllocGeneric() sizes perfectly up
140 | // against the end of a system page.
141 | static const size_t kPartitionPageShift = 14; // 16KB
142 | static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
143 | static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
144 | static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
145 | static const size_t kMaxPartitionPagesPerSlotSpan = 4;
146 |
147 | // To avoid fragmentation via never-used freelist entries, we hand out partition
148 | // freelist sections gradually, in units of the dominant system page size.
149 | // What we're actually doing is avoiding filling the full partition page
150 | // (typically 16KB) with freelist pointers right away. Writing freelist
151 | // pointers will fault and dirty a private page, which is very wasteful if we
152 | // never actually store objects there.
153 | static const size_t kNumSystemPagesPerPartitionPage = kPartitionPageSize / kSystemPageSize;
154 | static const size_t kMaxSystemPagesPerSlotSpan = kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
155 |
156 | // We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
157 | // These chunks are called "super pages". We do this so that we can store
158 | // metadata in the first few pages of each 2MB aligned section. This leads to
159 | // a very fast free(). We specifically choose 2MB because this virtual address
160 | // block represents a full but single PTE allocation on ARM, ia32 and x64.
161 | //
162 | // The layout of the super page is as follows. The sizes below are the same
163 | // for 32 bit and 64 bit.
164 | //
165 | // | Guard page (4KB) | Metadata page (4KB) | Guard pages (8KB) | Slot span | Slot span | ... | Slot span | Guard page (4KB) |
166 | //
167 | // - Each slot span is a contiguous range of one or more PartitionPages.
168 | // - The metadata page has the following format.
Note that the PartitionPage 169 | // that is not at the head of a slot span is "unused". In other words, 170 | // the metadata for the slot span is stored only in the first PartitionPage 171 | // of the slot span. Metadata accesses to other PartitionPages are 172 | // redirected to the first PartitionPage. 173 | // 174 | // | SuperPageExtentEntry (32B) | PartitionPage of slot span 1 (32B, used) | PartitionPage of slot span 1 (32B, unused) | PartitionPage of slot span 1 (32B, unused) | PartitionPage of slot span 2 (32B, used) | PartitionPage of slot span 3 (32B, used) | ... | PartitionPage of slot span N (32B, unused) | 175 | // 176 | // A direct mapped page has a similar layout to fake it looking like a super page: 177 | // 178 | // | Guard page (4KB) | Metadata page (4KB) | Guard pages (8KB) | Direct mapped object | Guard page (4KB) | 179 | // 180 | // - The metadata page has the following layout: 181 | // 182 | // | SuperPageExtentEntry (32B) | PartitionPage (32B) | PartitionBucket (32B) | PartitionDirectMapExtent (8B) | 183 | static const size_t kSuperPageShift = 21; // 2MB 184 | static const size_t kSuperPageSize = 1 << kSuperPageShift; 185 | static const size_t kSuperPageOffsetMask = kSuperPageSize - 1; 186 | static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask; 187 | static const size_t kNumPartitionPagesPerSuperPage = kSuperPageSize / kPartitionPageSize; 188 | 189 | static const size_t kPageMetadataShift = 5; // 32 bytes per partition page. 190 | static const size_t kPageMetadataSize = 1 << kPageMetadataShift; 191 | 192 | // The following kGeneric* constants apply to the generic variants of the API. 193 | // The "order" of an allocation is closely related to the power-of-two size of 194 | // the allocation. More precisely, the order is the bit index of the 195 | // most-significant-bit in the allocation size, where the bit numbers starts 196 | // at index 1 for the least-significant-bit. 197 | // In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2 198 | // covers 2->3, order 3 covers 4->7, order 4 covers 8->15. 199 | static const size_t kGenericMinBucketedOrder = 4; // 8 bytes. 200 | static const size_t kGenericMaxBucketedOrder = 20; // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB) 201 | static const size_t kGenericNumBucketedOrders = (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1; 202 | static const size_t kGenericNumBucketsPerOrderBits = 3; // Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144, 160, ..., 240 203 | static const size_t kGenericNumBucketsPerOrder = 1 << kGenericNumBucketsPerOrderBits; 204 | static const size_t kGenericNumBuckets = kGenericNumBucketedOrders * kGenericNumBucketsPerOrder; 205 | static const size_t kGenericSmallestBucket = 1 << (kGenericMinBucketedOrder - 1); 206 | static const size_t kGenericMaxBucketSpacing = 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits); 207 | static const size_t kGenericMaxBucketed = (1 << (kGenericMaxBucketedOrder - 1)) + ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing); 208 | static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1; // Limit when downsizing a direct mapping using realloc(). 209 | static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize; 210 | static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT; 211 | 212 | // Constants for the memory reclaim logic. 
213 | static const size_t kMaxFreeableSpans = 16; 214 | 215 | // If the total size in bytes of allocated but not committed pages exceeds this 216 | // value (probably it is a "out of virtual address space" crash), 217 | // a special crash stack trace is generated at |partitionOutOfMemory|. 218 | // This is to distinguish "out of virtual address space" from 219 | // "out of physical memory" in crash reports. 220 | static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB 221 | 222 | #if ENABLE(ASSERT) 223 | // Uninitialized memory 224 | static const unsigned char kUninitializedByte = 0xDE; 225 | // Freed memory 226 | static const unsigned char kFreedByte = 0xDF; 227 | static const size_t kCookieSize = 16; // Handles alignment up to XMM instructions on Intel. 228 | #endif 229 | 230 | struct PartitionBucket; 231 | struct PartitionRootBase; 232 | 233 | struct PartitionFreelistEntry { 234 | PartitionFreelistEntry* next; 235 | }; 236 | 237 | // Some notes on page states. A page can be in one of four major states: 238 | // 1) Active. 239 | // 2) Full. 240 | // 3) Empty. 241 | // 4) Decommitted. 242 | // An active page has available free slots. A full page has no free slots. An 243 | // empty page has no free slots, and a decommitted page is an empty page that 244 | // had its backing memory released back to the system. 245 | // There are two linked lists tracking the pages. The "active page" list is an 246 | // approximation of a list of active pages. It is an approximation because 247 | // full, empty and decommitted pages may briefly be present in the list until 248 | // we next do a scan over it. 249 | // The "empty page" list is an accurate list of pages which are either empty 250 | // or decommitted. 251 | // 252 | // The significant page transitions are: 253 | // - free() will detect when a full page has a slot free()'d and immediately 254 | // return the page to the head of the active list. 255 | // - free() will detect when a page is fully emptied. It _may_ add it to the 256 | // empty list or it _may_ leave it on the active list until a future list scan. 257 | // - malloc() _may_ scan the active page list in order to fulfil the request. 258 | // If it does this, full, empty and decommitted pages encountered will be 259 | // booted out of the active list. If there are no suitable active pages found, 260 | // an empty or decommitted page (if one exists) will be pulled from the empty 261 | // list on to the active list. 262 | struct PartitionPage { 263 | PartitionFreelistEntry* freelistHead; 264 | PartitionPage* nextPage; 265 | PartitionBucket* bucket; 266 | int16_t numAllocatedSlots; // Deliberately signed, 0 for empty or decommitted page, -n for full pages. 267 | uint16_t numUnprovisionedSlots; 268 | uint16_t pageOffset; 269 | int16_t emptyCacheIndex; // -1 if not in the empty cache. 270 | }; 271 | 272 | struct PartitionBucket { 273 | PartitionPage* activePagesHead; // Accessed most in hot path => goes first. 274 | PartitionPage* emptyPagesHead; 275 | PartitionPage* decommittedPagesHead; 276 | uint32_t slotSize; 277 | uint16_t numSystemPagesPerSlotSpan; 278 | uint16_t numFullPages; 279 | }; 280 | 281 | // An "extent" is a span of consecutive superpages. We link to the partition's 282 | // next extent (if there is one) at the very start of a superpage's metadata 283 | // area. 
284 | struct PartitionSuperPageExtentEntry { 285 | PartitionRootBase* root; 286 | char* superPageBase; 287 | char* superPagesEnd; 288 | PartitionSuperPageExtentEntry* next; 289 | }; 290 | 291 | struct PartitionDirectMapExtent { 292 | PartitionDirectMapExtent* nextExtent; 293 | PartitionDirectMapExtent* prevExtent; 294 | PartitionBucket* bucket; 295 | size_t mapSize; // Mapped size, not including guard pages and meta-data. 296 | }; 297 | 298 | struct WTF_EXPORT PartitionRootBase { 299 | size_t totalSizeOfCommittedPages; 300 | size_t totalSizeOfSuperPages; 301 | size_t totalSizeOfDirectMappedPages; 302 | // Invariant: totalSizeOfCommittedPages <= totalSizeOfSuperPages + totalSizeOfDirectMappedPages. 303 | unsigned numBuckets; 304 | unsigned maxAllocation; 305 | bool initialized; 306 | char* nextSuperPage; 307 | char* nextPartitionPage; 308 | char* nextPartitionPageEnd; 309 | PartitionSuperPageExtentEntry* currentExtent; 310 | PartitionSuperPageExtentEntry* firstExtent; 311 | PartitionDirectMapExtent* directMapList; 312 | PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; 313 | int16_t globalEmptyPageRingIndex; 314 | uintptr_t invertedSelf; 315 | 316 | static int gInitializedLock; 317 | static bool gInitialized; 318 | // gSeedPage is used as a sentinel to indicate that there is no page 319 | // in the active page list. We can use nullptr, but in that case we need 320 | // to add a null-check branch to the hot allocation path. We want to avoid 321 | // that. 322 | static PartitionPage gSeedPage; 323 | static PartitionBucket gPagedBucket; 324 | 325 | // This is the maximum size of the delayed freelist (16) 326 | size_t delayed_free_list_max_sz; 327 | 328 | // A pointer to a vector of pointers we are waiting to free() 329 | std::vector delayed_free_list; 330 | 331 | // User heap allocations have a canary before and after them 332 | // This canary value is per-partition-root and XOR'd by the 333 | // the last byte of the address they're located at 334 | bool kCookieInitialized; 335 | unsigned char kCookieValue[WTF::kCookieSize]; 336 | }; 337 | 338 | // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. 339 | struct PartitionRoot : public PartitionRootBase { 340 | // The PartitionAlloc templated class ensures the following is correct. 341 | ALWAYS_INLINE PartitionBucket* buckets() { return reinterpret_cast(this + 1); } 342 | ALWAYS_INLINE const PartitionBucket* buckets() const { return reinterpret_cast(this + 1); } 343 | }; 344 | 345 | // Never instantiate a PartitionRootGeneric directly, instead use PartitionAllocatorGeneric. 346 | struct PartitionRootGeneric : public PartitionRootBase { 347 | int lock; 348 | // Some pre-computed constants. 349 | size_t orderIndexShifts[kBitsPerSizet + 1]; 350 | size_t orderSubIndexMasks[kBitsPerSizet + 1]; 351 | // The bucket lookup table lets us map a size_t to a bucket quickly. 352 | // The trailing +1 caters for the overflow case for very large allocation sizes. 353 | // It is one flat array instead of a 2D array because in the 2D world, we'd 354 | // need to index array[blah][max+1] which risks undefined behavior. 355 | PartitionBucket* bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; 356 | PartitionBucket buckets[kGenericNumBuckets]; 357 | }; 358 | 359 | // Flags for partitionAllocGenericFlags. 360 | enum PartitionAllocFlags { 361 | PartitionAllocReturnNull = 1 << 0, 362 | }; 363 | 364 | // Struct used to retrieve total memory usage of a partition. Used by 365 | // PartitionStatsDumper implementation. 
366 | struct PartitionMemoryStats { 367 | size_t totalMmappedBytes; // Total bytes mmaped from the system. 368 | size_t totalCommittedBytes; // Total size of commmitted pages. 369 | size_t totalResidentBytes; // Total bytes provisioned by the partition. 370 | size_t totalActiveBytes; // Total active bytes in the partition. 371 | size_t totalDecommittableBytes; // Total bytes that could be decommitted. 372 | size_t totalDiscardableBytes; // Total bytes that could be discarded. 373 | }; 374 | 375 | // Struct used to retrieve memory statistics about a partition bucket. Used by 376 | // PartitionStatsDumper implementation. 377 | struct PartitionBucketMemoryStats { 378 | bool isValid; // Used to check if the stats is valid. 379 | bool isDirectMap; // True if this is a direct mapping; size will not be unique. 380 | uint32_t bucketSlotSize; // The size of the slot in bytes. 381 | uint32_t allocatedPageSize; // Total size the partition page allocated from the system. 382 | uint32_t activeBytes; // Total active bytes used in the bucket. 383 | uint32_t residentBytes; // Total bytes provisioned in the bucket. 384 | uint32_t decommittableBytes; // Total bytes that could be decommitted. 385 | uint32_t discardableBytes; // Total bytes that could be discarded. 386 | uint32_t numFullPages; // Number of pages with all slots allocated. 387 | uint32_t numActivePages; // Number of pages that have at least one provisioned slot. 388 | uint32_t numEmptyPages; // Number of pages that are empty but not decommitted. 389 | uint32_t numDecommittedPages; // Number of pages that are empty and decommitted. 390 | }; 391 | 392 | int _rand (int i) { return rand() % i; } 393 | 394 | // Interface that is passed to partitionDumpStats and 395 | // partitionDumpStatsGeneric for using the memory statistics. 396 | class WTF_EXPORT PartitionStatsDumper { 397 | public: 398 | // Called to dump total memory used by partition, once per partition. 399 | virtual void partitionDumpTotals(const char* partitionName, const PartitionMemoryStats*) = 0; 400 | 401 | // Called to dump stats about buckets, for each bucket. 402 | virtual void partitionsDumpBucketStats(const char* partitionName, const PartitionBucketMemoryStats*) = 0; 403 | }; 404 | 405 | WTF_EXPORT void partitionAllocInit(PartitionRoot*, size_t numBuckets, size_t maxAllocation); 406 | WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*); 407 | WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); 408 | WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); 409 | 410 | enum PartitionPurgeFlags { 411 | // Decommitting the ring list of empty pages is reasonably fast. 412 | PartitionPurgeDecommitEmptyPages = 1 << 0, 413 | // Discarding unused system pages is slower, because it involves walking all 414 | // freelists in all active partition pages of all buckets >= system page 415 | // size. It often frees a similar amount of memory to decommitting the empty 416 | // pages, though. 
417 | PartitionPurgeDiscardUnusedSystemPages = 1 << 1, 418 | }; 419 | 420 | WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int); 421 | WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); 422 | 423 | WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, int, size_t, PartitionBucket*); 424 | WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*); 425 | WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, void*, size_t); 426 | 427 | WTF_EXPORT void partitionDumpStats(PartitionRoot*, const char* partitionName, bool isLightDump, PartitionStatsDumper*); 428 | WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, const char* partitionName, bool isLightDump, PartitionStatsDumper*); 429 | 430 | ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(PartitionFreelistEntry* ptr) 431 | { 432 | // We use bswap on little endian as a fast mask for two reasons: 433 | // 1) If an object is freed and its vtable used where the attacker doesn't 434 | // get the chance to run allocations between the free and use, the vtable 435 | // dereference is likely to fault. 436 | // 2) If the attacker has a linear buffer overflow and elects to try and 437 | // corrupt a freelist pointer, partial pointer overwrite attacks are 438 | // thwarted. 439 | // For big endian, similar guarantees are arrived at with a negation. 440 | #if CPU(BIG_ENDIAN) 441 | uintptr_t masked = ~reinterpret_cast(ptr); 442 | #else 443 | uintptr_t masked = bswapuintptrt(reinterpret_cast(ptr)); 444 | #endif 445 | return reinterpret_cast(masked); 446 | } 447 | 448 | ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) 449 | { 450 | #if ENABLE(ASSERT) 451 | // Add space for cookies, checking for integer overflow. 452 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size + (2 * kCookieSize) > size); 453 | size += 2 * kCookieSize; 454 | #endif 455 | return size; 456 | } 457 | 458 | ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) 459 | { 460 | #if ENABLE(ASSERT) 461 | // Remove space for cookies. 462 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size >= 2 * kCookieSize); 463 | size -= 2 * kCookieSize; 464 | #endif 465 | return size; 466 | } 467 | 468 | ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) 469 | { 470 | #if ENABLE(ASSERT) 471 | // The value given to the application is actually just after the cookie. 
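// With ENABLE(ASSERT), each allocation is laid out as
// [leading cookie | user data | trailing cookie], which is why
// partitionCookieSizeAdjustAdd() above reserves 2 * kCookieSize and why the user
// pointer is walked back by kCookieSize on the next line. The stored bytes come
// from partitionCookieWriteValue() below: byte i is
// root->kCookieValue[i] ^ (low byte of the cookie's own address). For example,
// if kCookieValue[0] were 0xAB and the cookie started at an address ending in
// 0x34, the first stored byte would be 0xAB ^ 0x34 = 0x9F (values illustrative
// only).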
472 | ptr = static_cast(ptr) - kCookieSize; 473 | #endif 474 | return ptr; 475 | } 476 | 477 | ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr); 478 | ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page) 479 | { 480 | PartitionSuperPageExtentEntry* extentEntry = reinterpret_cast(reinterpret_cast(page) & kSystemPageBaseMask); 481 | return extentEntry->root; 482 | } 483 | 484 | ALWAYS_INLINE void partitionCookieWriteValue(void* ptr, PartitionPage *page) 485 | { 486 | #if ENABLE(ASSERT) 487 | unsigned char* cookiePtr = reinterpret_cast(ptr); 488 | PartitionRootBase* root = partitionPageToRoot(page); 489 | uint8_t x = (uintptr_t) cookiePtr & 0xff; 490 | for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) { 491 | *cookiePtr = root->kCookieValue[i] ^ x; 492 | } 493 | #endif 494 | } 495 | 496 | ALWAYS_INLINE void partitionCookieCheckValue(void* ptr, PartitionPage *page) 497 | { 498 | #if ENABLE(ASSERT) 499 | unsigned char* cookiePtr = reinterpret_cast(ptr); 500 | uint8_t x = (uintptr_t) cookiePtr & 0xff; 501 | PartitionRootBase* root = partitionPageToRoot(page); 502 | for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) { 503 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(*cookiePtr == (root->kCookieValue[i] ^ x)); 504 | } 505 | #endif 506 | } 507 | 508 | ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) 509 | { 510 | uintptr_t pointerAsUint = reinterpret_cast(ptr); 511 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!(pointerAsUint & kSuperPageOffsetMask)); 512 | // The metadata area is exactly one system page (the guard page) into the 513 | // super page. 514 | return reinterpret_cast(pointerAsUint + kSystemPageSize); 515 | } 516 | 517 | ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) 518 | { 519 | uintptr_t pointerAsUint = reinterpret_cast(ptr); 520 | char* superPagePtr = reinterpret_cast(pointerAsUint & kSuperPageBaseMask); 521 | uintptr_t partitionPageIndex = (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; 522 | // Index 0 is invalid because it is the metadata and guard area and 523 | // the last index is invalid because it is a guard page. 524 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageIndex); 525 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); 526 | PartitionPage* page = reinterpret_cast(partitionSuperPageToMetadataArea(superPagePtr) + (partitionPageIndex << kPageMetadataShift)); 527 | // Partition pages in the same slot span can share the same page object. Adjust for that. 528 | size_t delta = page->pageOffset << kPageMetadataShift; 529 | page = reinterpret_cast(reinterpret_cast(page) - delta); 530 | return page; 531 | } 532 | 533 | ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) 534 | { 535 | uintptr_t pointerAsUint = reinterpret_cast(page); 536 | uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); 537 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(superPageOffset > kSystemPageSize); 538 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * kPageMetadataSize)); 539 | uintptr_t partitionPageIndex = (superPageOffset - kSystemPageSize) >> kPageMetadataShift; 540 | // Index 0 is invalid because it is the metadata area and the last index is invalid because it is a guard page. 
541 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageIndex); 542 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); 543 | uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); 544 | void* ret = reinterpret_cast(superPageBase + (partitionPageIndex << kPartitionPageShift)); 545 | return ret; 546 | } 547 | 548 | ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) 549 | { 550 | PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); 551 | // Checks that the pointer is a multiple of bucket size. 552 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!((reinterpret_cast(ptr) - reinterpret_cast(partitionPageToPointer(page))) % page->bucket->slotSize)); 553 | return page; 554 | } 555 | 556 | ALWAYS_INLINE bool partitionBucketIsDirectMapped(const PartitionBucket* bucket) 557 | { 558 | return !bucket->numSystemPagesPerSlotSpan; 559 | } 560 | 561 | ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) 562 | { 563 | return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; 564 | } 565 | 566 | ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) 567 | { 568 | return static_cast(partitionBucketBytes(bucket) / bucket->slotSize); 569 | } 570 | 571 | ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) 572 | { 573 | // For single-slot buckets which span more than one partition page, we 574 | // have some spare metadata space to store the raw allocation size. We 575 | // can use this to report better statistics. 576 | PartitionBucket* bucket = page->bucket; 577 | if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) 578 | return nullptr; 579 | 580 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION((bucket->slotSize % kSystemPageSize) == 0); 581 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionBucketIsDirectMapped(bucket) || partitionBucketSlots(bucket) == 1); 582 | page++; 583 | return reinterpret_cast(&page->freelistHead); 584 | } 585 | 586 | ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) 587 | { 588 | size_t* rawSizePtr = partitionPageGetRawSizePtr(page); 589 | if (UNLIKELY(rawSizePtr != nullptr)) 590 | return *rawSizePtr; 591 | return 0; 592 | } 593 | 594 | ALWAYS_INLINE bool partitionPointerIsValid(void* ptr) 595 | { 596 | PartitionPage* page = partitionPointerToPage(ptr); 597 | PartitionRootBase* root = partitionPageToRoot(page); 598 | return root->invertedSelf == ~reinterpret_cast(root); 599 | } 600 | 601 | ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) 602 | { 603 | PartitionPage* page = bucket->activePagesHead; 604 | // Check that this page is neither full nor freed. 605 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots >= 0); 606 | void* ret = page->freelistHead; 607 | if (LIKELY(ret != 0)) { 608 | PartitionFreelistEntry* t = page->freelistHead; 609 | PartitionFreelistEntry* z = t; 610 | 611 | while(t) { 612 | if((rand() % 10) == 1) { 613 | break; 614 | } 615 | 616 | z = t; 617 | t = partitionFreelistMask(t->next); 618 | 619 | if(t == NULL) { 620 | break; 621 | } 622 | 623 | // Ensure that t and page mask to the same base address. 
624 | // This should catch most corruptions of the freelist 625 | // where the value is not explicitly controlled or 626 | // informed from a prior memory disclosure 627 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(((uintptr_t) t & (uintptr_t) kSuperPageBaseMask) == ((uintptr_t) page & (uintptr_t) kSuperPageBaseMask)); 628 | 629 | // This check will ensure we can mask to a valid page 630 | // pointer from the user pointer. This also checks the 631 | // root pointer has a valid inverted self 632 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(t)); 633 | } 634 | 635 | if(t) { 636 | ret = t; 637 | } else { 638 | ret = page->freelistHead; 639 | // Ensure that t and page mask to the same base address. 640 | // This should catch most corruptions of the freelist 641 | // where the value is not explicitly controlled or 642 | // informed from a prior memory disclosure 643 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(((uintptr_t) ret & (uintptr_t) kSuperPageBaseMask) == ((uintptr_t) page & (uintptr_t) kSuperPageBaseMask)); 644 | 645 | // This check will ensure we can mask to a valid page 646 | // pointer from the user pointer. This also checks the 647 | // root pointer has a valid inverted self 648 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(ret)); 649 | } 650 | 651 | // All large allocations must go through the slow path to correctly 652 | // update the size metadata. 653 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageGetRawSize(page) == 0); 654 | 655 | if(ret == page->freelistHead) { 656 | PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast(ret)->next); 657 | page->freelistHead = newHead; 658 | } else { 659 | z->next = t->next; 660 | } 661 | 662 | page->numAllocatedSlots++; 663 | } else { 664 | ret = partitionAllocSlowPath(root, flags, size, bucket); 665 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!ret || partitionPointerIsValid(ret)); 666 | } 667 | #if ENABLE(ASSERT) 668 | if (!ret) 669 | return 0; 670 | // Fill the uninitialized pattern, and write the cookies. 671 | page = partitionPointerToPage(ret); 672 | size_t slotSize = page->bucket->slotSize; 673 | size_t rawSize = partitionPageGetRawSize(page); 674 | if (rawSize) { 675 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(rawSize == size); 676 | slotSize = rawSize; 677 | } 678 | size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); 679 | char* charRet = static_cast(ret); 680 | // The value given to the application is actually just after the cookie. 
681 | ret = charRet + kCookieSize; 682 | memset(ret, kUninitializedByte, noCookieSize); 683 | partitionCookieWriteValue(charRet, page); 684 | partitionCookieWriteValue(charRet + kCookieSize + noCookieSize, page); 685 | #endif 686 | return ret; 687 | } 688 | 689 | ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, size_t size) 690 | { 691 | #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 692 | void* result = malloc(size); 693 | RELEASE_RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(result); 694 | return result; 695 | #else 696 | size = partitionCookieSizeAdjustAdd(size); 697 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->initialized); 698 | size_t index = size >> kBucketShift; 699 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(index < root->numBuckets); 700 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size == index << kBucketShift); 701 | PartitionBucket* bucket = &root->buckets()[index]; 702 | return partitionBucketAlloc(root, 0, size, bucket); 703 | #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 704 | } 705 | 706 | ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page, bool delay = false) 707 | { 708 | if(delay == true) { 709 | PartitionRootBase *prb = partitionPageToRoot(page); 710 | 711 | // Make sure the pointer is not already on our delayed 712 | // free list. Assert if it is 713 | for(auto p : prb->delayed_free_list) { 714 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(p != ptr); 715 | } 716 | 717 | if(prb->delayed_free_list.size() < prb->delayed_free_list_max_sz) { 718 | size_t rawSize = partitionPageGetRawSize(page); 719 | size_t slotSize = page->bucket->slotSize; 720 | if (rawSize) 721 | slotSize = rawSize; 722 | // Destroy the user data before adding the 723 | // pointer to the delayed free list 724 | memset(reinterpret_cast(ptr) + kCookieSize, kFreedByte, slotSize-(kCookieSize*2)); 725 | prb->delayed_free_list.push_back(ptr); 726 | return; 727 | } else { 728 | std::random_shuffle(prb->delayed_free_list.begin(), prb->delayed_free_list.end(), _rand); 729 | void *tptr = prb->delayed_free_list[prb->delayed_free_list.size()-1]; 730 | prb->delayed_free_list.pop_back(); 731 | prb->delayed_free_list.push_back(ptr); 732 | ptr = tptr; 733 | page = partitionPointerToPage(ptr); 734 | } 735 | } 736 | 737 | // If these asserts fire, you probably corrupted memory. 738 | #if ENABLE(ASSERT) 739 | size_t rawSize = partitionPageGetRawSize(page); 740 | size_t slotSize = page->bucket->slotSize; 741 | if (rawSize) 742 | slotSize = rawSize; 743 | // Canary values should be intact even though user data 744 | // was previously memset 745 | partitionCookieCheckValue(ptr, page); 746 | partitionCookieCheckValue(reinterpret_cast(ptr) + slotSize - kCookieSize, page); 747 | memset(ptr, kFreedByte, slotSize); 748 | #endif 749 | 750 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots); 751 | PartitionFreelistEntry* freelistHead = page->freelistHead; 752 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!freelistHead || partitionPointerIsValid(freelistHead)); 753 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(ptr != freelistHead); // Catches an immediate double free. 754 | ASSERT_WITH_SECURITY_IMPLICATION(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); // Look for double free one level deeper in debug. 
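// Hardening addition: before putting 'ptr' back on the freelist, walk the
// entire freelist for this page and assert that 'ptr' is not already present.
// This is the exhaustive double-free check mentioned in the README; it costs
// O(n) in the number of free slots on the page.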
755 | 756 | PartitionFreelistEntry* f = freelistHead; 757 | 758 | while(f != NULL) { 759 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(f != ptr); 760 | f = partitionFreelistMask(f->next); 761 | } 762 | 763 | PartitionFreelistEntry* entry = static_cast(ptr); 764 | entry->next = partitionFreelistMask(freelistHead); 765 | page->freelistHead = entry; 766 | --page->numAllocatedSlots; 767 | 768 | if (UNLIKELY(page->numAllocatedSlots <= 0)) { 769 | partitionFreeSlowPath(page); 770 | } else { 771 | // All single-slot allocations must go through the slow path to 772 | // correctly update the size metadata. 773 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageGetRawSize(page) == 0); 774 | } 775 | } 776 | 777 | ALWAYS_INLINE void partitionFree(void* ptr) 778 | { 779 | #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 780 | free(ptr); 781 | #else 782 | // It is safe to call 'free(NULL)' in 783 | // almost all heap implementations 784 | if(ptr == NULL) { 785 | return; 786 | } 787 | 788 | ptr = partitionCookieFreePointerAdjust(ptr); 789 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(ptr)); 790 | PartitionPage* page = partitionPointerToPage(ptr); 791 | partitionFreeWithPage(ptr, page, true); 792 | #endif 793 | } 794 | 795 | ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(PartitionRootGeneric* root, size_t size) 796 | { 797 | size_t order = kBitsPerSizet - countLeadingZerosSizet(size); 798 | // The order index is simply the next few bits after the most significant bit. 799 | size_t orderIndex = (size >> root->orderIndexShifts[order]) & (kGenericNumBucketsPerOrder - 1); 800 | // And if the remaining bits are non-zero we must bump the bucket up. 801 | size_t subOrderIndex = size & root->orderSubIndexMasks[order]; 802 | PartitionBucket* bucket = root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + orderIndex + !!subOrderIndex]; 803 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!bucket->slotSize || bucket->slotSize >= size); 804 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!(bucket->slotSize % kGenericSmallestBucket)); 805 | return bucket; 806 | } 807 | 808 | ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, int flags, size_t size) 809 | { 810 | #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 811 | void* result = malloc(size); 812 | RELEASE_RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(result); 813 | return result; 814 | #else 815 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->initialized); 816 | size = partitionCookieSizeAdjustAdd(size); 817 | PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); 818 | spinLockLock(&root->lock); 819 | void* ret = partitionBucketAlloc(root, flags, size, bucket); 820 | spinLockUnlock(&root->lock); 821 | return ret; 822 | #endif 823 | } 824 | 825 | ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, size_t size) 826 | { 827 | return partitionAllocGenericFlags(root, 0, size); 828 | } 829 | 830 | ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) 831 | { 832 | #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 833 | free(ptr); 834 | #else 835 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->initialized); 836 | 837 | if (UNLIKELY(!ptr)) 838 | return; 839 | 840 | ptr = partitionCookieFreePointerAdjust(ptr); 841 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(ptr)); 842 | PartitionPage* page = partitionPointerToPage(ptr); 843 | spinLockLock(&root->lock); 844 | partitionFreeWithPage(ptr, page, true); 845 | spinLockUnlock(&root->lock); 846 | #endif 847 | } 848 
| 849 | ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) 850 | { 851 | // Caller must check that the size is not above the kGenericMaxDirectMapped 852 | // limit before calling. This also guards against integer overflow in the 853 | // calculation here. 854 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size <= kGenericMaxDirectMapped); 855 | return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; 856 | } 857 | 858 | ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, size_t size) 859 | { 860 | #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 861 | return size; 862 | #else 863 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->initialized); 864 | size = partitionCookieSizeAdjustAdd(size); 865 | PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); 866 | if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { 867 | size = bucket->slotSize; 868 | } else if (size > kGenericMaxDirectMapped) { 869 | // Too large to allocate => return the size unchanged. 870 | } else { 871 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucket == &PartitionRootBase::gPagedBucket); 872 | size = partitionDirectMapSize(size); 873 | } 874 | return partitionCookieSizeAdjustSubtract(size); 875 | #endif 876 | } 877 | 878 | ALWAYS_INLINE bool partitionAllocSupportsGetSize() 879 | { 880 | #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 881 | return false; 882 | #else 883 | return true; 884 | #endif 885 | } 886 | 887 | ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) 888 | { 889 | // No need to lock here. Only 'ptr' being freed by another thread could 890 | // cause trouble, and the caller is responsible for that not happening. 891 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionAllocSupportsGetSize()); 892 | ptr = partitionCookieFreePointerAdjust(ptr); 893 | RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(ptr)); 894 | PartitionPage* page = partitionPointerToPage(ptr); 895 | size_t size = page->bucket->slotSize; 896 | return partitionCookieSizeAdjustSubtract(size); 897 | } 898 | 899 | // N (or more accurately, N - sizeof(void*)) represents the largest size in 900 | // bytes that will be handled by a SizeSpecificPartitionAllocator. 901 | // Attempts to partitionAlloc() more than this amount will fail. 
902 | template 903 | class SizeSpecificPartitionAllocator { 904 | public: 905 | static const size_t kMaxAllocation = N - kAllocationGranularity; 906 | static const size_t kNumBuckets = N + (WTF::kCookieSize*2) / kAllocationGranularity; 907 | void init() { partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation); } 908 | bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); } 909 | ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; } 910 | private: 911 | PartitionRoot m_partitionRoot; 912 | PartitionBucket m_actualBuckets[kNumBuckets]; 913 | }; 914 | 915 | class PartitionAllocatorGeneric { 916 | public: 917 | void init() { partitionAllocGenericInit(&m_partitionRoot); } 918 | bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } 919 | ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } 920 | private: 921 | PartitionRootGeneric m_partitionRoot; 922 | }; 923 | 924 | } // namespace WTF 925 | 926 | using WTF::SizeSpecificPartitionAllocator; 927 | using WTF::PartitionAllocatorGeneric; 928 | using WTF::PartitionRoot; 929 | using WTF::partitionAllocInit; 930 | using WTF::partitionAllocShutdown; 931 | using WTF::partitionAlloc; 932 | using WTF::partitionFree; 933 | using WTF::partitionAllocGeneric; 934 | using WTF::partitionFreeGeneric; 935 | using WTF::partitionReallocGeneric; 936 | using WTF::partitionAllocActualSize; 937 | using WTF::partitionAllocSupportsGetSize; 938 | using WTF::partitionAllocGetSize; 939 | 940 | #endif // WTF_PartitionAlloc_h 941 | 942 | // Hardened PartitionAlloc C API 943 | extern "C" { 944 | 945 | // Size specific partitions/slots for common allocations 946 | // These templates define the maximum size allocation that 947 | // can occur within them 948 | static SizeSpecificPartitionAllocator<64> _PA; 949 | static SizeSpecificPartitionAllocator<128> __PA; 950 | static SizeSpecificPartitionAllocator<256> ___PA; 951 | static SizeSpecificPartitionAllocator<512> ____PA; 952 | 953 | // Generic partition for strings 954 | static PartitionAllocatorGeneric g_string_partition; 955 | 956 | // Generic object partition for other objects 957 | static PartitionAllocatorGeneric g_other_partition; 958 | 959 | // C wrapper for creating a generic partition 960 | void *new_generic_partition() { 961 | PartitionAllocatorGeneric *np = new PartitionAllocatorGeneric; 962 | np->init(); 963 | return (void *)np; 964 | } 965 | 966 | // C wrapper for allocating from a generic partition 967 | void *generic_partition_alloc(void *p, size_t sz) { 968 | PartitionAllocatorGeneric *np = reinterpret_cast(p); 969 | return (void *) partitionAllocGeneric(np->root(), sz); 970 | } 971 | 972 | // C wrapper for reallocating from a generic partition 973 | void *generic_partition_realloc(void *p, void *t, size_t sz) { 974 | PartitionAllocatorGeneric *np = reinterpret_cast(p); 975 | return (void *) partitionReallocGeneric(np->root(), t, sz); 976 | } 977 | 978 | // C wrapper for freeing from a generic partition 979 | void generic_partition_free(void *p, void *m) { 980 | PartitionAllocatorGeneric *np = reinterpret_cast(p); 981 | partitionFreeGeneric(np->root(), m); 982 | } 983 | 984 | // C wrapper for deleting a generic partition 985 | void delete_generic_partition(void *p) { 986 | PartitionAllocatorGeneric *np = reinterpret_cast(p); 987 | np->shutdown(); 988 | } 989 | 990 | // Initialization function that must be called 991 | // before any other operations are performed 992 | void partitionalloc_init() { 993 | _PA.init(); 994 | __PA.init(); 995 | 
___PA.init(); 996 | ____PA.init(); 997 | g_string_partition.init(); 998 | g_other_partition.init(); 999 | } 1000 | 1001 | // Shutdown function that should be called 1002 | // before a program has exited 1003 | void partitionalloc_shutdown() { 1004 | _PA.shutdown(); 1005 | __PA.shutdown(); 1006 | ___PA.shutdown(); 1007 | ____PA.shutdown(); 1008 | g_string_partition.shutdown(); 1009 | g_other_partition.shutdown(); 1010 | } 1011 | 1012 | // The C API supports a few size defined partitions 1013 | void *partition_malloc_sz(size_t sz) { 1014 | // kCookie size is already accounted for in our 1015 | // size specific partition templates 1016 | if(sz <= 64) { 1017 | return partitionAlloc(_PA.root(), sz); 1018 | } else if(sz <= 128) { 1019 | return partitionAlloc(__PA.root(), sz); 1020 | } else if(sz <= 256) { 1021 | return partitionAlloc(___PA.root(), sz); 1022 | } else if(sz <= 512) { 1023 | return partitionAlloc(____PA.root(), sz); 1024 | } 1025 | 1026 | return NULL; 1027 | } 1028 | 1029 | // The correct page structure is derived from the pointer 1030 | void partition_free_sz(void *ptr) { 1031 | partitionFree(ptr); 1032 | } 1033 | 1034 | // Allocate memory for a string 1035 | void *partition_malloc_string(size_t sz) { 1036 | return partitionAllocGeneric(g_string_partition.root(), sz); 1037 | } 1038 | 1039 | // Reallocate memory for a string 1040 | void *partition_realloc_string(void *p, size_t sz) { 1041 | return partitionReallocGeneric(g_string_partition.root(), p, sz); 1042 | } 1043 | 1044 | // Free memory for a string 1045 | void partition_free_string(void *ptr) { 1046 | partitionFreeGeneric(g_string_partition.root(), ptr); 1047 | } 1048 | 1049 | // Allocate memory for other objects 1050 | void *partition_malloc(size_t sz) { 1051 | return partitionAllocGeneric(g_other_partition.root(), sz); 1052 | } 1053 | 1054 | // Reallocate memory for other objects 1055 | void *partition_realloc(void *p, size_t sz) { 1056 | return partitionReallocGeneric(g_string_partition.root(), p, sz); 1057 | } 1058 | 1059 | // Free memory for other objects 1060 | void partition_free(void *ptr) { 1061 | partitionFreeGeneric(g_other_partition.root(), ptr); 1062 | } 1063 | 1064 | // Checks if a pointer has a valid page/root structure 1065 | // ASSERTS on failure or returns 0 1066 | int check_partition_pointer(void *p) { 1067 | WTF::partitionPointerIsValid((uint8_t *) p - WTF::kCookieSize); 1068 | return 0; 1069 | } 1070 | } 1071 | 1072 | // A base class you can easily inherit from. Overloads 1073 | // new operator to use PartitionAlloc. This only works 1074 | // with the global size specific PartitionAlloc templates 1075 | // _PA*. We do not implement reference counting here 1076 | class PartitionBackedBase { 1077 | public: 1078 | PartitionBackedBase() { } 1079 | ~PartitionBackedBase() { } 1080 | 1081 | void *operator new(size_t sz) { 1082 | void *p = partition_malloc_sz(sz); 1083 | return p; 1084 | } 1085 | 1086 | void operator delete(void *ptr) { 1087 | partition_free_sz(ptr); 1088 | } 1089 | 1090 | int check_this_ptr() { 1091 | return check_partition_pointer(this); 1092 | } 1093 | }; -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | This is a standalone library containing PartitionAlloc, the allocator used in Chrome's Blink engine. It needs a lot of work and testing before it should be used on production code. 
This code is changing rapidly and should be considered unstable and untested until this notice is removed. 4 | 5 | # PartitionAlloc 6 | 7 | This is a fork of the PartitionAlloc code from Chrome's Blink engine. If you're not familiar with PartitionAlloc, go read [this](http://struct.github.io/partition_alloc.html). The TLDR is that PartitionAlloc is a heap allocator that segments allocations based on size or type. This provides the ability to separate sensitive data structures from those tainted by user inputs, if the API is used correctly. The PartitionAlloc developers offer the following security guarantees: 8 | 9 | * Linear overflows cannot corrupt into the partition 10 | * Linear overflows cannot corrupt out of the partition 11 | * Freed pages will only be re-used within the partition (exception: large allocations > ~1MB) 12 | * Freed pages will only hold same-sized objects when re-used 13 | * Dereference of freelist pointer should fault 14 | * Out-of-line main metadata: linear over or underflow cannot corrupt it 15 | * Partial pointer overwrite of freelist pointer should fault 16 | * Rudimentary double-free detection 17 | * Large allocations (> ~1MB) are guard-paged at the beginning and end 18 | 19 | # Hardening 20 | 21 | PartitionAlloc provides good security against heap exploits right out of the box, but there is always room for improvement. Many additional security mechanisms can be enabled if performance is not an issue, and that is precisely what I have done with this fork of the code. Some of these have been documented [here](http://struct.github.io/partition_alloc.html). All calls to ASSERT have been replaced with ASSERT_WITH_SECURITY_IMPLICATION and enabled by default. This has an obvious performance penalty. 22 | 23 | The following changes have been made to the original PartitionAlloc code base. 24 | 25 | ## Allocate 26 | 27 | * Randomization of the freelist upon creation 28 | * Freelist entries are randomly selected upon allocation 29 | * Allocated slots are surrounded by a canary value that is unique per partition and XOR'd with the last byte of its address 30 | * New allocations are memset with 0xDE 31 | * All freelist pointers are checked for a valid page mask and root inverted self value 32 | 33 | ## Free 34 | 35 | * Delayed free of all user allocations using a vector stored with the partition root 36 | * Freed allocations have their user data memset before they're added to the delayed free list 37 | * Better double-free detection 38 | 39 | ## Design 40 | 41 | Some of the changes made to PartitionAlloc for security were carefully thought through and are the result of years of exploit writing. Others were made on a whim because they seemed like a good idea. 42 | 43 | The delayed free list is a std::vector stored within the partition root itself. This location was chosen for two main reasons: 1) to keep the PartitionPage and PartitionBucket structures at their current size, and 2) to keep a separate delayed free list per partition. The latter, in theory, helps with performance, but I have no data to prove this. 44 | 45 | The user data canary secret value is also stored in the partition root itself, which means that each root has its own unique canary value. Each canary written to the beginning and end of a user allocation is XOR'd with the last byte of the address where it resides in memory. This may change in the future as I research more into memory disclosure attacks against PartitionAlloc.
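To make the canary scheme described above concrete, here is a minimal, illustrative sketch of the write/check logic. It mirrors partitionCookieWriteValue() and partitionCookieCheckValue() in PartitionAlloc.h but is not the real implementation: kCookieSize and the secret bytes below are stand-ins (in the real code the secret lives in the partition root, one per partition, and a mismatch triggers RELEASE_ASSERT_WITH_SECURITY_IMPLICATION rather than returning a bool).

```cpp
#include <cstddef>
#include <cstdint>

// Illustrative stand-ins: in the real code these live in the partition root.
static const size_t kCookieSize = 16;
static unsigned char rootCookieValue[kCookieSize];

// Write a canary at 'cookiePtr'. Every byte of the per-root secret is XOR'd
// with the low byte of the canary's own starting address, so a canary
// disclosed at one address cannot simply be replayed at another.
void writeCanary(unsigned char* cookiePtr) {
    uint8_t x = reinterpret_cast<uintptr_t>(cookiePtr) & 0xff;
    for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
        *cookiePtr = rootCookieValue[i] ^ x;
}

// Check a canary; the real code asserts on mismatch in the free path.
bool checkCanary(const unsigned char* cookiePtr) {
    uint8_t x = reinterpret_cast<uintptr_t>(cookiePtr) & 0xff;
    for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) {
        if (*cookiePtr != static_cast<unsigned char>(rootCookieValue[i] ^ x))
            return false;
    }
    return true;
}
```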
46 | 47 | # API Additions 48 | 49 | This fork also includes a basic C API with the following interfaces (realloc counterparts generic_partition_realloc(), partition_realloc_string(), and partition_realloc() are also provided; see the wrappers in PartitionAlloc.h): 50 | 51 | * void *new_generic_partition() - Returns a void pointer to a new PartitionAllocatorGeneric instance 52 | * void *generic_partition_alloc(void *r, size_t s) - Returns an allocation of size s from a PartitionAllocatorGeneric root r 53 | * void generic_partition_free(void *r, void *a) - Frees an allocation a from a root r 54 | * void delete_generic_partition(void *r) - Deletes a PartitionAllocatorGeneric r 55 | * void partitionalloc_init() - Initializes all global partitions used in the C interface 56 | * void partitionalloc_shutdown() - Shuts down all global partitions used in the C interface 57 | * void *partition_malloc_sz(size_t s) - Allocates s bytes from a global size specific partition 58 | * void partition_free_sz(void *p) - Frees a memory allocation p from a global size specific partition 59 | * void *partition_malloc_string(size_t s) - Allocates s bytes from a global partition specifically for strings 60 | * void partition_free_string(void *p) - Frees a memory allocation p from a global partition specifically for strings 61 | * void *partition_malloc(size_t s) - Allocates s bytes from a global generic partition 62 | * void partition_free(void *p) - Frees a memory allocation p from a global generic partition 63 | * int check_partition_pointer(void *p) - Checks if p is a valid pointer within a partition (will assert if the check fails) 64 | 65 | The following additional things have been added: 66 | 67 | * 4 size specific partition templates for 64, 128, 256, and 512 byte allocations 68 | * 2 generic partitions, one for strings and one for general use 69 | * A C++ class, PartitionBackedBase, that can be used as a base class; it overloads the new/delete operators to allocate from a size specific partition 70 | 71 | # Exploitation 72 | 73 | _This is a work in progress_ 74 | 75 | Modern memory safety exploitation is typically viewed as how much influence or control an untrusted input has over a read, write, or execute primitive. Exploit developers chain these primitives together in order to gain complete control of the process. Therefore it is important to make this process as difficult as possible. But default exploit mitigations such as DEP and ASLR only go so far, and there's a fine balance between performance and security. 76 | 77 | Heap allocators are a good target for exploit writers. They are usually where C++ objects, strings, and other data structures are stored at runtime. By design, PartitionAlloc attempts to separate these types of objects in order to minimize the control an exploit developer has. But separation of data types is not enough to stop exploitation. We need to be sure the heap allocator itself does not introduce any additional risk. We need to defend against a number of different bug classes with these objects, such as double deletes, overwrites, use-after-frees, and so on. 78 | 79 | # Usage 80 | 81 | Type `make test` and then run `build/pa_test`. The pa_test.cpp program will show you the basics of using the C API. 82 | 83 | # Performance 84 | 85 | Do not use Hardened PartitionAlloc if performance is important for your application. There are plenty of fast user space allocators out there. They don't have the same security properties; they are designed for speed. If you want an allocator that tries to strike a balance between the two, then you can likely just stick with your system allocator (ptmalloc2, LFH, etc.).
If you want an allocator that doesn't try to strike that balance and instead only cares about security then you may want to give Hardened PartitionAlloc a try. 86 | 87 | # Todo 88 | 89 | This is a work in progress and I would like to reach a stable release at some point soon. The goal is for it to be packaged for popular Linux distributions and actually used in production. 90 | 91 | * Improved delayed free implementation 92 | * More efficient double free detection 93 | * Document other security relevant asserts 94 | * Research memalign support https://github.com/struct/HardenedPartitionAlloc/issues/1 95 | * Hardening patches are untested on Windows 96 | 97 | # Who 98 | 99 | The fork of PartitionAlloc and hardening patches are maintained by Chris Rohlf chris.rohlf@gmail.com 100 | 101 | The original PartitionAlloc (Google) and WebKit (Apple, Google) code are copyrighted by their respective authors. All licenses and code copyright headers have been preserved. 102 | 103 | # Thanks 104 | 105 | The following people are owed a thank you for their suggestions and ideas: 106 | 107 | [CopperheadOS](https://twitter.com/copperheados) -------------------------------------------------------------------------------- /SpinLock.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013 Google Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * Neither the name of Google Inc. nor the names of its 15 | * contributors may be used to endorse or promote products derived from 16 | * this software without specific prior written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | 31 | #ifndef WTF_SpinLock_h 32 | #define WTF_SpinLock_h 33 | 34 | // DESCRIPTION 35 | // spinLockLock() and spinLockUnlock() are simple spinlock primitives based on 36 | // the standard CPU primitive of atomic increment and decrement of an int at 37 | // a given memory address. 38 | 39 | #include "Atomics.h" 40 | #include 41 | namespace WTF { 42 | 43 | ALWAYS_INLINE void spinLockLock(int volatile* lock) 44 | { 45 | while (UNLIKELY(atomicTestAndSetToOne(lock))) { 46 | while (*lock) { } // Spin without spamming locked instructions. 
47 | } 48 | } 49 | 50 | ALWAYS_INLINE void spinLockUnlock(int volatile* lock) 51 | { 52 | atomicSetOneToZero(lock); 53 | } 54 | 55 | } // namespace WTF 56 | 57 | using WTF::spinLockLock; 58 | using WTF::spinLockUnlock; 59 | 60 | #endif // WTF_SpinLock_h 61 | -------------------------------------------------------------------------------- /WTFExport.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013 Google Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * Neither the name of Google Inc. nor the names of its 15 | * contributors may be used to endorse or promote products derived from 16 | * this software without specific prior written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | 31 | 32 | #ifndef WTFExport_h 33 | #define WTFExport_h 34 | 35 | #if !defined(WTF_IMPLEMENTATION) 36 | #define WTF_IMPLEMENTATION 0 37 | #endif 38 | 39 | #if defined(COMPONENT_BUILD) 40 | #if defined(WIN32) 41 | #if WTF_IMPLEMENTATION 42 | #define WTF_EXPORT __declspec(dllexport) 43 | #else 44 | #define WTF_EXPORT __declspec(dllimport) 45 | #endif 46 | #else // defined(WIN32) 47 | #define WTF_EXPORT __attribute__((visibility("default"))) 48 | #endif 49 | #else // defined(COMPONENT_BUILD) 50 | #define WTF_EXPORT 51 | #endif 52 | 53 | #endif // WTFExport_h 54 | -------------------------------------------------------------------------------- /config.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2004, 2005, 2006, 2013 Apple Inc. 3 | * Copyright (C) 2009 Google Inc. All rights reserved. 4 | * Copyright (C) 2007-2009 Torch Mobile, Inc. 5 | * Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved. 6 | * 7 | * This library is free software; you can redistribute it and/or 8 | * modify it under the terms of the GNU Library General Public 9 | * License as published by the Free Software Foundation; either 10 | * version 2 of the License, or (at your option) any later version. 
11 | * 12 | * This library is distributed in the hope that it will be useful, 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 | * Library General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU Library General Public License 18 | * along with this library; see the file COPYING.LIB. If not, write to 19 | * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, 20 | * Boston, MA 02110-1301, USA. 21 | * 22 | */ 23 | 24 | /* Include compiler specific macros */ 25 | #include "Compiler.h" 26 | 27 | #if COMPILER(MSVC) 28 | #define _USE_MATH_DEFINES // Make math.h behave like other platforms. 29 | #endif 30 | 31 | /* ==== Platform adaptation macros: these describe properties of the target environment. ==== */ 32 | 33 | /* HAVE() - specific system features (headers, functions or similar) that are present or not */ 34 | #define HAVE(WTF_FEATURE) (defined HAVE_##WTF_FEATURE && HAVE_##WTF_FEATURE) 35 | /* OS() - underlying operating system; only to be used for mandated low-level services like 36 | virtual memory, not to choose a GUI toolkit */ 37 | #define OS(WTF_FEATURE) (defined WTF_OS_##WTF_FEATURE && WTF_OS_##WTF_FEATURE) 38 | 39 | /* ==== Policy decision macros: these define policy choices for a particular port. ==== */ 40 | 41 | /* USE() - use a particular third-party library or optional OS service */ 42 | #define USE(WTF_FEATURE) (defined WTF_USE_##WTF_FEATURE && WTF_USE_##WTF_FEATURE) 43 | /* ENABLE() - turn on a specific feature of WebKit */ 44 | #define ENABLE(WTF_FEATURE) (defined ENABLE_##WTF_FEATURE && ENABLE_##WTF_FEATURE) 45 | 46 | /* ==== OS() - underlying operating system; only to be used for mandated low-level services like 47 | virtual memory, not to choose a GUI toolkit ==== */ 48 | 49 | /* OS(ANDROID) - Android */ 50 | #ifdef ANDROID 51 | #define WTF_OS_ANDROID 1 52 | /* OS(MACOSX) - Any Darwin-based OS, including Mac OS X and iPhone OS */ 53 | #elif defined(__APPLE__) 54 | #define WTF_OS_MACOSX 1 55 | /* OS(FREEBSD) - FreeBSD */ 56 | #elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__FreeBSD_kernel__) 57 | #define WTF_OS_FREEBSD 1 58 | /* OS(LINUX) - Linux */ 59 | #elif defined(__linux__) 60 | #define WTF_OS_LINUX 1 61 | /* OS(OPENBSD) - OpenBSD */ 62 | #elif defined(__OpenBSD__) 63 | #define WTF_OS_OPENBSD 1 64 | /* OS(WIN) - Any version of Windows */ 65 | #elif defined(WIN32) || defined(_WIN32) 66 | #define WTF_OS_WIN 1 67 | #endif 68 | 69 | /* OS(POSIX) - Any Unix-like system */ 70 | #if OS(ANDROID) \ 71 | || OS(MACOSX) \ 72 | || OS(FREEBSD) \ 73 | || OS(LINUX) \ 74 | || OS(OPENBSD) \ 75 | || defined(unix) \ 76 | || defined(__unix) \ 77 | || defined(__unix__) 78 | #define WTF_OS_POSIX 1 79 | #endif 80 | 81 | /* There is an assumption in the project that either OS(WIN) or OS(POSIX) is set. */ 82 | #if !OS(WIN) && !OS(POSIX) 83 | #error Either OS(WIN) or OS(POSIX) needs to be set. 
84 | #endif 85 | 86 | /* Operating environments */ 87 | 88 | #if OS(ANDROID) 89 | #define WTF_USE_LOW_QUALITY_IMAGE_INTERPOLATION 1 90 | #else 91 | #define WTF_USE_ICCJPEG 1 92 | #define WTF_USE_QCMSLIB 1 93 | #endif 94 | 95 | #if OS(MACOSX) 96 | #define WTF_USE_CF 1 97 | #endif /* OS(MACOSX) */ 98 | 99 | #if OS(POSIX) 100 | #define HAVE_SIGNAL_H 1 101 | #define HAVE_SYS_TIME_H 1 102 | #define WTF_USE_PTHREADS 1 103 | #endif /* OS(POSIX) */ 104 | 105 | #if !OS(WIN) && !OS(ANDROID) 106 | #define HAVE_TM_GMTOFF 1 107 | #define HAVE_TM_ZONE 1 108 | #define HAVE_TIMEGM 1 109 | #endif 110 | 111 | #if OS(MACOSX) 112 | #define WTF_USE_NEW_THEME 1 113 | #endif /* OS(MACOSX) */ 114 | 115 | #if OS(WIN) 116 | 117 | // If we don't define these, they get defined in windef.h. 118 | // We want to use std::min and std::max. 119 | #ifndef max 120 | #define max max 121 | #endif 122 | #ifndef min 123 | #define min min 124 | #endif 125 | 126 | #endif /* OS(WIN) */ 127 | 128 | #ifdef __cplusplus 129 | 130 | // These undefs match up with defines in build/mac/Prefix.h for Mac OS X. 131 | // Helps us catch if anyone uses new or delete by accident in code and doesn't include "config.h". 132 | #undef new 133 | #undef delete 134 | #include 135 | #include 136 | 137 | #endif 138 | 139 | // Adopted from base/compiler_specific.h where you can find a detailed explanation. 140 | #if COMPILER(MSVC) 141 | #define STATIC_CONST_MEMBER_DEFINITION __declspec(selectany) 142 | #else 143 | #define STATIC_CONST_MEMBER_DEFINITION 144 | #endif 145 | -------------------------------------------------------------------------------- /tests/linear_overflow.cpp: -------------------------------------------------------------------------------- 1 | // This is an example of a linear heap overflow 2 | // that should get caught by a security assert 3 | #include 4 | #include 5 | #include "../PartitionAlloc.h" 6 | 7 | // This program should result in the following ASSERT 8 | // ASSERTION FAILED: *cookiePtr == root->kCookieValue[i] 9 | 10 | #define BUFFER_SIZE 128 11 | 12 | void run_test() { 13 | void *gp = new_generic_partition(); 14 | void *p = generic_partition_alloc(gp, BUFFER_SIZE); 15 | ASSERT(p); 16 | memset(p, 0x41, BUFFER_SIZE*2); 17 | generic_partition_free(gp, p); 18 | delete_generic_partition(gp); 19 | } 20 | 21 | int main(int argc, char *argv[]) { 22 | // Initialize the C API by calling _init() which will 23 | // make sure all generic partitions are initialized 24 | partitionalloc_init(); 25 | 26 | // Run all tests 27 | run_test(); 28 | 29 | // Shutdown all generic partitions 30 | partitionalloc_shutdown(); 31 | 32 | return 0; 33 | } 34 | -------------------------------------------------------------------------------- /tests/pa_test.cpp: -------------------------------------------------------------------------------- 1 | // This serves as an example of how to use the various 2 | // C/C++ APIs that ship with Hardened PartitionAlloc 3 | #include 4 | #include 5 | #include 6 | #include "../PartitionAlloc.h" 7 | 8 | class MyClass : public PartitionBackedBase { 9 | public: 10 | MyClass() { 11 | ptr = NULL; 12 | idx = 0; 13 | } 14 | 15 | ~MyClass() { } 16 | 17 | void setPtr(char *s) { 18 | ptr = s; 19 | } 20 | char *getPtr() { 21 | return ptr; 22 | } 23 | 24 | void setIdx(int i) { 25 | idx = i; 26 | } 27 | 28 | int getIdx() { 29 | return idx; 30 | } 31 | 32 | private: 33 | char *ptr; 34 | int idx; 35 | }; 36 | 37 | typedef struct MyClassProxy { 38 | MyClassProxy() { 39 | m = new MyClass(); 40 | } 41 | 42 | ~MyClassProxy() { 43 | delete m; 44 
| } 45 | 46 | MyClass *m; 47 | 48 | MyClass *operator->() { 49 | check_partition_pointer(m); 50 | return m; 51 | } 52 | 53 | } MyClassProxy; 54 | 55 | 56 | void run_test() { 57 | // PartitionAlloc API test with global root 58 | PartitionAllocatorGeneric my_partition; 59 | my_partition.init(); 60 | void *p = partitionAllocGeneric(my_partition.root(), 16); 61 | partitionFreeGeneric(my_partition.root(), p); 62 | my_partition.shutdown(); 63 | 64 | for(int i = 0; i < 512; i++) { 65 | p = partition_malloc_sz(64); 66 | ASSERT(p); 67 | partition_free_sz(p); 68 | } 69 | 70 | for(int i = 0; i < 512; i++) { 71 | p = partition_malloc_sz(128); 72 | ASSERT(p); 73 | partition_free_sz(p); 74 | } 75 | 76 | for(int i = 0; i < 512; i++) { 77 | p = partition_malloc_sz(256); 78 | ASSERT(p); 79 | partition_free_sz(p); 80 | } 81 | 82 | for(int i = 0; i < 32; i++) { 83 | p = partition_malloc_sz(512); 84 | ASSERT(p); 85 | partition_free_sz(p); 86 | } 87 | 88 | p = partition_malloc_string(128); 89 | ASSERT(p); 90 | check_partition_pointer(p); 91 | p = partition_realloc_string(p, 128); 92 | ASSERT(p); 93 | partition_free_string(p); 94 | 95 | p = partition_malloc(512); 96 | ASSERT(p); 97 | p = partition_realloc(p, 550); 98 | ASSERT(p); 99 | partition_free(p); 100 | 101 | // Create a new MyClass which inherits from PartitionBackedBase 102 | // which overloads the new operator 103 | MyClass *mc = new MyClass(); 104 | ASSERT(mc); 105 | mc->setIdx(1234); 106 | check_partition_pointer(mc); 107 | delete mc; 108 | 109 | void *gp = new_generic_partition(); 110 | p = generic_partition_alloc(gp, 128); 111 | ASSERT(p); 112 | 113 | // Create a proxy and call a method, which 114 | // will trigger a call to check_partition_pointer 115 | MyClassProxy j; 116 | j->setIdx(100); 117 | 118 | p = generic_partition_realloc(gp, p, 256); 119 | ASSERT(p); 120 | generic_partition_free(gp, p); 121 | delete_generic_partition(gp); 122 | } 123 | 124 | int main(int argc, char *argv[]) { 125 | // Initialize the C API by calling _init() which will 126 | // make sure all generic partitions are initialized 127 | partitionalloc_init(); 128 | 129 | // Run all tests 130 | run_test(); 131 | 132 | // Shutdown all generic partitions 133 | partitionalloc_shutdown(); 134 | 135 | return 0; 136 | } 137 | -------------------------------------------------------------------------------- /tests/pointer_check.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "../PartitionAlloc.h" 5 | 6 | #define BUFFER_SIZE 128 7 | 8 | void run_test() { 9 | void *gp = new_generic_partition(); 10 | void *p = generic_partition_alloc(gp, BUFFER_SIZE); 11 | ASSERT(p); 12 | // Should return 0 13 | int ret = check_partition_pointer(p); 14 | ASSERT(!ret); 15 | 16 | char *d = (char *) malloc(128); 17 | // Should assert and crash 18 | ret = check_partition_pointer(d); 19 | ASSERT(!ret); 20 | free(d); 21 | 22 | generic_partition_free(gp, p); 23 | delete_generic_partition(gp); 24 | } 25 | 26 | int main(int argc, char *argv[]) { 27 | // Initialize the C API by calling _init() which will 28 | // make sure all generic partitions are initialized 29 | partitionalloc_init(); 30 | 31 | // Run all tests 32 | run_test(); 33 | 34 | // Shutdown all generic partitions 35 | partitionalloc_shutdown(); 36 | 37 | return 0; 38 | } 39 | --------------------------------------------------------------------------------