├── .gitignore ├── LICENSE ├── README.md ├── cmake ├── BuildSettings.cmake └── iOS.cmake ├── common ├── CMakeLists.txt ├── autoresetevent.h ├── autoreseteventcondvar.h ├── benaphore.h ├── bitfield.h ├── diningphilosophers.h ├── inmemorylogger.cpp ├── inmemorylogger.h ├── rwlock.h └── sema.h └── tests ├── CMakeLists.txt ├── autoreseteventtester.cpp ├── benaphoretester.cpp ├── diningphilosophertester.cpp ├── main.cpp ├── recursivebenaphoretester.cpp ├── rwlocktester.cpp └── simplerwlocktester.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | *~ 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 Jeff Preshing 2 | 3 | This software is provided 'as-is', without any express or implied 4 | warranty. In no event will the authors be held liable for any damages 5 | arising from the use of this software. 6 | 7 | Permission is granted to anyone to use this software for any purpose, 8 | including commercial applications, and to alter it and redistribute it 9 | freely, subject to the following restrictions: 10 | 11 | 1. The origin of this software must not be misrepresented; you must not 12 | claim that you wrote the original software. If you use this software 13 | in a product, an acknowledgement in the product documentation would be 14 | appreciated but is not required. 15 | 2. Altered source versions must be plainly marked as such, and must not be 16 | misrepresented as being the original software. 17 | 3. This notice may not be removed or altered from any source distribution. 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Various synchronization primitives for multithreaded applications in C++11. 
2 | 3 | Used in the blog post, [Semaphores are Surprisingly Versatile](http://preshing.com/20150316/semaphores-are-surprisingly-versatile). 4 | 5 | Code is released under the [zlib license](http://en.wikipedia.org/wiki/Zlib_License). See the `LICENSE` file. 6 | 7 | ## How to Build the Tests 8 | 9 | First, you must generate the projects using [CMake](http://www.cmake.org/). Open a command prompt in the `tests` folder and do the following. 10 | 11 | mkdir build 12 | cd build 13 | cmake .. 14 | 15 | `cmake` takes an optional `-G` argument to specify which project generator to use. For example, the following command will use the Visual Studio 2013 generator. A complete list of available generators can be found by running `cmake` with no arguments. 16 | 17 | cmake -G "Visual Studio 12 2013" .. 18 | 19 | On a Unix-like OS, to generate a makefile that builds the release configuration: 20 | 21 | cmake -DCMAKE_BUILD_TYPE=Release -G "Unix Makefiles" .. 22 | 23 | To generate projects for iOS devices, use the following. 24 | 25 | cmake -DCMAKE_TOOLCHAIN_FILE=../../cmake/iOS.cmake -G "Xcode" .. 26 | 27 | To build the project, simply use the generated project files as you would normally. On some platforms, you can use CMake to perform the build step, too. For example, on Windows, you can use the command: 28 | 29 | cmake --build . --config Release 30 | -------------------------------------------------------------------------------- /cmake/BuildSettings.cmake: -------------------------------------------------------------------------------- 1 | # Enable debug info in Release. 
2 | if(${MSVC}) 3 | set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /Zi") 4 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Zi") 5 | set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /debug") 6 | else() 7 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") 8 | if(NOT ${CYGWIN}) # Don't specify -pthread on Cygwin 9 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread") 10 | endif() 11 | set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -g") 12 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -g") 13 | endif() 14 | 15 | if(${IOS}) 16 | set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphoneos;-iphonesimulator") 17 | set_target_properties(${PROJECT_NAME} PROPERTIES XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "iPhone Developer") 18 | endif() 19 | -------------------------------------------------------------------------------- /cmake/iOS.cmake: -------------------------------------------------------------------------------- 1 | # This file is based off of the Platform/Darwin.cmake and Platform/UnixPaths.cmake 2 | # files which are included with CMake 2.8.4 3 | # It has been altered for iOS development 4 | 5 | # Options: 6 | # 7 | # IOS_PLATFORM = OS (default) or SIMULATOR 8 | # This decides if SDKS will be selected from the iPhoneOS.platform or iPhoneSimulator.platform folders 9 | # OS - the default, used to build for iPhone and iPad physical devices, which have an arm arch. 10 | # SIMULATOR - used to build for the Simulator platforms, which have an x86 arch. 11 | # 12 | # CMAKE_IOS_DEVELOPER_ROOT = automatic(default) or /path/to/platform/Developer folder 13 | # By default this location is automatcially chosen based on the IOS_PLATFORM value above. 
14 | # If set manually, it will override the default location and force the user of a particular Developer Platform 15 | # 16 | # CMAKE_IOS_SDK_ROOT = automatic(default) or /path/to/platform/Developer/SDKs/SDK folder 17 | # By default this location is automatcially chosen based on the CMAKE_IOS_DEVELOPER_ROOT value. 18 | # In this case it will always be the most up-to-date SDK found in the CMAKE_IOS_DEVELOPER_ROOT path. 19 | # If set manually, this will force the use of a specific SDK version 20 | 21 | # Macros: 22 | # 23 | # set_xcode_property (TARGET XCODE_PROPERTY XCODE_VALUE) 24 | # A convenience macro for setting xcode specific properties on targets 25 | # example: set_xcode_property (myioslib IPHONEOS_DEPLOYMENT_TARGET "3.1") 26 | # 27 | # find_host_package (PROGRAM ARGS) 28 | # A macro used to find executable programs on the host system, not within the iOS environment. 29 | # Thanks to the android-cmake project for providing the command 30 | 31 | # Standard settings 32 | set (CMAKE_SYSTEM_NAME Darwin) 33 | set (CMAKE_SYSTEM_VERSION 1) 34 | set (UNIX True) 35 | set (APPLE True) 36 | set (IOS True) 37 | 38 | # Required as of cmake 2.8.10 39 | set (CMAKE_OSX_DEPLOYMENT_TARGET "" CACHE STRING "Force unset of the deployment target for iOS" FORCE) 40 | 41 | # Determine the cmake host system version so we know where to find the iOS SDKs 42 | find_program (CMAKE_UNAME uname /bin /usr/bin /usr/local/bin) 43 | if (CMAKE_UNAME) 44 | exec_program(uname ARGS -r OUTPUT_VARIABLE CMAKE_HOST_SYSTEM_VERSION) 45 | string (REGEX REPLACE "^([0-9]+)\\.([0-9]+).*$" "\\1" DARWIN_MAJOR_VERSION "${CMAKE_HOST_SYSTEM_VERSION}") 46 | endif (CMAKE_UNAME) 47 | 48 | # Force the compilers to gcc for iOS 49 | include (CMakeForceCompiler) 50 | CMAKE_FORCE_C_COMPILER (/usr/bin/clang Apple) 51 | CMAKE_FORCE_CXX_COMPILER (/usr/bin/clang++ Apple) 52 | set(CMAKE_AR ar CACHE FILEPATH "" FORCE) 53 | 54 | # Skip the platform compiler checks for cross compiling 55 | set (CMAKE_CXX_COMPILER_WORKS TRUE) 
56 | set (CMAKE_C_COMPILER_WORKS TRUE) 57 | 58 | # All iOS/Darwin specific settings - some may be redundant 59 | set (CMAKE_SHARED_LIBRARY_PREFIX "lib") 60 | set (CMAKE_SHARED_LIBRARY_SUFFIX ".dylib") 61 | set (CMAKE_SHARED_MODULE_PREFIX "lib") 62 | set (CMAKE_SHARED_MODULE_SUFFIX ".so") 63 | set (CMAKE_MODULE_EXISTS 1) 64 | set (CMAKE_DL_LIBS "") 65 | 66 | set (CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ") 67 | set (CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ") 68 | set (CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}") 69 | set (CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}") 70 | 71 | # Hidden visibilty is required for cxx on iOS 72 | set (CMAKE_C_FLAGS_INIT "") 73 | set (CMAKE_CXX_FLAGS_INIT "-fvisibility=hidden -fvisibility-inlines-hidden -isysroot ${CMAKE_OSX_SYSROOT}") 74 | 75 | set (CMAKE_C_LINK_FLAGS "-Wl,-search_paths_first ${CMAKE_C_LINK_FLAGS}") 76 | set (CMAKE_CXX_LINK_FLAGS "-Wl,-search_paths_first ${CMAKE_CXX_LINK_FLAGS}") 77 | 78 | set (CMAKE_PLATFORM_HAS_INSTALLNAME 1) 79 | set (CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib -headerpad_max_install_names") 80 | set (CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle -headerpad_max_install_names") 81 | set (CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,") 82 | set (CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,") 83 | set (CMAKE_FIND_LIBRARY_SUFFIXES ".dylib" ".so" ".a") 84 | 85 | # hack: if a new cmake (which uses CMAKE_INSTALL_NAME_TOOL) runs on an old build tree 86 | # (where install_name_tool was hardcoded) and where CMAKE_INSTALL_NAME_TOOL isn't in the cache 87 | # and still cmake didn't fail in CMakeFindBinUtils.cmake (because it isn't rerun) 88 | # hardcode CMAKE_INSTALL_NAME_TOOL here to install_name_tool, so it behaves as it did before, Alex 89 | if (NOT DEFINED CMAKE_INSTALL_NAME_TOOL) 90 | find_program(CMAKE_INSTALL_NAME_TOOL install_name_tool) 91 | endif (NOT DEFINED 
CMAKE_INSTALL_NAME_TOOL) 92 | 93 | # Setup iOS platform unless specified manually with IOS_PLATFORM 94 | if (NOT DEFINED IOS_PLATFORM) 95 | set (IOS_PLATFORM "OS") 96 | endif (NOT DEFINED IOS_PLATFORM) 97 | set (IOS_PLATFORM ${IOS_PLATFORM} CACHE STRING "Type of iOS Platform") 98 | 99 | # Check the platform selection and setup for developer root 100 | if (${IOS_PLATFORM} STREQUAL "OS") 101 | set (IOS_PLATFORM_LOCATION "iPhoneOS.platform") 102 | 103 | # This causes the installers to properly locate the output libraries 104 | set (CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphoneos") 105 | elseif (${IOS_PLATFORM} STREQUAL "SIMULATOR") 106 | set (IOS_PLATFORM_LOCATION "iPhoneSimulator.platform") 107 | 108 | # This causes the installers to properly locate the output libraries 109 | set (CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphonesimulator") 110 | else (${IOS_PLATFORM} STREQUAL "OS") 111 | message (FATAL_ERROR "Unsupported IOS_PLATFORM value selected. Please choose OS or SIMULATOR") 112 | endif (${IOS_PLATFORM} STREQUAL "OS") 113 | 114 | # Setup iOS developer location unless specified manually with CMAKE_IOS_DEVELOPER_ROOT 115 | # Note Xcode 4.3 changed the installation location, choose the most recent one available 116 | set (XCODE_POST_43_ROOT "/Applications/Xcode.app/Contents/Developer/Platforms/${IOS_PLATFORM_LOCATION}/Developer") 117 | set (XCODE_PRE_43_ROOT "/Developer/Platforms/${IOS_PLATFORM_LOCATION}/Developer") 118 | if (NOT DEFINED CMAKE_IOS_DEVELOPER_ROOT) 119 | if (EXISTS ${XCODE_POST_43_ROOT}) 120 | set (CMAKE_IOS_DEVELOPER_ROOT ${XCODE_POST_43_ROOT}) 121 | elseif(EXISTS ${XCODE_PRE_43_ROOT}) 122 | set (CMAKE_IOS_DEVELOPER_ROOT ${XCODE_PRE_43_ROOT}) 123 | endif (EXISTS ${XCODE_POST_43_ROOT}) 124 | endif (NOT DEFINED CMAKE_IOS_DEVELOPER_ROOT) 125 | set (CMAKE_IOS_DEVELOPER_ROOT ${CMAKE_IOS_DEVELOPER_ROOT} CACHE PATH "Location of iOS Platform") 126 | 127 | # Find and use the most recent iOS sdk unless specified manually with CMAKE_IOS_SDK_ROOT 128 | if (NOT DEFINED 
CMAKE_IOS_SDK_ROOT) 129 | file (GLOB _CMAKE_IOS_SDKS "${CMAKE_IOS_DEVELOPER_ROOT}/SDKs/*") 130 | if (_CMAKE_IOS_SDKS) 131 | list (SORT _CMAKE_IOS_SDKS) 132 | list (REVERSE _CMAKE_IOS_SDKS) 133 | list (GET _CMAKE_IOS_SDKS 0 CMAKE_IOS_SDK_ROOT) 134 | else (_CMAKE_IOS_SDKS) 135 | message (FATAL_ERROR "No iOS SDK's found in default search path ${CMAKE_IOS_DEVELOPER_ROOT}. Manually set CMAKE_IOS_SDK_ROOT or install the iOS SDK.") 136 | endif (_CMAKE_IOS_SDKS) 137 | message (STATUS "Toolchain using default iOS SDK: ${CMAKE_IOS_SDK_ROOT}") 138 | endif (NOT DEFINED CMAKE_IOS_SDK_ROOT) 139 | set (CMAKE_IOS_SDK_ROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Location of the selected iOS SDK") 140 | 141 | # Set the sysroot default to the most recent SDK 142 | set (CMAKE_OSX_SYSROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Sysroot used for iOS support") 143 | 144 | # set the architecture for iOS 145 | # NOTE: Currently both ARCHS_STANDARD_32_BIT and ARCHS_UNIVERSAL_IPHONE_OS set armv7 only, so set both manually 146 | if (${IOS_PLATFORM} STREQUAL "OS") 147 | set (IOS_ARCH armv6 armv7) 148 | else (${IOS_PLATFORM} STREQUAL "OS") 149 | set (IOS_ARCH i386) 150 | endif (${IOS_PLATFORM} STREQUAL "OS") 151 | 152 | set (CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE string "Build architecture for iOS") 153 | 154 | # Set the find root to the iOS developer roots and to user defined paths 155 | set (CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_IOS_SDK_ROOT} ${CMAKE_PREFIX_PATH} CACHE string "iOS find search path root") 156 | 157 | # default to searching for frameworks first 158 | set (CMAKE_FIND_FRAMEWORK FIRST) 159 | 160 | # set up the default search directories for frameworks 161 | set (CMAKE_SYSTEM_FRAMEWORK_PATH 162 | ${CMAKE_IOS_SDK_ROOT}/System/Library/Frameworks 163 | ${CMAKE_IOS_SDK_ROOT}/System/Library/PrivateFrameworks 164 | ${CMAKE_IOS_SDK_ROOT}/Developer/Library/Frameworks 165 | ) 166 | 167 | # only search the iOS sdks, not the remainder of the host filesystem 168 | set 
(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY) 169 | set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 170 | set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 171 | 172 | 173 | # This little macro lets you set any XCode specific property 174 | macro (set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE) 175 | set_property (TARGET ${TARGET} PROPERTY XCODE_ATTRIBUTE_${XCODE_PROPERTY} ${XCODE_VALUE}) 176 | endmacro (set_xcode_property) 177 | 178 | 179 | # This macro lets you find executable programs on the host system 180 | macro (find_host_package) 181 | set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 182 | set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER) 183 | set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER) 184 | set (IOS FALSE) 185 | 186 | find_package(${ARGN}) 187 | 188 | set (IOS TRUE) 189 | set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY) 190 | set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 191 | set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 192 | endmacro (find_host_package) 193 | -------------------------------------------------------------------------------- /common/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project(Common) 2 | 3 | file(GLOB FILES *.h *.cpp) 4 | add_library(Common ${FILES}) 5 | include(../cmake/BuildSettings.cmake) 6 | -------------------------------------------------------------------------------- /common/autoresetevent.h: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #ifndef __CPP11OM_AUTO_RESET_EVENT_H__ 7 | #define __CPP11OM_AUTO_RESET_EVENT_H__ 8 | 9 | #include 10 | #include 11 | #include "sema.h" 12 | 13 | 14 | //--------------------------------------------------------- 15 | // AutoResetEvent 16 | 
//--------------------------------------------------------- 17 | class AutoResetEvent 18 | { 19 | private: 20 | // m_status == 1: Event object is signaled. 21 | // m_status == 0: Event object is reset and no threads are waiting. 22 | // m_status == -N: Event object is reset and N threads are waiting. 23 | std::atomic m_status; 24 | DefaultSemaphoreType m_sema; 25 | 26 | public: 27 | AutoResetEvent(int initialStatus = 0) : m_status(initialStatus) 28 | { 29 | assert(initialStatus >= 0 && initialStatus <= 1); 30 | } 31 | 32 | void signal() 33 | { 34 | int oldStatus = m_status.load(std::memory_order_relaxed); 35 | for (;;) // Increment m_status atomically via CAS loop. 36 | { 37 | assert(oldStatus <= 1); 38 | if (oldStatus == 1) 39 | return; // Event object is already signaled. 40 | int newStatus = oldStatus + 1; 41 | if (m_status.compare_exchange_weak(oldStatus, newStatus, std::memory_order_release, std::memory_order_relaxed)) 42 | break; 43 | // The compare-exchange failed, likely because another thread changed m_status. 44 | // oldStatus has been updated. Retry the CAS loop. 45 | } 46 | if (oldStatus < 0) 47 | m_sema.signal(); // Release one waiting thread. 
//---------------------------------------------------------
// AutoResetEventCondVar
// Reference implementation of an auto-reset event using a
// mutex-protected counter and a condition variable.
//---------------------------------------------------------
class AutoResetEventCondVar
{
private:
    // m_status == 1: Event object is signaled.
    // m_status == 0: Event object is reset and no threads are waiting.
    // m_status == -N: Event object is reset and N threads are waiting.
    std::mutex m_mutex;
    int m_status;
    std::condition_variable m_condition;

public:
    AutoResetEventCondVar(int initialStatus = 0) : m_status(initialStatus)
    {
        assert(initialStatus >= 0 && initialStatus <= 1);
    }

    void signal()
    {
        // All state changes happen inside the critical section.
        std::lock_guard<std::mutex> guard(m_mutex);
        const int previous = m_status;
        if (previous == 1)
            return;                     // Already signaled; saturate at 1.
        m_status = previous + 1;
        if (previous < 0)
            m_condition.notify_one();   // Release one waiting thread.
    }

    void wait()
    {
        std::unique_lock<std::mutex> guard(m_mutex);
        const int previous = m_status--;
        assert(previous <= 1);
        if (previous < 1)
        {
            // No stored signal; park on the condition variable.
            m_condition.wait(guard);
        }
    }
};
43 | } 44 | 45 | void wait() 46 | { 47 | std::unique_lock lock(m_mutex); 48 | int oldStatus = m_status; 49 | m_status--; 50 | assert(oldStatus <= 1); 51 | if (oldStatus < 1) 52 | { 53 | m_condition.wait(lock); 54 | } 55 | } 56 | }; 57 | 58 | 59 | #endif // __MCPPT_AUTO_RESET_EVENT_COND_VAR_H__ 60 | -------------------------------------------------------------------------------- /common/benaphore.h: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #ifndef __CPP11OM_BENAPHORE_H__ 7 | #define __CPP11OM_BENAPHORE_H__ 8 | 9 | #include 10 | #include 11 | #include 12 | #include "sema.h" 13 | 14 | 15 | //--------------------------------------------------------- 16 | // NonRecursiveBenaphore 17 | //--------------------------------------------------------- 18 | class NonRecursiveBenaphore 19 | { 20 | private: 21 | std::atomic m_contentionCount; 22 | DefaultSemaphoreType m_sema; 23 | 24 | public: 25 | NonRecursiveBenaphore() : m_contentionCount(0) {} 26 | 27 | void lock() 28 | { 29 | if (m_contentionCount.fetch_add(1, std::memory_order_acquire) > 0) 30 | { 31 | m_sema.wait(); 32 | } 33 | } 34 | 35 | bool tryLock() 36 | { 37 | if (m_contentionCount.load(std::memory_order_relaxed) != 0) 38 | return false; 39 | int expected = 0; 40 | return m_contentionCount.compare_exchange_strong(expected, 1, std::memory_order_acquire); 41 | } 42 | 43 | void unlock() 44 | { 45 | int oldCount = m_contentionCount.fetch_sub(1, std::memory_order_release); 46 | assert(oldCount > 0); 47 | if (oldCount > 1) 48 | { 49 | m_sema.signal(); 50 | } 51 | } 52 | }; 53 | 54 | 55 | //--------------------------------------------------------- 56 | // RecursiveBenaphore 57 | 
//--------------------------------------------------------- 58 | class RecursiveBenaphore 59 | { 60 | private: 61 | std::atomic m_contentionCount; 62 | std::atomic m_owner; 63 | int m_recursion; 64 | DefaultSemaphoreType m_sema; 65 | 66 | public: 67 | RecursiveBenaphore() 68 | : m_contentionCount(0) 69 | // Apple LLVM 6.0 (in Xcode 6.1) refuses to initialize m_owner from a std::thread::id. 70 | // "error: no viable conversion from 'std::__1::__thread_id' to '_Atomic(std::__1::__thread_id)'" 71 | // Using atomic_init (below) in that case. 72 | #if !defined(__llvm__) 73 | , m_owner(std::thread::id()) 74 | #endif 75 | , m_recursion(0) 76 | { 77 | // GCC 4.7.2's libstdc++-v3 doesn't implement atomic_init. 78 | // "warning: inline function 'void std::atomic_init(std::atomic<_ITp>*, _ITp) [with _ITp = std::thread::id]' used but never defined [enabled by default]" 79 | // Using the constructor (above) in that case. 80 | #if defined(__llvm__) 81 | std::atomic_init(&m_owner, std::thread::id()); 82 | #endif 83 | 84 | // If this assert fails on your system, you'll have to replace std::thread::id with a 85 | // more compact, platform-specific thread ID, or just comment the assert and live with 86 | // the extra overhead. 
87 | assert(m_owner.is_lock_free()); 88 | } 89 | 90 | void lock() 91 | { 92 | std::thread::id tid = std::this_thread::get_id(); 93 | if (m_contentionCount.fetch_add(1, std::memory_order_acquire) > 0) 94 | { 95 | if (tid != m_owner.load(std::memory_order_relaxed)) 96 | m_sema.wait(); 97 | } 98 | //--- We are now inside the lock --- 99 | m_owner.store(tid, std::memory_order_relaxed); 100 | m_recursion++; 101 | } 102 | 103 | bool tryLock() 104 | { 105 | std::thread::id tid = std::this_thread::get_id(); 106 | if (m_owner.load(std::memory_order_relaxed) == tid) 107 | { 108 | // Already inside the lock 109 | m_contentionCount.fetch_add(1, std::memory_order_relaxed); 110 | } 111 | else 112 | { 113 | if (m_contentionCount.load(std::memory_order_relaxed) != 0) 114 | return false; 115 | int expected = 0; 116 | if (!m_contentionCount.compare_exchange_strong(expected, 1, std::memory_order_acquire)) 117 | return false; 118 | //--- We are now inside the lock --- 119 | m_owner.store(tid, std::memory_order_relaxed); 120 | } 121 | m_recursion++; 122 | return true; 123 | } 124 | 125 | void unlock() 126 | { 127 | assert(std::this_thread::get_id() == m_owner.load(std::memory_order_relaxed)); 128 | int recur = --m_recursion; 129 | if (recur == 0) 130 | m_owner.store(std::thread::id(), std::memory_order_relaxed); 131 | if (m_contentionCount.fetch_sub(1, std::memory_order_release) > 1) 132 | { 133 | if (recur == 0) 134 | m_sema.signal(); 135 | } 136 | //--- We are now outside the lock --- 137 | } 138 | }; 139 | 140 | 141 | #endif // __CPP11OM_BENAPHORE_H__ 142 | -------------------------------------------------------------------------------- /common/bitfield.h: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | 
//---------------------------------------------------------
// BitFieldMember<>: Used internally by ADD_BITFIELD_MEMBER macro.
// Represents a run of Bits bits starting at Offset inside a word of type T.
// All members are public to simplify compliance with sections 9.0.7 and
// 9.5.1 of the C++11 standard, thereby avoiding undefined behavior.
//---------------------------------------------------------
template <typename T, int Offset, int Bits>
struct BitFieldMember
{
    T value;   // Full storage word shared by every member of the bitfield union.

    static_assert(Offset + Bits <= (int) sizeof(T) * 8, "Member exceeds bitfield boundaries");
    static_assert(Bits < (int) sizeof(T) * 8, "Can't fill entire bitfield with one member");

    static const T Maximum = (T(1) << Bits) - 1;   // Largest value the member can hold.
    static const T Mask = Maximum << Offset;       // This member's bits within the word.
    T maximum() const { return Maximum; }
    T one() const { return T(1) << Offset; }       // Value of one unit, pre-shifted.

    // Read: extract this member's bits, shifted down to bit 0.
    operator T() const
    {
        return (value >> Offset) & Maximum;
    }

    BitFieldMember& operator=(T v)
    {
        assert(v <= Maximum);               // v must fit inside the bitfield member
        value = (value & ~Mask) | (v << Offset);
        return *this;
    }

    BitFieldMember& operator+=(T v)
    {
        assert(T(*this) + v <= Maximum);    // result must fit inside the bitfield member
        value += v << Offset;
        return *this;
    }

    BitFieldMember& operator-=(T v)
    {
        assert(T(*this) >= v);              // result must not underflow
        value -= v << Offset;
        return *this;
    }

    BitFieldMember& operator++() { return *this += 1; }
    BitFieldMember& operator++(int) { return *this += 1; }   // postfix form
    BitFieldMember& operator--() { return *this -= 1; }
    BitFieldMember& operator--(int) { return *this -= 1; }   // postfix form
};
//---------------------------------------------------------
// BitFieldArray<>: Used internally by ADD_BITFIELD_ARRAY macro.
// An array of NumItems packed elements, BitsPerItem bits each, starting
// at BaseOffset inside a word of type T. Elements are accessed through a
// lightweight proxy (Element) returned by operator[].
// All members are public to simplify compliance with sections 9.0.7 and
// 9.5.1 of the C++11 standard, thereby avoiding undefined behavior.
//---------------------------------------------------------
template <typename T, int BaseOffset, int BitsPerItem, int NumItems>
struct BitFieldArray
{
    T value;   // Full storage word shared by every member of the bitfield union.

    static_assert(BaseOffset + BitsPerItem * NumItems <= (int) sizeof(T) * 8, "Array exceeds bitfield boundaries");
    static_assert(BitsPerItem < (int) sizeof(T) * 8, "Can't fill entire bitfield with one array element");

    static const T Maximum = (T(1) << BitsPerItem) - 1;   // Largest value per element.
    T maximum() const { return Maximum; }
    int numItems() const { return NumItems; }

    // Proxy for a single packed element; holds a reference to the word
    // and the element's bit offset.
    class Element
    {
    private:
        T& value;
        int offset;

    public:
        Element(T& value, int offset) : value(value), offset(offset) {}
        T mask() const { return Maximum << offset; }

        // Read: extract this element's bits, shifted down to bit 0.
        operator T() const
        {
            return (value >> offset) & Maximum;
        }

        Element& operator=(T v)
        {
            assert(v <= Maximum);               // v must fit inside the bitfield member
            value = (value & ~mask()) | (v << offset);
            return *this;
        }

        Element& operator+=(T v)
        {
            assert(T(*this) + v <= Maximum);    // result must fit inside the bitfield member
            value += v << offset;
            return *this;
        }

        Element& operator-=(T v)
        {
            assert(T(*this) >= v);              // result must not underflow
            value -= v << offset;
            return *this;
        }

        Element& operator++() { return *this += 1; }
        Element& operator++(int) { return *this += 1; }   // postfix form
        Element& operator--() { return *this -= 1; }
        Element& operator--(int) { return *this -= 1; }   // postfix form
    };

    Element operator[](int i)
    {
        assert(i >= 0 && i < NumItems);   // array index must be in range
        return Element(value, BaseOffset + BitsPerItem * i);
    }

    const Element operator[](int i) const
    {
        assert(i >= 0 && i < NumItems);   // array index must be in range
        return Element(value, BaseOffset + BitsPerItem * i);
    }
};
//---------------------------------------------------------
// Bitfield definition macros.
// BEGIN_BITFIELD_TYPE opens a union whose members (added with
// ADD_BITFIELD_MEMBER / ADD_BITFIELD_ARRAY) all alias one storage word;
// END_BITFIELD_TYPE closes it.
// For usage examples, see RWLock and LockReducedDiningPhilosophers.
// All members are public to simplify compliance with sections 9.0.7 and
// 9.5.1 of the C++11 standard, thereby avoiding undefined behavior.
//---------------------------------------------------------
#define BEGIN_BITFIELD_TYPE(typeName, T) \
    union typeName \
    { \
        struct Wrapper { T value; }; \
        Wrapper wrapper; \
        typeName(T v = 0) { wrapper.value = v; } \
        typeName& operator=(T v) { wrapper.value = v; return *this; } \
        operator T&() { return wrapper.value; } \
        operator T() const { return wrapper.value; } \
        typedef T StorageType;

#define ADD_BITFIELD_MEMBER(memberName, offset, bits) \
        BitFieldMember<StorageType, offset, bits> memberName;

#define ADD_BITFIELD_ARRAY(memberName, offset, bits, numItems) \
        BitFieldArray<StorageType, offset, bits, numItems> memberName;

#define END_BITFIELD_TYPE() \
    };
//--------------------------------------------------------- 21 | // DiningPhiloHelpers 22 | //--------------------------------------------------------- 23 | namespace DiningPhiloHelpers 24 | { 25 | inline int left(int index, int numPhilos) 26 | { 27 | return (index > 0 ? index : numPhilos) - 1; 28 | } 29 | 30 | inline int right(int index, int numPhilos) 31 | { 32 | ++index; 33 | return index < numPhilos ? index : 0; 34 | } 35 | } 36 | 37 | 38 | //--------------------------------------------------------- 39 | // DiningPhilosophers 40 | //--------------------------------------------------------- 41 | class DiningPhilosophers 42 | { 43 | private: 44 | int m_numPhilos; 45 | 46 | // "Box office" using a mutex. 47 | // m_status keeps track of the status of each philosopher (thread). 48 | // 0: Philosopher is thinking 49 | // 1: Philosopher is eating 50 | // 2+: Philosopher is waiting and must not eat before his/her direct neighbors if they have a lower status. 51 | std::mutex m_mutex; 52 | std::vector m_status; 53 | 54 | // "Bouncers" 55 | // Can't use std::vector because DefaultSemaphoreType is not copiable/movable. 56 | std::unique_ptr m_sema; 57 | 58 | int left(int index) const { return DiningPhiloHelpers::left(index, m_numPhilos); } 59 | int right(int index) const { return DiningPhiloHelpers::right(index, m_numPhilos); } 60 | 61 | int neighbor(int index, int step) const 62 | { 63 | assert(step >= 0 && step < m_numPhilos); 64 | index += step; 65 | if (index >= m_numPhilos) 66 | index -= m_numPhilos; 67 | return index; 68 | } 69 | 70 | // We call tryAdjustStatus after a philosopher finishes eating. 71 | // It fans outward (in the direction of step), trying to decrement the status of each neighbor (to target). 72 | bool tryAdjustStatus(int philoIndex, int target, int step) 73 | { 74 | // Should not already have the target status. 
75 | assert(m_status[philoIndex] != target); 76 | if (m_status[philoIndex] == target + 1) 77 | { 78 | // Decrementing this status will bring it to target. 79 | // Make sure the next neighbor doesn't prevent it. 80 | int n = neighbor(philoIndex, step); 81 | assert(m_status[n] != target + 1); // No two neighbors should have equal status > 0. 82 | if (m_status[n] != target) 83 | { 84 | // Decrement it. 85 | m_status[philoIndex] = target; 86 | // If neighbor's status is exactly 1 greater, continue visiting. 87 | if (m_status[n] > target) 88 | tryAdjustStatus(n, target + 1, step); 89 | return true; 90 | } 91 | } 92 | return false; 93 | } 94 | 95 | public: 96 | DiningPhilosophers(int numPhilos) : m_numPhilos(numPhilos) 97 | { 98 | m_status.resize(numPhilos); 99 | m_sema = std::unique_ptr(new DefaultSemaphoreType[numPhilos]); 100 | } 101 | 102 | void beginEating(int philoIndex) 103 | { 104 | int maxNeighborStatus; // Initialized inside lock 105 | 106 | { 107 | std::lock_guard lock(m_mutex); 108 | assert(m_status[philoIndex] == 0); // Must have been thinking 109 | // Establish order relative to direct neighbors. 110 | maxNeighborStatus = std::max(m_status[left(philoIndex)], m_status[right(philoIndex)]); 111 | m_status[philoIndex] = maxNeighborStatus + 1; 112 | // Sanity check. 
113 | for (int i = 0; i < m_numPhilos; i++) 114 | assert(m_status[i] >= 0 && m_status[i] <= m_numPhilos); 115 | } 116 | 117 | if (maxNeighborStatus > 0) 118 | m_sema[philoIndex].wait(); // Neighbor has priority; must wait 119 | } 120 | 121 | void endEating(int philoIndex) 122 | { 123 | int stepFirst = 1; 124 | int firstNeighbor = neighbor(philoIndex, 1); 125 | int secondNeighbor = neighbor(philoIndex, m_numPhilos - 1); 126 | bool firstWillEat; // Initialized inside lock 127 | bool secondWillEat; // Initialized inside lock 128 | 129 | { 130 | std::lock_guard lock(m_mutex); 131 | assert(m_status[philoIndex] == 1); // Must have been eating 132 | m_status[philoIndex] = 0; 133 | // Choose which neighbor to visit first based on priority 134 | if (m_status[firstNeighbor] > m_status[secondNeighbor]) 135 | { 136 | std::swap(firstNeighbor, secondNeighbor); 137 | stepFirst = m_numPhilos - stepFirst; 138 | } 139 | // Adjust neighbor statuses. 140 | firstWillEat = tryAdjustStatus(firstNeighbor, 1, stepFirst); 141 | secondWillEat = tryAdjustStatus(secondNeighbor, 1, m_numPhilos - stepFirst); 142 | // Sanity check. 143 | for (int i = 0; i < m_numPhilos; i++) 144 | assert(m_status[i] >= 0 && m_status[i] <= m_numPhilos); 145 | } 146 | 147 | if (firstWillEat) 148 | m_sema[firstNeighbor].signal(); // Release waiting neighbor 149 | if (secondWillEat) 150 | m_sema[secondNeighbor].signal(); // Release waiting neighbor 151 | } 152 | }; 153 | 154 | 155 | //--------------------------------------------------------- 156 | // LockReducedDiningPhilosophers 157 | // Version of DiningPhilosophers with a lock-free box office. 158 | //--------------------------------------------------------- 159 | class LockReducedDiningPhilosophers 160 | { 161 | private: 162 | int m_numPhilos; 163 | 164 | // Lock-free "box office". 165 | // AllStatus::philos keeps track of the status of each philosopher (thread). 
166 | // 0: Philosopher is thinking 167 | // 1: Philosopher is eating 168 | // 2+: Philosopher is waiting and must not eat before his/her direct neighbors if they have a lower status. 169 | typedef uint32_t IntType; 170 | BEGIN_BITFIELD_TYPE(AllStatus, IntType) 171 | ADD_BITFIELD_ARRAY(philos, 0, 4, 8) // 8 array elements, 4 bits each 172 | END_BITFIELD_TYPE() 173 | std::atomic m_allStatus; 174 | 175 | // "Bouncers" 176 | // Can't use std::vector because DefaultSemaphoreType is not copiable/movable. 177 | std::unique_ptr m_sema; 178 | 179 | int left(int index) const { return DiningPhiloHelpers::left(index, m_numPhilos); } 180 | int right(int index) const { return DiningPhiloHelpers::right(index, m_numPhilos); } 181 | 182 | int neighbor(int index, int step) const 183 | { 184 | assert(step >= 0 && step < m_numPhilos); 185 | index += step; 186 | if (index >= m_numPhilos) 187 | index -= m_numPhilos; 188 | return index; 189 | } 190 | 191 | // We call tryAdjustStatus after a philosopher finishes eating. 192 | // It fans outward (in the direction of step), trying to decrement the status of each neighbor (to target). 193 | bool tryAdjustStatus(AllStatus& allStatus, int philoIndex, int target, int step) const 194 | { 195 | // Should not already have the target status. 196 | assert(allStatus.philos[philoIndex] != target); 197 | if (allStatus.philos[philoIndex] == target + 1) 198 | { 199 | // Decrementing this status will bring it to target. 200 | // Make sure the next neighbor doesn't prevent it. 201 | int n = neighbor(philoIndex, step); 202 | assert(allStatus.philos[n] != target + 1); // No two neighbors should have equal status > 0. 203 | if (allStatus.philos[n] != target) 204 | { 205 | // Decrement it. 206 | allStatus.philos[philoIndex] = target; 207 | // If neighbor's status is exactly 1 greater, continue visiting. 
208 | if (allStatus.philos[n] > IntType(target)) 209 | tryAdjustStatus(allStatus, n, target + 1, step); 210 | return true; 211 | } 212 | } 213 | return false; 214 | } 215 | 216 | public: 217 | LockReducedDiningPhilosophers(int numPhilos) 218 | : m_numPhilos(numPhilos) 219 | , m_allStatus(0) 220 | { 221 | assert(IntType(numPhilos) <= AllStatus().philos.maximum()); 222 | assert(numPhilos < AllStatus().philos.numItems()); 223 | m_sema = std::unique_ptr(new DefaultSemaphoreType[numPhilos]); 224 | } 225 | 226 | void beginEating(int philoIndex) 227 | { 228 | int maxNeighborStatus; // Initialized inside CAS loop. 229 | 230 | AllStatus oldStatus = m_allStatus.load(std::memory_order_relaxed); 231 | for (;;) // Begin CAS loop 232 | { 233 | assert(oldStatus.philos[philoIndex] == 0); // Must have been thinking 234 | // Establish order relative to direct neighbors. 235 | maxNeighborStatus = std::max(oldStatus.philos[left(philoIndex)], oldStatus.philos[right(philoIndex)]); 236 | AllStatus newStatus(oldStatus); 237 | newStatus.philos[philoIndex] = maxNeighborStatus + 1; 238 | // Sanity check. 239 | for (int i = 0; i < m_numPhilos; i++) 240 | assert(newStatus.philos[i] <= IntType(m_numPhilos)); 241 | // CAS until successful. On failure, oldStatus will be updated with the latest value. 242 | if (m_allStatus.compare_exchange_strong(oldStatus, newStatus, std::memory_order_relaxed)) 243 | break; 244 | } 245 | 246 | if (maxNeighborStatus > 0) 247 | m_sema[philoIndex].wait(); // Neighbor has priority; must wait 248 | } 249 | 250 | void endEating(int philoIndex) 251 | { 252 | int stepFirst = 1; 253 | int firstNeighbor = neighbor(philoIndex, 1); 254 | int secondNeighbor = neighbor(philoIndex, m_numPhilos - 1); 255 | bool firstWillEat; // Initialized inside CAS loop. 256 | bool secondWillEat; // Initialized inside CAS loop. 
257 | 258 | AllStatus oldStatus = m_allStatus.load(std::memory_order_relaxed); 259 | for (;;) // Begin CAS loop 260 | { 261 | assert(oldStatus.philos[philoIndex] == 1); // Must have been eating 262 | AllStatus newStatus(oldStatus); 263 | newStatus.philos[philoIndex] = 0; 264 | // Choose which neighbor to visit first based on priority 265 | if (newStatus.philos[firstNeighbor] > newStatus.philos[secondNeighbor]) 266 | { 267 | std::swap(firstNeighbor, secondNeighbor); 268 | stepFirst = m_numPhilos - stepFirst; 269 | } 270 | // Adjust neighbor statuses. 271 | firstWillEat = tryAdjustStatus(newStatus, firstNeighbor, 1, stepFirst); 272 | secondWillEat = tryAdjustStatus(newStatus, secondNeighbor, 1, m_numPhilos - stepFirst); 273 | // Sanity check. 274 | for (int i = 0; i < m_numPhilos; i++) 275 | assert(newStatus.philos[i] <= IntType(m_numPhilos)); 276 | // CAS until successful. On failure, oldStatus will be updated with the latest value. 277 | if (m_allStatus.compare_exchange_strong(oldStatus, newStatus, std::memory_order_relaxed)) 278 | break; 279 | } 280 | 281 | if (firstWillEat) 282 | m_sema[firstNeighbor].signal(); // Release waiting neighbor 283 | if (secondWillEat) 284 | m_sema[secondNeighbor].signal(); // Release waiting neighbor 285 | } 286 | }; 287 | 288 | 289 | typedef LockReducedDiningPhilosophers DefaultDiningPhilosophersType; 290 | 291 | 292 | #endif // __CPP11OM_DINING_PHILOSOPHERS_H__ 293 | -------------------------------------------------------------------------------- /common/inmemorylogger.cpp: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #include 7 | #include 8 | #include "inmemorylogger.h" 9 | 10 | 11 | InMemoryLogger::InMemoryLogger() 12 | : m_head(new Page) 
13 | , m_tail(m_head.get()) 14 | { 15 | } 16 | 17 | InMemoryLogger::Event* InMemoryLogger::allocateEventFromNewPage() 18 | { 19 | std::lock_guard lock(m_mutex); 20 | // Double-checked locking: 21 | // Check again whether the current page is full. Another thread may have called 22 | // allocateEventFromNewPage and created a new page by the time we get take the lock. 23 | Page* oldTail = m_tail.load(std::memory_order_relaxed); 24 | if (oldTail->index.load(std::memory_order_relaxed) < EVENTS_PER_PAGE) 25 | { 26 | int index = oldTail->index.fetch_add(1, std::memory_order_relaxed); 27 | // Yes! We got a slot on this page. 28 | if (index < EVENTS_PER_PAGE) 29 | return &oldTail->events[index]; 30 | } 31 | 32 | // OK, we're definitely out of space. It's up to us to allocate a new page. 33 | std::unique_ptr newTail(new Page); 34 | Page* page = newTail.get(); 35 | // Reserve the first slot. Relaxed is fine because there will be a release/ 36 | // consume relationship via m_tail before any other threads access the index. 37 | page->index.store(1, std::memory_order_relaxed); 38 | // A plain non-atomic move to oldTail->next is fine because there are no other writers here, 39 | // and nobody is supposed to read the logged contents until all logging is complete. 40 | oldTail->next = std::move(newTail); 41 | // m_tail must be written atomically because it is read concurrently from other threads. 42 | // We also use release/consume semantics so that its constructed contents are visible to other threads. 43 | // Again, very much like the double-checked locking pattern. 44 | m_tail.store(page, std::memory_order_release); 45 | 46 | // Return the reserved slot. 
47 | return &page->events[0]; 48 | } 49 | -------------------------------------------------------------------------------- /common/inmemorylogger.h: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #ifndef __CPP11OM_IN_MEMORY_LOGGER_H__ 7 | #define __CPP11OM_IN_MEMORY_LOGGER_H__ 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | 15 | //--------------------------------------------------------- 16 | // InMemberLogger 17 | // Logs an unbounded number of generic events. 18 | // Each event has a const char* message and a size_t param. 19 | // log() is usually lock-free, except when it's time allocate a new Page. 20 | // Iterator should only be used after logging is complete. 21 | // Useful for post-mortem debugging and for validating tests, as DiningPhilosopherTester does. 22 | //--------------------------------------------------------- 23 | class InMemoryLogger 24 | { 25 | public: 26 | struct Event 27 | { 28 | std::thread::id tid; 29 | const char* msg; 30 | size_t param; 31 | 32 | Event() : msg(nullptr), param(0) {} 33 | }; 34 | 35 | private: 36 | static const int EVENTS_PER_PAGE = 16384; 37 | 38 | struct Page 39 | { 40 | std::unique_ptr next; 41 | std::atomic index; // This can exceed EVENTS_PER_PAGE, but it's harmless. Just means page is full. 42 | Event events[EVENTS_PER_PAGE]; 43 | 44 | Page() : index(0) {} 45 | }; 46 | 47 | // Store events in a linked list of pages. 48 | // Mutex is only locked when it's time to allocate a new page. 
49 | std::mutex m_mutex; 50 | std::unique_ptr m_head; 51 | std::atomic m_tail; 52 | 53 | Event* allocateEventFromNewPage(); 54 | 55 | public: 56 | InMemoryLogger(); 57 | 58 | void log(const char* msg, size_t param = 0) 59 | { 60 | std::atomic_signal_fence(std::memory_order_seq_cst); // Compiler barrier 61 | // On weak CPUs and current C++ compilers, memory_order_consume costs the same as acquire. :( 62 | // (If you don't like that, you can probably demote this load to relaxed and get away with it. 63 | // Technically, you'd be violating the spec, but in practice it will likely work. Just 64 | // inspect the assembly and make sure there is a data dependency between m_tail.load and 65 | // both subsequent uses of page, and you're golden. The only way I can imagine the dependency 66 | // chain being broken is if the compiler knows the addresses that will be allocated 67 | // in allocateEventFromNewPage at runtime, which is a huuuuuuuuuge leap of the imagination.) 68 | // http://preshing.com/20140709/the-purpose-of-memory_order_consume-in-cpp11 69 | Page* page = m_tail.load(std::memory_order_consume); 70 | Event* evt; 71 | int index = page->index.fetch_add(1, std::memory_order_relaxed); 72 | if (index < EVENTS_PER_PAGE) 73 | evt = &page->events[index]; 74 | else 75 | evt = allocateEventFromNewPage(); // Double-checked locking is performed inside here. 76 | evt->tid = std::this_thread::get_id(); 77 | evt->msg = msg; 78 | evt->param = param; 79 | std::atomic_signal_fence(std::memory_order_seq_cst); // Compiler barrier 80 | } 81 | 82 | // Iterators are meant to be used only after all logging is complete. 
83 | friend class Iterator; 84 | class Iterator 85 | { 86 | private: 87 | Page* m_page; 88 | int m_index; 89 | 90 | public: 91 | Iterator(Page* p, int i) : m_page(p), m_index(i) {} 92 | 93 | Iterator& operator++() 94 | { 95 | m_index++; 96 | if (m_index >= EVENTS_PER_PAGE) 97 | { 98 | Page* next = m_page->next.get(); 99 | if (next) 100 | { 101 | m_page = next; 102 | m_index = 0; 103 | } 104 | else 105 | { 106 | m_index = m_page->index; 107 | } 108 | } 109 | return *this; 110 | } 111 | 112 | bool operator!=(const Iterator& other) const 113 | { 114 | return (m_page != other.m_page) || (m_index != other.m_index); 115 | } 116 | 117 | const Event& operator*() const 118 | { 119 | return m_page->events[m_index]; 120 | } 121 | }; 122 | 123 | Iterator begin() 124 | { 125 | return Iterator(m_head.get(), 0); 126 | } 127 | 128 | Iterator end() 129 | { 130 | Page* tail = m_tail.load(std::memory_order_relaxed); 131 | return Iterator(tail, tail->index); 132 | } 133 | }; 134 | 135 | 136 | #endif // __CPP11OM_IN_MEMORY_LOGGER_H__ 137 | -------------------------------------------------------------------------------- /common/rwlock.h: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #ifndef __CPP11OM_RWLOCK_H__ 7 | #define __CPP11OM_RWLOCK_H__ 8 | 9 | #include 10 | #include 11 | #include 12 | #include "sema.h" 13 | #include "bitfield.h" 14 | 15 | 16 | //--------------------------------------------------------- 17 | // NonRecursiveRWLock 18 | //--------------------------------------------------------- 19 | class NonRecursiveRWLock 20 | { 21 | private: 22 | BEGIN_BITFIELD_TYPE(Status, uint32_t) 23 | ADD_BITFIELD_MEMBER(readers, 0, 10) 24 | ADD_BITFIELD_MEMBER(waitToRead, 10, 10) 25 | 
ADD_BITFIELD_MEMBER(writers, 20, 10) 26 | END_BITFIELD_TYPE() 27 | 28 | std::atomic m_status; 29 | DefaultSemaphoreType m_readSema; 30 | DefaultSemaphoreType m_writeSema; 31 | 32 | public: 33 | NonRecursiveRWLock() : m_status(0) {} 34 | 35 | void lockReader() 36 | { 37 | Status oldStatus = m_status.load(std::memory_order_relaxed); 38 | Status newStatus; 39 | do 40 | { 41 | newStatus = oldStatus; 42 | if (oldStatus.writers > 0) 43 | { 44 | newStatus.waitToRead++; 45 | } 46 | else 47 | { 48 | newStatus.readers++; 49 | } 50 | // CAS until successful. On failure, oldStatus will be updated with the latest value. 51 | } 52 | while (!m_status.compare_exchange_weak(oldStatus, newStatus, 53 | std::memory_order_acquire, std::memory_order_relaxed)); 54 | 55 | if (oldStatus.writers > 0) 56 | { 57 | m_readSema.wait(); 58 | } 59 | } 60 | 61 | void unlockReader() 62 | { 63 | Status oldStatus = m_status.fetch_sub(Status().readers.one(), std::memory_order_release); 64 | assert(oldStatus.readers > 0); 65 | if (oldStatus.readers == 1 && oldStatus.writers > 0) 66 | { 67 | m_writeSema.signal(); 68 | } 69 | } 70 | 71 | void lockWriter() 72 | { 73 | Status oldStatus = m_status.fetch_add(Status().writers.one(), std::memory_order_acquire); 74 | assert(oldStatus.writers + 1 <= Status().writers.maximum()); 75 | if (oldStatus.readers > 0 || oldStatus.writers > 0) 76 | { 77 | m_writeSema.wait(); 78 | } 79 | } 80 | 81 | void unlockWriter() 82 | { 83 | Status oldStatus = m_status.load(std::memory_order_relaxed); 84 | Status newStatus; 85 | uint32_t waitToRead = 0; 86 | do 87 | { 88 | assert(oldStatus.readers == 0); 89 | newStatus = oldStatus; 90 | newStatus.writers--; 91 | waitToRead = oldStatus.waitToRead; 92 | if (waitToRead > 0) 93 | { 94 | newStatus.waitToRead = 0; 95 | newStatus.readers = waitToRead; 96 | } 97 | // CAS until successful. On failure, oldStatus will be updated with the latest value. 
98 | } 99 | while (!m_status.compare_exchange_weak(oldStatus, newStatus, 100 | std::memory_order_release, std::memory_order_relaxed)); 101 | 102 | if (waitToRead > 0) 103 | { 104 | m_readSema.signal(waitToRead); 105 | } 106 | else if (oldStatus.writers > 1) 107 | { 108 | m_writeSema.signal(); 109 | } 110 | } 111 | }; 112 | 113 | 114 | //--------------------------------------------------------- 115 | // ReadLockGuard 116 | //--------------------------------------------------------- 117 | template 118 | class ReadLockGuard 119 | { 120 | private: 121 | LockType& m_lock; 122 | 123 | public: 124 | ReadLockGuard(LockType& lock) : m_lock(lock) 125 | { 126 | m_lock.lockReader(); 127 | } 128 | 129 | ~ReadLockGuard() 130 | { 131 | m_lock.unlockReader(); 132 | } 133 | }; 134 | 135 | 136 | //--------------------------------------------------------- 137 | // WriteLockGuard 138 | //--------------------------------------------------------- 139 | template 140 | class WriteLockGuard 141 | { 142 | private: 143 | LockType& m_lock; 144 | 145 | public: 146 | WriteLockGuard(LockType& lock) : m_lock(lock) 147 | { 148 | m_lock.lockWriter(); 149 | } 150 | 151 | ~WriteLockGuard() 152 | { 153 | m_lock.unlockWriter(); 154 | } 155 | }; 156 | 157 | 158 | #endif // __CPP11OM_RWLOCK_H__ 159 | -------------------------------------------------------------------------------- /common/sema.h: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #ifndef __CPP11OM_SEMAPHORE_H__ 7 | #define __CPP11OM_SEMAPHORE_H__ 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | 14 | #if defined(_WIN32) 15 | //--------------------------------------------------------- 16 | // Semaphore (Windows) 17 | 
#if defined(_WIN32)
//---------------------------------------------------------
// Semaphore (Windows)
//---------------------------------------------------------

// Avoid including windows.h in a header; we only need a handful of
// items, so we'll redeclare them here (this is relatively safe since
// the API generally has to remain stable between Windows versions).
// I know this is an ugly hack but it still beats polluting the global
// namespace with thousands of generic names or adding a .cpp for nothing.
extern "C" {
    struct _SECURITY_ATTRIBUTES;
    __declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName);
    __declspec(dllimport) int __stdcall CloseHandle(void* hObject);
    __declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds);
    __declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount);
}

class Semaphore
{
private:
    void* m_hSema;  // HANDLE to the kernel semaphore

    Semaphore(const Semaphore& other) = delete;
    Semaphore& operator=(const Semaphore& other) = delete;

public:
    Semaphore(int initialCount = 0)
    {
        assert(initialCount >= 0);
        const long maxLong = 0x7fffffff;
        m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
    }

    ~Semaphore()
    {
        CloseHandle(m_hSema);
    }

    void wait()
    {
        const unsigned long infinite = 0xffffffff;  // INFINITE
        WaitForSingleObject(m_hSema, infinite);
    }

    void signal(int count = 1)
    {
        ReleaseSemaphore(m_hSema, count, nullptr);
    }
};


#elif defined(__MACH__)
//---------------------------------------------------------
// Semaphore (Apple iOS and OSX)
// Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
//---------------------------------------------------------

#include <mach/mach.h>

class Semaphore
{
private:
    semaphore_t m_sema;

    Semaphore(const Semaphore& other) = delete;
    Semaphore& operator=(const Semaphore& other) = delete;

public:
    Semaphore(int initialCount = 0)
    {
        assert(initialCount >= 0);
        semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
    }

    ~Semaphore()
    {
        semaphore_destroy(mach_task_self(), m_sema);
    }

    void wait()
    {
        semaphore_wait(m_sema);
    }

    void signal()
    {
        semaphore_signal(m_sema);
    }

    void signal(int count)
    {
        // Mach has no multi-release primitive; signal one waiter at a time.
        while (count-- > 0)
        {
            semaphore_signal(m_sema);
        }
    }
};


#elif defined(__unix__)
//---------------------------------------------------------
// Semaphore (POSIX, Linux)
//---------------------------------------------------------

#include <semaphore.h>

class Semaphore
{
private:
    sem_t m_sema;

    Semaphore(const Semaphore& other) = delete;
    Semaphore& operator=(const Semaphore& other) = delete;

public:
    Semaphore(int initialCount = 0)
    {
        assert(initialCount >= 0);
        sem_init(&m_sema, 0, initialCount);
    }

    ~Semaphore()
    {
        sem_destroy(&m_sema);
    }

    void wait()
    {
        // Retry on EINTR: sem_wait can be interrupted by signals (e.g. under a debugger).
        // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
        int rc;
        do
        {
            rc = sem_wait(&m_sema);
        }
        while (rc == -1 && errno == EINTR);
    }

    void signal()
    {
        sem_post(&m_sema);
    }

    void signal(int count)
    {
        // sem_post releases one waiter per call.
        while (count-- > 0)
        {
            sem_post(&m_sema);
        }
    }
};


#else

#error Unsupported platform!

#endif
170 | 171 | #endif 172 | 173 | 174 | //--------------------------------------------------------- 175 | // LightweightSemaphore 176 | //--------------------------------------------------------- 177 | class LightweightSemaphore 178 | { 179 | public: 180 | // The underlying semaphores are limited to int-sized counts, 181 | // but there's no reason we can't scale higher on platforms with 182 | // a wider size_t than int -- the only counts we pass on to the 183 | // underlying semaphores are the number of waiting threads, which 184 | // will always fit in an int for all platforms regardless of our 185 | // high-level count. 186 | typedef std::make_signed::type ssize_t; 187 | 188 | private: 189 | std::atomic m_count; 190 | Semaphore m_sema; 191 | 192 | void waitWithPartialSpinning() 193 | { 194 | ssize_t oldCount; 195 | // Is there a better way to set the initial spin count? 196 | // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC, 197 | // as threads start hitting the kernel semaphore. 198 | int spin = 10000; 199 | while (spin--) 200 | { 201 | oldCount = m_count.load(std::memory_order_relaxed); 202 | if ((oldCount > 0) && m_count.compare_exchange_strong(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) 203 | return; 204 | std::atomic_signal_fence(std::memory_order_acquire); // Prevent the compiler from collapsing the loop. 205 | } 206 | oldCount = m_count.fetch_sub(1, std::memory_order_acquire); 207 | if (oldCount <= 0) 208 | { 209 | m_sema.wait(); 210 | } 211 | } 212 | 213 | ssize_t waitManyWithPartialSpinning(ssize_t max) 214 | { 215 | assert(max > 0); 216 | ssize_t oldCount; 217 | int spin = 10000; 218 | while (spin--) 219 | { 220 | oldCount = m_count.load(std::memory_order_relaxed); 221 | if (oldCount > 0) 222 | { 223 | ssize_t newCount = oldCount > max ? 
oldCount - max : 0; 224 | if (m_count.compare_exchange_strong(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) 225 | return oldCount - newCount; 226 | } 227 | std::atomic_signal_fence(std::memory_order_acquire); 228 | } 229 | oldCount = m_count.fetch_sub(1, std::memory_order_acquire); 230 | if (oldCount <= 0) 231 | m_sema.wait(); 232 | if (max > 1) 233 | return 1 + tryWaitMany(max - 1); 234 | return 1; 235 | } 236 | 237 | public: 238 | LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount) 239 | { 240 | assert(initialCount >= 0); 241 | } 242 | 243 | bool tryWait() 244 | { 245 | ssize_t oldCount = m_count.load(std::memory_order_relaxed); 246 | while (oldCount > 0) 247 | { 248 | if (m_count.compare_exchange_weak(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) 249 | return true; 250 | } 251 | return false; 252 | } 253 | 254 | void wait() 255 | { 256 | if (!tryWait()) 257 | waitWithPartialSpinning(); 258 | } 259 | 260 | // Acquires between 0 and (greedily) max, inclusive 261 | ssize_t tryWaitMany(ssize_t max) 262 | { 263 | assert(max >= 0); 264 | ssize_t oldCount = m_count.load(std::memory_order_relaxed); 265 | while (oldCount > 0) 266 | { 267 | ssize_t newCount = oldCount > max ? oldCount - max : 0; 268 | if (m_count.compare_exchange_weak(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) 269 | return oldCount - newCount; 270 | } 271 | return 0; 272 | } 273 | 274 | // Acquires at least one, and (greedily) at most max 275 | ssize_t waitMany(ssize_t max) 276 | { 277 | assert(max >= 0); 278 | ssize_t result = tryWaitMany(max); 279 | if (result == 0 && max > 0) 280 | result = waitManyWithPartialSpinning(max); 281 | return result; 282 | } 283 | 284 | void signal(ssize_t count = 1) 285 | { 286 | assert(count >= 0); 287 | ssize_t oldCount = m_count.fetch_add(count, std::memory_order_release); 288 | ssize_t toRelease = -oldCount < count ? 
-oldCount : count; 289 | if (toRelease > 0) 290 | { 291 | m_sema.signal((int)toRelease); 292 | } 293 | } 294 | 295 | ssize_t availableApprox() const 296 | { 297 | ssize_t count = m_count.load(std::memory_order_relaxed); 298 | return count > 0 ? count : 0; 299 | } 300 | }; 301 | 302 | 303 | typedef LightweightSemaphore DefaultSemaphoreType; 304 | 305 | 306 | #endif // __CPP11OM_SEMAPHORE_H__ 307 | -------------------------------------------------------------------------------- /tests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.6) 2 | set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE INTERNAL "limited configs") 3 | project(Tests) 4 | 5 | set(MACOSX_BUNDLE_GUI_IDENTIFIER "com.mycompany.\${PRODUCT_NAME:identifier}") 6 | file(GLOB FILES *.cpp *.h) 7 | add_executable(Tests MACOSX_BUNDLE ${FILES}) 8 | include(../cmake/BuildSettings.cmake) 9 | 10 | add_subdirectory(../common common) 11 | include_directories(../common) 12 | target_link_libraries(${PROJECT_NAME} Common) 13 | -------------------------------------------------------------------------------- /tests/autoreseteventtester.cpp: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "autoresetevent.h" 12 | 13 | 14 | //--------------------------------------------------------- 15 | // AutoResetEventTester 16 | //--------------------------------------------------------- 17 | class AutoResetEventTester 18 | { 19 | private: 20 | std::unique_ptr m_events; 21 | std::atomic m_counter; 22 | int m_threadCount; 23 | int m_iterationCount; 24 | std::atomic m_success; 25 | 26 | 
public: 27 | AutoResetEventTester() 28 | : m_counter(0) 29 | , m_threadCount(0) 30 | , m_success(0) 31 | {} 32 | 33 | void kickThreads(int exceptThread) 34 | { 35 | for (int i = 0; i < m_threadCount; i++) 36 | { 37 | if (i != exceptThread) 38 | m_events[i].signal(); 39 | } 40 | } 41 | 42 | void threadFunc(int threadNum) 43 | { 44 | std::random_device rd; 45 | std::mt19937 randomEngine(rd()); 46 | bool isKicker = (threadNum == 0); 47 | 48 | for (int i = 0; i < m_iterationCount; i++) 49 | { 50 | if (isKicker) 51 | { 52 | m_counter.store(m_threadCount, std::memory_order_relaxed); 53 | kickThreads(threadNum); 54 | } 55 | else 56 | { 57 | m_events[threadNum].wait(); 58 | } 59 | 60 | // Decrement shared counter 61 | int previous = m_counter.fetch_sub(1, std::memory_order_relaxed); 62 | if (previous < 1) 63 | m_success.store(false, std::memory_order_relaxed); 64 | 65 | // Last one to decrement becomes the kicker next time 66 | isKicker = (previous == 1); 67 | 68 | // Do a random amount of work in the range [0, 10) units, biased towards low numbers. 
69 | float f = std::uniform_real_distribution(0.f, 1.f)(randomEngine); 70 | int workUnits = (int) (f * f * 10); 71 | for (int j = 1; j < workUnits; j++) 72 | randomEngine(); // Do one work unit 73 | } 74 | } 75 | 76 | bool test(int threadCount, int iterationCount) 77 | { 78 | m_events = std::unique_ptr(new AutoResetEvent[threadCount]); 79 | m_counter.store(0, std::memory_order_relaxed); 80 | m_threadCount = threadCount; 81 | m_iterationCount = iterationCount; 82 | m_success.store(true, std::memory_order_relaxed); 83 | 84 | std::vector threads; 85 | for (int i = 0; i < threadCount; i++) 86 | threads.emplace_back(&AutoResetEventTester::threadFunc, this, i); 87 | for (std::thread& t : threads) 88 | t.join(); 89 | 90 | return m_success.load(std::memory_order_relaxed); 91 | } 92 | }; 93 | 94 | bool testAutoResetEvent() 95 | { 96 | AutoResetEventTester tester; 97 | return tester.test(4, 1000000); 98 | } 99 | -------------------------------------------------------------------------------- /tests/benaphoretester.cpp: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #include 7 | #include 8 | #include "benaphore.h" 9 | 10 | 11 | //--------------------------------------------------------- 12 | // BenaphoreTester 13 | //--------------------------------------------------------- 14 | class BenaphoreTester 15 | { 16 | private: 17 | int m_iterationCount; 18 | NonRecursiveBenaphore m_mutex; 19 | int m_value; 20 | 21 | public: 22 | BenaphoreTester() : m_iterationCount(0), m_value(0) {} 23 | 24 | void threadFunc(int threadNum) 25 | { 26 | for (int i = 0; i < m_iterationCount; i++) 27 | { 28 | m_mutex.lock(); 29 | // std::lock_guard lock(m_mutex); 30 | m_value++; 31 | m_mutex.unlock(); 32 | } 33 | 
} 34 | 35 | bool test(int threadCount, int iterationCount) 36 | { 37 | m_iterationCount = iterationCount; 38 | m_value = 0; 39 | 40 | std::vector threads; 41 | for (int i = 0; i < threadCount; i++) 42 | threads.emplace_back(&BenaphoreTester::threadFunc, this, i); 43 | for (std::thread& t : threads) 44 | t.join(); 45 | 46 | return (m_value == threadCount * iterationCount); 47 | } 48 | }; 49 | 50 | bool testBenaphore() 51 | { 52 | BenaphoreTester tester; 53 | return tester.test(4, 400000); 54 | } 55 | -------------------------------------------------------------------------------- /tests/diningphilosophertester.cpp: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "diningphilosophers.h" 12 | #include "inmemorylogger.h" 13 | 14 | 15 | //--------------------------------------------------------- 16 | // DiningPhilosopherTester 17 | //--------------------------------------------------------- 18 | class DiningPhilosopherTester 19 | { 20 | private: 21 | InMemoryLogger m_logger; 22 | std::unique_ptr m_philosophers; 23 | int m_iterationCount; 24 | 25 | public: 26 | DiningPhilosopherTester() : m_iterationCount(0) {} 27 | 28 | void threadFunc(int philoIndex) 29 | { 30 | std::random_device rd; 31 | std::mt19937 randomEngine(rd()); 32 | 33 | for (int i = 0; i < m_iterationCount; i++) 34 | { 35 | // Do a random amount of work. 36 | int workUnits = std::uniform_int_distribution(0, 100)(randomEngine); 37 | for (int j = 0; j < workUnits; j++) 38 | randomEngine(); 39 | 40 | m_philosophers->beginEating(philoIndex); 41 | m_logger.log("eat", philoIndex); 42 | 43 | // Do a random amount of work. 
44 | workUnits = std::uniform_int_distribution(0, 5000)(randomEngine); 45 | for (int j = 0; j < workUnits; j++) 46 | randomEngine(); 47 | 48 | m_logger.log("think", philoIndex); 49 | m_philosophers->endEating(philoIndex); 50 | } 51 | } 52 | 53 | bool test(int numPhilos, int iterationCount) 54 | { 55 | m_iterationCount = iterationCount; 56 | m_philosophers = std::unique_ptr(new DefaultDiningPhilosophersType(numPhilos)); 57 | 58 | std::vector threads; 59 | for (int i = 0; i < numPhilos; i++) 60 | threads.emplace_back(&DiningPhilosopherTester::threadFunc, this, i); 61 | for (std::thread& t : threads) 62 | t.join(); 63 | 64 | // Replay event log to make sure it's OK. 65 | std::vector isEating(numPhilos); 66 | bool ok = true; 67 | for (const auto& evt : m_logger) 68 | { 69 | int philoIndex = (int) evt.param; 70 | if (std::strcmp(evt.msg, "eat") == 0) 71 | { 72 | if (isEating[philoIndex]) 73 | ok = false; 74 | if (isEating[DiningPhiloHelpers::left(philoIndex, numPhilos)] != 0) 75 | ok = false; 76 | if (isEating[DiningPhiloHelpers::right(philoIndex, numPhilos)] != 0) 77 | ok = false; 78 | isEating[philoIndex] = 1; 79 | } 80 | else 81 | { 82 | assert(std::strcmp(evt.msg, "think") == 0); 83 | if (!isEating[philoIndex]) 84 | ok = false; 85 | isEating[philoIndex] = 0; 86 | } 87 | } 88 | for (char s : isEating) 89 | { 90 | if (s) 91 | ok = false; 92 | } 93 | 94 | m_philosophers = nullptr; 95 | return ok; 96 | } 97 | }; 98 | 99 | bool testDiningPhilosophers() 100 | { 101 | DiningPhilosopherTester tester; 102 | return tester.test(5, 10000); 103 | } 104 | 105 | -------------------------------------------------------------------------------- /tests/main.cpp: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | 
//--------------------------------------------------------- 5 | 6 | #include 7 | #include 8 | 9 | 10 | //--------------------------------------------------------- 11 | // List of tests 12 | //--------------------------------------------------------- 13 | struct TestInfo 14 | { 15 | const char* name; 16 | bool (*testFunc)(); 17 | }; 18 | 19 | bool testBenaphore(); 20 | bool testRecursiveBenaphore(); 21 | bool testAutoResetEvent(); 22 | bool testRWLock(); 23 | bool testRWLockSimple(); 24 | bool testDiningPhilosophers(); 25 | 26 | #define ADD_TEST(name) { #name, name }, 27 | TestInfo g_tests[] = 28 | { 29 | ADD_TEST(testBenaphore) 30 | ADD_TEST(testRecursiveBenaphore) 31 | ADD_TEST(testAutoResetEvent) 32 | ADD_TEST(testRWLock) 33 | ADD_TEST(testRWLockSimple) 34 | ADD_TEST(testDiningPhilosophers) 35 | }; 36 | 37 | //--------------------------------------------------------- 38 | // main 39 | //--------------------------------------------------------- 40 | int main() 41 | { 42 | bool allTestsPassed = true; 43 | 44 | for (const TestInfo& test : g_tests) 45 | { 46 | std::cout << "Running " << test.name << "..."; 47 | 48 | auto start = std::chrono::high_resolution_clock::now(); 49 | bool result = test.testFunc(); 50 | auto end = std::chrono::high_resolution_clock::now(); 51 | 52 | auto millis = std::chrono::duration_cast(end - start).count(); 53 | std::cout << " " << (result ? "passed" : "***FAILED***") << " in " << millis << " ms\n"; 54 | allTestsPassed = allTestsPassed && result; 55 | } 56 | 57 | return allTestsPassed ? 
0 : 1; 58 | } 59 | -------------------------------------------------------------------------------- /tests/recursivebenaphoretester.cpp: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "benaphore.h" 11 | 12 | 13 | //--------------------------------------------------------- 14 | // RecursiveBenaphoreTester 15 | //--------------------------------------------------------- 16 | class RecursiveBenaphoreTester 17 | { 18 | private: 19 | struct ThreadStats 20 | { 21 | int iterations; 22 | int workUnitsComplete; 23 | int amountIncremented; 24 | 25 | ThreadStats() 26 | { 27 | iterations = 0; 28 | workUnitsComplete = 0; 29 | amountIncremented = 0; 30 | } 31 | 32 | ThreadStats& operator+=(const ThreadStats &other) 33 | { 34 | iterations += other.iterations; 35 | workUnitsComplete += other.workUnitsComplete; 36 | amountIncremented += other.amountIncremented; 37 | return *this; 38 | } 39 | }; 40 | 41 | int m_iterationCount; 42 | RecursiveBenaphore m_recursiveMutex; 43 | int m_value; 44 | std::vector m_threadStats; 45 | 46 | public: 47 | RecursiveBenaphoreTester() : m_iterationCount(0), m_value(0) {} 48 | 49 | void threadFunc(int threadNum) 50 | { 51 | std::random_device rd; 52 | std::mt19937 randomEngine(rd()); 53 | ThreadStats localStats; 54 | int lockCount = 0; 55 | int lastValue = 0; 56 | 57 | for (int i = 0; i < m_iterationCount; i++) 58 | { 59 | localStats.iterations++; 60 | 61 | // Do a random amount of work. 
62 | int workUnits = std::uniform_int_distribution<>(0, 3)(randomEngine); 63 | for (int j = 1; j < workUnits; j++) 64 | randomEngine(); // Do one work unit 65 | localStats.workUnitsComplete += workUnits; 66 | 67 | // Consistency check. 68 | if (lockCount > 0) 69 | { 70 | assert(m_value == lastValue); 71 | } 72 | 73 | // Decide what the new lock count should be in the range [0, 4), biased towards low numbers. 74 | float f = std::uniform_real_distribution(0.f, 1.f)(randomEngine); 75 | int desiredLockCount = (int) (f * f * 4); 76 | 77 | // Perform unlocks, if any. 78 | while (lockCount > desiredLockCount) 79 | { 80 | m_recursiveMutex.unlock(); 81 | lockCount--; 82 | } 83 | 84 | // Perform locks, if any. 85 | bool useTryLock = (std::uniform_int_distribution<>(0, 1)(randomEngine) == 0); 86 | while (lockCount < desiredLockCount) 87 | { 88 | if (useTryLock) 89 | { 90 | if (!m_recursiveMutex.tryLock()) 91 | break; 92 | } 93 | else 94 | { 95 | m_recursiveMutex.lock(); 96 | } 97 | lockCount++; 98 | } 99 | 100 | // If locked, increment counter. 101 | if (lockCount > 0) 102 | { 103 | assert((m_value - lastValue) >= 0); 104 | m_value += threadNum + 1; 105 | lastValue = m_value; 106 | localStats.amountIncremented += threadNum + 1; 107 | } 108 | } 109 | 110 | // Release Lock if still holding it. 111 | while (lockCount > 0) 112 | { 113 | m_recursiveMutex.unlock(); 114 | lockCount--; 115 | } 116 | 117 | // Copy statistics. 
118 | m_threadStats[threadNum] = localStats; 119 | } 120 | 121 | bool test(int threadCount, int iterationCount) 122 | { 123 | m_iterationCount = iterationCount; 124 | m_value = 0; 125 | m_threadStats.resize(threadCount); 126 | 127 | std::vector threads; 128 | for (int i = 0; i < threadCount; i++) 129 | threads.emplace_back(&RecursiveBenaphoreTester::threadFunc, this, i); 130 | for (std::thread& t : threads) 131 | t.join(); 132 | 133 | ThreadStats totalStats; 134 | for (const ThreadStats& s : m_threadStats) 135 | totalStats += s; 136 | return (m_value == totalStats.amountIncremented); 137 | } 138 | }; 139 | 140 | bool testRecursiveBenaphore() 141 | { 142 | RecursiveBenaphoreTester tester; 143 | return tester.test(4, 100000); 144 | } 145 | -------------------------------------------------------------------------------- /tests/rwlocktester.cpp: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "rwlock.h" 11 | 12 | 13 | //--------------------------------------------------------- 14 | // RWLockTester 15 | //--------------------------------------------------------- 16 | class RWLockTester 17 | { 18 | private: 19 | static const int SHARED_ARRAY_LENGTH = 8; 20 | int m_shared[SHARED_ARRAY_LENGTH]; 21 | NonRecursiveRWLock m_rwLock; 22 | int m_iterationCount; 23 | std::atomic m_success; 24 | 25 | public: 26 | RWLockTester() 27 | : m_iterationCount(0) 28 | , m_success(false) 29 | {} 30 | 31 | void threadFunc(int threadNum) 32 | { 33 | std::random_device rd; 34 | std::mt19937 randomEngine(rd()); 35 | 36 | for (int i = 0; i < m_iterationCount; i++) 37 | { 38 | // Choose randomly whether to read or write. 
39 | if (std::uniform_int_distribution<>(0, 3)(randomEngine) == 0) 40 | { 41 | // Write an incrementing sequence of numbers (backwards). 42 | int value = std::uniform_int_distribution<>()(randomEngine); 43 | WriteLockGuard guard(m_rwLock); 44 | for (int j = SHARED_ARRAY_LENGTH - 1; j >= 0; j--) 45 | { 46 | m_shared[j] = value--; 47 | } 48 | } 49 | else 50 | { 51 | // Check that the sequence of numbers is incrementing. 52 | bool ok = true; 53 | { 54 | ReadLockGuard guard(m_rwLock); 55 | int value = m_shared[0]; 56 | for (int j = 1; j < SHARED_ARRAY_LENGTH; j++) 57 | { 58 | ok = ok && (++value == m_shared[j]); 59 | } 60 | } 61 | if (!ok) 62 | { 63 | m_success.store(false, std::memory_order_relaxed); 64 | } 65 | } 66 | } 67 | } 68 | 69 | bool test(int threadCount, int iterationCount) 70 | { 71 | m_iterationCount = iterationCount; 72 | for (int j = 0; j < SHARED_ARRAY_LENGTH; j++) 73 | m_shared[j] = j; 74 | m_success.store(true, std::memory_order_relaxed); 75 | 76 | std::vector threads; 77 | for (int i = 0; i < threadCount; i++) 78 | threads.emplace_back(&RWLockTester::threadFunc, this, i); 79 | for (std::thread& t : threads) 80 | t.join(); 81 | 82 | return m_success.load(std::memory_order_relaxed); 83 | } 84 | }; 85 | 86 | bool testRWLock() 87 | { 88 | RWLockTester tester; 89 | return tester.test(4, 1000000); 90 | } 91 | -------------------------------------------------------------------------------- /tests/simplerwlocktester.cpp: -------------------------------------------------------------------------------- 1 | //--------------------------------------------------------- 2 | // For conditions of distribution and use, see 3 | // https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE 4 | //--------------------------------------------------------- 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "rwlock.h" 11 | 12 | 13 | //--------------------------------------------------------- 14 | // SimpleRWLockTester 15 | // Writes less frequently 
and only protects a single int. 16 | //--------------------------------------------------------- 17 | class SimpleRWLockTester 18 | { 19 | private: 20 | NonRecursiveRWLock m_rwLock; 21 | int m_sharedInt; 22 | int m_iterationCount; 23 | std::atomic m_totalWrites; 24 | 25 | public: 26 | SimpleRWLockTester() 27 | : m_iterationCount(0) 28 | , m_totalWrites(0) 29 | {} 30 | 31 | void threadFunc(int threadNum) 32 | { 33 | std::random_device rd; 34 | std::mt19937 randomEngine(rd()); 35 | int writes = 0; 36 | volatile int accumulator = 0; // Prevent compiler from eliminating this variable 37 | 38 | for (int i = 0; i < m_iterationCount; i++) 39 | { 40 | // Choose randomly whether to read or write. 41 | if (std::uniform_int_distribution<>(0, 30)(randomEngine) == 0) 42 | { 43 | WriteLockGuard guard(m_rwLock); 44 | m_sharedInt++; 45 | writes++; 46 | } 47 | else 48 | { 49 | ReadLockGuard guard(m_rwLock); 50 | accumulator += m_sharedInt; 51 | } 52 | } 53 | 54 | m_totalWrites.fetch_add(writes, std::memory_order_relaxed); 55 | } 56 | 57 | bool test(int threadCount, int iterationCount) 58 | { 59 | m_iterationCount = iterationCount; 60 | m_sharedInt = 0; 61 | m_totalWrites.store(0, std::memory_order_relaxed); 62 | 63 | std::vector threads; 64 | for (int i = 0; i < threadCount; i++) 65 | threads.emplace_back(&SimpleRWLockTester::threadFunc, this, i); 66 | for (std::thread& t : threads) 67 | t.join(); 68 | 69 | return (m_sharedInt == m_totalWrites.load(std::memory_order_relaxed)); 70 | } 71 | }; 72 | 73 | bool testRWLockSimple() 74 | { 75 | SimpleRWLockTester tester; 76 | return tester.test(4, 2000000); 77 | } 78 | --------------------------------------------------------------------------------