├── .travis.yml ├── .gitignore ├── CMakeLists.txt ├── appveyor.yml ├── LICENSE ├── tests ├── latch_test.cpp ├── CMakeLists.txt ├── perf_rwlock.sh ├── dump_typeinfo.cpp ├── spinlock_test.cpp ├── compile_test.cpp ├── perf_rwlock.cpp ├── barrier_test.cpp ├── semaphore_test.cpp ├── yamc_testutil.hpp ├── checked_test.cpp └── lock_test.cpp └── include ├── apple_native_mutex.hpp ├── ttas_spin_mutex.hpp ├── yamc_backoff_spin.hpp ├── yamc_latch.hpp ├── yamc_scoped_lock.hpp ├── naive_spin_mutex.hpp ├── yamc_semaphore.hpp ├── yamc_barrier.hpp ├── yamc_rwlock_sched.hpp ├── posix_semaphore.hpp ├── yamc_shared_lock.hpp ├── yamc_lock_validator.hpp ├── win_semaphore.hpp ├── gcd_semaphore.hpp ├── alternate_shared_mutex.hpp ├── alternate_mutex.hpp ├── win_native_mutex.hpp ├── fair_mutex.hpp ├── posix_native_mutex.hpp ├── checked_mutex.hpp └── checked_shared_mutex.hpp /.travis.yml: -------------------------------------------------------------------------------- 1 | language: cpp 2 | 3 | dist: xenial 4 | 5 | jobs: 6 | include: 7 | - os: linux 8 | compiler: gcc 9 | - os: linux 10 | compiler: clang 11 | - os: osx 12 | compiler: clang 13 | osx_image: xcode9.4 14 | 15 | before_script: 16 | - cmake --version 17 | 18 | script: 19 | - mkdir build 20 | - cd build 21 | - cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_TESTING=ON .. 
22 | - make -j2 23 | - tests/dump_sizeof 24 | - tests/dump_layout 25 | - ctest --output-on-failure 26 | 27 | notifications: 28 | email: false 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files 2 | *.slo 3 | *.lo 4 | *.o 5 | *.obj 6 | 7 | # Precompiled Headers 8 | *.gch 9 | *.pch 10 | 11 | # Compiled Dynamic libraries 12 | *.so 13 | *.dylib 14 | *.dll 15 | 16 | # Fortran module files 17 | *.mod 18 | *.smod 19 | 20 | # Compiled Static libraries 21 | *.lai 22 | *.la 23 | *.a 24 | *.lib 25 | 26 | # Executables 27 | *.exe 28 | *.out 29 | *.app 30 | 31 | # CMake.gitignore 32 | CMakeCache.txt 33 | CMakeFiles 34 | CMakeScripts 35 | Testing 36 | Makefile 37 | cmake_install.cmake 38 | install_manifest.txt 39 | compile_commands.json 40 | CTestTestfile.cmake 41 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.2) 2 | 3 | project(yamc C CXX) 4 | 5 | # Requires C++11 6 | set(CMAKE_CXX_STANDARD 11) 7 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 8 | set(CMAKE_CXX_EXTENSIONS OFF) 9 | 10 | option(ENABLE_TESTING "Enable unit testing." 
ON) 11 | 12 | if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang") 13 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic") 14 | endif() 15 | 16 | include_directories(include) 17 | 18 | if (ENABLE_TESTING) 19 | enable_testing() 20 | add_subdirectory(tests) 21 | endif() 22 | 23 | install(DIRECTORY include/ DESTINATION include/yamc/ 24 | FILES_MATCHING PATTERN "*.hpp") 25 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | platform: 2 | - Win32 3 | - x64 4 | 5 | configuration: 6 | - Debug 7 | # - Release 8 | 9 | before_build: 10 | - echo "Running cmake..." 11 | - cd c:\projects\yamc 12 | - cmake --version 13 | - set PATH=C:\Program Files (x86)\MSBuild\14.0\Bin;%PATH% 14 | - set CMAKE_OPTS=-DCMAKE_BUILD_TYPE=RELEASE -DENABLE_TESTING=ON 15 | - if %PLATFORM% == Win32 cmake -G "Visual Studio 14 2015" %CMAKE_OPTS% . 16 | - if %PLATFORM% == x64 cmake -G "Visual Studio 14 2015 Win64" %CMAKE_OPTS% . 
17 | 18 | build: 19 | project: yamc.sln 20 | 21 | test_script: 22 | - tests\%CONFIGURATION%\dump_sizeof 23 | - tests\%CONFIGURATION%\dump_layout 24 | - ctest --output-on-failure 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017-2019 yohhoy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /tests/latch_test.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * latch_test.cpp 3 | */ 4 | #include "gtest/gtest.h" 5 | #include "yamc_latch.hpp" 6 | #include "yamc_testutil.hpp" 7 | 8 | 9 | // latch constructor 10 | TEST(LatchTest, Ctor) 11 | { 12 | EXPECT_NO_THROW(yamc::latch{1}); 13 | } 14 | 15 | // latch::count_down() 16 | TEST(LatchTest, CountDown) 17 | { 18 | yamc::latch latch{1}; 19 | EXPECT_NO_THROW(latch.count_down()); 20 | } 21 | 22 | // latch::count_down(0) 23 | TEST(LatchTest, CountDownZero) 24 | { 25 | yamc::latch latch{1}; 26 | EXPECT_NO_THROW(latch.count_down(0)); 27 | } 28 | 29 | // latch::try_wait() 30 | TEST(LatchTest, TryWait) 31 | { 32 | yamc::latch latch{0}; 33 | EXPECT_TRUE(latch.try_wait()); 34 | } 35 | 36 | // latch::try_wait() failure 37 | TEST(LatchTest, TryWaitFail) 38 | { 39 | yamc::latch latch{1}; 40 | EXPECT_FALSE(latch.try_wait()); 41 | } 42 | 43 | // latch::wait() 44 | TEST(LatchTest, Wait) 45 | { 46 | SETUP_STEPTEST; 47 | yamc::latch latch{1}; 48 | // signal-thread 49 | yamc::test::join_thread thd([&]{ 50 | EXPECT_STEP(1); 51 | EXPECT_NO_THROW(latch.count_down(0)); // no signal 52 | EXPECT_STEP(2); 53 | EXPECT_NO_THROW(latch.count_down()); 54 | }); 55 | // wait-thread 56 | { 57 | EXPECT_NO_THROW(latch.wait()); 58 | EXPECT_STEP(3); 59 | } 60 | } 61 | 62 | // latch::arrive_and_wait 63 | TEST(LatchTest, ArriveAndWait) 64 | { 65 | SETUP_STEPTEST; 66 | yamc::latch latch{3}; // counter=3 67 | // signal-thread 68 | yamc::test::join_thread thd([&]{ 69 | EXPECT_STEP(1); 70 | EXPECT_NO_THROW(latch.arrive_and_wait(2)); // update=2 71 | EXPECT_STEP_RANGE(2, 3); 72 | }); 73 | // wait-thread 74 | { 75 | EXPECT_NO_THROW(latch.arrive_and_wait()); // update=1 76 | EXPECT_STEP_RANGE(2, 3); 77 | } 78 | } 79 | 80 | // latch::max() 81 | TEST(LatchTest, Max) 82 | { 83 | EXPECT_GT((yamc::latch::max)(), 0); 84 | } 85 | 
-------------------------------------------------------------------------------- /include/apple_native_mutex.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * apple_native_mutex.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2019 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 
25 | */ 26 | #ifndef APPLE_NATIVE_MUTEX_HPP_ 27 | #define APPLE_NATIVE_MUTEX_HPP_ 28 | 29 | // macOS/iOS mutex 30 | #include 31 | 32 | 33 | namespace yamc { 34 | 35 | /* 36 | * Native mutex wrapper on macOS/iOS families 37 | * 38 | * - yamc::apple::unfair_lock 39 | * 40 | * https://developer.apple.com/documentation/os/1646466-os_unfair_lock_lock 41 | */ 42 | namespace apple { 43 | 44 | class unfair_lock { 45 | ::os_unfair_lock oslock_ = OS_UNFAIR_LOCK_INIT; 46 | 47 | public: 48 | constexpr unfair_lock() noexcept = default; 49 | ~unfair_lock() = default; 50 | 51 | unfair_lock(const unfair_lock&) = delete; 52 | unfair_lock& operator=(const unfair_lock&) = delete; 53 | 54 | void lock() 55 | { 56 | ::os_unfair_lock_lock(&oslock_); 57 | } 58 | 59 | bool try_lock() 60 | { 61 | return ::os_unfair_lock_trylock(&oslock_); 62 | } 63 | 64 | void unlock() 65 | { 66 | ::os_unfair_lock_unlock(&oslock_); 67 | } 68 | 69 | using native_handle_type = ::os_unfair_lock_t; 70 | native_handle_type native_handle() 71 | { 72 | return &oslock_; 73 | } 74 | }; 75 | 76 | } // namespace apple 77 | } // namespace yamc 78 | 79 | #endif 80 | -------------------------------------------------------------------------------- /include/ttas_spin_mutex.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * ttas_spin_mutex.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2017 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * 
copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef YAMC_TTAS_SPIN_MUTEX_HPP_ 27 | #define YAMC_TTAS_SPIN_MUTEX_HPP_ 28 | 29 | #include 30 | #include "yamc_backoff_spin.hpp" 31 | 32 | 33 | namespace yamc { 34 | 35 | /* 36 | * Test-and-Test-And-Swap(TTAS) spinlock implementation 37 | * 38 | * - yamc::spin_ttas::mutex 39 | * - yamc::spin_ttas::basic_mutex 40 | */ 41 | namespace spin_ttas { 42 | 43 | template 44 | class basic_mutex { 45 | std::atomic state_{0}; 46 | 47 | public: 48 | basic_mutex() = default; 49 | ~basic_mutex() = default; 50 | 51 | basic_mutex(const basic_mutex&) = delete; 52 | basic_mutex& operator=(const basic_mutex&) = delete; 53 | 54 | void lock() 55 | { 56 | typename BackoffPolicy::state state; 57 | int expected; 58 | do { 59 | while (state_.load(std::memory_order_relaxed) != 0) { 60 | BackoffPolicy::wait(state); 61 | } 62 | expected = 0; 63 | } while (!state_.compare_exchange_weak(expected, 1, std::memory_order_acquire)); 64 | } 65 | 66 | bool try_lock() 67 | { 68 | int expected = 0; 69 | return state_.compare_exchange_weak(expected, 1, std::memory_order_acquire); 70 | } 71 | 72 | void unlock() 73 | { 74 | state_.store(0, std::memory_order_release); 75 | } 76 | }; 77 | 78 | using mutex = basic_mutex; 79 | 80 | } // namespace spin_ttas 81 | } // namespace yamc 82 | 83 | #endif 84 | -------------------------------------------------------------------------------- /tests/CMakeLists.txt: 
-------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.2) 2 | 3 | # Requires C++11 4 | set(CMAKE_CXX_STANDARD 11) 5 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 6 | set(CMAKE_CXX_EXTENSIONS OFF) 7 | 8 | # Pthread 9 | set(THREADS_PREFER_PTHREAD_FLAG ON) 10 | find_package(Threads) 11 | 12 | # Google Test 13 | if(MSVC) 14 | set(GTEST_DEBUG_POSTFIX d) 15 | set(GTEST_FORCE_SHARED_CRT ON) 16 | endif() 17 | include(ExternalProject) 18 | set(EXTERNAL_INSTALL_LOCATION ${CMAKE_BINARY_DIR}/external) 19 | ExternalProject_Add(googletest 20 | GIT_REPOSITORY https://github.com/google/googletest 21 | GIT_TAG release-1.11.0 22 | CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${EXTERNAL_INSTALL_LOCATION} 23 | -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} 24 | -DCMAKE_CXX_STANDARD_REQUIRED=${CMAKE_CXX_STANDARD_REQUIRED} 25 | -Dgtest_force_shared_crt=${GTEST_FORCE_SHARED_CRT} 26 | ) 27 | 28 | include_directories(${EXTERNAL_INSTALL_LOCATION}/include) 29 | link_directories(${EXTERNAL_INSTALL_LOCATION}/lib) 30 | 31 | # dump type information 32 | add_executable(dump_sizeof dump_typeinfo.cpp) 33 | add_executable(dump_layout dump_typeinfo.cpp) 34 | set_target_properties(dump_sizeof PROPERTIES COMPILE_DEFINITIONS "DUMP_SIZEOF") 35 | set_target_properties(dump_layout PROPERTIES COMPILE_DEFINITIONS "DUMP_STANDARD_LAYOUT") 36 | 37 | # performance test 38 | add_executable(perf_rwlock perf_rwlock.cpp) 39 | target_link_libraries(perf_rwlock Threads::Threads) 40 | 41 | # Unit tests 42 | add_executable(compile_test compile_test.cpp) 43 | target_link_libraries(compile_test Threads::Threads) 44 | add_test(compile compile_test) 45 | 46 | macro (do_test name srcfile) 47 | set(testname ${name}_test) 48 | add_executable(${testname} ${srcfile}.cpp) 49 | add_dependencies(${testname} googletest) 50 | target_link_libraries(${testname} 51 | Threads::Threads 52 | debug gtest${GTEST_DEBUG_POSTFIX} optimized gtest 53 | debug gtest_main${GTEST_DEBUG_POSTFIX} optimized 
gtest_main) 54 | add_test(${name} ${testname}) 55 | endmacro (do_test) 56 | 57 | do_test(basic basic_test) 58 | do_test(spinlock spinlock_test) 59 | do_test(checked0 checked_test) 60 | do_test(checked1 checked_test) 61 | set_target_properties(checked0_test PROPERTIES COMPILE_DEFINITIONS "YAMC_CHECKED_CALL_ABORT=0") 62 | set_target_properties(checked1_test PROPERTIES COMPILE_DEFINITIONS "YAMC_CHECKED_CALL_ABORT=1") 63 | do_test(deadlock0 deadlock_test) 64 | do_test(deadlock1 deadlock_test) 65 | set_target_properties(deadlock0_test PROPERTIES COMPILE_DEFINITIONS "YAMC_CHECKED_CALL_ABORT=0") 66 | set_target_properties(deadlock1_test PROPERTIES COMPILE_DEFINITIONS "YAMC_CHECKED_CALL_ABORT=1") 67 | do_test(fairness fairness_test) 68 | do_test(rwlock rwlock_test) 69 | do_test(lock lock_test) 70 | do_test(semaphore semaphore_test) 71 | do_test(latch latch_test) 72 | do_test(barrier barrier_test) 73 | -------------------------------------------------------------------------------- /include/yamc_backoff_spin.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * yamc_backoff_spin.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2017 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 
17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef YAMC_BACKOFF_SPIN_HPP_ 27 | #define YAMC_BACKOFF_SPIN_HPP_ 28 | 29 | #include 30 | 31 | 32 | /// default backoff spin policy 33 | #ifndef YAMC_BACKOFF_SPIN_DEFAULT 34 | #define YAMC_BACKOFF_SPIN_DEFAULT yamc::backoff::exponential<> 35 | #endif 36 | 37 | 38 | /// initial count for yamc::backoff::exponential<> 39 | #ifndef YAMC_BACKOFF_EXPONENTIAL_INITCOUNT 40 | #define YAMC_BACKOFF_EXPONENTIAL_INITCOUNT 4000 41 | #endif 42 | 43 | 44 | namespace yamc { 45 | 46 | /* 47 | * backoff algorithm for spinlock basic_mutex 48 | * 49 | * - yamc::backoff::exponential 50 | * - yamc::backoff::yield 51 | * - yamc::backoff::busy 52 | */ 53 | namespace backoff { 54 | 55 | /// exponential backoff spin policy 56 | template < 57 | unsigned int InitCount = YAMC_BACKOFF_EXPONENTIAL_INITCOUNT 58 | > 59 | struct exponential { 60 | struct state { 61 | unsigned int initcount = InitCount; 62 | unsigned int counter = InitCount; 63 | }; 64 | 65 | static void wait(state& s) 66 | { 67 | if (s.counter == 0) { 68 | // yield thread at exponential decreasing interval 69 | std::this_thread::yield(); 70 | s.initcount = (s.initcount >> 1) | 1; 71 | s.counter = s.initcount; 72 | } 73 | --s.counter; 74 | } 75 | }; 76 | 77 | 78 | /// simple yield thread policy 79 | struct yield { 80 | struct state {}; 81 | 82 | static void wait(state&) 83 | { 84 | std::this_thread::yield(); 85 | } 86 | }; 87 | 88 | 89 | /// 'real' busy-loop policy 90 | /// 91 | /// ATTENTION: 92 | /// This 
policy may waste your CPU time. 93 | /// 94 | struct busy { 95 | struct state {}; 96 | static void wait(state&) {} 97 | }; 98 | 99 | } // namespace backoff 100 | } // namespace yamc 101 | 102 | #endif 103 | -------------------------------------------------------------------------------- /tests/perf_rwlock.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | DATFILE="perf_rwlock.dat" 3 | echo "input: ${DATFILE}" 4 | 5 | if [ ! -f ${DATFILE} ]; then 6 | echo "file not found.\nrun \"perf_rwlock > ${DATFILE}\"" 7 | exit 1 8 | fi 9 | 10 | 11 | PNGFILE="perf_rwlock.png" 12 | echo "output: ${PNGFILE}" 13 | gnuplot << EOT 14 | set terminal png medium 15 | set output "${PNGFILE}" 16 | 17 | set title "rwlock throughput" 18 | set xlabel "writer threads" 19 | set ylabel "ops/sec/thread" 20 | set xrange [0.5:9.5] 21 | set yrange [0:] 22 | 23 | plot "${DATFILE}" index 2 using 1:3 with lines lt 1 title "ReaderPerfer/WriteLock", \ 24 | "${DATFILE}" index 2 using 1:3:4 with errorbars lt 1 notitle, \ 25 | "${DATFILE}" index 2 using 1:7 with lines lt 2 title "ReaderPerfer/ReadLock", \ 26 | "${DATFILE}" index 2 using 1:7:8 with errorbars lt 2 notitle, \ 27 | "${DATFILE}" index 3 using 1:3 with lines lt 3 title "WriterPerfer/WriteLock", \ 28 | "${DATFILE}" index 3 using 1:3:4 with errorbars lt 3 notitle, \ 29 | "${DATFILE}" index 3 using 1:7 with lines lt 4 title "WriterPerfer/ReadLock", \ 30 | "${DATFILE}" index 3 using 1:7:8 with errorbars lt 4 notitle, \ 31 | "${DATFILE}" index 4 using 1:3 with lines lt 5 title "TaskFair/WriteLock", \ 32 | "${DATFILE}" index 4 using 1:3:4 with errorbars lt 5 notitle, \ 33 | "${DATFILE}" index 4 using 1:7 with lines lt 6 title "TaskFair/ReadLock", \ 34 | "${DATFILE}" index 4 using 1:7:8 with errorbars lt 6 notitle, \ 35 | "${DATFILE}" index 5 using 1:3 with lines lt 7 title "PhaseFair/WriteLock", \ 36 | "${DATFILE}" index 5 using 1:3:4 with errorbars lt 7 notitle, \ 37 | "${DATFILE}" index 5 using 1:7 
with lines lt 8 title "PhaseFair/ReadLock", \ 38 | "${DATFILE}" index 5 using 1:7:8 with errorbars lt 8 notitle, 39 | EOT 40 | 41 | 42 | PNGFILE="perf_rwlock-unfair.png" 43 | echo "output: ${PNGFILE}" 44 | gnuplot << EOT 45 | set terminal png medium 46 | set output "${PNGFILE}" 47 | 48 | set title "rwlock throughput" 49 | set xlabel "writer threads" 50 | set ylabel "ops/sec/thread" 51 | set xrange [0.5:9.5] 52 | set yrange [0:] 53 | 54 | plot "${DATFILE}" index 2 using 1:3 with linespoints lt 1 title "ReaderPerfer/WriteLock", \ 55 | "${DATFILE}" index 2 using 1:7 with linespoints lt 2 title "ReaderPerfer/ReadLock", \ 56 | "${DATFILE}" index 3 using 1:3 with linespoints lt 3 title "WriterPerfer/WriteLock", \ 57 | "${DATFILE}" index 3 using 1:7 with linespoints lt 4 title "WriterPerfer/ReadLock", 58 | EOT 59 | 60 | 61 | PNGFILE="perf_rwlock-fair.png" 62 | echo "output: ${PNGFILE}" 63 | gnuplot << EOT 64 | set terminal png medium 65 | set output "${PNGFILE}" 66 | 67 | set title "rwlock throughput" 68 | set xlabel "writer threads" 69 | set ylabel "ops/sec/thread" 70 | set xrange [0.5:9.5] 71 | set yrange [0:] 72 | 73 | plot "${DATFILE}" index 4 using 1:3 with linespoints lt 5 title "TaskFair/WriteLock", \ 74 | "${DATFILE}" index 4 using 1:7 with linespoints lt 6 title "TaskFair/ReadLock", \ 75 | "${DATFILE}" index 5 using 1:3 with linespoints lt 7 title "PhaseFair/WriteLock", \ 76 | "${DATFILE}" index 5 using 1:7 with linespoints lt 8 title "PhaseFair/ReadLock", 77 | EOT 78 | -------------------------------------------------------------------------------- /include/yamc_latch.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * yamc_latch.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2019 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, 
including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef YAMC_LATCH_HPP_ 27 | #define YAMC_LATCH_HPP_ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | 35 | 36 | /* 37 | * Latches in C++20 Standard Library 38 | * 39 | * - yamc::latch 40 | */ 41 | namespace yamc { 42 | 43 | class latch { 44 | std::ptrdiff_t counter_; 45 | mutable std::condition_variable cv_; 46 | mutable std::mutex mtx_; 47 | 48 | public: 49 | static constexpr ptrdiff_t (max)() noexcept 50 | { 51 | return (std::numeric_limits::max)(); 52 | } 53 | 54 | /*constexpr*/ explicit latch(std::ptrdiff_t expected) 55 | : counter_(expected) 56 | { 57 | assert(0 <= expected && expected < (max)()); 58 | } 59 | 60 | ~latch() = default; 61 | 62 | latch(const latch&) = delete; 63 | latch& operator=(const latch&) = delete; 64 | 65 | void count_down(std::ptrdiff_t update = 1) 66 | { 67 | std::lock_guard lk(mtx_); 68 | assert(0 <= update && update <= counter_); 69 | counter_ -= update; 70 | if (counter_ == 0) { 71 | cv_.notify_all(); 72 | } 73 | } 74 | 75 | bool try_wait() const noexcept 76 | { 77 | std::lock_guard 
lk(mtx_); 78 | // no spurious failure 79 | return (counter_ == 0); 80 | } 81 | 82 | void wait() const 83 | { 84 | std::unique_lock lk(mtx_); 85 | while (counter_ != 0) { 86 | cv_.wait(lk); 87 | } 88 | } 89 | 90 | void arrive_and_wait(std::ptrdiff_t update = 1) 91 | { 92 | std::unique_lock lk(mtx_); 93 | // equivalent to { count_down(update); wait(); } 94 | assert(0 <= update && update <= counter_); 95 | counter_ -= update; 96 | if (counter_ == 0) { 97 | cv_.notify_all(); 98 | } 99 | while (counter_ != 0) { 100 | cv_.wait(lk); 101 | } 102 | } 103 | }; 104 | 105 | } // namespace yamc 106 | 107 | #endif 108 | -------------------------------------------------------------------------------- /include/yamc_scoped_lock.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * yamc_scoped_lock.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2018 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef YAMC_SCOPED_LOCK_HPP_ 27 | #define YAMC_SCOPED_LOCK_HPP_ 28 | 29 | #include 30 | #include 31 | #include 32 | 33 | 34 | /* 35 | * std::scoped_lock in C++17 Standard Library 36 | * 37 | * - yamc::scoped_lock 38 | */ 39 | namespace yamc { 40 | 41 | template 42 | class scoped_lock { 43 | template 44 | typename std::enable_if::type 45 | invoke_unlock() {} 46 | 47 | template 48 | typename std::enable_if::type 49 | invoke_unlock() 50 | { 51 | std::get(pm_).unlock(); 52 | invoke_unlock(); 53 | } 54 | 55 | public: 56 | explicit scoped_lock(MutexTypes&... m) 57 | : pm_(m...) 58 | { 59 | std::lock(m...); 60 | } 61 | 62 | explicit scoped_lock(std::adopt_lock_t, MutexTypes&... m) 63 | : pm_(m...) 64 | { 65 | } 66 | 67 | ~scoped_lock() 68 | { 69 | invoke_unlock<>(); 70 | } 71 | 72 | scoped_lock(const scoped_lock&) = delete; 73 | scoped_lock& operator=(const scoped_lock&) = delete; 74 | 75 | private: 76 | std::tuple pm_; 77 | }; 78 | 79 | 80 | template <> 81 | class scoped_lock<> { 82 | public: 83 | explicit scoped_lock() = default; 84 | explicit scoped_lock(std::adopt_lock_t) {} 85 | ~scoped_lock() = default; 86 | 87 | scoped_lock(const scoped_lock&) = delete; 88 | scoped_lock& operator=(const scoped_lock&) = delete; 89 | }; 90 | 91 | 92 | template 93 | class scoped_lock { 94 | public: 95 | using mutex_type = Mutex; 96 | 97 | explicit scoped_lock(Mutex& m) 98 | : m_(m) 99 | { 100 | m.lock(); 101 | } 102 | 103 | explicit scoped_lock(std::adopt_lock_t, Mutex& m) 104 | : m_(m) 105 | { 106 | } 107 | 108 | ~scoped_lock() 109 | { 110 | m_.unlock(); 111 | } 112 | 113 | scoped_lock(const scoped_lock&) = delete; 114 | scoped_lock& operator=(const scoped_lock&) = delete; 115 | 116 | 
private: 117 | Mutex& m_; 118 | }; 119 | 120 | 121 | } // namespace yamc 122 | 123 | #endif 124 | -------------------------------------------------------------------------------- /tests/dump_typeinfo.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * dump_typeinfo.cpp 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "naive_spin_mutex.hpp" 10 | #include "ttas_spin_mutex.hpp" 11 | #include "checked_mutex.hpp" 12 | #include "checked_shared_mutex.hpp" 13 | #include "fair_mutex.hpp" 14 | #include "fair_shared_mutex.hpp" 15 | #include "alternate_mutex.hpp" 16 | #include "alternate_shared_mutex.hpp" 17 | // platform native 18 | #if defined(__linux__) || defined(__APPLE__) 19 | #include "posix_native_mutex.hpp" 20 | #define ENABLE_POSIX_NATIVE_MUTEX 21 | #endif 22 | #if defined(_WIN32) 23 | #include "win_native_mutex.hpp" 24 | #define ENABLE_WIN_NATIVE_MUTEX 25 | #endif 26 | #if defined(__APPLE__) 27 | #include "apple_native_mutex.hpp" 28 | #define ENABLE_APPLE_NATIVE_MUTEX 29 | #endif 30 | 31 | 32 | #ifdef DUMP_SIZEOF 33 | #define DUMP(T) std::printf("%s %zu\n", #T, sizeof(T)) 34 | #endif 35 | 36 | #ifdef DUMP_STANDARD_LAYOUT 37 | #define DUMP(T) std::printf("%s %d\n", #T, (int)std::is_standard_layout::value) 38 | #endif 39 | 40 | 41 | int main() 42 | { 43 | DUMP(int); 44 | DUMP(std::size_t); 45 | DUMP(std::thread::id); 46 | DUMP(std::condition_variable); 47 | 48 | DUMP(std::mutex); 49 | DUMP(std::timed_mutex); 50 | DUMP(std::recursive_mutex); 51 | DUMP(std::recursive_timed_mutex); 52 | 53 | DUMP(yamc::spin::mutex); 54 | DUMP(yamc::spin_weak::mutex); 55 | DUMP(yamc::spin_ttas::mutex); 56 | 57 | DUMP(yamc::checked::mutex); 58 | DUMP(yamc::checked::timed_mutex); 59 | DUMP(yamc::checked::recursive_mutex); 60 | DUMP(yamc::checked::recursive_timed_mutex); 61 | DUMP(yamc::checked::shared_mutex); 62 | DUMP(yamc::checked::shared_timed_mutex); 63 | 64 | DUMP(yamc::fair::mutex); 65 | 
DUMP(yamc::fair::recursive_mutex); 66 | DUMP(yamc::fair::timed_mutex); 67 | DUMP(yamc::fair::recursive_timed_mutex); 68 | 69 | DUMP(yamc::fair::shared_mutex); 70 | DUMP(yamc::fair::shared_timed_mutex); 71 | DUMP(yamc::fair::basic_shared_mutex); 72 | DUMP(yamc::fair::basic_shared_mutex); 73 | DUMP(yamc::fair::basic_shared_timed_mutex); 74 | DUMP(yamc::fair::basic_shared_timed_mutex); 75 | 76 | DUMP(yamc::alternate::mutex); 77 | DUMP(yamc::alternate::recursive_mutex); 78 | DUMP(yamc::alternate::timed_mutex); 79 | DUMP(yamc::alternate::recursive_timed_mutex); 80 | 81 | DUMP(yamc::alternate::shared_mutex); 82 | DUMP(yamc::alternate::shared_timed_mutex); 83 | DUMP(yamc::alternate::basic_shared_mutex); 84 | DUMP(yamc::alternate::basic_shared_mutex); 85 | DUMP(yamc::alternate::basic_shared_timed_mutex); 86 | DUMP(yamc::alternate::basic_shared_timed_mutex); 87 | 88 | #if defined(ENABLE_POSIX_NATIVE_MUTEX) 89 | DUMP(yamc::posix::native_mutex); 90 | DUMP(yamc::posix::native_recursive_mutex); 91 | DUMP(yamc::posix::rwlock); 92 | #if YAMC_POSIX_SPINLOCK_SUPPORTED 93 | DUMP(yamc::posix::spinlock); 94 | #endif 95 | #endif 96 | #if defined(ENABLE_WIN_NATIVE_MUTEX) 97 | DUMP(yamc::win::native_mutex); 98 | DUMP(yamc::win::critical_section); 99 | DUMP(yamc::win::slim_rwlock); 100 | #endif 101 | #if defined(ENABLE_APPLE_NATIVE_MUTEX) 102 | DUMP(yamc::apple::unfair_lock); 103 | #endif 104 | return 0; 105 | } 106 | -------------------------------------------------------------------------------- /include/naive_spin_mutex.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * naive_spin_mutex.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2017 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef YAMC_NAIVE_SPIN_MUTEX_HPP_ 27 | #define YAMC_NAIVE_SPIN_MUTEX_HPP_ 28 | 29 | #include 30 | #include "yamc_backoff_spin.hpp" 31 | 32 | 33 | namespace yamc { 34 | 35 | /* 36 | * naive Test-And-Swap(TAS) spinlock implementation (with memory_order_seq_cst) 37 | * 38 | * - yamc::spin::mutex 39 | * - yamc::spin::basic_mutex 40 | */ 41 | namespace spin { 42 | 43 | template 44 | class basic_mutex { 45 | std::atomic state_{0}; 46 | 47 | public: 48 | basic_mutex() = default; 49 | ~basic_mutex() = default; 50 | 51 | basic_mutex(const basic_mutex&) = delete; 52 | basic_mutex& operator=(const basic_mutex&) = delete; 53 | 54 | void lock() 55 | { 56 | typename BackoffPolicy::state state; 57 | int expected = 0; 58 | while (!state_.compare_exchange_weak(expected, 1)) { 59 | BackoffPolicy::wait(state); 60 | expected = 0; 61 | } 62 | } 63 | 64 | bool try_lock() 65 | { 66 | int expected = 0; 67 | return state_.compare_exchange_weak(expected, 1); 68 | } 69 | 70 | void unlock() 71 | { 72 | state_.store(0); 73 | } 74 | }; 75 | 76 | using mutex = basic_mutex; 77 | 78 | } // namespace spin 79 | 80 | 81 | /* 82 | * naive 
Test-And-Swap(TAS) spinlock implementation for weak hardware memory model 83 | * 84 | * - yamc::spin_weak::mutex 85 | * - yamc::spin_weak::basic_mutex 86 | */ 87 | namespace spin_weak { 88 | 89 | template 90 | class basic_mutex { 91 | std::atomic state_{0}; 92 | 93 | public: 94 | basic_mutex() = default; 95 | ~basic_mutex() = default; 96 | 97 | basic_mutex(const basic_mutex&) = delete; 98 | basic_mutex& operator=(const basic_mutex&) = delete; 99 | 100 | void lock() 101 | { 102 | typename BackoffPolicy::state state; 103 | int expected = 0; 104 | while (!state_.compare_exchange_weak(expected, 1, std::memory_order_acquire, std::memory_order_relaxed)) { 105 | BackoffPolicy::wait(state); 106 | expected = 0; 107 | } 108 | } 109 | 110 | bool try_lock() 111 | { 112 | int expected = 0; 113 | return state_.compare_exchange_weak(expected, 1, std::memory_order_acquire, std::memory_order_relaxed); 114 | } 115 | 116 | void unlock() 117 | { 118 | state_.store(0, std::memory_order_release); 119 | } 120 | }; 121 | 122 | using mutex = basic_mutex; 123 | 124 | } // namespace spin_weak 125 | 126 | } // namespace yamc 127 | 128 | #endif 129 | -------------------------------------------------------------------------------- /include/yamc_semaphore.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * yamc_semaphore.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2019 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 
#ifndef YAMC_SEMAPHORE_HPP_
#define YAMC_SEMAPHORE_HPP_

#include <cassert>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <limits>
#include <mutex>


/// default least_max_value of yamc::counting_semaphore
#ifndef YAMC_SEMAPHORE_LEAST_MAX_VALUE
#define YAMC_SEMAPHORE_LEAST_MAX_VALUE ((std::numeric_limits<std::ptrdiff_t>::max)())
#endif


/*
 * Semaphores in C++20 Standard Library
 *
 * - yamc::counting_semaphore<least_max_value>
 * - yamc::binary_semaphore
 */
namespace yamc {

/// Condition-variable based counting semaphore, modelled after C++20
/// std::counting_semaphore.
template <std::ptrdiff_t least_max_value = YAMC_SEMAPHORE_LEAST_MAX_VALUE>
class counting_semaphore {
  std::ptrdiff_t counter_;
  std::condition_variable cv_;
  std::mutex mtx_;

  /// Wait until a permit is available or `tp` is reached.
  /// @return true when a permit was acquired, false on timeout
  template <typename Clock, typename Duration>
  bool do_try_acquirewait(const std::chrono::time_point<Clock, Duration>& tp)
  {
    std::unique_lock<std::mutex> lk(mtx_);
    while (counter_ <= 0) {
      if (cv_.wait_until(lk, tp) == std::cv_status::timeout) {
        if (0 < counter_)  // re-check predicate: a release() may have raced
          break;           // with the timeout wakeup
        return false;
      }
    }
    --counter_;
    return true;
  }

public:
  /// Maximum value of the internal counter.
  /// NOTE: extra parentheses protect against a function-like `max` macro
  /// (e.g. <windows.h> without NOMINMAX).
  static constexpr std::ptrdiff_t (max)() noexcept
  {
    static_assert(0 <= least_max_value, "least_max_value shall be non-negative");
    return least_max_value;
  }

  /// @param desired initial counter value; shall be in [0, max()]
  /*constexpr*/ explicit counting_semaphore(std::ptrdiff_t desired)
    : counter_(desired)
  {
    assert(0 <= desired && desired <= (max)());
    // counting_semaphore constructor throws nothing.
  }

  ~counting_semaphore() = default;

  counting_semaphore(const counting_semaphore&) = delete;
  counting_semaphore& operator=(const counting_semaphore&) = delete;

  /// Increment the counter by `update` and wake blocked acquirers.
  void release(std::ptrdiff_t update = 1)
  {
    std::lock_guard<std::mutex> lk(mtx_);
    assert(0 <= update && update <= (max)() - counter_);
    counter_ += update;
    if (0 < counter_) {
      cv_.notify_all();
    }
  }

  /// Block until the counter is positive, then decrement it.
  void acquire()
  {
    std::unique_lock<std::mutex> lk(mtx_);
    while (counter_ <= 0) {
      cv_.wait(lk);
    }
    --counter_;
  }

  /// Non-blocking acquire; this implementation never fails spuriously.
  bool try_acquire() noexcept
  {
    std::unique_lock<std::mutex> lk(mtx_);
    if (counter_ <= 0) {
      // no spurious failure
      return false;
    }
    --counter_;
    return true;
  }

  /// Acquire with a relative timeout (measured against steady_clock).
  template <typename Rep, typename Period>
  bool try_acquire_for(const std::chrono::duration<Rep, Period>& rel_time)
  {
    const auto tp = std::chrono::steady_clock::now() + rel_time;
    return do_try_acquirewait(tp);
  }

  /// Acquire with an absolute deadline on an arbitrary clock.
  template <typename Clock, typename Duration>
  bool try_acquire_until(const std::chrono::time_point<Clock, Duration>& abs_time)
  {
    return do_try_acquirewait(abs_time);
  }
};

/// semaphore that has at most one permit
using binary_semaphore = counting_semaphore<1>;

} // namespace yamc

#endif
#ifndef YAMC_BARRIER_HPP_
#define YAMC_BARRIER_HPP_

#include <cassert>
#include <condition_variable>
#include <cstddef>
#include <limits>
#include <mutex>


/*
 * Barriers in C++20 Standard Library
 *
 * - yamc::barrier<CompletionFunction>
 */
namespace yamc {

namespace detail {

/// default CompletionFunction of yamc::barrier class template
class default_barrier_completion {
public:
  void operator()() const
  {
    // no effect
  }
};

/// opaque token returned by barrier::arrive(); records the phase number
/// the arriving thread participated in
struct barrier_arrival_token {
  unsigned phase_;
};

} // namespace detail


/// Thread-coordination barrier, modelled after C++20 std::barrier.
template <typename CompletionFunction = detail::default_barrier_completion>
class barrier {
  std::ptrdiff_t init_count_;   // expected count at the start of each phase
  std::ptrdiff_t counter_;      // arrivals still outstanding in current phase
  unsigned phase_ = 0;          // monotonically increasing phase number
  CompletionFunction completion_;
  mutable std::condition_variable cv_;
  mutable std::mutex mtx_;

  /// Run the completion step and begin the next phase.
  /// Pre: caller holds mtx_ and counter_ == 0.
  void phase_completion_step()
  {
    completion_();
    counter_ = init_count_;
    ++phase_;
    cv_.notify_all();
  }

public:
  using arrival_token = detail::barrier_arrival_token;

  /// NOTE: extra parentheses protect against a function-like `max` macro
  /// (e.g. <windows.h> without NOMINMAX).
  static constexpr std::ptrdiff_t (max)() noexcept
  {
    return (std::numeric_limits<std::ptrdiff_t>::max)();
  }

  /// @param expected number of threads participating in each phase
  /// @param f completion function invoked when a phase completes
  /*constexpr*/ explicit barrier(std::ptrdiff_t expected, CompletionFunction f = CompletionFunction())
    : init_count_(expected)
    , counter_(expected)
    , completion_(std::move(f))
  {
    // C++20 precondition: 0 <= expected <= max()
    assert(0 <= expected && expected <= (max)());
  }

  ~barrier() = default;

  barrier(const barrier&) = delete;
  barrier& operator=(const barrier&) = delete;

#if 201703L <= __cplusplus
  [[nodiscard]]
#endif
  /// Record `update` arrivals for the current phase without blocking.
  arrival_token arrive(std::ptrdiff_t update = 1)
  {
    std::lock_guard<std::mutex> lk(mtx_);
    assert(0 < update && update <= counter_);
    arrival_token token{phase_};
    counter_ -= update;
    if (counter_ == 0) {
      phase_completion_step();
    }
    return token;
  }

  /// Block until the phase recorded in `arrival` has completed.
  void wait(arrival_token&& arrival) const
  {
    std::unique_lock<std::mutex> lk(mtx_);
    while (phase_ <= arrival.phase_) {
      cv_.wait(lk);
    }
  }

  /// Equivalent to wait(arrive()), performed atomically under one lock.
  void arrive_and_wait()
  {
    std::unique_lock<std::mutex> lk(mtx_);
    assert(0 < counter_);
    auto arrival_phase = phase_;
    counter_ -= 1;
    if (counter_ == 0) {
      phase_completion_step();
    }
    while (phase_ <= arrival_phase) {
      cv_.wait(lk);
    }
  }

  /// Arrive and remove this thread from all subsequent phases.
  /// FIX: per C++20 this must NOT block, and the expected count must drop
  /// even when another thread later completes the current phase. The old
  /// code copied arrive_and_wait()'s wait loop and only decremented
  /// init_count_ when this call itself zeroed the counter.
  void arrive_and_drop()
  {
    std::lock_guard<std::mutex> lk(mtx_);
    assert(0 < counter_ && 0 < init_count_);
    --init_count_;
    counter_ -= 1;
    if (counter_ == 0) {
      phase_completion_step();
    }
  }
};

} // namespace yamc

#endif
"posix_native_mutex.hpp" 11 | #define ENABLE_POSIX_NATIVE_MUTEX 12 | #endif 13 | 14 | 15 | #define TEST_THREADS 20 16 | #define TEST_ITERATION 100000u 17 | 18 | 19 | using SpinMutexTypes = ::testing::Types< 20 | yamc::spin::basic_mutex>, 21 | yamc::spin_weak::basic_mutex>, 22 | yamc::spin_ttas::basic_mutex>, 23 | yamc::spin::basic_mutex, 24 | yamc::spin_weak::basic_mutex, 25 | yamc::spin_ttas::basic_mutex, 26 | yamc::spin::basic_mutex, 27 | yamc::spin_weak::basic_mutex, 28 | yamc::spin_ttas::basic_mutex 29 | #if defined(ENABLE_POSIX_NATIVE_MUTEX) && YAMC_POSIX_SPINLOCK_SUPPORTED 30 | , yamc::posix::spinlock 31 | #endif 32 | >; 33 | 34 | template 35 | struct SpinMutexTest : ::testing::Test {}; 36 | 37 | TYPED_TEST_SUITE(SpinMutexTest, SpinMutexTypes); 38 | 39 | // mutex::lock() 40 | TYPED_TEST(SpinMutexTest, BasicLock) 41 | { 42 | TypeParam mtx; 43 | std::size_t counter = 0; 44 | yamc::test::task_runner( 45 | TEST_THREADS, 46 | [&](std::size_t /*id*/) { 47 | for (std::size_t n = 0; n < TEST_ITERATION; ++n) { 48 | std::lock_guard lk(mtx); 49 | counter = counter + 1; 50 | } 51 | }); 52 | EXPECT_EQ(TEST_ITERATION * TEST_THREADS, counter); 53 | } 54 | 55 | // mutex::try_lock() 56 | TYPED_TEST(SpinMutexTest, TryLock) 57 | { 58 | TypeParam mtx; 59 | std::size_t counter = 0; 60 | yamc::test::task_runner( 61 | TEST_THREADS, 62 | [&](std::size_t /*id*/) { 63 | for (std::size_t n = 0; n < TEST_ITERATION; ++n) { 64 | while (!mtx.try_lock()) { 65 | std::this_thread::yield(); 66 | } 67 | std::lock_guard lk(mtx, std::adopt_lock); 68 | counter = counter + 1; 69 | } 70 | }); 71 | EXPECT_EQ(TEST_ITERATION * TEST_THREADS, counter); 72 | } 73 | 74 | // mutex::try_lock() failure 75 | TYPED_TEST(SpinMutexTest, TryLockFail) 76 | { 77 | yamc::test::barrier step(2); 78 | TypeParam mtx; 79 | yamc::test::join_thread thd([&]{ 80 | EXPECT_NO_THROW(mtx.lock()); 81 | step.await(); // b1 82 | step.await(); // b2 83 | EXPECT_NO_THROW(mtx.unlock()); 84 | }); 85 | { 86 | step.await(); // b1 87 | 
EXPECT_FALSE(mtx.try_lock()); 88 | step.await(); // b2 89 | } 90 | } 91 | 92 | 93 | // lockfree property of atomic 94 | TEST(AtomicTest, LockfreeInt) 95 | { 96 | // std::atomic type is always lock-free 97 | EXPECT_EQ(2, ATOMIC_INT_LOCK_FREE); 98 | } 99 | 100 | // YAMC_BACKOFF_* macros 101 | TEST(BackoffTest, Macro) 102 | { 103 | bool yamc_backoff_spin_default = std::is_same>::value; 104 | EXPECT_TRUE(yamc_backoff_spin_default); 105 | EXPECT_EQ(4000, YAMC_BACKOFF_EXPONENTIAL_INITCOUNT); 106 | } 107 | 108 | // backoff::exponential<100> 109 | TEST(BackoffTest, Exponential100) 110 | { 111 | using BackoffPolicy = yamc::backoff::exponential<100>; 112 | BackoffPolicy::state state; 113 | EXPECT_EQ(100u, state.initcount); 114 | EXPECT_EQ(100u, state.counter); 115 | for (int i = 0; i < 100; ++i) { 116 | BackoffPolicy::wait(state); // wait 100 117 | } 118 | EXPECT_EQ(0u, state.counter); 119 | for (int i = 0; i < 2000; ++i) { 120 | BackoffPolicy::wait(state); 121 | } 122 | EXPECT_EQ(1u, state.initcount); 123 | EXPECT_EQ(0u, state.counter); 124 | BackoffPolicy::wait(state); 125 | EXPECT_EQ(1u, state.initcount); 126 | EXPECT_EQ(0u, state.counter); 127 | } 128 | 129 | // backoff::exponential<1> 130 | TEST(BackoffTest, Exponential1) 131 | { 132 | using BackoffPolicy = yamc::backoff::exponential<1>; 133 | BackoffPolicy::state state; 134 | EXPECT_EQ(1u, state.initcount); 135 | EXPECT_EQ(1u, state.counter); 136 | BackoffPolicy::wait(state); 137 | EXPECT_EQ(1u, state.initcount); 138 | EXPECT_EQ(0u, state.counter); 139 | } 140 | 141 | // backoff::yield 142 | TEST(BackoffTest, Yield) 143 | { 144 | using BackoffPolicy = yamc::backoff::yield; 145 | // NOTE: backoff::yield class has no observable behavior nor state 146 | BackoffPolicy::state state; 147 | BackoffPolicy::wait(state); 148 | } 149 | 150 | // backoff::busy 151 | TEST(BackoffTest, Busy) 152 | { 153 | using BackoffPolicy = yamc::backoff::busy; 154 | // NOTE: backoff::busy class has no observable behavior nor state 155 | 
BackoffPolicy::state state; 156 | BackoffPolicy::wait(state); 157 | } 158 | -------------------------------------------------------------------------------- /include/yamc_rwlock_sched.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * yamc_rwlock_sched.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2017 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 
#ifndef YAMC_RWLOCK_SCHED_HPP_
#define YAMC_RWLOCK_SCHED_HPP_

#include <cassert>
#include <cstddef>


/// default shared_mutex rwlock policy
#ifndef YAMC_RWLOCK_SCHED_DEFAULT
#define YAMC_RWLOCK_SCHED_DEFAULT yamc::rwlock::ReaderPrefer
#endif


namespace yamc {

/*
 * readers-writer locking policy for basic_shared_(timed)_mutex
 *
 * - yamc::rwlock::ReaderPrefer
 * - yamc::rwlock::WriterPrefer
 */
namespace rwlock {

/// Reader-preferring scheduling.
///
/// NOTE:
/// Readers are admitted whenever no writer holds the lock, so a steady
/// stream of readers can starve writers ("Writer Starvation"). This is the
/// default pthreads rwlock scheduling on Linux.
/// (see also PTHREAD_RWLOCK_PREFER_READER_NP)
///
struct ReaderPrefer {
  // single state word: MSB = writer-held flag, lower bits = reader count
  static const std::size_t reader_mask = ~std::size_t(0u) >> 1;
  static const std::size_t writer_mask = ~reader_mask;  // MSB 1bit

  struct state {
    std::size_t rwcount = 0;
  };

  // this policy keeps no bookkeeping around writer waits
  static void before_wait_wlock(state&) {}
  static void after_wait_wlock(state&) {}

  /// writer shall wait while any lock (shared or exclusive) is held
  static bool wait_wlock(state& st)
  {
    return st.rwcount != 0;
  }

  static void acquire_wlock(state& st)
  {
    assert(!(st.rwcount & writer_mask));
    st.rwcount |= writer_mask;
  }

  static void release_wlock(state& st)
  {
    assert(st.rwcount & writer_mask);
    st.rwcount &= ~writer_mask;
  }

  /// reader shall wait only while a writer holds the lock
  static bool wait_rlock(state& st)
  {
    return (st.rwcount & writer_mask) != 0;
  }

  static void acquire_rlock(state& st)
  {
    assert((st.rwcount & reader_mask) < reader_mask);
    ++st.rwcount;
  }

  /// @return true when the last reader released the lock
  static bool release_rlock(state& st)
  {
    assert(0 < (st.rwcount & reader_mask));
    return --st.rwcount == 0;
  }
};


/// Writer-preferring scheduling.
///
/// NOTE:
/// While any writer is waiting, new readers are held back until every
/// shared lock is released, so the writer acquires the exclusive lock in
/// preference to blocked readers. Continuous writers can starve readers
/// ("Reader Starvation").
/// (see also PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)
///
struct WriterPrefer {
  // nwriter word: MSB = exclusively locked, lower bits = waiting writers
  static const std::size_t wait_mask = ~std::size_t(0u) >> 1;
  static const std::size_t locked = ~wait_mask;  // MSB 1bit

  struct state {
    std::size_t nwriter = 0;
    std::size_t nreader = 0;
  };

  /// register a waiting writer so that wait_rlock() blocks new readers
  static void before_wait_wlock(state& st)
  {
    assert((st.nwriter & wait_mask) < wait_mask);
    ++st.nwriter;
  }

  static bool wait_wlock(state& st)
  {
    return (st.nwriter & locked) != 0 || 0 < st.nreader;
  }

  static void after_wait_wlock(state& st)
  {
    assert(0 < (st.nwriter & wait_mask));
    --st.nwriter;
  }

  static void acquire_wlock(state& st)
  {
    assert(!(st.nwriter & locked));
    st.nwriter |= locked;
  }

  static void release_wlock(state& st)
  {
    assert(st.nwriter & locked);
    st.nwriter &= ~locked;
  }

  /// readers wait while the lock is held exclusively OR writers are waiting
  static bool wait_rlock(state& st)
  {
    return st.nwriter != 0;
  }

  static void acquire_rlock(state& st)
  {
    assert(!(st.nwriter & locked));
    ++st.nreader;
  }

  /// @return true when the last reader released the lock
  static bool release_rlock(state& st)
  {
    assert(0 < st.nreader);
    return --st.nreader == 0;
  }
};

} // namespace rwlock
} // namespace yamc

#endif
free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef POSIX_SEMAPHORE_HPP_ 27 | #define POSIX_SEMAPHORE_HPP_ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | // POSIX semaphore 35 | #include // SEM_VALUE_MAX 36 | #include 37 | #include // timespec struct 38 | 39 | 40 | namespace yamc { 41 | 42 | /* 43 | * Semaphores in C++20 Standard Library for POSIX-compatible platform 44 | * 45 | * - yamc::posix::counting_semaphore 46 | * - yamc::posix::binary_semaphore 47 | * 48 | * This implementation use POSIX unnamed semaphore. 
namespace yamc {

/*
 * Semaphores in C++20 Standard Library for POSIX-compatible platform
 *
 * - yamc::posix::counting_semaphore<least_max_value>
 * - yamc::posix::binary_semaphore
 *
 * This implementation uses a POSIX unnamed semaphore.
 * https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/semaphore.h.html
 */
namespace posix {

namespace detail {

/// Convert an arbitrary-clock time_point to system_clock by offsetting
/// from "now" on both clocks.
/// NOTE(review): assumes both clocks advance at the same rate between the
/// two now() calls -- confirm acceptable drift for long timeouts.
template <typename Clock, typename Duration>
inline
std::chrono::system_clock::time_point
to_system_timepoint(const std::chrono::time_point<Clock, Duration>& tp)
{
  return std::chrono::time_point_cast<std::chrono::system_clock::duration>(
      std::chrono::system_clock::now() + (tp - Clock::now()));
}

/// system_clock time_point needs only a duration conversion.
template <typename Duration>
inline
std::chrono::system_clock::time_point
to_system_timepoint(const std::chrono::time_point<std::chrono::system_clock, Duration>& tp)
{
  return std::chrono::time_point_cast<std::chrono::system_clock::duration>(tp);
}

} // namespace detail


/// POSIX sem_t based counting semaphore, modelled after C++20
/// std::counting_semaphore.
/// NOTE(review): default least_max_value reconstructed as SEM_VALUE_MAX --
/// the original template header was lost in extraction; confirm against VCS.
template <std::ptrdiff_t least_max_value = SEM_VALUE_MAX>
class counting_semaphore {
  ::sem_t sem_;

  /// Throw std::system_error carrying the current errno.
  static void throw_errno(const char* what_arg)
  {
    throw std::system_error(std::error_code(errno, std::generic_category()), what_arg);
  }

  /// Wait until a permit is available or `tp` is reached.
  /// @return true when a permit was acquired, false on timeout
  /// @throws std::system_error on sem_timedwait failure other than
  ///         ETIMEDOUT/EINTR
  bool do_try_acquirewait(const std::chrono::system_clock::time_point& tp)
  {
    using namespace std::chrono;
    // convert C++ system_clock to POSIX struct timespec
    struct ::timespec abs_timeout;
    abs_timeout.tv_sec = system_clock::to_time_t(tp);
    abs_timeout.tv_nsec = (long)(duration_cast<nanoseconds>(tp.time_since_epoch()).count() % 1000000000);

    errno = 0;
    while (::sem_timedwait(&sem_, &abs_timeout) != 0) {
      if (errno == ETIMEDOUT) {
        return false;
      }
      if (errno != EINTR) {  // retry when interrupted by a signal handler
        throw_errno("sem_timedwait");
      }
      errno = 0;
    }
    return true;
  }

public:
  /// Maximum value of the counter; parenthesized against min/max macros.
  static constexpr std::ptrdiff_t (max)() noexcept
  {
    static_assert(0 <= least_max_value, "least_max_value shall be non-negative");
    static_assert(least_max_value <= SEM_VALUE_MAX, "least_max_value shall be less than or equal to SEM_VALUE_MAX");
    return least_max_value;
  }

  /// @param desired initial counter value; shall be in [0, max()]
  /*constexpr*/ explicit counting_semaphore(std::ptrdiff_t desired)
  {
    assert(0 <= desired && desired <= (max)());
    ::sem_init(&sem_, 0, (unsigned int)desired);
    // counting_semaphore constructor throws nothing.
  }

  ~counting_semaphore()
  {
    ::sem_destroy(&sem_);
  }

  counting_semaphore(const counting_semaphore&) = delete;
  counting_semaphore& operator=(const counting_semaphore&) = delete;

  /// Increment the counter by `update` (one sem_post per unit).
  /// @throws std::system_error on sem_post failure
  void release(std::ptrdiff_t update = 1)
  {
    errno = 0;
    while (0 < update--) {
      if (::sem_post(&sem_) != 0) {
        throw_errno("sem_post");
      }
    }
  }

  /// Block until a permit is available.
  /// @throws std::system_error on sem_wait failure (EINTR is retried)
  void acquire()
  {
    errno = 0;
    while (::sem_wait(&sem_) != 0) {
      if (errno != EINTR) {  // retry when interrupted by a signal handler
        throw_errno("sem_wait");
      }
      errno = 0;
    }
  }

  /// Non-blocking acquire.
  bool try_acquire() noexcept
  {
    return (::sem_trywait(&sem_) == 0);
  }

  template <typename Rep, typename Period>
  bool try_acquire_for(const std::chrono::duration<Rep, Period>& rel_time)
  {
    // C++ Standard says '_for'-suffixed timeout function shall use steady clock,
    // but we use system_clock to convert from time_point to legacy time_t.
    const auto tp = std::chrono::system_clock::now() + rel_time;
    return do_try_acquirewait(tp);
  }

  template <typename Clock, typename Duration>
  bool try_acquire_until(const std::chrono::time_point<Clock, Duration>& abs_time)
  {
    return do_try_acquirewait(detail::to_system_timepoint(abs_time));
  }
};

using binary_semaphore = counting_semaphore<1>;

} // namespace posix
} // namespace yamc
#ifndef YAMC_SHARED_LOCK_HPP_
#define YAMC_SHARED_LOCK_HPP_

#include <cassert>
#include <chrono>
#include <mutex>
#include <system_error>
#include <utility>  // std::swap


/*
 * std::shared_lock in C++14 Standard Library
 *
 * - yamc::shared_lock<Mutex>
 */
namespace yamc {

/// Movable RAII wrapper managing the shared-ownership side of a shared
/// mutex, modelled after C++14 std::shared_lock.
template <typename Mutex>
class shared_lock {
  /// Throw the std::system_error mandated for lock operations in an
  /// invalid state: no associated mutex, or lock already owned.
  void locking_precondition(const char* emsg)
  {
    if (pm_ == nullptr) {
      throw std::system_error(std::make_error_code(std::errc::operation_not_permitted), emsg);
    }
    if (owns_) {
      throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), emsg);
    }
  }

public:
  using mutex_type = Mutex;

  shared_lock() noexcept = default;

  /// Acquire shared ownership of `m` (blocking).
  explicit shared_lock(mutex_type& m)
  {
    m.lock_shared();
    pm_ = &m;
    owns_ = true;
  }

  /// Associate `m` without locking it.
  shared_lock(mutex_type& m, std::defer_lock_t) noexcept
  {
    pm_ = &m;
    owns_ = false;
  }

  /// Try to acquire shared ownership without blocking.
  shared_lock(mutex_type& m, std::try_to_lock_t)
  {
    pm_ = &m;
    owns_ = m.try_lock_shared();
  }

  /// Adopt shared ownership already held by the calling thread.
  shared_lock(mutex_type& m, std::adopt_lock_t)
  {
    pm_ = &m;
    owns_ = true;
  }

  /// Try to acquire shared ownership until `abs_time`.
  template <typename Clock, typename Duration>
  shared_lock(mutex_type& m, const std::chrono::time_point<Clock, Duration>& abs_time)
  {
    pm_ = &m;
    owns_ = m.try_lock_shared_until(abs_time);
  }

  /// Try to acquire shared ownership for `rel_time`.
  template <typename Rep, typename Period>
  shared_lock(mutex_type& m, const std::chrono::duration<Rep, Period>& rel_time)
  {
    pm_ = &m;
    owns_ = m.try_lock_shared_for(rel_time);
  }

  ~shared_lock()
  {
    if (owns_) {
      assert(pm_ != nullptr);
      pm_->unlock_shared();
    }
  }

  shared_lock(const shared_lock&) = delete;
  shared_lock& operator=(const shared_lock&) = delete;

  /// Transfer lock ownership; `rhs` is left detached.
  /// (Cleanup: the old body tested this->pm_/owns_ before stealing, but in
  /// a constructor the members always start as {nullptr, false}, so that
  /// check was dead code.)
  shared_lock(shared_lock&& rhs) noexcept
    : pm_(rhs.pm_)
    , owns_(rhs.owns_)
  {
    rhs.pm_ = nullptr;
    rhs.owns_ = false;
  }

  /// Release any currently-held shared lock, then take over rhs's state.
  shared_lock& operator=(shared_lock&& rhs) noexcept
  {
    if (pm_ && owns_) {
      pm_->unlock_shared();
    }
    pm_ = rhs.pm_;
    owns_ = rhs.owns_;
    rhs.pm_ = nullptr;
    rhs.owns_ = false;
    return *this;
  }

  /// @throws std::system_error if no mutex is associated or lock is owned
  void lock()
  {
    locking_precondition("shared_lock::lock");
    pm_->lock_shared();
    owns_ = true;
  }

  /// @throws std::system_error if no mutex is associated or lock is owned
  bool try_lock()
  {
    locking_precondition("shared_lock::try_lock");
    return (owns_ = pm_->try_lock_shared());
  }

  template <typename Rep, typename Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>& rel_time)
  {
    locking_precondition("shared_lock::try_lock_for");
    return (owns_ = pm_->try_lock_shared_for(rel_time));
  }

  template <typename Clock, typename Duration>
  bool try_lock_until(const std::chrono::time_point<Clock, Duration>& abs_time)
  {
    locking_precondition("shared_lock::try_lock_until");
    return (owns_ = pm_->try_lock_shared_until(abs_time));
  }

  /// @throws std::system_error when the lock is not currently owned
  void unlock()
  {
    assert(pm_ != nullptr);
    if (!owns_) {
      throw std::system_error(std::make_error_code(std::errc::operation_not_permitted), "shared_lock::unlock");
    }
    pm_->unlock_shared();
    owns_ = false;
  }

  void swap(shared_lock& sl) noexcept
  {
    std::swap(pm_, sl.pm_);
    std::swap(owns_, sl.owns_);
  }

  /// Detach from the mutex WITHOUT unlocking it; caller takes over.
  mutex_type* release() noexcept
  {
    mutex_type* result = pm_;
    pm_ = nullptr;
    owns_ = false;
    return result;
  }

  bool owns_lock() const noexcept
  { return owns_; }

  explicit operator bool() const noexcept
  { return owns_; }

  mutex_type* mutex() const noexcept
  { return pm_; }

private:
  mutex_type* pm_ = nullptr;
  bool owns_ = false;
};

} // namespace yamc


namespace std {

/// std::swap() specialization for yamc::shared_lock type
template <typename Mutex>
void swap(yamc::shared_lock<Mutex>& lhs, yamc::shared_lock<Mutex>& rhs) noexcept
{
  lhs.swap(rhs);
}

} // namespace std

#endif
TimedMutex::try_lock_for() 42 | if (mtx.try_lock_for(std::chrono::nanoseconds(1))) { 43 | mtx.unlock(); 44 | } 45 | if (mtx.try_lock_for(std::chrono::seconds(1))) { 46 | mtx.unlock(); 47 | } 48 | if (mtx.try_lock_for(std::chrono::hours(1))) { // shall immediately return 'true'... 49 | mtx.unlock(); 50 | } 51 | // TimedMutex::try_lock_until() 52 | if (mtx.try_lock_until(std::chrono::system_clock::now())) { 53 | mtx.unlock(); 54 | } 55 | if (mtx.try_lock_until(std::chrono::steady_clock::now())) { 56 | mtx.unlock(); 57 | } 58 | if (mtx.try_lock_until(std::chrono::high_resolution_clock::now())) { 59 | mtx.unlock(); 60 | } 61 | } 62 | 63 | 64 | template 65 | void test_requirements_shared() 66 | { 67 | test_requirements(); 68 | SharedMutex mtx; 69 | // SharedMutex::lock_shared(), unlock_shared() 70 | mtx.lock_shared(); 71 | mtx.unlock_shared(); 72 | // SharedMutex::try_lock_shared() 73 | if (mtx.try_lock_shared()) { 74 | mtx.unlock_shared(); 75 | } 76 | } 77 | 78 | 79 | template 80 | void test_requirements_shared_timed() 81 | { 82 | test_requirements_shared(); 83 | test_requirements_timed(); 84 | SharedTimedMutex mtx; 85 | // SharedTimedMutex::try_lock_shared_for() 86 | if (mtx.try_lock_shared_for(std::chrono::nanoseconds(1))) { 87 | mtx.unlock_shared(); 88 | } 89 | if (mtx.try_lock_shared_for(std::chrono::seconds(1))) { 90 | mtx.unlock_shared(); 91 | } 92 | if (mtx.try_lock_shared_for(std::chrono::hours(1))) { // shall immediately return 'true'... 
93 | mtx.unlock_shared(); 94 | } 95 | // SharedTimedMutex::try_lock_shared_until() 96 | if (mtx.try_lock_shared_until(std::chrono::system_clock::now())) { 97 | mtx.unlock_shared(); 98 | } 99 | if (mtx.try_lock_shared_until(std::chrono::steady_clock::now())) { 100 | mtx.unlock_shared(); 101 | } 102 | if (mtx.try_lock_shared_until(std::chrono::high_resolution_clock::now())) { 103 | mtx.unlock_shared(); 104 | } 105 | } 106 | 107 | 108 | int main() 109 | { 110 | test_requirements(); 111 | test_requirements(); 112 | test_requirements(); 113 | // spinlock mutex with yamc::backoff::* policy 114 | test_requirements>>(); 115 | test_requirements>>(); 116 | test_requirements>>(); 117 | test_requirements>(); 118 | test_requirements>(); 119 | test_requirements>(); 120 | test_requirements>(); 121 | test_requirements>(); 122 | test_requirements>(); 123 | 124 | test_requirements(); 125 | test_requirements(); 126 | test_requirements_timed(); 127 | test_requirements_timed(); 128 | test_requirements_shared(); 129 | test_requirements_shared_timed(); 130 | 131 | test_requirements(); 132 | test_requirements(); 133 | test_requirements_timed(); 134 | test_requirements_timed(); 135 | 136 | test_requirements_shared(); 137 | test_requirements_shared_timed(); 138 | // shared_(timed_)muetx with yamc::rwlock::* policy 139 | test_requirements_shared>(); 140 | test_requirements_shared>(); 141 | test_requirements_shared_timed>(); 142 | test_requirements_shared_timed>(); 143 | 144 | test_requirements(); 145 | test_requirements(); 146 | test_requirements_timed(); 147 | test_requirements_timed(); 148 | 149 | test_requirements_shared(); 150 | test_requirements_shared_timed(); 151 | // shared_(timed_)mutex with yamc::rwlock::* policy 152 | test_requirements_shared>(); 153 | test_requirements_shared>(); 154 | test_requirements_shared_timed>(); 155 | test_requirements_shared_timed>(); 156 | return 0; 157 | } 158 | -------------------------------------------------------------------------------- 
/include/yamc_lock_validator.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * yamc_lock_validator.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2017 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 
#ifndef YAMC_LOCK_VALIDATOR_HPP_
#define YAMC_LOCK_VALIDATOR_HPP_

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <vector>


// Set to non-zero to trace every acquire/release/wait transition on stdout.
#ifndef YAMC_CHECK_VERBOSE
#define YAMC_CHECK_VERBOSE 0
#endif

namespace yamc {

/*
 * lock validator for checked mutex
 *
 * - yamc::validator::deadlock
 * - yamc::validator::null
 */
namespace validator {

// Deadlock detector: keeps a process-global "waits-for" table of mutex
// owners/waiters, and searches it for a cycle whenever a thread is about
// to block on a mutex.
class deadlock {
private:
  // Bookkeeping record per live mutex.
  struct entry {
    std::size_t mid;                       // sequential mutex ID, for diagnostics only
    std::vector<std::thread::id> owners;   // threads currently holding this mutex
    std::vector<std::thread::id> waiters;  // threads currently blocked on this mutex
  };
  using mutexmap_type = std::unordered_map<uintptr_t, entry>;

  // Locked view of the global table; the guard mutex stays held for the
  // lifetime of the returned object (RAII via the unique_lock member).
  struct table_ref {
    std::unique_lock<std::mutex> lk;
    mutexmap_type& mutexmap;
    std::size_t& counter;
  };

  static table_ref global_table()
  {
    static std::mutex global_guard;
    static mutexmap_type global_mutexmap;
    static std::size_t global_counter = 0;
    return { std::unique_lock<std::mutex>(global_guard), global_mutexmap, global_counter };
  }

  // Erase every element equal to `value` (erase-remove idiom).
  template <typename T>
  static void remove_elem(std::vector<T>& vec, T value)
  {
    vec.erase(std::remove(vec.begin(), vec.end(), value), vec.end());
  }

  // Depth-first search over the waits-for graph: does some chain of
  // "thread holds mutex / thread waits on mutex" edges starting at
  // thread `tid` lead back to mutex `end_mkey`?
  static bool find_closepath(const mutexmap_type& mutexmap, uintptr_t end_mkey, std::thread::id tid)
  {
    for (mutexmap_type::const_iterator it = mutexmap.begin(); it != mutexmap.end(); ++it) {
      const entry& ent = it->second;
      if (std::find(ent.owners.begin(), ent.owners.end(), tid) == ent.owners.end())
        continue;  // `tid` does not hold this mutex
      if (it->first == end_mkey)
        return true;  // found close path
      for (std::size_t i = 0; i < ent.waiters.size(); ++i) {
        if (find_closepath(mutexmap, end_mkey, ent.waiters[i]))
          return true;
      }
    }
    return false;
  }

  // Pretty-print the whole owners/waiters table for diagnostics.
  static void dump_mutexmap(std::ostream& os, const mutexmap_type& mutexmap)
  {
    for (mutexmap_type::const_iterator it = mutexmap.begin(); it != mutexmap.end(); ++it) {
      os << " Mutex#" << it->second.mid << ": owners={";
      int i = 0;
      for (const auto& id : it->second.owners) {
        os << (i++ ? "," : "") << id;
      }
      os << "} waiters={";
      i = 0;
      for (const auto& id : it->second.waiters) {
        os << (i++ ? "," : "") << id;
      }
      os << "}\n";
    }
    os << std::endl;
  }

public:
  // Register a newly constructed mutex identified by address key `mkey`.
  static void ctor(uintptr_t mkey)
  {
    auto&& table = global_table();
    table.mutexmap[mkey] = { ++table.counter, {}, {} };
  }

  // Unregister a destroyed mutex.
  static void dtor(uintptr_t mkey)
  {
    auto&& table = global_table();
    table.mutexmap.erase(mkey);
  }

  // Record that thread `tid` acquired the mutex (exclusive or shared).
  static void locked(uintptr_t mkey, std::thread::id tid, bool shared)
  {
    auto&& table = global_table();
    table.mutexmap[mkey].owners.push_back(tid);
#if YAMC_CHECK_VERBOSE
    std::cout << "Thread#" << tid << " acquired Mutex#" << table.mutexmap[mkey].mid
      << " " << (shared ? "shared-lock" : "lock") << '\n';
    dump_mutexmap(std::cout, table.mutexmap);
#else
    (void)shared; // suppress "unused variable" warning
#endif
  }

  // Record that thread `tid` released the mutex.
  static void unlocked(uintptr_t mkey, std::thread::id tid, bool shared)
  {
    auto&& table = global_table();
    remove_elem(table.mutexmap[mkey].owners, tid);
#if YAMC_CHECK_VERBOSE
    std::cout << "Thread#" << tid << " released Mutex#" << table.mutexmap[mkey].mid
      << " " << (shared ? "shared-lock" : "lock") << '\n';
    dump_mutexmap(std::cout, table.mutexmap);
#else
    (void)shared; // suppress "unused variable" warning
#endif
  }

  // Record that thread `tid` is about to block on the mutex.
  // Returns false when that wait would close a cycle, i.e. deadlock.
  static bool enqueue(uintptr_t mkey, std::thread::id tid, bool shared)
  {
    auto&& table = global_table();
    table.mutexmap[mkey].waiters.push_back(tid);
    if (find_closepath(table.mutexmap, mkey, tid)) {
      // detect deadlock
      std::cout << "Thread#" << tid << " wait for Mutex#" << table.mutexmap[mkey].mid
        << " " << (shared ? "shared-lock" : "lock") << '\n';
      dump_mutexmap(std::cout, table.mutexmap);
      std::cout << "==== DEADLOCK DETECTED ====" << std::endl;
      return false;
    }
#if YAMC_CHECK_VERBOSE
    std::cout << "Thread#" << tid << " wait for Mutex#" << table.mutexmap[mkey].mid
      << " " << (shared ? "shared-lock" : "lock") << '\n';
    dump_mutexmap(std::cout, table.mutexmap);
#endif
    return true;
  }

  // Remove thread `tid` from the mutex's wait queue (wait finished or aborted).
  static void dequeue(uintptr_t mkey, std::thread::id tid)
  {
    auto&& table = global_table();
    remove_elem(table.mutexmap[mkey].waiters, tid);
  }
};


// No-op validator: disables all checking at zero cost.
class null {
public:
  static void ctor(uintptr_t) {}
  static void dtor(uintptr_t) {}
  static void locked(uintptr_t, std::thread::id, bool) {}
  static void unlocked(uintptr_t, std::thread::id, bool) {}
  static bool enqueue(uintptr_t, std::thread::id, bool) { return true; }
  static void dequeue(uintptr_t, std::thread::id) {}
};


} // namespace validator
} // namespace yamc

#endif
the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef WIN_SEMAPHORE_HPP_ 27 | #define WIN_SEMAPHORE_HPP_ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | // Windows semaphore 34 | #include 35 | 36 | 37 | /// Enable acculate timeout for yamc::win::* primitives 38 | #ifndef YAMC_WIN_ACCURATE_TIMEOUT 39 | #define YAMC_WIN_ACCURATE_TIMEOUT 1 40 | #endif 41 | 42 | 43 | /* 44 | * Semaphores in C++20 Standard Library for Windows platform 45 | * 46 | * - yamc::win::counting_semaphore 47 | * - yamc::win::binary_semaphore 48 | * 49 | * This implementation use native Win32 semaphore. 50 | * https://docs.microsoft.com/windows/win32/sync/semaphore-objects 51 | */ 52 | namespace yamc { 53 | 54 | namespace win { 55 | 56 | template 57 | class counting_semaphore { 58 | ::HANDLE hsem_ = NULL; 59 | 60 | void validate_native_handle(const char* what_arg) 61 | { 62 | if (hsem_ == NULL) { 63 | // [thread.mutex.requirements.mutex] 64 | // invalid_argument - if any native handle type manipulated as part of mutex construction is incorrect. 
65 | throw std::system_error(std::make_error_code(std::errc::invalid_argument), what_arg); 66 | } 67 | } 68 | 69 | template 70 | bool do_try_acquirewait(const std::chrono::duration& timeout) 71 | { 72 | using namespace std::chrono; 73 | // round up timeout to milliseconds precision 74 | DWORD timeout_in_msec = static_cast(duration_cast(timeout + nanoseconds{999999}).count()); 75 | DWORD result = ::WaitForSingleObject(hsem_, timeout_in_msec); 76 | #if YAMC_WIN_ACCURATE_TIMEOUT 77 | if (result == WAIT_TIMEOUT && 0 < timeout_in_msec) { 78 | // Win32 wait functions will return early than specified timeout interval by design. 79 | // (https://docs.microsoft.com/windows/win32/sync/wait-functions for more details) 80 | // 81 | // The current thread sleep one more "tick" to guarantee timing specification in C++ Standard, 82 | // that actual timeout interval shall be longer than requested timeout of try_acquire_*(). 83 | ::Sleep(1); 84 | } 85 | #endif 86 | if (result != WAIT_OBJECT_0 && result != WAIT_TIMEOUT) { 87 | // [thread.mutex.requirements.mutex] 88 | // resource_unavailable_try_again - if any native handle type manipulated is not available. 89 | throw std::system_error(std::make_error_code(std::errc::resource_unavailable_try_again), "WaitForSingleObject"); 90 | } 91 | return (result == WAIT_OBJECT_0); 92 | } 93 | 94 | public: 95 | static constexpr std::ptrdiff_t (max)() noexcept 96 | { 97 | // Windows.h header defines max() and min() function-like macros that make trouble, 98 | // we put parenthesis around `max` identifier to prevent unexpected macro expansion. 
99 | // https://docs.microsoft.com/windows/win32/multimedia/max 100 | static_assert(0 <= least_max_value, "least_max_value shall be non-negative"); 101 | return least_max_value; 102 | } 103 | 104 | /*constexpr*/ explicit counting_semaphore(std::ptrdiff_t desired) 105 | { 106 | assert(0 <= desired && desired <= (max)()); 107 | hsem_ = ::CreateSemaphore(NULL, (LONG)desired, (LONG)((max)()), NULL); 108 | // counting_semaphore constructor throws nothing. 109 | } 110 | 111 | ~counting_semaphore() 112 | { 113 | if (hsem_ != NULL) { 114 | ::CloseHandle(hsem_); 115 | } 116 | } 117 | 118 | counting_semaphore(const counting_semaphore&) = delete; 119 | counting_semaphore& operator=(const counting_semaphore&) = delete; 120 | 121 | void release(std::ptrdiff_t update = 1) 122 | { 123 | validate_native_handle("counting_semaphore::release"); 124 | BOOL result = ::ReleaseSemaphore(hsem_, (LONG)update, NULL); 125 | if (!result) { 126 | // [thread.mutex.requirements.mutex] 127 | // resource_unavailable_try_again - if any native handle type manipulated is not available. 128 | throw std::system_error(std::make_error_code(std::errc::resource_unavailable_try_again), "ReleaseSemaphore"); 129 | } 130 | } 131 | 132 | void acquire() 133 | { 134 | validate_native_handle("counting_semaphore::acquire"); 135 | DWORD result = ::WaitForSingleObject(hsem_, INFINITE); 136 | if (result != WAIT_OBJECT_0) { 137 | // [thread.mutex.requirements.mutex] 138 | // resource_unavailable_try_again - if any native handle type manipulated is not available. 
139 | throw std::system_error(std::make_error_code(std::errc::resource_unavailable_try_again), "WaitForSingleObject"); 140 | } 141 | } 142 | 143 | bool try_acquire() noexcept 144 | { 145 | return (hsem_ != NULL && ::WaitForSingleObject(hsem_, 0) == WAIT_OBJECT_0); 146 | } 147 | 148 | template 149 | bool try_acquire_for(const std::chrono::duration& rel_time) 150 | { 151 | validate_native_handle("counting_semaphore::try_acquire_for"); 152 | return do_try_acquirewait(rel_time); 153 | } 154 | 155 | template 156 | bool try_acquire_until(const std::chrono::time_point& abs_time) 157 | { 158 | validate_native_handle("counting_semaphore::try_acquire_until"); 159 | return do_try_acquirewait(abs_time - Clock::now()); 160 | } 161 | }; 162 | 163 | using binary_semaphore = counting_semaphore<1>; 164 | 165 | } // namespace win 166 | } // namespace yamc 167 | 168 | #endif 169 | -------------------------------------------------------------------------------- /include/gcd_semaphore.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * gcd_semaphore.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2019 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 
17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef GCD_SEMAPHORE_HPP_ 27 | #define GCD_SEMAPHORE_HPP_ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | // Grand Central Dispatch (GCD) 36 | #include 37 | 38 | 39 | namespace yamc { 40 | 41 | /* 42 | * Semaphores in C++20 Standard Library for macOS/iOS families 43 | * 44 | * - yamc::gcd::counting_semaphore 45 | * - yamc::gcd::binary_semaphore 46 | * 47 | * This implementation use dispatch semaphore of GCD runtime. 48 | * https://developer.apple.com/documentation/dispatch/dispatchsemaphore 49 | */ 50 | namespace gcd { 51 | 52 | namespace detail { 53 | 54 | template 55 | inline 56 | int64_t 57 | from_unix_epoch(const std::chrono::time_point& tp) 58 | { 59 | using namespace std::chrono; 60 | auto rel_time = tp - Clock::now(); 61 | return duration_cast((system_clock::now() + rel_time).time_since_epoch()).count(); 62 | } 63 | 64 | template 65 | inline 66 | int64_t 67 | from_unix_epoch(const std::chrono::time_point& tp) 68 | { 69 | // Until C++20, the epoch of std::chrono::system_clock is unspecified, 70 | // but most implementation use UNIX epoch (19700101T000000Z). 
71 | using namespace std::chrono; 72 | return duration_cast(tp.time_since_epoch()).count(); 73 | } 74 | 75 | } // namespace detail 76 | 77 | 78 | template ::max()> 79 | class counting_semaphore { 80 | ::dispatch_semaphore_t dsema_ = NULL; 81 | 82 | void validate_native_handle(const char* what_arg) 83 | { 84 | if (dsema_ == NULL) { 85 | // [thread.mutex.requirements.mutex] 86 | // invalid_argument - if any native handle type manipulated as part of mutex construction is incorrect. 87 | throw std::system_error(std::make_error_code(std::errc::invalid_argument), what_arg); 88 | } 89 | } 90 | 91 | public: 92 | static constexpr std::ptrdiff_t max() noexcept 93 | { 94 | static_assert(0 <= least_max_value, "least_max_value shall be non-negative"); 95 | // We assume least_max_value equals to std::numeric_limits::max() here. 96 | // The official document says nothing about the upper limit of initial value. 97 | return least_max_value; 98 | } 99 | 100 | /*constexpr*/ explicit counting_semaphore(std::ptrdiff_t desired) 101 | { 102 | assert(0 <= desired && desired <= max()); 103 | dsema_ = ::dispatch_semaphore_create((long)desired); 104 | // counting_semaphore constructor throws nothing. 105 | } 106 | 107 | ~counting_semaphore() 108 | { 109 | // dispatch_semaphore_create() function is declared with DISPATCH_MALLOC, 110 | // alias of GNU __malloc__ attribute. We need free() when no ObjC runtime. 111 | ::free(dsema_); 112 | } 113 | 114 | counting_semaphore(const counting_semaphore&) = delete; 115 | counting_semaphore& operator=(const counting_semaphore&) = delete; 116 | 117 | void release(std::ptrdiff_t update = 1) 118 | { 119 | validate_native_handle("counting_semaphore::release"); 120 | while (0 < update--) { 121 | ::dispatch_semaphore_signal(dsema_); 122 | // dispatch_semaphore_signal() function return number of threads signaled. 
123 | } 124 | } 125 | 126 | void acquire() 127 | { 128 | validate_native_handle("counting_semaphore::acquire"); 129 | long result = ::dispatch_semaphore_wait(dsema_, DISPATCH_TIME_FOREVER); 130 | if (result != KERN_SUCCESS) { 131 | // [thread.mutex.requirements.mutex] 132 | // resource_unavailable_try_again - if any native handle type manipulated is not available. 133 | throw std::system_error(std::make_error_code(std::errc::resource_unavailable_try_again), "dispatch_semaphore_wait"); 134 | } 135 | } 136 | 137 | bool try_acquire() noexcept 138 | { 139 | return (dsema_ != NULL && ::dispatch_semaphore_wait(dsema_, DISPATCH_TIME_NOW) == KERN_SUCCESS); 140 | } 141 | 142 | template 143 | bool try_acquire_for(const std::chrono::duration& rel_time) 144 | { 145 | using namespace std::chrono; 146 | validate_native_handle("counting_semaphore::try_acquire_for"); 147 | int64_t delta = duration_cast(rel_time).count(); 148 | auto timeout = ::dispatch_time(DISPATCH_TIME_NOW, delta); 149 | 150 | long result = ::dispatch_semaphore_wait(dsema_, timeout); 151 | if (result != KERN_SUCCESS && result != KERN_OPERATION_TIMED_OUT) { 152 | // [thread.mutex.requirements.mutex] 153 | // resource_unavailable_try_again - if any native handle type manipulated is not available. 
154 | throw std::system_error(std::make_error_code(std::errc::resource_unavailable_try_again), "dispatch_semaphore_wait"); 155 | } 156 | return (result == KERN_SUCCESS); 157 | } 158 | 159 | template 160 | bool try_acquire_until(const std::chrono::time_point& abs_time) 161 | { 162 | validate_native_handle("counting_semaphore::try_acquire_until"); 163 | const struct ::timespec unix_epoch = { 0, 0 }; 164 | auto timeout = ::dispatch_walltime(&unix_epoch, detail::from_unix_epoch(abs_time)); 165 | 166 | long result = ::dispatch_semaphore_wait(dsema_, timeout); 167 | if (result != KERN_SUCCESS && result != KERN_OPERATION_TIMED_OUT) { 168 | // [thread.mutex.requirements.mutex] 169 | // resource_unavailable_try_again - if any native handle type manipulated is not available. 170 | throw std::system_error(std::make_error_code(std::errc::resource_unavailable_try_again), "dispatch_semaphore_wait"); 171 | } 172 | return (result == KERN_SUCCESS); 173 | } 174 | }; 175 | 176 | using binary_semaphore = counting_semaphore<1>; 177 | 178 | } // namespace gcd 179 | } // namespace yamc 180 | 181 | #endif 182 | -------------------------------------------------------------------------------- /include/alternate_shared_mutex.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * alternate_shared_mutex.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2017 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or 
substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef YAMC_ALTERNATE_SHARED_MUTEX_HPP_ 27 | #define YAMC_ALTERNATE_SHARED_MUTEX_HPP_ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include "yamc_rwlock_sched.hpp" 34 | 35 | 36 | namespace yamc { 37 | 38 | /* 39 | * alternate implementation of shared mutex variants 40 | * 41 | * - yamc::alternate::shared_mutex 42 | * - yamc::alternate::shared_timed_mutex 43 | * - yamc::alternate::basic_shared_mutex 44 | * - yamc::alternate::basic_shared_timed_mutex 45 | */ 46 | namespace alternate { 47 | 48 | namespace detail { 49 | 50 | template 51 | class shared_mutex_base { 52 | protected: 53 | typename RwLockPolicy::state state_; 54 | std::condition_variable cv_; 55 | std::mutex mtx_; 56 | 57 | void lock() 58 | { 59 | std::unique_lock lk(mtx_); 60 | RwLockPolicy::before_wait_wlock(state_); 61 | while (RwLockPolicy::wait_wlock(state_)) { 62 | cv_.wait(lk); 63 | } 64 | RwLockPolicy::after_wait_wlock(state_); 65 | RwLockPolicy::acquire_wlock(state_); 66 | } 67 | 68 | bool try_lock() 69 | { 70 | std::lock_guard lk(mtx_); 71 | if (RwLockPolicy::wait_wlock(state_)) 72 | return false; 73 | RwLockPolicy::acquire_wlock(state_); 74 | return true; 75 | } 76 | 77 | void unlock() 78 | { 79 | std::lock_guard lk(mtx_); 80 | RwLockPolicy::release_wlock(state_); 81 | cv_.notify_all(); 82 | } 83 | 84 | void lock_shared() 85 | { 86 | std::unique_lock lk(mtx_); 87 | while (RwLockPolicy::wait_rlock(state_)) { 88 | 
cv_.wait(lk); 89 | } 90 | RwLockPolicy::acquire_rlock(state_); 91 | } 92 | 93 | bool try_lock_shared() 94 | { 95 | std::lock_guard lk(mtx_); 96 | if (RwLockPolicy::wait_rlock(state_)) 97 | return false; 98 | RwLockPolicy::acquire_rlock(state_); 99 | return true; 100 | } 101 | 102 | void unlock_shared() 103 | { 104 | std::lock_guard lk(mtx_); 105 | if (RwLockPolicy::release_rlock(state_)) { 106 | cv_.notify_all(); 107 | } 108 | } 109 | }; 110 | 111 | } // namespace detail 112 | 113 | 114 | template 115 | class basic_shared_mutex : private detail::shared_mutex_base { 116 | using base = detail::shared_mutex_base; 117 | 118 | public: 119 | basic_shared_mutex() = default; 120 | ~basic_shared_mutex() = default; 121 | 122 | basic_shared_mutex(const basic_shared_mutex&) = delete; 123 | basic_shared_mutex& operator=(const basic_shared_mutex&) = delete; 124 | 125 | using base::lock; 126 | using base::try_lock; 127 | using base::unlock; 128 | 129 | using base::lock_shared; 130 | using base::try_lock_shared; 131 | using base::unlock_shared; 132 | }; 133 | 134 | using shared_mutex = basic_shared_mutex; 135 | 136 | 137 | template 138 | class basic_shared_timed_mutex : private detail::shared_mutex_base { 139 | using base = detail::shared_mutex_base; 140 | 141 | using base::state_; 142 | using base::cv_; 143 | using base::mtx_; 144 | 145 | template 146 | bool do_try_lockwait(const std::chrono::time_point& tp) 147 | { 148 | std::unique_lock lk(mtx_); 149 | RwLockPolicy::before_wait_wlock(state_); 150 | while (RwLockPolicy::wait_wlock(state_)) { 151 | if (cv_.wait_until(lk, tp) == std::cv_status::timeout) { 152 | if (!RwLockPolicy::wait_wlock(state_)) // re-check predicate 153 | break; 154 | RwLockPolicy::after_wait_wlock(state_); 155 | return false; 156 | } 157 | } 158 | RwLockPolicy::after_wait_wlock(state_); 159 | RwLockPolicy::acquire_wlock(state_); 160 | return true; 161 | } 162 | 163 | template 164 | bool do_try_lock_sharedwait(const std::chrono::time_point& tp) 165 | { 166 | 
std::unique_lock lk(mtx_); 167 | while (RwLockPolicy::wait_rlock(state_)) { 168 | if (cv_.wait_until(lk, tp) == std::cv_status::timeout) { 169 | if (!RwLockPolicy::wait_rlock(state_)) // re-check predicate 170 | break; 171 | return false; 172 | } 173 | } 174 | RwLockPolicy::acquire_rlock(state_); 175 | return true; 176 | } 177 | 178 | public: 179 | basic_shared_timed_mutex() = default; 180 | ~basic_shared_timed_mutex() = default; 181 | 182 | basic_shared_timed_mutex(const basic_shared_timed_mutex&) = delete; 183 | basic_shared_timed_mutex& operator=(const basic_shared_timed_mutex&) = delete; 184 | 185 | using base::lock; 186 | using base::try_lock; 187 | using base::unlock; 188 | 189 | template 190 | bool try_lock_for(const std::chrono::duration& duration) 191 | { 192 | const auto tp = std::chrono::steady_clock::now() + duration; 193 | return do_try_lockwait(tp); 194 | } 195 | 196 | template 197 | bool try_lock_until(const std::chrono::time_point& tp) 198 | { 199 | return do_try_lockwait(tp); 200 | } 201 | 202 | using base::lock_shared; 203 | using base::try_lock_shared; 204 | using base::unlock_shared; 205 | 206 | template 207 | bool try_lock_shared_for(const std::chrono::duration& duration) 208 | { 209 | const auto tp = std::chrono::steady_clock::now() + duration; 210 | return do_try_lock_sharedwait(tp); 211 | } 212 | 213 | template 214 | bool try_lock_shared_until(const std::chrono::time_point& tp) 215 | { 216 | return do_try_lock_sharedwait(tp); 217 | } 218 | }; 219 | 220 | using shared_timed_mutex = basic_shared_timed_mutex; 221 | 222 | } // namespace alternate 223 | } // namespace yamc 224 | 225 | #endif 226 | -------------------------------------------------------------------------------- /include/alternate_mutex.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * alternate_mutex.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2017 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any 
person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 
25 | */ 26 | #ifndef YAMC_ALTERNATE_MUTEX_HPP_ 27 | #define YAMC_ALTERNATE_MUTEX_HPP_ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | 36 | 37 | namespace yamc { 38 | 39 | /* 40 | * alternate implementation of mutex variants 41 | * 42 | * - yamc::alternate::mutex (alias of std::mutex) 43 | * - yamc::alternate::recursive_mutex 44 | * - yamc::alternate::timed_mutex 45 | * - yamc::alternate::recursive_timed_mutex 46 | */ 47 | namespace alternate { 48 | 49 | // declare for consistency 50 | using mutex = std::mutex; 51 | 52 | 53 | class recursive_mutex { 54 | std::size_t ncount_ = 0; 55 | std::atomic owner_ = {}; 56 | std::mutex mtx_; 57 | 58 | public: 59 | recursive_mutex() = default; 60 | ~recursive_mutex() 61 | { 62 | assert(ncount_ == 0 && owner_ == std::thread::id()); 63 | } 64 | 65 | recursive_mutex(const recursive_mutex&) = delete; 66 | recursive_mutex& operator=(const recursive_mutex&) = delete; 67 | 68 | void lock() 69 | { 70 | const auto tid = std::this_thread::get_id(); 71 | if (owner_.load(std::memory_order_relaxed) == tid) { 72 | ++ncount_; 73 | } else { 74 | mtx_.lock(); 75 | owner_.store(tid, std::memory_order_relaxed); 76 | ncount_ = 1; 77 | } 78 | } 79 | 80 | bool try_lock() 81 | { 82 | const auto tid = std::this_thread::get_id(); 83 | if (owner_.load(std::memory_order_relaxed) == tid) { 84 | ++ncount_; 85 | } else { 86 | if (!mtx_.try_lock()) 87 | return false; 88 | owner_.store(tid, std::memory_order_relaxed); 89 | ncount_ = 1; 90 | } 91 | return true; 92 | } 93 | 94 | void unlock() 95 | { 96 | assert(0 < ncount_ && owner_ == std::this_thread::get_id()); 97 | if (--ncount_ == 0) { 98 | owner_.store(std::thread::id(), std::memory_order_relaxed); 99 | mtx_.unlock(); 100 | } 101 | } 102 | }; 103 | 104 | 105 | class timed_mutex { 106 | int state_ = 0; 107 | std::condition_variable cv_; 108 | std::mutex mtx_; 109 | 110 | template 111 | bool do_try_lockwait(const std::chrono::time_point& tp) 112 | { 113 | 
std::unique_lock lk(mtx_); 114 | while (state_ != 0) { 115 | if (cv_.wait_until(lk, tp) == std::cv_status::timeout) { 116 | if (state_ == 0) // re-check predicate 117 | break; 118 | return false; 119 | } 120 | } 121 | state_ = 1; 122 | return true; 123 | } 124 | 125 | public: 126 | timed_mutex() = default; 127 | ~timed_mutex() 128 | { 129 | assert(state_ == 0); 130 | } 131 | 132 | timed_mutex(const timed_mutex&) = delete; 133 | timed_mutex& operator=(const timed_mutex&) = delete; 134 | 135 | void lock() 136 | { 137 | std::unique_lock lk(mtx_); 138 | while (state_ != 0) { 139 | cv_.wait(lk); 140 | } 141 | state_ = 1; 142 | } 143 | 144 | bool try_lock() 145 | { 146 | std::lock_guard lk(mtx_); 147 | if (state_ != 0) 148 | return false; 149 | state_ = 1; 150 | return true; 151 | } 152 | 153 | void unlock() 154 | { 155 | std::lock_guard lk(mtx_); 156 | assert(state_ == 1); 157 | state_ = 0; 158 | cv_.notify_one(); 159 | } 160 | 161 | template 162 | bool try_lock_for(const std::chrono::duration& duration) 163 | { 164 | const auto tp = std::chrono::steady_clock::now() + duration; 165 | return do_try_lockwait(tp); 166 | } 167 | 168 | template 169 | bool try_lock_until(const std::chrono::time_point& tp) 170 | { 171 | return do_try_lockwait(tp); 172 | } 173 | }; 174 | 175 | 176 | class recursive_timed_mutex { 177 | std::size_t ncount_ = 0; 178 | std::thread::id owner_ = {}; 179 | std::condition_variable cv_; 180 | std::mutex mtx_; 181 | 182 | template 183 | bool do_try_lockwait(const std::chrono::time_point& tp) 184 | { 185 | const auto tid = std::this_thread::get_id(); 186 | std::unique_lock lk(mtx_); 187 | if (owner_ == tid) { 188 | ++ncount_; 189 | return true; 190 | } 191 | while (ncount_ != 0) { 192 | if (cv_.wait_until(lk, tp) == std::cv_status::timeout) { 193 | if (ncount_ == 0) // re-check predicate 194 | break; 195 | return false; 196 | } 197 | } 198 | assert(owner_ == std::thread::id()); 199 | ncount_ = 1; 200 | owner_ = tid; 201 | return true; 202 | } 203 | 204 | 
public: 205 | recursive_timed_mutex() = default; 206 | ~recursive_timed_mutex() 207 | { 208 | assert(ncount_ == 0 && owner_ == std::thread::id()); 209 | } 210 | 211 | recursive_timed_mutex(const recursive_timed_mutex&) = delete; 212 | recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete; 213 | 214 | void lock() 215 | { 216 | const auto tid = std::this_thread::get_id(); 217 | std::unique_lock lk(mtx_); 218 | if (owner_ == tid) { 219 | ++ncount_; 220 | return; 221 | } 222 | while (ncount_ != 0) { 223 | cv_.wait(lk); 224 | } 225 | assert(owner_ == std::thread::id()); 226 | ncount_ = 1; 227 | owner_ = tid; 228 | } 229 | 230 | bool try_lock() 231 | { 232 | const auto tid = std::this_thread::get_id(); 233 | std::lock_guard lk(mtx_); 234 | if (owner_ == tid) { 235 | ++ncount_; 236 | return true; 237 | } 238 | if (ncount_ == 0) { 239 | assert(owner_ == std::thread::id()); 240 | ncount_ = 1; 241 | owner_ = tid; 242 | return true; 243 | } 244 | return false; 245 | } 246 | 247 | void unlock() 248 | { 249 | std::lock_guard lk(mtx_); 250 | assert(0 < ncount_ && owner_ == std::this_thread::get_id()); 251 | if (--ncount_ == 0) { 252 | owner_ = std::thread::id(); 253 | cv_.notify_one(); 254 | } 255 | } 256 | 257 | template 258 | bool try_lock_for(const std::chrono::duration& duration) 259 | { 260 | const auto tp = std::chrono::steady_clock::now() + duration; 261 | return do_try_lockwait(tp); 262 | } 263 | 264 | template 265 | bool try_lock_until(const std::chrono::time_point& tp) 266 | { 267 | return do_try_lockwait(tp); 268 | } 269 | }; 270 | 271 | } // namespace alternate 272 | } // namespace yamc 273 | 274 | #endif 275 | -------------------------------------------------------------------------------- /include/win_native_mutex.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * win_native_mutex.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2019 yohhoy 7 | * 8 | * Permission is hereby granted, free of 
charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 
25 | */ 26 | #ifndef WIN_NATIVE_MUTEX_HPP_ 27 | #define WIN_NATIVE_MUTEX_HPP_ 28 | 29 | #include 30 | #include 31 | // Windows mutex 32 | #include 33 | 34 | 35 | /// Enable accurate timeout for yamc::win::* primitives 36 | #ifndef YAMC_WIN_ACCURATE_TIMEOUT 37 | #define YAMC_WIN_ACCURATE_TIMEOUT 1 38 | #endif 39 | 40 | 41 | namespace yamc { 42 | 43 | /* 44 | * Native Mutex/CriticalSection/SlimRWLock wrapper on Windows platform 45 | * 46 | * - yamc::win::native_mutex 47 | * - yamc::win::critical_section 48 | * - yamc::win::slim_rwlock 49 | * 50 | * Characteristics: 51 | * | | recursive | timed | shared | 52 | * +------------------+-----------+---------+---------+ 53 | * | native_mutex | support | support | N/A | 54 | * | critical_section | support | N/A | N/A | 55 | * | slim_rwlock | N/A | N/A | support | 56 | * 57 | * https://docs.microsoft.com/windows/win32/sync/mutex-objects 58 | * https://docs.microsoft.com/windows/win32/sync/critical-section-objects 59 | * https://docs.microsoft.com/windows/win32/sync/slim-reader-writer--srw--locks 60 | */ 61 | namespace win { 62 | 63 | class native_mutex { 64 | ::HANDLE hmtx_ = NULL; 65 | 66 | template 67 | bool do_try_lockwait(const std::chrono::duration& timeout) 68 | { 69 | using namespace std::chrono; 70 | // round up timeout to milliseconds precision 71 | DWORD timeout_in_msec = static_cast(duration_cast(timeout + nanoseconds{999999}).count()); 72 | DWORD result = ::WaitForSingleObject(hmtx_, timeout_in_msec); 73 | #if YAMC_WIN_ACCURATE_TIMEOUT 74 | if (result == WAIT_TIMEOUT && 0 < timeout_in_msec) { 75 | // Win32 wait functions will return early than specified timeout interval by design. 76 | // (https://docs.microsoft.com/windows/win32/sync/wait-functions for more details) 77 | // 78 | // The current thread sleep one more "tick" to guarantee timing specification in C++ Standard, 79 | // that actual timeout interval shall be longer than requested timeout of try_lock_*(). 
80 | ::Sleep(1); 81 | } 82 | #endif 83 | if (result != WAIT_OBJECT_0 && result != WAIT_TIMEOUT) { 84 | // [thread.mutex.requirements.mutex] 85 | // resource_unavailable_try_again - if any native handle type manipulated is not available. 86 | throw std::system_error(std::make_error_code(std::errc::resource_unavailable_try_again), "WaitForSingleObject"); 87 | } 88 | return (result == WAIT_OBJECT_0); 89 | } 90 | 91 | public: 92 | native_mutex() 93 | { 94 | hmtx_ = ::CreateMutex(NULL, FALSE, NULL); 95 | if (hmtx_ == NULL) { 96 | // [thread.mutex.requirements.mutex] 97 | // invalid_argument - if any native handle type manipulated as part of mutex construction is incorrect. 98 | throw std::system_error(std::make_error_code(std::errc::invalid_argument), "CreateMutex"); 99 | } 100 | } 101 | 102 | ~native_mutex() 103 | { 104 | ::CloseHandle(hmtx_); 105 | } 106 | 107 | native_mutex(const native_mutex&) = delete; 108 | native_mutex& operator=(const native_mutex&) = delete; 109 | 110 | void lock() 111 | { 112 | DWORD result = ::WaitForSingleObject(hmtx_, INFINITE); 113 | if (result != WAIT_OBJECT_0) { 114 | // [thread.mutex.requirements.mutex] 115 | // resource_unavailable_try_again - if any native handle type manipulated is not available. 
116 | throw std::system_error(std::make_error_code(std::errc::resource_unavailable_try_again), "WaitForSingleObject"); 117 | } 118 | } 119 | 120 | bool try_lock() 121 | { 122 | return (::WaitForSingleObject(hmtx_, 0) == WAIT_OBJECT_0); 123 | } 124 | 125 | template 126 | bool try_lock_for(const std::chrono::duration& rel_time) 127 | { 128 | return do_try_lockwait(rel_time); 129 | } 130 | 131 | template 132 | bool try_lock_until(const std::chrono::time_point& abs_time) 133 | { 134 | return do_try_lockwait(abs_time - Clock::now()); 135 | } 136 | 137 | void unlock() 138 | { 139 | ::ReleaseMutex(hmtx_); 140 | } 141 | 142 | using native_handle_type = ::HANDLE; 143 | native_handle_type native_handle() 144 | { 145 | return &hmtx_; 146 | } 147 | }; 148 | 149 | 150 | class critical_section { 151 | ::CRITICAL_SECTION cs_; 152 | 153 | public: 154 | critical_section() noexcept 155 | { 156 | ::InitializeCriticalSection(&cs_); 157 | } 158 | 159 | ~critical_section() 160 | { 161 | ::DeleteCriticalSection(&cs_); 162 | } 163 | 164 | critical_section(const critical_section&) = delete; 165 | critical_section& operator=(const critical_section&) = delete; 166 | 167 | void lock() 168 | { 169 | ::EnterCriticalSection(&cs_); 170 | } 171 | 172 | bool try_lock() 173 | { 174 | return (::TryEnterCriticalSection(&cs_) != 0); 175 | // explicit comparison to 0 to suppress "warning C4800" 176 | } 177 | 178 | void unlock() 179 | { 180 | ::LeaveCriticalSection(&cs_); 181 | } 182 | 183 | using native_handle_type = ::CRITICAL_SECTION*; 184 | native_handle_type native_handle() 185 | { 186 | return &cs_; 187 | } 188 | }; 189 | 190 | 191 | class slim_rwlock { 192 | ::SRWLOCK srwlock_ = SRWLOCK_INIT; 193 | 194 | public: 195 | slim_rwlock() = default; 196 | ~slim_rwlock() = default; 197 | 198 | slim_rwlock(const slim_rwlock&) = delete; 199 | slim_rwlock& operator=(const slim_rwlock&) = delete; 200 | 201 | void lock() 202 | { 203 | ::AcquireSRWLockExclusive(&srwlock_); 204 | } 205 | 206 | bool try_lock() 
207 | { 208 | return (::TryAcquireSRWLockExclusive(&srwlock_) != 0); 209 | // explicit comparison to 0 to suppress "warning C4800" 210 | } 211 | 212 | void unlock() 213 | { 214 | ::ReleaseSRWLockExclusive(&srwlock_); 215 | } 216 | 217 | void lock_shared() 218 | { 219 | ::AcquireSRWLockShared(&srwlock_); 220 | } 221 | 222 | bool try_lock_shared() 223 | { 224 | return (::TryAcquireSRWLockShared(&srwlock_) != 0); 225 | // explicit comparison to 0 to suppress "warning C4800" 226 | } 227 | 228 | void unlock_shared() 229 | { 230 | ::ReleaseSRWLockShared(&srwlock_); 231 | } 232 | 233 | using native_handle_type = ::SRWLOCK*; 234 | native_handle_type native_handle() 235 | { 236 | return &srwlock_; 237 | } 238 | }; 239 | 240 | 241 | using mutex = critical_section; 242 | using recursive_mutex = critical_section; 243 | using timed_mutex = native_mutex; 244 | using recursive_timed_mutex = native_mutex; 245 | 246 | using shared_mutex = slim_rwlock; 247 | // Windows have no native primitives equivalent to shared_timed_mutex 248 | 249 | 250 | } // namespace win 251 | } // namespace yamc 252 | 253 | #endif 254 | -------------------------------------------------------------------------------- /tests/perf_rwlock.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * perf_rwlock.cpp 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include "alternate_shared_mutex.hpp" 14 | #include "fair_mutex.hpp" 15 | #include "fair_shared_mutex.hpp" 16 | #include "yamc_testutil.hpp" 17 | 18 | 19 | // measurement duration 20 | #define PERF_DURATION std::chrono::seconds(5) 21 | 22 | #define PERF_WEIGHT_TASK 100 23 | #define PERF_WEIGHT_WAIT 200 24 | 25 | #ifndef PERF_SCHED_YEILD 26 | #define PERF_SCHED_YEILD std::this_thread::yield() 27 | #endif 28 | 29 | // dummy task (waste CPU instructions) 30 | #define PERF_DUMMY_TASK(weight_) { volatile unsigned n = (weight_); while 
(--n); } 31 | 32 | 33 | struct config { 34 | std::size_t nwriter; 35 | std::size_t nreader; 36 | }; 37 | 38 | 39 | template 40 | void perform_rwlock_contention(const config& cfg) 41 | { 42 | yamc::test::barrier gate(cfg.nwriter + cfg.nreader + 1); 43 | std::vector thds; 44 | std::vector counters(cfg.nwriter + cfg.nreader); 45 | 46 | std::atomic running = {1}; 47 | SharedMutex mtx; 48 | 49 | // setup writer threads 50 | for (std::size_t i = 0; i < cfg.nwriter; i++) { 51 | std::size_t idx = i; 52 | thds.emplace_back([&,idx]{ 53 | std::size_t nwcount = 0; 54 | gate.await(); // start 55 | while (running.load(std::memory_order_relaxed)) { 56 | mtx.lock(); 57 | PERF_DUMMY_TASK(PERF_WEIGHT_TASK) 58 | ++nwcount; // write op 59 | mtx.unlock(); 60 | PERF_DUMMY_TASK(PERF_WEIGHT_WAIT) 61 | PERF_SCHED_YEILD; 62 | } 63 | gate.await(); // end 64 | counters[idx] = nwcount; 65 | }); 66 | } 67 | 68 | // setup reader threads 69 | for (std::size_t i = 0; i < cfg.nreader; i++) { 70 | std::size_t idx = cfg.nwriter + i; 71 | thds.emplace_back([&,idx]{ 72 | std::size_t nrcount = 0; 73 | gate.await(); // start 74 | while (running.load(std::memory_order_relaxed)) { 75 | mtx.lock_shared(); 76 | PERF_DUMMY_TASK(PERF_WEIGHT_TASK) 77 | ++nrcount; // read op 78 | mtx.unlock_shared(); 79 | PERF_DUMMY_TASK(PERF_WEIGHT_WAIT) 80 | PERF_SCHED_YEILD; 81 | } 82 | gate.await(); // end 83 | counters[idx] = nrcount; 84 | }); 85 | } 86 | 87 | // run measurement 88 | yamc::test::stopwatch<> sw; 89 | gate.await(); // start 90 | std::this_thread::sleep_for(PERF_DURATION); 91 | running.store(0, std::memory_order_relaxed); 92 | gate.await(); // end 93 | double elapsed = (double)sw.elapsed().count() / 1000000.; // [sec] 94 | 95 | // summarize write/read issue count 96 | for (auto& t : thds) { 97 | t.join(); 98 | } 99 | auto pivot = counters.begin(); 100 | std::advance(pivot, cfg.nwriter); 101 | std::size_t nwissue = std::accumulate(counters.begin(), pivot, std::size_t{0}); 102 | std::size_t nrissue = 
std::accumulate(pivot, counters.end(), std::size_t{0}); 103 | 104 | // average [count/sec/thread] 105 | double wavg = (double)nwissue / cfg.nwriter / elapsed; 106 | double ravg = (double)nrissue / cfg.nreader / elapsed; 107 | // SD(standard deviation) [count/sec/thread] 108 | double wsd = std::sqrt(std::accumulate(counters.begin(), pivot, 0., 109 | [wavg, elapsed](double acc, std::size_t v) { 110 | return acc + (v / elapsed - wavg) * (v / elapsed - wavg); 111 | }) / cfg.nwriter); 112 | double rsd = std::sqrt(std::accumulate(pivot, counters.end(), 0., 113 | [ravg, elapsed](double acc, std::size_t v) { 114 | return acc + (v / elapsed - ravg) * (v / elapsed - ravg); 115 | }) / cfg.nreader); 116 | 117 | // print result 118 | std::cout 119 | << cfg.nwriter << '\t' << nwissue << '\t' << wavg << '\t' << wsd << '\t' 120 | << cfg.nreader << '\t' << nrissue << '\t' << ravg << '\t' << rsd << std::endl; 121 | } 122 | 123 | 124 | template 125 | void perform_lock_contention(const config& cfg) 126 | { 127 | std::size_t nthread = cfg.nwriter + cfg.nreader; 128 | yamc::test::barrier gate(nthread + 1); 129 | std::vector thds; 130 | std::vector counters(nthread); 131 | 132 | std::atomic running = {1}; 133 | Mutex mtx; 134 | 135 | // writer/reader threads 136 | for (std::size_t i = 0; i < nthread; i++) { 137 | std::size_t idx = i; 138 | thds.emplace_back([&,idx]{ 139 | std::size_t ncount = 0; 140 | gate.await(); // start 141 | while (running.load(std::memory_order_relaxed)) { 142 | mtx.lock(); 143 | PERF_DUMMY_TASK(PERF_WEIGHT_TASK) 144 | ++ncount; // write/read op 145 | mtx.unlock(); 146 | PERF_DUMMY_TASK(PERF_WEIGHT_WAIT) 147 | PERF_SCHED_YEILD; 148 | } 149 | gate.await(); // end 150 | counters[idx] = ncount; 151 | }); 152 | } 153 | 154 | // run measurement 155 | yamc::test::stopwatch<> sw; 156 | gate.await(); // start 157 | std::this_thread::sleep_for(PERF_DURATION); 158 | running.store(0, std::memory_order_relaxed); 159 | gate.await(); // end 160 | double elapsed = 
(double)sw.elapsed().count() / 1000000.; // [sec] 161 | 162 | // summarize write/read issue count 163 | for (auto& t : thds) { 164 | t.join(); 165 | } 166 | std::size_t nissue = std::accumulate(counters.begin(), counters.end(), std::size_t{0}); 167 | 168 | // average [count/sec/thread] 169 | double avg = (double)nissue / nthread / elapsed; 170 | // SD(standard deviation) [count/sec/thread] 171 | double sd = std::sqrt(std::accumulate(counters.begin(), counters.end(), 0., 172 | [avg, elapsed](double acc, std::size_t v) { 173 | return acc + (v / elapsed - avg) * (v / elapsed - avg); 174 | }) / nthread); 175 | 176 | // print result 177 | std::cout 178 | << nthread << '\t' << nissue << '\t' << avg << '\t' << sd << "\t-\t-\t-\t-" << std::endl; 179 | } 180 | 181 | 182 | void print_header(const char* title, unsigned nthread) 183 | { 184 | std::cout 185 | << "# " << title 186 | << " ncpu=" << std::thread::hardware_concurrency() << " nthread=" << nthread 187 | << " task/wait=" << PERF_WEIGHT_TASK << "/" << PERF_WEIGHT_WAIT 188 | << " duration=" << PERF_DURATION.count() << std::endl; 189 | } 190 | 191 | 192 | template 193 | void perf_lock(const char* title, unsigned nthread) 194 | { 195 | print_header(title, nthread); 196 | std::cout << "# Wt/Rd\t[raw]\t[ops]\t[sd]\t-\t-\t-\t-" << std::endl; 197 | perform_lock_contention({ nthread, 0 }); 198 | std::cout << "\n\n" << std::flush; 199 | } 200 | 201 | 202 | template 203 | void perf_rwlock(const char* title, unsigned nthread) 204 | { 205 | print_header(title, nthread); 206 | std::cout << "# Write\t[raw]\t[ops]\t[sd]\tRead\t[raw]\t[ops]\t[sd]" << std::endl; 207 | for (unsigned nwt = 1; nwt < nthread; nwt++) { 208 | config cfg = { nwt, nthread - nwt }; 209 | perform_rwlock_contention(cfg); 210 | } 211 | std::cout << "\n\n" << std::flush; 212 | } 213 | 214 | 215 | int main() 216 | { 217 | unsigned nthread = 10; 218 | using reader_prefer_shared_mutex = yamc::alternate::basic_shared_mutex; 219 | using writer_prefer_shared_mutex = 
yamc::alternate::basic_shared_mutex; 220 | using task_fairness_shared_mutex = yamc::fair::basic_shared_mutex; 221 | using phase_fairness_shared_mutex = yamc::fair::basic_shared_mutex; 222 | 223 | perf_lock ("StdMutex", nthread); 224 | perf_lock("FifoMutex", nthread); 225 | 226 | perf_rwlock("ReaderPrefer", nthread); 227 | perf_rwlock("WriterPrefer", nthread); 228 | perf_rwlock ("TaskFair", nthread); 229 | perf_rwlock("PhaseFair", nthread); 230 | } 231 | -------------------------------------------------------------------------------- /tests/barrier_test.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * barrier_test.cpp 3 | */ 4 | #include 5 | #include "gtest/gtest.h" 6 | #include "yamc_barrier.hpp" 7 | #include "yamc_testutil.hpp" 8 | 9 | 10 | struct null_completion { 11 | void operator()() {} 12 | }; 13 | 14 | struct counting_completion { 15 | int *counter; 16 | void operator()() { 17 | ++*counter; 18 | } 19 | }; 20 | 21 | 22 | // type requirements (compile-time assertion) 23 | TEST(BarrierTest, TypeRequirements) 24 | { 25 | using arrival_token = yamc::barrier<>::arrival_token; 26 | static_assert(std::is_move_constructible::value, "Cpp17MoveConstructible"); 27 | static_assert(std::is_move_assignable::value, "Cpp17MoveAssignable"); 28 | static_assert(std::is_destructible::value, "Cpp17Destructible"); 29 | } 30 | 31 | // barrier constructor 32 | TEST(BarrierTest, Ctor) 33 | { 34 | EXPECT_NO_THROW(yamc::barrier<>{1}); 35 | } 36 | 37 | // barrier construction with completion 38 | TEST(BarrierTest, CtorCompletion) 39 | { 40 | EXPECT_NO_THROW(yamc::barrier{1}); 41 | } 42 | 43 | // barrier construction with completion argument 44 | TEST(BarrierTest, CtorCompletionArg) 45 | { 46 | null_completion completion; 47 | EXPECT_NO_THROW((yamc::barrier{1, completion})); 48 | } 49 | 50 | // barrier constructor throws exception 51 | TEST(BarrierTest, CtorThrow) 52 | { 53 | struct throwing_completion { 54 | throwing_completion() = 
default; 55 | throwing_completion(const throwing_completion&) = default; 56 | throwing_completion(throwing_completion&&) { 57 | // move constructor throw exception 58 | throw 42; // int 59 | } 60 | void operator()() {} 61 | } completion; 62 | EXPECT_THROW((yamc::barrier{1, completion}), int); 63 | } 64 | 65 | // barrier::arrive() 66 | TEST(BarrierTest, Arrive) 67 | { 68 | yamc::barrier<> barrier{3}; // expected count=3 69 | EXPECT_NO_THROW((void)barrier.arrive()); // c=3->2 70 | EXPECT_NO_THROW((void)barrier.arrive(2)); // c=2->0, next phase 71 | EXPECT_NO_THROW((void)barrier.arrive(3)); // c=3->0, next phase 72 | // casting void to suppress "ignoring return value" warning 73 | } 74 | 75 | // barrier::arrive() with completion 76 | TEST(BarrierTest, ArriveCompletion) 77 | { 78 | int counter = 0; 79 | counting_completion complation{&counter}; 80 | yamc::barrier barrier{2, complation}; // expected count=2 81 | EXPECT_NO_THROW((void)barrier.arrive()); // c=2->1 82 | EXPECT_EQ(counter, 0); 83 | EXPECT_NO_THROW((void)barrier.arrive()); // c=1->0, call completion 84 | EXPECT_EQ(counter, 1); 85 | EXPECT_NO_THROW((void)barrier.arrive(2)); // c=2->0, call completion 86 | EXPECT_EQ(counter, 2); 87 | } 88 | 89 | // barrier::wait() 90 | TEST(BarrierTest, Wait) 91 | { 92 | yamc::barrier<> barrier{1}; 93 | EXPECT_NO_THROW(barrier.wait(barrier.arrive())); 94 | auto token = barrier.arrive(); 95 | EXPECT_NO_THROW(barrier.wait(std::move(token))); 96 | } 97 | 98 | // barrier::arrive_and_wait() 99 | TEST(BarrierTest, ArriveAndWait) 100 | { 101 | yamc::barrier<> barrier{1}; 102 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 103 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 104 | } 105 | 106 | // barrier::arrive_and_wait() with completion 107 | TEST(BarrierTest, ArriveAndWaitCompletion) 108 | { 109 | int counter = 0; 110 | counting_completion complation{&counter}; 111 | yamc::barrier barrier{1, complation}; 112 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 113 | EXPECT_EQ(counter, 1); 114 
| EXPECT_NO_THROW(barrier.arrive_and_wait()); 115 | EXPECT_EQ(counter, 2); 116 | } 117 | 118 | // barrier::arrive_and_drop() 119 | TEST(BarrierTest, ArriveAndDrop) 120 | { 121 | yamc::barrier<> barrier{1}; 122 | EXPECT_NO_THROW(barrier.arrive_and_drop()); 123 | } 124 | 125 | // barrier::arrive_and_drop() with completion 126 | TEST(BarrierTest, ArriveAndDropCompletion) 127 | { 128 | int counter = 0; 129 | counting_completion complation{&counter}; 130 | yamc::barrier barrier{1, complation}; 131 | EXPECT_NO_THROW(barrier.arrive_and_drop()); 132 | EXPECT_EQ(counter, 1); 133 | } 134 | 135 | // basic phasing 136 | // 137 | // T0: 1.X...X...X.4 138 | // | | | 139 | // T1: ..X.2.X...X.5 140 | // | | | 141 | // T2: ..X...X.3.X.6 142 | // 143 | // CriticalPath = 1-2-3-{4|5|6} 144 | // 145 | // X=arrive_and_wait() 146 | // 147 | TEST(BarrierTest, BasicPhasing) 148 | { 149 | SETUP_STEPTEST; 150 | yamc::barrier<> barrier{3}; 151 | yamc::test::task_runner( 152 | 3, 153 | [&](std::size_t id) { 154 | switch (id) { 155 | case 0: 156 | EXPECT_STEP(1); 157 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 158 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 159 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 160 | EXPECT_STEP_RANGE(4, 6); 161 | break; 162 | case 1: 163 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 164 | EXPECT_STEP(2); 165 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 166 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 167 | EXPECT_STEP_RANGE(4, 6); 168 | break; 169 | case 2: 170 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 171 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 172 | EXPECT_STEP(3); 173 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 174 | EXPECT_STEP_RANGE(4, 6); 175 | break; 176 | } 177 | } 178 | ); 179 | } 180 | 181 | // arrive+wait phasing 182 | // 183 | // T0: 1.A.A-W-3 184 | // | | 185 | // T1: A-W.2.A.4 186 | // 187 | // CriticalPath = 1-2-{3|4} 188 | // 189 | // A=arrive(), W=wait() 190 | // 191 | TEST(BarrierTest, ArriveWaitPhasing) 192 | { 193 | 
SETUP_STEPTEST; 194 | yamc::barrier<> barrier{2}; 195 | yamc::test::task_runner( 196 | 2, 197 | [&](std::size_t id) { 198 | switch (id) { 199 | case 0: 200 | { 201 | EXPECT_STEP(1); 202 | EXPECT_NO_THROW((void)barrier.arrive()); // phase=0->1 203 | auto token = barrier.arrive(); // phase=1->2 204 | EXPECT_NO_THROW(barrier.wait(std::move(token))); 205 | EXPECT_STEP_RANGE(3, 4); 206 | break; 207 | } 208 | case 1: 209 | { 210 | auto token = barrier.arrive(); // phase=0->1 211 | EXPECT_NO_THROW(barrier.wait(std::move(token))); 212 | EXPECT_STEP(2); 213 | EXPECT_NO_THROW((void)barrier.arrive()); // phase=1->2 214 | EXPECT_STEP_RANGE(3, 4); 215 | break; 216 | } 217 | } 218 | } 219 | ); 220 | } 221 | 222 | // past token 223 | // 224 | // T0: 1.A---X-W.3 225 | // | | 226 | // T1: ..X.2.X.... 227 | // 228 | // CriticalPath = 1-2-3 229 | // 230 | // A=arrive(), W=wait() 231 | // X=arrive_and_wait() 232 | // 233 | TEST(BarrierTest, PastToken) 234 | { 235 | SETUP_STEPTEST; 236 | yamc::barrier<> barrier{2}; 237 | yamc::test::task_runner( 238 | 2, 239 | [&](std::size_t id) { 240 | switch (id) { 241 | case 0: 242 | { 243 | EXPECT_STEP(1); 244 | auto token = barrier.arrive(); 245 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 246 | EXPECT_NO_THROW(barrier.wait(std::move(token))); // past token 247 | EXPECT_STEP(3); 248 | break; 249 | } 250 | case 1: 251 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 252 | EXPECT_STEP(2); 253 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 254 | break; 255 | } 256 | } 257 | ); 258 | } 259 | 260 | // phasing with drop 261 | // 262 | // T0: 1.D.2........ 
263 | // | 264 | // T1: ..X.3.X...D.5 265 | // | | | 266 | // T2: ..X...X.4.X.6 267 | // 268 | // CriticalPath = 1-{2|3}-4-{5|6} 269 | // 270 | // X=arrive_and_wait(), D=arrive_and_drop() 271 | // 272 | TEST(BarrierTest, DropPhasing) 273 | { 274 | SETUP_STEPTEST; 275 | yamc::barrier<> barrier{3}; 276 | yamc::test::task_runner( 277 | 3, 278 | [&](std::size_t id) { 279 | switch (id) { 280 | case 0: 281 | EXPECT_STEP(1); 282 | EXPECT_NO_THROW(barrier.arrive_and_drop()); 283 | EXPECT_STEP_RANGE(2, 3); 284 | break; 285 | case 1: 286 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 287 | EXPECT_STEP_RANGE(2, 3); 288 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 289 | EXPECT_NO_THROW(barrier.arrive_and_drop()); 290 | EXPECT_STEP_RANGE(5, 6); 291 | break; 292 | case 2: 293 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 294 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 295 | EXPECT_STEP(4); 296 | EXPECT_NO_THROW(barrier.arrive_and_wait()); 297 | EXPECT_STEP_RANGE(5, 6); 298 | break; 299 | } 300 | } 301 | ); 302 | } 303 | 304 | // barrier::max() 305 | TEST(BarrierTest, Max) 306 | { 307 | EXPECT_GT((yamc::barrier<>::max)(), 0); 308 | } 309 | -------------------------------------------------------------------------------- /tests/semaphore_test.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * semaphore_test.cpp 3 | */ 4 | #include "gtest/gtest.h" 5 | #include "yamc_semaphore.hpp" 6 | #include "yamc_testutil.hpp" 7 | #if defined(__APPLE__) 8 | #include "gcd_semaphore.hpp" 9 | #define ENABLE_GCD_SEMAPHORE 10 | #endif 11 | #if defined(__linux__) && !defined(__APPLE__) 12 | #include "posix_semaphore.hpp" 13 | #define ENABLE_POSIX_SEMAPHORE 14 | #endif 15 | #if defined(_WIN32) 16 | #include "win_semaphore.hpp" 17 | #define ENABLE_WIN_SEMAPHORE 18 | #endif 19 | 20 | 21 | #define TEST_THREADS 8 22 | #define TEST_ITERATION 10000u 23 | 24 | #define TEST_NOT_TIMEOUT std::chrono::minutes(3) 25 | #define TEST_EXPECT_TIMEOUT 
std::chrono::milliseconds(300) 26 | 27 | 28 | // selector for generic semaphore implementation (platform independent) 29 | struct GenericSemaphore { 30 | template 31 | using counting_semaphore = yamc::counting_semaphore; 32 | using counting_semaphore_def = yamc::counting_semaphore<>; 33 | using binary_semaphore = yamc::binary_semaphore; 34 | }; 35 | 36 | #if defined(ENABLE_GCD_SEMAPHORE) 37 | // selector for GCD dispatch semaphore implementation 38 | struct GcdSemaphore { 39 | template 40 | using counting_semaphore = yamc::gcd::counting_semaphore; 41 | using counting_semaphore_def = yamc::gcd::counting_semaphore<>; 42 | using binary_semaphore = yamc::gcd::binary_semaphore; 43 | }; 44 | #endif 45 | 46 | #if defined(ENABLE_POSIX_SEMAPHORE) 47 | // selector for POSIX semaphore implementation 48 | struct PosixSemaphore { 49 | template 50 | using counting_semaphore = yamc::posix::counting_semaphore; 51 | using counting_semaphore_def = yamc::posix::counting_semaphore<>; 52 | using binary_semaphore = yamc::posix::binary_semaphore; 53 | }; 54 | #endif 55 | 56 | #if defined(ENABLE_WIN_SEMAPHORE) 57 | // selector for Windows semaphore implementation 58 | struct WinSemaphore { 59 | template 60 | using counting_semaphore = yamc::win::counting_semaphore; 61 | using counting_semaphore_def = yamc::win::counting_semaphore<>; 62 | using binary_semaphore = yamc::win::binary_semaphore; 63 | }; 64 | #endif 65 | 66 | using SemaphoreSelector = ::testing::Types< 67 | GenericSemaphore 68 | #if defined(ENABLE_GCD_SEMAPHORE) 69 | , GcdSemaphore 70 | #endif 71 | #if defined(ENABLE_POSIX_SEMAPHORE) 72 | , PosixSemaphore 73 | #endif 74 | #if defined(ENABLE_WIN_SEMAPHORE) 75 | , WinSemaphore 76 | #endif 77 | >; 78 | 79 | 80 | template 81 | struct SemaphoreTest : ::testing::Test {}; 82 | 83 | TYPED_TEST_SUITE(SemaphoreTest, SemaphoreSelector); 84 | 85 | // semaphore construction with zero 86 | TYPED_TEST(SemaphoreTest, CtorZero) 87 | { 88 | using counting_semaphore = typename TypeParam::template 
counting_semaphore<1>; 89 | EXPECT_NO_THROW(counting_semaphore{0}); 90 | } 91 | 92 | // semaphore constructor with maximum value 93 | TYPED_TEST(SemaphoreTest, CtorMaxValue) 94 | { 95 | constexpr ptrdiff_t LEAST_MAX_VALUE = 1000; 96 | using counting_semaphore = typename TypeParam::template counting_semaphore; 97 | EXPECT_NO_THROW(counting_semaphore{(counting_semaphore::max)()}); 98 | } 99 | 100 | // semaphore::acquire() 101 | TYPED_TEST(SemaphoreTest, Acquire) 102 | { 103 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 104 | counting_semaphore sem{1}; 105 | EXPECT_NO_THROW(sem.acquire()); 106 | } 107 | 108 | // semaphore::try_acquire() 109 | TYPED_TEST(SemaphoreTest, TryAcquire) 110 | { 111 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 112 | counting_semaphore sem{1}; 113 | EXPECT_TRUE(sem.try_acquire()); 114 | } 115 | 116 | // semaphore::try_acquire() failure 117 | TYPED_TEST(SemaphoreTest, TryAcquireFail) 118 | { 119 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 120 | counting_semaphore sem{0}; 121 | EXPECT_FALSE(sem.try_acquire()); 122 | } 123 | 124 | // semaphore::try_acquire_for() 125 | TYPED_TEST(SemaphoreTest, TryAcquireFor) 126 | { 127 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 128 | counting_semaphore sem{1}; 129 | EXPECT_TRUE(sem.try_acquire_for(TEST_NOT_TIMEOUT)); 130 | } 131 | 132 | // semaphore::try_acquire_until() 133 | TYPED_TEST(SemaphoreTest, TryAcquireUntil) 134 | { 135 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 136 | counting_semaphore sem{1}; 137 | EXPECT_TRUE(sem.try_acquire_until(std::chrono::system_clock::now() + TEST_NOT_TIMEOUT)); 138 | } 139 | 140 | // semaphore::try_acquire_for() timeout 141 | TYPED_TEST(SemaphoreTest, TryAcquireForTimeout) 142 | { 143 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 144 | counting_semaphore sem{0}; 145 | yamc::test::stopwatch sw; 146 | 
EXPECT_FALSE(sem.try_acquire_for(TEST_EXPECT_TIMEOUT)); 147 | EXPECT_LE(TEST_EXPECT_TIMEOUT, sw.elapsed()); 148 | } 149 | 150 | // semaphore::try_acquire_until() timeout 151 | TYPED_TEST(SemaphoreTest, TryAcquireUntilTimeout) 152 | { 153 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 154 | counting_semaphore sem{0}; 155 | yamc::test::stopwatch<> sw; 156 | EXPECT_FALSE(sem.try_acquire_until(std::chrono::system_clock::now() + TEST_EXPECT_TIMEOUT)); 157 | EXPECT_LE(TEST_EXPECT_TIMEOUT, sw.elapsed()); 158 | } 159 | 160 | // semaphore::try_acquire_until() timeout with steady_clock 161 | TYPED_TEST(SemaphoreTest, TryAcquireUntilTimeoutSteadyClock) 162 | { 163 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 164 | counting_semaphore sem{0}; 165 | yamc::test::stopwatch sw; 166 | EXPECT_FALSE(sem.try_acquire_until(std::chrono::steady_clock::now() + TEST_EXPECT_TIMEOUT)); 167 | EXPECT_LE(TEST_EXPECT_TIMEOUT, sw.elapsed()); 168 | } 169 | 170 | // semaphore::try_acquire_until() timeout with high_resolution_clock 171 | TYPED_TEST(SemaphoreTest, TryAcquireUntilTimeoutHighResolutionClock) 172 | { 173 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 174 | counting_semaphore sem{0}; 175 | yamc::test::stopwatch sw; 176 | EXPECT_FALSE(sem.try_acquire_until(std::chrono::high_resolution_clock::now() + TEST_EXPECT_TIMEOUT)); 177 | EXPECT_LE(TEST_EXPECT_TIMEOUT, sw.elapsed()); 178 | } 179 | 180 | // semaphore::release() 181 | TYPED_TEST(SemaphoreTest, Release) 182 | { 183 | SETUP_STEPTEST; 184 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 185 | counting_semaphore sem{0}; 186 | yamc::test::join_thread thd([&]{ 187 | EXPECT_STEP(1); 188 | EXPECT_NO_THROW(sem.release()); 189 | }); 190 | // wait-thread 191 | { 192 | EXPECT_NO_THROW(sem.acquire()); 193 | EXPECT_STEP(2); 194 | } 195 | } 196 | 197 | // semaphore::release(update) 198 | TYPED_TEST(SemaphoreTest, ReleaseUpdate) 199 | { 200 | 
SETUP_STEPTEST; 201 | using counting_semaphore = typename TypeParam::counting_semaphore_def; 202 | counting_semaphore sem{0}; 203 | yamc::test::task_runner( 204 | 4, 205 | [&](std::size_t id) { 206 | if (id == 0) { 207 | // signal-thread 208 | EXPECT_STEP(1); 209 | EXPECT_NO_THROW(sem.release(3)); 210 | } else { 211 | // 3 wait-threads 212 | EXPECT_NO_THROW(sem.acquire()); 213 | EXPECT_STEP_RANGE(2, 4); 214 | } 215 | } 216 | ); 217 | } 218 | 219 | // use semaphore as Mutex 220 | TYPED_TEST(SemaphoreTest, UseAsMutex) 221 | { 222 | using binary_semaphore = typename TypeParam::binary_semaphore; 223 | binary_semaphore sem{1}; 224 | std::size_t counter = 0; 225 | yamc::test::task_runner( 226 | TEST_THREADS, 227 | [&](std::size_t /*id*/) { 228 | for (std::size_t n = 0; n < TEST_ITERATION; ++n) { 229 | EXPECT_NO_THROW(sem.acquire()); 230 | counter = counter + 1; 231 | std::this_thread::yield(); // provoke lock contention 232 | EXPECT_NO_THROW(sem.release()); 233 | } 234 | }); 235 | EXPECT_EQ(TEST_ITERATION * TEST_THREADS, counter); 236 | } 237 | 238 | 239 | template 240 | struct LeastMaxValueTest : ::testing::Test {}; 241 | 242 | TYPED_TEST_SUITE(LeastMaxValueTest, SemaphoreSelector); 243 | 244 | // counting_semaphore::max() with least_max_value 245 | TYPED_TEST(LeastMaxValueTest, CounitingSemaphore) 246 | { 247 | constexpr ptrdiff_t LEAST_MAX_VALUE = 1000; 248 | using counting_semaphore = typename TypeParam::template counting_semaphore; 249 | EXPECT_GE((counting_semaphore::max)(), LEAST_MAX_VALUE); 250 | // counting_semaphore::max() may return value greater than N. 251 | } 252 | 253 | // binary_semaphore::max() 254 | TYPED_TEST(LeastMaxValueTest, BinarySemaphore) 255 | { 256 | using binary_semaphore = typename TypeParam::binary_semaphore; 257 | EXPECT_GE((binary_semaphore::max)(), 1); 258 | // counting_semaphore::max() may return value greater than N. 
259 | } 260 | -------------------------------------------------------------------------------- /include/fair_mutex.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * fair_mutex.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2017 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 
#ifndef YAMC_FAIR_MUTEX_HPP_
#define YAMC_FAIR_MUTEX_HPP_

#include <cassert>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <thread>


namespace yamc {

/*
 * fairness (FIFO locking) mutex
 *
 * - yamc::fair::mutex
 * - yamc::fair::recursive_mutex
 * - yamc::fair::timed_mutex
 * - yamc::fair::recursive_timed_mutex
 */
namespace fair {

// FIFO-fair mutex built on a ticket algorithm: next_ is the next ticket to
// hand out, curr_ is the ticket currently being served (= lock holder).
class mutex {
  std::size_t next_ = 0;
  std::size_t curr_ = 0;
  std::condition_variable cv_;
  std::mutex mtx_;

public:
  mutex() = default;
  ~mutex() = default;

  mutex(const mutex&) = delete;
  mutex& operator=(const mutex&) = delete;

  // Block until this thread's ticket comes up; arrivals are served in order.
  void lock()
  {
    std::unique_lock<std::mutex> lk(mtx_);
    const std::size_t request = next_++;
    while (request != curr_) {
      cv_.wait(lk);
    }
  }

  // Acquire only when nobody currently holds or waits for the lock.
  bool try_lock()
  {
    std::lock_guard<std::mutex> lk(mtx_);
    if (next_ != curr_)
      return false;
    ++next_;
    return true;
  }

  // Serve the next ticket holder (if any).
  void unlock()
  {
    std::lock_guard<std::mutex> lk(mtx_);
    ++curr_;
    cv_.notify_all();
  }
};


// FIFO-fair recursive mutex; ncount_ tracks the owner's recursion depth.
class recursive_mutex {
  std::size_t next_ = 0;
  std::size_t curr_ = 0;
  std::size_t ncount_ = 0;
  std::thread::id owner_;
  std::condition_variable cv_;
  std::mutex mtx_;

public:
  recursive_mutex() = default;
  ~recursive_mutex() = default;

  recursive_mutex(const recursive_mutex&) = delete;
  recursive_mutex& operator=(const recursive_mutex&) = delete;

  void lock()
  {
    const auto tid = std::this_thread::get_id();
    std::unique_lock<std::mutex> lk(mtx_);
    if (owner_ == tid) {
      // recursive locking by the current owner
      assert(0 < ncount_);
      ++ncount_;
      return;
    }
    const std::size_t request = next_++;
    while (request != curr_) {
      cv_.wait(lk);
    }
    assert(ncount_ == 0 && owner_ == std::thread::id());
    ncount_ = 1;
    owner_ = tid;
  }

  bool try_lock()
  {
    const auto tid = std::this_thread::get_id();
    std::lock_guard<std::mutex> lk(mtx_);
    if (owner_ == tid) {
      assert(0 < ncount_);
      ++ncount_;
      return true;
    }
    if (next_ != curr_)
      return false;
    ++next_;
    assert(ncount_ == 0 && owner_ == std::thread::id());
    ncount_ = 1;
    owner_ = tid;
    return true;
  }

  void unlock()
  {
    std::lock_guard<std::mutex> lk(mtx_);
    assert(0 < ncount_ && owner_ == std::this_thread::get_id());
    if (--ncount_ == 0) {
      ++curr_;
      owner_ = std::thread::id();
      cv_.notify_all();
    }
  }
};


namespace detail {

// Shared implementation for the timed fair mutexes. Waiters link themselves
// into an intrusive circular doubly-linked FIFO queue; while the lock is
// held, the placeholder node 'locked_' sits at the queue front.
class timed_mutex_impl {
public:
  struct node {
    node* next;
    node* prev;
  };

  node queue_;   // q.next = front(), q.prev = back()
  node locked_;  // placeholder node of 'locked' state
  std::condition_variable cv_;
  std::mutex mtx_;

private:
  bool wq_empty()
  {
    return queue_.next == &queue_;
  }

  void wq_push_back(node* p)
  {
    node* back = queue_.prev;
    back->next = queue_.prev = p;
    p->next = &queue_;
    p->prev = back;
  }

  void wq_erase(node* p)
  {
    p->next->prev = p->prev;
    p->prev->next = p->next;
  }

  void wq_pop_front()
  {
    wq_erase(queue_.next);
  }

  void wq_replace_front(node* p)
  {
    // q.pop_front() + q.push_front(p)
    node* front = queue_.next;
    assert(front != p);
    *p = *front;
    queue_.next = front->next->prev = p;
  }

public:
  timed_mutex_impl()
    : queue_{&queue_, &queue_} {}
  ~timed_mutex_impl() = default;

  // Acquire the internal mutex; callers thread the returned lock through
  // the impl_* operations below.
  std::unique_lock<std::mutex> internal_lock()
  {
    return std::unique_lock<std::mutex>(mtx_);
  }

  void impl_lock(std::unique_lock<std::mutex>& lk)
  {
    if (!wq_empty()) {
      // enqueue a stack-allocated waiter node and wait for FIFO turn
      node request;
      wq_push_back(&request);
      while (queue_.next != &request) {
        cv_.wait(lk);
      }
      wq_replace_front(&locked_);
    } else {
      wq_push_back(&locked_);
    }
  }

  bool impl_try_lock()
  {
    if (!wq_empty()) {
      return false;
    }
    wq_push_back(&locked_);
    return true;
  }

  void impl_unlock()
  {
    assert(queue_.next == &locked_);
    wq_pop_front();
    cv_.notify_all();
  }

  // Returns false on timeout without acquiring; otherwise acquires the lock.
  template <typename Clock, typename Duration>
  bool impl_try_lockwait(std::unique_lock<std::mutex>& lk,
                         const std::chrono::time_point<Clock, Duration>& tp)
  {
    if (!wq_empty()) {
      node request;
      wq_push_back(&request);
      while (queue_.next != &request) {
        if (cv_.wait_until(lk, tp) == std::cv_status::timeout) {
          if (queue_.next == &request)  // re-check predicate
            break;
          wq_erase(&request);
          return false;
        }
      }
      wq_replace_front(&locked_);
    } else {
      wq_push_back(&locked_);
    }
    return true;
  }
};

} // namespace detail


// FIFO-fair mutex with timed locking operations.
class timed_mutex {
  detail::timed_mutex_impl impl_;

public:
  timed_mutex() = default;
  ~timed_mutex() = default;

  timed_mutex(const timed_mutex&) = delete;
  timed_mutex& operator=(const timed_mutex&) = delete;

  void lock()
  {
    auto lk = impl_.internal_lock();
    impl_.impl_lock(lk);
  }

  bool try_lock()
  {
    auto lk = impl_.internal_lock();
    return impl_.impl_try_lock();
  }

  void unlock()
  {
    auto lk = impl_.internal_lock();
    impl_.impl_unlock();
  }

  template <typename Rep, typename Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>& duration)
  {
    const auto tp = std::chrono::steady_clock::now() + duration;
    auto lk = impl_.internal_lock();
    return impl_.impl_try_lockwait(lk, tp);
  }

  template <typename Clock, typename Duration>
  bool try_lock_until(const std::chrono::time_point<Clock, Duration>& tp)
  {
    auto lk = impl_.internal_lock();
    return impl_.impl_try_lockwait(lk, tp);
  }
};


// FIFO-fair recursive mutex with timed locking operations.
class recursive_timed_mutex {
  std::size_t ncount_ = 0;
  std::thread::id owner_ = {};
  detail::timed_mutex_impl impl_;

public:
  recursive_timed_mutex() = default;
  ~recursive_timed_mutex() = default;

  recursive_timed_mutex(const recursive_timed_mutex&) = delete;
  recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

  void lock()
  {
    const auto tid = std::this_thread::get_id();
    auto lk = impl_.internal_lock();
    if (owner_ == tid) {
      assert(0 < ncount_);
      ++ncount_;
    } else {
      impl_.impl_lock(lk);
      ncount_ = 1;
      owner_ = tid;
    }
  }

  bool try_lock()
  {
    const auto tid = std::this_thread::get_id();
    auto lk = impl_.internal_lock();
    if (owner_ == tid) {
      assert(0 < ncount_);
      ++ncount_;
      return true;
    }
    if (!impl_.impl_try_lock())
      return false;
    ncount_ = 1;
    owner_ = tid;
    return true;
  }

  void unlock()
  {
    auto lk = impl_.internal_lock();
    assert(0 < ncount_ && owner_ == std::this_thread::get_id());
    if (--ncount_ == 0) {
      impl_.impl_unlock();
      owner_ = std::thread::id();
    }
  }

  template <typename Rep, typename Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>& duration)
  {
    const auto tp = std::chrono::steady_clock::now() + duration;
    return try_lock_until(tp);  // delegate
  }

  template <typename Clock, typename Duration>
  bool try_lock_until(const std::chrono::time_point<Clock, Duration>& tp)
  {
    const auto tid = std::this_thread::get_id();
    auto lk = impl_.internal_lock();
    if (owner_ == tid) {
      assert(0 < ncount_);
      ++ncount_;
      return true;
    }
    if (!impl_.impl_try_lockwait(lk, tp))
      return false;
    ncount_ = 1;
    owner_ = tid;
    return true;
  }
};

} // namespace fair
} // namespace yamc

#endif
// -----------------------------------------------------------------------------
// tests/yamc_testutil.hpp
/*
 * yamc_testutil.hpp
 *
 * MIT License
 *
 * Copyright (c) 2017 yohhoy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef YAMC_TESTUTIL_HPP_
#define YAMC_TESTUTIL_HPP_

#include <algorithm>
#include <atomic>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>


// platform
#if defined(_WIN32)
#define TEST_PLATFORM_LINUX 0
#define TEST_PLATFORM_OSX 0
#define TEST_PLATFORM_WINDOWS 1
#elif defined(__APPLE__)
#define TEST_PLATFORM_LINUX 0
#define TEST_PLATFORM_OSX 1
#define TEST_PLATFORM_WINDOWS 0
#elif defined(__linux)
#define TEST_PLATFORM_LINUX 1
#define TEST_PLATFORM_OSX 0
#define TEST_PLATFORM_WINDOWS 0
#endif


// C++ compiler
#if defined(__clang__)
#define TEST_COMPILER_CLANG 1
#define TEST_COMPILER_GCC 0
#define TEST_COMPILER_MSVC 0
#elif defined(__GNUC__)
#define TEST_COMPILER_CLANG 0
#define TEST_COMPILER_GCC 1
// FIX: the value was missing; an empty macro breaks `#if TEST_COMPILER_MSVC`.
#define TEST_COMPILER_MSVC 0
#elif defined(_MSC_VER)
#define TEST_COMPILER_CLANG 0
#define TEST_COMPILER_GCC 0
#define TEST_COMPILER_MSVC 1
#endif


namespace yamc {
namespace test {


/// auto join thread
class join_thread {
  std::thread thd_;

public:
  template <typename F>
  explicit join_thread(F&& f)
    : thd_(std::forward<F>(f)) {}

  // NOTE(review): join() can throw, hence the deliberate noexcept(false).
  // The defaulted move-assign would terminate if *this still owns a joinable
  // thread -- acceptable for test code; confirm before reusing elsewhere.
  ~join_thread() noexcept(false)
  {
    thd_.join();
  }

  join_thread(const join_thread&) = delete;
  join_thread& operator=(const join_thread&) = delete;
  join_thread(join_thread&&) = default;
  join_thread& operator=(join_thread&&) = default;
};


/// rendezvous point primitive
class barrier {
  std::size_t nthread_;
  std::size_t count_;
  std::size_t step_ = 0;   // generation counter; guards against spurious wakeup
  std::condition_variable cv_;
  std::mutex mtx_;

public:
  explicit barrier(std::size_t n)
    : nthread_(n), count_(n) {}

  barrier(const barrier&) = delete;
  barrier& operator=(const barrier&) = delete;

  // Returns true for exactly one (the last arriving) thread per generation.
  bool await()
  {
    std::unique_lock<std::mutex> lk(mtx_);
    std::size_t step = step_;
    if (--count_ == 0) {
      count_ = nthread_;  // reset for the next generation
      ++step_;
      cv_.notify_all();
      return true;
    }
    while (step == step_) {
      cv_.wait(lk);
    }
    return false;
  }
};


/// phase control primitive
class phaser {
  std::size_t sentinel_ = 0;   // minimum phase over all participants
  std::vector<std::size_t> phase_;
  std::condition_variable cv_;
  std::mutex mtx_;

  void do_advance(std::size_t id, std::size_t n)
  {
    std::lock_guard<std::mutex> lk(mtx_);
    phase_[id] += n;
    sentinel_ = *std::min_element(phase_.begin(), phase_.end());
    cv_.notify_all();
  }

  void do_await(std::size_t id)
  {
    std::unique_lock<std::mutex> lk(mtx_);
    phase_[id] += 1;
    sentinel_ = *std::min_element(phase_.begin(), phase_.end());
    // wait until every participant has reached this thread's phase
    while (sentinel_ != phase_[id]) {
      cv_.wait(lk);
    }
    cv_.notify_all();
  }

public:
  explicit phaser(std::size_t n)
    : phase_(n, 0u) {}

  // lightweight per-participant handle
  class proxy {
    phaser* phaser_;
    std::size_t id_;

    friend class phaser;
    proxy(phaser* p, std::size_t id)
      : phaser_(p), id_(id) {}

  public:
    void advance(std::size_t n)
      { phaser_->do_advance(id_, n); }
    void await()
      { phaser_->do_await(id_); }
  };

  proxy get(std::size_t id)
  {
    assert(id < phase_.size());
    return {this, id};
  }
};


/// parallel task runner: spawns nthread workers, releases them simultaneously
/// through a barrier, and joins them all before returning.
template <typename F>
void task_runner(std::size_t nthread, F f)
{
  barrier gate(1 + nthread);
  std::vector<std::thread> thds;
  for (std::size_t n = 0; n < nthread; ++n) {
    thds.emplace_back([f, n, &gate]{
      gate.await();
      f(n);
    });
  }
  gate.await();  // start
  for (auto& t : thds) {
    t.join();
  }
}


/// stopwatch
template <typename Duration = std::chrono::milliseconds,
          typename Clock = std::chrono::steady_clock>
class stopwatch {
  typename Clock::time_point start_;

public:
  stopwatch()
    : start_(Clock::now()) {}

  stopwatch(const stopwatch&) = delete;
  stopwatch& operator=(const stopwatch&) = delete;

  Duration elapsed()
  {
    auto end = Clock::now();
    return std::chrono::duration_cast<Duration>(end - start_);
  }
};

} // namespace test


namespace cxx {

/// C++14 std::make_unique()
template <typename T, typename... Args>
inline
std::unique_ptr<T> make_unique(Args&&... args)
{
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/// C++17 std::void_t
template <typename... Ts>
struct make_void { using type = void; };
template <typename... Ts>
using void_t = typename make_void<Ts...>::type;

} // namespace cxx


// mock classes of mutex types
namespace mock {

struct mutex {
  bool locked = false;
  bool retval_on_trylock = true;

  mutex() = default;
  ~mutex() = default;

  mutex(const mutex&) = delete;
  mutex& operator=(const mutex&) = delete;

  void lock() { locked = true; }
  bool try_lock() { return (locked = retval_on_trylock); }
  void unlock() { locked = false; }
};

struct recursive_mutex {
  bool retval_on_trylock = true;

  recursive_mutex() = default;
  ~recursive_mutex() = default;

  recursive_mutex(const recursive_mutex&) = delete;
  recursive_mutex& operator=(const recursive_mutex&) = delete;

  void lock() {}
  bool try_lock() { return retval_on_trylock; }
  void unlock() {}
};

struct timed_mutex {
  bool retval_on_trylock = true;

  timed_mutex() = default;
  ~timed_mutex() = default;

  timed_mutex(const timed_mutex&) = delete;
  timed_mutex& operator=(const timed_mutex&) = delete;

  void lock() {}
  bool try_lock() { return retval_on_trylock; }
  void unlock() {}

  template <typename Rep, typename Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>&)
    { return retval_on_trylock; }

  template <typename Clock, typename Duration>
  bool try_lock_until(const std::chrono::time_point<Clock, Duration>&)
    { return retval_on_trylock; }
};

struct recursive_timed_mutex {
  bool retval_on_trylock = true;

  recursive_timed_mutex() = default;
  ~recursive_timed_mutex() = default;

  recursive_timed_mutex(const recursive_timed_mutex&) = delete;
  recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

  void lock() {}
  bool try_lock() { return retval_on_trylock; }
  void unlock() {}

  template <typename Rep, typename Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>&)
    { return retval_on_trylock; }

  template <typename Clock, typename Duration>
  bool try_lock_until(const std::chrono::time_point<Clock, Duration>&)
    { return retval_on_trylock; }
};

struct shared_mutex {
  bool retval_on_trylock = true;

  shared_mutex() = default;
  ~shared_mutex() = default;

  shared_mutex(const shared_mutex&) = delete;
  shared_mutex& operator=(const shared_mutex&) = delete;

  void lock() {}
  bool try_lock() { return retval_on_trylock; }
  void unlock() {}

  void lock_shared() {}
  bool try_lock_shared() { return retval_on_trylock; }
  void unlock_shared() {}
};

struct shared_timed_mutex {
  bool retval_on_trylock = true;

  shared_timed_mutex() = default;
  ~shared_timed_mutex() = default;

  shared_timed_mutex(const shared_timed_mutex&) = delete;
  shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

  void lock() {}
  bool try_lock() { return retval_on_trylock; }
  void unlock() {}

  template <typename Rep, typename Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>&)
    { return retval_on_trylock; }

  template <typename Clock, typename Duration>
  bool try_lock_until(const std::chrono::time_point<Clock, Duration>&)
    { return retval_on_trylock; }

  void lock_shared() {}
  bool try_lock_shared() { return retval_on_trylock; }
  void unlock_shared() {}

  template <typename Rep, typename Period>
  bool try_lock_shared_for(const std::chrono::duration<Rep, Period>&)
    { return retval_on_trylock; }

  template <typename Clock, typename Duration>
  bool try_lock_shared_until(const std::chrono::time_point<Clock, Duration>&)
    { return retval_on_trylock; }
};

} // namespace mock

} // namespace yamc


#define TEST_TICKS std::chrono::milliseconds(200)
#define WAIT_TICKS std::this_thread::sleep_for(TEST_TICKS)

// stepping test trace (disabled by default)
#if 0
namespace {
std::mutex g_guard;
#define TRACE(msg_) \
  (std::unique_lock<std::mutex>(g_guard),\
   std::cout << std::this_thread::get_id() << ':' << (msg_) << std::endl)
}
#else
#define TRACE(msg_)
#endif

// declare stepping counter
#define SETUP_STEPTEST std::atomic<int> step = {0}

// expect n-th step
#define EXPECT_STEP(n_) \
  do { \
    TRACE("STEP"#n_); \
    int s = step.fetch_add(1, std::memory_order_relaxed) + 1; \
    EXPECT_EQ(n_, s); \
    WAIT_TICKS; \
  } while (0)

// expect between step#r0 and step#r1
#define EXPECT_STEP_RANGE(r0_, r1_) \
  do { \
    TRACE("STEP"#r0_"-"#r1_); \
    int s = step.fetch_add(1, std::memory_order_relaxed) + 1; \
    EXPECT_TRUE(r0_ <= s && s <= r1_); \
    WAIT_TICKS; \
  } while(0)

// advance step counter (w/o wait ticks)
#define ADVANCE_STEP(msg_, inc_) \
  do { \
    TRACE(msg_); \
    step.fetch_add(inc_, std::memory_order_relaxed); \
  } while(0)


#endif
// -----------------------------------------------------------------------------
// include/posix_native_mutex.hpp
/*
 * posix_native_mutex.hpp
 *
 * MIT License
 *
 * Copyright (c) 2019 yohhoy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef POSIX_NATIVE_MUTEX_HPP_
#define POSIX_NATIVE_MUTEX_HPP_

#include <chrono>
#include <type_traits>  // FIX: std::is_same is used below; include what you use
// POSIX(pthreads) mutex
#include <pthread.h>
#include <time.h>


#if defined(__APPLE__)
// macOS doesn't have timed locking functions
#define YAMC_POSIX_TIMEOUT_SUPPORTED 0
// macOS doesn't provide pthread_spinlock_t
#define YAMC_POSIX_SPINLOCK_SUPPORTED 0
#else
#define YAMC_POSIX_TIMEOUT_SUPPORTED 1
#define YAMC_POSIX_SPINLOCK_SUPPORTED 1
#endif


namespace yamc {

/*
 * Pthreads mutex wrapper on POSIX-compatible platform
 *
 * - yamc::posix::native_mutex
 * - yamc::posix::native_recursive_mutex
 * - yamc::posix::rwlock
 * - yamc::posix::spinlock [conditional]
 *
 * Some platform doesn't support locking operation with timeout.
 * Some platform doesn't provide spinlock object (pthread_spinlock_t).
 * https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/pthread.h.html
 * https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_timedlock.html
 * https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_timedrdlock.html
 * https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_timedwrlock.html
 * https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_spin_init.html
 */
namespace posix {

class native_mutex {
#if defined(PTHREAD_MUTEX_INITIALIZER)
  // POSIX.1 defines PTHREAD_MUTEX_INITIALIZER macro to initialize default mutex.
  // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_destroy.html
  ::pthread_mutex_t mtx_ = PTHREAD_MUTEX_INITIALIZER;
#elif defined(PTHREAD_MUTEX_INITIALIZER_NP)
  ::pthread_mutex_t mtx_ = PTHREAD_MUTEX_INITIALIZER_NP;
#endif

#if YAMC_POSIX_TIMEOUT_SUPPORTED
  // Convert a system_clock time_point to timespec and call the timed lock.
  bool do_try_lockwait(const std::chrono::system_clock::time_point& tp)
  {
    using namespace std::chrono;
    struct ::timespec abs_timeout;
    abs_timeout.tv_sec = system_clock::to_time_t(tp);
    abs_timeout.tv_nsec = (long)(duration_cast<nanoseconds>(tp.time_since_epoch()).count() % 1000000000);
    return (::pthread_mutex_timedlock(&mtx_, &abs_timeout) == 0);
  }
#endif

public:
  constexpr native_mutex() noexcept = default;

  ~native_mutex()
  {
    ::pthread_mutex_destroy(&mtx_);
  }

  native_mutex(const native_mutex&) = delete;
  native_mutex& operator=(const native_mutex&) = delete;

  void lock()
  {
    ::pthread_mutex_lock(&mtx_);
  }

  bool try_lock()
  {
    return (::pthread_mutex_trylock(&mtx_) == 0);
  }

  void unlock()
  {
    ::pthread_mutex_unlock(&mtx_);
  }

#if YAMC_POSIX_TIMEOUT_SUPPORTED
  template <typename Rep, typename Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>& rel_time)
  {
    // C++ Standard says '_for'-suffixed timeout function shall use steady clock,
    // but we use std::chrono::system_clock which may or may not be steady.
    const auto tp = std::chrono::system_clock::now() + rel_time;
    return do_try_lockwait(tp);
  }

  template <typename Clock, typename Duration>
  bool try_lock_until(const std::chrono::time_point<Clock, Duration>& abs_time)
  {
    static_assert(std::is_same<Clock, std::chrono::system_clock>::value, "support only system_clock");
    return do_try_lockwait(abs_time);
  }
#endif

  using native_handle_type = ::pthread_mutex_t*;
  native_handle_type native_handle()
  {
    return &mtx_;
  }
};


class native_recursive_mutex {
  // POSIX.1 does NOT define PTHREAD_RECURSIVE_MUTEX_INITIALIZER-like macro,
  // - Linux defines PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP macro,
  // - macOS defines PTHREAD_RECURSIVE_MUTEX_INITIALIZER macro.
#if defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER)
  ::pthread_mutex_t mtx_ = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
#elif defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
  ::pthread_mutex_t mtx_ = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
#else
  ::pthread_mutex_t mtx_;
#endif

#if YAMC_POSIX_TIMEOUT_SUPPORTED
  bool do_try_lockwait(const std::chrono::system_clock::time_point& tp)
  {
    using namespace std::chrono;
    struct ::timespec abs_timeout;
    abs_timeout.tv_sec = system_clock::to_time_t(tp);
    abs_timeout.tv_nsec = (long)(duration_cast<nanoseconds>(tp.time_since_epoch()).count() % 1000000000);
    return (::pthread_mutex_timedlock(&mtx_, &abs_timeout) == 0);
  }
#endif

public:
  native_recursive_mutex()
  {
#if !defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) && !defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
    // no static initializer available; build a recursive mutex at runtime
    ::pthread_mutexattr_t attr;
    ::pthread_mutexattr_init(&attr);
    ::pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    ::pthread_mutex_init(&mtx_, &attr);
    ::pthread_mutexattr_destroy(&attr);
#endif
  }

  ~native_recursive_mutex()
  {
    ::pthread_mutex_destroy(&mtx_);
  }

  native_recursive_mutex(const native_recursive_mutex&) = delete;
  native_recursive_mutex& operator=(const native_recursive_mutex&) = delete;

  void lock()
  {
    ::pthread_mutex_lock(&mtx_);
  }

  bool try_lock()
  {
    return (::pthread_mutex_trylock(&mtx_) == 0);
  }

  void unlock()
  {
    ::pthread_mutex_unlock(&mtx_);
  }

#if YAMC_POSIX_TIMEOUT_SUPPORTED
  template <typename Rep, typename Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>& rel_time)
  {
    // C++ Standard says '_for'-suffixed timeout function shall use steady clock,
    // but we use std::chrono::system_clock which may or may not be steady.
    const auto tp = std::chrono::system_clock::now() + rel_time;
    return do_try_lockwait(tp);
  }

  template <typename Clock, typename Duration>
  bool try_lock_until(const std::chrono::time_point<Clock, Duration>& abs_time)
  {
    static_assert(std::is_same<Clock, std::chrono::system_clock>::value, "support only system_clock");
    return do_try_lockwait(abs_time);
  }
#endif // YAMC_POSIX_TIMEOUT_SUPPORTED

  using native_handle_type = ::pthread_mutex_t*;
  native_handle_type native_handle()
  {
    return &mtx_;
  }
};


class rwlock {
#if defined(PTHREAD_RWLOCK_INITIALIZER)
  // POSIX.1-2001/SUSv3 once deleted PTHREAD_RWLOCK_INITIALIZER macro,
  // POSIX.1-2008/SUSv4 defines PTHREAD_RWLOCK_INITIALIZER macro again.
  // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/pthread.h.html
  ::pthread_rwlock_t rwlock_ = PTHREAD_RWLOCK_INITIALIZER;
#elif defined(PTHREAD_RWLOCK_INITIALIZER_NP)
  ::pthread_rwlock_t rwlock_ = PTHREAD_RWLOCK_INITIALIZER_NP;
#endif

#if YAMC_POSIX_TIMEOUT_SUPPORTED
  bool do_try_lockwait(const std::chrono::system_clock::time_point& tp)
  {
    using namespace std::chrono;
    struct ::timespec abs_timeout;
    abs_timeout.tv_sec = system_clock::to_time_t(tp);
    abs_timeout.tv_nsec = (long)(duration_cast<nanoseconds>(tp.time_since_epoch()).count() % 1000000000);
    return (::pthread_rwlock_timedwrlock(&rwlock_, &abs_timeout) == 0);
  }

  bool do_try_lock_sharedwait(const std::chrono::system_clock::time_point& tp)
  {
    using namespace std::chrono;
    struct ::timespec abs_timeout;
    abs_timeout.tv_sec = system_clock::to_time_t(tp);
    abs_timeout.tv_nsec = (long)(duration_cast<nanoseconds>(tp.time_since_epoch()).count() % 1000000000);
    return (::pthread_rwlock_timedrdlock(&rwlock_, &abs_timeout) == 0);
  }
#endif

public:
  rwlock() = default;
  ~rwlock()
  {
    ::pthread_rwlock_destroy(&rwlock_);
  }

  rwlock(const rwlock&) = delete;
  rwlock& operator=(const rwlock&) = delete;

  void lock()
  {
    ::pthread_rwlock_wrlock(&rwlock_);
  }

  bool try_lock()
  {
    return (::pthread_rwlock_trywrlock(&rwlock_) == 0);
  }

  void unlock()
  {
    ::pthread_rwlock_unlock(&rwlock_);
  }

  void lock_shared()
  {
    ::pthread_rwlock_rdlock(&rwlock_);
  }

  bool try_lock_shared()
  {
    return (::pthread_rwlock_tryrdlock(&rwlock_) == 0);
  }

  void unlock_shared()
  {
    ::pthread_rwlock_unlock(&rwlock_);
  }

#if YAMC_POSIX_TIMEOUT_SUPPORTED
  template <typename Rep, typename Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>& rel_time)
  {
    // C++ Standard says '_for'-suffixed timeout function shall use steady clock,
    // but we use std::chrono::system_clock which may or may not be steady.
    const auto tp = std::chrono::system_clock::now() + rel_time;
    return do_try_lockwait(tp);
  }

  template <typename Clock, typename Duration>
  bool try_lock_until(const std::chrono::time_point<Clock, Duration>& abs_time)
  {
    static_assert(std::is_same<Clock, std::chrono::system_clock>::value, "support only system_clock");
    return do_try_lockwait(abs_time);
  }

  template <typename Rep, typename Period>
  bool try_lock_shared_for(const std::chrono::duration<Rep, Period>& rel_time)
  {
    // C++ Standard says '_for'-suffixed timeout function shall use steady clock,
    // but we use std::chrono::system_clock which may or may not be steady.
    const auto tp = std::chrono::system_clock::now() + rel_time;
    return do_try_lock_sharedwait(tp);
  }

  template <typename Clock, typename Duration>
  bool try_lock_shared_until(const std::chrono::time_point<Clock, Duration>& abs_time)
  {
    static_assert(std::is_same<Clock, std::chrono::system_clock>::value, "support only system_clock");
    return do_try_lock_sharedwait(abs_time);
  }
#endif // YAMC_POSIX_TIMEOUT_SUPPORTED

  using native_handle_type = ::pthread_rwlock_t*;
  native_handle_type native_handle()
  {
    return &rwlock_;
  }
};


#if YAMC_POSIX_SPINLOCK_SUPPORTED
class spinlock {
  ::pthread_spinlock_t slock_;

public:
  // pthread_spin_init has no static initializer, so this cannot be constexpr.
  /*constexpr*/ spinlock() noexcept
  {
    ::pthread_spin_init(&slock_, 0);
  }

  ~spinlock()
  {
    ::pthread_spin_destroy(&slock_);
  }

  spinlock(const spinlock&) = delete;
  spinlock& operator=(const spinlock&) = delete;

  void lock()
  {
    ::pthread_spin_lock(&slock_);
  }

  bool try_lock()
  {
    return (::pthread_spin_trylock(&slock_) == 0);
  }

  void unlock()
  {
    ::pthread_spin_unlock(&slock_);
  }

  using native_handle_type = ::pthread_spinlock_t*;
  native_handle_type native_handle()
  {
    return &slock_;
  }
};
#endif // YAMC_POSIX_SPINLOCK_SUPPORTED


using mutex = native_mutex;
using recursive_mutex = native_recursive_mutex;
using timed_mutex = native_mutex;
using recursive_timed_mutex = native_recursive_mutex;

using shared_mutex = rwlock;
using shared_timed_mutex = rwlock;

} // namespace posix
} // namespace yamc

#endif
// -----------------------------------------------------------------------------
// include/checked_mutex.hpp
/*
 * checked_mutex.hpp
 *
 * MIT License
 *
 * Copyright (c) 2017 yohhoy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
25 | */ 26 | #ifndef YAMC_CHECKED_MUTEX_HPP_ 27 | #define YAMC_CHECKED_MUTEX_HPP_ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include "yamc_lock_validator.hpp" 37 | 38 | 39 | // call std::abort() when requirements violation 40 | #ifndef YAMC_CHECKED_CALL_ABORT 41 | #define YAMC_CHECKED_CALL_ABORT 0 42 | #endif 43 | 44 | // default deadlock detection mode 45 | #ifndef YAMC_CHECKED_DETECT_DEADLOCK 46 | #define YAMC_CHECKED_DETECT_DEADLOCK 1 47 | #endif 48 | 49 | 50 | namespace yamc { 51 | 52 | /* 53 | * strict requirements checking mutex for debug 54 | * 55 | * - yamc::checked::mutex 56 | * - yamc::checked::timed_mutex 57 | * - yamc::checked::recursive_mutex 58 | * - yamc::checked::recursive_timed_mutex 59 | */ 60 | namespace checked { 61 | 62 | namespace detail { 63 | 64 | #if YAMC_CHECKED_DETECT_DEADLOCK 65 | using validator = yamc::validator::deadlock; 66 | #else 67 | using validator = yamc::validator::null; 68 | #endif 69 | 70 | 71 | class mutex_base { 72 | protected: 73 | std::thread::id owner_; 74 | std::condition_variable cv_; 75 | std::mutex mtx_; 76 | 77 | void dtor_precondition(const char* emsg) 78 | { 79 | std::lock_guard lk(mtx_); 80 | if (owner_ != std::thread::id()) { 81 | // object liveness 82 | #if YAMC_CHECKED_CALL_ABORT 83 | std::abort(); 84 | (void)emsg; // suppress "unused variable" warning 85 | #else 86 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), emsg); 87 | #endif 88 | } 89 | } 90 | 91 | void lock() 92 | { 93 | const auto tid = std::this_thread::get_id(); 94 | std::unique_lock lk(mtx_); 95 | if (owner_ == tid) { 96 | // non-recursive semantics 97 | #if YAMC_CHECKED_CALL_ABORT 98 | std::abort(); 99 | #else 100 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "recursive lock"); 101 | #endif 102 | } 103 | while (owner_ != std::thread::id()) { 104 | if (!validator::enqueue(reinterpret_cast(this), 
tid, false)) { 105 | // deadlock detection 106 | #if YAMC_CHECKED_CALL_ABORT 107 | std::abort(); 108 | #else 109 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "deadlock"); 110 | #endif 111 | } 112 | cv_.wait(lk); 113 | validator::dequeue(reinterpret_cast(this), tid); 114 | } 115 | owner_ = tid; 116 | validator::locked(reinterpret_cast(this), tid, false); 117 | } 118 | 119 | bool try_lock() 120 | { 121 | const auto tid = std::this_thread::get_id(); 122 | std::lock_guard lk(mtx_); 123 | if (owner_ == tid) { 124 | // non-recursive semantics 125 | #if YAMC_CHECKED_CALL_ABORT 126 | std::abort(); 127 | #else 128 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "recursive try_lock"); 129 | #endif 130 | } 131 | if (owner_ != std::thread::id()) { 132 | return false; 133 | } 134 | owner_ = tid; 135 | validator::locked(reinterpret_cast(this), tid, false); 136 | return true; 137 | } 138 | 139 | void unlock() 140 | { 141 | const auto tid = std::this_thread::get_id(); 142 | std::lock_guard lk(mtx_); 143 | if (owner_ != tid) { 144 | // owner thread 145 | #if YAMC_CHECKED_CALL_ABORT 146 | std::abort(); 147 | #else 148 | throw std::system_error(std::make_error_code(std::errc::operation_not_permitted), "invalid unlock"); 149 | #endif 150 | } 151 | owner_ = std::thread::id(); 152 | validator::unlocked(reinterpret_cast(this), tid, false); 153 | cv_.notify_all(); 154 | } 155 | }; 156 | 157 | 158 | class recursive_mutex_base { 159 | protected: 160 | std::size_t ncount_ = 0; 161 | std::thread::id owner_; 162 | std::condition_variable cv_; 163 | std::mutex mtx_; 164 | 165 | void dtor_precondition(const char* emsg) 166 | { 167 | std::lock_guard lk(mtx_); 168 | if (ncount_ != 0 || owner_ != std::thread::id()) { 169 | // object liveness 170 | #if YAMC_CHECKED_CALL_ABORT 171 | std::abort(); 172 | (void)emsg; // suppress "unused variable" warning 173 | #else 174 | throw 
std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), emsg); 175 | #endif 176 | } 177 | } 178 | 179 | void lock() 180 | { 181 | const auto tid = std::this_thread::get_id(); 182 | std::unique_lock lk(mtx_); 183 | if (owner_ == tid) { 184 | ++ncount_; 185 | return; 186 | } 187 | while (ncount_ != 0) { 188 | if (!validator::enqueue(reinterpret_cast(this), tid, false)) { 189 | // deadlock detection 190 | #if YAMC_CHECKED_CALL_ABORT 191 | std::abort(); 192 | #else 193 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "deadlock"); 194 | #endif 195 | } 196 | cv_.wait(lk); 197 | validator::dequeue(reinterpret_cast(this), tid); 198 | } 199 | assert(owner_ == std::thread::id()); 200 | ncount_ = 1; 201 | owner_ = tid; 202 | validator::locked(reinterpret_cast(this), tid, false); 203 | } 204 | 205 | bool try_lock() 206 | { 207 | const auto tid = std::this_thread::get_id(); 208 | std::lock_guard lk(mtx_); 209 | if (owner_ == tid) { 210 | ++ncount_; 211 | return true; 212 | } 213 | if (ncount_ == 0) { 214 | assert(owner_ == std::thread::id()); 215 | ncount_ = 1; 216 | owner_ = tid; 217 | validator::locked(reinterpret_cast(this), tid, false); 218 | return true; 219 | } 220 | return false; 221 | } 222 | 223 | void unlock() 224 | { 225 | const auto tid = std::this_thread::get_id(); 226 | std::lock_guard lk(mtx_); 227 | if (owner_ != tid) { 228 | // owner thread 229 | #if YAMC_CHECKED_CALL_ABORT 230 | std::abort(); 231 | #else 232 | throw std::system_error(std::make_error_code(std::errc::operation_not_permitted), "invalid unlock"); 233 | #endif 234 | } 235 | assert(0 < ncount_); 236 | if (--ncount_ == 0) { 237 | owner_ = std::thread::id(); 238 | validator::unlocked(reinterpret_cast(this), tid, false); 239 | cv_.notify_all(); 240 | } 241 | } 242 | }; 243 | 244 | } // namespace detail 245 | 246 | 247 | class mutex : private detail::mutex_base { 248 | using base = detail::mutex_base; 249 | 250 | public: 251 | mutex() 
252 | { 253 | detail::validator::ctor(reinterpret_cast(this)); 254 | } 255 | 256 | ~mutex() noexcept(false) 257 | { 258 | detail::validator::dtor(reinterpret_cast(this)); 259 | dtor_precondition("abandoned mutex"); 260 | } 261 | 262 | mutex(const mutex&) = delete; 263 | mutex& operator=(const mutex&) = delete; 264 | 265 | using base::lock; 266 | using base::try_lock; 267 | using base::unlock; 268 | }; 269 | 270 | 271 | class timed_mutex : private detail::mutex_base { 272 | using base = detail::mutex_base; 273 | 274 | template 275 | bool do_try_lockwait(const std::chrono::time_point& tp, const char* emsg) 276 | { 277 | const auto tid = std::this_thread::get_id(); 278 | std::unique_lock lk(mtx_); 279 | if (owner_ == tid) { 280 | // non-recursive semantics 281 | #if YAMC_CHECKED_CALL_ABORT 282 | std::abort(); 283 | (void)emsg; // suppress "unused variable" warning 284 | #else 285 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), emsg); 286 | #endif 287 | } 288 | while (owner_ != std::thread::id()) { 289 | if (cv_.wait_until(lk, tp) == std::cv_status::timeout) { 290 | if (owner_ == std::thread::id()) // re-check predicate 291 | break; 292 | return false; 293 | } 294 | } 295 | owner_ = tid; 296 | detail::validator::locked(reinterpret_cast(this), tid, false); 297 | return true; 298 | } 299 | 300 | public: 301 | timed_mutex() 302 | { 303 | detail::validator::ctor(reinterpret_cast(this)); 304 | } 305 | 306 | ~timed_mutex() noexcept(false) 307 | { 308 | detail::validator::dtor(reinterpret_cast(this)); 309 | dtor_precondition("abandoned timed_mutex"); 310 | } 311 | 312 | timed_mutex(const timed_mutex&) = delete; 313 | timed_mutex& operator=(const timed_mutex&) = delete; 314 | 315 | using base::lock; 316 | using base::try_lock; 317 | using base::unlock; 318 | 319 | template 320 | bool try_lock_for(const std::chrono::duration& duration) 321 | { 322 | const auto tp = std::chrono::steady_clock::now() + duration; 323 | return 
do_try_lockwait(tp, "recursive try_lock_for"); 324 | } 325 | 326 | template 327 | bool try_lock_until(const std::chrono::time_point& tp) 328 | { 329 | return do_try_lockwait(tp, "recursive try_lock_until"); 330 | } 331 | }; 332 | 333 | 334 | class recursive_mutex : private detail::recursive_mutex_base { 335 | using base = detail::recursive_mutex_base; 336 | 337 | public: 338 | recursive_mutex() 339 | { 340 | detail::validator::ctor(reinterpret_cast(this)); 341 | } 342 | 343 | ~recursive_mutex() noexcept(false) 344 | { 345 | detail::validator::dtor(reinterpret_cast(this)); 346 | dtor_precondition("abandoned recursive_mutex"); 347 | } 348 | 349 | recursive_mutex(const recursive_mutex&) = delete; 350 | recursive_mutex& operator=(const recursive_mutex&) = delete; 351 | 352 | using base::lock; 353 | using base::try_lock; 354 | using base::unlock; 355 | }; 356 | 357 | 358 | class recursive_timed_mutex : private detail::recursive_mutex_base { 359 | using base = detail::recursive_mutex_base; 360 | 361 | template 362 | bool do_try_lockwait(const std::chrono::time_point& tp) 363 | { 364 | const auto tid = std::this_thread::get_id(); 365 | std::unique_lock lk(mtx_); 366 | if (owner_ == tid) { 367 | ++ncount_; 368 | return true; 369 | } 370 | while (ncount_ != 0) { 371 | if (cv_.wait_until(lk, tp) == std::cv_status::timeout) { 372 | if (ncount_ == 0) // re-check predicate 373 | break; 374 | return false; 375 | } 376 | } 377 | assert(owner_ == std::thread::id()); 378 | ncount_ = 1; 379 | owner_ = tid; 380 | detail::validator::locked(reinterpret_cast(this), tid, false); 381 | return true; 382 | } 383 | 384 | public: 385 | recursive_timed_mutex() 386 | { 387 | detail::validator::ctor(reinterpret_cast(this)); 388 | } 389 | 390 | ~recursive_timed_mutex() noexcept(false) 391 | { 392 | detail::validator::dtor(reinterpret_cast(this)); 393 | dtor_precondition("abandoned recursive_timed_mutex"); 394 | } 395 | 396 | recursive_timed_mutex(const recursive_timed_mutex&) = delete; 397 | 
recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete; 398 | 399 | using base::lock; 400 | using base::try_lock; 401 | using base::unlock; 402 | 403 | template 404 | bool try_lock_for(const std::chrono::duration& duration) 405 | { 406 | const auto tp = std::chrono::steady_clock::now() + duration; 407 | return do_try_lockwait(tp); 408 | } 409 | 410 | template 411 | bool try_lock_until(const std::chrono::time_point& tp) 412 | { 413 | return do_try_lockwait(tp); 414 | } 415 | }; 416 | 417 | } // namespace checked 418 | } // namespace yamc 419 | 420 | #endif 421 | -------------------------------------------------------------------------------- /include/checked_shared_mutex.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * checked_shared_mutex.hpp 3 | * 4 | * MIT License 5 | * 6 | * Copyright (c) 2017 yohhoy 7 | * 8 | * Permission is hereby granted, free of charge, to any person obtaining a copy 9 | * of this software and associated documentation files (the "Software"), to deal 10 | * in the Software without restriction, including without limitation the rights 11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | * copies of the Software, and to permit persons to whom the Software is 13 | * furnished to do so, subject to the following conditions: 14 | * 15 | * The above copyright notice and this permission notice shall be included in all 16 | * copies or substantial portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | * SOFTWARE. 25 | */ 26 | #ifndef YAMC_CHECKED_SHARED_MUTEX_HPP_ 27 | #define YAMC_CHECKED_SHARED_MUTEX_HPP_ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include "yamc_rwlock_sched.hpp" 38 | #include "yamc_lock_validator.hpp" 39 | 40 | 41 | // call std::abort() when requirements violation 42 | #ifndef YAMC_CHECKED_CALL_ABORT 43 | #define YAMC_CHECKED_CALL_ABORT 0 44 | #endif 45 | 46 | // default deadlock detection mode 47 | #ifndef YAMC_CHECKED_DETECT_DEADLOCK 48 | #define YAMC_CHECKED_DETECT_DEADLOCK 1 49 | #endif 50 | 51 | 52 | namespace yamc { 53 | 54 | /* 55 | * strict requirements checking shared mutex for debug 56 | * 57 | * - yamc::checked::shared_mutex 58 | * - yamc::checked::shared_timed_mutex 59 | * - yamc::checked::basic_shared_mutex 60 | * - yamc::checked::basic_shared_timed_mutex 61 | */ 62 | namespace checked { 63 | 64 | namespace detail { 65 | 66 | #if YAMC_CHECKED_DETECT_DEADLOCK 67 | using validator = yamc::validator::deadlock; 68 | #else 69 | using validator = yamc::validator::null; 70 | #endif 71 | 72 | 73 | template 74 | class shared_mutex_base { 75 | protected: 76 | typename RwLockPolicy::state state_; 77 | std::thread::id e_owner_; // exclusive ownership thread 78 | std::vector s_owner_; // shared ownership threads 79 | std::condition_variable cv_; 80 | std::mutex mtx_; 81 | 82 | bool is_shared_owner(std::thread::id tid) 83 | { 84 | return std::find(s_owner_.begin(), s_owner_.end(), tid) != s_owner_.end(); 85 | } 86 | 87 | void dtor_precondition(const char* emsg) 88 | { 89 | std::lock_guard lk(mtx_); 90 | if (e_owner_ != std::thread::id() || !s_owner_.empty()) { 91 | // object liveness 92 | #if 
YAMC_CHECKED_CALL_ABORT 93 | std::abort(); 94 | (void)emsg; // suppress "unused variable" warning 95 | #else 96 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), emsg); 97 | #endif 98 | } 99 | } 100 | 101 | void lock() 102 | { 103 | const auto tid = std::this_thread::get_id(); 104 | std::unique_lock lk(mtx_); 105 | if (e_owner_ == tid || is_shared_owner(tid)) { 106 | // non-recursive semantics 107 | #if YAMC_CHECKED_CALL_ABORT 108 | std::abort(); 109 | #else 110 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "recursive lock"); 111 | #endif 112 | } 113 | RwLockPolicy::before_wait_wlock(state_); 114 | while (RwLockPolicy::wait_wlock(state_)) { 115 | if (!validator::enqueue(reinterpret_cast(this), tid, false)) { 116 | // deadlock detection 117 | #if YAMC_CHECKED_CALL_ABORT 118 | std::abort(); 119 | #else 120 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "deadlock"); 121 | #endif 122 | } 123 | cv_.wait(lk); 124 | validator::dequeue(reinterpret_cast(this), tid); 125 | } 126 | RwLockPolicy::after_wait_wlock(state_); 127 | RwLockPolicy::acquire_wlock(state_); 128 | e_owner_ = tid; 129 | validator::locked(reinterpret_cast(this), tid, false); 130 | } 131 | 132 | bool try_lock() 133 | { 134 | const auto tid = std::this_thread::get_id(); 135 | std::lock_guard lk(mtx_); 136 | if (e_owner_ == tid || is_shared_owner(tid)) { 137 | // non-recursive semantics 138 | #if YAMC_CHECKED_CALL_ABORT 139 | std::abort(); 140 | #else 141 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "recursive try_lock"); 142 | #endif 143 | } 144 | if (RwLockPolicy::wait_wlock(state_)) 145 | return false; 146 | RwLockPolicy::acquire_wlock(state_); 147 | e_owner_ = tid; 148 | validator::locked(reinterpret_cast(this), tid, false); 149 | return true; 150 | } 151 | 152 | void unlock() 153 | { 154 | const auto tid = std::this_thread::get_id(); 
155 | std::lock_guard lk(mtx_); 156 | if (e_owner_ != tid) { 157 | // owner thread 158 | #if YAMC_CHECKED_CALL_ABORT 159 | std::abort(); 160 | #else 161 | throw std::system_error(std::make_error_code(std::errc::operation_not_permitted), "invalid unlock"); 162 | #endif 163 | } 164 | e_owner_ = {}; 165 | RwLockPolicy::release_wlock(state_); 166 | validator::unlocked(reinterpret_cast(this), tid, false); 167 | cv_.notify_all(); 168 | } 169 | 170 | void lock_shared() 171 | { 172 | const auto tid = std::this_thread::get_id(); 173 | std::unique_lock lk(mtx_); 174 | if (e_owner_ == tid || is_shared_owner(tid)) { 175 | // non-recursive semantics 176 | #if YAMC_CHECKED_CALL_ABORT 177 | std::abort(); 178 | #else 179 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "recursive lock_shared"); 180 | #endif 181 | } 182 | while (RwLockPolicy::wait_rlock(state_)) { 183 | if (!validator::enqueue(reinterpret_cast(this), tid, true)) { 184 | // deadlock detection 185 | #if YAMC_CHECKED_CALL_ABORT 186 | std::abort(); 187 | #else 188 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "deadlock"); 189 | #endif 190 | } 191 | cv_.wait(lk); 192 | validator::dequeue(reinterpret_cast(this), tid); 193 | } 194 | RwLockPolicy::acquire_rlock(state_); 195 | s_owner_.push_back(tid); 196 | validator::locked(reinterpret_cast(this), tid, true); 197 | } 198 | 199 | bool try_lock_shared() 200 | { 201 | const auto tid = std::this_thread::get_id(); 202 | std::lock_guard lk(mtx_); 203 | if (e_owner_ == tid || is_shared_owner(tid)) { 204 | // non-recursive semantics 205 | #if YAMC_CHECKED_CALL_ABORT 206 | std::abort(); 207 | #else 208 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), "recursive try_lock_shared"); 209 | #endif 210 | } 211 | if (RwLockPolicy::wait_rlock(state_)) 212 | return false; 213 | RwLockPolicy::acquire_rlock(state_); 214 | s_owner_.push_back(tid); 215 | 
validator::locked(reinterpret_cast(this), tid, true); 216 | return true; 217 | } 218 | 219 | void unlock_shared() 220 | { 221 | const auto tid = std::this_thread::get_id(); 222 | std::lock_guard lk(mtx_); 223 | if (!is_shared_owner(tid)) { 224 | // owner thread 225 | #if YAMC_CHECKED_CALL_ABORT 226 | std::abort(); 227 | #else 228 | throw std::system_error(std::make_error_code(std::errc::operation_not_permitted), "invalid unlock_shared"); 229 | #endif 230 | } 231 | if (RwLockPolicy::release_rlock(state_)) { 232 | cv_.notify_all(); 233 | } 234 | auto result = std::remove(s_owner_.begin(), s_owner_.end(), tid); 235 | s_owner_.erase(result, s_owner_.end()); 236 | validator::unlocked(reinterpret_cast(this), tid, true); 237 | } 238 | }; 239 | 240 | } // namespace detail 241 | 242 | 243 | template 244 | class basic_shared_mutex : private detail::shared_mutex_base { 245 | using base = detail::shared_mutex_base; 246 | 247 | public: 248 | basic_shared_mutex() 249 | { 250 | detail::validator::ctor(reinterpret_cast(this)); 251 | } 252 | 253 | ~basic_shared_mutex() noexcept(false) 254 | { 255 | detail::validator::dtor(reinterpret_cast(this)); 256 | base::dtor_precondition("abandoned shared_mutex"); 257 | } 258 | 259 | basic_shared_mutex(const basic_shared_mutex&) = delete; 260 | basic_shared_mutex& operator=(const basic_shared_mutex&) = delete; 261 | 262 | using base::lock; 263 | using base::try_lock; 264 | using base::unlock; 265 | 266 | using base::lock_shared; 267 | using base::try_lock_shared; 268 | using base::unlock_shared; 269 | }; 270 | 271 | using shared_mutex = basic_shared_mutex; 272 | 273 | 274 | template 275 | class basic_shared_timed_mutex : private detail::shared_mutex_base { 276 | using base = detail::shared_mutex_base; 277 | 278 | using base::state_; 279 | using base::e_owner_; 280 | using base::s_owner_; 281 | using base::cv_; 282 | using base::mtx_; 283 | 284 | template 285 | bool do_try_lockwait(const std::chrono::time_point& tp, const char* emsg) 286 | { 
287 | const auto tid = std::this_thread::get_id(); 288 | std::unique_lock lk(mtx_); 289 | if (e_owner_ == tid || base::is_shared_owner(tid)) { 290 | // non-recursive semantics 291 | #if YAMC_CHECKED_CALL_ABORT 292 | std::abort(); 293 | (void)emsg; // suppress "unused variable" warning 294 | #else 295 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), emsg); 296 | #endif 297 | } 298 | RwLockPolicy::before_wait_wlock(state_); 299 | while (RwLockPolicy::wait_wlock(state_)) { 300 | if (cv_.wait_until(lk, tp) == std::cv_status::timeout) { 301 | if (!RwLockPolicy::wait_wlock(state_)) // re-check predicate 302 | break; 303 | RwLockPolicy::after_wait_wlock(state_); 304 | return false; 305 | } 306 | } 307 | RwLockPolicy::after_wait_wlock(state_); 308 | RwLockPolicy::acquire_wlock(state_); 309 | e_owner_ = tid; 310 | detail::validator::locked(reinterpret_cast(this), tid, false); 311 | return true; 312 | } 313 | 314 | template 315 | bool do_try_lock_sharedwait(const std::chrono::time_point& tp, const char* emsg) 316 | { 317 | const auto tid = std::this_thread::get_id(); 318 | std::unique_lock lk(mtx_); 319 | if (e_owner_ == tid || base::is_shared_owner(tid)) { 320 | // non-recursive semantics 321 | #if YAMC_CHECKED_CALL_ABORT 322 | std::abort(); 323 | (void)emsg; // suppress "unused variable" warning 324 | #else 325 | throw std::system_error(std::make_error_code(std::errc::resource_deadlock_would_occur), emsg); 326 | #endif 327 | } 328 | while (RwLockPolicy::wait_rlock(state_)) { 329 | if (cv_.wait_until(lk, tp) == std::cv_status::timeout) { 330 | if (!RwLockPolicy::wait_rlock(state_)) // re-check predicate 331 | break; 332 | return false; 333 | } 334 | } 335 | RwLockPolicy::acquire_rlock(state_); 336 | s_owner_.push_back(tid); 337 | detail::validator::locked(reinterpret_cast(this), tid, true); 338 | return true; 339 | } 340 | 341 | public: 342 | basic_shared_timed_mutex() 343 | { 344 | detail::validator::ctor(reinterpret_cast(this)); 
345 | } 346 | 347 | ~basic_shared_timed_mutex() noexcept(false) 348 | { 349 | detail::validator::dtor(reinterpret_cast(this)); 350 | base::dtor_precondition("abandoned shared_timed_mutex"); 351 | } 352 | 353 | basic_shared_timed_mutex(const basic_shared_timed_mutex&) = delete; 354 | basic_shared_timed_mutex& operator=(const basic_shared_timed_mutex&) = delete; 355 | 356 | using base::lock; 357 | using base::try_lock; 358 | using base::unlock; 359 | 360 | template 361 | bool try_lock_for(const std::chrono::duration& duration) 362 | { 363 | const auto tp = std::chrono::steady_clock::now() + duration; 364 | return do_try_lockwait(tp, "recursive try_lock_for"); 365 | } 366 | 367 | template 368 | bool try_lock_until(const std::chrono::time_point& tp) 369 | { 370 | return do_try_lockwait(tp, "recursive try_lock_until"); 371 | } 372 | 373 | using base::lock_shared; 374 | using base::try_lock_shared; 375 | using base::unlock_shared; 376 | 377 | template 378 | bool try_lock_shared_for(const std::chrono::duration& duration) 379 | { 380 | const auto tp = std::chrono::steady_clock::now() + duration; 381 | return do_try_lock_sharedwait(tp, "recursive try_lock_shared_for"); 382 | } 383 | 384 | template 385 | bool try_lock_shared_until(const std::chrono::time_point& tp) 386 | { 387 | return do_try_lock_sharedwait(tp, "recursive try_lock_shared_until"); 388 | } 389 | }; 390 | 391 | using shared_timed_mutex = basic_shared_timed_mutex; 392 | 393 | } // namespace checked 394 | } // namespace yamc 395 | 396 | #endif 397 | -------------------------------------------------------------------------------- /tests/checked_test.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * checked_test.cpp 3 | * 4 | * test configuration: 5 | * - YAMC_CHECKED_CALL_ABORT=0 throw std::system_error [default] 6 | * - YAMC_CHECKED_CALL_ABORT=1 call std::abort() 7 | */ 8 | #include 9 | #include "gtest/gtest.h" 10 | #include "checked_mutex.hpp" 11 | #include 
"checked_shared_mutex.hpp" 12 | #include "yamc_testutil.hpp" 13 | 14 | 15 | #if YAMC_CHECKED_CALL_ABORT 16 | // call std::abort() on check failure 17 | #if TEST_PLATFORM_WINDOWS 18 | #define EXPECT_CHECK_FAILURE(statement_) EXPECT_EXIT(statement_, ::testing::ExitedWithCode(3), "") 19 | #else 20 | #define EXPECT_CHECK_FAILURE(statement_) EXPECT_EXIT(statement_, ::testing::KilledBySignal(SIGABRT), "") 21 | #endif 22 | #define EXPECT_CHECK_FAILURE_OUTER(block_) EXPECT_CHECK_FAILURE(block_) 23 | #define EXPECT_CHECK_FAILURE_INNER(block_) block_ 24 | 25 | #else 26 | // throw an exception on check failure 27 | #define EXPECT_CHECK_FAILURE(statement_) EXPECT_THROW(statement_, std::system_error) 28 | #define EXPECT_CHECK_FAILURE_OUTER(block_) block_ 29 | #define EXPECT_CHECK_FAILURE_INNER(block_) EXPECT_CHECK_FAILURE(block_) 30 | 31 | #endif 32 | 33 | 34 | using CheckedMutexTypes = ::testing::Types< 35 | yamc::checked::mutex, 36 | yamc::checked::timed_mutex, 37 | yamc::checked::shared_mutex 38 | >; 39 | 40 | template 41 | struct CheckedMutexTest : ::testing::Test {}; 42 | 43 | TYPED_TEST_SUITE(CheckedMutexTest, CheckedMutexTypes); 44 | 45 | // abandon mutex 46 | TYPED_TEST(CheckedMutexTest, AbandonMutex) { 47 | EXPECT_CHECK_FAILURE({ 48 | TypeParam mtx; 49 | mtx.lock(); 50 | // no unlock() 51 | }); 52 | } 53 | 54 | // abandon mutex by other thread 55 | TYPED_TEST(CheckedMutexTest, AbandonMutexSide) { 56 | auto test_body = []{ 57 | yamc::test::barrier step(2); 58 | auto pmtx = yamc::cxx::make_unique(); 59 | // owner-thread 60 | yamc::test::join_thread thd([&]{ 61 | ASSERT_NO_THROW(pmtx->lock()); 62 | step.await(); // b1 63 | step.await(); // b2 64 | }); 65 | // other-thread 66 | { 67 | step.await(); // b1 68 | EXPECT_CHECK_FAILURE_INNER({ 69 | delete pmtx.release(); 70 | }); 71 | step.await(); // b2 72 | } 73 | }; 74 | EXPECT_CHECK_FAILURE_OUTER(test_body()); 75 | } 76 | 77 | // recurse lock() on non-recursive mutex 78 | TYPED_TEST(CheckedMutexTest, RecurseLock) { 79 | 
TypeParam mtx; 80 | ASSERT_NO_THROW(mtx.lock()); 81 | EXPECT_CHECK_FAILURE(mtx.lock()); 82 | ASSERT_NO_THROW(mtx.unlock()); 83 | } 84 | 85 | // recurse try_lock() on non-recursive mutex 86 | TYPED_TEST(CheckedMutexTest, RecurseTryLock) { 87 | TypeParam mtx; 88 | ASSERT_NO_THROW(mtx.lock()); 89 | EXPECT_CHECK_FAILURE(mtx.try_lock()); 90 | ASSERT_NO_THROW(mtx.unlock()); 91 | } 92 | 93 | // invalid unlock() 94 | TYPED_TEST(CheckedMutexTest, InvalidUnlock0) { 95 | TypeParam mtx; 96 | EXPECT_CHECK_FAILURE(mtx.unlock()); 97 | } 98 | 99 | // invalid unlock() 100 | TYPED_TEST(CheckedMutexTest, InvalidUnlock1) { 101 | TypeParam mtx; 102 | ASSERT_NO_THROW(mtx.lock()); 103 | ASSERT_NO_THROW(mtx.unlock()); 104 | EXPECT_CHECK_FAILURE(mtx.unlock()); 105 | } 106 | 107 | // non owner thread call unlock() 108 | TYPED_TEST(CheckedMutexTest, NonOwnerUnlock) { 109 | auto test_body = []{ 110 | yamc::test::barrier step(2); 111 | TypeParam mtx; 112 | // owner-thread 113 | yamc::test::join_thread thd([&]{ 114 | ASSERT_NO_THROW(mtx.lock()); 115 | step.await(); // b1 116 | step.await(); // b2 117 | ASSERT_NO_THROW(mtx.unlock()); 118 | }); 119 | // other-thread 120 | { 121 | step.await(); // b1 122 | EXPECT_CHECK_FAILURE_INNER(mtx.unlock()); 123 | step.await(); // b2 124 | } 125 | }; 126 | EXPECT_CHECK_FAILURE_OUTER(test_body()); 127 | } 128 | 129 | 130 | using CheckedRecursiveMutexTypes = ::testing::Types< 131 | yamc::checked::recursive_mutex, 132 | yamc::checked::recursive_timed_mutex 133 | >; 134 | 135 | template 136 | struct CheckedRecursiveMutexTest : ::testing::Test {}; 137 | 138 | TYPED_TEST_SUITE(CheckedRecursiveMutexTest, CheckedRecursiveMutexTypes); 139 | 140 | // abandon recursive_mutex 141 | TYPED_TEST(CheckedRecursiveMutexTest, AbandonMutex) { 142 | EXPECT_CHECK_FAILURE({ 143 | TypeParam mtx; 144 | mtx.lock(); 145 | // no unlock() 146 | }); 147 | } 148 | 149 | // abandon mutex by other thread 150 | TYPED_TEST(CheckedRecursiveMutexTest, AbandonMutexSide) { 151 | auto test_body = 
[]{ 152 | yamc::test::barrier step(2); 153 | auto pmtx = yamc::cxx::make_unique(); 154 | // owner-thread 155 | yamc::test::join_thread thd([&]{ 156 | ASSERT_NO_THROW(pmtx->lock()); 157 | step.await(); // b1 158 | step.await(); // b2 159 | }); 160 | // other-thread 161 | { 162 | step.await(); // b1 163 | EXPECT_CHECK_FAILURE_INNER({ 164 | delete pmtx.release(); 165 | }); 166 | step.await(); // b2 167 | } 168 | }; 169 | EXPECT_CHECK_FAILURE_OUTER(test_body()); 170 | } 171 | 172 | // invalid unlock() 173 | TYPED_TEST(CheckedRecursiveMutexTest, InvalidUnlock0) { 174 | TypeParam mtx; 175 | EXPECT_CHECK_FAILURE(mtx.unlock()); 176 | } 177 | 178 | // invalid unlock() 179 | TYPED_TEST(CheckedRecursiveMutexTest, InvalidUnlock1) { 180 | TypeParam mtx; 181 | ASSERT_NO_THROW(mtx.lock()); // lockcnt = 1 182 | ASSERT_NO_THROW(mtx.unlock()); // lockcnt = 0 183 | EXPECT_CHECK_FAILURE(mtx.unlock()); 184 | } 185 | 186 | // invalid unlock() 187 | TYPED_TEST(CheckedRecursiveMutexTest, InvalidUnlock2) { 188 | TypeParam mtx; 189 | ASSERT_NO_THROW(mtx.lock()); // lockcnt = 1 190 | ASSERT_NO_THROW(mtx.lock()); // lockcnt = 2 191 | ASSERT_NO_THROW(mtx.unlock()); // lockcnt = 1 192 | ASSERT_NO_THROW(mtx.unlock()); // lockcnt = 0 193 | EXPECT_CHECK_FAILURE(mtx.unlock()); 194 | } 195 | 196 | // non owner thread call unlock() 197 | TYPED_TEST(CheckedRecursiveMutexTest, NonOwnerUnlock) { 198 | auto test_body = []{ 199 | yamc::test::barrier step(2); 200 | TypeParam mtx; 201 | // owner-thread 202 | yamc::test::join_thread thd([&]{ 203 | ASSERT_NO_THROW(mtx.lock()); 204 | step.await(); // b1 205 | step.await(); // b2 206 | ASSERT_NO_THROW(mtx.unlock()); 207 | }); 208 | // other-thread 209 | { 210 | step.await(); // b1 211 | EXPECT_CHECK_FAILURE_INNER(mtx.unlock()); 212 | step.await(); // b2 213 | } 214 | }; 215 | EXPECT_CHECK_FAILURE_OUTER(test_body()); 216 | } 217 | 218 | 219 | using CheckedTimedMutexTypes = ::testing::Types< 220 | yamc::checked::timed_mutex, 221 | 
yamc::checked::shared_timed_mutex 222 | >; 223 | 224 | template 225 | struct CheckedTimedMutexTest : ::testing::Test {}; 226 | 227 | TYPED_TEST_SUITE(CheckedTimedMutexTest, CheckedTimedMutexTypes); 228 | 229 | // recurse try_lock_for() on non-recursive mutex 230 | TYPED_TEST(CheckedTimedMutexTest, RecurseTryLockFor) { 231 | TypeParam mtx; 232 | ASSERT_NO_THROW(mtx.lock()); 233 | EXPECT_CHECK_FAILURE(mtx.try_lock_for(std::chrono::seconds(1))); 234 | ASSERT_NO_THROW(mtx.unlock()); 235 | } 236 | 237 | // recurse try_lock_until() on non-recursive mutex 238 | TYPED_TEST(CheckedTimedMutexTest, RecurseTryLockUntil) { 239 | TypeParam mtx; 240 | ASSERT_NO_THROW(mtx.lock()); 241 | EXPECT_CHECK_FAILURE(mtx.try_lock_until(std::chrono::system_clock::now())); 242 | ASSERT_NO_THROW(mtx.unlock()); 243 | } 244 | 245 | 246 | using CheckedSharedMutexTypes = ::testing::Types< 247 | yamc::checked::shared_mutex, 248 | yamc::checked::shared_timed_mutex 249 | >; 250 | 251 | template 252 | struct CheckedSharedMutexTest : ::testing::Test {}; 253 | 254 | TYPED_TEST_SUITE(CheckedSharedMutexTest, CheckedSharedMutexTypes); 255 | 256 | // abandon mutex 257 | TYPED_TEST(CheckedSharedMutexTest, AbandonMutex) { 258 | EXPECT_CHECK_FAILURE({ 259 | TypeParam mtx; 260 | mtx.lock_shared(); 261 | // no unlock() 262 | }); 263 | } 264 | 265 | // abandon mutex by other thread 266 | TYPED_TEST(CheckedSharedMutexTest, AbandonMutexSide) { 267 | auto test_body = []{ 268 | yamc::test::barrier step(2); 269 | auto pmtx = yamc::cxx::make_unique(); 270 | // owner-thread 271 | yamc::test::join_thread thd([&]{ 272 | ASSERT_NO_THROW(pmtx->lock_shared()); 273 | step.await(); // b1 274 | step.await(); // b2 275 | }); 276 | // other-thread 277 | { 278 | step.await(); // b1 279 | EXPECT_CHECK_FAILURE_INNER({ 280 | delete pmtx.release(); 281 | }); 282 | step.await(); // b2 283 | } 284 | }; 285 | EXPECT_CHECK_FAILURE_OUTER(test_body()); 286 | } 287 | 288 | // recurse lock_shared() 289 | TYPED_TEST(CheckedSharedMutexTest, 
RecurseLockShared) { 290 | TypeParam mtx; 291 | ASSERT_NO_THROW(mtx.lock_shared()); 292 | EXPECT_CHECK_FAILURE(mtx.lock_shared()); 293 | ASSERT_NO_THROW(mtx.unlock_shared()); 294 | } 295 | 296 | // recurse try_lock_shared() 297 | TYPED_TEST(CheckedSharedMutexTest, RecurseTryLockShared) { 298 | TypeParam mtx; 299 | ASSERT_NO_THROW(mtx.lock_shared()); 300 | EXPECT_CHECK_FAILURE(mtx.try_lock_shared()); 301 | ASSERT_NO_THROW(mtx.unlock_shared()); 302 | } 303 | 304 | // lock() to lock_shared() 305 | TYPED_TEST(CheckedSharedMutexTest, LockToLockShared) { 306 | TypeParam mtx; 307 | ASSERT_NO_THROW(mtx.lock()); 308 | EXPECT_CHECK_FAILURE(mtx.lock_shared()); 309 | ASSERT_NO_THROW(mtx.unlock()); 310 | } 311 | 312 | // lock() to try_lock_shared() 313 | TYPED_TEST(CheckedSharedMutexTest, LockToTryLockShared) { 314 | TypeParam mtx; 315 | ASSERT_NO_THROW(mtx.lock()); 316 | EXPECT_CHECK_FAILURE(mtx.try_lock_shared()); 317 | ASSERT_NO_THROW(mtx.unlock()); 318 | } 319 | 320 | // lock_shared() to lock() 321 | TYPED_TEST(CheckedSharedMutexTest, LockSharedToLock) { 322 | TypeParam mtx; 323 | ASSERT_NO_THROW(mtx.lock_shared()); 324 | EXPECT_CHECK_FAILURE(mtx.lock()); 325 | ASSERT_NO_THROW(mtx.unlock_shared()); 326 | } 327 | 328 | // lock_shared() to try_lock() 329 | TYPED_TEST(CheckedSharedMutexTest, LockSharedToTryLock) { 330 | TypeParam mtx; 331 | ASSERT_NO_THROW(mtx.lock_shared()); 332 | EXPECT_CHECK_FAILURE(mtx.try_lock()); 333 | ASSERT_NO_THROW(mtx.unlock_shared()); 334 | } 335 | 336 | // unmatch unlock() 337 | TYPED_TEST(CheckedSharedMutexTest, UnmatchUnlock) { 338 | TypeParam mtx; 339 | ASSERT_NO_THROW(mtx.lock_shared()); 340 | EXPECT_CHECK_FAILURE(mtx.unlock()); 341 | ASSERT_NO_THROW(mtx.unlock_shared()); 342 | } 343 | 344 | // unmatch unlock_shared() 345 | TYPED_TEST(CheckedSharedMutexTest, UnmatchUnlockShared) { 346 | TypeParam mtx; 347 | ASSERT_NO_THROW(mtx.lock()); 348 | EXPECT_CHECK_FAILURE(mtx.unlock_shared()); 349 | ASSERT_NO_THROW(mtx.unlock()); 350 | } 351 | 352 | // 
invalid unlock_shared() 353 | TYPED_TEST(CheckedSharedMutexTest, InvalidUnlockShared0) { 354 | TypeParam mtx; 355 | EXPECT_CHECK_FAILURE(mtx.unlock_shared()); 356 | } 357 | 358 | // invalid unlock_shared() 359 | TYPED_TEST(CheckedSharedMutexTest, InvalidUnlockShared1) { 360 | TypeParam mtx; 361 | ASSERT_NO_THROW(mtx.lock_shared()); 362 | ASSERT_NO_THROW(mtx.unlock_shared()); 363 | EXPECT_CHECK_FAILURE(mtx.unlock_shared()); 364 | } 365 | 366 | // non owner thread call unlock_shared() 367 | TYPED_TEST(CheckedSharedMutexTest, NonOwnerUnlockShared) { 368 | auto test_body = []{ 369 | yamc::test::barrier step(2); 370 | TypeParam mtx; 371 | // owner-thread 372 | yamc::test::join_thread thd([&]{ 373 | ASSERT_NO_THROW(mtx.lock_shared()); 374 | step.await(); // b1 375 | step.await(); // b2 376 | ASSERT_NO_THROW(mtx.unlock_shared()); 377 | }); 378 | // other-thread 379 | { 380 | step.await(); // b1 381 | EXPECT_CHECK_FAILURE_INNER(mtx.unlock_shared()); 382 | step.await(); // b2 383 | } 384 | }; 385 | EXPECT_CHECK_FAILURE_OUTER(test_body()); 386 | } 387 | 388 | 389 | using CheckedSharedTimedMutexTypes = ::testing::Types< 390 | yamc::checked::shared_timed_mutex 391 | >; 392 | 393 | template 394 | struct CheckedSharedTimedMutexTest : ::testing::Test {}; 395 | 396 | TYPED_TEST_SUITE(CheckedSharedTimedMutexTest, CheckedSharedTimedMutexTypes); 397 | 398 | // recurse try_lock_shared_for() 399 | TYPED_TEST(CheckedSharedTimedMutexTest, RecurseTryLockSharedFor) { 400 | TypeParam mtx; 401 | ASSERT_NO_THROW(mtx.lock_shared()); 402 | EXPECT_CHECK_FAILURE(mtx.try_lock_shared_for(std::chrono::seconds(1))); 403 | ASSERT_NO_THROW(mtx.unlock_shared()); 404 | } 405 | 406 | // recurse try_lock_shared_until() 407 | TYPED_TEST(CheckedSharedTimedMutexTest, RecurseTryLockSharedUntil) { 408 | TypeParam mtx; 409 | ASSERT_NO_THROW(mtx.lock_shared()); 410 | EXPECT_CHECK_FAILURE(mtx.try_lock_shared_until(std::chrono::system_clock::now())); 411 | ASSERT_NO_THROW(mtx.unlock_shared()); 412 | } 413 | 414 | 
// lock() to try_lock_shared_for() 415 | TYPED_TEST(CheckedSharedTimedMutexTest, LockToTryLockSharedFor) { 416 | TypeParam mtx; 417 | ASSERT_NO_THROW(mtx.lock()); 418 | EXPECT_CHECK_FAILURE(mtx.try_lock_shared_for(std::chrono::seconds(1))); 419 | ASSERT_NO_THROW(mtx.unlock()); 420 | } 421 | 422 | // lock() to try_lock_shared_until() 423 | TYPED_TEST(CheckedSharedTimedMutexTest, LockToTryLockSharedUntil) { 424 | TypeParam mtx; 425 | ASSERT_NO_THROW(mtx.lock()); 426 | EXPECT_CHECK_FAILURE(mtx.try_lock_shared_until(std::chrono::system_clock::now())); 427 | ASSERT_NO_THROW(mtx.unlock()); 428 | } 429 | 430 | // lock_shared() to try_lock_for() 431 | TYPED_TEST(CheckedSharedTimedMutexTest, LockSharedToTryLockFor) { 432 | TypeParam mtx; 433 | ASSERT_NO_THROW(mtx.lock_shared()); 434 | EXPECT_CHECK_FAILURE(mtx.try_lock_for(std::chrono::seconds(1))); 435 | ASSERT_NO_THROW(mtx.unlock_shared()); 436 | } 437 | 438 | // lock_shared() to try_lock_until() 439 | TYPED_TEST(CheckedSharedTimedMutexTest, LockSharedToTryLockUntil) { 440 | TypeParam mtx; 441 | ASSERT_NO_THROW(mtx.lock_shared()); 442 | EXPECT_CHECK_FAILURE(mtx.try_lock_until(std::chrono::system_clock::now())); 443 | ASSERT_NO_THROW(mtx.unlock_shared()); 444 | } 445 | -------------------------------------------------------------------------------- /tests/lock_test.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * lock_test.cpp 3 | */ 4 | #include 5 | #include "gtest/gtest.h" 6 | #include "yamc_shared_lock.hpp" 7 | #include "yamc_scoped_lock.hpp" 8 | #include "yamc_testutil.hpp" 9 | 10 | 11 | #define EXPECT_THROW_SYSTEM_ERROR(errorcode_, block_) \ 12 | try { \ 13 | block_ \ 14 | FAIL(); \ 15 | } catch (const std::system_error& e) { \ 16 | EXPECT_EQ(std::make_error_code(errorcode_), e.code()); \ 17 | } 18 | 19 | 20 | using MockSharedMutex = yamc::mock::shared_mutex; 21 | using MockSharedTimedMutex = yamc::mock::shared_timed_mutex; 22 | 23 | // shared_lock::mutex_type 24 | 
TEST(SharedLockTest, MutexType)
{
  // template arguments restored throughout: "<...>" contents were lost in extraction
  bool shall_be_true = std::is_same<MockSharedMutex, yamc::shared_lock<MockSharedMutex>::mutex_type>::value;
  EXPECT_TRUE(shall_be_true);
}

// shared_lock() noexcept
TEST(SharedLockTest, CtorDefault)
{
  yamc::shared_lock<MockSharedMutex> lk;
  EXPECT_EQ(nullptr, lk.mutex());
  EXPECT_FALSE(lk.owns_lock());
  EXPECT_TRUE(noexcept(yamc::shared_lock<MockSharedMutex>{}));
}

// explicit shared_lock(mutex_type&)
TEST(SharedLockTest, CtorMutex)
{
  MockSharedMutex mtx;
  yamc::shared_lock<MockSharedMutex> lk(mtx);
  EXPECT_EQ(&mtx, lk.mutex());
  EXPECT_TRUE(lk.owns_lock());
}

// shared_lock(mutex_type&, defer_lock_t) noexcept
TEST(SharedLockTest, CtorDeferLock)
{
  MockSharedMutex mtx;
  yamc::shared_lock<MockSharedMutex> lk(mtx, std::defer_lock);
  EXPECT_EQ(&mtx, lk.mutex());
  EXPECT_FALSE(lk.owns_lock());
  EXPECT_TRUE(noexcept(yamc::shared_lock<MockSharedMutex>(mtx, std::defer_lock)));
}

// shared_lock(mutex_type&, try_to_lock_t)
TEST(SharedLockTest, CtorTryToLock)
{
  MockSharedMutex mtx;
  yamc::shared_lock<MockSharedMutex> lk(mtx, std::try_to_lock);
  EXPECT_EQ(&mtx, lk.mutex());
  EXPECT_TRUE(lk.owns_lock());
}

// shared_lock(mutex_type&, try_to_lock_t) failure
TEST(SharedLockTest, CtorTryToLockFail)
{
  MockSharedMutex mtx;
  mtx.retval_on_trylock = false;  // force try_lock_shared() to fail
  yamc::shared_lock<MockSharedMutex> lk(mtx, std::try_to_lock);
  EXPECT_EQ(&mtx, lk.mutex());
  EXPECT_FALSE(lk.owns_lock());
}

// shared_lock(mutex_type&, adopt_lock_t)
TEST(SharedLockTest, CtorAdoptLock)
{
  MockSharedMutex mtx;
  mtx.lock_shared();  // adopt an already-held shared lock
  yamc::shared_lock<MockSharedMutex> lk(mtx, std::adopt_lock);
  EXPECT_EQ(&mtx, lk.mutex());
  EXPECT_TRUE(lk.owns_lock());
}

// shared_lock(mutex_type&, const chrono::time_point&)
TEST(SharedLockTest, CtorTimePoint)
{
  MockSharedTimedMutex mtx;
  yamc::shared_lock<MockSharedTimedMutex> lk(mtx, std::chrono::system_clock::now());
  EXPECT_EQ(&mtx, lk.mutex());
  EXPECT_TRUE(lk.owns_lock());
}

// shared_lock(mutex_type&, const chrono::time_point&) failure
TEST(SharedLockTest, CtorTimePointFail)
{
  MockSharedTimedMutex mtx;
  mtx.retval_on_trylock = false;
  yamc::shared_lock<MockSharedTimedMutex> lk(mtx, std::chrono::system_clock::now());
  EXPECT_EQ(&mtx, lk.mutex());
  EXPECT_FALSE(lk.owns_lock());
}

// shared_lock(mutex_type&, const chrono::duration&)
TEST(SharedLockTest, CtorRelTime)
{
  MockSharedTimedMutex mtx;
  yamc::shared_lock<MockSharedTimedMutex> lk(mtx, std::chrono::milliseconds(1));
  EXPECT_EQ(&mtx, lk.mutex());
  EXPECT_TRUE(lk.owns_lock());
}

// shared_lock(mutex_type&, const chrono::duration&) failure
TEST(SharedLockTest, CtorRelTimeFail)
{
  MockSharedTimedMutex mtx;
  mtx.retval_on_trylock = false;
  yamc::shared_lock<MockSharedTimedMutex> lk(mtx, std::chrono::milliseconds(1));
  EXPECT_EQ(&mtx, lk.mutex());
  EXPECT_FALSE(lk.owns_lock());
}

// shared_lock(shared_lock&&) noexcept
TEST(SharedLockTest, MoveCtor)
{
  MockSharedMutex mtx;
  yamc::shared_lock<MockSharedMutex> lk1(mtx);
  yamc::shared_lock<MockSharedMutex> lk2(std::move(lk1));  // move-constructor
  EXPECT_EQ(nullptr, lk1.mutex());
  EXPECT_FALSE(lk1.owns_lock());
  EXPECT_EQ(&mtx, lk2.mutex());
  EXPECT_TRUE(lk2.owns_lock());
  EXPECT_TRUE(noexcept(yamc::shared_lock<MockSharedMutex>(std::move(lk2))));
}

// shared_lock& operator=(shared_lock&&) noexcept
TEST(SharedLockTest, MoveAssign)
{
  MockSharedMutex mtx;
  yamc::shared_lock<MockSharedMutex> lk1(mtx);
  yamc::shared_lock<MockSharedMutex> lk2;
  lk2 = std::move(lk1);  // move-assignment
  EXPECT_EQ(nullptr, lk1.mutex());
  EXPECT_FALSE(lk1.owns_lock());
  EXPECT_EQ(&mtx, lk2.mutex());
  EXPECT_TRUE(lk2.owns_lock());
  EXPECT_TRUE(noexcept(lk1 = std::move(lk2)));
}

// lock()
TEST(SharedLockTest, Lock)
{
  MockSharedMutex mtx;
  yamc::shared_lock<MockSharedMutex> lk(mtx, std::defer_lock);
  EXPECT_NO_THROW(lk.lock());
  EXPECT_TRUE(lk.owns_lock());
}

// lock() throw exception/operation_not_permitted
TEST(SharedLockTest, LockThrowEPERM)
{
  {
    yamc::shared_lock<MockSharedMutex> lk;
    EXPECT_THROW(lk.lock(), std::system_error);
  }
  {
    yamc::shared_lock<MockSharedMutex> lk;
    EXPECT_THROW_SYSTEM_ERROR(std::errc::operation_not_permitted, {
      lk.lock();
    });
  }
}

// lock() throw exception/resource_deadlock_would_occur
TEST(SharedLockTest, LockThrowEDEADLK)
{
  {
    // NOTE(review): this sub-case has no associated mutex, so lock() raises
    // EPERM rather than EDEADLK; it only asserts the generic exception type.
    yamc::shared_lock<MockSharedMutex> lk;
    EXPECT_THROW(lk.lock(), std::system_error);
  }
  {
    MockSharedMutex mtx;
    yamc::shared_lock<MockSharedMutex> lk(mtx);
    EXPECT_THROW_SYSTEM_ERROR(std::errc::resource_deadlock_would_occur, {
      lk.lock();
    });
  }
}

// try_lock()
TEST(SharedLockTest, TryLock)
{
  MockSharedMutex mtx;
  yamc::shared_lock<MockSharedMutex> lk(mtx, std::defer_lock);
  EXPECT_TRUE(lk.try_lock());
  EXPECT_TRUE(lk.owns_lock());
}

// try_lock() failure
TEST(SharedLockTest, TryLockFail)
{
  MockSharedMutex mtx;
  mtx.retval_on_trylock = false;
  yamc::shared_lock<MockSharedMutex> lk(mtx, std::defer_lock);
  EXPECT_FALSE(lk.try_lock());
  EXPECT_FALSE(lk.owns_lock());
}

// try_lock() throw exception/operation_not_permitted
TEST(SharedLockTest, TryLockThrowEPERM)
{
  {
    yamc::shared_lock<MockSharedMutex> lk;
    EXPECT_THROW(lk.try_lock(), std::system_error);
  }
  {
    yamc::shared_lock<MockSharedMutex> lk;
    EXPECT_THROW_SYSTEM_ERROR(std::errc::operation_not_permitted, {
      lk.try_lock();
    });
  }
}

// try_lock() throw exception/resource_deadlock_would_occur
TEST(SharedLockTest, TryLockThrowEDEADLK)
{
  {
    MockSharedMutex mtx;
    yamc::shared_lock<MockSharedMutex> lk(mtx);
    EXPECT_THROW(lk.try_lock(), std::system_error);
  }
  {
    MockSharedMutex mtx;
    yamc::shared_lock<MockSharedMutex> lk(mtx);
    EXPECT_THROW_SYSTEM_ERROR(std::errc::resource_deadlock_would_occur, {
      lk.try_lock();
    });
  }
}

// unlock()
TEST(SharedLockTest, Unlock)
{
  MockSharedMutex mtx;
  yamc::shared_lock<MockSharedMutex> lk(mtx);
  EXPECT_NO_THROW(lk.unlock());
  EXPECT_FALSE(lk.owns_lock());
}

// unlock() throw system_error/operation_not_permitted
TEST(SharedLockTest, UnlockThrowEPERM)
{
  {
    MockSharedMutex mtx;
    yamc::shared_lock<MockSharedMutex> lk(mtx, std::defer_lock);
    EXPECT_THROW(lk.unlock(), std::system_error);
  }
  {
    MockSharedMutex mtx;
    yamc::shared_lock<MockSharedMutex> lk(mtx, std::defer_lock);
    EXPECT_THROW_SYSTEM_ERROR(std::errc::operation_not_permitted, {
      lk.unlock();
    });
  }
}

// void swap(shared_lock&) noexcept
TEST(SharedLockTest, Swap)
{
  MockSharedMutex mtx1, mtx2;
  yamc::shared_lock<MockSharedMutex> lk1(mtx1);                   // {&mtx1, true}
  yamc::shared_lock<MockSharedMutex> lk2(mtx2, std::defer_lock);  // {&mtx2, false}
  lk1.swap(lk2);
  EXPECT_EQ(&mtx2, lk1.mutex());
  EXPECT_FALSE(lk1.owns_lock());
  EXPECT_EQ(&mtx1, lk2.mutex());
  EXPECT_TRUE(lk2.owns_lock());
  EXPECT_TRUE(noexcept(lk1.swap(lk2)));
}

// void swap(shared_lock&, shared_lock&) noexcept
TEST(SharedLockTest, SwapNonMember)
{
  MockSharedMutex mtx1, mtx2;
  yamc::shared_lock<MockSharedMutex> lk1(mtx1);                   // {&mtx1, true}
  yamc::shared_lock<MockSharedMutex> lk2(mtx2, std::defer_lock);  // {&mtx2, false}
  std::swap(lk1, lk2);
  EXPECT_EQ(&mtx2, lk1.mutex());
  EXPECT_FALSE(lk1.owns_lock());
  EXPECT_EQ(&mtx1, lk2.mutex());
  EXPECT_TRUE(lk2.owns_lock());
  EXPECT_TRUE(noexcept(std::swap(lk1, lk2)));
}

// mutex_type* release() noexcept
TEST(SharedLockTest, Release)
{
  MockSharedMutex mtx;
  yamc::shared_lock<MockSharedMutex> lk(mtx);
EXPECT_EQ(&mtx, lk.release()); 303 | EXPECT_EQ(nullptr, lk.mutex()); 304 | EXPECT_FALSE(lk.owns_lock()); 305 | EXPECT_TRUE(noexcept(lk.release())); 306 | } 307 | 308 | // bool owns_lock() const noexcept 309 | TEST(SharedLockTest, OwnsLock) 310 | { 311 | MockSharedMutex mtx; 312 | const yamc::shared_lock lk(mtx); 313 | EXPECT_TRUE(lk.owns_lock()); 314 | EXPECT_TRUE(noexcept(lk.owns_lock())); 315 | } 316 | 317 | // explicit operator bool () const noexcept 318 | TEST(SharedLockTest, OperatorBool) 319 | { 320 | { 321 | MockSharedMutex mtx; 322 | const yamc::shared_lock lk(mtx); 323 | if (lk) { // shall be true 324 | SUCCEED(); 325 | } else { 326 | FAIL(); 327 | } 328 | } 329 | { 330 | MockSharedMutex mtx; 331 | const yamc::shared_lock lk(mtx, std::defer_lock); 332 | if (lk) { // shall be false 333 | FAIL(); 334 | } else { 335 | SUCCEED(); 336 | } 337 | } 338 | } 339 | 340 | // mutex_type* mutex() const noexcept 341 | TEST(SharedLockTest, Mutex) 342 | { 343 | MockSharedMutex mtx; 344 | const yamc::shared_lock lk(mtx); 345 | EXPECT_EQ(&mtx, lk.mutex()); 346 | EXPECT_TRUE(noexcept(lk.mutex())); 347 | } 348 | 349 | 350 | using MockMutex = yamc::mock::mutex; 351 | 352 | template > 353 | struct has_mutex_type : std::false_type { }; 354 | 355 | template 356 | struct has_mutex_type> : std::true_type { }; 357 | 358 | 359 | // scoped_lock::mutex_type 360 | TEST(ScopedLockTest, MutexType) 361 | { 362 | // scoped_lock 363 | bool shall_be_true = std::is_same::mutex_type>::value; 364 | EXPECT_TRUE(shall_be_true); 365 | } 366 | 367 | // scoped_lock::mutex_type not exist 368 | TEST(ScopedLockTest, MutexTypeNotExist) 369 | { 370 | // scoped_lock<> 371 | EXPECT_FALSE(has_mutex_type>::value); 372 | // scoped_lock 373 | bool shall_be_false = has_mutex_type>::value; 374 | EXPECT_FALSE(shall_be_false); 375 | } 376 | 377 | // explicit scoped_lock() 378 | TEST(ScopedLockTest, CtorLock0) 379 | { 380 | yamc::scoped_lock<> lk; 381 | (void)lk; // suppress "unused variable" warning 382 | 
SUCCEED(); 383 | } 384 | 385 | // explicit scoped_lock(Mutex1) 386 | TEST(ScopedLockTest, CtorLock1) 387 | { 388 | MockMutex mtx1; 389 | { 390 | yamc::scoped_lock lk(mtx1); 391 | EXPECT_TRUE(mtx1.locked); 392 | } 393 | EXPECT_FALSE(mtx1.locked); 394 | } 395 | 396 | // explicit scoped_lock(Mutex1, Mutex2) 397 | TEST(ScopedLockTest, CtorLock2) 398 | { 399 | MockMutex mtx1, mtx2; 400 | { 401 | yamc::scoped_lock lk(mtx1, mtx2); 402 | EXPECT_TRUE(mtx1.locked); 403 | EXPECT_TRUE(mtx2.locked); 404 | } 405 | EXPECT_FALSE(mtx1.locked); 406 | EXPECT_FALSE(mtx2.locked); 407 | } 408 | 409 | // explicit scoped_lock(adopt_lock_t) 410 | TEST(ScopedLockTest, CtorAdoptLock0) 411 | { 412 | yamc::scoped_lock<> lk(std::adopt_lock); 413 | (void)lk; // suppress "unused variable" warning 414 | SUCCEED(); 415 | } 416 | 417 | // explicit scoped_lock(adopt_lock_t, Mutex1) 418 | TEST(ScopedLockTest, CtorAdoptLock1) 419 | { 420 | MockMutex mtx1; 421 | mtx1.locked = true; 422 | { 423 | yamc::scoped_lock lk(std::adopt_lock, mtx1); 424 | } 425 | EXPECT_FALSE(mtx1.locked); 426 | } 427 | 428 | // explicit scoped_lock(adopt_lock_t, Mutex1, Mutex2) 429 | TEST(ScopedLockTest, CtorAdoptLock2) 430 | { 431 | MockMutex mtx1, mtx2; 432 | mtx1.locked = mtx2.locked = true; 433 | { 434 | yamc::scoped_lock lk(std::adopt_lock, mtx1, mtx2); 435 | } 436 | EXPECT_FALSE(mtx1.locked); 437 | EXPECT_FALSE(mtx2.locked); 438 | } 439 | 440 | // avoid deadlock 441 | TEST(ScopedLockTest, AvoidDeadlock) 442 | { 443 | SETUP_STEPTEST; 444 | yamc::test::phaser phaser(2); 445 | using Mutex1 = std::mutex; 446 | using Mutex2 = std::recursive_mutex; 447 | Mutex1 mtx1; 448 | Mutex2 mtx2; 449 | yamc::test::task_runner(2, [&](std::size_t id) { 450 | auto ph = phaser.get(id); 451 | switch (id) { 452 | case 0: 453 | // lock order: 1->2 454 | ASSERT_NO_THROW(mtx1.lock()); 455 | ASSERT_NO_THROW(mtx2.lock()); 456 | ph.await(); // p1 457 | { 458 | yamc::scoped_lock lk(std::adopt_lock, mtx1, mtx2); 459 | EXPECT_STEP(1); 460 | ph.await(); 
// p2 461 | } 462 | ph.await(); // p3 463 | // lock order: 2->1 464 | ASSERT_NO_THROW(mtx2.lock()); 465 | ASSERT_NO_THROW(mtx1.lock()); 466 | ph.await(); // p4 467 | { 468 | yamc::scoped_lock lk(std::adopt_lock, mtx1, mtx2); 469 | EXPECT_STEP(3); 470 | ph.await(); // p5 471 | } 472 | break; 473 | case 1: 474 | ph.await(); // p1 475 | ph.advance(1); // p2 476 | { 477 | yamc::scoped_lock lk(mtx1, mtx2); 478 | EXPECT_STEP(2); 479 | } 480 | ph.await(); // p3 481 | ph.await(); // p4 482 | ph.advance(1); // p5 483 | { 484 | yamc::scoped_lock lk(mtx1, mtx2); 485 | EXPECT_STEP(4); 486 | } 487 | break; 488 | } 489 | }); 490 | } 491 | --------------------------------------------------------------------------------