├── CPPLINT.cfg ├── README.md ├── BUILD ├── src └── ccbase │ ├── version.h │ ├── common.h │ ├── thread.h │ ├── thread.cc │ ├── thread_local_obj.h │ ├── token_bucket.h │ ├── timer_wheel.h │ ├── eventfd.h │ ├── worker_group.h │ ├── concurrent_ptr.h │ ├── worker_pool.h │ ├── fast_queue.h │ ├── worker_group.cc │ ├── token_bucket.cc │ ├── macro_list.h │ ├── accumulated_list.h │ ├── worker_pool.cc │ ├── dispatch_queue.h │ ├── closure.h.pump │ ├── timer_wheel.cc │ └── memory_reclamation.h └── test ├── eventfd_test.cc ├── thread_test.cc ├── fast_queue_test.cc ├── token_bucket_test.cc ├── worker_group_test.cc ├── worker_pool_test.cc ├── memory_reclamation_test.cc ├── accumulated_list_test.cc ├── dispatch_queue_test.cc ├── closure_test.cc ├── timer_wheel_test.cc └── concurrent_ptr_test.cc /CPPLINT.cfg: -------------------------------------------------------------------------------- 1 | root=src 2 | linelength=100 3 | filter=-build/c++11 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CCBASE 2 | 3 | ## Introduction 4 | 5 | CCBASE is a C++11 base library aimed for high performance server development. It contains a collection of useful foundation classes which are good complement to STL. 
6 | 7 | Main features: 8 | 9 | * designed for high-performance, multi-threading, production environment 10 | * only common building blocks for high-volume server development 11 | * make full use of lock-free techniques 12 | * use what C++11 has and concentrate on what C++11 lacks 13 | * clean and robust code with sufficient tests 14 | 15 | Currently CCBASE mainly contains these components: 16 | 17 | * lock-free M to N fifo queue 18 | * O(1) timer manager using timer wheel 19 | * lock-free worker thread pool 20 | * a faster closure implementation (than std::function) 21 | * thread safe memory reclamation 22 | * a faster lock-free concurrent smart pointer (than atomic shared\_ptr) 23 | * token bucket implementation 24 | 25 | ## HowToUse 26 | 27 | It provides a [bazel](https://bazel.build) BUILD file so just set deps on it if you are using bazel, or you need to import .h and .cc files to your build system. 28 | 29 | If you want to build unit-tests, install bazel and [gtestx](https://github.com/mikewei/gtestx) first. 
30 | 31 | -------------------------------------------------------------------------------- /BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | cc_library( 4 | name = "ccbase", 5 | includes = ["src"], 6 | copts = [ 7 | "-g", 8 | "-O2", 9 | "-Wall", 10 | ], 11 | linkopts = [ 12 | "-lrt", 13 | "-pthread", 14 | ], 15 | nocopts = "-fPIC", 16 | linkstatic = 1, 17 | srcs = glob([ 18 | "src/ccbase/*.cc", 19 | "src/ccbase/*.h", 20 | ]), 21 | deps = [], 22 | ) 23 | 24 | cc_test( 25 | name = "test", 26 | copts = [ 27 | "-g", 28 | "-O2", 29 | "-Wall", 30 | ], 31 | nocopts = "-fPIC", 32 | linkstatic = 1, 33 | srcs = glob(["test/*_test.cc"]), 34 | deps = [ 35 | ":ccbase", 36 | "//gtestx", 37 | ], 38 | ) 39 | 40 | cc_library( 41 | name = "ccbase_diag", 42 | includes = ["src"], 43 | copts = [ 44 | "-g", 45 | "-O2", 46 | "-Wall", 47 | "-fsanitize=address", 48 | ], 49 | linkopts = [ 50 | "-lrt", 51 | "-pthread", 52 | ], 53 | nocopts = "-fPIC", 54 | linkstatic = 1, 55 | srcs = glob([ 56 | "src/ccbase/*.cc", 57 | "src/ccbase/*.h", 58 | ]), 59 | deps = [], 60 | ) 61 | 62 | cc_test( 63 | name = "test_diag", 64 | copts = [ 65 | "-g", 66 | "-O2", 67 | "-Wall", 68 | "-fsanitize=address", 69 | ], 70 | linkopts = [ 71 | "-fsanitize=address", 72 | "-static-libasan", 73 | ], 74 | nocopts = "-fPIC", 75 | linkstatic = 1, 76 | srcs = glob(["test/*_test.cc"]), 77 | deps = [ 78 | ":ccbase_diag", 79 | "//gtestx", 80 | ], 81 | ) 82 | 83 | -------------------------------------------------------------------------------- /src/ccbase/version.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 
3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef CCBASE_VERSION_H_
#define CCBASE_VERSION_H_

// Version macros conform to Semantic Versioning (http://semver.org).
#define CCBASE_MAJOR_VERSION 1
#define CCBASE_MINOR_VERSION 0
#define CCBASE_PATCH_VERSION 0
// Correctly spelled macro; prefer this in new code.
#define CCBASE_PRERELEASE_VERSION beta3
// Misspelled legacy name ("PRERLEASE") kept for backward compatibility with
// existing users; do not use in new code.
#define CCBASE_PRERLEASE_VERSION beta3

#endif  // CCBASE_VERSION_H_
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CCBASE_COMMON_H_ 31 | #define CCBASE_COMMON_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | #define CCB_NOT_COPYABLE_AND_MOVABLE(ClassName) \ 39 | ClassName(const ClassName&) = delete; \ 40 | void operator=(const ClassName&) = delete; \ 41 | ClassName(ClassName&&) = delete; \ 42 | void operator=(ClassName&&) = delete 43 | 44 | #endif // CCBASE_COMMON_H_ 45 | -------------------------------------------------------------------------------- /src/ccbase/thread.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 
17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CCBASE_THREAD_H_ 31 | #define CCBASE_THREAD_H_ 32 | 33 | #include 34 | #include 35 | #include "ccbase/common.h" 36 | #include "ccbase/closure.h" 37 | 38 | namespace ccb { 39 | 40 | std::thread CreateThread(ClosureFunc func); 41 | std::thread CreateThread(const std::string& name, ClosureFunc func); 42 | void CreateDetachedThread(ClosureFunc func); 43 | void CreateDetachedThread(const std::string& name, ClosureFunc func); 44 | 45 | } // namespace ccb 46 | 47 | #endif // CCBASE_THREAD_H_ 48 | -------------------------------------------------------------------------------- /test/eventfd_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 
10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #include "gtestx/gtestx.h" 31 | #include "ccbase/eventfd.h" 32 | 33 | TEST(EventFd, Simple) { 34 | ccb::EventFd efd; 35 | ASSERT_FALSE(efd.Get()); 36 | ASSERT_TRUE(efd.Notify()); 37 | ASSERT_TRUE(efd.Get()); 38 | ASSERT_FALSE(efd.GetWait(10)); 39 | } 40 | 41 | PERF_TEST(EventFd, ConstructPerf) { 42 | ccb::EventFd efd; 43 | } 44 | 45 | PERF_TEST(EventFd, NotifyAndGet) { 46 | static ccb::EventFd efd; 47 | efd.Notify(); 48 | efd.Get(); 49 | } 50 | 51 | PERF_TEST(EventFd, CtorNotifyAndGet) { 52 | ccb::EventFd efd; 53 | efd.Notify(); 54 | efd.Get(); 55 | } 56 | -------------------------------------------------------------------------------- /test/thread_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #include 31 | #include 32 | #include 33 | #include "gtestx/gtestx.h" 34 | #include "ccbase/thread.h" 35 | 36 | TEST(Thread, Test) { 37 | std::atomic_int i{4}; 38 | ccb::CreateDetachedThread([&i] { 39 | i--; 40 | }); 41 | ccb::CreateDetachedThread("detached-thread", [&i] { 42 | i--; 43 | }); 44 | usleep(10000); 45 | std::thread t1 = ccb::CreateThread([&i] { 46 | i--; 47 | }); 48 | std::thread t2 = ccb::CreateThread("thread", [&i] { 49 | i--; 50 | }); 51 | t1.join(); 52 | t2.join(); 53 | ASSERT_EQ(0, i); 54 | } 55 | -------------------------------------------------------------------------------- /src/ccbase/thread.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 
14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #include 31 | #include 32 | #include "ccbase/thread.h" 33 | 34 | namespace ccb { 35 | 36 | static void SetThreadName(const std::string& name) { 37 | if (name.size() > 0) { 38 | char buf[17]; 39 | prctl(PR_GET_NAME, buf, 0, 0, 0); 40 | buf[16] = 0; 41 | std::string str(buf); 42 | size_t slash_pos = str.find_last_of('/'); 43 | if (slash_pos != std::string::npos) { 44 | str.erase(slash_pos); 45 | } 46 | str.append("/"); 47 | str.append(name); 48 | prctl(PR_SET_NAME, str.c_str(), 0, 0, 0); 49 | } 50 | } 51 | 52 | std::thread CreateThread(ClosureFunc func) { 53 | return std::thread(std::move(func)); 54 | } 55 | 56 | std::thread CreateThread(const std::string& name, ClosureFunc func) { 57 | return std::thread([name, func] { 58 | SetThreadName(name); 59 | func(); 60 | }); 61 | } 62 | 63 | void CreateDetachedThread(ClosureFunc func) { 64 | std::thread(std::move(func)).detach(); 65 | } 66 | 67 | void CreateDetachedThread(const std::string& name, ClosureFunc func) { 68 | std::thread([name, func] { 69 | SetThreadName(name); 70 | func(); 71 | }).detach(); 72 | } 73 | 74 | } // namespace ccb 75 | -------------------------------------------------------------------------------- /src/ccbase/thread_local_obj.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2016-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 
14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CCBASE_THREAD_LOCAL_OBJ_H_ 31 | #define CCBASE_THREAD_LOCAL_OBJ_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | 37 | namespace ccb { 38 | 39 | template 40 | class ThreadLocalObj { 41 | public: 42 | ThreadLocalObj() { 43 | instance_id_ = next_instance_id_.fetch_add(1); 44 | } 45 | size_t instance_id() const { 46 | return instance_id_; 47 | } 48 | T& get() const { 49 | return (instance_id_ < kInstanceCacheSize ? 
tls_obj_cache_[instance_id_] 50 | : tls_obj_map_[instance_id_]); 51 | } 52 | 53 | private: 54 | CCB_NOT_COPYABLE_AND_MOVABLE(ThreadLocalObj); 55 | 56 | size_t instance_id_; 57 | static constexpr size_t kInstanceCacheSize = 64; 58 | static thread_local std::unordered_map tls_obj_map_; 59 | static thread_local std::array tls_obj_cache_; 60 | static std::atomic next_instance_id_; 61 | }; 62 | 63 | template 64 | thread_local std::unordered_map 65 | ThreadLocalObj::tls_obj_map_{128}; 66 | 67 | template 68 | thread_local std::array::kInstanceCacheSize> 69 | ThreadLocalObj::tls_obj_cache_; 70 | 71 | template 72 | std::atomic 73 | ThreadLocalObj::next_instance_id_{0}; 74 | 75 | } // namespace ccb 76 | 77 | #endif // CCBASE_THREAD_LOCAL_OBJ_H_ 78 | -------------------------------------------------------------------------------- /src/ccbase/token_bucket.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CCBASE_TOKEN_BUCKET_H_ 31 | #define CCBASE_TOKEN_BUCKET_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include "ccbase/common.h" 38 | 39 | namespace ccb { 40 | 41 | class TokenBucket { 42 | public: 43 | explicit TokenBucket(uint32_t tokens_per_sec); 44 | TokenBucket(uint32_t tokens_per_sec, 45 | uint32_t bucket_size); 46 | TokenBucket(uint32_t tokens_per_sec, 47 | uint32_t bucket_size, 48 | uint32_t init_tokens, 49 | const struct timeval* tv_now = nullptr, 50 | bool enable_lock_for_mt = true); 51 | 52 | void Gen(const struct timeval* tv_now = nullptr); 53 | bool Get(uint32_t need_tokens = 1); 54 | void Mod(uint32_t tokens_per_sec, 55 | uint32_t bucket_size); 56 | void Mod(uint32_t tokens_per_sec, 57 | uint32_t bucket_size, 58 | uint32_t init_tokens); 59 | uint32_t tokens() const; 60 | bool Check(uint32_t need_tokens); 61 | int Overdraft(uint32_t need_tokens); 62 | 63 | private: 64 | CCB_NOT_COPYABLE_AND_MOVABLE(TokenBucket); 65 | 66 | uint32_t tokens_per_sec_; 67 | uint32_t bucket_size_; 68 | uint64_t last_gen_time_; 69 | uint64_t last_calc_delta_; 70 | std::atomic token_count_; 71 | bool enable_lock_; 72 | std::mutex gen_mutex_; 73 | }; 74 | 75 | inline uint32_t TokenBucket::tokens() const { 76 | int64_t token_count = token_count_.load(std::memory_order_relaxed); 77 | return static_cast(token_count <= 0 ? 
0 : token_count); 78 | } 79 | 80 | } // namespace ccb 81 | 82 | #endif // CCBASE_TOKEN_BUCKET_H_ 83 | -------------------------------------------------------------------------------- /src/ccbase/timer_wheel.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #ifndef CCBASE_TIMER_WHEEL_H_ 31 | #define CCBASE_TIMER_WHEEL_H_ 32 | 33 | #include 34 | #include "ccbase/closure.h" 35 | #include "ccbase/common.h" 36 | 37 | namespace ccb { 38 | 39 | class TimerWheelImpl; 40 | class TimerWheelNode; 41 | class TimerOwner; 42 | 43 | using tick_t = uint64_t; 44 | 45 | class TimerWheel { 46 | public: 47 | explicit TimerWheel(size_t us_per_tick = 1000, 48 | bool enable_lock_for_mt = true); 49 | ~TimerWheel(); 50 | 51 | bool AddTimer(tick_t timeout, 52 | ClosureFunc callback, 53 | TimerOwner* owner = nullptr); 54 | bool ResetTimer(const TimerOwner& owner, 55 | tick_t timeout); 56 | bool AddPeriodTimer(tick_t timeout, 57 | ClosureFunc callback, 58 | TimerOwner* owner = nullptr); 59 | bool ResetPeriodTimer(const TimerOwner& owner, 60 | tick_t timeout); 61 | 62 | void MoveOn(); 63 | void MoveOn(ClosureFunc)> sched_func); 64 | 65 | size_t GetTimerCount() const; 66 | tick_t GetCurrentTick() const; 67 | 68 | private: 69 | CCB_NOT_COPYABLE_AND_MOVABLE(TimerWheel); 70 | 71 | std::shared_ptr pimpl_; 72 | }; 73 | 74 | class TimerOwner { 75 | public: 76 | TimerOwner(); 77 | ~TimerOwner(); // delete the timer 78 | 79 | bool has_timer() const { 80 | return static_cast(timer_); 81 | } 82 | void Cancel(); 83 | 84 | private: 85 | CCB_NOT_COPYABLE_AND_MOVABLE(TimerOwner); 86 | 87 | std::unique_ptr timer_; 88 | std::shared_ptr timer_wheel_; 89 | friend class TimerWheelImpl; 90 | }; 91 | 92 | } // namespace ccb 93 | 94 | #endif // CCBASE_TIMER_WHEEL_H_ 95 | -------------------------------------------------------------------------------- /src/ccbase/eventfd.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 
3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #ifndef CCBASE_EVENTFD_H_ 31 | #define CCBASE_EVENTFD_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include "ccbase/common.h" 39 | 40 | namespace ccb { 41 | 42 | class EventFd { 43 | public: 44 | // useful API 45 | EventFd() : EventFd(0, EFD_NONBLOCK) {} 46 | bool Notify() { 47 | return Write(1); 48 | } 49 | bool Get() { 50 | uint64_t val; 51 | return Read(&val); 52 | } 53 | bool GetWait(int timeout = -1) { 54 | struct pollfd pfd; 55 | pfd.fd = fd_; 56 | pfd.events = POLLIN; 57 | while (!Get()) { 58 | if (poll(&pfd, 1, timeout) <= 0) { 59 | return false; 60 | } 61 | } 62 | return true; 63 | } 64 | 65 | // syscall wrapper 66 | EventFd(unsigned int initval, int flags) { 67 | int res = eventfd(initval, flags); 68 | if (res < 0) { 69 | throw std::system_error(errno, std::system_category(), "eventfd create fail"); 70 | } 71 | fd_ = res; 72 | } 73 | ~EventFd() { 74 | close(fd_); 75 | } 76 | bool Write(uint64_t val) { 77 | int res = write(fd_, &val, sizeof(uint64_t)); 78 | if (res < 0 && errno != EAGAIN) { 79 | throw std::system_error(errno, std::system_category(), "eventfd write fail"); 80 | } 81 | return (res == sizeof(uint64_t)); 82 | } 83 | bool Read(uint64_t* val) { 84 | int res = read(fd_, val, sizeof(uint64_t)); 85 | if (res < 0 && errno != EAGAIN) { 86 | throw std::system_error(errno, std::system_category(), "eventfd read fail"); 87 | } 88 | return (res == sizeof(uint64_t)); 89 | } 90 | int fd() { 91 | return fd_; 92 | } 93 | 94 | private: 95 | CCB_NOT_COPYABLE_AND_MOVABLE(EventFd); 96 | 97 | int fd_; 98 | }; 99 | 100 | } // namespace ccb 101 | 102 | #endif // CCBASE_EVENTFD_H_ 103 | -------------------------------------------------------------------------------- /test/fast_queue_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 
3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #include 31 | #include 32 | #include 33 | #include "gtestx/gtestx.h" 34 | #include "ccbase/fast_queue.h" 35 | 36 | #define QSIZE (1000000) 37 | 38 | using TestTypes = testing::Types; 39 | 40 | template 41 | class FastQueueTest : public testing::Test { 42 | protected: 43 | FastQueueTest() : 44 | fq_(QSIZE), 45 | count_(0), 46 | overflow_(0), 47 | stop_(false), 48 | err_found_(false) {} 49 | void SetUp() { 50 | thread_ = std::thread(&FastQueueTest::ThreadMain, this); 51 | timer_thread_ = std::thread([this] { 52 | unsigned count = 0; 53 | while (!stop_.load(std::memory_order_relaxed)) { 54 | std::this_thread::sleep_for(std::chrono::milliseconds(10)); 55 | if (++count % 100 == 0) OnTimer(); 56 | } 57 | }); 58 | } 59 | void TearDown() { 60 | stop_.store(true, std::memory_order_relaxed); 61 | thread_.join(); 62 | timer_thread_.join(); 63 | } 64 | void ThreadMain() { 65 | std::cout << "consumer thread start" << std::endl; 66 | int val, check = 0; 67 | while (!stop_.load(std::memory_order_relaxed)) { 68 | if (!fq_.PopWait(&val, 100)) { 69 | if (!stop_) { 70 | std::cout << "consumer read nothing!" 
<< std::endl; 71 | exit(1); 72 | } 73 | continue; 74 | } 75 | if (val != check++) { 76 | std::cout << "thread_main check failed" << std::endl; 77 | err_found_.store(true, std::memory_order_relaxed); 78 | break; 79 | } 80 | count_.fetch_add(1, std::memory_order_relaxed); 81 | } 82 | std::cout << "consumer thread exit" << std::endl; 83 | } 84 | void OnTimer() { 85 | std::cout << "read " << count_ << "/s overflow " << overflow_ << std::endl; 86 | count_ = overflow_ = 0; 87 | } 88 | ccb::FastQueue fq_; 89 | std::atomic count_; 90 | std::atomic overflow_; 91 | std::thread thread_; 92 | std::thread timer_thread_; 93 | std::atomic_bool stop_; 94 | std::atomic_bool err_found_; 95 | }; 96 | TYPED_TEST_CASE(FastQueueTest, TestTypes); 97 | 98 | TYPED_PERF_TEST_OPT(FastQueueTest, IO_Perf, 1000000, 1500) { 99 | static int val = 0; 100 | if (this->fq_.Push(val)) { 101 | val++; 102 | } else { 103 | this->overflow_++; 104 | } 105 | if ((val & 0xfff) == 0) { 106 | EXPECT_FALSE(this->err_found_.load(std::memory_order_relaxed)); 107 | EXPECT_EQ(0UL, this->overflow_); 108 | } 109 | } 110 | 111 | -------------------------------------------------------------------------------- /test/token_bucket_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 
14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #include 31 | #include 32 | #include 33 | #include "gtestx/gtestx.h" 34 | #include "ccbase/token_bucket.h" 35 | 36 | class TokenBucketTest : public testing::TestWithParam { 37 | protected: 38 | TokenBucketTest() {} 39 | virtual ~TokenBucketTest() {} 40 | virtual void SetUp() { 41 | tb_.reset(new ccb::TokenBucket(n_, n_, n_, nullptr, GetParam())); 42 | } 43 | virtual void TearDown() { 44 | } 45 | std::unique_ptr tb_; 46 | static constexpr unsigned int n_ = 10000; 47 | }; 48 | 49 | constexpr unsigned int TokenBucketTest::n_; 50 | 51 | INSTANTIATE_TEST_CASE_P(IsEnableLock, TokenBucketTest, testing::Values(false, true)); 52 | 53 | TEST_P(TokenBucketTest, Get) { 54 | ASSERT_GE(tb_->tokens(), n_); 55 | ASSERT_TRUE(tb_->Check(n_)); 56 | ASSERT_TRUE(tb_->Get(tb_->tokens())); 57 | ASSERT_EQ(0U, tb_->tokens()); 58 | ASSERT_FALSE(tb_->Check(1)); 59 | ASSERT_FALSE(tb_->Get(1)); 60 | sleep(1); 61 | tb_->Gen(); 62 | ASSERT_EQ(n_, tb_->tokens()); 63 | for (unsigned int i = 0; i < n_; i++) { 64 | ASSERT_TRUE(tb_->Check(1)); 65 | ASSERT_TRUE(tb_->Get(1)); 66 | } 67 | ASSERT_FALSE(tb_->Check(1)); 68 | ASSERT_FALSE(tb_->Get(1)); 69 | } 70 | 71 | PERF_TEST_P(TokenBucketTest, GetPerf) { 72 | if (!tb_->Get(1)) tb_->Mod(n_, n_, n_); 73 | } 74 | 75 | PERF_TEST_P(TokenBucketTest, GenPerf) { 76 | tb_->Gen(); 77 | } 78 | 79 | class TokenBucketMTTest : public testing::Test { 80 | protected: 81 | TokenBucketMTTest() : counter_(0), stop_flag_(false) {} 82 | virtual ~TokenBucketMTTest() {} 83 | virtual void SetUp() { 84 | tb_.reset(new ccb::TokenBucket(n_, n_, 0, nullptr, true)); 85 | gen_thread_1_ = std::thread([this] { 86 | while (!stop_flag_) tb_->Gen(); 87 | }); 88 | gen_thread_2_ = std::thread([this] { 89 | while (!stop_flag_) tb_->Gen(); 90 | }); 91 | get_thread_1_ = std::thread([this] { 92 | while (!stop_flag_) { 93 | if (tb_->Get()) counter_++; 94 | } 95 | }); 96 | mon_thread_ = std::thread([this] { 97 | size_t last_counter = 0; 98 | while (!stop_flag_) { 99 | sleep(1); 100 
| size_t cur_counter = counter_.load(); 101 | fprintf(stderr, "get %lu tokens/s\n", cur_counter - last_counter); 102 | last_counter = cur_counter; 103 | } 104 | }); 105 | } 106 | virtual void TearDown() { 107 | stop_flag_ = true; 108 | mon_thread_.join(); 109 | get_thread_1_.join(); 110 | gen_thread_2_.join(); 111 | gen_thread_1_.join(); 112 | } 113 | static constexpr unsigned int n_ = 10000; 114 | std::thread gen_thread_1_; 115 | std::thread gen_thread_2_; 116 | std::thread get_thread_1_; 117 | std::thread mon_thread_; 118 | std::atomic counter_; 119 | std::atomic stop_flag_; 120 | std::unique_ptr tb_; 121 | }; 122 | 123 | constexpr unsigned int TokenBucketMTTest::n_; 124 | 125 | PERF_TEST_F(TokenBucketMTTest, Get) { 126 | while (!tb_->Get()) { 127 | } 128 | counter_++; 129 | } 130 | 131 | -------------------------------------------------------------------------------- /test/worker_group_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 
17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #include 31 | #include 32 | #include "gtestx/gtestx.h" 33 | #include "ccbase/worker_group.h" 34 | #include "ccbase/token_bucket.h" 35 | 36 | #define QSIZE 1000000 37 | #define DEFAULT_HZ 1000000 38 | #define DEFAULT_TIME 1500 39 | 40 | DECLARE_uint64(hz); 41 | 42 | using Worker = ccb::WorkerGroup::Worker; 43 | 44 | class WorkerGroupTest : public testing::Test { 45 | protected: 46 | void SetUp() { 47 | } 48 | void TearDown() { 49 | } 50 | 51 | ccb::WorkerGroup worker_group_1_{2, QSIZE}; 52 | ccb::WorkerGroup worker_group_2_{2, QSIZE}; 53 | std::atomic_int val{0}; 54 | }; 55 | 56 | TEST_F(WorkerGroupTest, PostTask) { 57 | worker_group_1_.PostTask([this] { 58 | val++; 59 | }); 60 | worker_group_1_.PostTask(1, [this] { 61 | val++; 62 | }); 63 | worker_group_2_.PostTask([this] { 64 | val++; 65 | }); 66 | worker_group_2_.PostTask(0, [this] { 67 | val++; 68 | }); 69 | usleep(10000); 70 | ASSERT_EQ(4, val); 71 | worker_group_1_.PostTask([this] { 72 | val--; 73 | }, 50); 74 | worker_group_1_.PostTask(1, [this] { 75 | val--; 76 | }, 10); 77 | worker_group_2_.PostTask([this] { 78 | val--; 79 | }, 20); 80 | worker_group_2_.PostTask(0, [this] 
{ 81 | val--; 82 | }, 30); 83 | ASSERT_EQ(4, val); 84 | usleep(55000); 85 | ASSERT_EQ(0, val); 86 | } 87 | 88 | TEST_F(WorkerGroupTest, PostPeriodTask) { 89 | worker_group_1_.PostPeriodTask([this] { 90 | val++; 91 | }, 20); 92 | worker_group_1_.PostPeriodTask(1, [this] { 93 | val++; 94 | }, 20); 95 | worker_group_2_.PostPeriodTask([this] { 96 | val++; 97 | }, 20); 98 | worker_group_2_.PostPeriodTask(0, [this] { 99 | val++; 100 | }, 20); 101 | usleep(5000); 102 | ASSERT_EQ(0, val); 103 | usleep(20000); 104 | ASSERT_EQ(4, val); 105 | usleep(20000); 106 | ASSERT_EQ(8, val); 107 | usleep(20000); 108 | ASSERT_EQ(12, val); 109 | } 110 | 111 | TEST_F(WorkerGroupTest, WorkerSelf) { 112 | worker_group_1_.PostTask([this] { 113 | val++; 114 | Worker::self()->PostTask([this] { 115 | val++; 116 | }); 117 | }); 118 | usleep(20000); 119 | ASSERT_EQ(2, val); 120 | } 121 | 122 | TEST_F(WorkerGroupTest, WorkerTls) { 123 | worker_group_1_.PostTask(0, [] { 124 | Worker::tls()++; 125 | }); 126 | worker_group_1_.PostTask(0, [] { 127 | Worker::tls()++; 128 | }); 129 | worker_group_1_.PostTask(0, [this] { 130 | val = Worker::tls(); 131 | }); 132 | usleep(20000); 133 | ASSERT_EQ(2, val); 134 | } 135 | 136 | TEST_F(WorkerGroupTest, WorkerPoller) { 137 | ccb::WorkerGroup::Poller* poller1 = nullptr; 138 | ccb::WorkerGroup::Poller* poller2 = nullptr; 139 | worker_group_1_.PostTask(0, [&poller1] { 140 | poller1 = Worker::self()->poller(); 141 | poller1->Poll(0); 142 | }); 143 | worker_group_1_.PostTask(1, [&poller2] { 144 | poller2 = Worker::self()->poller(); 145 | poller2->Poll(0); 146 | }); 147 | usleep(20000); 148 | ASSERT_NE(nullptr, poller1); 149 | ASSERT_EQ(poller1, poller2); 150 | } 151 | 152 | PERF_TEST_F_OPT(WorkerGroupTest, PostTaskPerf, DEFAULT_HZ, DEFAULT_TIME) { 153 | ASSERT_TRUE(worker_group_1_.PostTask([]{})) << PERF_ABORT; 154 | } 155 | 156 | PERF_TEST_F_OPT(WorkerGroupTest, PostSharedTaskPerf, DEFAULT_HZ, DEFAULT_TIME) { 157 | static ccb::ClosureFunc f{[]{}}; 158 | 
ASSERT_TRUE(worker_group_1_.PostTask(f)) << PERF_ABORT; 159 | } 160 | 161 | PERF_TEST_F(WorkerGroupTest, SpawnThreadPostTask) { 162 | std::thread([this] { 163 | ASSERT_TRUE(worker_group_1_.PostTask([]{})) << PERF_ABORT; 164 | }).join(); 165 | } 166 | 167 | TEST(WorkerGroupDtorTest, DestructBeforeThreadExit) { 168 | ccb::WorkerGroup worker_group{1, QSIZE}; 169 | worker_group.PostTask([]{}); 170 | } 171 | -------------------------------------------------------------------------------- /src/ccbase/worker_group.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CCBASE_WORKER_GROUP_H_ 31 | #define CCBASE_WORKER_GROUP_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include "ccbase/common.h" 38 | #include "ccbase/closure.h" 39 | #include "ccbase/timer_wheel.h" 40 | #include "ccbase/dispatch_queue.h" 41 | #include "ccbase/thread_local_obj.h" 42 | 43 | namespace ccb { 44 | 45 | class WorkerGroup { 46 | public: 47 | using TaskQueue = DispatchQueue>; 48 | 49 | class Poller { 50 | public: 51 | virtual ~Poller() {} 52 | // if timeout_ms is 0 the Poll call must be non-blocking 53 | virtual void Poll(size_t timeout_ms) = 0; 54 | }; 55 | using PollerSupplier = ClosureFunc(size_t worker_id)>; 56 | 57 | class Worker : public TimerWheel { 58 | public: 59 | ~Worker(); 60 | static Worker* self() { 61 | return tls_self_; 62 | } 63 | template static T& tls() { 64 | static thread_local T tls_ctx; 65 | return tls_ctx; 66 | } 67 | size_t id() const { 68 | return id_; 69 | } 70 | WorkerGroup* worker_group() const { 71 | return group_; 72 | } 73 | Poller* poller() const { 74 | return poller_.get(); 75 | } 76 | bool PostTask(ClosureFunc func); 77 | 78 | private: 79 | CCB_NOT_COPYABLE_AND_MOVABLE(Worker); 80 | 81 | Worker(WorkerGroup* grp, size_t id, TaskQueue::InQueue* q, 82 | std::shared_ptr poller); 83 | void WorkerMainEntry(); 84 | size_t BatchProcessTasks(size_t max); 85 | 86 | WorkerGroup* group_; 87 | size_t id_; 88 | TaskQueue::InQueue* inq_; 89 
| std::shared_ptr poller_; 90 | std::atomic_bool stop_flag_; 91 | std::thread thread_; 92 | static thread_local Worker* tls_self_; 93 | friend class WorkerGroup; 94 | }; 95 | 96 | public: 97 | WorkerGroup(size_t worker_num, size_t queue_size); 98 | WorkerGroup(size_t worker_num, size_t queue_size, 99 | PollerSupplier poller_supplier); 100 | ~WorkerGroup(); 101 | size_t id() const { 102 | return tls_client_ctx_.instance_id(); 103 | } 104 | size_t size() const { 105 | return workers_.size(); 106 | } 107 | bool is_current_thread() { 108 | Worker* worker = Worker::self(); 109 | return (worker && worker->group_ == this); 110 | } 111 | bool is_current_thread(size_t worker_id) { 112 | Worker* worker = Worker::self(); 113 | return (worker && worker->group_ == this && worker->id() == worker_id); 114 | } 115 | 116 | bool PostTask(ClosureFunc func); 117 | bool PostTask(size_t worker_id, ClosureFunc func); 118 | bool PostTask(ClosureFunc func, size_t delay_ms); 119 | bool PostTask(size_t worker_id, ClosureFunc func, size_t delay_ms); 120 | bool PostPeriodTask(ClosureFunc func, size_t period_ms); 121 | bool PostPeriodTask(size_t worker_id, ClosureFunc func, 122 | size_t period_ms); 123 | 124 | private: 125 | CCB_NOT_COPYABLE_AND_MOVABLE(WorkerGroup); 126 | 127 | TaskQueue::OutQueue* GetOutQueue(); 128 | 129 | struct ClientContext { 130 | std::shared_ptr queue_holder; 131 | TaskQueue::OutQueue* out_queue; 132 | 133 | ClientContext() 134 | : queue_holder(nullptr), out_queue(nullptr) {} 135 | ~ClientContext() { 136 | if (out_queue) { 137 | out_queue->Unregister(); 138 | } 139 | } 140 | operator bool() const { 141 | return out_queue; 142 | } 143 | }; 144 | 145 | std::shared_ptr queue_; 146 | std::vector> workers_; 147 | ThreadLocalObj tls_client_ctx_; 148 | }; 149 | 150 | } // namespace ccb 151 | 152 | #endif // CCBASE_WORKER_GROUP_H_ 153 | -------------------------------------------------------------------------------- /test/worker_pool_test.cc: 
-------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #include 31 | #include 32 | #include "gtestx/gtestx.h" 33 | #include "ccbase/worker_pool.h" 34 | 35 | #define QSIZE 100000 36 | #define NOP_TASK_HZ 100000 37 | #define MS1_TASK_HZ 100000 38 | #define DEFAULT_TIME 1500 39 | 40 | DECLARE_uint64(hz); 41 | 42 | class WorkerPoolTest : public testing::Test { 43 | protected: 44 | void SetUp() { 45 | } 46 | void TearDown() { 47 | } 48 | 49 | ccb::WorkerPool worker_pool_1_{1, 8, QSIZE}; 50 | ccb::WorkerPool worker_pool_2_{2, 256, QSIZE}; 51 | std::atomic_int val{0}; 52 | }; 53 | 54 | TEST_F(WorkerPoolTest, PostTask) { 55 | worker_pool_1_.PostTask([this] { 56 | val++; 57 | }); 58 | worker_pool_2_.PostTask([this] { 59 | val++; 60 | }); 61 | usleep(10000); 62 | ASSERT_EQ(2, val); 63 | } 64 | 65 | TEST_F(WorkerPoolTest, PostDelayTask) { 66 | worker_pool_1_.PostTask([this] { 67 | val++; 68 | }, 1); 69 | worker_pool_2_.PostTask([this] { 70 | val++; 71 | }, 1); 72 | usleep(10000); 73 | ASSERT_EQ(2, val); 74 | } 75 | 76 | TEST_F(WorkerPoolTest, PostPeriodTask) { 77 | worker_pool_1_.PostPeriodTask([this] { 78 | if (val < 10) val++; 79 | }, 1); 80 | usleep(50000); 81 | ASSERT_EQ(10, val); 82 | } 83 | 84 | TEST_F(WorkerPoolTest, WorkerSelf) { 85 | using Worker = ccb::WorkerPool::Worker; 86 | worker_pool_1_.PostTask([this] { 87 | val++; 88 | Worker::self()->worker_pool()->PostTask([this] { 89 | val++; 90 | }); 91 | }); 92 | usleep(20000); 93 | ASSERT_EQ(2, val); 94 | } 95 | 96 | namespace { 97 | struct TestContext : public ccb::WorkerPool::Context { 98 | int Get() const { 99 | return 1; 100 | } 101 | }; 102 | } // namespace 103 | 104 | TEST_F(WorkerPoolTest, UseContext) { 105 | ccb::WorkerPool worker_pool{1, 1, 10, [](size_t) { 106 | return std::make_shared(); 107 | }}; 108 | int value = 0; 109 | worker_pool.PostTask([&value] { 110 | value = ccb::WorkerPool::Worker::self()->context()->Get(); 111 | }); 112 | usleep(10000); 113 | ASSERT_EQ(1, value); 114 | } 115 | 116 | PERF_TEST_F_OPT(WorkerPoolTest, PostNopTaskPerf, 
NOP_TASK_HZ, DEFAULT_TIME) { 117 | static size_t counter = 0; 118 | if (++counter == NOP_TASK_HZ) { 119 | fprintf(stderr, "concurrent workers: %lu/%lu\n", 120 | worker_pool_1_.concurrent_workers(), 121 | worker_pool_1_.size()); 122 | counter = 0; 123 | } 124 | ASSERT_TRUE(worker_pool_1_.PostTask([]{})) << PERF_ABORT; 125 | } 126 | 127 | PERF_TEST_F_OPT(WorkerPoolTest, PostSleepTaskPerf, MS1_TASK_HZ, DEFAULT_TIME) { 128 | static size_t counter = 0; 129 | if (++counter == MS1_TASK_HZ) { 130 | fprintf(stderr, "concurrent workers: %lu/%lu\n", 131 | worker_pool_2_.concurrent_workers(), 132 | worker_pool_2_.size()); 133 | counter = 0; 134 | } 135 | ASSERT_TRUE(worker_pool_2_.PostTask([]{usleep(1000);})) << PERF_ABORT; 136 | } 137 | 138 | PERF_TEST_F_OPT(WorkerPoolTest, PostSleepChangePerf, MS1_TASK_HZ, DEFAULT_TIME) { 139 | static size_t counter = 0; 140 | static size_t round = 0; 141 | if (++counter == MS1_TASK_HZ) { 142 | fprintf(stderr, "concurrent workers: %lu/%lu round: %lu\n", 143 | worker_pool_2_.concurrent_workers(), 144 | worker_pool_2_.size(), 145 | round); 146 | counter = 0; 147 | round++; 148 | } 149 | if (round/20%2 == 1) { 150 | if (counter%10 != 0) { // 1/10 workload 151 | return; 152 | } 153 | } 154 | ASSERT_TRUE(worker_pool_2_.PostTask([]{usleep(1000);})) << PERF_ABORT; 155 | } 156 | 157 | PERF_TEST_F_OPT(WorkerPoolTest, PostSharedTaskPerf, NOP_TASK_HZ, DEFAULT_TIME) { 158 | static ccb::ClosureFunc f{[]{}}; 159 | ASSERT_TRUE(worker_pool_1_.PostTask(f)) << PERF_ABORT; 160 | } 161 | 162 | PERF_TEST_F(WorkerPoolTest, SpawnThreadPostTask) { 163 | std::thread([this] { 164 | ASSERT_TRUE(worker_pool_1_.PostTask([]{})) << PERF_ABORT; 165 | }).join(); 166 | } 167 | 168 | TEST(WorkerPoolDtorTest, DestructBeforeThreadExit) { 169 | ccb::WorkerPool worker_pool{2, 8, QSIZE}; 170 | worker_pool.PostTask([]{}); 171 | } 172 | -------------------------------------------------------------------------------- /src/ccbase/concurrent_ptr.h: 
-------------------------------------------------------------------------------- 1 | /* Copyright (c) 2016-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #ifndef CCBASE_CONCURRENT_PTR_H_ 31 | #define CCBASE_CONCURRENT_PTR_H_ 32 | 33 | #include 34 | #include 35 | #include "ccbase/common.h" 36 | #include "ccbase/memory_reclamation.h" 37 | 38 | namespace ccb { 39 | 40 | template 41 | struct ConcurrentPtrScope {}; 42 | 43 | template 44 | class ConcurrentPtrReader { 45 | public: 46 | explicit ConcurrentPtrReader(const ConcurrentPtrType* cp) 47 | : cp_(cp) { 48 | ptr_ = cp->ReadLock(); 49 | } 50 | 51 | ~ConcurrentPtrReader() { 52 | cp_->ReadUnlock(); 53 | } 54 | 55 | typename ConcurrentPtrType::PtrType get() const { 56 | return ptr_; 57 | } 58 | 59 | typename ConcurrentPtrType::PtrType operator->() const { 60 | return get(); 61 | } 62 | 63 | private: 64 | CCB_NOT_COPYABLE_AND_MOVABLE(ConcurrentPtrReader); 65 | 66 | const ConcurrentPtrType* cp_; 67 | typename ConcurrentPtrType::PtrType ptr_; 68 | }; 69 | 70 | 71 | template , 73 | class Reclamation = EpochBasedReclamation>> 74 | class ConcurrentPtr { 75 | public: 76 | using PtrType = T*; 77 | using Reader = ConcurrentPtrReader; 78 | 79 | ConcurrentPtr() 80 | : ptr_(nullptr) {} 81 | 82 | explicit ConcurrentPtr(std::nullptr_t) 83 | : ptr_(nullptr) {} 84 | 85 | explicit ConcurrentPtr(T* ptr) 86 | : ptr_(ptr) {} 87 | 88 | /* Destructor 89 | * 90 | * Caller should garentee that no one could race with the destruction 91 | */ 92 | ~ConcurrentPtr() { 93 | if (ptr_) Deleter()(ptr_); 94 | } 95 | 96 | T* ReadLock() const { 97 | return recl_.ReadLock(&ptr_); 98 | } 99 | 100 | void ReadUnlock() const { 101 | recl_.ReadUnlock(); 102 | } 103 | 104 | void Reset(bool sync_cleanup = false) { 105 | Reset(nullptr, sync_cleanup); 106 | } 107 | 108 | void Reset(T* ptr, bool sync_cleanup = false) { 109 | T* old_ptr = ptr_.exchange(ptr, std::memory_order_seq_cst); 110 | if (old_ptr) 111 | recl_.Retire(old_ptr, Deleter()); 112 | if (sync_cleanup) 113 | recl_.RetireCleanup(); 114 | } 115 | 116 | private: 117 | CCB_NOT_COPYABLE_AND_MOVABLE(ConcurrentPtr); 118 | 119 | std::atomic ptr_; 
120 | mutable PtrReclamationAdapter recl_; 121 | }; 122 | 123 | 124 | template , 126 | class Reclamation = EpochBasedReclamation, 127 | ConcurrentPtrScope>> 128 | class ConcurrentSharedPtr 129 | : private ConcurrentPtr, 130 | std::default_delete>, 131 | Reclamation> { 132 | public: 133 | using Base = ConcurrentPtr, 134 | std::default_delete>, 135 | Reclamation>; 136 | 137 | ConcurrentSharedPtr() 138 | : Base(new std::shared_ptr(nullptr, Deleter())) {} 139 | 140 | explicit ConcurrentSharedPtr(std::nullptr_t) 141 | : Base(new std::shared_ptr(nullptr, Deleter())) {} 142 | 143 | explicit ConcurrentSharedPtr(T* ptr) 144 | : Base(new std::shared_ptr(ptr, Deleter())) {} 145 | 146 | explicit ConcurrentSharedPtr(std::shared_ptr shptr) 147 | : Base(new std::shared_ptr(std::move(shptr))) {} 148 | 149 | ~ConcurrentSharedPtr() {} 150 | 151 | std::shared_ptr Get() const { 152 | typename Base::Reader reader(this); 153 | return *reader.get(); 154 | } 155 | 156 | std::shared_ptr operator->() const { 157 | return Get(); 158 | } 159 | 160 | void Reset(bool sync_cleanup = false) { 161 | Base::Reset(new std::shared_ptr(nullptr, Deleter()), sync_cleanup); 162 | } 163 | 164 | void Reset(T* ptr, bool sync_cleanup = false) { 165 | Base::Reset(new std::shared_ptr(ptr, Deleter()), sync_cleanup); 166 | } 167 | 168 | void Reset(std::shared_ptr shptr, bool sync_cleanup = false) { 169 | Base::Reset(new std::shared_ptr(std::move(shptr)), sync_cleanup); 170 | } 171 | 172 | private: 173 | CCB_NOT_COPYABLE_AND_MOVABLE(ConcurrentSharedPtr); 174 | }; 175 | 176 | } // namespace ccb 177 | 178 | #endif // CCBASE_CONCURRENT_PTR_H_ 179 | -------------------------------------------------------------------------------- /src/ccbase/worker_pool.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 
3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #ifndef CCBASE_WORKER_POOL_H_ 31 | #define CCBASE_WORKER_POOL_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include "ccbase/common.h" 39 | #include "ccbase/closure.h" 40 | #include "ccbase/timer_wheel.h" 41 | #include "ccbase/dispatch_queue.h" 42 | #include "ccbase/thread_local_obj.h" 43 | 44 | namespace ccb { 45 | 46 | class WorkerPool { 47 | public: 48 | class Context {}; 49 | // For low schedule latency ContextSupplier should be noblocking and do 50 | // blocking initialization lazily. If null context is returned the worker 51 | // thread creation will fail. 52 | using ContextSupplier = ClosureFunc(size_t worker_id)>; 53 | 54 | class Worker { 55 | public: 56 | ~Worker(); 57 | static Worker* self() { 58 | return tls_self_; 59 | } 60 | size_t id() const { 61 | return id_; 62 | } 63 | template 64 | ContextType* context() const { 65 | return static_cast(context_.get()); 66 | } 67 | WorkerPool* worker_pool() const { 68 | return pool_; 69 | } 70 | TimerWheel* timer_wheel() const { 71 | return &pool_->timer_wheel_; 72 | } 73 | 74 | private: 75 | CCB_NOT_COPYABLE_AND_MOVABLE(Worker); 76 | 77 | Worker(WorkerPool* pool, size_t id, std::shared_ptr context); 78 | void ExitWithAutoCleanup(); 79 | void WorkerMainEntry(); 80 | 81 | WorkerPool* pool_; 82 | size_t id_; 83 | std::shared_ptr context_; 84 | std::atomic_bool stop_flag_; 85 | std::thread thread_; 86 | static thread_local Worker* tls_self_; 87 | friend class WorkerPool; 88 | }; 89 | 90 | public: 91 | WorkerPool(size_t min_workers, size_t max_workers, size_t queue_size); 92 | WorkerPool(size_t min_workers, size_t max_workers, size_t queue_size, 93 | ContextSupplier context_supplier); 94 | ~WorkerPool(); 95 | 96 | size_t id() const { 97 | return tls_client_ctx_.instance_id(); 98 | } 99 | size_t size() const { 100 | return workers_.size(); 101 | } 102 | size_t concurrent_workers() const { 103 | return busy_workers_.load(std::memory_order_relaxed); 104 | } 105 | bool 
is_current_thread() { 106 | Worker* worker = Worker::self(); 107 | return (worker && worker->pool_ == this); 108 | } 109 | 110 | bool PostTask(ClosureFunc func); 111 | bool PostTask(ClosureFunc func, size_t delay_ms); 112 | bool PostPeriodTask(ClosureFunc func, size_t period_ms); 113 | 114 | private: 115 | CCB_NOT_COPYABLE_AND_MOVABLE(WorkerPool); 116 | 117 | using TaskQueue = DispatchQueue>; 118 | 119 | bool WorkerPollTask(Worker* worker, ClosureFunc* task); 120 | bool PollTimerTaskInLock(ClosureFunc* task); 121 | void SchedTimerTaskInLock(ClosureFunc task); 122 | void WorkerBeginProcess(Worker* worker); 123 | bool WorkerEndProcess(Worker* worker); 124 | bool CheckHighWatermark(); 125 | bool CheckLowWatermark(); 126 | void ExpandWorkersInLock(size_t num); 127 | void RetireWorkerInLock(Worker* worker); 128 | TaskQueue::OutQueue* GetOutQueue(); 129 | 130 | struct ClientContext { 131 | std::shared_ptr queue_holder; 132 | TaskQueue::OutQueue* out_queue; 133 | 134 | ClientContext() 135 | : queue_holder(nullptr), out_queue(nullptr) {} 136 | ~ClientContext() { 137 | if (out_queue) { 138 | out_queue->Unregister(); 139 | } 140 | } 141 | operator bool() const { 142 | return out_queue; 143 | } 144 | }; 145 | 146 | const size_t min_workers_; 147 | const size_t max_workers_; 148 | std::atomic total_workers_; 149 | std::atomic busy_workers_; 150 | std::atomic next_worker_id_; 151 | std::atomic last_above_low_watermark_ts_; 152 | TimerWheel timer_wheel_; 153 | std::queue> timer_task_queue_; 154 | ClosureFunc)> sched_timer_task_; 155 | std::shared_ptr task_queue_; 156 | TaskQueue::InQueue* shared_inq_; 157 | ContextSupplier context_supplier_; 158 | std::map> workers_; 159 | std::mutex updating_mutex_; 160 | std::mutex polling_mutex_; 161 | ThreadLocalObj tls_client_ctx_; 162 | }; 163 | 164 | } // namespace ccb 165 | 166 | #endif // CCBASE_WORKER_POOL_H_ 167 | -------------------------------------------------------------------------------- /src/ccbase/fast_queue.h: 
-------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #ifndef CCBASE_FAST_QUEUE_H_ 31 | #define CCBASE_FAST_QUEUE_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include "ccbase/eventfd.h" 37 | #include "ccbase/common.h" 38 | 39 | namespace ccb { 40 | 41 | template 42 | class FastQueue { 43 | public: 44 | explicit FastQueue(size_t qlen); 45 | ~FastQueue(); 46 | bool Push(const T& val); 47 | bool Push(T&& val); 48 | bool Pop(T* ptr); 49 | bool PopWait(T* ptr, int timeout = -1); 50 | 51 | size_t used_size() { 52 | size_t head = head_.load(std::memory_order_acquire); 53 | size_t tail = tail_.load(std::memory_order_acquire); 54 | return (tail >= head) ? (tail - head) : (tail + qlen_ - head); 55 | } 56 | 57 | size_t free_size() { 58 | size_t head = head_.load(std::memory_order_acquire); 59 | size_t tail = tail_.load(std::memory_order_acquire); 60 | return (head > tail) ? (head - 1 - tail) : (head - 1 + qlen_ - tail); 61 | } 62 | 63 | private: 64 | CCB_NOT_COPYABLE_AND_MOVABLE(FastQueue); 65 | 66 | void move_head() { 67 | size_t head = head_.load(std::memory_order_relaxed) + 1; 68 | if (head >= qlen_) 69 | head -= qlen_; 70 | head_.store(head, std::memory_order_release); 71 | } 72 | void move_tail() { 73 | size_t tail = tail_.load(std::memory_order_relaxed) + 1; 74 | if (tail >= qlen_) 75 | tail -= qlen_; 76 | tail_.store(tail, std::memory_order_release); 77 | } 78 | 79 | using Slot = typename std::aligned_storage::type; 80 | 81 | size_t qlen_; 82 | std::atomic head_; 83 | std::atomic tail_; 84 | std::unique_ptr array_; 85 | std::unique_ptr event_; 86 | }; 87 | 88 | template 89 | FastQueue::FastQueue(size_t qlen) 90 | : qlen_(qlen), head_(0), tail_(0), 91 | array_(new Slot[qlen]), 92 | event_(kEnableNotify ? 
new EventFd() : nullptr) { 93 | } 94 | 95 | template 96 | FastQueue::~FastQueue() { 97 | while (Pop(nullptr)) {} 98 | } 99 | 100 | template 101 | bool FastQueue::Push(const T& val) { 102 | if (free_size() <= 0) { 103 | return false; 104 | } 105 | new (&array_[tail_.load(std::memory_order_relaxed)]) T(val); 106 | move_tail(); 107 | if (kEnableNotify) { 108 | // StoreLoad order require barrier 109 | std::atomic_thread_fence(std::memory_order_seq_cst); 110 | if (used_size() == 1) { 111 | event_->Notify(); 112 | } 113 | } 114 | return true; 115 | } 116 | 117 | template 118 | bool FastQueue::Push(T&& val) { 119 | if (free_size() <= 0) { 120 | return false; 121 | } 122 | new (&array_[tail_.load(std::memory_order_relaxed)]) T(std::move(val)); 123 | move_tail(); 124 | if (kEnableNotify) { 125 | // the memory fence garentees that the pushed node is visiable to all 126 | // threads before checking condition of notification 127 | std::atomic_thread_fence(std::memory_order_seq_cst); 128 | if (used_size() == 1) { 129 | event_->Notify(); 130 | } 131 | } 132 | return true; 133 | } 134 | 135 | template 136 | bool FastQueue::Pop(T* ptr) { 137 | if (kEnableNotify) { 138 | // the memory fence garentees that any previous pop is visible to all 139 | // threads before checking new node, therefore if used_size > 1 is found 140 | // when new node is pushed PopWait() will never miss it before blocking 141 | std::atomic_thread_fence(std::memory_order_seq_cst); 142 | } 143 | if (used_size() <= 0) { 144 | return false; 145 | } 146 | T* head_slot = reinterpret_cast( 147 | &array_[head_.load(std::memory_order_relaxed)]); 148 | if (ptr) *ptr = std::move(*head_slot); 149 | head_slot->~T(); 150 | move_head(); 151 | return true; 152 | } 153 | 154 | template 155 | bool FastQueue::PopWait(T* ptr, int timeout) { 156 | if (kEnableNotify) { 157 | while (!Pop(ptr)) { 158 | if (!event_->GetWait(timeout)) { 159 | return false; 160 | } 161 | } 162 | } else { 163 | int sleep_ms = 0; 164 | while (!Pop(ptr)) { 
165 | if (timeout >= 0 && sleep_ms >= timeout) 166 | return false; 167 | usleep(1000); 168 | sleep_ms++; 169 | } 170 | } 171 | return true; 172 | } 173 | 174 | } // namespace ccb 175 | 176 | #endif // CCBASE_FAST_QUEUE_H_ 177 | -------------------------------------------------------------------------------- /test/memory_reclamation_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2016-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include "gtestx/gtestx.h" 35 | #include "ccbase/memory_reclamation.h" 36 | 37 | class TraceableObj { 38 | public: 39 | TraceableObj() : val_(1) {} 40 | ~TraceableObj() { val_ = 0; } 41 | 42 | int val() const { 43 | return val_; 44 | } 45 | static size_t allocated_objs() { 46 | return allocated_objs_; 47 | } 48 | 49 | static void* operator new(size_t sz) { 50 | allocated_objs_++; 51 | return ::operator new(sz); 52 | } 53 | static void operator delete(void* ptr, size_t sz) { 54 | allocated_objs_--; 55 | ::operator delete(ptr); 56 | } 57 | 58 | private: 59 | int val_; 60 | static std::atomic allocated_objs_; 61 | }; 62 | 63 | std::atomic TraceableObj::allocated_objs_{0}; 64 | 65 | using TestTypes = testing::Types, 66 | ccb::EpochBasedReclamation, 67 | ccb::HazardPtrReclamation>; 68 | 69 | template 70 | class MemoryReclamationTest : public testing::Test { 71 | protected: 72 | MemoryReclamationTest() : ptr_(nullptr) {} 73 | 74 | void SetUp() { 75 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 76 | } 77 | void TearDown() { 78 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 79 | } 80 | ccb::PtrReclamationAdapter recl_; 81 | std::atomic ptr_; 82 | }; 83 | TYPED_TEST_CASE(MemoryReclamationTest, TestTypes); 84 | 85 | template 86 | class MemoryReclamationPerfTest : public testing::Test { 87 | protected: 88 | MemoryReclamationPerfTest() : 
stop_flag_(false), ptr_(nullptr) {} 89 | 90 | void SetUp() { 91 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 92 | auto reader_code = [this] { 93 | while (!stop_flag_.load(std::memory_order_relaxed)) { 94 | TraceableObj* ptr = this->recl_.ReadLock(&this->ptr_); 95 | for (int i = 0; ptr && i < 100; i++) { 96 | ASSERT_EQ(1, ptr->val()); 97 | } 98 | this->recl_.ReadUnlock(); 99 | } 100 | }; 101 | for (auto& t : reader_tasks_) { 102 | t = std::thread(reader_code); 103 | } 104 | auto writer_code = [this] { 105 | while (!stop_flag_.load(std::memory_order_relaxed)) { 106 | auto old_ptr = this->ptr_.exchange(new TraceableObj); 107 | if (old_ptr) this->recl_.Retire(old_ptr); 108 | } 109 | this->recl_.RetireCleanup(); 110 | }; 111 | for (auto& t : writer_tasks_) { 112 | t = std::thread(writer_code); 113 | } 114 | } 115 | void TearDown() { 116 | stop_flag_.store(true); 117 | for (auto& t : reader_tasks_) { 118 | t.join(); 119 | } 120 | for (auto& t : writer_tasks_) { 121 | t.join(); 122 | } 123 | this->recl_.Retire(this->ptr_.load()); 124 | this->recl_.RetireCleanup(); 125 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 126 | } 127 | std::thread reader_tasks_[2]; 128 | std::thread writer_tasks_[1]; 129 | std::atomic stop_flag_; 130 | ccb::PtrReclamationAdapter recl_; 131 | std::atomic ptr_; 132 | }; 133 | TYPED_TEST_CASE(MemoryReclamationPerfTest, TestTypes); 134 | 135 | TYPED_TEST(MemoryReclamationTest, Simple) { 136 | this->ptr_ = new TraceableObj; 137 | ASSERT_EQ(1, TraceableObj::allocated_objs()); 138 | TraceableObj* ptr = this->recl_.ReadLock(&this->ptr_); 139 | ASSERT_EQ(1, ptr->val()); 140 | this->recl_.ReadUnlock(); 141 | ASSERT_EQ(1, TraceableObj::allocated_objs()); 142 | this->ptr_ = nullptr; 143 | this->recl_.Retire(ptr); 144 | this->recl_.RetireCleanup(); 145 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 146 | } 147 | 148 | TYPED_TEST(MemoryReclamationTest, Read) { 149 | auto deleter = [this] { 150 | auto ptr = this->ptr_.load(); 151 | this->ptr_ = nullptr; 
152 | this->recl_.Retire(ptr); 153 | this->recl_.RetireCleanup(); 154 | }; 155 | this->ptr_ = new TraceableObj; 156 | ASSERT_EQ(1, TraceableObj::allocated_objs()); 157 | auto ptr = this->recl_.ReadLock(&this->ptr_); 158 | std::thread t{deleter}; 159 | for (int i = 0; i < 10; i++) { 160 | ASSERT_EQ(1, TraceableObj::allocated_objs()); 161 | ASSERT_EQ(1, ptr->val()); 162 | usleep(10000); 163 | } 164 | this->recl_.ReadUnlock(); 165 | t.join(); 166 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 167 | } 168 | 169 | TYPED_PERF_TEST(MemoryReclamationPerfTest, RetirePerf) { 170 | auto old_ptr = this->ptr_.exchange(new TraceableObj); 171 | if (old_ptr) this->recl_.Retire(old_ptr); 172 | } 173 | -------------------------------------------------------------------------------- /src/ccbase/worker_group.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #include 31 | #include 32 | #include 33 | #include "ccbase/thread.h" 34 | #include "ccbase/worker_group.h" 35 | 36 | namespace ccb { 37 | 38 | namespace { 39 | 40 | constexpr size_t kMaxBatchProcessTasks = 16; 41 | constexpr size_t kPollerTimeoutMs = 1; 42 | 43 | class DefaultWorkerPoller : public WorkerGroup::Poller { 44 | public: 45 | virtual ~DefaultWorkerPoller() {} 46 | void Poll(size_t timeout_ms) override { 47 | if (timeout_ms > 0) { 48 | usleep(timeout_ms * 1000); 49 | } 50 | } 51 | static std::shared_ptr Instance() { 52 | static std::shared_ptr instance_{ 53 | new DefaultWorkerPoller 54 | }; 55 | return instance_; 56 | } 57 | }; 58 | 59 | } // namespace 60 | 61 | thread_local WorkerGroup::Worker* WorkerGroup::Worker::tls_self_ = nullptr; 62 | 63 | WorkerGroup::Worker::Worker(WorkerGroup* grp, size_t id, 64 | WorkerGroup::TaskQueue::InQueue* q, 65 | std::shared_ptr poller) 66 | : TimerWheel(1000, false), 67 | group_(grp), 68 | id_(id), 69 | inq_(q), 70 | poller_(std::move(poller)), 71 | stop_flag_(false) { 72 | char name[16]; 73 | snprintf(name, sizeof(name), "w%lu-%lu", grp->id(), id); 74 | thread_ = CreateThread(name, BindClosure(this, &Worker::WorkerMainEntry)); 75 | } 76 | 77 | WorkerGroup::Worker::~Worker() { 78 | stop_flag_.store(true, std::memory_order_release); 79 | thread_.join(); 80 | } 81 | 82 | bool WorkerGroup::Worker::PostTask(ClosureFunc func) { 83 | return 
group_->PostTask(id_, std::move(func)); 84 | } 85 | 86 | void WorkerGroup::Worker::WorkerMainEntry() { 87 | tls_self_ = this; 88 | while (!stop_flag_.load(std::memory_order_acquire)) { 89 | TimerWheel::MoveOn(); 90 | size_t n = BatchProcessTasks(kMaxBatchProcessTasks); 91 | poller_->Poll(n < kMaxBatchProcessTasks ? kPollerTimeoutMs : 0); 92 | } 93 | BatchProcessTasks(std::numeric_limits::max()); 94 | } 95 | 96 | size_t WorkerGroup::Worker::BatchProcessTasks(size_t max) { 97 | size_t cnt; 98 | for (cnt = 0; cnt < max ; cnt++) { 99 | ClosureFunc func; 100 | if (!inq_->Pop(&func)) { 101 | break; 102 | } 103 | func(); 104 | } 105 | return cnt; 106 | } 107 | 108 | 109 | WorkerGroup::WorkerGroup(size_t worker_num, size_t queue_size) 110 | : WorkerGroup(worker_num, queue_size, [](size_t) { 111 | return DefaultWorkerPoller::Instance(); 112 | }) { 113 | } 114 | 115 | WorkerGroup::WorkerGroup(size_t worker_num, size_t queue_size, 116 | PollerSupplier poller_supplier) 117 | : queue_(std::make_shared(queue_size)) { 118 | for (size_t id = 0; id < worker_num; id++) { 119 | workers_.emplace_back(new Worker(this, id, queue_->RegisterConsumer(), 120 | poller_supplier(id))); 121 | } 122 | } 123 | 124 | WorkerGroup::~WorkerGroup() { 125 | } 126 | 127 | 128 | WorkerGroup::TaskQueue::OutQueue* WorkerGroup::GetOutQueue() { 129 | auto& client_ctx = tls_client_ctx_.get(); 130 | if (!client_ctx) { 131 | client_ctx.queue_holder = queue_; 132 | client_ctx.out_queue = queue_->RegisterProducer(); 133 | } 134 | return client_ctx.out_queue; 135 | } 136 | 137 | bool WorkerGroup::PostTask(ClosureFunc func) { 138 | TaskQueue::OutQueue* outq = GetOutQueue(); 139 | return outq->Push(std::move(func)); 140 | } 141 | 142 | bool WorkerGroup::PostTask(size_t worker_id, ClosureFunc func) { 143 | TaskQueue::OutQueue* outq = GetOutQueue(); 144 | return outq->Push(worker_id, std::move(func)); 145 | } 146 | 147 | bool WorkerGroup::PostTask(ClosureFunc func, size_t delay_ms) { 148 | TaskQueue::OutQueue* outq = 
GetOutQueue(); 149 | return outq->Push([func, delay_ms] { 150 | Worker::self()->AddTimer(delay_ms, std::move(func)); 151 | }); 152 | } 153 | 154 | bool WorkerGroup::PostTask(size_t worker_id, ClosureFunc func, 155 | size_t delay_ms) { 156 | TaskQueue::OutQueue* outq = GetOutQueue(); 157 | return outq->Push(worker_id, [func, delay_ms] { 158 | Worker::self()->AddTimer(delay_ms, std::move(func)); 159 | }); 160 | } 161 | 162 | bool WorkerGroup::PostPeriodTask(ClosureFunc func, size_t period_ms) { 163 | TaskQueue::OutQueue* outq = GetOutQueue(); 164 | return outq->Push([func, period_ms] { 165 | Worker::self()->AddPeriodTimer(period_ms, std::move(func)); 166 | }); 167 | } 168 | 169 | bool WorkerGroup::PostPeriodTask(size_t worker_id, ClosureFunc func, 170 | size_t period_ms) { 171 | TaskQueue::OutQueue* outq = GetOutQueue(); 172 | return outq->Push(worker_id, [func, period_ms] { 173 | Worker::self()->AddPeriodTimer(period_ms, std::move(func)); 174 | }); 175 | } 176 | 177 | } // namespace ccb 178 | -------------------------------------------------------------------------------- /src/ccbase/token_bucket.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 
17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | 40 | #include "ccbase/token_bucket.h" 41 | 42 | #define TV2US(ptv) ((ptv)->tv_sec * 1000000 + (ptv)->tv_usec) 43 | 44 | namespace ccb { 45 | 46 | namespace { 47 | 48 | // conditional locker 49 | class Locker { 50 | public: 51 | Locker(std::mutex* m, bool on) 52 | : m_(*m), on_(on) { 53 | if (on_) m_.lock(); 54 | } 55 | ~Locker() { 56 | if (on_) m_.unlock(); 57 | } 58 | 59 | private: 60 | std::mutex& m_; 61 | bool on_; 62 | }; 63 | 64 | } // namespace 65 | 66 | TokenBucket::TokenBucket(uint32_t tokens_per_sec) 67 | : TokenBucket(tokens_per_sec, 68 | tokens_per_sec / 5) { 69 | } 70 | 71 | TokenBucket::TokenBucket(uint32_t tokens_per_sec, 72 | uint32_t bucket_size) 73 | : TokenBucket(tokens_per_sec, 74 | bucket_size, 75 | bucket_size, 76 | nullptr, 77 | true) { 78 | } 79 | 80 | TokenBucket::TokenBucket(uint32_t tokens_per_sec, 81 | uint32_t bucket_size, 82 | uint32_t init_tokens, 83 | const struct timeval* tv_now, 84 | bool enable_lock_for_mt) 85 | : tokens_per_sec_(tokens_per_sec), 86 | 
bucket_size_(bucket_size ? bucket_size : 1), 87 | token_count_(init_tokens), 88 | enable_lock_(enable_lock_for_mt) { 89 | struct timeval tv; 90 | if (!tv_now) { 91 | tv_now = &tv; 92 | gettimeofday(&tv, nullptr); 93 | } 94 | last_gen_time_ = TV2US(tv_now); 95 | last_calc_delta_ = 0; 96 | } 97 | 98 | void TokenBucket::Mod(uint32_t tokens_per_sec, uint32_t bucket_size) { 99 | Locker locker(&gen_mutex_, enable_lock_); 100 | tokens_per_sec_ = tokens_per_sec; 101 | bucket_size_ = bucket_size; 102 | } 103 | 104 | void TokenBucket::Mod(uint32_t tokens_per_sec, 105 | uint32_t bucket_size, 106 | uint32_t init_tokens) { 107 | Locker locker(&gen_mutex_, enable_lock_); 108 | tokens_per_sec_ = tokens_per_sec; 109 | bucket_size_ = bucket_size; 110 | token_count_.store(init_tokens, std::memory_order_relaxed); 111 | } 112 | 113 | void TokenBucket::Gen(const struct timeval* tv_now) { 114 | struct timeval tv; 115 | uint64_t us_now, us_past; 116 | uint64_t new_tokens, calc_delta; 117 | int64_t new_token_count, cur_token_count; 118 | 119 | Locker locker(&gen_mutex_, enable_lock_); 120 | 121 | if (tv_now == nullptr) { 122 | tv_now = &tv; 123 | gettimeofday(&tv, nullptr); 124 | } 125 | us_now = TV2US(tv_now); 126 | if (us_now < last_gen_time_) { 127 | last_gen_time_ = us_now; 128 | return; 129 | } 130 | 131 | us_past = us_now - last_gen_time_; 132 | new_tokens = (((uint64_t)tokens_per_sec_ * us_past + last_calc_delta_) 133 | / 1000000); 134 | calc_delta = (((uint64_t)tokens_per_sec_ * us_past + last_calc_delta_) 135 | % 1000000); 136 | 137 | last_gen_time_ = us_now; 138 | last_calc_delta_ = calc_delta; 139 | cur_token_count = token_count_.load(std::memory_order_relaxed); 140 | new_token_count = cur_token_count + new_tokens; 141 | if (new_token_count < cur_token_count || 142 | new_token_count > static_cast(bucket_size_)) { 143 | new_token_count = bucket_size_; 144 | } 145 | if (!enable_lock_) { 146 | token_count_.store(new_token_count, std::memory_order_relaxed); 147 | } else { 148 | 
token_count_.fetch_add(new_token_count - cur_token_count); 149 | } 150 | } 151 | 152 | bool TokenBucket::Check(uint32_t need_tokens) { 153 | int64_t token_count = token_count_.load(std::memory_order_relaxed); 154 | if (token_count < static_cast(need_tokens)) { 155 | return false; 156 | } 157 | return true; 158 | } 159 | 160 | bool TokenBucket::Get(uint32_t need_tokens) { 161 | int64_t token_count = token_count_.load(std::memory_order_relaxed); 162 | if (token_count < static_cast(need_tokens)) { 163 | return false; 164 | } 165 | if (!enable_lock_) { 166 | token_count_.store(token_count - need_tokens, std::memory_order_relaxed); 167 | } else { 168 | int64_t cur_tokens = token_count_.fetch_sub(need_tokens); 169 | if (cur_tokens < -static_cast(bucket_size_)) { 170 | // rollback if overdraft too much 171 | token_count_.fetch_add(need_tokens); 172 | return false; 173 | } 174 | } 175 | return true; 176 | } 177 | 178 | int TokenBucket::Overdraft(uint32_t need_tokens) { 179 | int64_t cur_tokens; 180 | if (!enable_lock_) { 181 | cur_tokens = token_count_.load(std::memory_order_relaxed); 182 | token_count_.store(cur_tokens - need_tokens, std::memory_order_relaxed); 183 | } else { 184 | cur_tokens = token_count_.fetch_sub(need_tokens); 185 | } 186 | return (cur_tokens < static_cast(need_tokens) ? 187 | need_tokens - cur_tokens : 0); 188 | } 189 | 190 | } // namespace ccb 191 | 192 | -------------------------------------------------------------------------------- /src/ccbase/macro_list.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 
10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CCBASE_MACRO_LIST_H_ 31 | #define CCBASE_MACRO_LIST_H_ 32 | 33 | #include "ccbase/common.h" 34 | 35 | namespace ccb { 36 | 37 | struct ListHead { 38 | struct ListHead* next; 39 | struct ListHead* prev; 40 | }; 41 | 42 | #define CCB_INIT_LIST_HEAD(ptr) do { \ 43 | (ptr)->next = (ptr); \ 44 | (ptr)->prev = (ptr); \ 45 | } while (0) 46 | 47 | #define CCB_LIST_HEAD_INIT(name) { &(name), &(name) } 48 | #define CCB_LIST_HEAD(name) \ 49 | ccb::ListHead name = LIST_HEAD_INIT(name) 50 | 51 | /* 52 | * Insert a new entry between two known consecutive entries. 53 | * 54 | * This is only for internal list manipulation where we know 55 | * the prev/next entries already! 
56 | */ 57 | #define CCB_LIST_ADD_INTERNAL(node, prev_node, next_node) do { \ 58 | (next_node)->prev = (node); \ 59 | (node)->next = (next_node); \ 60 | (node)->prev = (prev_node); \ 61 | (prev_node)->next = (node); \ 62 | } while (0) 63 | 64 | /* 65 | * list_add - add a new entry 66 | * @new: new entry to be added 67 | * @head: list head to add it after 68 | * 69 | * Insert a new entry after the specified head. 70 | * This is good for implementing stacks. 71 | */ 72 | #define CCB_LIST_ADD(node, head) do { \ 73 | ccb::ListHead * newn = (node); \ 74 | ccb::ListHead * prev = (head); \ 75 | ccb::ListHead * next = (head)->next; \ 76 | CCB_LIST_ADD_INTERNAL(newn, prev, next); \ 77 | } while (0) 78 | 79 | /* 80 | * list_add_tail - add a new entry 81 | * @new: new entry to be added 82 | * @head: list head to add it before 83 | * 84 | * Insert a new entry before the specified head. 85 | * This is useful for implementing queues. 86 | */ 87 | #define CCB_LIST_ADD_TAIL(node, head) do { \ 88 | ccb::ListHead * newn = (node); \ 89 | ccb::ListHead * prev = (head)->prev; \ 90 | ccb::ListHead * next = (head); \ 91 | CCB_LIST_ADD_INTERNAL(newn, prev, next); \ 92 | } while (0) 93 | 94 | /* 95 | * Delete a list entry by making the prev/next entries 96 | * point to each other. 97 | * 98 | * This is only for internal list manipulation where we know 99 | * the prev/next entries already! 100 | */ 101 | #define CCB_LIST_DEL_INTERNAL(prev_node, next_node) do { \ 102 | (next_node)->prev = (prev_node); \ 103 | (prev_node)->next = (next_node); \ 104 | } while (0) 105 | 106 | /* 107 | * list_del - deletes entry from list. 108 | * @entry: the element to delete from the list. 109 | * Note: list_empty on entry does not return true after this, the entry is in an undefined state. 
110 | */ 111 | #define CCB_LIST_DEL(entry) do { \ 112 | ccb::ListHead * prev = (entry)->prev; \ 113 | ccb::ListHead * next = (entry)->next; \ 114 | CCB_LIST_DEL_INTERNAL(prev, next); \ 115 | } while (0) 116 | 117 | /* 118 | * list_del_init - deletes entry from list and reinitialize it. 119 | * @entry: the element to delete from the list. 120 | */ 121 | #define CCB_LIST_DEL_INIT(entry) do { \ 122 | CCB_LIST_DEL(entry); \ 123 | CCB_INIT_LIST_HEAD(entry); \ 124 | } while (0) 125 | 126 | /* 127 | * list_empty - tests whether a list is empty 128 | * @head: the list to test. 129 | */ 130 | #define CCB_LIST_EMPTY(head) ((head)->next == (head)) 131 | 132 | /* 133 | * list_splice - join two lists 134 | * @list: the new list to add. 135 | * @head: the place to add it in the first list. 136 | */ 137 | #define CCB_LIST_SPLICE(list, head) do { \ 138 | ccb::ListHead * first = (list)->next; \ 139 | if (first != (list)) { \ 140 | ccb::ListHead * last = (list)->prev; \ 141 | ccb::ListHead * at = (head)->next; \ 142 | first->prev = (head); \ 143 | last->next = at; \ 144 | st->prev = last; \ 145 | } \ 146 | } while (0) 147 | 148 | /* 149 | * list_entry - get the struct for this entry 150 | * @ptr: the &struct list_head pointer. 151 | * @type: the type of the struct this is embedded in. 152 | * @member: the name of the list_struct within the struct. 153 | */ 154 | #define CCB_LIST_ENTRY(ptr, type, member) \ 155 | ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member))) // NOLINT 156 | 157 | /* 158 | * list_for_each - iterate over a list 159 | * @pos: the &struct list_head to use as a loop counter. 160 | * @head: the head for your list. 161 | */ 162 | #define CCB_LIST_FOR_EACH(pos, head) \ 163 | for ((pos) = (head)->next; (pos) != (head); pos = (pos)->next) 164 | 165 | /* 166 | * list_for_each_safe - iterate over a list safe against removal of list entry 167 | * @pos: the &struct list_head to use as a loop counter. 
168 | * @n: another &struct list_head to use as temporary storage 169 | * @head: the head for your list. 170 | */ 171 | #define CCB_LIST_FOR_EACH_SAFE(pos, n, head) \ 172 | for ((pos) = (head)->next, (n) = (pos)->next; (pos) != (head); \ 173 | (pos) = (n), (n) = (pos)->next) 174 | 175 | /* 176 | * list_for_each_prev - iterate over a list in reverse order 177 | * @pos: the &struct list_head to use as a loop counter. 178 | * @head: the head for your list. 179 | */ 180 | #define CCB_LIST_FOR_EACH_PREV(pos, head) \ 181 | for ((pos) = (head)->prev; (pos) != (head); (pos) = (pos)->prev) 182 | 183 | 184 | } // namespace ccb 185 | 186 | #endif // CCBASE_MACRO_LIST_H_ 187 | -------------------------------------------------------------------------------- /test/accumulated_list_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2016-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include "gtestx/gtestx.h" 36 | #include "ccbase/accumulated_list.h" 37 | 38 | class AccumulatedListTest : public testing::Test { 39 | protected: 40 | AccumulatedListTest() { 41 | } 42 | void SetUp() { 43 | } 44 | void TearDown() { 45 | } 46 | static constexpr int magic = 0x37; 47 | struct TestNode { 48 | int value{magic}; 49 | }; 50 | ccb::AccumulatedList alist_; 51 | }; 52 | 53 | constexpr int AccumulatedListTest::magic; 54 | 55 | TEST_F(AccumulatedListTest, SingleThread) { 56 | size_t add_count = 0; 57 | do { 58 | size_t travel_count = 0; 59 | alist_.AddNode(); ++add_count; 60 | alist_.Travel([&travel_count] (TestNode* n) { 61 | ASSERT_EQ(magic, n->value); 62 | ++travel_count; 63 | }); 64 | ASSERT_EQ(travel_count, add_count); 65 | } while (add_count < 100); 66 | } 67 | 68 | TEST_F(AccumulatedListTest, MultiThread) { 69 | constexpr size_t loops = 5000000; 70 | auto w_code = [this] { 71 | for (size_t i = 0; i < loops; i++) { 72 | alist_.AddNode(); 73 | } 74 | }; 75 | std::atomic r_stop_flag{false}; 76 | auto r_code = [this, &r_stop_flag] { 77 | while (!r_stop_flag.load(std::memory_order_relaxed)) { 78 | alist_.Travel([] (TestNode* n) { 79 | ASSERT_EQ(magic, n->value); 80 | }); 81 | } 82 | }; 83 | std::thread w_task1{w_code}; 84 | std::thread w_task2{w_code}; 85 | std::thread w_task3{w_code}; 86 | std::thread r_task1{r_code}; 87 | 
w_task1.join(); 88 | w_task2.join(); 89 | w_task3.join(); 90 | r_stop_flag.store(true); 91 | r_task1.join(); 92 | size_t travel_count = 0; 93 | alist_.Travel([&travel_count] (TestNode*) { 94 | ++travel_count; 95 | }); 96 | ASSERT_EQ(loops * 3, travel_count); 97 | } 98 | 99 | class AllocatedListTest : public testing::Test { 100 | protected: 101 | AllocatedListTest() { 102 | } 103 | void SetUp() { 104 | } 105 | void TearDown() { 106 | } 107 | static constexpr int magic = 0x37; 108 | struct TestNode { 109 | int value{magic}; 110 | }; 111 | ccb::AllocatedList alist_; 112 | }; 113 | 114 | constexpr int AllocatedListTest::magic; 115 | 116 | TEST_F(AllocatedListTest, SingleThread) { 117 | std::vector added_nodes; 118 | do { 119 | added_nodes.push_back(alist_.Alloc()); 120 | size_t travel_count = 0; 121 | alist_.Travel([&travel_count] (TestNode* n) { 122 | ASSERT_EQ(magic, n->value); 123 | ++travel_count; 124 | }); 125 | ASSERT_EQ(travel_count, added_nodes.size()); 126 | } while (added_nodes.size() < 100); 127 | while (added_nodes.size() > 0) { 128 | alist_.Free(added_nodes.back()); 129 | added_nodes.pop_back(); 130 | size_t travel_count = 0; 131 | alist_.Travel([&travel_count] (TestNode* n) { 132 | ASSERT_EQ(magic, n->value); 133 | ++travel_count; 134 | }); 135 | ASSERT_EQ(travel_count, added_nodes.size()); 136 | } 137 | } 138 | 139 | TEST_F(AllocatedListTest, MultiThread) { 140 | auto code = [this](int batch) { 141 | std::vector added_nodes; 142 | for (int n = 0; n < 10000000/batch; n++) { 143 | for (int i = 0; i < batch; i++) { 144 | added_nodes.push_back(alist_.Alloc()); 145 | } 146 | for (int i = 0; i < batch; i++) { 147 | alist_.Free(added_nodes[i]); 148 | } 149 | added_nodes.clear(); 150 | } 151 | alist_.Alloc(); 152 | }; 153 | std::thread task1{code, 1}; 154 | std::thread task2{code, 2}; 155 | std::thread task3{code, 3}; 156 | task1.join(); 157 | task2.join(); 158 | task3.join(); 159 | size_t travel_count = 0; 160 | alist_.Travel([&travel_count] (TestNode*) { 161 | 
++travel_count; 162 | }); 163 | ASSERT_EQ(3, travel_count); 164 | } 165 | 166 | class ThreadLocalListTest : public testing::Test { 167 | protected: 168 | ThreadLocalListTest() { 169 | } 170 | void SetUp() { 171 | } 172 | void TearDown() { 173 | } 174 | struct TestNode { 175 | TestNode() : value(0) {} 176 | ~TestNode() {value = -1;} 177 | int value; 178 | }; 179 | template struct ScopeTag {}; 180 | ccb::ThreadLocalList> alist_; 181 | ccb::ThreadLocalList> alist_mt_; 182 | }; 183 | 184 | TEST_F(ThreadLocalListTest, SingleThread) { 185 | size_t mod_count = 0; 186 | do { 187 | size_t travel_count = 0; 188 | ++(alist_.LocalNode()->value); ++mod_count; 189 | alist_.Travel([&travel_count, mod_count] (TestNode* n) { 190 | ASSERT_EQ(mod_count, n->value); 191 | ++travel_count; 192 | }); 193 | ASSERT_EQ(1, travel_count); 194 | } while (mod_count < 100); 195 | } 196 | 197 | TEST_F(ThreadLocalListTest, MultiThread) { 198 | constexpr size_t kTaskNum = 1000; 199 | std::thread tasks[kTaskNum]; 200 | for (auto& task : tasks) { 201 | task = std::thread([this] { 202 | alist_mt_.LocalNode()->value++; 203 | ASSERT_EQ(1, alist_mt_.LocalNode()->value); 204 | }); 205 | } 206 | for (auto& task : tasks) { 207 | task.join(); 208 | } 209 | size_t travel_count = 0; 210 | alist_mt_.Travel([&travel_count] (TestNode*) { 211 | ++travel_count; 212 | }); 213 | ASSERT_EQ(0, travel_count); 214 | } 215 | 216 | -------------------------------------------------------------------------------- /src/ccbase/accumulated_list.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2016-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 
10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #ifndef CCBASE_ACCUMULATED_LIST_H_ 31 | #define CCBASE_ACCUMULATED_LIST_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include "ccbase/common.h" 37 | 38 | namespace ccb { 39 | 40 | template 41 | class AccumulatedList { 42 | public: 43 | AccumulatedList() 44 | : head_(nullptr) {} 45 | ~AccumulatedList(); 46 | 47 | T* AddNode(); 48 | template void Travel(F&& f); 49 | template T* FindNode(F&& f); 50 | 51 | private: 52 | CCB_NOT_COPYABLE_AND_MOVABLE(AccumulatedList); 53 | 54 | struct Node { 55 | T data; 56 | Node* next; 57 | Node() : data(), next(nullptr) {} 58 | }; 59 | 60 | std::atomic head_; 61 | }; 62 | 63 | template 64 | T* AccumulatedList::AddNode() { 65 | Node* new_node = new Node(); 66 | Node* old_head = head_.load(std::memory_order_relaxed); 67 | do { 68 | new_node->next = old_head; 69 | } while (!head_.compare_exchange_weak(old_head, new_node, 70 | std::memory_order_release, 71 | std::memory_order_relaxed)); 72 | return &new_node->data; 73 | } 74 | 75 | template 76 | template 77 | void AccumulatedList::Travel(F&& f) { 78 | for (Node* node = head_.load(std::memory_order_seq_cst); 79 | node != nullptr; node = node->next) { 80 | f(&node->data); 81 | } 82 | } 83 | 84 | template 85 | template 86 | T* AccumulatedList::FindNode(F&& f) { 87 | for (Node* node = head_.load(std::memory_order_seq_cst); 88 | node != nullptr; node = node->next) { 89 | if (f(&node->data)) 90 | return &node->data; 91 | } 92 | return nullptr; 93 | } 94 | 95 | template 96 | AccumulatedList::~AccumulatedList() { 97 | for (Node* node = head_.load(std::memory_order_seq_cst); 98 | node != nullptr; ) { 99 | Node* next = node->next; 100 | delete node; 101 | node = next; 102 | } 103 | } 104 | 105 | template 106 | class AllocatedList { 107 | public: 108 | AllocatedList() {} 109 | ~AllocatedList() {} 110 | 111 | T* Alloc(); 112 | void Free(T* ptr); 113 | // caller should task care of concurrency issues here 114 | template void Travel(F&& f); 115 | 116 | private: 117 | 
CCB_NOT_COPYABLE_AND_MOVABLE(AllocatedList); 118 | 119 | struct Node { 120 | std::atomic is_allocated; 121 | typename std::aligned_storage::type data; 122 | 123 | Node() : is_allocated(true) { 124 | new (&data) T(); 125 | } 126 | ~Node() { 127 | if (is_allocated) { 128 | reinterpret_cast(&data)->~T(); 129 | } 130 | } 131 | }; 132 | AccumulatedList list_; 133 | }; 134 | 135 | template 136 | T* AllocatedList::Alloc() { 137 | Node* node = list_.FindNode([](Node* node) { 138 | if (!node->is_allocated.load(std::memory_order_relaxed)) { 139 | bool old_value = false; 140 | if (node->is_allocated.compare_exchange_weak( 141 | old_value, true, 142 | std::memory_order_release, 143 | std::memory_order_relaxed)) { 144 | new (&node->data) T(); 145 | return true; 146 | } 147 | } 148 | return false; 149 | }); 150 | if (!node) 151 | node = list_.AddNode(); 152 | return reinterpret_cast(&node->data); 153 | } 154 | 155 | template 156 | void AllocatedList::Free(T* ptr) { 157 | Node* node = reinterpret_cast(reinterpret_cast(ptr) 158 | - offsetof(Node, data)); 159 | reinterpret_cast(&node->data)->~T(); 160 | node->is_allocated.store(false, std::memory_order_release); 161 | } 162 | 163 | template 164 | template 165 | void AllocatedList::Travel(F&& f) { 166 | list_.Travel([&f](Node* node) { 167 | if (node->is_allocated.load(std::memory_order_acquire)) { 168 | f(reinterpret_cast(&node->data)); 169 | } 170 | }); 171 | } 172 | 173 | template 174 | class ThreadLocalList { 175 | public: 176 | T* LocalNode(); 177 | template void Travel(F&& f); 178 | 179 | private: 180 | static void FreeLocalNode(T* ptr); 181 | static AllocatedList* GlobalList(); 182 | static std::shared_ptr> CreateGlobalListOnce(); 183 | }; 184 | 185 | template 186 | T* ThreadLocalList::LocalNode() { 187 | static thread_local std::unique_ptr 188 | tls_local_node{GlobalList()->Alloc(), &FreeLocalNode}; 189 | return tls_local_node.get(); 190 | } 191 | 192 | template 193 | void ThreadLocalList::FreeLocalNode(T* ptr) { 194 | 
GlobalList()->Free(ptr); 195 | } 196 | 197 | template 198 | template 199 | void ThreadLocalList::Travel(F&& f) { 200 | GlobalList()->Travel(std::forward(f)); 201 | } 202 | 203 | template 204 | AllocatedList* ThreadLocalList::GlobalList() { 205 | static thread_local std::shared_ptr> 206 | tls_global_list{CreateGlobalListOnce()}; 207 | return tls_global_list.get(); 208 | } 209 | 210 | template 211 | std::shared_ptr> 212 | ThreadLocalList::CreateGlobalListOnce() { 213 | // a safe assumption: only called before return of main() 214 | static std::shared_ptr> g_list{new AllocatedList()}; 215 | return g_list; 216 | } 217 | 218 | } // namespace ccb 219 | 220 | #endif // CCBASE_ACCUMULATED_LIST_H_ 221 | -------------------------------------------------------------------------------- /test/dispatch_queue_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #include 31 | #include 32 | #include "gtestx/gtestx.h" 33 | #include "ccbase/dispatch_queue.h" 34 | #include "ccbase/token_bucket.h" 35 | 36 | #define QSIZE 1000000 37 | #define DEFAULT_HZ 1000000 38 | #define DEFAULT_TIME 1500 39 | 40 | DECLARE_uint64(hz); 41 | 42 | class DispatchQueueTest : public testing::Test { 43 | protected: 44 | DispatchQueueTest() 45 | : dispatch_queue_(QSIZE) {} 46 | void SetUp() {} 47 | void TearDown() {} 48 | 49 | ccb::DispatchQueue dispatch_queue_; 50 | }; 51 | 52 | TEST_F(DispatchQueueTest, UnregisterProducer) { 53 | auto producer = dispatch_queue_.RegisterProducer(); 54 | auto consumer = dispatch_queue_.RegisterConsumer(); 55 | ASSERT_NE(nullptr, producer); 56 | ASSERT_NE(nullptr, consumer); 57 | producer->Push(1); 58 | int val = 0; 59 | consumer->Pop(&val); 60 | ASSERT_EQ(1, val); 61 | producer->Unregister(); 62 | producer = dispatch_queue_.RegisterProducer(); 63 | producer->Push(2); 64 | consumer->Pop(&val); 65 | ASSERT_EQ(2, val); 66 | } 67 | 68 | PERF_TEST_F(DispatchQueueTest, OneshotProducer) { 69 | static auto producer = dispatch_queue_.RegisterProducer(); 70 | static auto consumer = dispatch_queue_.RegisterConsumer(); 71 | static int count = 0; 72 | producer->Unregister(); 73 | producer = dispatch_queue_.RegisterProducer(); 74 | producer->Push(++count); 75 | int val = 0; 76 | consumer->Pop(&val); 77 | ASSERT_EQ(count, val) << PERF_ABORT; 78 | } 79 | 80 | class 
DispatchQueuePerfTest : public testing::Test { 81 | protected: 82 | DispatchQueuePerfTest() 83 | : dispatch_queue_(QSIZE), 84 | r1_count_(0), 85 | r2_count_(0), 86 | overflow_(0), 87 | stop_(false), 88 | err_found_(false) {} 89 | void SetUp() { 90 | r1_thread_ = std::thread(&DispatchQueuePerfTest::ReadThread, this, 1); 91 | r2_thread_ = std::thread(&DispatchQueuePerfTest::ReadThread, this, 2); 92 | timer_thread_ = std::thread([this] { 93 | unsigned count = 0; 94 | while (!stop_.load(std::memory_order_relaxed)) { 95 | std::this_thread::sleep_for(std::chrono::milliseconds(10)); 96 | if (++count % 100 == 0) OnTimer(); 97 | } 98 | }); 99 | std::this_thread::sleep_for(std::chrono::milliseconds(10)); 100 | w2_thread_ = std::thread(&DispatchQueuePerfTest::WriteThread, this, 2); 101 | std::cout << "producer thread #" << 1 << " start" << std::endl; 102 | } 103 | void TearDown() { 104 | stop_.store(true, std::memory_order_relaxed); 105 | r1_thread_.join(); 106 | r2_thread_.join(); 107 | w2_thread_.join(); 108 | timer_thread_.join(); 109 | } 110 | void ReadThread(int id) { 111 | std::cout << "consumer thread #" << id << " start" << std::endl; 112 | auto q = dispatch_queue_.RegisterConsumer(); 113 | auto& count = (id == 1 ? 
r1_count_ : r2_count_); 114 | int val, check[2] = {0, 0}; 115 | while (!stop_.load(std::memory_order_relaxed)) { 116 | if (!q->Pop(&val)) { 117 | std::this_thread::sleep_for(std::chrono::milliseconds(1)); 118 | continue; 119 | } 120 | if (val/4 != check[val%2]++) { 121 | std::cout << "ReadThread #" << id << " check " << val%2 << " failed:" 122 | << "val/4=" << val/4 << " check=" << check[val%2]-1 << std::endl; 123 | err_found_.store(true, std::memory_order_relaxed); 124 | break; 125 | } 126 | count.store(count.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed); 127 | } 128 | std::cout << "consumer thread #" << id << " exit" << std::endl; 129 | } 130 | void WriteThread(int id) { 131 | std::cout << "producer thread #" << id << " start" << std::endl; 132 | uint64_t hz = FLAGS_hz ? FLAGS_hz : DEFAULT_HZ; 133 | std::cerr << "hz=" << hz << std::endl; 134 | ccb::TokenBucket tb(hz, hz/5, hz/500, nullptr, false); 135 | auto q = dispatch_queue_.RegisterProducer(); 136 | int val = 1; 137 | while (!stop_.load(std::memory_order_relaxed)) { 138 | if (tb.Get(1)) { 139 | if (q->Push(val/2%2, val)) { 140 | val += 2; 141 | } 142 | } else { 143 | usleep(1000); 144 | tb.Gen(); 145 | } 146 | } 147 | std::cout << "producer thread #" << id << " exit" << std::endl; 148 | } 149 | void OnTimer() { 150 | thread_local uint64_t last_r1_count = 0; 151 | thread_local uint64_t last_r2_count = 0; 152 | thread_local uint64_t last_overflow = 0; 153 | std::cout << "r1_read " << r1_count_ - last_r1_count 154 | << "/s r2_read " << r2_count_ - last_r2_count 155 | << " overflow " << overflow_ - last_overflow 156 | << std::endl; 157 | last_r1_count = r1_count_; 158 | last_r2_count = r2_count_; 159 | last_overflow = overflow_; 160 | } 161 | 162 | ccb::DispatchQueue dispatch_queue_; 163 | std::atomic r1_count_; 164 | std::atomic r2_count_; 165 | std::atomic overflow_; 166 | std::thread r1_thread_; 167 | std::thread r2_thread_; 168 | std::thread w2_thread_; 169 | std::thread timer_thread_; 170 | 
std::atomic_bool stop_; 171 | std::atomic_bool err_found_; 172 | }; 173 | 174 | PERF_TEST_F_OPT(DispatchQueuePerfTest, IO_Perf, DEFAULT_HZ, DEFAULT_TIME) { 175 | static int val = 0; 176 | static auto q = dispatch_queue_.RegisterProducer(); 177 | if (q->Push(val/2%2, val)) { 178 | val += 2; 179 | } else { 180 | overflow_.store(overflow_.load(std::memory_order_relaxed) + 1, 181 | std::memory_order_relaxed); 182 | } 183 | if ((val & 0xfff) == 0) { 184 | ASSERT_FALSE(err_found_.load(std::memory_order_relaxed)) << PERF_ABORT; 185 | } 186 | } 187 | 188 | 189 | -------------------------------------------------------------------------------- /test/closure_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #include 31 | #include "gtestx/gtestx.h" 32 | #include "ccbase/closure.h" 33 | 34 | class ClosureTest : public testing::Test { 35 | public: 36 | ClosureTest() { 37 | } 38 | void SetUp() { 39 | } 40 | void TearDown() { 41 | } 42 | static void Function() { 43 | n++; 44 | } 45 | void Method() { 46 | n++; 47 | } 48 | void Method_Args(int a, int b) { 49 | n++; 50 | } 51 | struct Functor { 52 | void operator()() const { 53 | n++; 54 | } 55 | void operator()(int) const { 56 | n++; 57 | } 58 | }; 59 | static int n; 60 | }; 61 | 62 | class ClosureFuncTest : public ClosureTest { 63 | }; 64 | 65 | int ClosureTest::n = 0; 66 | 67 | TEST_F(ClosureTest, Run) { 68 | int expect = 0; n = 0; 69 | // NewClosure 70 | ccb::internal::NewClosure(ClosureTest::Function)->Run(); 71 | ASSERT_EQ(++expect, n); 72 | ccb::internal::NewClosure(static_cast(this), 73 | &ClosureTest::Method)->Run(); 74 | ASSERT_EQ(++expect, n); 75 | ccb::internal::NewClosure(static_cast(this), 76 | &ClosureTest::Method_Args, 1)->Run(-1); 77 | ASSERT_EQ(++expect, n); 78 | ccb::internal::NewClosure(Functor())->Run(); 79 | ASSERT_EQ(++expect, n); 80 | ccb::internal::NewClosure([]{ClosureTest::n++;})->Run(); 81 | ASSERT_EQ(++expect, n); 82 | ccb::internal::NewClosure(std::bind(&ClosureTest::Method, 83 | static_cast(this)))->Run(); 84 | ASSERT_EQ(++expect, n); 85 | // NewPermanentClosure 86 | ccb::internal::Closure* ptr; 87 | (ptr = 
ccb::internal::NewPermanentClosure(ClosureTest::Function))->Run(); 88 | delete ptr; 89 | ASSERT_EQ(++expect, n); 90 | (ptr = ccb::internal::NewPermanentClosure(static_cast(this), 91 | &ClosureTest::Method))->Run(); 92 | delete ptr; 93 | ASSERT_EQ(++expect, n); 94 | (ptr = ccb::internal::NewPermanentClosure(static_cast(this), 95 | &ClosureTest::Method_Args, 1, -1))->Run(); 96 | delete ptr; 97 | ASSERT_EQ(++expect, n); 98 | (ptr = ccb::internal::NewPermanentClosure(Functor()))->Run(); 99 | delete ptr; 100 | ASSERT_EQ(++expect, n); 101 | (ptr = ccb::internal::NewPermanentClosure([]{ClosureTest::n++;}))->Run(); 102 | delete ptr; 103 | ASSERT_EQ(++expect, n); 104 | (ptr = ccb::internal::NewPermanentClosure(std::bind(&ClosureTest::Method, 105 | static_cast(this))))->Run(); 106 | delete ptr; 107 | ASSERT_EQ(++expect, n); 108 | } 109 | 110 | TEST_F(ClosureTest, Clone) { 111 | int expect = 0; n = 0; 112 | ccb::internal::Closure* ptr; 113 | ccb::internal::Closure* ptr2; 114 | // NewClosure 115 | ccb::internal::NewClosure(ClosureTest::Function)->Run(); 116 | ASSERT_EQ(++expect, n); 117 | ptr = ccb::internal::NewClosure(static_cast(this), &ClosureTest::Method); 118 | ptr->Clone()->Run(); 119 | ptr->Run(); 120 | ASSERT_EQ(++++expect, n); 121 | // NewPermanentClosure 122 | ptr = ccb::internal::NewPermanentClosure(static_cast(this), 123 | &ClosureTest::Method_Args, 1, -1); 124 | ptr2 = ptr->Clone(); 125 | ptr->Run(); delete ptr; 126 | ptr2->Run(); delete ptr2; 127 | ASSERT_EQ(++++expect, n); 128 | } 129 | 130 | PERF_TEST_F(ClosureTest, Perf) { 131 | ccb::internal::NewClosure(static_cast(this), 132 | &ClosureTest::Method_Args, 1)->Run(-1); 133 | } 134 | 135 | TEST_F(ClosureFuncTest, Run) { 136 | int expect = 0; n = 0; 137 | ccb::BindClosure(ClosureFuncTest::Function)(); 138 | ASSERT_EQ(++expect, n); 139 | ccb::BindClosure(static_cast(this), &ClosureFuncTest::Method)(); 140 | ASSERT_EQ(++expect, n); 141 | ccb::BindClosure(static_cast(this), &ClosureFuncTest::Method_Args, 1)(-1); 
142 | ASSERT_EQ(++expect, n); 143 | ccb::BindClosure(Functor())(); 144 | ASSERT_EQ(++expect, n); 145 | ccb::BindClosure(Functor())(0); 146 | ASSERT_EQ(++expect, n); 147 | ccb::BindClosure([]{ClosureFuncTest::n++;})(); 148 | ASSERT_EQ(++expect, n); 149 | ccb::BindClosure([](int)->int{return ClosureFuncTest::n++;})(0); 150 | ASSERT_EQ(++expect, n); 151 | ccb::BindClosure(std::bind(&ClosureFuncTest::Method, static_cast(this)))(); 152 | ASSERT_EQ(++expect, n); 153 | } 154 | 155 | TEST_F(ClosureFuncTest, Ops) { 156 | ccb::ClosureFunc f{[]{}}; 157 | ASSERT_TRUE(f); 158 | f.reset(); 159 | ASSERT_FALSE(f); 160 | ccb::ClosureFunc([]{}).swap(f); 161 | ASSERT_TRUE(f); 162 | ccb::ClosureFunc f2{[]{}}; 163 | f = f2; 164 | ASSERT_TRUE(f); 165 | (f = f2).reset(); 166 | ASSERT_FALSE(f); 167 | } 168 | 169 | TEST_F(ClosureFuncTest, OpsArg1) { 170 | ccb::ClosureFunc f{[](int){}}; 171 | ASSERT_TRUE(f); 172 | f.reset(); 173 | ASSERT_FALSE(f); 174 | ccb::ClosureFunc([](int){}).swap(f); 175 | ASSERT_TRUE(f); 176 | ccb::ClosureFunc f2{[](int){}}; 177 | f = f2; 178 | ASSERT_TRUE(f); 179 | (f = f2).reset(); 180 | ASSERT_FALSE(f); 181 | } 182 | 183 | TEST_F(ClosureFuncTest, OpsArg2) { 184 | ccb::ClosureFunc f{[](int, std::string){}}; 185 | ASSERT_TRUE(f); 186 | f.reset(); 187 | ASSERT_FALSE(f); 188 | ccb::ClosureFunc([](int, std::string){}).swap(f); 189 | ASSERT_TRUE(f); 190 | ccb::ClosureFunc f2{[](int, std::string){}}; 191 | f = f2; 192 | ASSERT_TRUE(f); 193 | (f = f2).reset(); 194 | ASSERT_FALSE(f); 195 | } 196 | 197 | TEST_F(ClosureFuncTest, CopyMove) { 198 | int n = 0; 199 | ccb::ClosureFunc f{[n]()mutable{return ++n;}}; 200 | ASSERT_TRUE(f); 201 | ASSERT_EQ(1, f()); 202 | ccb::ClosureFunc f2{f}; 203 | ASSERT_TRUE(f2); 204 | ASSERT_EQ(2, f2()); 205 | ccb::ClosureFunc f3{std::move(f2)}; 206 | ASSERT_TRUE(f3); 207 | ASSERT_FALSE(f2); 208 | ASSERT_EQ(3, f3()); 209 | ASSERT_EQ(4, f()); 210 | } 211 | 212 | PERF_TEST_F(ClosureFuncTest, NewCall) { 213 | ccb::ClosureFunc{[] {}}(); 214 | } 215 | 
216 | PERF_TEST_F(ClosureFuncTest, NewMoveCall) { 217 | ccb::ClosureFunc f{[]{}}; 218 | ccb::ClosureFunc{std::move(f)}(); 219 | } 220 | 221 | PERF_TEST_F(ClosureFuncTest, CopyCall) { 222 | static ccb::ClosureFunc f = {[]{}}; 223 | ccb::ClosureFunc{f}(); 224 | } 225 | 226 | PERF_TEST(CompareWithStdFunction, ClosureFunc_New) { 227 | ccb::ClosureFunc{[]{}}; 228 | } 229 | 230 | PERF_TEST(CompareWithStdFunction, StdFunction_New) { 231 | std::function{[]{}}; 232 | } 233 | 234 | PERF_TEST(CompareWithStdFunction, ClosureFunc_Copy) { 235 | static ccb::ClosureFunc f = {[]{}}; 236 | ccb::ClosureFunc{f}; 237 | } 238 | 239 | PERF_TEST(CompareWithStdFunction, StdFunction_Copy) { 240 | static std::function f = {[]{}}; 241 | std::function{f}; 242 | } 243 | 244 | PERF_TEST(CompareWithStdFunction, ClosureFunc_Move) { 245 | static ccb::ClosureFunc f = {[]{}}; 246 | ccb::ClosureFunc f2{std::move(f)}; 247 | f = std::move(f2); 248 | } 249 | 250 | PERF_TEST(CompareWithStdFunction, StdFunction_Move) { 251 | static std::function f = {[]{}}; 252 | std::function f2{std::move(f)}; 253 | f = std::move(f2); 254 | } 255 | -------------------------------------------------------------------------------- /test/timer_wheel_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 
14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #include 31 | #include 32 | #include 33 | #include "gtestx/gtestx.h" 34 | #include "ccbase/timer_wheel.h" 35 | 36 | class TimerWheelTest : public testing::Test { 37 | protected: 38 | TimerWheelTest() { 39 | } 40 | void SetUp() { 41 | } 42 | void TearDown() { 43 | } 44 | ccb::TimerWheel tw_; 45 | int timers_ = 0; 46 | size_t count_ = 0; 47 | }; 48 | 49 | TEST_F(TimerWheelTest, Simple) { 50 | int check = 0; 51 | // 0ms 52 | tw_.AddTimer(0, [&check] { 53 | check--; 54 | }); 55 | check++; 56 | tw_.MoveOn(); 57 | ASSERT_EQ(0, check); 58 | // 5ms 59 | tw_.AddTimer(5, [&check] { 60 | check -= 5; 61 | }); 62 | check += 5; 63 | usleep(2000); 64 | tw_.MoveOn(); 65 | EXPECT_EQ(5, check); 66 | usleep(4000); 67 | tw_.MoveOn(); 68 | ASSERT_EQ(0, check); 69 | // 500ms 70 | tw_.AddTimer(500, [&check] { 71 | check -= 500; 72 | }); 73 | check += 500; 74 | usleep(501000); 75 | tw_.MoveOn(); 76 | ASSERT_EQ(0, check); 77 | } 78 | 79 | TEST_F(TimerWheelTest, Owner) { 80 | int check = 0; 81 | { 82 | ccb::TimerOwner to; 83 | tw_.AddTimer(1, [&check] { 84 | check++; 85 | }, &to); 86 | EXPECT_EQ(1UL, tw_.GetTimerCount()); 87 | } 88 | EXPECT_EQ(0UL, tw_.GetTimerCount()); 89 | tw_.MoveOn(); 90 | usleep(2000); 91 | tw_.MoveOn(); 92 | EXPECT_EQ(0, check); 93 | 94 | { 95 | ccb::TimerOwner to; 96 | tw_.AddTimer(1, [&check] { 97 | check++; 98 | }, &to); 99 | EXPECT_EQ(1UL, tw_.GetTimerCount()); 100 | usleep(2000); 101 | tw_.MoveOn(); 102 | EXPECT_EQ(0UL, tw_.GetTimerCount()); 103 | EXPECT_EQ(1, check--); 104 | } 105 | EXPECT_EQ(0UL, tw_.GetTimerCount()); 106 | EXPECT_EQ(0, check); 107 | 108 | { 109 | ccb::TimerOwner to; 110 | tw_.AddTimer(1, [&check] { 111 | check++; 112 | }, &to); 113 | EXPECT_EQ(1UL, tw_.GetTimerCount()); 114 | tw_.AddTimer(1, [&check] { 115 | check++; 116 | }, &to); 117 | EXPECT_EQ(1UL, tw_.GetTimerCount()); 118 | usleep(2000); 119 | tw_.MoveOn(); 120 | EXPECT_EQ(0UL, tw_.GetTimerCount()); 121 | EXPECT_EQ(1, check--); 122 | tw_.AddTimer(1, [&check] { 123 | check++; 
124 | }, &to); 125 | EXPECT_EQ(1UL, tw_.GetTimerCount()); 126 | } 127 | EXPECT_EQ(0UL, tw_.GetTimerCount()); 128 | usleep(2000); 129 | tw_.MoveOn(); 130 | EXPECT_EQ(0, check); 131 | } 132 | 133 | TEST_F(TimerWheelTest, Cancel) { 134 | int check = 0; 135 | ccb::TimerOwner to; 136 | tw_.AddTimer(1, [&check] { 137 | check++; 138 | }, &to); 139 | EXPECT_EQ(1UL, tw_.GetTimerCount()); 140 | to.Cancel(); 141 | EXPECT_EQ(0UL, tw_.GetTimerCount()); 142 | usleep(2000); 143 | tw_.MoveOn(); 144 | EXPECT_EQ(0, check); 145 | 146 | tw_.ResetTimer(to, 1); 147 | EXPECT_EQ(1UL, tw_.GetTimerCount()); 148 | usleep(2000); 149 | tw_.MoveOn(); 150 | EXPECT_EQ(1, check); 151 | EXPECT_EQ(0UL, tw_.GetTimerCount()); 152 | to.Cancel(); 153 | } 154 | 155 | TEST_F(TimerWheelTest, Reset) { 156 | int check = 0; 157 | ccb::TimerOwner to; 158 | tw_.AddTimer(0, [&check] { 159 | check--; 160 | }, &to); 161 | // reset pending timer 162 | tw_.ResetTimer(to, 5); 163 | tw_.MoveOn(); 164 | ASSERT_EQ(0, check); 165 | check++; 166 | usleep(10000); 167 | tw_.MoveOn(); 168 | ASSERT_EQ(0, check); 169 | usleep(10000); 170 | // reset launched timer 171 | tw_.ResetTimer(to, 0); 172 | check++; 173 | tw_.MoveOn(); 174 | ASSERT_EQ(0, check); 175 | // reset timer to period timer 176 | tw_.ResetTimer(to, 5); 177 | tw_.ResetPeriodTimer(to, 10); 178 | check += 2; 179 | usleep(25000); 180 | tw_.MoveOn(); 181 | ASSERT_EQ(0, check); 182 | } 183 | 184 | static inline int64_t ts_diff(const timespec& ts1, const timespec& ts2) { 185 | return ((ts1.tv_sec - ts2.tv_sec) * 1000000000 + ts1.tv_nsec - ts2.tv_nsec); 186 | } 187 | 188 | TEST_F(TimerWheelTest, Period) { 189 | int check = 0; 190 | ASSERT_FALSE(tw_.AddPeriodTimer(0, []{})); 191 | ASSERT_TRUE(tw_.AddPeriodTimer(10, [&check] { 192 | check++; 193 | })); 194 | struct timespec ts; 195 | clock_gettime(CLOCK_MONOTONIC, &ts); 196 | for (int i = 1; i < 10; i++) { 197 | while (true) { 198 | usleep(500); 199 | tw_.MoveOn(); 200 | struct timespec cts; 201 | 
clock_gettime(CLOCK_MONOTONIC, &cts); 202 | if (ts_diff(cts, ts) >= i*10*1000000) 203 | break; 204 | } 205 | EXPECT_EQ(i, check); 206 | } 207 | } 208 | 209 | TEST_F(TimerWheelTest, GetCurrentTick) { 210 | ccb::tick_t init_tick = tw_.GetCurrentTick(); 211 | ccb::tick_t last_tick = init_tick; 212 | for (int i = 1; i < 10; i++) { 213 | usleep(500); 214 | tw_.MoveOn(); 215 | ccb::tick_t cur_tick = tw_.GetCurrentTick(); 216 | ASSERT_TRUE(last_tick == cur_tick || last_tick + 1 == cur_tick); 217 | last_tick = cur_tick; 218 | } 219 | ASSERT_LT(init_tick, last_tick); 220 | } 221 | 222 | TEST_F(TimerWheelTest, DeleteNodeTrackingTest) { 223 | int check = 0; 224 | ccb::TimerOwner* to = new ccb::TimerOwner; 225 | tw_.AddTimer(0, [&check, to] { 226 | check++; 227 | delete to; 228 | }); 229 | tw_.AddTimer(0, [&check] { 230 | check++; 231 | }, to); 232 | EXPECT_EQ(2UL, tw_.GetTimerCount()); 233 | tw_.MoveOn(); 234 | ASSERT_EQ(1, check); 235 | } 236 | 237 | PERF_TEST_F(TimerWheelTest, AddTimerPerf) { 238 | timers_++; 239 | tw_.AddTimer(1, [this]{ 240 | timers_--; 241 | }); 242 | if ((count_ & 0x3ff) == 0) { 243 | tw_.MoveOn(); 244 | } 245 | if ((++count_ & 0x3fffff) == 1) { 246 | fprintf(stderr, "pending %d timers\n", timers_); 247 | } 248 | } 249 | 250 | PERF_TEST_F(TimerWheelTest, ResetTimerPerf) { 251 | static ccb::TimerOwner owner; 252 | if (!owner.has_timer()) { 253 | tw_.AddTimer(1, []{}, &owner); 254 | } 255 | tw_.ResetTimer(owner, 1); 256 | } 257 | 258 | class TimerWheelNoLockTest : public testing::Test { 259 | protected: 260 | TimerWheelNoLockTest() { 261 | } 262 | void SetUp() { 263 | } 264 | void TearDown() { 265 | } 266 | ccb::TimerWheel tw_{1000, false}; 267 | int timers_ = 0; 268 | size_t count_ = 0; 269 | }; 270 | 271 | PERF_TEST_F(TimerWheelNoLockTest, AddTimerPerf) { 272 | timers_++; 273 | tw_.AddTimer(1, [this]{ 274 | timers_--; 275 | }); 276 | if ((count_ & 0x3ff) == 0) { 277 | tw_.MoveOn(); 278 | } 279 | if ((++count_ & 0x3fffff) == 1) { 280 | fprintf(stderr, 
"pending %d timers\n", timers_); 281 | } 282 | } 283 | 284 | PERF_TEST_F(TimerWheelNoLockTest, ResetTimerPerf) { 285 | static ccb::TimerOwner owner; 286 | if (!owner.has_timer()) { 287 | tw_.AddTimer(1, []{}, &owner); 288 | } 289 | tw_.ResetTimer(owner, 1); 290 | } 291 | 292 | class TimerWheelMTTest : public testing::Test { 293 | protected: 294 | void SetUp() { 295 | thread_ = std::thread([this] { 296 | while (!stop_) { 297 | tw_.MoveOn(); 298 | usleep(200); 299 | } 300 | }); 301 | } 302 | void TearDown() { 303 | stop_ = true; 304 | thread_.join(); 305 | } 306 | ccb::TimerWheel tw_; 307 | std::thread thread_; 308 | size_t count_ = 0; 309 | std::atomic timers_ = {0}; 310 | std::atomic stop_ = {false}; 311 | }; 312 | 313 | TEST_F(TimerWheelMTTest, Simple) { 314 | std::atomic check{0}; 315 | // 0ms 316 | tw_.AddTimer(0, [&check] { 317 | check--; 318 | }); 319 | check++; 320 | usleep(10000); 321 | ASSERT_EQ(0, check); 322 | // 50ms 323 | tw_.AddTimer(50, [&check] { 324 | check -= 5; 325 | }); 326 | check += 5; 327 | usleep(20000); 328 | EXPECT_EQ(5, check); 329 | usleep(40000); 330 | ASSERT_EQ(0, check); 331 | // 500ms 332 | tw_.AddTimer(500, [&check] { 333 | check -= 500; 334 | }); 335 | check += 500; 336 | usleep(510000); 337 | ASSERT_EQ(0, check); 338 | } 339 | 340 | PERF_TEST_F(TimerWheelMTTest, AddTimerPerf) { 341 | timers_++; 342 | bool res = tw_.AddTimer(1, [this]{ 343 | timers_--; 344 | }); 345 | ASSERT_TRUE(res) << PERF_ABORT; 346 | if ((++count_ & 0x3fffff) == 1) { 347 | fprintf(stderr, "pending %d timers\n", static_cast(timers_)); 348 | } 349 | } 350 | 351 | -------------------------------------------------------------------------------- /src/ccbase/worker_pool.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 
3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include "ccbase/thread.h" 35 | #include "ccbase/worker_pool.h" 36 | 37 | namespace ccb { 38 | 39 | namespace { 40 | 41 | // wait 10 seconds before shrinking workers 42 | constexpr size_t kShrinkWorkersWaitMs = 10000; 43 | 44 | size_t HighWatermark(size_t total_workers) { 45 | // 3/4 total-workers 46 | return total_workers - total_workers / 4; 47 | } 48 | 49 | size_t ExpandingNumber(size_t total_workers, size_t max) { 50 | // 1/2 (at least 1) total-workers 51 | return std::min(total_workers / 2 + 1, 52 | (max > total_workers ? max - total_workers : 0)); 53 | } 54 | 55 | size_t LowWatermark(size_t total_workers) { 56 | // 1/4 total-workers 57 | return total_workers / 4; 58 | } 59 | 60 | } // namespace 61 | 62 | thread_local WorkerPool::Worker* WorkerPool::Worker::tls_self_ = nullptr; 63 | 64 | WorkerPool::Worker::Worker(WorkerPool* pool, size_t id, 65 | std::shared_ptr context) 66 | : pool_(pool), 67 | id_(id), 68 | context_(context), 69 | stop_flag_(false) { 70 | char name[16]; 71 | snprintf(name, sizeof(name), "wp%lu-%lu", pool->id(), id); 72 | thread_ = CreateThread(name, BindClosure(this, &Worker::WorkerMainEntry)); 73 | } 74 | 75 | WorkerPool::Worker::~Worker() { 76 | if (thread_.joinable()) { 77 | stop_flag_.store(true, std::memory_order_release); 78 | thread_.join(); 79 | } 80 | } 81 | 82 | void WorkerPool::Worker::WorkerMainEntry() { 83 | tls_self_ = this; 84 | std::unique_ptr self_deleter; 85 | ClosureFunc task_func; 86 | while (pool_->WorkerPollTask(this, &task_func)) { 87 | pool_->WorkerBeginProcess(this); 88 | task_func(); 89 | if (pool_->WorkerEndProcess(this)) { 90 | // this worker is retired 91 | thread_.detach(); 92 | self_deleter.reset(this); 93 | break; 94 | } 95 | } 96 | } 97 | 98 | 99 | WorkerPool::WorkerPool(size_t min_workers, 100 | size_t max_workers, 101 | size_t queue_size) 102 | : WorkerPool(min_workers, max_workers, queue_size, [](size_t) { 103 | static std::shared_ptr 
default_context{new Context}; 104 | return default_context; 105 | }) { 106 | } 107 | 108 | WorkerPool::WorkerPool(size_t min_workers, 109 | size_t max_workers, 110 | size_t queue_size, 111 | ContextSupplier context_supplier) 112 | : min_workers_(min_workers), 113 | max_workers_(max_workers), 114 | total_workers_(0), 115 | busy_workers_(0), 116 | next_worker_id_(0), 117 | last_above_low_watermark_ts_(0), 118 | timer_wheel_(1000, true), 119 | sched_timer_task_(BindClosure(this, &WorkerPool::SchedTimerTaskInLock)), 120 | task_queue_(std::make_shared(queue_size)), 121 | shared_inq_(task_queue_->RegisterConsumer()), 122 | context_supplier_(context_supplier) { 123 | ExpandWorkersInLock(min_workers); 124 | if (total_workers_ < min_workers_) { 125 | throw std::runtime_error("create minimal workers failed"); 126 | } 127 | } 128 | 129 | WorkerPool::~WorkerPool() { 130 | std::lock_guard locker(updating_mutex_); 131 | // signal all worker threads to exit 132 | for (auto& entry : workers_) { 133 | entry.second->stop_flag_.store(true, std::memory_order_release); 134 | } 135 | workers_.clear(); 136 | } 137 | 138 | bool WorkerPool::WorkerPollTask(Worker* worker, ClosureFunc* task) { 139 | std::lock_guard lock(polling_mutex_); 140 | while (!worker->stop_flag_.load(std::memory_order_acquire)) { 141 | if (PollTimerTaskInLock(task)) { 142 | return true; 143 | } 144 | if (shared_inq_->Pop(task)) { 145 | return true; 146 | } 147 | usleep(1000); 148 | } 149 | return shared_inq_->Pop(task); 150 | } 151 | 152 | bool WorkerPool::PollTimerTaskInLock(ClosureFunc* task) { 153 | timer_wheel_.MoveOn(sched_timer_task_); 154 | if (timer_task_queue_.empty()) { 155 | return false; 156 | } else { 157 | *task = std::move(timer_task_queue_.front()); 158 | timer_task_queue_.pop(); 159 | return true; 160 | } 161 | } 162 | 163 | void WorkerPool::SchedTimerTaskInLock(ClosureFunc task) { 164 | timer_task_queue_.push(std::move(task)); 165 | } 166 | 167 | void WorkerPool::WorkerBeginProcess(Worker* worker) { 
168 | busy_workers_++; 169 | if (CheckHighWatermark() && updating_mutex_.try_lock()) { 170 | if (CheckHighWatermark()) { 171 | ExpandWorkersInLock(ExpandingNumber( 172 | total_workers_.load(std::memory_order_relaxed), max_workers_)); 173 | } 174 | updating_mutex_.unlock(); 175 | } 176 | } 177 | 178 | bool WorkerPool::WorkerEndProcess(Worker* worker) { 179 | bool retire_this_worker = false; 180 | busy_workers_--; 181 | if (CheckLowWatermark() && updating_mutex_.try_lock()) { 182 | if (CheckLowWatermark()) { 183 | RetireWorkerInLock(worker); 184 | retire_this_worker = true; 185 | } 186 | updating_mutex_.unlock(); 187 | } 188 | return retire_this_worker; 189 | } 190 | 191 | bool WorkerPool::CheckHighWatermark() { 192 | if (total_workers_.load(std::memory_order_relaxed) >= max_workers_) { 193 | return false; 194 | } 195 | if (busy_workers_.load(std::memory_order_relaxed) 196 | >= HighWatermark(total_workers_.load(std::memory_order_relaxed))) { 197 | last_above_low_watermark_ts_.store(timer_wheel_.GetCurrentTick(), 198 | std::memory_order_relaxed); 199 | return true; 200 | } else { 201 | return false; 202 | } 203 | } 204 | 205 | bool WorkerPool::CheckLowWatermark() { 206 | if (total_workers_.load(std::memory_order_relaxed) <= min_workers_) { 207 | return false; 208 | } 209 | if (busy_workers_.load(std::memory_order_relaxed) 210 | <= LowWatermark(total_workers_.load(std::memory_order_relaxed))) { 211 | if (last_above_low_watermark_ts_.load(std::memory_order_relaxed) 212 | + kShrinkWorkersWaitMs <= timer_wheel_.GetCurrentTick()) { 213 | return true; 214 | } else { 215 | return false; 216 | } 217 | } else { 218 | last_above_low_watermark_ts_.store(timer_wheel_.GetCurrentTick(), 219 | std::memory_order_relaxed); 220 | return false; 221 | } 222 | } 223 | 224 | void WorkerPool::ExpandWorkersInLock(size_t num) { 225 | for (size_t i = 0; i < num; i++) { 226 | size_t worker_id = next_worker_id_++; 227 | std::shared_ptr context = context_supplier_(worker_id); 228 | if (!context) 
{ 229 | break; 230 | } 231 | workers_[worker_id].reset(new Worker(this, worker_id, std::move(context))); 232 | } 233 | total_workers_.store(workers_.size(), std::memory_order_relaxed); 234 | } 235 | 236 | void WorkerPool::RetireWorkerInLock(Worker* worker) { 237 | auto it = workers_.find(worker->id()); 238 | it->second.release(); 239 | workers_.erase(it); 240 | total_workers_.store(workers_.size(), std::memory_order_relaxed); 241 | } 242 | 243 | WorkerPool::TaskQueue::OutQueue* WorkerPool::GetOutQueue() { 244 | auto& client_ctx = tls_client_ctx_.get(); 245 | if (!client_ctx) { 246 | client_ctx.queue_holder = task_queue_; 247 | client_ctx.out_queue = task_queue_->RegisterProducer(); 248 | } 249 | return client_ctx.out_queue; 250 | } 251 | 252 | bool WorkerPool::PostTask(ClosureFunc func) { 253 | TaskQueue::OutQueue* outq = GetOutQueue(); 254 | return outq->Push(std::move(func)); 255 | } 256 | 257 | bool WorkerPool::PostTask(ClosureFunc func, size_t delay_ms) { 258 | TaskQueue::OutQueue* outq = GetOutQueue(); 259 | return outq->Push([func, delay_ms] { 260 | Worker::self()->timer_wheel()->AddTimer(delay_ms, std::move(func)); 261 | }); 262 | } 263 | 264 | bool WorkerPool::PostPeriodTask(ClosureFunc func, size_t period_ms) { 265 | TaskQueue::OutQueue* outq = GetOutQueue(); 266 | return outq->Push([func, period_ms] { 267 | Worker::self()->timer_wheel()->AddPeriodTimer(period_ms, std::move(func)); 268 | }); 269 | } 270 | 271 | } // namespace ccb 272 | -------------------------------------------------------------------------------- /test/concurrent_ptr_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2016-2017, Bin Wei 2 | * All rights reserved. 
3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include "gtestx/gtestx.h" 35 | #include "ccbase/concurrent_ptr.h" 36 | 37 | namespace { 38 | 39 | class TraceableObj { 40 | public: 41 | TraceableObj() : val_(1) {} 42 | ~TraceableObj() { val_ = 0; } 43 | 44 | int val() const { 45 | return val_; 46 | } 47 | static size_t allocated_objs() { 48 | return allocated_objs_; 49 | } 50 | 51 | static void* operator new(size_t sz) { 52 | allocated_objs_++; 53 | return ::operator new(sz); 54 | } 55 | static void operator delete(void* ptr, size_t sz) { 56 | allocated_objs_--; 57 | ::operator delete(ptr); 58 | } 59 | 60 | private: 61 | int val_; 62 | static std::atomic allocated_objs_; 63 | }; 64 | 65 | std::atomic TraceableObj::allocated_objs_{0}; 66 | 67 | template > 68 | using TestTypes = testing::Types, 69 | ccb::EpochBasedReclamation, 70 | ccb::HazardPtrReclamation>; 71 | 72 | } // namespace 73 | 74 | template 75 | class ConcurrentPtrTest : public testing::Test { 76 | protected: 77 | void SetUp() { 78 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 79 | } 80 | void TearDown() { 81 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 82 | } 83 | 84 | ccb::ConcurrentPtr, 86 | RType> conc_ptr_; 87 | }; 88 | TYPED_TEST_CASE(ConcurrentPtrTest, TestTypes); 89 | 90 | TYPED_TEST(ConcurrentPtrTest, ReadLock) { 91 | TraceableObj* ptr = new TraceableObj; 92 | this->conc_ptr_.Reset(ptr); 93 | ASSERT_EQ(1, TraceableObj::allocated_objs()); 94 | TraceableObj* rp = this->conc_ptr_.ReadLock(); 95 | ASSERT_EQ(ptr, rp); 96 | ASSERT_EQ(1, rp->val()); 97 | this->conc_ptr_.ReadUnlock(); 98 | this->conc_ptr_.Reset(true); 99 | } 100 | 101 | TYPED_TEST(ConcurrentPtrTest, Reader) { 102 | TraceableObj* ptr = new TraceableObj; 103 | this->conc_ptr_.Reset(ptr); 104 | ASSERT_EQ(1, TraceableObj::allocated_objs()); 105 | { 106 | typename decltype(this->conc_ptr_)::Reader reader(&this->conc_ptr_); 107 | ASSERT_EQ(ptr, reader.get()); 108 | ASSERT_EQ(1, reader->val()); 109 | } 110 | 
this->conc_ptr_.Reset(true); 111 | } 112 | 113 | TYPED_TEST(ConcurrentPtrTest, Reset) { 114 | this->conc_ptr_.Reset(new TraceableObj); 115 | TraceableObj* rp = this->conc_ptr_.ReadLock(); 116 | ASSERT_EQ(1, rp->val()); 117 | this->conc_ptr_.ReadUnlock(); 118 | this->conc_ptr_.Reset(new TraceableObj); 119 | rp = this->conc_ptr_.ReadLock(); 120 | ASSERT_EQ(1, rp->val()); 121 | this->conc_ptr_.ReadUnlock(); 122 | this->conc_ptr_.Reset(true); 123 | } 124 | 125 | template 126 | class ConcurrentPtrPerfTest : public ConcurrentPtrTest { 127 | protected: 128 | ConcurrentPtrPerfTest() : stop_flag_(false) {} 129 | 130 | void SetUp() { 131 | ConcurrentPtrTest::SetUp(); 132 | this->conc_ptr_.Reset(new TraceableObj); 133 | auto reader_code = [this] { 134 | while (!stop_flag_.load(std::memory_order_relaxed)) { 135 | typename decltype(this->conc_ptr_)::Reader reader(&this->conc_ptr_); 136 | for (int i = 0; i < 100; i++) { 137 | ASSERT_EQ(1, reader->val()); 138 | } 139 | } 140 | }; 141 | for (auto& t : reader_tasks_) { 142 | t = std::thread(reader_code); 143 | } 144 | auto writer_code = [this] { 145 | while (!stop_flag_.load(std::memory_order_relaxed)) { 146 | this->conc_ptr_.Reset(new TraceableObj); 147 | } 148 | this->conc_ptr_.Reset(new TraceableObj, true); 149 | }; 150 | for (auto& t : writer_tasks_) { 151 | t = std::thread(writer_code); 152 | } 153 | auto spawn_code = [this] { 154 | while (!stop_flag_.load(std::memory_order_relaxed)) { 155 | std::thread([this] { 156 | typename decltype(this->conc_ptr_)::Reader reader(&this->conc_ptr_); 157 | for (int i = 0; i < 100; i++) { 158 | ASSERT_EQ(1, reader->val()); 159 | } 160 | }).join(); 161 | } 162 | }; 163 | for (auto& t : spawn_tasks_) { 164 | t = std::thread(spawn_code); 165 | } 166 | } 167 | 168 | void TearDown() { 169 | stop_flag_.store(true); 170 | for (auto& t : reader_tasks_) { 171 | t.join(); 172 | } 173 | for (auto& t : writer_tasks_) { 174 | t.join(); 175 | } 176 | for (auto& t : spawn_tasks_) { 177 | t.join(); 178 | } 
179 | this->conc_ptr_.Reset(true); 180 | ConcurrentPtrTest::TearDown(); 181 | } 182 | 183 | std::thread reader_tasks_[1]; 184 | std::thread writer_tasks_[1]; 185 | std::thread spawn_tasks_[1]; 186 | std::atomic stop_flag_; 187 | }; 188 | TYPED_TEST_CASE(ConcurrentPtrPerfTest, TestTypes); 189 | 190 | TYPED_PERF_TEST(ConcurrentPtrPerfTest, ResetPerf) { 191 | this->conc_ptr_.Reset(new TraceableObj); 192 | } 193 | 194 | TYPED_PERF_TEST(ConcurrentPtrPerfTest, ReaderPerf) { 195 | typename decltype(this->conc_ptr_)::Reader reader(&this->conc_ptr_); 196 | ASSERT_EQ(1, reader->val()) << PERF_ABORT; 197 | } 198 | 199 | template 200 | class ConcurrentSharedPtrTest : public testing::Test { 201 | protected: 202 | void SetUp() { 203 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 204 | } 205 | void TearDown() { 206 | ASSERT_EQ(0, TraceableObj::allocated_objs()); 207 | } 208 | 209 | ccb::ConcurrentSharedPtr, 211 | RType> cs_ptr_; 212 | }; 213 | TYPED_TEST_CASE(ConcurrentSharedPtrTest, TestTypes>); 214 | 215 | TYPED_TEST(ConcurrentSharedPtrTest, Read) { 216 | TraceableObj* ptr = new TraceableObj; 217 | this->cs_ptr_.Reset(ptr); 218 | ASSERT_EQ(1, TraceableObj::allocated_objs()); 219 | std::shared_ptr rp = this->cs_ptr_.Get(); 220 | ASSERT_EQ(ptr, rp.get()); 221 | ASSERT_EQ(1, rp->val()); 222 | ASSERT_EQ(1, this->cs_ptr_->val()); 223 | this->cs_ptr_.Reset(true); 224 | } 225 | 226 | TYPED_TEST(ConcurrentSharedPtrTest, Reset) { 227 | this->cs_ptr_.Reset(new TraceableObj); 228 | std::shared_ptr rp = this->cs_ptr_.Get(); 229 | ASSERT_EQ(1, rp->val()); 230 | this->cs_ptr_.Reset(std::make_shared()); 231 | ASSERT_EQ(1, this->cs_ptr_->val()); 232 | ASSERT_NE(rp, this->cs_ptr_.Get()); 233 | this->cs_ptr_.Reset(true); 234 | } 235 | 236 | template 237 | class ConcurrentSharedPtrPerfTest : public ConcurrentSharedPtrTest { 238 | protected: 239 | ConcurrentSharedPtrPerfTest() : stop_flag_(false) {} 240 | 241 | void SetUp() { 242 | ConcurrentSharedPtrTest::SetUp(); 243 | this->cs_ptr_.Reset(new 
TraceableObj); 244 | auto reader_code = [this] { 245 | while (!stop_flag_.load(std::memory_order_relaxed)) { 246 | std::shared_ptr ptr = this->cs_ptr_.Get(); 247 | for (int i = 0; i < 100; i++) { 248 | ASSERT_EQ(1, ptr->val()); 249 | } 250 | } 251 | }; 252 | for (auto& t : reader_tasks_) { 253 | t = std::thread(reader_code); 254 | } 255 | auto writer_code = [this] { 256 | while (!stop_flag_.load(std::memory_order_relaxed)) { 257 | this->cs_ptr_.Reset(new TraceableObj); 258 | } 259 | this->cs_ptr_.Reset(new TraceableObj, true); 260 | }; 261 | for (auto& t : writer_tasks_) { 262 | t = std::thread(writer_code); 263 | } 264 | auto spawn_code = [this] { 265 | while (!stop_flag_.load(std::memory_order_relaxed)) { 266 | std::thread([this] { 267 | std::shared_ptr ptr = this->cs_ptr_.Get(); 268 | for (int i = 0; i < 100; i++) { 269 | ASSERT_EQ(1, ptr->val()); 270 | } 271 | }).join(); 272 | } 273 | }; 274 | for (auto& t : spawn_tasks_) { 275 | t = std::thread(spawn_code); 276 | } 277 | } 278 | 279 | void TearDown() { 280 | stop_flag_.store(true); 281 | for (auto& t : reader_tasks_) { 282 | t.join(); 283 | } 284 | for (auto& t : writer_tasks_) { 285 | t.join(); 286 | } 287 | for (auto& t : spawn_tasks_) { 288 | t.join(); 289 | } 290 | this->cs_ptr_.Reset(true); 291 | ConcurrentSharedPtrTest::TearDown(); 292 | } 293 | 294 | std::thread reader_tasks_[1]; 295 | std::thread writer_tasks_[1]; 296 | std::thread spawn_tasks_[1]; 297 | std::atomic stop_flag_; 298 | }; 299 | TYPED_TEST_CASE(ConcurrentSharedPtrPerfTest, TestTypes>); 300 | 301 | TYPED_PERF_TEST(ConcurrentSharedPtrPerfTest, ResetPerf) { 302 | this->cs_ptr_.Reset(new TraceableObj); 303 | } 304 | 305 | TYPED_PERF_TEST(ConcurrentSharedPtrPerfTest, ReaderPerf) { 306 | ASSERT_EQ(1, this->cs_ptr_->val()) << PERF_ABORT; 307 | } 308 | 309 | // atomic std::shared_ptr is not available below gcc-5.0 310 | #if defined(__GNUC__) && __GNUC__ >= 5 311 | class StdAtomicSharedPtrPerfTest : public testing::Test { 312 | protected: 313 | 
StdAtomicSharedPtrPerfTest() : stop_flag_(false) {} 314 | 315 | void SetUp() { 316 | std::atomic_store(&this->as_ptr_, std::make_shared()); 317 | auto reader_code = [this] { 318 | while (!stop_flag_.load(std::memory_order_relaxed)) { 319 | std::shared_ptr ptr = std::atomic_load(&this->as_ptr_); 320 | for (int i = 0; i < 100; i++) { 321 | ASSERT_EQ(1, ptr->val()); 322 | } 323 | } 324 | }; 325 | for (auto& t : reader_tasks_) { 326 | t = std::thread(reader_code); 327 | } 328 | auto writer_code = [this] { 329 | while (!stop_flag_.load(std::memory_order_relaxed)) { 330 | std::atomic_store(&this->as_ptr_, std::make_shared()); 331 | } 332 | }; 333 | for (auto& t : writer_tasks_) { 334 | t = std::thread(writer_code); 335 | } 336 | auto spawn_code = [this] { 337 | while (!stop_flag_.load(std::memory_order_relaxed)) { 338 | std::thread([this] { 339 | std::shared_ptr ptr = std::atomic_load(&this->as_ptr_); 340 | for (int i = 0; i < 100; i++) { 341 | ASSERT_EQ(1, ptr->val()); 342 | } 343 | }).join(); 344 | } 345 | }; 346 | for (auto& t : spawn_tasks_) { 347 | t = std::thread(spawn_code); 348 | } 349 | } 350 | 351 | void TearDown() { 352 | stop_flag_.store(true); 353 | for (auto& t : reader_tasks_) { 354 | t.join(); 355 | } 356 | for (auto& t : writer_tasks_) { 357 | t.join(); 358 | } 359 | for (auto& t : spawn_tasks_) { 360 | t.join(); 361 | } 362 | } 363 | 364 | std::thread reader_tasks_[1]; 365 | std::thread writer_tasks_[1]; 366 | std::thread spawn_tasks_[1]; 367 | std::atomic stop_flag_; 368 | std::shared_ptr as_ptr_; 369 | }; 370 | 371 | PERF_TEST_F(StdAtomicSharedPtrPerfTest, ResetPerf) { 372 | std::atomic_store(&this->as_ptr_, std::make_shared()); 373 | } 374 | 375 | PERF_TEST_F(StdAtomicSharedPtrPerfTest, ReaderPerf) { 376 | ASSERT_EQ(1, std::atomic_load(&this->as_ptr_)->val()) << PERF_ABORT; 377 | } 378 | #endif 379 | 380 | -------------------------------------------------------------------------------- /src/ccbase/dispatch_queue.h: 
-------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #ifndef CCBASE_DISPATCH_QUEUE_H_ 31 | #define CCBASE_DISPATCH_QUEUE_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include "ccbase/fast_queue.h" 41 | 42 | namespace ccb { 43 | 44 | template 46 | class DispatchQueue { 47 | public: 48 | class OutQueue { 49 | public: 50 | virtual bool Push(const T& val) = 0; 51 | virtual bool Push(T&& val) = 0; 52 | virtual bool Push(size_t idx, const T& val) = 0; 53 | virtual bool Push(size_t idx, T&& val) = 0; 54 | virtual void Unregister() = 0; 55 | protected: 56 | virtual ~OutQueue() {} 57 | }; 58 | 59 | class InQueue { 60 | public: 61 | virtual bool Pop(T* ptr) = 0; 62 | virtual bool PopWait(T* ptr, int timeout) = 0; 63 | protected: 64 | virtual ~InQueue() {} 65 | }; 66 | 67 | explicit DispatchQueue(size_t qlen); 68 | virtual ~DispatchQueue(); 69 | 70 | OutQueue* RegisterProducer(); 71 | InQueue* RegisterConsumer(); 72 | void UnregisterProducer(OutQueue* outq); 73 | 74 | private: 75 | CCB_NOT_COPYABLE_AND_MOVABLE(DispatchQueue); 76 | 77 | using Queue = FastQueue; 78 | class Producer; 79 | class Consumer; 80 | 81 | size_t qlen_; 82 | std::mutex mutex_; 83 | std::atomic producers_[kMaxProducers]; 84 | std::atomic consumers_[kMaxConsumers]; 85 | std::atomic producer_count_; 86 | std::atomic consumer_count_; 87 | std::vector reclaimed_producers_; 88 | }; 89 | 90 | 91 | template 92 | class DispatchQueue::Producer 93 | : public DispatchQueue::OutQueue { 94 | public: 95 | Producer(DispatchQueue* dq, size_t idx) 96 | : dispatch_queue_(dq), producer_index_(idx), 97 | is_registered_(true), cur_index_(-1U) { 98 | for (auto& ap : queue_vec_) 99 | // std::atomic_init is not available in gcc-4.9 100 | ap.store(nullptr, std::memory_order_relaxed); 101 | } 102 | bool Push(const T& val) override; 103 | bool Push(T&& val) override; 104 | bool Push(size_t idx, const T& val) override; 105 | bool Push(size_t idx, T&& val) override; 106 | void Unregister() override; 107 | 
108 | private: 109 | friend class DispatchQueue; 110 | DispatchQueue* dispatch_queue_; 111 | size_t producer_index_; 112 | bool is_registered_; 113 | size_t cur_index_; 114 | std::atomic queue_vec_[kMaxConsumers]; 115 | }; 116 | 117 | template 118 | bool DispatchQueue 119 | ::Producer::Push(const T& val) { 120 | if (!is_registered_) { 121 | throw std::logic_error("push unregistered OutQueue"); 122 | return false; 123 | } 124 | size_t last_index = cur_index_; 125 | for (cur_index_++; cur_index_ < kMaxConsumers; cur_index_++) { 126 | Queue* qptr = queue_vec_[cur_index_].load(std::memory_order_acquire); 127 | if (qptr == nullptr) 128 | break; 129 | if (qptr->Push(val)) 130 | return true; 131 | } 132 | for (cur_index_ = 0; cur_index_ <= last_index; cur_index_++) { 133 | Queue* qptr = queue_vec_[cur_index_].load(std::memory_order_acquire); 134 | if (qptr == nullptr) 135 | break; 136 | if (qptr->Push(val)) 137 | return true; 138 | } 139 | return false; 140 | } 141 | 142 | template 143 | bool DispatchQueue 144 | ::Producer::Push(T&& val) { 145 | if (!is_registered_) { 146 | throw std::logic_error("push unregistered OutQueue"); 147 | return false; 148 | } 149 | size_t last_index = cur_index_; 150 | for (cur_index_++; cur_index_ < kMaxConsumers; cur_index_++) { 151 | Queue* qptr = queue_vec_[cur_index_].load(std::memory_order_acquire); 152 | if (qptr == nullptr) 153 | break; 154 | if (qptr->Push(std::move(val))) 155 | return true; 156 | } 157 | for (cur_index_ = 0; cur_index_ <= last_index; cur_index_++) { 158 | Queue* qptr = queue_vec_[cur_index_].load(std::memory_order_acquire); 159 | if (qptr == nullptr) 160 | break; 161 | if (qptr->Push(std::move(val))) 162 | return true; 163 | } 164 | return false; 165 | } 166 | 167 | template 168 | bool DispatchQueue 169 | ::Producer::Push(size_t idx, const T& val) { 170 | if (!is_registered_) { 171 | throw std::logic_error("push unregistered OutQueue"); 172 | return false; 173 | } 174 | if (idx < kMaxConsumers) { 175 | Queue* qptr = 
queue_vec_[idx].load(std::memory_order_acquire); 176 | if (qptr && qptr->Push(val)) 177 | return true; 178 | } 179 | return false; 180 | } 181 | 182 | template 183 | bool DispatchQueue 184 | ::Producer::Push(size_t idx, T&& val) { 185 | if (!is_registered_) { 186 | throw std::logic_error("push unregistered OutQueue"); 187 | return false; 188 | } 189 | if (idx < kMaxConsumers) { 190 | Queue* qptr = queue_vec_[idx].load(std::memory_order_acquire); 191 | if (qptr && qptr->Push(std::move(val))) 192 | return true; 193 | } 194 | return false; 195 | } 196 | 197 | template 198 | void DispatchQueue 199 | ::Producer::Unregister() { 200 | dispatch_queue_->UnregisterProducer(this); 201 | } 202 | 203 | 204 | template 205 | class DispatchQueue::Consumer 206 | : public DispatchQueue::InQueue { 207 | public: 208 | Consumer(DispatchQueue* dq, size_t idx) 209 | : dispatch_queue_(dq), consumer_index_(idx), cur_index_(-1U), 210 | cur_index_read_cnt_(0) { 211 | for (auto& ap : queue_vec_) 212 | // std::atomic_init is not available in gcc-4.9 213 | ap.store(nullptr, std::memory_order_relaxed); 214 | } 215 | bool Pop(T* ptr) override; 216 | bool PopWait(T* ptr, int timeout) override; 217 | 218 | private: 219 | friend class DispatchQueue; 220 | static constexpr size_t kMaxStickyReadCnt = 32; 221 | DispatchQueue* dispatch_queue_; 222 | size_t consumer_index_; 223 | size_t cur_index_; 224 | size_t cur_index_read_cnt_; 225 | std::atomic queue_vec_[kMaxProducers]; 226 | }; 227 | 228 | template 229 | bool DispatchQueue 230 | ::Consumer::Pop(T* ptr) { 231 | // sticky read for performance 232 | if (cur_index_read_cnt_ && cur_index_read_cnt_ < kMaxStickyReadCnt) { 233 | Queue* qptr = queue_vec_[cur_index_].load(std::memory_order_acquire); 234 | if (qptr->Pop(ptr)) { 235 | cur_index_read_cnt_++; 236 | return true; 237 | } 238 | } 239 | cur_index_read_cnt_ = 0; 240 | 241 | size_t last_index = cur_index_; 242 | for (cur_index_++; cur_index_ < kMaxProducers; cur_index_++) { 243 | Queue* qptr = 
queue_vec_[cur_index_].load(std::memory_order_acquire); 244 | if (qptr == nullptr) 245 | break; 246 | if (qptr->Pop(ptr)) { 247 | cur_index_read_cnt_ = 1; 248 | return true; 249 | } 250 | } 251 | for (cur_index_ = 0; cur_index_ <= last_index; cur_index_++) { 252 | Queue* qptr = queue_vec_[cur_index_].load(std::memory_order_acquire); 253 | if (qptr == nullptr) 254 | break; 255 | if (qptr->Pop(ptr)) { 256 | cur_index_read_cnt_ = 1; 257 | return true; 258 | } 259 | } 260 | return false; 261 | } 262 | 263 | template 264 | bool DispatchQueue 265 | ::Consumer::PopWait(T* ptr, int timeout) { 266 | // naive impl now 267 | int sleep_ms = 0; 268 | while (!Pop(ptr)) { 269 | if (timeout >= 0 && sleep_ms >= timeout) 270 | return false; 271 | usleep(1000); 272 | sleep_ms++; 273 | } 274 | return true; 275 | } 276 | 277 | 278 | template 279 | DispatchQueue::DispatchQueue(size_t qlen) 280 | : qlen_(qlen), producer_count_(0), consumer_count_(0) { 281 | // std::atomic_init is not available in gcc-4.9 282 | for (auto& pr : producers_) 283 | pr.store(nullptr, std::memory_order_relaxed); 284 | for (auto& co : consumers_) 285 | co.store(nullptr, std::memory_order_relaxed); 286 | } 287 | 288 | template 289 | DispatchQueue::~DispatchQueue() { 290 | for (size_t i = 0; i < producer_count_.load(); i++) { 291 | for (size_t j = 0; j < consumer_count_.load(); j++) { 292 | delete producers_[i].load()->queue_vec_[j]; 293 | } 294 | delete producers_[i].load(); 295 | producers_[i].store(nullptr); 296 | } 297 | for (size_t j = 0; j < consumer_count_.load(); j++) { 298 | delete consumers_[j].load(); 299 | consumers_[j].store(nullptr); 300 | } 301 | } 302 | 303 | template 304 | typename DispatchQueue::OutQueue* 305 | DispatchQueue::RegisterProducer() { 306 | std::lock_guard lock(mutex_); 307 | 308 | if (!reclaimed_producers_.empty()) { 309 | size_t index = reclaimed_producers_.back(); 310 | reclaimed_producers_.pop_back(); 311 | Producer* producer = producers_[index].load(std::memory_order_relaxed); 
312 | assert(producer && !producer->is_registered_); 313 | producer->is_registered_ = true; 314 | return producer; 315 | } 316 | 317 | size_t producer_count = producer_count_.load(std::memory_order_relaxed); 318 | if (producer_count >= kMaxProducers) 319 | return nullptr; 320 | 321 | Producer* producer = new Producer(this, producer_count); 322 | for (size_t i = 0; i < consumer_count_.load(std::memory_order_relaxed); i++) { 323 | Consumer* consumer = consumers_[i].load(std::memory_order_relaxed); 324 | Queue* queue = new Queue(qlen_); 325 | consumer->queue_vec_[producer_count] = queue; 326 | producer->queue_vec_[i] = queue; 327 | } 328 | producers_[producer_count].store(producer, std::memory_order_release); 329 | producer_count_.store(producer_count + 1, std::memory_order_release); 330 | return producer; 331 | } 332 | 333 | template 334 | typename DispatchQueue::InQueue* 335 | DispatchQueue::RegisterConsumer() { 336 | std::lock_guard lock(mutex_); 337 | 338 | size_t consumer_count = consumer_count_.load(std::memory_order_relaxed); 339 | if (consumer_count >= kMaxConsumers) 340 | return nullptr; 341 | 342 | Consumer* consumer = new Consumer(this, consumer_count); 343 | for (size_t i = 0; i < producer_count_.load(std::memory_order_relaxed); i++) { 344 | Producer* producer = producers_[i].load(std::memory_order_relaxed); 345 | Queue* queue = new Queue(qlen_); 346 | consumer->queue_vec_[i] = queue; 347 | producer->queue_vec_[consumer_count] = queue; 348 | } 349 | consumers_[consumer_count].store(consumer, std::memory_order_release); 350 | consumer_count_.store(consumer_count + 1, std::memory_order_release); 351 | return consumer; 352 | } 353 | 354 | template 355 | void DispatchQueue 356 | ::UnregisterProducer(OutQueue* outq) { 357 | std::lock_guard lock(mutex_); 358 | 359 | Producer* producer = static_cast(outq); 360 | if (producer->producer_index_ >= kMaxProducers || producer != 361 | producers_[producer->producer_index_].load(std::memory_order_relaxed)) { 362 | throw 
std::invalid_argument("invalid OutQueue to unregister"); 363 | } 364 | if (!producer->is_registered_) { 365 | throw std::logic_error("double unregister"); 366 | } 367 | producer->is_registered_ = false; 368 | reclaimed_producers_.push_back(producer->producer_index_); 369 | } 370 | 371 | } // namespace ccb 372 | 373 | #endif // CCBASE_DISPATCH_QUEUE_H_ 374 | -------------------------------------------------------------------------------- /src/ccbase/closure.h.pump: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CCBASE_CLOSURE_H_ 31 | #define CCBASE_CLOSURE_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include "ccbase/common.h" 37 | 38 | namespace ccb { 39 | 40 | namespace internal { 41 | 42 | class ClosureFuncBase; 43 | 44 | class ClosureBase { 45 | public: 46 | virtual ~ClosureBase() {} 47 | virtual bool IsPermanent() const = 0; 48 | ClosureBase() : ref_count_(1) {} 49 | ClosureBase(const ClosureBase& c) : ref_count_(1) {} 50 | private: 51 | void AddRef() { 52 | assert(ref_count_ > 0); 53 | ref_count_.fetch_add(1); 54 | } 55 | void DelRef() { 56 | assert(IsPermanent()); 57 | if (ref_count_.fetch_sub(1) == 1) { 58 | delete this; 59 | } 60 | } 61 | std::atomic ref_count_; 62 | friend ClosureFuncBase; 63 | }; 64 | 65 | template 66 | class Closure : public ClosureBase { 67 | public: 68 | }; 69 | 70 | $var max_args = 6 71 | $range i 0..max_args 72 | 73 | $for i [[ 74 | 75 | $range j 1..i 76 | 77 | template < 78 | typename R$for j [[, typename A$j]] 79 | > 80 | class Closure : public ClosureBase { 81 | public: 82 | virtual R Run($for j, [[A$j a$j]]) = 0; 83 | virtual Closure* Clone() = 0; 84 | }; 85 | 86 | ]] 87 | 88 | template 89 | class ConditionalAutoDeleter { 90 | public: 91 | explicit ConditionalAutoDeleter(T* p) 92 | : p_(p) { 93 | } 94 | ~ConditionalAutoDeleter() { 95 | if (Enabled) 96 | delete p_; 97 | } 98 | private: 99 | ConditionalAutoDeleter(const ConditionalAutoDeleter&); 100 | 
ConditionalAutoDeleter& operator=(const ConditionalAutoDeleter&); 101 | private: 102 | T* p_; 103 | }; 104 | 105 | $range nleft_args 0..max_args 106 | $range nbind_args 0..max_args 107 | 108 | $for nleft_args [[ 109 | $for nbind_args [[ 110 | 111 | $var nargs = nleft_args + nbind_args 112 | $range j 1..nbind_args 113 | $range k 1..nargs 114 | $range i nbind_args+1..nargs 115 | 116 | // Closures with $nleft_args args and $nbind_args pre-binded args 117 | 118 | template < 119 | bool Permanent, 120 | typename R, 121 | typename Class, 122 | typename MethodClass$for k [[, typename Arg$k]] $for j [[, typename PreArg$j]] 123 | > 124 | class MethodClosure_Arg$(nleft_args)_Bind$nbind_args : public Closure { 125 | public: 126 | typedef R(MethodClass::*MethodType)($for k, [[Arg$k]]); 127 | MethodClosure_Arg$(nleft_args)_Bind$nbind_args(Class *object, MethodType method$for j [[, PreArg$j pa$j]]): 128 | object_(object), method_(method)$for j [[, pa_$(j)_(pa$j)]] {} 129 | virtual R Run($for i, [[Arg$i arg$i]]) { 130 | ConditionalAutoDeleter self_deleter(this); 131 | return (object_->*method_)($for j, [[pa_$(j)_]]$if nleft_args > 0[[$if nbind_args > 0[[, ]]]]$for i, [[arg$i]]); 132 | } 133 | virtual Closure* Clone() { 134 | return new MethodClosure_Arg$(nleft_args)_Bind$nbind_args(*this); 135 | } 136 | virtual bool IsPermanent() const { return Permanent; } 137 | private: 138 | Class* object_; 139 | MethodType method_; 140 | 141 | $for j [[ 142 | PreArg$j pa_$(j)_; 143 | 144 | ]] 145 | }; 146 | 147 | template 148 | Closure* 149 | NewClosure(Class *object, R(MethodClass::*method)($for k, [[Arg$k]])$for j [[, PreArg$j pa$j]]) { 150 | return new MethodClosure_Arg$(nleft_args)_Bind$nbind_args( 151 | object, method$for j [[, pa$j]]); 152 | } 153 | 154 | template 155 | Closure* 156 | NewPermanentClosure(Class *object, R(MethodClass::*method)($for k, [[Arg$k]])$for j [[, PreArg$j pa$j]]) { 157 | return new MethodClosure_Arg$(nleft_args)_Bind$nbind_args( 158 | object, method$for j [[, 
pa$j]]); 159 | } 160 | 161 | template < 162 | bool Permanent, 163 | typename R$for k [[, typename Arg$k]]$for j [[, typename PreArg$j]] 164 | > 165 | class FunctionClosure_Arg$(nleft_args)_Bind$nbind_args : public Closure { 166 | public: 167 | typedef R(*FunctionType)($for k, [[Arg$k]]); 168 | FunctionClosure_Arg$(nleft_args)_Bind$nbind_args(FunctionType function$for j [[, PreArg$j pa$j]]): 169 | function_(function)$for j [[, pa_$(j)_(pa$j)]] {} 170 | virtual R Run($for i, [[Arg$i arg$i]]) { 171 | ConditionalAutoDeleter self_deleter(this); 172 | return function_($for j, [[pa_$(j)_]]$if nleft_args > 0[[$if nbind_args > 0[[, ]]]]$for i, [[arg$i]]); 173 | } 174 | virtual Closure* Clone() { 175 | return new FunctionClosure_Arg$(nleft_args)_Bind$nbind_args(*this); 176 | } 177 | virtual bool IsPermanent() const { return Permanent; } 178 | private: 179 | FunctionType function_; 180 | 181 | $for j [[ 182 | PreArg$j pa_$(j)_; 183 | 184 | ]] 185 | }; 186 | 187 | template 188 | Closure* 189 | NewClosure(R(*function)($for k, [[Arg$k]])$for j [[, PreArg$j pa$j]]) { 190 | return new FunctionClosure_Arg$(nleft_args)_Bind$nbind_args(function$for j [[, pa$j]]); 191 | } 192 | 193 | template 194 | Closure* 195 | NewPermanentClosure(R(*function)($for k, [[Arg$k]])$for j [[, PreArg$j pa$j]]) { 196 | return new FunctionClosure_Arg$(nleft_args)_Bind$nbind_args(function$for j [[, pa$j]]); 197 | } 198 | 199 | template < 200 | bool Permanent, typename F, 201 | typename R$for k [[, typename Arg$k]]$for j [[, typename PreArg$j]] 202 | > 203 | class FunctorClosure_Arg$(nleft_args)_Bind$nbind_args : public Closure { 204 | public: 205 | explicit FunctorClosure_Arg$(nleft_args)_Bind$nbind_args(const F& functor$for j [[, PreArg$j pa$j]]) 206 | : functor_(functor)$for j [[, pa_$(j)_(pa$j)]] {} 207 | explicit FunctorClosure_Arg$(nleft_args)_Bind$nbind_args(F&& functor$for j [[, PreArg$j pa$j]]) 208 | : functor_(std::move(functor))$for j [[, pa_$(j)_(pa$j)]] {} 209 | virtual R Run($for i, [[Arg$i 
arg$i]]) { 210 | ConditionalAutoDeleter self_deleter(this); 211 | return functor_($for j, [[pa_$(j)_]]$if nleft_args > 0[[$if nbind_args > 0[[, ]]]]$for i, [[arg$i]]); 212 | } 213 | virtual Closure* Clone() { 214 | return new FunctorClosure_Arg$(nleft_args)_Bind$nbind_args(*this); 215 | } 216 | bool IsPermanent() const { return Permanent; } 217 | private: 218 | F functor_; 219 | 220 | $for j [[ 221 | PreArg$j pa_$(j)_; 222 | 223 | ]] 224 | }; 225 | 226 | template 227 | Closure* 228 | NewClosure(F&& functor$for j [[, PreArg$j pa$j]]) { 229 | return new FunctorClosure_Arg$(nleft_args)_Bind$nbind_args::type, R$for k [[, Arg$k]]$for j [[, PreArg$j]]>( 230 | std::forward(functor)$for j [[, pa$j]]); 231 | } 232 | 233 | template 234 | Closure* 235 | NewPermanentClosure(F&& functor$for j [[, PreArg$j pa$j]]) { 236 | return new FunctorClosure_Arg$(nleft_args)_Bind$nbind_args::type, R$for k [[, Arg$k]]$for j [[, PreArg$j]]>( 237 | std::forward(functor)$for j [[, pa$j]]); 238 | } 239 | 240 | ]] 241 | ]] 242 | 243 | // Helpers for 0-Args-Functor 244 | 245 | template 246 | auto NewClosure(F&& functor) -> Closure* { 247 | return new FunctorClosure_Arg0_Bind0::type, decltype(functor())>(std::forward(functor)); 248 | } 249 | 250 | template 251 | auto NewPermanentClosure(F&& functor) -> Closure* { 252 | return new FunctorClosure_Arg0_Bind0::type, decltype(functor())>(std::forward(functor)); 253 | } 254 | 255 | // Base of ccb::ClosureFunc 256 | 257 | class ClosureFuncBase { 258 | protected: 259 | ClosureFuncBase() : p_(nullptr) {} 260 | ClosureFuncBase(std::nullptr_t) : p_(nullptr) {} // NOLINT 261 | explicit ClosureFuncBase(ClosureBase* p) noexcept 262 | : p_(p) { 263 | assert(!p || p->IsPermanent()); 264 | } 265 | ClosureFuncBase(const ClosureFuncBase& c) noexcept 266 | : ClosureFuncBase(c.share()) {} 267 | ClosureFuncBase(ClosureFuncBase&& c) noexcept 268 | : p_(c.release()) {} 269 | ~ClosureFuncBase() { 270 | if (p_) p_->DelRef(); 271 | } 272 | ClosureFuncBase& operator=(const 
ClosureFuncBase& c) { 273 | ClosureFuncBase{c}.swap(*this); 274 | return *this; 275 | } 276 | ClosureFuncBase& operator=(ClosureFuncBase&& c) { 277 | ClosureFuncBase{std::move(c)}.swap(*this); 278 | return *this; 279 | } 280 | ClosureBase* get() const { 281 | return p_; 282 | } 283 | void reset(ClosureBase* p) { 284 | ClosureFuncBase{p}.swap(*this); 285 | } 286 | void swap(ClosureFuncBase& c) { 287 | std::swap(p_, c.p_); 288 | } 289 | 290 | private: 291 | ClosureBase* share() const { 292 | if (p_) p_->AddRef(); 293 | return p_; 294 | } 295 | ClosureBase* release() { 296 | ClosureBase* p = get(); 297 | p_ = nullptr; 298 | return p; 299 | } 300 | ClosureBase* p_; 301 | }; 302 | 303 | } // namespace internal 304 | 305 | // Public class ClosureFunc 306 | 307 | template 308 | class ClosureFunc { 309 | public: 310 | }; 311 | 312 | $range i 0..max_args 313 | 314 | $for i [[ 315 | 316 | $range j 1..i 317 | 318 | template < 319 | typename R$for j [[, typename A$j]] 320 | > 321 | class ClosureFunc : public internal::ClosureFuncBase { 322 | public: 323 | typedef internal::ClosureFuncBase BaseType; 324 | typedef internal::Closure ClosureType; 325 | ~ClosureFunc() = default; 326 | // constructor 327 | ClosureFunc() = default; 328 | ClosureFunc(std::nullptr_t) // NOLINT 329 | : ClosureFunc() {} 330 | explicit ClosureFunc(ClosureType* p) noexcept 331 | : ClosureFuncBase(p) {} 332 | template ::type, ClosureFunc>::value>::type> 333 | ClosureFunc(F&& f) noexcept 334 | : ClosureFuncBase(internal::NewPermanentClosure(std::forward(f))) {} 335 | // copyable and movable 336 | ClosureFunc(const ClosureFunc& c) = default; 337 | ClosureFunc& operator=(const ClosureFunc&) = default; 338 | ClosureFunc(ClosureFunc&& c) = default; 339 | ClosureFunc& operator=(ClosureFunc&&) = default; 340 | // resetable and swappable 341 | void reset() { 342 | BaseType::reset(nullptr); 343 | } 344 | void swap(ClosureFunc& c) { 345 | BaseType::swap(c); 346 | } 347 | // checkable 348 | operator bool() const { 349 
| return static_cast(get()); 350 | } 351 | // callable 352 | R operator()($for j, [[A$j a$j]]) const { 353 | return static_cast(get())->Run($for j, [[a$j]]); 354 | } 355 | }; 356 | 357 | ]] 358 | 359 | $for nleft_args [[ 360 | $for nbind_args [[ 361 | 362 | $var nargs = nleft_args + nbind_args 363 | $range j 1..nbind_args 364 | $range k 1..nargs 365 | $range i nbind_args+1..nargs 366 | 367 | // Bind ClosureFunc with $nbind_args pre-binded args 368 | 369 | template 370 | ClosureFunc 371 | BindClosure(Class *object, R(MethodClass::*method)($for k, [[Arg$k]])$for j [[, PreArg$j pa$j]]) { 372 | return ClosureFunc(internal::NewPermanentClosure(object, method$for j [[, pa$j]])); 373 | } 374 | 375 | template 376 | ClosureFunc 377 | BindClosure(R(*function)($for k, [[Arg$k]])$for j [[, PreArg$j pa$j]]) { 378 | return ClosureFunc(internal::NewPermanentClosure(function$for j [[, pa$j]])); 379 | } 380 | 381 | template 382 | ClosureFunc 383 | BindClosure(F&& functor$for j [[, PreArg$j pa$j]]) { 384 | return ClosureFunc(internal::NewPermanentClosure(std::forward(functor)$for j [[, pa$j]])); 385 | } 386 | 387 | ]] 388 | ]] 389 | 390 | template 391 | auto BindClosure(F&& functor) -> ClosureFunc { 392 | return ClosureFunc(internal::NewPermanentClosure(std::forward(functor))); 393 | } 394 | 395 | } // namespace ccb 396 | 397 | #endif // CCBASE_CLOSURE_H_ 398 | -------------------------------------------------------------------------------- /src/ccbase/timer_wheel.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 
10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include "ccbase/timer_wheel.h" 37 | #include "ccbase/macro_list.h" 38 | 39 | #define CCB_TIMER_WHEEL_NODE(head) \ 40 | static_cast(CCB_LIST_ENTRY(head, ListNode, list)) 41 | 42 | namespace { 43 | 44 | constexpr uint64_t kTimerWheelVecs = 5; 45 | constexpr uint64_t kTimerVecBits = 6; 46 | constexpr uint64_t kTimerVecRootBits = 8; 47 | constexpr uint64_t kTimerVecSize = 1UL << kTimerVecBits; 48 | constexpr uint64_t kTimerVecRootSize = 1UL << kTimerVecRootBits; 49 | constexpr uint64_t kTimerVecMask = kTimerVecSize - 1; 50 | constexpr uint64_t kTimerVecRootMask = kTimerVecRootSize - 1; 51 | constexpr uint64_t kTimerMaxTimeout = 0xffffffffUL; 52 | 53 | // timer node flags 54 | constexpr int kTimerFlagHasOwner = 0x1; 55 | constexpr int kTimerFlagPeriod = 0x2; 56 | 57 | } // namespace 58 | 59 | namespace ccb { 60 | 61 | struct ListNode { 62 | ListHead list; 63 | }; 64 | 65 | struct TimerWheelNode : ListNode { 66 | tick_t timeout; 67 | tick_t expire; 68 | ClosureFunc callback; 69 | uint8_t flags; 70 | }; 71 | 72 | struct TimerVecRoot { 73 | int index; 74 | ListHead vec[kTimerVecRootSize]; 75 | }; 76 | 77 | struct TimerVec { 78 | int index; 79 | ListHead vec[kTimerVecSize]; 80 | }; 81 | 82 | struct TimerWheelVecs { 83 | TimerVec tv5; 84 | TimerVec tv4; 85 | TimerVec tv3; 86 | TimerVec tv2; 87 | TimerVecRoot tv1; 88 | TimerVec* const tvecs[kTimerWheelVecs] = { 89 | reinterpret_cast(&tv1), &tv2, &tv3, &tv4, &tv5 90 | }; 91 | 92 | TimerWheelVecs() { 93 | for (size_t i = 0; i < kTimerVecSize; i++) { 94 | CCB_INIT_LIST_HEAD(tv5.vec + i); 95 | CCB_INIT_LIST_HEAD(tv4.vec + i); 96 | CCB_INIT_LIST_HEAD(tv3.vec + i); 97 | CCB_INIT_LIST_HEAD(tv2.vec + i); 98 | } 99 | tv5.index = 0; 100 | tv4.index = 0; 101 | tv3.index = 0; 102 | tv2.index = 0; 103 | for (size_t i = 0; i < kTimerVecRootSize; i++) { 104 | CCB_INIT_LIST_HEAD(tv1.vec + i); 105 | } 106 | tv1.index = 0; 107 | } 108 | }; 109 | 
110 | class TimerWheelImpl : public std::enable_shared_from_this { 111 | public: 112 | TimerWheelImpl(size_t us_per_tick, bool enable_lock); 113 | ~TimerWheelImpl(); 114 | 115 | bool AddTimer(tick_t timeout, 116 | ClosureFunc callback, 117 | TimerOwner* owner = nullptr); 118 | bool ResetTimer(const TimerOwner& owner, 119 | tick_t timeout); 120 | bool AddPeriodTimer(tick_t timeout, 121 | ClosureFunc callback, 122 | TimerOwner* owner = nullptr); 123 | bool ResetPeriodTimer(const TimerOwner& owner, 124 | tick_t timeout); 125 | 126 | void MoveOn() { 127 | MoveOn(nullptr); 128 | } 129 | void MoveOn(ClosureFunc)> sched_func); 130 | 131 | size_t timer_count() const { 132 | return timer_count_; 133 | } 134 | tick_t tick_cur() const { 135 | return tick_cur_.load(std::memory_order_relaxed); 136 | } 137 | // used by TimerOwner 138 | void DelTimerNode(TimerWheelNode* node); 139 | 140 | private: 141 | bool AddTimerNode(TimerWheelNode* node); 142 | void AddTimerNodeInLock(TimerWheelNode* node); 143 | void DelTimerNodeInLock(TimerWheelNode* node); 144 | void CascadeTimers(TimerVec* tv); 145 | void PollTimerWheel(std::vector>>* out); 147 | void InitTick(); 148 | tick_t GetTickNow() const; 149 | tick_t ToTick(const struct timespec& ts) const; 150 | 151 | private: 152 | // conditional locker 153 | class Locker { 154 | public: 155 | Locker(std::mutex& m, bool on) : m_(m), on_(on) { // NOLINT 156 | if (on_) m_.lock(); 157 | } 158 | ~Locker() { 159 | if (on_) m_.unlock(); 160 | } 161 | private: 162 | std::mutex& m_; 163 | bool on_; 164 | }; 165 | 166 | private: 167 | TimerWheelVecs wheel_; 168 | std::mutex mutex_; 169 | size_t us_per_tick_; 170 | bool enable_lock_; 171 | size_t timer_count_; 172 | std::atomic tick_cur_; 173 | struct timespec ts_start_; 174 | static thread_local bool tls_tracking_dead_nodes_; 175 | static thread_local std::vector tls_dead_nodes_; 176 | }; 177 | 178 | thread_local bool TimerWheelImpl::tls_tracking_dead_nodes_{false}; 179 | thread_local std::vector 
TimerWheelImpl::tls_dead_nodes_; 180 | 181 | TimerWheelImpl::TimerWheelImpl(size_t us_per_tick, bool enable_lock) 182 | : us_per_tick_(us_per_tick) 183 | , enable_lock_(enable_lock) 184 | , timer_count_(0) { 185 | InitTick(); 186 | } 187 | 188 | TimerWheelImpl::~TimerWheelImpl() { 189 | for (size_t tvecs_idx = 0; tvecs_idx < kTimerWheelVecs; tvecs_idx++) { 190 | TimerVec* tv = wheel_.tvecs[tvecs_idx]; 191 | size_t tv_size = (tvecs_idx == 0 ? kTimerVecRootSize : kTimerVecSize); 192 | for (size_t tv_index = 0; tv_index < tv_size; tv_index++) { 193 | for (ListHead *head = tv->vec + tv_index, *curr = head->next; 194 | curr != head; curr = head->next) { 195 | TimerWheelNode* node = CCB_TIMER_WHEEL_NODE(curr); 196 | DelTimerNodeInLock(node); 197 | // if owner alive timer-wheel should never freed 198 | assert(!(node->flags & kTimerFlagHasOwner)); 199 | delete node; 200 | } 201 | } 202 | } 203 | } 204 | 205 | bool TimerWheelImpl::AddTimer(tick_t timeout, 206 | ClosureFunc callback, 207 | TimerOwner* owner) { 208 | if (timeout > kTimerMaxTimeout) { 209 | return false; 210 | } 211 | TimerWheelNode* node = nullptr; 212 | if (owner) { 213 | if (owner->has_timer()) { 214 | node = owner->timer_.get(); 215 | DelTimerNode(node); 216 | } else { 217 | node = new TimerWheelNode; 218 | owner->timer_.reset(node); 219 | } 220 | owner->timer_wheel_ = shared_from_this(); 221 | } else { 222 | node = new TimerWheelNode; 223 | } 224 | node->timeout = timeout; 225 | node->expire = tick_cur() + timeout; 226 | node->callback = std::move(callback); 227 | node->flags = (owner ? 
kTimerFlagHasOwner : 0); 228 | return AddTimerNode(node); 229 | } 230 | 231 | bool TimerWheelImpl::ResetTimer(const TimerOwner& owner, 232 | tick_t timeout) { 233 | if (timeout > kTimerMaxTimeout) { 234 | return false; 235 | } 236 | if (!owner.has_timer()) { 237 | return false; 238 | } 239 | TimerWheelNode* node = owner.timer_.get(); 240 | DelTimerNode(node); 241 | node->timeout = timeout; 242 | node->expire = tick_cur() + timeout; 243 | node->flags = kTimerFlagHasOwner; 244 | return AddTimerNode(node); 245 | } 246 | 247 | bool TimerWheelImpl::AddPeriodTimer(tick_t timeout, 248 | ClosureFunc callback, 249 | TimerOwner* owner) { 250 | if (timeout > kTimerMaxTimeout || timeout == 0) { 251 | // 0-tick period timer is not allowed 252 | return false; 253 | } 254 | TimerWheelNode* node = nullptr; 255 | if (owner) { 256 | if (owner->has_timer()) { 257 | node = owner->timer_.get(); 258 | DelTimerNode(node); 259 | } else { 260 | node = new TimerWheelNode; 261 | owner->timer_.reset(node); 262 | } 263 | owner->timer_wheel_ = shared_from_this(); 264 | } else { 265 | node = new TimerWheelNode; 266 | } 267 | node->timeout = timeout; 268 | node->expire = tick_cur() + timeout; 269 | node->callback = std::move(callback); 270 | node->flags = kTimerFlagPeriod | (owner ? 
kTimerFlagHasOwner : 0); 271 | return AddTimerNode(node); 272 | } 273 | 274 | bool TimerWheelImpl::ResetPeriodTimer(const TimerOwner& owner, 275 | tick_t timeout) { 276 | if (timeout > kTimerMaxTimeout || timeout == 0) { 277 | // 0-tick period timer is not allowed 278 | return false; 279 | } 280 | if (!owner.has_timer()) { 281 | return false; 282 | } 283 | TimerWheelNode* node = owner.timer_.get(); 284 | DelTimerNode(node); 285 | node->timeout = timeout; 286 | node->expire = tick_cur() + timeout; 287 | node->flags = kTimerFlagPeriod | kTimerFlagHasOwner; 288 | return AddTimerNode(node); 289 | } 290 | 291 | void TimerWheelImpl::CascadeTimers(TimerVec* tv) { 292 | /* cascade all the timers from tv up one level */ 293 | ListHead *head; 294 | ListHead *curr; 295 | ListHead *next; 296 | head = tv->vec + tv->index; 297 | curr = head->next; 298 | /* 299 | * We are removing _all_ timers from the list, so we don't have to 300 | * detach them individually, just clear the list afterwards. 301 | */ 302 | while (curr != head) { 303 | TimerWheelNode* node = CCB_TIMER_WHEEL_NODE(curr); 304 | next = curr->next; 305 | AddTimerNodeInLock(node); 306 | curr = next; 307 | } 308 | CCB_INIT_LIST_HEAD(head); 309 | tv->index = (tv->index + 1) & kTimerVecMask; 310 | } 311 | 312 | void TimerWheelImpl::MoveOn(ClosureFunc)> sched_func) { 313 | thread_local std::vector>> cb_vec; 315 | PollTimerWheel(&cb_vec); 316 | tls_tracking_dead_nodes_ = true; 317 | // run callback without lock 318 | for (auto& cb : cb_vec) { 319 | TimerWheelNode* node = cb.first; 320 | ClosureFunc& callback = cb.second; 321 | if (node && !tls_dead_nodes_.empty() && 322 | std::find(tls_dead_nodes_.begin(), tls_dead_nodes_.end(), node) 323 | != tls_dead_nodes_.end()) { 324 | // the timer has been deleted by previous callbacks 325 | continue; 326 | } 327 | if (callback) { 328 | if (!sched_func) { 329 | callback(); 330 | } else { 331 | sched_func(std::move(callback)); 332 | } 333 | } 334 | } 335 | tls_tracking_dead_nodes_ = 
false; 336 | tls_dead_nodes_.clear(); 337 | cb_vec.clear(); 338 | } 339 | 340 | void TimerWheelImpl::PollTimerWheel( 341 | std::vector>>* out) { 342 | Locker lock(mutex_, enable_lock_); 343 | 344 | tick_t tick_to = GetTickNow(); 345 | if (tick_to < tick_cur()) { 346 | return; 347 | } 348 | 349 | auto& tv1 = wheel_.tv1; 350 | auto& tvecs = wheel_.tvecs; 351 | while (tick_to >= tick_cur()) { 352 | if (tv1.index == 0) { 353 | size_t n = 1; 354 | do { 355 | CascadeTimers(tvecs[n]); 356 | } while (tvecs[n]->index == 1 && ++n < kTimerWheelVecs); 357 | } 358 | 359 | for (ListHead *head = tv1.vec + tv1.index, *curr = head->next; 360 | curr != head; curr = head->next) { 361 | TimerWheelNode* node = CCB_TIMER_WHEEL_NODE(curr); 362 | DelTimerNodeInLock(node); 363 | if (node->flags & kTimerFlagPeriod) { // period timer 364 | // copy the callback closure 365 | out->emplace_back(node, node->callback); 366 | // reschedule 367 | node->expire = tick_cur() + node->timeout; 368 | AddTimerNodeInLock(node); 369 | } else { // oneshot timer 370 | if (!(node->flags & kTimerFlagHasOwner)) { 371 | // no owner, move the callback closure 372 | out->emplace_back(nullptr, std::move(node->callback)); 373 | delete node; 374 | } else { 375 | // has owner, copy the callback closure 376 | out->emplace_back(node, node->callback); 377 | } 378 | } 379 | } 380 | // next tick 381 | tick_cur_.store(tick_cur() + 1, std::memory_order_relaxed); 382 | tv1.index = (tv1.index + 1) & kTimerVecRootMask; 383 | } 384 | } 385 | 386 | inline bool TimerWheelImpl::AddTimerNode(TimerWheelNode* node) { 387 | Locker lock(mutex_, enable_lock_); 388 | AddTimerNodeInLock(node); 389 | return true; 390 | } 391 | 392 | void TimerWheelImpl::AddTimerNodeInLock(TimerWheelNode* node) { 393 | // link the node 394 | tick_t tick_exp = node->expire; 395 | tick_t tick_now = tick_cur(); 396 | // exp < now may happen in multi-thread environment 397 | tick_t idx = (tick_exp >= tick_now ? 
tick_exp - tick_now : 0); 398 | ListHead* vec; 399 | if (idx < kTimerVecRootSize) { 400 | int i = static_cast(tick_exp & kTimerVecRootMask); 401 | vec = wheel_.tv1.vec + i; 402 | } else if (idx < (tick_t)1 << (kTimerVecRootBits + kTimerVecBits)) { 403 | int i = static_cast((tick_exp >> kTimerVecRootBits) & kTimerVecMask); 404 | vec = wheel_.tv2.vec + i; 405 | } else if (idx < (tick_t)1 << (kTimerVecRootBits + 2 * kTimerVecBits)) { 406 | int i = static_cast((tick_exp >> (kTimerVecRootBits + kTimerVecBits)) & kTimerVecMask); 407 | vec = wheel_.tv3.vec + i; 408 | } else if (idx < (tick_t)1 << (kTimerVecRootBits + 3 * kTimerVecBits)) { 409 | int i = static_cast((tick_exp >> (kTimerVecRootBits + 2 * kTimerVecBits)) & kTimerVecMask); 410 | vec = wheel_.tv4.vec + i; 411 | } else if (idx < (tick_t)1 << (kTimerVecRootBits + 4 * kTimerVecBits)) { 412 | int i = static_cast((tick_exp >> (kTimerVecRootBits + 3 * kTimerVecBits)) & kTimerVecMask); 413 | vec = wheel_.tv5.vec + i; 414 | } else { 415 | // exp - now > kTimerMaxTimeout 416 | assert(false); 417 | return; 418 | } 419 | CCB_LIST_ADD(&(node->list), vec->prev); 420 | timer_count_++; 421 | } 422 | 423 | inline void TimerWheelImpl::DelTimerNode(TimerWheelNode* node) { 424 | if (tls_tracking_dead_nodes_) { 425 | tls_dead_nodes_.push_back(node); 426 | } 427 | Locker lock(mutex_, enable_lock_); 428 | DelTimerNodeInLock(node); 429 | } 430 | 431 | inline void TimerWheelImpl::DelTimerNodeInLock(TimerWheelNode* node) { 432 | // unlink the node 433 | if (!CCB_LIST_EMPTY(&(node->list))) { 434 | CCB_LIST_DEL_INIT(&(node->list)); 435 | timer_count_--; 436 | } 437 | } 438 | 439 | void TimerWheelImpl::InitTick() { 440 | if (clock_gettime(CLOCK_MONOTONIC, &ts_start_) < 0) { 441 | throw std::system_error(errno, std::system_category(), "clock_gettime"); 442 | } 443 | tick_cur_.store(0, std::memory_order_relaxed); 444 | } 445 | 446 | tick_t TimerWheelImpl::GetTickNow() const { 447 | struct timespec ts; 448 | if 
(clock_gettime(CLOCK_MONOTONIC, &ts) < 0) { 449 | throw std::system_error(errno, std::system_category(), "clock_gettime"); 450 | } 451 | return ToTick(ts); 452 | } 453 | 454 | tick_t TimerWheelImpl::ToTick(const struct timespec& ts) const { 455 | struct timespec dur; 456 | if (ts.tv_nsec >= ts_start_.tv_nsec) { 457 | dur.tv_nsec = ts.tv_nsec - ts_start_.tv_nsec; 458 | dur.tv_sec = ts.tv_sec - ts_start_.tv_sec; 459 | } else { 460 | dur.tv_nsec = ts.tv_nsec + 1000000000 - ts_start_.tv_nsec; 461 | dur.tv_sec = ts.tv_sec - 1 - ts_start_.tv_sec; 462 | } 463 | if (dur.tv_sec < 0) { 464 | throw std::runtime_error("ToTick: invalid timespec value"); 465 | } 466 | tick_t us = (static_cast(dur.tv_sec) * 1000000 + dur.tv_nsec / 1000); 467 | return us / us_per_tick_; 468 | } 469 | 470 | 471 | TimerOwner::TimerOwner() { 472 | } 473 | 474 | TimerOwner::~TimerOwner() { 475 | Cancel(); 476 | } 477 | 478 | void TimerOwner::Cancel() { 479 | if (has_timer()) { 480 | timer_wheel_->DelTimerNode(timer_.get()); 481 | } 482 | } 483 | 484 | 485 | TimerWheel::TimerWheel(size_t us_per_tick, bool enable_lock_for_mt) 486 | : pimpl_(std::make_shared(us_per_tick, enable_lock_for_mt)) { 487 | } 488 | 489 | TimerWheel::~TimerWheel() { 490 | } 491 | 492 | bool TimerWheel::AddTimer(tick_t timeout, 493 | ClosureFunc callback, 494 | TimerOwner* owner) { 495 | return pimpl_->AddTimer(timeout, std::move(callback), owner); 496 | } 497 | 498 | bool TimerWheel::ResetTimer(const TimerOwner& owner, 499 | tick_t timeout) { 500 | return pimpl_->ResetTimer(owner, timeout); 501 | } 502 | 503 | bool TimerWheel::AddPeriodTimer(tick_t timeout, 504 | ClosureFunc callback, 505 | TimerOwner* owner) { 506 | return pimpl_->AddPeriodTimer(timeout, std::move(callback), owner); 507 | } 508 | 509 | bool TimerWheel::ResetPeriodTimer(const TimerOwner& owner, 510 | tick_t timeout) { 511 | return pimpl_->ResetPeriodTimer(owner, timeout); 512 | } 513 | 514 | void TimerWheel::MoveOn() { 515 | return pimpl_->MoveOn(); 516 | } 517 | 
518 | void TimerWheel::MoveOn(ClosureFunc)> sched_func) { 519 | return pimpl_->MoveOn(std::move(sched_func)); 520 | } 521 | 522 | size_t TimerWheel::GetTimerCount() const { 523 | return pimpl_->timer_count(); 524 | } 525 | 526 | size_t TimerWheel::GetCurrentTick() const { 527 | return pimpl_->tick_cur(); 528 | } 529 | 530 | } // namespace ccb 531 | 532 | -------------------------------------------------------------------------------- /src/ccbase/memory_reclamation.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2016-2017, Bin Wei 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are 6 | * met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * * Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following disclaimer 12 | * in the documentation and/or other materials provided with the 13 | * distribution. 14 | * * The names of its contributors may not be used to endorse or 15 | * promote products derived from this software without specific prior 16 | * written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CCBASE_MEMORY_RECLAMATION_H_ 31 | #define CCBASE_MEMORY_RECLAMATION_H_ 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include "ccbase/common.h" 41 | #include "ccbase/closure.h" 42 | #include "ccbase/accumulated_list.h" 43 | 44 | namespace ccb { 45 | 46 | /* Ref-count based memory relcamation 47 | */ 48 | template 49 | class RefCountReclamation { 50 | public: 51 | RefCountReclamation() : ref_count_(0) {} 52 | 53 | void ReadLock() { 54 | ref_count_.fetch_add(1, std::memory_order_seq_cst); 55 | } 56 | 57 | void ReadUnlock() { 58 | ref_count_.fetch_sub(1, std::memory_order_seq_cst); 59 | } 60 | 61 | void Retire(T* ptr) { 62 | Retire(ptr, std::default_delete()); 63 | } 64 | 65 | template 66 | void Retire(T* ptr, F&& del_func) { 67 | if (!ptr) return; 68 | while (ref_count_.load(std::memory_order_acquire)) {} 69 | del_func(ptr); 70 | } 71 | 72 | struct Trait { 73 | using ReadLockPointer = std::false_type; 74 | using HasRetireCleanup = std::false_type; 75 | }; 76 | 77 | private: 78 | CCB_NOT_COPYABLE_AND_MOVABLE(RefCountReclamation); 79 | 80 | std::atomic ref_count_; 81 | }; 82 | 83 | 84 | /* Epoch based memory relcamation 85 | */ 86 | template 87 | class EpochBasedReclamation { 88 | public: 89 | EpochBasedReclamation() = default; 90 | 91 | static void ReadLock() { 92 | ReaderThreadState* state = &state_list_.LocalNode()->reader_state; 93 
| state->is_active.store(true, std::memory_order_relaxed); 94 | state->local_epoch.store(global_epoch_.load(std::memory_order_relaxed), 95 | std::memory_order_relaxed); 96 | // memory_order_seq_cst is required to garentee that updated is_active 97 | // and local_epoch are visable to all threads before critical-section 98 | atomic_thread_fence(std::memory_order_seq_cst); 99 | } 100 | 101 | static void ReadUnlock() { 102 | ReaderThreadState* state = &state_list_.LocalNode()->reader_state; 103 | // memory_order_release is required when leaving cirtical-section 104 | state->is_active.store(false, std::memory_order_release); 105 | } 106 | 107 | static void Retire(T* ptr) { 108 | Retire(ptr, nullptr); 109 | } 110 | 111 | static void Retire(T* ptr, std::default_delete) { 112 | Retire(ptr, nullptr); 113 | } 114 | 115 | /* Retire a pointer with user defined deletor 116 | * @ptr pointer to the object to be retired 117 | * @del_func deletor called when the retired object is reclaimed 118 | * 119 | * The object will be put into to a retire-list waiting for final reclamation 120 | * which happens when any possible reference to the retired object has gone. 121 | * IMPORTANT: caller must garantee @ptr has been consistently unreachable to 122 | * all threads (e.g. reset with memory_order_seq_cst) 123 | */ 124 | template 125 | static void Retire(T* ptr, F&& del_func) { 126 | // safety guideline: object retired in epoch N can only be referenced by 127 | // readers running in epoch N or N-1. 
128 | // - reader-get-epoch < reader-get-ref < ref-unreachable < get-retire-epoch 129 | // so always have reader-epoch <= N 130 | // - if global-epoch is at least N all current and future readers live in 131 | // epoch >= N-1 132 | TryReclaim(); 133 | WriterThreadState* writer_state = &state_list_.LocalNode()->writer_state; 134 | writer_state->retire_lists[writer_state->retire_epoch % kEpochSlots] 135 | .emplace_back(ptr, std::forward(del_func)); 136 | TryUpdateEpoch(); 137 | } 138 | 139 | static void RetireCleanup() { 140 | for (size_t count = 0; count < kEpochSlots; ) { 141 | count += TryReclaim(); 142 | TryUpdateEpoch(); 143 | } 144 | } 145 | 146 | struct Trait { 147 | using ReadLockPointer = std::false_type; 148 | using HasRetireCleanup = std::true_type; 149 | }; 150 | 151 | private: 152 | CCB_NOT_COPYABLE_AND_MOVABLE(EpochBasedReclamation); 153 | 154 | static size_t TryReclaim() { 155 | WriterThreadState* writer_state = &state_list_.LocalNode()->writer_state; 156 | uint64_t epoch = global_epoch_.load(std::memory_order_seq_cst); 157 | if (writer_state->retire_epoch != epoch) { 158 | size_t reclaim_slots = epoch - writer_state->retire_epoch; 159 | if (reclaim_slots > kEpochSlots) { 160 | reclaim_slots = kEpochSlots; 161 | } 162 | for (size_t i = 1; i <= reclaim_slots; i++) { 163 | auto& rlist = writer_state->retire_lists[ 164 | (writer_state->retire_epoch + i) % kEpochSlots]; 165 | if (!rlist.empty()) rlist.clear(); 166 | } 167 | writer_state->retire_epoch = epoch; 168 | return reclaim_slots; 169 | } 170 | return 0; 171 | } 172 | 173 | static void TryUpdateEpoch() { 174 | // safety proof: 175 | // - if any reader's ReadLock-store-fence happens before the load below, 176 | // we must see the reader thread is active, and its local_epoch may lag 177 | // behind the epoch in which the critial-code is actually running 178 | // - if we see the reader's local-epoch is update, it must be true as we 179 | // assume the lag can't be as large as 2^64 180 | // - it we see 
the reader's local-epoch is old, it may be false-negative 181 | // but it is just safe as we will do nothing 182 | uint64_t epoch = global_epoch_.load(std::memory_order_seq_cst); 183 | // - if any reader's ReadLock-store-fence happens after the load above, 184 | // the reader's cirtial-code will be running in current global-epoch at 185 | // least. So no matter what we see and which decision we made it is 186 | // always safe. 187 | bool all_sync = true; 188 | state_list_.Travel([epoch, &all_sync](ThreadState* state) { 189 | if (state->reader_state.is_active.load(std::memory_order_seq_cst)) { 190 | all_sync = all_sync && 191 | (state->reader_state.local_epoch.load(std::memory_order_relaxed) 192 | == epoch); 193 | } 194 | }); 195 | if (all_sync) { 196 | global_epoch_.compare_exchange_weak(epoch, epoch + 1, 197 | std::memory_order_seq_cst, 198 | std::memory_order_relaxed); 199 | } 200 | } 201 | 202 | // reader state 203 | struct ReaderThreadState { 204 | std::atomic is_active; 205 | std::atomic local_epoch; 206 | 207 | ReaderThreadState() : is_active(false), local_epoch(0) {} 208 | }; 209 | 210 | // writer state 211 | struct RetireEntry { 212 | T* ptr; 213 | ClosureFunc del_func; 214 | 215 | RetireEntry(T* p, ClosureFunc f) 216 | : ptr(p), del_func(std::move(f)) {} 217 | RetireEntry(const RetireEntry&) = delete; 218 | RetireEntry(RetireEntry&& e) { 219 | // move may happen when retire_list.emplace_back 220 | ptr = e.ptr; 221 | e.ptr = nullptr; 222 | del_func = std::move(e.del_func); 223 | } 224 | void operator=(const RetireEntry&) = delete; 225 | void operator=(RetireEntry&&) = delete; 226 | ~RetireEntry() { 227 | if (ptr) { 228 | if (del_func) 229 | del_func(ptr); 230 | else 231 | std::default_delete()(ptr); 232 | } 233 | } 234 | }; 235 | 236 | static constexpr size_t kEpochSlots = 2; // only need 2 in this impl 237 | 238 | struct WriterThreadState { 239 | uint64_t retire_epoch{0}; 240 | std::vector retire_lists[kEpochSlots]; 241 | 242 | ~WriterThreadState() { 243 | 
// do cleanup if we have retired pointers not reclaimed 244 | if (std::any_of(retire_lists, retire_lists + kEpochSlots, 245 | [](const std::vector& v) { 246 | return !v.empty(); 247 | })) { 248 | RetireCleanup(); 249 | } 250 | } 251 | }; 252 | 253 | // thread local state 254 | struct ThreadState { 255 | ReaderThreadState reader_state; 256 | WriterThreadState writer_state; 257 | }; 258 | 259 | // static member variables 260 | static std::atomic global_epoch_; 261 | static ThreadLocalList state_list_; 262 | }; 263 | 264 | template 265 | std::atomic EpochBasedReclamation::global_epoch_{0}; 266 | 267 | template 268 | ThreadLocalList::ThreadState> 269 | EpochBasedReclamation::state_list_; 270 | 271 | 272 | /* Hazard pointer based memory relcamation 273 | */ 274 | template 277 | class HazardPtrReclamation { 278 | public: 279 | HazardPtrReclamation() = default; 280 | 281 | static void ReadLock(T* ptr, size_t index = 0) { 282 | assert(index < kHazardPtrNum); 283 | ReaderThreadState* state = &state_list_.LocalNode()->reader_state; 284 | // memory_order_seq_cst is required to garentee that hazard pointer 285 | // is visable to all threads before critical-section 286 | state->hazard_ptrs[index].store(ptr, std::memory_order_seq_cst); 287 | } 288 | 289 | static void ReadUnlock() { 290 | ReaderThreadState* state = &state_list_.LocalNode()->reader_state; 291 | for (auto& hp : state->hazard_ptrs) { 292 | // memory_order_release is required when leaving cirtical-section 293 | hp.store(nullptr, std::memory_order_release); 294 | } 295 | } 296 | 297 | static void Retire(T* ptr) { 298 | Retire(ptr, nullptr); 299 | } 300 | 301 | static void Retire(T* ptr, std::default_delete) { 302 | Retire(ptr, nullptr); 303 | } 304 | 305 | /* Retire a pointer with user defined deletor 306 | * @ptr pointer to the object to be retired 307 | * @del_func deletor called when the retired object is reclaimed 308 | * 309 | * The object will be put into to a retire-list waiting for final reclamation 310 | * 
which happens when retire-list is long enough and no other reference. 311 | * IMPORTANT: caller must garantee @ptr has been consistently unreachable to 312 | * all threads (e.g. reset with memory_order_seq_cst) 313 | */ 314 | template 315 | static void Retire(T* ptr, F&& del_func) { 316 | WriterThreadState* writer_state = &state_list_.LocalNode()->writer_state; 317 | writer_state->retire_list.emplace_back(ptr, std::forward(del_func)); 318 | if (writer_state->retire_list.size() >= kReclaimThreshold) { 319 | TryReclaim(); 320 | } 321 | } 322 | 323 | static void RetireCleanup() { 324 | WriterThreadState* writer_state = &state_list_.LocalNode()->writer_state; 325 | while (!writer_state->retire_list.empty()) { 326 | TryReclaim(); 327 | } 328 | } 329 | 330 | struct Trait { 331 | using ReadLockPointer = std::true_type; 332 | using HasRetireCleanup = std::true_type; 333 | }; 334 | 335 | private: 336 | CCB_NOT_COPYABLE_AND_MOVABLE(HazardPtrReclamation); 337 | 338 | static void TryReclaim() { 339 | // safety proof: 340 | // - if we decide one pointer can't be reclaimed it's always safe 341 | // - if we decide one pointer can be reclaimed let's consider one reader: 342 | // - if the reader's ReadLock complete after begining of checks below, 343 | // the double check following ReadLock will never see retired pointers 344 | // therefore the reclamation safety always hold 345 | // - if the reader's ReadLock complete before checks below, and 346 | // if the reader's ReadUnlock begin after checks done, the reader has 347 | // no reference definitely, else 348 | // - if the reader's ReadUnlock begin before checks done, only 2 cases: 349 | // - the reader has a ref and we acquired the release of ReadUnlock 350 | // - the reader has no ref at all 351 | // in either case we can safely reclaim the pointer. 
352 | 353 | // use thread local allocation as cache 354 | static thread_local std::vector hazard_ptr_vec; 355 | hazard_ptr_vec.clear(); 356 | // collect and sort hazard-pointers 357 | state_list_.Travel([](ThreadState* state) { 358 | for (auto& hp : state->reader_state.hazard_ptrs) { 359 | T* ptr = hp.load(std::memory_order_seq_cst); 360 | if (ptr) hazard_ptr_vec.push_back(ptr); 361 | } 362 | }); 363 | std::sort(hazard_ptr_vec.begin(), hazard_ptr_vec.end()); 364 | // filter out hazard pointers (survivors) from retire list 365 | WriterThreadState* writer_state = &state_list_.LocalNode()->writer_state; 366 | size_t survivors = 0; 367 | for (auto& entry : writer_state->retire_list) { 368 | if (std::binary_search(hazard_ptr_vec.begin(), 369 | hazard_ptr_vec.end(), entry.ptr)) { 370 | writer_state->retire_list[survivors++].swap(entry); 371 | } 372 | } 373 | // reclaim all entries except survivors 374 | writer_state->retire_list.resize(survivors); 375 | } 376 | 377 | // reader state 378 | struct ReaderThreadState { 379 | std::atomic hazard_ptrs[kHazardPtrNum]; 380 | }; 381 | 382 | // writer state 383 | struct RetireEntry { 384 | T* ptr; 385 | ClosureFunc del_func; 386 | 387 | RetireEntry() 388 | : ptr(nullptr), del_func(nullptr) { 389 | assert(false); // never called 390 | } 391 | RetireEntry(T* p, ClosureFunc f) 392 | : ptr(p), del_func(std::move(f)) {} 393 | RetireEntry(const RetireEntry&) = delete; 394 | RetireEntry(RetireEntry&& e) { 395 | // move may happen when retire_list.emplace_back 396 | ptr = e.ptr; 397 | e.ptr = nullptr; 398 | del_func = std::move(e.del_func); 399 | } 400 | void operator=(const RetireEntry&) = delete; 401 | void operator=(RetireEntry&& e) = delete; 402 | void swap(RetireEntry& e) { 403 | if (this != &e) { 404 | std::swap(this->ptr, e.ptr); 405 | this->del_func.swap(e.del_func); 406 | } 407 | } 408 | ~RetireEntry() { 409 | if (ptr) { 410 | if (del_func) 411 | del_func(ptr); 412 | else 413 | std::default_delete()(ptr); 414 | } 415 | } 416 | }; 
417 | 418 | struct WriterThreadState { 419 | std::vector retire_list; 420 | 421 | ~WriterThreadState() { 422 | // do cleanup if we have retired pointers not reclaimed 423 | if (!retire_list.empty()) { 424 | RetireCleanup(); 425 | } 426 | } 427 | }; 428 | 429 | // thread local state 430 | struct ThreadState { 431 | ReaderThreadState reader_state; 432 | WriterThreadState writer_state; 433 | }; 434 | 435 | // static member variables 436 | static ThreadLocalList state_list_; 437 | }; 438 | 439 | template 442 | ThreadLocalList::ThreadState> 444 | HazardPtrReclamation::state_list_; 446 | 447 | 448 | /* An adapter class for single pointer reclamation 449 | */ 450 | template 451 | class PtrReclamationAdapter : private Reclamation { 452 | public: 453 | PtrReclamationAdapter() = default; 454 | 455 | template 456 | typename std::enable_if::type 458 | ReadLock(const std::atomic* atomic_ptr) { 459 | T* ptr; 460 | do { 461 | ptr = atomic_ptr->load(std::memory_order_seq_cst); 462 | Reclamation::ReadLock(ptr, 0); 463 | } while (ptr != atomic_ptr->load(std::memory_order_seq_cst)); 464 | return ptr; 465 | } 466 | 467 | template 468 | typename std::enable_if::type 470 | ReadLock(const std::atomic* atomic_ptr) { 471 | Reclamation::ReadLock(); 472 | return atomic_ptr->load(std::memory_order_acquire); 473 | } 474 | 475 | void ReadUnlock() { 476 | Reclamation::ReadUnlock(); 477 | } 478 | 479 | void Retire(T* ptr) { 480 | Reclamation::Retire(ptr); 481 | } 482 | 483 | template 484 | void Retire(T* ptr, F&& del_func) { 485 | Reclamation::Retire(ptr, std::forward(del_func)); 486 | } 487 | 488 | template 489 | typename std::enable_if::type 491 | RetireCleanup() { 492 | Reclamation::RetireCleanup(); 493 | } 494 | 495 | template 496 | typename std::enable_if::type 498 | RetireCleanup() { 499 | } 500 | 501 | private: 502 | CCB_NOT_COPYABLE_AND_MOVABLE(PtrReclamationAdapter); 503 | }; 504 | 505 | } // namespace ccb 506 | 507 | #endif // CCBASE_MEMORY_RECLAMATION_H_ 508 | 
--------------------------------------------------------------------------------