├── src ├── helpers │ ├── malloc_free.c │ ├── new_delete_ops.cpp │ ├── runtime_stats_timer.c │ └── tasks_static.c ├── tick_timer.cpp ├── cpu.cpp ├── scheduler.cpp ├── pend_call.cpp ├── mutex.cpp ├── event_group.cpp ├── condition_variable.cpp ├── semaphore.cpp ├── timed_service.cpp ├── queue.cpp └── thread.cpp ├── CMakeLists.txt ├── .clang-format ├── LICENSE ├── include └── freertos │ ├── stdlib.hpp │ ├── cpu.hpp │ ├── scheduler.hpp │ ├── timed_service.hpp │ ├── tick_timer.hpp │ ├── pend_call.hpp │ ├── semaphore.hpp │ ├── mutex.hpp │ ├── condition_variable.hpp │ ├── queue.hpp │ ├── event_group.hpp │ └── thread.hpp └── README.md /src/helpers/malloc_free.c: -------------------------------------------------------------------------------- 1 | /** 2 | * @file malloc_free.c 3 | * @brief Redirection of C default memory allocation to FreeRTOS heap management. 4 | * @author Benedek Kupper 5 | */ 6 | #include 7 | #include "FreeRTOS.h" 8 | 9 | #if (configSUPPORT_DYNAMIC_ALLOCATION == 1) 10 | void* malloc(size_t size) 11 | { 12 | return pvPortMalloc(size); 13 | } 14 | 15 | void free(void *p) 16 | { 17 | vPortFree(p); 18 | } 19 | #endif /* (configSUPPORT_DYNAMIC_ALLOCATION == 1) */ 20 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | if(ESP_PLATFORM) 2 | idf_component_register( 3 | REQUIRES 4 | freertos 5 | INCLUDE_DIRS 6 | include 7 | SRCS 8 | src/condition_variable.cpp 9 | src/cpu.cpp 10 | src/event_group.cpp 11 | src/mutex.cpp 12 | src/pend_call.cpp 13 | src/queue.cpp 14 | src/scheduler.cpp 15 | src/semaphore.cpp 16 | src/thread.cpp 17 | src/tick_timer.cpp 18 | src/timed_service.cpp 19 | ) 20 | endif() 21 | -------------------------------------------------------------------------------- /src/tick_timer.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include 
"freertos/tick_timer.hpp" 3 | #include "freertos/cpu.hpp" 4 | #if ESP_PLATFORM 5 | #include 6 | #else 7 | #include 8 | #endif 9 | 10 | namespace freertos 11 | { 12 | tick_timer::time_point tick_timer::now() 13 | { 14 | rep ticks; 15 | if (!this_cpu::is_in_isr()) 16 | { 17 | ticks = xTaskGetTickCount(); 18 | } 19 | else 20 | { 21 | ticks = xTaskGetTickCountFromISR(); 22 | } 23 | 24 | return time_point(duration(ticks)); 25 | } 26 | } // namespace freertos 27 | -------------------------------------------------------------------------------- /src/helpers/new_delete_ops.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * @file new_delete_ops.c 3 | * @brief Redirection of C++ default memory allocation to FreeRTOS heap management. 4 | * @author Benedek Kupper 5 | */ 6 | #include "FreeRTOS.h" 7 | 8 | #if (configSUPPORT_DYNAMIC_ALLOCATION == 1) 9 | 10 | void* operator new(size_t size) 11 | { 12 | return pvPortMalloc(size); 13 | } 14 | 15 | void* operator new[](size_t size) 16 | { 17 | return pvPortMalloc(size); 18 | } 19 | 20 | void operator delete(void* p) 21 | { 22 | vPortFree(p); 23 | } 24 | 25 | void operator delete[](void* p) 26 | { 27 | vPortFree(p); 28 | } 29 | 30 | #endif /* (configSUPPORT_DYNAMIC_ALLOCATION == 1) */ 31 | -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: LLVM 2 | ColumnLimit: 100 3 | --- 4 | Language: Cpp 5 | Standard: c++20 6 | IndentWidth: 4 7 | PPIndentWidth: 4 8 | ConstructorInitializerIndentWidth: 4 9 | ContinuationIndentWidth: 4 10 | DerivePointerAlignment: false 11 | PointerAlignment: Left 12 | BreakBeforeBraces: Custom 13 | BraceWrapping: 14 | AfterClass: true 15 | AfterStruct: true 16 | AfterUnion: true 17 | AfterEnum: true 18 | AfterFunction: true 19 | AfterNamespace: true 20 | AfterExternBlock: true 21 | AfterCaseLabel: true 22 | 
AfterControlStatement: Always 23 | BeforeElse: true 24 | BeforeCatch: true 25 | BeforeLambdaBody: true 26 | SplitEmptyFunction: false 27 | SplitEmptyRecord: false 28 | SplitEmptyNamespace: false 29 | PackConstructorInitializers: NextLine 30 | AlwaysBreakTemplateDeclarations: Yes 31 | IncludeCategories: 32 | - Regex: '^<([a-z]+)>' 33 | Priority: 0 34 | FixNamespaceComments: true 35 | AllowShortFunctionsOnASingleLine: Inline 36 | AllowShortLambdasOnASingleLine: Inline 37 | ShortNamespaceLines: 1 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Benedek Kupper 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /src/cpu.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include "freertos/cpu.hpp" 3 | #if ESP_PLATFORM 4 | #include 5 | #include 6 | #else 7 | #include 8 | #include 9 | #endif 10 | 11 | namespace freertos 12 | { 13 | void cpu::critical_section::lock() 14 | { 15 | if (!this_cpu::is_in_isr()) 16 | { 17 | #if ESP_PLATFORM 18 | taskENTER_CRITICAL(&restore_lock_); 19 | #else 20 | taskENTER_CRITICAL(); 21 | #endif 22 | } 23 | else 24 | { 25 | restore_var() = std::uintptr_t(taskENTER_CRITICAL_FROM_ISR()); 26 | } 27 | } 28 | 29 | void cpu::critical_section::unlock() 30 | { 31 | if (!this_cpu::is_in_isr()) 32 | { 33 | #if ESP_PLATFORM 34 | taskEXIT_CRITICAL(&restore_lock_); 35 | #else 36 | taskEXIT_CRITICAL(); 37 | #endif 38 | } 39 | else 40 | { 41 | taskEXIT_CRITICAL_FROM_ISR(restore_var()); 42 | } 43 | } 44 | 45 | bool this_cpu::is_in_isr() 46 | { 47 | #if ESP_PLATFORM 48 | return xPortInIsrContext(); 49 | #else 50 | return xPortIsInsideInterrupt(); 51 | #endif 52 | } 53 | 54 | } // namespace freertos 55 | -------------------------------------------------------------------------------- /src/scheduler.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include "freertos/scheduler.hpp" 3 | #if ESP_PLATFORM 4 | #include 5 | #else 6 | #include 7 | #endif 8 | 9 | namespace freertos 10 | { 11 | 12 | static_assert(taskSCHEDULER_SUSPENDED == static_cast<::BaseType_t>(scheduler::state::suspended), 13 | "These values must match!"); 14 | static_assert(taskSCHEDULER_NOT_STARTED == 15 | static_cast<::BaseType_t>(scheduler::state::uninitialized), 16 | "These values must match!"); 17 | static_assert(taskSCHEDULER_RUNNING == static_cast<::BaseType_t>(scheduler::state::running), 18 | "These values must match!"); 19 | 20 | void scheduler::start() 21 | 
{ 22 | // this call doesn't return 23 | vTaskStartScheduler(); 24 | } 25 | 26 | size_t scheduler::get_threads_count() 27 | { 28 | return uxTaskGetNumberOfTasks(); 29 | } 30 | 31 | scheduler::state scheduler::get_state() 32 | { 33 | return static_cast(xTaskGetSchedulerState()); 34 | } 35 | 36 | void scheduler::critical_section::lock() 37 | { 38 | vTaskSuspendAll(); 39 | } 40 | 41 | void scheduler::critical_section::unlock() 42 | { 43 | if (xTaskResumeAll()) 44 | { 45 | // a context switch had already happened 46 | } 47 | } 48 | } // namespace freertos 49 | -------------------------------------------------------------------------------- /include/freertos/stdlib.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_STDLIB_HPP_ 3 | #define __FREERTOS_STDLIB_HPP_ 4 | 5 | #include 6 | #include 7 | #if __has_include() 8 | #include 9 | #else 10 | #include 11 | #endif 12 | 13 | namespace freertos 14 | { 15 | template 16 | using lock_guard = std::lock_guard<_Mutex>; 17 | 18 | template 19 | using unique_lock = std::unique_lock<_Mutex>; 20 | 21 | enum class cv_status 22 | { 23 | no_timeout, 24 | timeout 25 | }; 26 | 27 | template 28 | #ifdef __cpp_lib_bit_cast 29 | constexpr To bit_cast(const From& src) 30 | { 31 | return std::bit_cast(src); 32 | } 33 | #else 34 | // https://en.cppreference.com/w/cpp/numeric/bit_cast.html 35 | std::enable_if_t && 36 | std::is_trivially_copyable_v, 37 | To> 38 | bit_cast(const From& src) noexcept 39 | { 40 | static_assert(std::is_trivially_constructible_v, 41 | "This implementation additionally requires " 42 | "destination type to be trivially constructible"); 43 | 44 | To dst; 45 | std::memcpy(&dst, &src, sizeof(To)); 46 | return dst; 47 | } 48 | #endif 49 | 50 | } // namespace freertos 51 | 52 | #endif // __FREERTOS_STDLIB_HPP_ 53 | -------------------------------------------------------------------------------- /src/pend_call.cpp: 
-------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include "freertos/pend_call.hpp" 3 | #include "freertos/cpu.hpp" 4 | #if ESP_PLATFORM 5 | #include 6 | #else 7 | #include 8 | #endif 9 | 10 | namespace freertos 11 | { 12 | #if (configUSE_TIMERS == 1) 13 | 14 | bool pend_call(pend_function_2 func, void* arg1, std::uint32_t arg2, tick_timer::duration waittime) 15 | { 16 | if (!this_cpu::is_in_isr()) 17 | { 18 | return xTimerPendFunctionCall(func, arg1, arg2, to_ticks(waittime)); 19 | } 20 | else 21 | { 22 | // cannot wait in ISR 23 | configASSERT(to_ticks(waittime) == 0); 24 | 25 | BaseType_t needs_yield = false; 26 | bool success = xTimerPendFunctionCallFromISR(func, arg1, arg2, &needs_yield); 27 | portYIELD_FROM_ISR(needs_yield); 28 | return success; 29 | } 30 | } 31 | 32 | static void pend_redirect_1(void* arg1, std::uint32_t arg2) 33 | { 34 | auto func = reinterpret_cast(arg1); 35 | func(arg2); 36 | } 37 | 38 | bool pend_call(pend_function_1 func, std::uint32_t arg1, tick_timer::duration waittime) 39 | { 40 | return pend_call(&pend_redirect_1, reinterpret_cast(func), arg1, waittime); 41 | } 42 | 43 | static void pend_redirect_0(void* arg1, std::uint32_t arg2) 44 | { 45 | auto func = reinterpret_cast(arg1); 46 | func(); 47 | } 48 | 49 | bool pend_call(pend_function_0 func, tick_timer::duration waittime) 50 | { 51 | return pend_call(&pend_redirect_0, reinterpret_cast(func), 0, waittime); 52 | } 53 | 54 | #endif // (configUSE_TIMERS == 1) 55 | } // namespace freertos 56 | -------------------------------------------------------------------------------- /src/mutex.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include "freertos/mutex.hpp" 3 | #include "freertos/cpu.hpp" 4 | #include "freertos/thread.hpp" 5 | #if ESP_PLATFORM 6 | #include 7 | #else 8 | #include 9 | #endif 10 | 11 | namespace freertos 12 | { 13 | #if 
(configUSE_MUTEXES == 1) 14 | 15 | void mutex::unlock() 16 | { 17 | configASSERT(!this_cpu::is_in_isr()); 18 | 19 | // the same thread must unlock the mutex that has locked it 20 | configASSERT(get_locking_thread()->get_id() == this_thread::get_id()); 21 | 22 | semaphore::release(); 23 | } 24 | 25 | mutex::mutex() 26 | { 27 | // construction not allowed in ISR 28 | configASSERT(!this_cpu::is_in_isr()); 29 | 30 | xSemaphoreCreateMutexStatic(this); 31 | } 32 | 33 | #if (configUSE_RECURSIVE_MUTEXES == 1) 34 | 35 | void recursive_mutex::unlock() 36 | { 37 | configASSERT(!this_cpu::is_in_isr()); 38 | 39 | // the same thread must unlock the mutex that has locked it 40 | configASSERT(get_locking_thread()->get_id() == this_thread::get_id()); 41 | 42 | xSemaphoreGiveRecursive(handle()); 43 | } 44 | 45 | bool recursive_mutex::recursive_take(tick_timer::duration timeout) 46 | { 47 | configASSERT(!this_cpu::is_in_isr()); 48 | 49 | return xSemaphoreTakeRecursive(handle(), to_ticks(timeout)); 50 | } 51 | 52 | recursive_mutex::recursive_mutex() 53 | { 54 | // construction not allowed in ISR 55 | configASSERT(!this_cpu::is_in_isr()); 56 | 57 | xSemaphoreCreateRecursiveMutexStatic(this); 58 | } 59 | 60 | #endif // (configUSE_RECURSIVE_MUTEXES == 1) 61 | 62 | #endif // (configUSE_MUTEXES == 1) 63 | } // namespace freertos 64 | -------------------------------------------------------------------------------- /include/freertos/cpu.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_CPU_HPP_ 3 | #define __FREERTOS_CPU_HPP_ 4 | 5 | #include "freertos/stdlib.hpp" 6 | #include "freertos/tick_timer.hpp" 7 | 8 | namespace freertos 9 | { 10 | class cpu 11 | { 12 | public: 13 | /// @brief A @ref BasicLockable class that prevents task and interrupt 14 | /// context switches while locked. 
15 | class critical_section 16 | { 17 | public: 18 | /// @brief Locks the CPU, preventing thread and interrupt switches. 19 | void lock(); 20 | 21 | /// @brief Unlocks the CPU, allowing other interrupts and threads 22 | /// to preempt the current execution context. 23 | void unlock(); 24 | 25 | constexpr critical_section() 26 | { 27 | #if ESP_PLATFORM 28 | spinlock_initialize(&restore_lock_); 29 | #endif 30 | } 31 | 32 | private: 33 | #if ESP_PLATFORM 34 | spinlock_t restore_lock_; 35 | auto& restore_var() { return restore_lock_.count; } 36 | #else 37 | std::uintptr_t restore_{}; 38 | auto& restore_var() { return restore_; } 39 | #endif 40 | }; 41 | }; 42 | 43 | namespace this_cpu 44 | { 45 | /// @brief Determines if the current execution context is inside 46 | /// an interrupt service routine. 47 | /// @note The underlying port function (@ref xPortIsInsideInterrupt) 48 | /// is only available for a subset of ports 49 | /// but it could be extended to many targets 50 | /// e.g. by checking if the current task's stack is being used 51 | /// @return true if the current execution context is ISR, false otherwise 52 | bool is_in_isr(); 53 | 54 | } // namespace this_cpu 55 | } // namespace freertos 56 | 57 | #endif // __FREERTOS_CPU_HPP_ 58 | -------------------------------------------------------------------------------- /include/freertos/scheduler.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_SCHEDULER_HPP_ 3 | #define __FREERTOS_SCHEDULER_HPP_ 4 | 5 | #include "freertos/stdlib.hpp" 6 | #include "freertos/thread.hpp" 7 | 8 | namespace freertos 9 | { 10 | /// @brief Static class that allows FreeRTOS scheduler control. 11 | class scheduler 12 | { 13 | public: 14 | /// @brief Possible operating states of the scheduler. 15 | enum class state : ::BaseType_t 16 | { 17 | suspended = 0, 18 | uninitialized = 1, 19 | running = 2 20 | }; 21 | 22 | /// @brief Starts the scheduler. 
This function doesn't return. 23 | /// @remark Thread context callable 24 | [[noreturn]] static void start(); 25 | 26 | /// @brief Reads the current state of the scheduler. 27 | /// @return The current state of the scheduler 28 | /// @remark Thread context callable 29 | static state get_state(); 30 | 31 | /// @brief Reads the total number of existing threads. 32 | /// @return The number of threads 33 | /// @remark Thread context callable 34 | static size_t get_threads_count(); 35 | 36 | /// @brief @ref Lockable critical section that manipulates the scheduler's state. 37 | class critical_section 38 | { 39 | public: 40 | /// @brief Suspends the scheduler, allowing the caller thread to operate without 41 | /// the possibility of context switches. Blocking operations are forbidden 42 | /// while the scheduler is suspended. 43 | /// @remark Thread context callable 44 | void lock(); 45 | 46 | /// @brief Resumes the scheduler, allowing thread context switches. 47 | /// @remark Thread context callable 48 | void unlock(); 49 | 50 | constexpr critical_section() {} 51 | }; 52 | 53 | private: 54 | scheduler(); 55 | }; 56 | 57 | } // namespace freertos 58 | 59 | #endif // __FREERTOS_SCHEDULER_HPP_ 60 | -------------------------------------------------------------------------------- /include/freertos/timed_service.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_TIMED_SERVICE_HPP_ 3 | #define __FREERTOS_TIMED_SERVICE_HPP_ 4 | 5 | #include "freertos/tick_timer.hpp" 6 | 7 | struct tmrTimerControl; 8 | 9 | namespace freertos 10 | { 11 | class thread; 12 | 13 | #if (configUSE_TIMERS == 1) 14 | 15 | class timed_service : protected ::StaticTimer_t 16 | { 17 | public: 18 | using function = void (*)(timed_service*); 19 | 20 | ~timed_service(); 21 | 22 | bool is_active() const; 23 | 24 | bool start(tick_timer::duration waittime = tick_timer::duration(0)); 25 | bool stop(tick_timer::duration 
waittime = tick_timer::duration(0)); 26 | bool reset(tick_timer::duration waittime = tick_timer::duration(0)); 27 | 28 | bool is_reloading() const; 29 | void set_reloading(bool reloading); 30 | 31 | tick_timer::duration get_period() const; 32 | bool set_period(tick_timer::duration new_period, 33 | tick_timer::duration waittime = tick_timer::duration(0)); 34 | 35 | void* get_owner() const; 36 | void set_owner(void* owner); 37 | 38 | tick_timer::time_point get_trigger_time() const; 39 | 40 | const char* get_name() const; 41 | 42 | timed_service(function func, void* owner, tick_timer::duration period, bool reloading, 43 | const char* name = DEFAULT_NAME); 44 | 45 | private: 46 | static constexpr const char* DEFAULT_NAME = "anonym"; 47 | 48 | ::tmrTimerControl* handle() const 49 | { 50 | return reinterpret_cast<::tmrTimerControl*>(const_cast(this)); 51 | } 52 | 53 | static thread* get_service_thread(); 54 | 55 | // non-copyable 56 | timed_service(const timed_service&) = delete; 57 | timed_service& operator=(const timed_service&) = delete; 58 | // non-movable 59 | timed_service(const timed_service&&) = delete; 60 | timed_service& operator=(const timed_service&&) = delete; 61 | }; 62 | 63 | #endif // (configUSE_TIMERS == 1) 64 | } // namespace freertos 65 | 66 | #endif // __FREERTOS_TIMED_SERVICE_H_ 67 | -------------------------------------------------------------------------------- /src/event_group.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include "freertos/event_group.hpp" 3 | #include "freertos/cpu.hpp" 4 | #if ESP_PLATFORM 5 | #include 6 | #include 7 | #else 8 | #include 9 | #include 10 | #endif 11 | 12 | namespace freertos 13 | { 14 | 15 | event_group::event_group() 16 | { 17 | configASSERT(!this_cpu::is_in_isr()); 18 | xEventGroupCreateStatic(this); 19 | } 20 | 21 | event_group::~event_group() 22 | { 23 | configASSERT(!this_cpu::is_in_isr()); 24 | vEventGroupDelete(handle()); 25 | 
} 26 | 27 | events event_group::get() const 28 | { 29 | if (!this_cpu::is_in_isr()) 30 | { 31 | return events(xEventGroupGetBits(handle())); 32 | } 33 | else 34 | { 35 | return events(xEventGroupGetBitsFromISR(handle())); 36 | } 37 | } 38 | 39 | void event_group::set(events flags) 40 | { 41 | if (!this_cpu::is_in_isr()) 42 | { 43 | (void)xEventGroupSetBits(handle(), flags); 44 | } 45 | else 46 | { 47 | #if (configUSE_TIMERS == 1) 48 | 49 | BaseType_t needs_yield = false; 50 | (void)xEventGroupSetBitsFromISR(handle(), flags, &needs_yield); 51 | portYIELD_FROM_ISR(needs_yield); 52 | 53 | #else 54 | 55 | configASSERT(false); 56 | 57 | #endif // (configUSE_TIMERS == 1) 58 | } 59 | } 60 | 61 | void event_group::clear(events flags) 62 | { 63 | if (!this_cpu::is_in_isr()) 64 | { 65 | (void)xEventGroupClearBits(handle(), flags); 66 | } 67 | else 68 | { 69 | #if (configUSE_TIMERS == 1) 70 | 71 | (void)xEventGroupClearBitsFromISR(handle(), flags); 72 | 73 | #else 74 | 75 | configASSERT(false); 76 | 77 | #endif // (configUSE_TIMERS == 1) 78 | } 79 | } 80 | 81 | events event_group::wait(events flags, const tick_timer::duration& rel_time, bool exclusive, 82 | bool match_all) 83 | { 84 | configASSERT(!this_cpu::is_in_isr()); 85 | events setflags{xEventGroupWaitBits(handle(), flags, exclusive, match_all, to_ticks(rel_time))}; 86 | // only return the flags that are relevant to the wait operation 87 | return flags & setflags; 88 | } 89 | } // namespace freertos 90 | -------------------------------------------------------------------------------- /src/condition_variable.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include "freertos/condition_variable.hpp" 3 | #include "freertos/cpu.hpp" 4 | #if ESP_PLATFORM 5 | #include 6 | #else 7 | #include 8 | #endif 9 | 10 | namespace freertos 11 | { 12 | 13 | condition_variable_any::condition_variable_any() : queue_(), waiters_(0) 14 | { 15 | // construction not 
allowed in ISR 16 | configASSERT(!this_cpu::is_in_isr()); 17 | } 18 | 19 | condition_variable_any::~condition_variable_any() 20 | { 21 | configASSERT(waiters_ == 0); 22 | } 23 | 24 | void condition_variable_any::notify(waiter_count_t waiters) 25 | { 26 | // leaves a previous unconsumed message in the queue, 27 | // possibly blocking the effect of the current call 28 | queue_.push_front(waiters); 29 | } 30 | 31 | void condition_variable_any::notify_one() 32 | { 33 | auto waiters = waiters_; 34 | if (waiters > 0) 35 | { 36 | notify(1); 37 | } 38 | } 39 | 40 | void condition_variable_any::notify_all() 41 | { 42 | auto waiters = waiters_; 43 | if (waiters > 0) 44 | { 45 | notify(waiters); 46 | } 47 | } 48 | 49 | void condition_variable_any::pre_wait() 50 | { 51 | configASSERT(!this_cpu::is_in_isr()); 52 | 53 | // add thread to waiting list (while still locking) 54 | waiters_++; 55 | } 56 | 57 | bool condition_variable_any::do_wait(const tick_timer::duration& rel_time, 58 | waiter_count_t* rx_waiters) 59 | { 60 | return queue_.pop_front(rx_waiters, rel_time); 61 | } 62 | 63 | cv_status condition_variable_any::post_wait(bool wait_success, waiter_count_t rx_waiters) 64 | { 65 | // remove thread from waiting list (when again blocking) 66 | waiters_--; 67 | 68 | if (wait_success) 69 | { 70 | auto rem_waiters = rx_waiters - 1; 71 | rem_waiters = std::min(rem_waiters, waiters_); 72 | 73 | // chain the notification if necessary (from notify_all) 74 | if (rem_waiters > 0) 75 | { 76 | // overwrite the previous message 77 | // in case a notify arrived in the meantime 78 | queue_.replace(rem_waiters); 79 | } 80 | } 81 | 82 | return wait_success ? 
cv_status::no_timeout : cv_status::timeout; 83 | } 84 | } // namespace freertos 85 | -------------------------------------------------------------------------------- /include/freertos/tick_timer.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_TICK_TIMER_HPP_ 3 | #define __FREERTOS_TICK_TIMER_HPP_ 4 | 5 | #include 6 | #include 7 | #include 8 | #if ESP_PLATFORM 9 | #include 10 | #else 11 | #include 12 | #endif 13 | 14 | namespace freertos 15 | { 16 | namespace detail 17 | { 18 | constexpr ::TickType_t infinite_delay = portMAX_DELAY; 19 | constexpr ::TickType_t tick_rate_Hz = configTICK_RATE_HZ; 20 | } // namespace detail 21 | 22 | /// @brief A @ref TrivialClock class that wraps the FreeRTOS tick timer. 23 | class tick_timer 24 | { 25 | public: 26 | using rep = ::TickType_t; 27 | using period = std::ratio<1, detail::tick_rate_Hz>; 28 | using duration = std::chrono::duration; 29 | using time_point = std::chrono::time_point; 30 | static constexpr bool is_steady = true; 31 | 32 | /// @brief Wraps the current OS tick count into a clock time point. 33 | /// @return The current tick count as time_point 34 | /// @remark Thread and ISR context callable 35 | static time_point now(); 36 | }; 37 | 38 | template 39 | inline auto duration_until(const std::chrono::time_point& abs_time) 40 | { 41 | auto d = abs_time - Clock::now(); 42 | return d < Duration::zero() ? Duration::zero() : d; 43 | } 44 | 45 | /// @brief Converts a duration to the underlying tick count. 46 | /// @param rel_time: time duration 47 | /// @return Tick count 48 | template 49 | constexpr tick_timer::rep to_ticks(const std::chrono::duration& rel_time) 50 | { 51 | return std::chrono::duration_cast(rel_time).count(); 52 | } 53 | 54 | /// @brief Converts @ref tick_timer::time_point to the underlying tick count. 
55 | /// @param time: time point from the start of the tick_timer 56 | /// @return Tick count 57 | constexpr tick_timer::rep to_ticks(const tick_timer::time_point& time) 58 | { 59 | return to_ticks(time.time_since_epoch()); 60 | } 61 | 62 | /// @brief Dedicated @ref tick_timer::duration expression that ensures 63 | /// infinite wait time on an operation 64 | constexpr tick_timer::duration infinity{detail::infinite_delay}; 65 | } // namespace freertos 66 | 67 | #endif // __FREERTOS_TICK_TIMER_HPP_ 68 | -------------------------------------------------------------------------------- /src/helpers/runtime_stats_timer.c: -------------------------------------------------------------------------------- 1 | /** 2 | * @file runtime_stats_timer.c 3 | * @brief Zero-cost runtime stats timer 4 | * The current implementation is specific to ARM Cortex Mx cores, 5 | * but the concept is reusable on many targets where the OS timer counter is accessible. 6 | * @author Benedek Kupper 7 | */ 8 | #include "FreeRTOS.h" 9 | #include "task.h" 10 | 11 | #ifdef portNVIC_INT_CTRL_REG /* Currently working detection of ARM Cortex Mx core */ 12 | #if (configGENERATE_RUN_TIME_STATS != 1) 13 | #warning "Ensure that FreeRTOSConfig.h contains: #define configGENERATE_RUN_TIME_STATS 1" 14 | #else 15 | #if portCONFIGURE_TIMER_FOR_RUN_TIME_STATS != ConfigureTimerForRunTimeStats 16 | #warning "Ensure that FreeRTOSConfig.h contains: #define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS ConfigureTimerForRunTimeStats" 17 | #endif 18 | #if portGET_RUN_TIME_COUNTER_VALUE != GetRuntimeCounterValueFromISR 19 | #warning "Ensure that FreeRTOSConfig.h contains: #define portGET_RUN_TIME_COUNTER_VALUE GetRuntimeCounterValueFromISR" 20 | #endif 21 | 22 | /* No generic header is available from ARM to include SysTick, so define it manually */ 23 | struct 24 | { 25 | volatile uint32_t CTRL; 26 | volatile uint32_t LOAD; 27 | volatile uint32_t VAL; 28 | volatile uint32_t CALIB; 29 | }*SysTick = (void*) 0xE000E010UL; 30 | 31 | 
/** 32 | * @brief This is a tunable value, a tradeoff between resolution and long runtime without overflow. 33 | */ 34 | static const uint32_t TimerResolution = 100; 35 | 36 | /** 37 | * @brief Empty funtion, the SysTick is configured by kernel when the scheduler is started. 38 | */ 39 | void ConfigureTimerForRunTimeStats(void) 40 | { 41 | } 42 | 43 | /** 44 | * @brief Calculates the runtime counter by scaling up the tick timer, 45 | * and adding the fractional value of the running timer. The resolution is controlled by 46 | * @ref TimerResolution 47 | * @note The OS is calling this function from ISR context, so only ISR API use is allowed. 48 | * @return The current virtual timer counter value. 49 | */ 50 | uint32_t GetRuntimeCounterValueFromISR(void) 51 | { 52 | uint32_t ticks = xTaskGetTickCountFromISR(); 53 | uint32_t count = SysTick->VAL; 54 | uint32_t reload_value = SysTick->LOAD; 55 | 56 | return (TimerResolution * ticks) + (TimerResolution * count / reload_value); 57 | } 58 | 59 | #endif /* configGENERATE_RUN_TIME_STATS */ 60 | #endif /* portNVIC_INT_CTRL_REG */ 61 | -------------------------------------------------------------------------------- /src/semaphore.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include 3 | #include "freertos/cpu.hpp" 4 | #include "freertos/semaphore.hpp" 5 | #if ESP_PLATFORM 6 | #include 7 | #else 8 | #include 9 | #endif 10 | 11 | namespace freertos 12 | { 13 | #if 0 // doesn't seem like a good idea 14 | semaphore::semaphore(semaphore&& other) 15 | { 16 | std::memmove(this, &other, sizeof(semaphore)); 17 | std::memset(&other, 0, sizeof(semaphore)); 18 | } 19 | 20 | semaphore& semaphore::operator=(semaphore&& other) 21 | { 22 | if (this != &other) 23 | { 24 | std::memmove(this, &other, sizeof(semaphore)); 25 | std::memset(&other, 0, sizeof(semaphore)); 26 | } 27 | return *this; 28 | } 29 | #endif 30 | 31 | bool semaphore::take(tick_timer::duration 
timeout) 32 | { 33 | if (!this_cpu::is_in_isr()) 34 | { 35 | return xSemaphoreTake(handle(), to_ticks(timeout)); 36 | } 37 | else 38 | { 39 | // cannot wait in ISR 40 | configASSERT(to_ticks(timeout) == 0); 41 | 42 | BaseType_t needs_yield = false; 43 | bool success = xSemaphoreTakeFromISR(handle(), &needs_yield); 44 | portYIELD_FROM_ISR(needs_yield); 45 | return success; 46 | } 47 | } 48 | 49 | bool semaphore::give(count_type update) 50 | { 51 | // the API only allows giving a single count 52 | if (!this_cpu::is_in_isr()) 53 | { 54 | bool success = true; 55 | while (success && (update > 0)) 56 | { 57 | success = xSemaphoreGive(handle()); 58 | update--; 59 | } 60 | return success; 61 | } 62 | else 63 | { 64 | BaseType_t needs_yield = false; 65 | bool success = true; 66 | while (success && (update > 0)) 67 | { 68 | success = xSemaphoreGiveFromISR(handle(), &needs_yield); 69 | update--; 70 | } 71 | portYIELD_FROM_ISR(needs_yield); 72 | return success; 73 | } 74 | } 75 | 76 | thread* semaphore::get_mutex_holder() const 77 | { 78 | if (!this_cpu::is_in_isr()) 79 | { 80 | return reinterpret_cast(xSemaphoreGetMutexHolder(handle())); 81 | } 82 | else 83 | { 84 | return reinterpret_cast(xSemaphoreGetMutexHolderFromISR(handle())); 85 | } 86 | } 87 | 88 | #if (configUSE_COUNTING_SEMAPHORES == 1) 89 | 90 | semaphore::semaphore(count_type max, count_type desired) 91 | { 92 | // construction not allowed in ISR 93 | configASSERT(!this_cpu::is_in_isr()); 94 | 95 | (void)xSemaphoreCreateCountingStatic(max, desired, this); 96 | } 97 | 98 | #endif // (configUSE_COUNTING_SEMAPHORES == 1) 99 | 100 | binary_semaphore::binary_semaphore(count_type desired) 101 | { 102 | // construction not allowed in ISR 103 | configASSERT(!this_cpu::is_in_isr()); 104 | 105 | (void)xSemaphoreCreateBinaryStatic(this); 106 | give(desired); 107 | } 108 | 109 | } // namespace freertos 110 | -------------------------------------------------------------------------------- /src/helpers/tasks_static.c: 
-------------------------------------------------------------------------------- 1 | /** 2 | * @file tasks_static.c 3 | * @brief Required FreeRTOS function definitions when static allocation is enabled. 4 | * @author https://www.freertos.org/a00110.html 5 | */ 6 | #include "FreeRTOS.h" 7 | #include "task.h" 8 | 9 | #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) 10 | /* configSUPPORT_STATIC_ALLOCATION is set to 1, so the application must provide an 11 | implementation of vApplicationGetIdleTaskMemory() to provide the memory that is 12 | used by the Idle task. */ 13 | void vApplicationGetIdleTaskMemory(StaticTask_t **ppxIdleTaskTCBBuffer, 14 | StackType_t **ppxIdleTaskStackBuffer, 15 | uint32_t *pulIdleTaskStackSize) 16 | { 17 | /* If the buffers to be provided to the Idle task are declared inside this 18 | function then they must be declared static - otherwise they will be allocated on 19 | the stack and so not exists after this function exits. */ 20 | static struct 21 | { 22 | StaticTask_t xTCB; 23 | StackType_t uxStack[configMINIMAL_STACK_SIZE]; 24 | }xIdleTask; 25 | 26 | /* Pass out a pointer to the StaticTask_t structure in which the Idle task's 27 | state will be stored. */ 28 | *ppxIdleTaskTCBBuffer = &xIdleTask.xTCB; 29 | 30 | /* Pass out the array that will be used as the Idle task's stack. */ 31 | *ppxIdleTaskStackBuffer = xIdleTask.uxStack; 32 | 33 | /* Pass out the size of the array pointed to by *ppxIdleTaskStackBuffer. 34 | Note that, as the array is necessarily of type StackType_t, 35 | configMINIMAL_STACK_SIZE is specified in words, not bytes. */ 36 | *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE; 37 | } 38 | 39 | #if ( configUSE_TIMERS == 1 ) 40 | /* configSUPPORT_STATIC_ALLOCATION and configUSE_TIMERS are both set to 1, so the 41 | application must provide an implementation of vApplicationGetTimerTaskMemory() 42 | to provide the memory that is used by the Timer service task. 
*/ 43 | void vApplicationGetTimerTaskMemory(StaticTask_t **ppxTimerTaskTCBBuffer, 44 | StackType_t **ppxTimerTaskStackBuffer, 45 | uint32_t *pulTimerTaskStackSize) 46 | { 47 | /* If the buffers to be provided to the Timer task are declared inside this 48 | function then they must be declared static - otherwise they will be allocated on 49 | the stack and so not exists after this function exits. */ 50 | static struct 51 | { 52 | StaticTask_t xTCB; 53 | StackType_t uxStack[configTIMER_TASK_STACK_DEPTH]; 54 | }xTimerTask; 55 | 56 | /* Pass out a pointer to the StaticTask_t structure in which the Timer 57 | task's state will be stored. */ 58 | *ppxTimerTaskTCBBuffer = &xTimerTask.xTCB; 59 | 60 | /* Pass out the array that will be used as the Timer task's stack. */ 61 | *ppxTimerTaskStackBuffer = xTimerTask.uxStack; 62 | 63 | /* Pass out the size of the array pointed to by *ppxTimerTaskStackBuffer. 64 | Note that, as the array is necessarily of type StackType_t, 65 | configTIMER_TASK_STACK_DEPTH is specified in words, not bytes. */ 66 | *pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH; 67 | } 68 | #endif /* ( configUSE_TIMERS == 1 ) */ 69 | 70 | #endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ 71 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FreeRTOS Modern C++ Wrappers 2 | 3 | [![License](http://img.shields.io/:license-mit-blue.svg?style=flat-square)](http://badges.mit-license.org) 4 | 5 | **freertos-mcpp** is a C++ wrapper library that allows developers to use the ubiquitous [FreeRTOS][FreeRTOS] kernel 6 | while simplifying its use by a new API that closely follows the C++ standard classes. 
7 | 8 | ## Features 9 | 10 | * No virtual classes, the wrapper classes accurately encapsulate the underlying data structures 11 | * Promotes static allocation, optimizing RAM use and reducing heap fragmentation risks 12 | * Public API closely matches the standard C++ thread support library 13 | * The API selects the threading or interrupt service routine (xFromISR) FreeRTOS API calls by detecting ISR context 14 | 15 | ## Compatibility 16 | 17 | * C++11 and above 18 | * Tested with [FreeRTOS][FreeRTOS-Kernel] 10, its public API is stable enough to enable the use on a wide range of versions 19 | * ESP-IDF platform supported 20 | * Only works with FreeRTOS ports that have `xPortIsInsideInterrupt()` call implemented 21 | 22 | ## Porting 23 | 24 | This library requires certain configuration values to be set for correct operation. 25 | Consider the recommended settings for `FreeRTOSConfig.h`: 26 | 27 | ```C 28 | // required globally 29 | #define configSUPPORT_STATIC_ALLOCATION 1 30 | 31 | // required to allow termination of threads and automatic resource freeing (see thread documentation) 32 | // and for thread_owner that builds on it 33 | #define configSUPPORT_DYNAMIC_ALLOCATION 1 34 | extern void vTaskExitHandler(void); 35 | #define configTASK_RETURN_ADDRESS vTaskExitHandler 36 | 37 | // required for thread termination signalling, used by thread::join 38 | // configNUM_THREAD_LOCAL_STORAGE_POINTERS must be higher than configTHREAD_EXIT_CONDITION_INDEX 39 | #define configNUM_THREAD_LOCAL_STORAGE_POINTERS 1 40 | #define configTHREAD_EXIT_CONDITION_INDEX 0 41 | 42 | // required to support mutex, timed_mutex 43 | #define configUSE_MUTEXES 1 44 | 45 | // required to support recursive_mutex, recursive_timed_mutex 46 | #define configUSE_RECURSIVE_MUTEXES 1 47 | 48 | // required to support counting_semaphore 49 | #define configUSE_COUNTING_SEMAPHORES 1 50 | 51 | 52 | // recommended to use on Cortex Mx architectures (see src/helpers/runtime_stats_timer.c) 53 | #define 
configGENERATE_RUN_TIME_STATS 1 54 | extern void ConfigureTimerForRunTimeStats(void); 55 | #define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() ConfigureTimerForRunTimeStats() 56 | extern uint32_t GetRuntimeCounterValueFromISR(void); 57 | #define portGET_RUN_TIME_COUNTER_VALUE() GetRuntimeCounterValueFromISR() 58 | ``` 59 | 60 | In addition to the C++ wrappers, there are helper files located in `src/helpers` for some common use-cases: 61 | 62 | 1. `tasks_static.c` is required as source to support static allocation of kernel objects 63 | 2. `runtime_stats_timer.c` is a zero-cost runtime statistics timer for Cortex Mx architectures 64 | 3. `malloc_free.c` and `new_delete_ops.cpp` redirect heap allocation to FreeRTOS's heap management 65 | 66 | 67 | [FreeRTOS]: https://www.freertos.org/ 68 | [FreeRTOS-Kernel]: https://github.com/FreeRTOS/FreeRTOS-Kernel 69 | -------------------------------------------------------------------------------- /include/freertos/pend_call.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_PEND_CALL_HPP_ 3 | #define __FREERTOS_PEND_CALL_HPP_ 4 | 5 | #include "freertos/stdlib.hpp" 6 | #include "freertos/tick_timer.hpp" 7 | 8 | namespace freertos 9 | { 10 | #if (configUSE_TIMERS == 1) 11 | 12 | using pend_function_2 = void (*)(void*, std::uint32_t); 13 | 14 | /// @brief Schedules a single function call to be executed in the timer service thread. 
15 | /// @param func: the function to execute in the timer service thread context 16 | /// @param arg1: opaque parameter to pass to the function 17 | /// @param arg2: opaque parameter to pass to the function 18 | /// @param waittime: duration to wait for the timer queue to accept this request 19 | /// @return true if the request is accepted, false if the timer queue is full 20 | bool pend_call(pend_function_2 func, void* arg1, std::uint32_t arg2, 21 | tick_timer::duration waittime = tick_timer::duration(0)); 22 | 23 | template 24 | inline bool pend_call(void (*func)(T1, T2), T1 arg1, T2 arg2, 25 | tick_timer::duration waittime = tick_timer::duration(0)) 26 | { 27 | const auto pend_caller = 28 | static_cast( 29 | &pend_call); 30 | return pend_caller(reinterpret_cast(func), bit_cast(arg1), 31 | bit_cast(arg2), waittime); 32 | } 33 | 34 | template 35 | inline bool pend_call(T1& obj, void (T1::*member_func)(T2), T2 arg2, 36 | tick_timer::duration waittime = tick_timer::duration(0)) 37 | { 38 | const auto pend_caller = 39 | static_cast( 40 | &pend_call); 41 | return pend_caller(reinterpret_cast(member_func), static_cast(&obj), 42 | bit_cast(arg2), waittime); 43 | } 44 | 45 | using pend_function_1 = void (*)(std::uint32_t); 46 | 47 | /// @brief Schedules a single function call to be executed in the timer service thread. 
48 | /// @param func: the function to execute in the timer service thread context 49 | /// @param arg1: opaque parameter to pass to the function 50 | /// @param waittime: duration to wait for the timer queue to accept this request 51 | /// @return true if the request is accepted, false if the timer queue is full 52 | bool pend_call(pend_function_1 func, std::uint32_t arg1, 53 | tick_timer::duration waittime = tick_timer::duration(0)); 54 | 55 | template 56 | inline bool pend_call(void (*func)(T1), T1 arg1, 57 | tick_timer::duration waittime = tick_timer::duration(0)) 58 | { 59 | const auto pend_caller = 60 | static_cast(&pend_call); 61 | return pend_caller(reinterpret_cast(func), bit_cast(arg1), 62 | waittime); 63 | } 64 | 65 | template 66 | inline typename std::enable_if<(sizeof(T1*) == sizeof(std::uint32_t)), bool>::type 67 | pend_call(T1& obj, void (T1::*member_func)(), 68 | tick_timer::duration waittime = tick_timer::duration(0)) 69 | { 70 | const auto pend_caller = 71 | static_cast(&pend_call); 72 | return pend_caller(reinterpret_cast(member_func), 73 | static_cast(&obj), waittime); 74 | } 75 | 76 | using pend_function_0 = void (*)(); 77 | 78 | /// @brief Schedules a single function call to be executed in the timer service thread. 
79 | /// @param func: the function to execute in the timer service thread context 80 | /// @param waittime: duration to wait for the timer queue to accept this request 81 | /// @return true if the request is accepted, false if the timer queue is full 82 | bool pend_call(pend_function_0 func, tick_timer::duration waittime = tick_timer::duration(0)); 83 | 84 | #endif // (configUSE_TIMERS == 1) 85 | } // namespace freertos 86 | 87 | #endif // __FREERTOS_PEND_CALL_HPP_ 88 | -------------------------------------------------------------------------------- /include/freertos/semaphore.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_SEMAPHORE_HPP_ 3 | #define __FREERTOS_SEMAPHORE_HPP_ 4 | 5 | #include "freertos/queue.hpp" 6 | 7 | namespace freertos 8 | { 9 | class thread; 10 | 11 | /// @brief An abstract base class for semaphores. Implements std::counting_semaphore API. 12 | /// An important distinction is that this class is non-copyable and non-movable, 13 | /// since FreeRTOS mutexes inherit from semaphores (that is, they're both null-size queues). 14 | class semaphore : protected queue 15 | { 16 | public: 17 | using count_type = queue::size_type; 18 | 19 | /// @brief Waits indefinitely until the semaphore is available, then takes it. 20 | /// @remark Thread context callable 21 | inline void acquire() { (void)take(infinity); } 22 | 23 | /// @brief Tries to take the semaphore if it is available. 24 | /// @return true if successful, false if the semaphore is unavailable 25 | /// @remark Thread and ISR context callable 26 | inline bool try_acquire() { return take(tick_timer::duration(0)); } 27 | 28 | /// @brief Tries to take the semaphore within the given time duration. 
29 | /// @param rel_time: duration to wait for the semaphore to become available 30 | /// @return true if successful, false if the semaphore is unavailable 31 | /// @remark Thread context callable 32 | template 33 | inline bool try_acquire_for(const std::chrono::duration& rel_time) 34 | { 35 | return take(std::chrono::duration_cast(rel_time)); 36 | } 37 | 38 | /// @brief Tries to take the semaphore until the given deadline. 39 | /// @param abs_time: deadline to wait until the semaphore becomes available 40 | /// @return true if successful, false if the semaphore is unavailable 41 | /// @remark Thread context callable 42 | template 43 | inline bool try_acquire_until(const std::chrono::time_point& abs_time) 44 | { 45 | return try_acquire_for(duration_until(abs_time)); 46 | } 47 | 48 | /// @brief Makes the semaphore available a given number of times. 49 | /// @param update: the number of available signals to send 50 | /// @remark Thread and ISR context callable 51 | inline void release(count_type update = 1) { (void)give(update); } 52 | 53 | /// @brief Function to observe the semaphore's current acquirable count. 54 | /// @return The semaphore's acquirable count 55 | /// @remark Thread and ISR context callable 56 | inline count_type get_count() const { return queue::size(); } 57 | 58 | protected: 59 | // actual FreeRTOS operations 60 | bool take(tick_timer::duration timeout); 61 | bool give(count_type update); 62 | 63 | // empty constructor is used by mutexes, since the underlying API create calls differ 64 | semaphore() {} 65 | 66 | // only for mutexes 67 | thread* get_mutex_holder() const; 68 | 69 | #if (configUSE_COUNTING_SEMAPHORES == 1) 70 | 71 | // used to create counting_semaphore 72 | semaphore(count_type max, count_type desired); 73 | 74 | #endif // (configUSE_COUNTING_SEMAPHORES == 1) 75 | }; 76 | 77 | #if (configUSE_COUNTING_SEMAPHORES == 1) 78 | 79 | /// @brief Counting semaphore class. 
80 | template 81 | class counting_semaphore : public semaphore 82 | { 83 | public: 84 | /// @brief Maximum value of the internal counter. 85 | static constexpr count_type max() { return MAX_VALUE; } 86 | 87 | /// @brief Constructs a counting semaphore statically. 88 | counting_semaphore(count_type desired = 0) : semaphore(MAX_VALUE, desired) {} 89 | }; 90 | 91 | #endif // (configUSE_COUNTING_SEMAPHORES == 1) 92 | 93 | /// @brief Binary semaphore class. 94 | // using binary_semaphore = std::counting_semaphore<1>; 95 | class binary_semaphore : public semaphore 96 | { 97 | public: 98 | /// @brief Maximum value of the internal counter. 99 | static constexpr count_type max() { return 1; } 100 | 101 | /// @brief Constructs a binary semaphore statically. 102 | binary_semaphore(count_type desired = 0); 103 | }; 104 | } // namespace freertos 105 | 106 | #endif // __FREERTOS_SEMAPHORE_HPP_ 107 | -------------------------------------------------------------------------------- /src/timed_service.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include "freertos/timed_service.hpp" 3 | #include "freertos/cpu.hpp" 4 | #include "freertos/thread.hpp" 5 | #if ESP_PLATFORM 6 | #include 7 | #else 8 | #include 9 | #endif 10 | 11 | namespace freertos 12 | { 13 | #if (configUSE_TIMERS == 1) 14 | 15 | timed_service::~timed_service() 16 | { 17 | configASSERT(!this_cpu::is_in_isr()); 18 | xTimerDelete(handle(), to_ticks(infinity)); 19 | } 20 | 21 | timed_service::timed_service(function func, void* arg, tick_timer::duration period, bool periodic, 22 | const char* name) 23 | { 24 | configASSERT(!this_cpu::is_in_isr()); 25 | /* use the caller-provided name (was DEFAULT_NAME, which ignored the parameter) */ xTimerCreateStatic(name, to_ticks(period), periodic, arg, 26 | reinterpret_cast(func), this); 27 | } 28 | 29 | const char* timed_service::get_name() const 30 | { 31 | return pcTimerGetName(handle()); 32 | } 33 | 34 | bool timed_service::is_reloading() const 35 | { 36 | 
configASSERT(!this_cpu::is_in_isr()); 37 | return uxTimerGetReloadMode(handle()); 38 | } 39 | 40 | void timed_service::set_reloading(bool reloading) 41 | { 42 | vTimerSetReloadMode(handle(), reloading); 43 | } 44 | 45 | tick_timer::time_point timed_service::get_trigger_time() const 46 | { 47 | return tick_timer::time_point(tick_timer::duration(xTimerGetExpiryTime(handle()))); 48 | } 49 | 50 | void* timed_service::get_owner() const 51 | { 52 | configASSERT(!this_cpu::is_in_isr()); 53 | return pvTimerGetTimerID(handle()); 54 | } 55 | 56 | void timed_service::set_owner(void* owner) 57 | { 58 | configASSERT(!this_cpu::is_in_isr()); 59 | vTimerSetTimerID(handle(), owner); 60 | } 61 | 62 | bool timed_service::is_active() const 63 | { 64 | configASSERT(!this_cpu::is_in_isr()); 65 | return xTimerIsTimerActive(handle()); 66 | } 67 | 68 | bool timed_service::start(tick_timer::duration waittime) 69 | { 70 | if (!this_cpu::is_in_isr()) 71 | { 72 | return xTimerStart(handle(), to_ticks(waittime)); 73 | } 74 | else 75 | { 76 | // cannot wait in ISR 77 | configASSERT(to_ticks(waittime) == 0); 78 | 79 | BaseType_t needs_yield = false; 80 | bool success = xTimerStartFromISR(handle(), &needs_yield); 81 | portYIELD_FROM_ISR(needs_yield); 82 | return success; 83 | } 84 | } 85 | 86 | bool timed_service::stop(tick_timer::duration waittime) 87 | { 88 | if (!this_cpu::is_in_isr()) 89 | { 90 | return xTimerStop(handle(), to_ticks(waittime)); 91 | } 92 | else 93 | { 94 | // cannot wait in ISR 95 | configASSERT(to_ticks(waittime) == 0); 96 | 97 | BaseType_t needs_yield = false; 98 | bool success = xTimerStopFromISR(handle(), &needs_yield); 99 | portYIELD_FROM_ISR(needs_yield); 100 | return success; 101 | } 102 | } 103 | 104 | bool timed_service::reset(tick_timer::duration waittime) 105 | { 106 | if (!this_cpu::is_in_isr()) 107 | { 108 | return xTimerReset(handle(), to_ticks(waittime)); 109 | } 110 | else 111 | { 112 | // cannot wait in ISR 113 | configASSERT(to_ticks(waittime) == 0); 114 | 
115 | BaseType_t needs_yield = false; 116 | bool success = xTimerResetFromISR(handle(), &needs_yield); 117 | portYIELD_FROM_ISR(needs_yield); 118 | return success; 119 | } 120 | } 121 | 122 | tick_timer::duration timed_service::get_period() const 123 | { 124 | configASSERT(!this_cpu::is_in_isr()); 125 | return tick_timer::duration(xTimerGetPeriod(handle())); 126 | } 127 | 128 | bool timed_service::set_period(tick_timer::duration new_period, tick_timer::duration waittime) 129 | { 130 | if (!this_cpu::is_in_isr()) 131 | { 132 | return xTimerChangePeriod(handle(), to_ticks(new_period), to_ticks(waittime)); 133 | } 134 | else 135 | { 136 | // cannot wait in ISR 137 | configASSERT(to_ticks(waittime) == 0); 138 | 139 | BaseType_t needs_yield = false; 140 | bool success = xTimerChangePeriodFromISR(handle(), to_ticks(new_period), &needs_yield); 141 | portYIELD_FROM_ISR(needs_yield); 142 | return success; 143 | } 144 | } 145 | 146 | thread* timed_service::get_service_thread() 147 | { 148 | return reinterpret_cast(xTimerGetTimerDaemonTaskHandle()); 149 | } 150 | 151 | #endif // (configUSE_TIMERS == 1) 152 | } // namespace freertos 153 | -------------------------------------------------------------------------------- /src/queue.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include "freertos/queue.hpp" 3 | #include "freertos/cpu.hpp" 4 | #if ESP_PLATFORM 5 | #include 6 | #else 7 | #include 8 | #endif 9 | 10 | namespace freertos 11 | { 12 | queue::size_type queue::size() const 13 | { 14 | if (!this_cpu::is_in_isr()) 15 | { 16 | return uxQueueMessagesWaiting(handle()); 17 | } 18 | else 19 | { 20 | return uxQueueMessagesWaitingFromISR(handle()); 21 | } 22 | } 23 | 24 | queue::size_type queue::available() const 25 | { 26 | // no ISR API available 27 | configASSERT(!this_cpu::is_in_isr()); 28 | { 29 | return uxQueueSpacesAvailable(handle()); 30 | } 31 | } 32 | 33 | bool queue::full() const 34 | { 35 | 
if (!this_cpu::is_in_isr()) 36 | { 37 | /* full == no spaces left; (was swapped with empty()'s check) */ return uxQueueSpacesAvailable(handle()) == 0; 38 | } 39 | else 40 | { 41 | return xQueueIsQueueFullFromISR(handle()); 42 | } 43 | } 44 | 45 | bool queue::empty() const 46 | { 47 | if (!this_cpu::is_in_isr()) 48 | { 49 | /* empty == no messages waiting; (was swapped with full()'s check) */ return uxQueueMessagesWaiting(handle()) == 0; 50 | } 51 | else 52 | { 53 | return xQueueIsQueueEmptyFromISR(handle()); 54 | } 55 | } 56 | 57 | #if 0 58 | queue::size_type queue::max_size() const 59 | { 60 | // no public API available to read the queue length 61 | // doing size() + available() could lead to races 62 | return base_.uxDummy4[1]; 63 | } 64 | #endif 65 | 66 | void queue::reset() 67 | { 68 | // no ISR API available 69 | configASSERT(!this_cpu::is_in_isr()); 70 | { 71 | // call always succeeds 72 | (void)xQueueReset(handle()); 73 | } 74 | } 75 | 76 | queue::~queue() 77 | { 78 | // destruction not allowed in ISR 79 | configASSERT(!this_cpu::is_in_isr()); 80 | 81 | vQueueDelete(handle()); 82 | } 83 | 84 | bool queue::push_front(void* data, tick_timer::duration waittime) 85 | { 86 | if (!this_cpu::is_in_isr()) 87 | { 88 | return xQueueSendToFront(handle(), data, to_ticks(waittime)); 89 | } 90 | else 91 | { 92 | // cannot wait in ISR 93 | configASSERT(to_ticks(waittime) == 0); 94 | 95 | BaseType_t needs_yield = false; 96 | bool success = xQueueSendToFrontFromISR(handle(), data, &needs_yield); 97 | portYIELD_FROM_ISR(needs_yield); 98 | return success; 99 | } 100 | } 101 | 102 | bool queue::push_back(void* data, tick_timer::duration waittime) 103 | { 104 | if (!this_cpu::is_in_isr()) 105 | { 106 | return xQueueSendToBack(handle(), data, to_ticks(waittime)); 107 | } 108 | else 109 | { 110 | // cannot wait in ISR 111 | configASSERT(to_ticks(waittime) == 0); 112 | 113 | BaseType_t needs_yield = false; 114 | bool success = xQueueSendToBackFromISR(handle(), data, &needs_yield); 115 | portYIELD_FROM_ISR(needs_yield); 116 | return success; 117 | } 118 | } 119 | 120 | void queue::replace(void* data) 121 | { 122 | 
if (!this_cpu::is_in_isr()) 123 | { 124 | bool success = xQueueOverwrite(handle(), data); 125 | configASSERT(success); 126 | } 127 | else 128 | { 129 | BaseType_t needs_yield = false; 130 | bool success = xQueueOverwriteFromISR(handle(), data, &needs_yield); 131 | configASSERT(success); 132 | portYIELD_FROM_ISR(needs_yield); 133 | } 134 | } 135 | 136 | bool queue::peek_front(void* data, tick_timer::duration waittime) const 137 | { 138 | if (!this_cpu::is_in_isr()) 139 | { 140 | return xQueuePeek(handle(), data, to_ticks(waittime)); 141 | } 142 | else 143 | { 144 | return xQueuePeekFromISR(handle(), data); 145 | } 146 | } 147 | 148 | bool queue::pop_front(void* data, tick_timer::duration waittime) 149 | { 150 | if (!this_cpu::is_in_isr()) 151 | { 152 | return xQueueReceive(handle(), data, to_ticks(waittime)); 153 | } 154 | else 155 | { 156 | // cannot wait in ISR 157 | configASSERT(to_ticks(waittime) == 0); 158 | 159 | BaseType_t needs_yield = false; 160 | bool success = xQueueReceiveFromISR(handle(), data, &needs_yield); 161 | portYIELD_FROM_ISR(needs_yield); 162 | return success; 163 | } 164 | } 165 | 166 | queue::queue(size_type size, size_type elem_size, unsigned char* elem_buffer) 167 | { 168 | // construction not allowed in ISR 169 | configASSERT(!this_cpu::is_in_isr()); 170 | 171 | (void)xQueueCreateStatic(size, elem_size, elem_buffer, this); 172 | } 173 | 174 | #if 0 && (configSUPPORT_DYNAMIC_ALLOCATION == 1) 175 | 176 | queue* queue::create(size_type size, size_type elem_size) 177 | { 178 | // construction not allowed in ISR 179 | configASSERT(!this_cpu::is_in_isr()); 180 | 181 | return reinterpret_cast(xQueueCreate(size, elem_size)); 182 | } 183 | 184 | #endif // (configSUPPORT_DYNAMIC_ALLOCATION == 1) 185 | } // namespace freertos 186 | -------------------------------------------------------------------------------- /include/freertos/mutex.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: 
MIT 2 | #ifndef __FREERTOS_MUTEX_HPP_ 3 | #define __FREERTOS_MUTEX_HPP_ 4 | 5 | #include "freertos/semaphore.hpp" 6 | #include "freertos/stdlib.hpp" 7 | 8 | namespace freertos 9 | { 10 | #if (configUSE_MUTEXES == 1) 11 | 12 | /// @brief A class implementing std::mutex and std::timed_mutex API. 13 | class mutex : protected semaphore 14 | { 15 | public: 16 | /// @brief Locks the mutex, blocks until the mutex is lockable. 17 | /// @remark Thread context callable 18 | inline void lock() { semaphore::acquire(); } 19 | 20 | /// @brief Attempts to lock the mutex. 21 | /// @return true if the mutex got locked, false if it's already locked 22 | /// @remark Thread context callable 23 | inline bool try_lock() { return semaphore::try_acquire(); } 24 | 25 | /// @brief Unlocks the mutex. 26 | /// @remark Thread context callable 27 | void unlock(); 28 | 29 | /// @brief Tries to lock the mutex within the given time duration. 30 | /// @param rel_time: duration to wait for the mutex to become unlocked 31 | /// @return true if successful, false if the mutex is locked 32 | /// @remark Thread context callable 33 | template 34 | inline bool try_lock_for(const std::chrono::duration& rel_time) 35 | { 36 | return semaphore::try_acquire_for(rel_time); 37 | } 38 | 39 | /// @brief Tries to lock the mutex until the given deadline. 40 | /// @param abs_time: deadline to wait until the mutex becomes unlocked 41 | /// @return true if successful, false if the mutex is locked 42 | /// @remark Thread context callable 43 | template 44 | inline bool try_lock_until(const std::chrono::time_point& abs_time) 45 | { 46 | return semaphore::try_acquire_until(abs_time); 47 | } 48 | 49 | /// @brief Function to observe the mutex's current locking thread. 
50 | /// @return The mutex's current holder thread, or nullptr if the mutex is unlocked 51 | /// @remark Thread and ISR context callable 52 | inline thread* get_locking_thread() const { return semaphore::get_mutex_holder(); } 53 | 54 | /// @brief Constructs a mutex statically. 55 | mutex(); 56 | }; 57 | 58 | /// @brief A type alias since @ref mutex already implements std::timed_mutex API. 59 | using timed_mutex = mutex; 60 | 61 | #if (configUSE_RECURSIVE_MUTEXES == 1) 62 | 63 | /// @brief A class implementing std::recursive_mutex and std::recursive_timed_mutex API. 64 | class recursive_mutex : protected semaphore 65 | { 66 | public: 67 | /// @brief Locks the mutex if the current thread isn't locking it already, 68 | /// blocks until the mutex is lockable. 69 | /// @remark Thread context callable 70 | inline void lock() { (void)recursive_take(infinity); } 71 | 72 | /// @brief Attempts to lock the mutex if the current thread isn't locking it already. 73 | /// @return true if the mutex got locked, false if it's already locked by another thread 74 | /// @remark Thread context callable 75 | inline bool try_lock() { return recursive_take(tick_timer::duration(0)); } 76 | 77 | /// @brief Reduces the thread's lock count and unlocks the mutex when the lock count reaches 78 | /// zero. 79 | /// @remark Thread context callable 80 | void unlock(); 81 | 82 | /// @brief Tries to lock the mutex within the given time duration. 83 | /// @param rel_time: duration to wait for the mutex to become unlocked 84 | /// @return true if successful, false if the mutex is locked 85 | /// @remark Thread context callable 86 | template 87 | inline bool try_lock_for(const std::chrono::duration& rel_time) 88 | { 89 | return recursive_take(std::chrono::duration_cast(rel_time)); 90 | } 91 | 92 | /// @brief Tries to lock the mutex until the given deadline. 
93 | /// @param abs_time: deadline to wait until the mutex becomes unlocked 94 | /// @return true if successful, false if the mutex is locked 95 | /// @remark Thread context callable 96 | template 97 | inline bool try_lock_until(const std::chrono::time_point& abs_time) 98 | { 99 | return try_lock_for(duration_until(abs_time)); 100 | } 101 | 102 | /// @brief Function to observe the mutex's current locking thread. 103 | /// @return The mutex's current holder thread, or nullptr if the mutex is unlocked 104 | /// @remark Thread and ISR context callable 105 | inline thread* get_locking_thread() const { return semaphore::get_mutex_holder(); } 106 | 107 | /// @brief Constructs a recursive mutex statically. 108 | recursive_mutex(); 109 | 110 | private: 111 | bool recursive_take(tick_timer::duration timeout); 112 | }; 113 | 114 | /// @brief A type alias since @ref recursive_mutex already implements std::recursive_timed_mutex 115 | /// API. 116 | using recursive_timed_mutex = recursive_mutex; 117 | 118 | #endif // (configUSE_RECURSIVE_MUTEXES == 1) 119 | 120 | #endif // (configUSE_MUTEXES == 1) 121 | } // namespace freertos 122 | 123 | #endif // __FREERTOS_MUTEX_HPP_ 124 | -------------------------------------------------------------------------------- /include/freertos/condition_variable.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_CONDITION_VARIABLE_HPP_ 3 | #define __FREERTOS_CONDITION_VARIABLE_HPP_ 4 | 5 | #include "freertos/mutex.hpp" 6 | 7 | namespace freertos 8 | { 9 | /// @brief This object allows thread(s) to wait on a condition 10 | /// until they are notified of its change. Matches the std::condition_variable_any API. 11 | class condition_variable_any 12 | { 13 | public: 14 | /// @brief Constructs a condition_variable statically. 
15 | /// @remark Thread context callable 16 | condition_variable_any(); 17 | 18 | /// @brief The destructor may only be called once no threads are waiting. 19 | /// @remark Thread context callable 20 | ~condition_variable_any(); 21 | 22 | /// @brief Unblocks the highest priority waiting thread (if any thread is waiting). 23 | /// @remark Thread and ISR context callable 24 | void notify_one(); 25 | 26 | /// @brief Unblocks all waiting threads. 27 | /// @remark Thread and ISR context callable 28 | void notify_all(); 29 | 30 | /// @brief Atomically unlocks @ref lock, and blocks the thread until a notification is 31 | /// received. 32 | /// When unblocked, the @ref lock is reacquired again. 33 | /// @param lock: the mutex to unlock while waiting on the condition_variable 34 | /// @remark Thread context callable 35 | template 36 | void wait(Lock& lock) 37 | { 38 | (void)wait_for(lock, infinity); 39 | } 40 | 41 | /// @brief Atomically unlocks @ref lock, and blocks the thread 42 | /// until the predicate is true after a notification is received. 43 | /// When unblocked, the @ref lock is reacquired again. 44 | /// @param lock: the mutex to unlock while waiting on the condition_variable 45 | /// @param pred: the condition to wait on 46 | /// @remark Thread context callable 47 | template 48 | void wait(Lock& lock, Predicate pred) 49 | { 50 | while (!pred()) 51 | { 52 | wait(lock); 53 | } 54 | } 55 | 56 | /// @brief Atomically unlocks @ref lock, and blocks the thread 57 | /// until a notification is received or until times out. 58 | /// When unblocked, the @ref lock is reacquired again. 
59 | /// @param lock: the mutex to unlock while waiting on the condition_variable 60 | /// @param rel_time: duration to wait for the notification 61 | /// @return timeout if timed out without any notification, or no_timeout otherwise 62 | /// @remark Thread context callable 63 | template 64 | cv_status wait_for(Lock& lock, const std::chrono::duration& rel_time) 65 | { 66 | pre_wait(); 67 | 68 | lock.unlock(); 69 | 70 | waiter_count_t rx_waiters; 71 | bool success = 72 | do_wait(std::chrono::duration_cast(rel_time), &rx_waiters); 73 | 74 | lock.lock(); 75 | 76 | return post_wait(success, rx_waiters); 77 | } 78 | 79 | /// @brief Atomically unlocks @ref lock, and blocks the thread 80 | /// until a notification is received or until times out. 81 | /// When unblocked, the @ref lock is reacquired again. 82 | /// @param lock: the mutex to unlock while waiting on the condition_variable 83 | /// @param abs_time: deadline to wait for the notification 84 | /// @return timeout if timed out without any notification, or no_timeout otherwise 85 | /// @remark Thread context callable 86 | template 87 | cv_status wait_until(Lock& lock, const std::chrono::time_point& abs_time) 88 | { 89 | return wait_for(lock, duration_until(abs_time)); 90 | } 91 | 92 | /// @brief Atomically unlocks @ref lock, and blocks the thread 93 | /// until the predicate is true after a notification is received, 94 | /// or until times out. When unblocked, the @ref lock is reacquired again. 95 | /// @param lock: the mutex to unlock while waiting on the condition_variable 96 | /// @param abs_time: deadline to wait for the notification 97 | /// @param pred: the condition to wait on 98 | /// @return false if the predicate still evaluates to false after the timeout expired, 99 | /// otherwise true. 
100 | /// @remark Thread context callable 101 | template 102 | bool wait_until(Lock& lock, const std::chrono::time_point& abs_time, Pred pred) 103 | { 104 | while (!pred()) 105 | { 106 | if (wait_until(lock, abs_time) == cv_status::timeout) 107 | { 108 | return pred(); 109 | } 110 | } 111 | return true; 112 | } 113 | 114 | /// @brief Atomically unlocks @ref lock, and blocks the thread 115 | /// until the predicate is true after a notification is received, 116 | /// or until times out. When unblocked, the @ref lock is reacquired again. 117 | /// @param lock: the mutex to unlock while waiting on the condition_variable 118 | /// @param rel_time: duration to wait for the notification 119 | /// @param pred: the condition to wait on 120 | /// @return false if the predicate still evaluates to false after the timeout expired, 121 | /// otherwise true. 122 | /// @remark Thread context callable 123 | template 124 | bool wait_for(Lock& lock, const std::chrono::duration& rel_time, Pred pred) 125 | { 126 | return wait_until(lock, tick_timer::now() + rel_time, std::move(pred)); 127 | } 128 | 129 | private: 130 | using waiter_count_t = ::UBaseType_t; 131 | 132 | shallow_copy_queue queue_; 133 | waiter_count_t waiters_; 134 | 135 | // non-copyable 136 | condition_variable_any(const condition_variable_any&) = delete; 137 | condition_variable_any& operator=(const condition_variable_any&) = delete; 138 | 139 | void notify(waiter_count_t waiters); 140 | 141 | void pre_wait(); 142 | bool do_wait(const tick_timer::duration& rel_time, waiter_count_t* rx_waiters); 143 | cv_status post_wait(bool wait_success, waiter_count_t rx_waiters); 144 | }; 145 | } // namespace freertos 146 | 147 | #endif // __FREERTOS_CONDITION_VARIABLE_HPP_ 148 | -------------------------------------------------------------------------------- /include/freertos/queue.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef 
__FREERTOS_QUEUE_HPP_ 3 | #define __FREERTOS_QUEUE_HPP_ 4 | 5 | #include "freertos/tick_timer.hpp" 6 | 7 | struct QueueDefinition; 8 | 9 | namespace freertos 10 | { 11 | /// @brief An abstract base class for all queues and derivatives. 12 | class queue : protected ::StaticQueue_t 13 | { 14 | public: 15 | using size_type = ::UBaseType_t; 16 | 17 | /// @brief The current occupied size of the queue. 18 | /// @return The number of elements in the queue 19 | /// @remark Thread and ISR context callable 20 | size_type size() const; 21 | 22 | /// @brief The current free size of the queue. 23 | /// @return The number of available spaces in the queue 24 | /// @remark Thread context callable 25 | size_type available() const; 26 | 27 | /// @brief Determines if the queue is currently full. 28 | /// @return True if the queue is full, false otherwise 29 | /// @remark Thread and ISR context callable 30 | bool full() const; 31 | 32 | /// @brief Determines if the queue is currently empty. 33 | /// @return True if the queue is empty, false otherwise 34 | /// @remark Thread and ISR context callable 35 | bool empty() const; 36 | 37 | /// @brief Flushes the queue, resetting it to it's initial empty state. 38 | /// @remark Thread context callable 39 | void reset(); 40 | 41 | /// @brief Destroys the queue, including freeing its dynamically allocated memory. 
42 | /// @remark Thread context callable 43 | ~queue(); 44 | 45 | protected: 46 | inline ::QueueDefinition* handle() const 47 | { 48 | return reinterpret_cast<::QueueDefinition*>(const_cast(this)); 49 | } 50 | 51 | bool push_front(void* data, tick_timer::duration waittime); 52 | bool push_back(void* data, tick_timer::duration waittime); 53 | void replace(void* data); 54 | bool peek_front(void* data, tick_timer::duration waittime) const; 55 | bool pop_front(void* data, tick_timer::duration waittime); 56 | 57 | // empty constructor is used by semaphores, since the underlying API create calls differ 58 | queue() {} 59 | 60 | // used to create shallow_copy_queue 61 | queue(size_type size, size_type elem_size, unsigned char* elem_buffer); 62 | 63 | private: 64 | // non-copyable 65 | queue(const queue&) = delete; 66 | queue& operator=(const queue&) = delete; 67 | // non-movable 68 | queue(const queue&&) = delete; 69 | queue& operator=(const queue&&) = delete; 70 | }; 71 | 72 | template 73 | class ishallow_copy_queue : public queue 74 | { 75 | public: 76 | using value_type = T; 77 | 78 | /// @brief Size of the elements stored in the queue. 79 | static constexpr size_type elem_size() { return sizeof(value_type); } 80 | 81 | /// @brief Pushes a new value to the front of the queue. 82 | /// @param value: the new value to copy 83 | /// @param waittime: duration to wait for the queue to have available space 84 | /// @return true if successful, false if the queue is full 85 | /// @remark Thread and ISR context callable (ISR only with no waittime) 86 | bool push_front(const value_type& value, 87 | tick_timer::duration waittime = tick_timer::duration(0)) 88 | { 89 | return queue::push_front(reinterpret_cast(const_cast(&value)), 90 | waittime); 91 | } 92 | 93 | /// @brief Pushes a new value to the back of the queue. 
94 | /// @param value: the new value to copy 95 | /// @param waittime: duration to wait for the queue to have available space 96 | /// @return true if successful, false if the queue is full 97 | /// @remark Thread and ISR context callable (ISR only with no waittime) 98 | bool push_back(const value_type& value, tick_timer::duration waittime = tick_timer::duration(0)) 99 | { 100 | return queue::push_back(reinterpret_cast(const_cast(&value)), waittime); 101 | } 102 | 103 | /// @brief Replaces the current queue element value to a new one. 104 | /// This call is meant to be used by single length queues only. 105 | /// @param value: the new value to copy 106 | /// @remark Thread and ISR context callable 107 | void replace(const value_type& value) 108 | { 109 | queue::replace(reinterpret_cast(const_cast(&value))); 110 | } 111 | 112 | /// @brief Copies the front value of the queue without consuming it. 113 | /// @param value: the destination pointer to copy to 114 | /// @param waittime: duration to wait for the queue to have an available element 115 | /// @return true if successful, false if the queue is empty 116 | /// @remark Thread and ISR context callable (ISR only with no waittime) 117 | bool peek_front(value_type* value, 118 | tick_timer::duration waittime = tick_timer::duration(0)) const 119 | { 120 | return queue::peek_front(reinterpret_cast(value), waittime); 121 | } 122 | 123 | /// @brief Copies the front value of the queue and removes it from the queue. 
124 | /// @param value: the destination pointer to copy to 125 | /// @param waittime: duration to wait for the queue to have an available element 126 | /// @return true if successful, false if the queue is empty 127 | /// @remark Thread and ISR context callable (ISR only with no waittime) 128 | bool pop_front(value_type* value, tick_timer::duration waittime = tick_timer::duration(0)) 129 | { 130 | return queue::pop_front(reinterpret_cast(value), waittime); 131 | } 132 | 133 | protected: 134 | ishallow_copy_queue(size_type size, size_type elem_size, unsigned char* elem_buffer) 135 | : queue(size, elem_size, elem_buffer) 136 | {} 137 | }; 138 | 139 | /// @brief A thread/ISR-safe queue that stores shallow copies (via memcpy) of pushed elements. 140 | template 141 | class shallow_copy_queue : public ishallow_copy_queue 142 | { 143 | public: 144 | using value_type = T; 145 | using size_type = queue::size_type; 146 | 147 | /// @brief Maximum size of the queue. 148 | static constexpr size_type max_size() { return MAX_SIZE; } 149 | 150 | /// @brief Constructs a shallow-copy queue statically. 151 | shallow_copy_queue() 152 | : ishallow_copy_queue(max_size(), sizeof(value_type), elem_buffer_) 153 | {} 154 | 155 | private: 156 | unsigned char elem_buffer_[max_size() * sizeof(value_type)]; 157 | }; 158 | } // namespace freertos 159 | 160 | #endif // __FREERTOS_QUEUE_HPP_ 161 | -------------------------------------------------------------------------------- /include/freertos/event_group.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_EVENT_GROUP_HPP_ 3 | #define __FREERTOS_EVENT_GROUP_HPP_ 4 | 5 | #include "freertos/tick_timer.hpp" 6 | 7 | struct EventGroupDef_t; 8 | 9 | namespace freertos 10 | { 11 | /// @brief Thin type wrapper for condition flag value type. 
12 | class events 13 | { 14 | public: 15 | using value_type = ::TickType_t; 16 | 17 | constexpr explicit events() : value_(0) {} 18 | constexpr explicit events(value_type value) : value_(value) {} 19 | operator value_type&() { return value_; } 20 | constexpr operator value_type() const { return value_; } 21 | constexpr events operator&(events other) const { return events(value_ & other.value_); } 22 | 23 | static constexpr events max() 24 | { 25 | // the highest byte is reserved for OS flags 26 | return events((1 << ((sizeof(value_type) - 1) * 8)) - 1); 27 | } 28 | static constexpr events min() { return events(); } 29 | 30 | /// @brief The value returned by blocking function calls when the wait time expired 31 | /// without any relevant flags being set. 32 | static constexpr events timeout() { return events(); } 33 | 34 | private: 35 | value_type value_; 36 | }; 37 | 38 | /// @brief This class is a lightweight condition variable, allows threads to block 39 | /// until a combination of flags has been set. The key difference to @ref condition_variable 40 | /// is that here the waiting side chooses the wait strategy: 41 | /// 1. whether to wait for a combination of flags (all) or one of many (any) 42 | /// 2. whether to consume the flag when receiving it (default) or not (shared) 43 | class event_group : private ::StaticEventGroup_t 44 | { 45 | public: 46 | /// @brief Constructs a event_group statically. 47 | /// @remark Thread context callable 48 | event_group(); 49 | 50 | /// @brief Destroys the event_group, including freeing its dynamically allocated memory. 51 | /// @remark Thread context callable 52 | ~event_group(); 53 | 54 | /// @brief Reads the current flags status. 55 | /// @return The currently active flags 56 | /// @remark Thread and ISR context callable 57 | events get() const; 58 | 59 | /// @brief Sets the provided flags in the condition. 
60 | /// @param flags: the flags to activate 61 | /// @remark Thread and ISR context callable 62 | void set(events flags); 63 | 64 | /// @brief Removes the provided flags from the condition. 65 | /// @param flags: the flags to deactivate 66 | /// @remark Thread and ISR context callable 67 | void clear(events flags); 68 | 69 | /// @brief Blocks the current thread until any of the provided flags is raised. 70 | /// When a flag unblocks the thread, it will be cleared. 71 | /// @param flags: selection of flags to wait on 72 | /// @param rel_time: duration to wait for the activation 73 | /// @return the raised flag(s) that caused the activation, or 0 if timed out 74 | /// @remark Thread context callable 75 | template 76 | events wait_any_for(events flags, const std::chrono::duration& rel_time) 77 | { 78 | return wait(flags, std::chrono::duration_cast(rel_time), true, false); 79 | } 80 | 81 | /// @brief Blocks the current thread until any of the provided flags is raised. 82 | /// When a flag unblocks the thread, it will be cleared. 83 | /// @param flags: selection of flags to wait on 84 | /// @param abs_time: deadline to wait for the activation 85 | /// @return the raised flag(s) that caused the activation, or 0 if timed out 86 | /// @remark Thread context callable 87 | template 88 | events wait_any_until(events flags, const std::chrono::time_point& abs_time) 89 | { 90 | return wait_any_for(flags, duration_until(abs_time)); 91 | } 92 | 93 | /// @brief Blocks the current thread until all of the provided flags are raised. 94 | /// When the thread is unblocked, the required flags will be cleared. 
95 | /// @param flags: combination of flags to wait on 96 | /// @param rel_time: duration to wait for the activation 97 | /// @return the raised flag(s) that caused the activation, or 0 if timed out 98 | /// @remark Thread context callable 99 | template 100 | events wait_all_for(events flags, const std::chrono::duration& rel_time) 101 | { 102 | return wait(flags, std::chrono::duration_cast(rel_time), true, true); 103 | } 104 | 105 | /// @brief Blocks the current thread until all of the provided flags are raised. 106 | /// When the thread is unblocked, the required flags will be cleared. 107 | /// @param flags: combination of flags to wait on 108 | /// @param abs_time: deadline to wait for the activation 109 | /// @return the raised flag(s) that caused the activation, or 0 if timed out 110 | /// @remark Thread context callable 111 | template 112 | events wait_all_until(events flags, const std::chrono::time_point& abs_time) 113 | { 114 | return wait_all_for(flags, duration_until(abs_time)); 115 | } 116 | 117 | /// @brief Blocks the current thread until any of the provided flags is raised. 118 | /// Doesn't modify the flags upon activation. 119 | /// @param flags: selection of flags to wait on 120 | /// @param rel_time: duration to wait for the activation 121 | /// @return the raised flag(s) that caused the activation, or 0 if timed out 122 | /// @remark Thread context callable 123 | template 124 | events shared_wait_any_for(events flags, const std::chrono::duration& rel_time) 125 | { 126 | return wait(flags, std::chrono::duration_cast(rel_time), false, 127 | false); 128 | } 129 | 130 | /// @brief Blocks the current thread until any of the provided flags is raised. 131 | /// Doesn't modify the flags upon activation. 
132 | /// @param flags: selection of flags to wait on 133 | /// @param abs_time: deadline to wait for the activation 134 | /// @return the raised flag(s) that caused the activation, or 0 if timed out 135 | /// @remark Thread context callable 136 | template 137 | events shared_wait_any_until(events flags, 138 | const std::chrono::time_point& abs_time) 139 | { 140 | return shared_wait_any_for(flags, duration_until(abs_time)); 141 | } 142 | 143 | /// @brief Blocks the current thread until all of the provided flags are raised. 144 | /// Doesn't modify the flags upon activation. 145 | /// @param flags: combination of flags to wait on 146 | /// @param rel_time: duration to wait for the activation 147 | /// @return the raised flag(s) that caused the activation, or 0 if timed out 148 | /// @remark Thread context callable 149 | template 150 | events shared_wait_all_for(events flags, const std::chrono::duration& rel_time) 151 | { 152 | return wait(flags, std::chrono::duration_cast(rel_time), false, true); 153 | } 154 | 155 | /// @brief Blocks the current thread until all of the provided flags are raised. 156 | /// Doesn't modify the flags upon activation. 
157 | /// @param flags: combination of flags to wait on 158 | /// @param abs_time: deadline to wait for the activation 159 | /// @return the raised flag(s) that caused the activation, or 0 if timed out 160 | /// @remark Thread context callable 161 | template 162 | events shared_wait_all_until(events flags, 163 | const std::chrono::time_point& abs_time) 164 | { 165 | return shared_wait_all_for(flags, duration_until(abs_time)); 166 | } 167 | 168 | protected: 169 | inline ::EventGroupDef_t* handle() const 170 | { 171 | return reinterpret_cast<::EventGroupDef_t*>(const_cast(this)); 172 | } 173 | 174 | events wait(events flags, const tick_timer::duration& rel_time, bool exclusive, bool match_all); 175 | }; 176 | } // namespace freertos 177 | 178 | #endif // __FREERTOS_EVENT_GROUP_HPP_ 179 | -------------------------------------------------------------------------------- /src/thread.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #include "freertos/thread.hpp" 3 | #include "freertos/cpu.hpp" 4 | #include "freertos/event_group.hpp" 5 | #include "freertos/scheduler.hpp" 6 | #if ESP_PLATFORM 7 | #include 8 | #else 9 | #include 10 | #endif 11 | 12 | #if configTASK_RETURN_ADDRESS != vTaskExitHandler 13 | #error "Ensure that FreeRTOSConfig.h contains: #define configTASK_RETURN_ADDRESS vTaskExitHandler" 14 | #endif 15 | 16 | /// @brief The thread's execution enters this function when the thread function returns. 
17 | extern "C" void vTaskExitHandler(void) 18 | { 19 | freertos::thread* t = freertos::thread::get_current(); 20 | // call destructor 21 | t->~thread(); 22 | } 23 | 24 | namespace freertos 25 | { 26 | thread::~thread() 27 | { 28 | signal_exit(); 29 | 30 | vTaskDelete(handle()); 31 | } 32 | 33 | void thread::signal_exit() 34 | { 35 | #ifdef configTHREAD_EXIT_CONDITION_INDEX 36 | 37 | cpu::critical_section cs; 38 | const lock_guard lock(cs); 39 | 40 | auto cond = get_exit_condition(); 41 | // at task creation, this pointer is set to NULL 42 | if (cond != nullptr) 43 | { 44 | // if there is, signal it 45 | cond->set(cflag::max()); 46 | } 47 | 48 | #endif // configTHREAD_EXIT_CONDITION_INDEX 49 | } 50 | 51 | #ifdef configTHREAD_EXIT_CONDITION_INDEX 52 | 53 | #if (configNUM_THREAD_LOCAL_STORAGE_POINTERS <= configTHREAD_EXIT_CONDITION_INDEX) 54 | #error "Thread exit condition storage must be allowed." 55 | #endif 56 | 57 | condition_flags* thread::get_exit_condition() const 58 | { 59 | return reinterpret_cast( 60 | pvTaskGetThreadLocalStoragePointer(handle(), configTHREAD_EXIT_CONDITION_INDEX)); 61 | } 62 | 63 | void thread::set_exit_condition(condition_flags* cond) 64 | { 65 | configASSERT(nullptr == get_exit_condition()); // cannot have multiple threads joining 66 | vTaskSetThreadLocalStoragePointer(handle(), configTHREAD_EXIT_CONDITION_INDEX, 67 | reinterpret_cast(cond)); 68 | } 69 | 70 | bool thread::joinable() const 71 | { 72 | return (get_state() != state::terminated) && (nullptr == get_exit_condition()); 73 | } 74 | 75 | void thread::join() 76 | { 77 | configASSERT(joinable()); // else invalid_argument 78 | configASSERT(this->get_id() != 79 | freertos::this_thread::get_id()); // else resource_deadlock_would_occur 80 | 81 | condition_flags exit_cond; 82 | set_exit_condition(&exit_cond); 83 | 84 | // wait for signal from thread exit 85 | exit_cond.shared_wait_any_for(cflag::max(), infinity); 86 | 87 | // signal received, thread is deleted, return 88 | } 89 | 90 | 
#endif // configTHREAD_EXIT_CONDITION_INDEX 91 | 92 | void thread::suspend() 93 | { 94 | configASSERT(!this_cpu::is_in_isr()); 95 | { 96 | vTaskSuspend(handle()); 97 | } 98 | } 99 | 100 | void thread::resume() 101 | { 102 | if (!this_cpu::is_in_isr()) 103 | { 104 | vTaskResume(handle()); 105 | } 106 | else 107 | { 108 | BaseType_t needs_yield = xTaskResumeFromISR(handle()); 109 | portYIELD_FROM_ISR(needs_yield); 110 | } 111 | } 112 | 113 | thread::priority thread::get_priority() const 114 | { 115 | if (!this_cpu::is_in_isr()) 116 | { 117 | return uxTaskPriorityGet(handle()); 118 | } 119 | else 120 | { 121 | return uxTaskPriorityGetFromISR(handle()); 122 | } 123 | } 124 | 125 | void thread::set_priority(priority prio) 126 | { 127 | configASSERT(!this_cpu::is_in_isr()); 128 | { 129 | vTaskPrioritySet(handle(), prio); 130 | } 131 | } 132 | 133 | thread::id thread::get_id() const 134 | { 135 | #if (configUSE_TRACE_FACILITY == 1) 136 | return uxTaskGetTaskNumber(handle()); 137 | #else 138 | return id(this); 139 | #endif // (configUSE_TRACE_FACILITY == 1) 140 | } 141 | 142 | const char* thread::get_name() 143 | { 144 | return const_cast(pcTaskGetName(handle())); 145 | } 146 | 147 | thread::state thread::get_state() const 148 | { 149 | configASSERT(!this_cpu::is_in_isr()); 150 | { 151 | state s; 152 | switch (eTaskGetState(handle())) 153 | { 154 | case eTaskState::eRunning: 155 | s = state::running; 156 | break; 157 | case eTaskState::eReady: 158 | s = state::ready; 159 | break; 160 | case eTaskState::eBlocked: 161 | case eTaskState::eSuspended: 162 | s = state::suspended; 163 | break; 164 | case eTaskState::eDeleted: 165 | case eTaskState::eInvalid: 166 | default: 167 | s = state::terminated; 168 | break; 169 | } 170 | return s; 171 | } 172 | } 173 | 174 | thread* thread::get_current() 175 | { 176 | configASSERT(!this_cpu::is_in_isr()); 177 | configASSERT(scheduler::get_state() != scheduler::state::uninitialized); 178 | 179 | return 
reinterpret_cast(xTaskGetCurrentTaskHandle()); 180 | } 181 | 182 | #if (configSUPPORT_DYNAMIC_ALLOCATION == 1) 183 | 184 | thread* make_thread(thread::function func, void* param, size_t stacksize, thread::priority prio, 185 | const char* name) 186 | { 187 | thread* t = nullptr; 188 | bool res = xTaskCreate(func, name, stacksize / sizeof(StackType_t), param, prio, 189 | reinterpret_cast(&t)); 190 | configASSERT(res); 191 | return t; 192 | } 193 | 194 | #endif // (configSUPPORT_DYNAMIC_ALLOCATION == 1) 195 | 196 | thread::thread(StackType_t* pstack, std::uint32_t stack_size, function func, void* param, 197 | priority prio, const char* name) 198 | { 199 | (void)xTaskCreateStatic(func, name, stack_size, param, prio, pstack, this); 200 | } 201 | 202 | #if (configUSE_TASK_NOTIFICATIONS == 1) 203 | 204 | bool thread::notifier::notify(unsigned action, notify_value value) 205 | { 206 | if (!this_cpu::is_in_isr()) 207 | { 208 | return 209 | #if (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 210 | xTaskNotifyAndQueryIndexed(handle(), index_ 211 | #else 212 | xTaskNotifyAndQuery(handle() 213 | #endif // (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 214 | , 215 | value, static_cast(action), &last_value_); 216 | } 217 | else 218 | { 219 | BaseType_t needs_yield = false; 220 | bool success = 221 | #if (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 222 | xTaskNotifyAndQueryIndexedFromISR(handle(), index_ 223 | #else 224 | xTaskNotifyAndQueryFromISR(handle() 225 | #endif // (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 226 | , 227 | value, static_cast(action), 228 | &last_value_, &needs_yield); 229 | portYIELD_FROM_ISR(needs_yield); 230 | return success; 231 | } 232 | } 233 | 234 | void thread::notifier::signal() 235 | { 236 | notify(eNoAction, 0); 237 | } 238 | 239 | bool thread::notifier::cancel_signal() 240 | { 241 | configASSERT(!this_cpu::is_in_isr()); 242 | return 243 | #if (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 244 | xTaskNotifyStateClearIndexed(handle(), index_ 245 | #else 246 | 
xTaskNotifyStateClear(handle() 247 | #endif // (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 248 | ); 249 | } 250 | 251 | void thread::notifier::increment() 252 | { 253 | notify(eIncrement, 0); 254 | } 255 | 256 | void thread::notifier::set_flags(notify_value flags) 257 | { 258 | notify(eSetBits, flags); 259 | } 260 | 261 | notify_value thread::notifier::clear(notify_value flags) 262 | { 263 | configASSERT(!this_cpu::is_in_isr()); 264 | return 265 | #if (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 266 | ulTaskNotifyValueClearIndexed(handle(), index_ 267 | #else 268 | ulTaskNotifyValueClear(handle() 269 | #endif // (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 270 | , 271 | flags); 272 | } 273 | 274 | bool thread::notifier::try_set_value(notify_value new_value) 275 | { 276 | return notify(eSetValueWithoutOverwrite, new_value); 277 | } 278 | 279 | void thread::notifier::set_value(notify_value new_value) 280 | { 281 | notify(eSetValueWithOverwrite, new_value); 282 | } 283 | 284 | bool this_thread::wait_notification_for(const tick_timer::duration& rel_time, 285 | thread::notify_value* value, 286 | thread::notify_value clear_flags_before, 287 | thread::notify_value clear_flags_after) 288 | { 289 | configASSERT(!this_cpu::is_in_isr()); 290 | return xTaskNotifyWait(clear_flags_before, clear_flags_after, value, to_ticks(rel_time)); 291 | } 292 | 293 | thread::notify_value this_thread::try_acquire_notification_for(const tick_timer::duration& rel_time, 294 | bool acquire_single) 295 | { 296 | configASSERT(!this_cpu::is_in_isr()); 297 | return ulTaskNotifyTake(!acquire_single, to_ticks(rel_time)); 298 | } 299 | 300 | #if (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 301 | 302 | bool this_thread::wait_notification_for(thread::notifier::index_type index, 303 | const tick_timer::duration& rel_time, 304 | thread::notify_value* value, 305 | thread::notify_value clear_flags_before, 306 | thread::notify_value clear_flags_after) 307 | { 308 | configASSERT(!this_cpu::is_in_isr()); 309 | 
return xTaskNotifyWaitIndexed(index, clear_flags_before, clear_flags_after, value, 310 | to_ticks(rel_time)); 311 | } 312 | 313 | thread::notify_value this_thread::try_acquire_notification_for(thread::notifier::index_type index, 314 | const tick_timer::duration& rel_time, 315 | bool acquire_single) 316 | { 317 | configASSERT(!this_cpu::is_in_isr()); 318 | return ulTaskNotifyTakeIndexed(index, !acquire_single, to_ticks(rel_time)); 319 | } 320 | 321 | #endif // (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 322 | 323 | #endif // (configUSE_TASK_NOTIFICATIONS == 1) 324 | 325 | void this_thread::yield() 326 | { 327 | configASSERT(!this_cpu::is_in_isr()); 328 | configASSERT(scheduler::get_state() != scheduler::state::uninitialized); 329 | 330 | taskYIELD(); 331 | } 332 | 333 | thread::id this_thread::get_id() 334 | { 335 | return thread::get_current()->get_id(); 336 | } 337 | 338 | void this_thread::sleep_for(tick_timer::duration rel_time) 339 | { 340 | configASSERT(!this_cpu::is_in_isr()); 341 | configASSERT(scheduler::get_state() == scheduler::state::running); 342 | 343 | vTaskDelay(to_ticks(rel_time)); 344 | } 345 | 346 | void this_thread::terminate() 347 | { 348 | thread::get_current()->~thread(); 349 | } 350 | 351 | } // namespace freertos 352 | -------------------------------------------------------------------------------- /include/freertos/thread.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | #ifndef __FREERTOS_THREAD_HPP_ 3 | #define __FREERTOS_THREAD_HPP_ 4 | 5 | #include "freertos/stdlib.hpp" 6 | #include "freertos/tick_timer.hpp" 7 | 8 | // opaque thread definition 9 | struct tskTaskControlBlock; 10 | 11 | namespace freertos 12 | { 13 | namespace detail 14 | { 15 | constexpr ::UBaseType_t TOP_PRIORITY = configMAX_PRIORITIES - 1; 16 | constexpr uint32_t MIN_STACK_SIZE = configMINIMAL_STACK_SIZE; 17 | } // namespace detail 18 | 19 | class event_group; 20 | 21 | using notify_value = 
std::uint32_t; 22 | 23 | /// @brief A class representing a thread of execution (equals to a FreeRTOS task). 24 | /// 25 | /// @attention The default FreeRTOS thread termination behavior is modified to allow 26 | /// threads to terminate by returning. The destructor will signal the exit semaphore, 27 | /// then delete the thread. 28 | /// 29 | /// It is the responsibility of the implementer do decide 30 | /// how the lifetime of the thread ends: 31 | /// 32 | /// a) through destruction of the thread object (externally controlled termination) 33 | /// b) by allowing the thread function to return (internally controlled termination) 34 | /// 35 | /// If externally controlled termination is desired, then it must be ensured 36 | /// that the thread function never returns. For this use-case a @ref static_thread 37 | /// is recommended. 38 | /// 39 | /// If internally controlled termination is desired, then the thread must be 40 | /// dynamically allocated, so the working memory is freed as a result of the 41 | /// thread's termination. In this case no smart pointer mechanism should be 42 | /// used to manage the allocated thread pointer. (This is doubly important since 43 | /// the thread's memory is allocated in two chunks.) 44 | /// 45 | class thread : private ::StaticTask_t 46 | { 47 | public: 48 | static constexpr std::size_t NAME_MAX_SIZE = 49 | configMAX_TASK_NAME_LEN - 1; // exclude the terminating '\0' 50 | 51 | using function = ::TaskFunction_t; 52 | 53 | #if (configUSE_TRACE_FACILITY == 1) 54 | 55 | using id = ::UBaseType_t; 56 | 57 | #else 58 | 59 | using id = std::uintptr_t; 60 | 61 | #endif // (configUSE_TRACE_FACILITY == 1) 62 | 63 | /// @brief Number of concurrent threads supported. 
64 | /// @return Fixed to 0 since FreeRTOS doesn't have such limitation by design, 65 | /// only resource constraints apply 66 | static constexpr unsigned int hardware_concurrency() { return 0; } 67 | 68 | /// @brief Signals to the thread's observer that it's being terminated, 69 | /// and destroys the thread, stopping its execution and freeing 70 | /// its dynamically allocated memory. 71 | /// @remark Thread context callable 72 | ~thread(); 73 | 74 | #ifdef configTHREAD_EXIT_CONDITION_INDEX 75 | 76 | private: 77 | condition_flags* get_exit_condition() const; 78 | void set_exit_condition(condition_flags* cond); 79 | 80 | public: 81 | /// @brief Waits for the thread to finish execution. 82 | /// @note May only be called when the thread is joinable, and not from the owned thread's 83 | /// context 84 | void join(); 85 | 86 | /// @brief Checks if the thread is joinable (potentially executing). 87 | /// @return true if the thread is valid and hasn't been joined, false otherwise 88 | /// @remark Thread and ISR context callable 89 | bool joinable() const; 90 | 91 | #endif // configTHREAD_EXIT_CONDITION_INDEX 92 | 93 | // detach is not supported. 94 | // if the thread is create()-d with dynamic allocation then the thread is already detached 95 | // if the thread is statically allocated, detach is not possible 96 | 97 | /// @brief Suspends the execution of the thread, until @ref resume is called. 98 | /// @remark Thread context callable 99 | void suspend(); 100 | 101 | /// @brief Resumes the execution of the suspended thread. 102 | /// @remark Thread and ISR context callable 103 | void resume(); 104 | 105 | /// @brief Provides a unique identifier of the thread. 106 | /// @return The thread's unique identifier (0 is reserved as invalid) 107 | /// @remark Thread and ISR context callable 108 | id get_id() const; 109 | 110 | /// @brief Reads the thread's friendly name (the string's size is limited to @ref 111 | /// NAME_MAX_SIZE). 
112 | /// @return Pointer to the thread's name 113 | /// @remark Thread and ISR context callable 114 | const char* get_name(); 115 | 116 | /// @brief Possible operating states of a thread. 117 | enum class state 118 | { 119 | running = 0, 120 | ready, 121 | suspended, 122 | terminated, 123 | }; 124 | 125 | /// @brief Reads the current state of the thread. 126 | /// @return The current state of the thread 127 | /// @remark Thread context callable 128 | state get_state() const; 129 | 130 | /// @brief Returns the currently executing thread. 131 | /// @return Pointer to the currently executing thread 132 | /// @remark Thread context callable (obviously) 133 | static thread* get_current(); 134 | 135 | /// @brief Thin type wrapper for thread priority. 136 | class priority 137 | { 138 | public: 139 | using value_type = ::UBaseType_t; 140 | 141 | /// @brief Default priority is 1, to preempt the built-in IDLE thread of the RTOS. 142 | constexpr priority() : value_(1) {} 143 | constexpr priority(value_type value) : value_(value) {} 144 | constexpr operator value_type&() { return value_; } 145 | constexpr operator value_type() const { return value_; } 146 | 147 | static constexpr priority max() { return detail::TOP_PRIORITY; } 148 | static constexpr priority min() { return 0; } 149 | 150 | private: 151 | value_type value_; 152 | }; 153 | 154 | /// @brief Returns the thread's current priority level. 
155 | /// @return The priority of the thread 156 | /// @remark Thread and ISR context callable 157 | priority get_priority() const; 158 | 159 | /// @return Pointer to the currently executing thread 160 | /// @remark Thread context callable 161 | void set_priority(priority prio); 162 | 163 | // use make_thread() instead 164 | void* operator new(size_t size) = delete; 165 | void* operator new[](size_t size) = delete; 166 | 167 | /// @brief Empty delete operator, since the destructor does the memory freeing 168 | /// if the object was dynamically allocated 169 | void operator delete(void* p) {} 170 | void operator delete[](void* p) {} 171 | 172 | #if (configUSE_TASK_NOTIFICATIONS == 1) 173 | 174 | using notify_value = std::uint32_t; 175 | 176 | /// @brief A lightweight synchronization and inter-process communication mechanism 177 | /// for a single thread. 178 | class notifier 179 | { 180 | public: 181 | constexpr notifier(thread& t = *thread::get_current()) : thread_(&t) {} 182 | 183 | using index_type = ::UBaseType_t; 184 | 185 | #if (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 186 | 187 | constexpr notifier(thread& t, index_type index) : thread_(&t), index_(index) {} 188 | 189 | #endif // (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 190 | 191 | inline notify_value get_last_value() const { return last_value_; } 192 | 193 | inline notify_value get_value() { return clear(0); } 194 | 195 | void signal(); 196 | bool cancel_signal(); 197 | 198 | void increment(); 199 | 200 | void set_flags(notify_value flags); 201 | template 202 | void set_flags(T flags) 203 | { 204 | const auto method = static_cast(¬ifier::set_flags); 205 | (this->*method)(static_cast(flags)); 206 | } 207 | 208 | void clear_flags(notify_value flags) { last_value_ = clear(flags); } 209 | template 210 | void clear_flags(T flags) 211 | { 212 | const auto method = 213 | static_cast(¬ifier::clear_flags); 214 | (this->*method)(static_cast(flags)); 215 | } 216 | 217 | bool try_set_value(notify_value new_value); 
218 | 219 | void set_value(notify_value new_value); 220 | 221 | void reset_value() { last_value_ = clear(~0); } 222 | 223 | thread* get_thread() const { return thread_; } 224 | 225 | private: 226 | thread* const thread_; 227 | notify_value last_value_ = 0; 228 | 229 | #if (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 230 | 231 | const index_type index_ = 0; 232 | 233 | #endif // (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 234 | 235 | inline ::tskTaskControlBlock* handle() const 236 | { 237 | return const_cast(thread_)->handle(); 238 | } 239 | 240 | bool notify(unsigned action, notify_value value); 241 | notify_value clear(notify_value flags); 242 | }; 243 | 244 | #endif // (configUSE_TASK_NOTIFICATIONS == 1) 245 | 246 | static constexpr const char* DEFAULT_NAME = "anonym"; 247 | static constexpr size_t DEFAULT_STACK_SIZE = detail::MIN_STACK_SIZE * sizeof(::StackType_t); 248 | 249 | protected: 250 | thread(::StackType_t* pstack, std::uint32_t stack_size, function func, void* param, 251 | priority prio, const char* name); 252 | 253 | ::tskTaskControlBlock* handle() const 254 | { 255 | return reinterpret_cast<::tskTaskControlBlock*>(const_cast(this)); 256 | } 257 | 258 | private: 259 | // non-copyable 260 | thread(const thread&) = delete; 261 | thread& operator=(const thread&) = delete; 262 | // non-movable 263 | thread(const thread&&) = delete; 264 | thread& operator=(const thread&&) = delete; 265 | 266 | void signal_exit(); 267 | }; 268 | 269 | #if (configSUPPORT_DYNAMIC_ALLOCATION == 1) 270 | 271 | /// @brief Creates a new thread by allocating memory on the heap, and initializing it. 272 | /// The thread becomes ready to execute within this call, meaning that it might 273 | /// have started running by the time this call returns. 
/// @param func: the function to execute in the thread context
/// @param param: opaque parameter to pass to the thread function
/// @param stacksize: size of the thread's stack in bytes
/// @param prio: thread priority level
/// @param name: short label for identifying the thread
/// @return pointer to the newly created thread, or nullptr if allocation failed
thread* make_thread(thread::function func, void* param,
                    size_t stacksize = thread::DEFAULT_STACK_SIZE,
                    thread::priority prio = thread::priority(),
                    const char* name = thread::DEFAULT_NAME);

/// @brief Overload for thread functions taking a pointer-sized value argument,
///        which is smuggled through the opaque void* parameter.
/// NOTE(review): 'static' gives this template internal linkage, unlike its sibling
/// overloads — confirm intent.
template <typename T>
static typename std::enable_if<(sizeof(T) == sizeof(std::uintptr_t)), thread*>::type
make_thread(void (*func)(T), T arg, size_t stacksize = thread::DEFAULT_STACK_SIZE,
            thread::priority prio = thread::priority(), const char* name = thread::DEFAULT_NAME)
{
    return make_thread(reinterpret_cast<thread::function>(func), bit_cast<void*>(arg), stacksize,
                       prio, name);
}

/// @brief Overload for thread functions taking a typed pointer argument.
template <typename T>
thread* make_thread(void (*func)(T*), T* arg, size_t stacksize = thread::DEFAULT_STACK_SIZE,
                    thread::priority prio = thread::priority(),
                    const char* name = thread::DEFAULT_NAME)
{
    return make_thread(reinterpret_cast<thread::function>(func), static_cast<void*>(arg), stacksize,
                       prio, name);
}

/// @brief Overload for thread functions taking a reference argument.
template <typename T>
thread* make_thread(void (*func)(T*), T& arg, size_t stacksize = thread::DEFAULT_STACK_SIZE,
                    thread::priority prio = thread::priority(),
                    const char* name = thread::DEFAULT_NAME)
{
    return make_thread(reinterpret_cast<thread::function>(func), static_cast<void*>(&arg),
                       stacksize, prio, name);
}

/// @brief Overload for running a parameterless member function on an object.
/// NOTE(review): converting a pointer-to-member-function to a plain function pointer is
/// only conditionally supported by compilers; the exact cast spelling was garbled in this
/// dump — confirm against the repository.
template <typename T>
thread* make_thread(T& obj, void (T::*member_func)(), size_t stacksize = thread::DEFAULT_STACK_SIZE,
                    thread::priority prio = thread::priority(),
                    const char* name = thread::DEFAULT_NAME)
{
    return make_thread(reinterpret_cast<thread::function>(member_func), static_cast<void*>(&obj),
                       stacksize, prio, name);
}

#endif // (configSUPPORT_DYNAMIC_ALLOCATION == 1)

/// @brief A thread with statically allocated stack.
template <size_t STACK_SIZE_BYTES>
class static_thread : public thread
{
  public:
    static constexpr std::size_t STACK_SIZE = STACK_SIZE_BYTES;

    /// @brief Constructs a static thread. The thread becomes ready to execute
    ///        within this call, meaning that it might have started running
    ///        by the time this call returns.
    /// @param func: the function to execute in the thread context
    /// @param param: opaque parameter to pass to the thread function
    /// @param prio: thread priority level
    /// @param name: short label for identifying the thread
    static_thread(function func, void* param, priority prio = priority(),
                  const char* name = DEFAULT_NAME)
        : thread(stack_, sizeof(stack_) / sizeof(stack_[0]), func, param, prio, name)
    {}

    /// @brief Overload for thread functions taking a pointer-sized value argument.
    template <typename T>
    static_thread(
        typename std::enable_if<(sizeof(T) == sizeof(std::uintptr_t)), void (*)(T)>::type func,
        T arg, priority prio = priority(), const char* name = DEFAULT_NAME)
        : static_thread(reinterpret_cast<function>(func), bit_cast<void*>(arg), prio, name)
    {}

    /// @brief Overload for thread functions taking a typed pointer argument.
    template <typename T>
    static_thread(void (*func)(T*), T* arg, priority prio = priority(),
                  const char* name = DEFAULT_NAME)
        : static_thread(reinterpret_cast<function>(func), static_cast<void*>(arg), prio, name)
    {}

    /// @brief Overload for thread functions taking a reference argument.
    template <typename T>
    static_thread(void (*func)(T*), T& arg, priority prio = priority(),
                  const char* name = DEFAULT_NAME)
        : static_thread(reinterpret_cast<function>(func), static_cast<void*>(&arg), prio, name)
    {}

    /// @brief Overload for running a parameterless member function on an object.
    template <typename T>
    static_thread(T& obj, void (T::*member_func)(), priority prio = priority(),
                  const char* name = DEFAULT_NAME)
        : static_thread(reinterpret_cast<function>(member_func), static_cast<void*>(&obj), prio,
                        name)
    {}

  private:
    // the stack buffer, rounded down to whole ::StackType_t elements
    ::StackType_t stack_[STACK_SIZE_BYTES / sizeof(::StackType_t)];
};

/// @brief Namespace offering control on the current thread of execution.
namespace this_thread
{
/// @brief Yields execution of the current thread so the OS can schedule
/// other thread(s) for the remainder of the time slice.
void yield();

/// @brief Provides a unique identifier of the current thread.
/// @return The current thread's unique identifier
thread::id get_id();

/// @brief Blocks the current thread's execution for the given number of OS ticks.
void sleep_for(tick_timer::duration rel_time);

/// @brief Blocks the current thread's execution for a given duration.
/// @param rel_time: duration to block the current thread
template <class Rep, class Period>
inline void sleep_for(const std::chrono::duration<Rep, Period>& rel_time)
{
    // workaround to prevent this function calling itself
    const auto ticks_sleep_for = static_cast<void (*)(tick_timer::duration)>(&sleep_for);
    ticks_sleep_for(std::chrono::duration_cast<tick_timer::duration>(rel_time));
}

/// @brief Blocks the current thread's execution until the given deadline.
/// @param abs_time: deadline to block the current thread
template <class Clock, class Duration>
inline void sleep_until(const std::chrono::time_point<Clock, Duration>& abs_time)
{
    sleep_for(duration_until(abs_time));
}

/// @brief Terminates the current thread, freeing its resources.
[[noreturn]]
void terminate();

#if (configUSE_TASK_NOTIFICATIONS == 1)

/// @brief Wait for a notifier to signal the current thread.
/// @param rel_time: maximum duration to wait for the notification
/// @param value: it is set to the value of the notification as it is received
/// @param clear_flags_before: these flags are cleared from the notification value
///        before the waiting begins
/// @param clear_flags_after: these flags are cleared from the notification value
///        after the signal was received (only if it was received)
/// @return true if a notification was received, false if timed out
bool wait_notification_for(const tick_timer::duration& rel_time, thread::notify_value* value,
                           thread::notify_value clear_flags_before = 0,
                           thread::notify_value clear_flags_after = 0);

/// @brief Chrono-friendly overload converting any duration to OS ticks.
template <class Rep, class Period>
inline auto wait_notification_for(const std::chrono::duration<Rep, Period>& rel_time,
                                  thread::notify_value* value,
                                  thread::notify_value clear_flags_before = 0,
                                  thread::notify_value clear_flags_after = 0)
{
    // workaround to prevent this function calling itself
    const auto ticks_wait_notification_for =
        static_cast<bool (*)(const tick_timer::duration&, thread::notify_value*,
                             thread::notify_value, thread::notify_value)>(&wait_notification_for);
    return ticks_wait_notification_for(std::chrono::duration_cast<tick_timer::duration>(rel_time),
                                       value, clear_flags_before, clear_flags_after);
}

/// @brief Waits for a notification until the given deadline.
template <class Clock, class Duration>
inline auto wait_notification_until(const std::chrono::time_point<Clock, Duration>& abs_time,
                                    thread::notify_value* value,
                                    thread::notify_value clear_flags_before = 0,
                                    thread::notify_value clear_flags_after = 0)
{
    return wait_notification_for(duration_until(abs_time), value, clear_flags_before,
                                 clear_flags_after);
}

/// @brief Waits indefinitely for a notification.
inline void wait_notification(thread::notify_value* value,
                              thread::notify_value clear_flags_before = 0,
                              thread::notify_value clear_flags_after = 0)
{
    wait_notification_for(infinity, value, clear_flags_before, clear_flags_after);
}

/// @brief Wait for a notifier to signal the current thread.
453 | /// @param rel_time: maximum duration to wait for the notification 454 | /// @return true if a notification was received, false if timed out 455 | inline bool wait_signal_for(const tick_timer::duration& rel_time) 456 | { 457 | return wait_notification_for(rel_time, nullptr); 458 | } 459 | 460 | template 461 | inline auto wait_signal_for(const std::chrono::duration& rel_time) 462 | { 463 | // workaround to prevent this function calling itself 464 | const auto ticks_wait_signal_for = 465 | static_cast(&wait_signal_for); 466 | return ticks_wait_signal_for(std::chrono::duration_cast(rel_time)); 467 | } 468 | 469 | template 470 | inline auto wait_signal_until(const std::chrono::time_point& abs_time) 471 | { 472 | return wait_signal_for(duration_until(abs_time)); 473 | } 474 | 475 | inline void wait_signal() 476 | { 477 | wait_signal_for(infinity); 478 | } 479 | 480 | notify_value try_acquire_notification_for(const tick_timer::duration& rel_time, 481 | bool acquire_single = false); 482 | 483 | template 484 | inline auto try_acquire_notification_for(const std::chrono::duration& rel_time, 485 | bool acquire_single = false) 486 | { 487 | // workaround to prevent this function calling itself 488 | const auto ticks_try_acquire_notification_for = 489 | static_cast( 490 | &try_acquire_notification_for); 491 | return ticks_try_acquire_notification_for( 492 | std::chrono::duration_cast(rel_time), acquire_single); 493 | } 494 | 495 | template 496 | inline auto try_acquire_notification_until(const std::chrono::time_point& abs_time, 497 | bool acquire_single = false) 498 | { 499 | return try_acquire_notification_for(duration_until(abs_time), acquire_single); 500 | } 501 | 502 | inline notify_value acquire_notification(bool acquire_single = false) 503 | { 504 | return try_acquire_notification_for(infinity, acquire_single); 505 | } 506 | 507 | #if (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 508 | 509 | /// @brief Wait for a notifier to signal the current thread. 
510 | /// @param index: notification selector index 511 | /// @param rel_time: maximum duration to wait for the notification 512 | /// @param value: it is set to the value of the notification as it is received 513 | /// @param clear_flags_before: these flags are cleared from the notification value 514 | /// before the waiting begins 515 | /// @param clear_flags_after: these flags are cleared from the notification value 516 | /// after the signal was received (only if it was received) 517 | /// @return true if a notification was received, false if timed out 518 | bool wait_notification_for(thread::notifier::index_type index, const tick_timer::duration& rel_time, 519 | thread::notify_value* value, thread::notify_value clear_flags_before = 0, 520 | thread::notify_value clear_flags_after = 0); 521 | 522 | template 523 | inline auto wait_notification_for(thread::notifier::index_type index, 524 | const std::chrono::duration& rel_time, 525 | thread::notify_value* value, 526 | thread::notify_value clear_flags_before = 0, 527 | thread::notify_value clear_flags_after = 0) 528 | { 529 | // workaround to prevent this function calling itself 530 | const auto ticks_wait_notification_for = 531 | static_cast( 533 | &wait_notification_for); 534 | return ticks_wait_notification_for(index, 535 | std::chrono::duration_cast(rel_time), 536 | value, clear_flags_before, clear_flags_after); 537 | } 538 | 539 | template 540 | inline auto wait_notification_until(thread::notifier::index_type index, 541 | const std::chrono::time_point& abs_time, 542 | thread::notify_value* value, 543 | thread::notify_value clear_flags_before = 0, 544 | thread::notify_value clear_flags_after = 0) 545 | { 546 | return wait_notification_for(index, duration_until(abs_time), value, clear_flags_before, 547 | clear_flags_after); 548 | } 549 | 550 | inline void wait_notification(thread::notifier::index_type index, 551 | const tick_timer::duration& rel_time, thread::notify_value* value, 552 | thread::notify_value 
clear_flags_before = 0, 553 | thread::notify_value clear_flags_after = 0) 554 | { 555 | wait_notification_for(index, infinity, value, clear_flags_before, clear_flags_after); 556 | } 557 | 558 | /// @brief Wait for a notifier to signal the current thread. 559 | /// @param index: notification selector index 560 | /// @param rel_time: maximum duration to wait for the notification 561 | /// @return true if a notification was received, false if timed out 562 | inline bool wait_signal_for(thread::notifier::index_type index, 563 | const tick_timer::duration& rel_time) 564 | { 565 | return wait_notification_for(index, rel_time, nullptr); 566 | } 567 | 568 | template 569 | inline auto wait_signal_for(thread::notifier::index_type index, 570 | const std::chrono::duration& rel_time) 571 | { 572 | // workaround to prevent this function calling itself 573 | const auto ticks_wait_signal_for = 574 | static_cast( 575 | &wait_signal_for); 576 | return ticks_wait_signal_for(index, std::chrono::duration_cast(rel_time)); 577 | } 578 | 579 | template 580 | inline auto wait_signal_until(thread::notifier::index_type index, 581 | const std::chrono::time_point& abs_time) 582 | { 583 | return wait_signal_for(index, duration_until(abs_time)); 584 | } 585 | 586 | inline void wait_signal(thread::notifier::index_type index) 587 | { 588 | return wait_signal_for(index, infinity); 589 | } 590 | 591 | notify_value try_acquire_notification_for(thread::notifier::index_type index, 592 | const tick_timer::duration& rel_time, 593 | bool acquire_single = false); 594 | 595 | template 596 | inline auto try_acquire_notification_for(thread::notifier::index_type index, 597 | const std::chrono::duration& rel_time, 598 | bool acquire_single = false) 599 | { 600 | // workaround to prevent this function calling itself 601 | const auto ticks_try_acquire_notification_for = 602 | static_cast(&try_acquire_notification_for); 604 | return ticks_try_acquire_notification_for( 605 | index, 
std::chrono::duration_cast(rel_time), acquire_single); 606 | } 607 | 608 | template 609 | inline auto try_acquire_notification_until(thread::notifier::index_type index, 610 | const std::chrono::time_point& abs_time, 611 | bool acquire_single = false) 612 | { 613 | return try_acquire_notification_for(index, duration_until(abs_time), acquire_single); 614 | } 615 | 616 | inline notify_value acquire_notification(thread::notifier::index_type index, 617 | bool acquire_single = false) 618 | { 619 | return try_acquire_notification_for(index, infinity, acquire_single); 620 | } 621 | 622 | #endif // (configTASK_NOTIFICATION_ARRAY_ENTRIES > 1) 623 | 624 | #endif // (configUSE_TASK_NOTIFICATIONS == 1) 625 | } // namespace this_thread 626 | } // namespace freertos 627 | 628 | #endif // __FREERTOS_THREAD_HPP_ 629 | --------------------------------------------------------------------------------