├── .gitignore ├── CMakeLists.txt ├── LICENSE.md ├── README.md ├── archs ├── arch_arm6m_defs.h ├── arch_arm7m_defs.h ├── arch_armv6m.c └── arch_armv7m.c ├── confs ├── kalango_config.h ├── kalango_config_cortexm0.h └── kalango_config_cortexm4_float.h ├── include ├── arch.h ├── clock.h ├── core.h ├── kalango_api.h ├── kernel_objects.h ├── kernel_types.h ├── list.h ├── macros.h ├── mutex.h ├── object_pool.h ├── platform.h ├── queue.h ├── sched.h ├── semaphore.h ├── task.h └── timer.h ├── src ├── clock.c ├── core.c ├── mutex.c ├── object_pool.c ├── queue.c ├── sched_fifo.c ├── sched_round_robin.c ├── semaphore.c ├── task.c └── timer.c └── utils ├── print_out.c ├── tlsf.c └── tlsf.h /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | *.o 3 | *.hex 4 | *.elf 5 | *.bin 6 | build -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.5) 2 | project(KalangoRTOS C ASM) 3 | 4 | set(KALANGO_CONFIG_FILE_PATH "confs" CACHE STRING "Path to folder that contains the kalango_config.h file") 5 | 6 | set(rtos_sources 7 | "archs/arch_armv6m.c" 8 | "archs/arch_armv7m.c" 9 | "src/clock.c" 10 | "src/core.c" 11 | "src/mutex.c" 12 | "src/object_pool.c" 13 | "src/queue.c" 14 | "src/sched_fifo.c" 15 | "src/sched_round_robin.c" 16 | "src/semaphore.c" 17 | "src/task.c" 18 | "src/timer.c" 19 | "utils/print_out.c" 20 | "utils/tlsf.c") 21 | 22 | add_library(${PROJECT_NAME} STATIC ${rtos_sources}) 23 | target_compile_options(${PROJECT_NAME} PUBLIC -Wall -Werror -Os -g -ffunction-sections -fdata-sections -includekalango_config.h) 24 | target_include_directories(${PROJECT_NAME} PUBLIC ${KALANGO_CONFIG_FILE_PATH}) 25 | target_include_directories(${PROJECT_NAME} PUBLIC include) 26 | target_include_directories(${PROJECT_NAME} PRIVATE utils) 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 - Felipe Neves 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kalango, an always-experimental, just-for-fun RTOS 2 | A simple preemptive-cooperative, real-time, multitasking kernel made just for fun. It aims to be used 3 | to learn the basics and the internals of multitasking programming on microcontrollers. The kernel 4 | is engineered to be simple and scalable, allowing others to download, use, learn from and grow it 5 | into more professional projects. It is under active development and all updates will 6 | appear here. 7 | 8 | # Main Features: 9 | - Real-time preemptive scheduler; 10 | - Fast context switching with predictable execution time; 11 | - Supports up to 32 priority levels; 12 | - Round-robin policy among threads of the same priority; 13 | - Soft timers; 14 | - Counting semaphores; 15 | - Binary semaphores; 16 | - Task management; 17 | - Recursive mutexes; 18 | - Message queues; 19 | - Scalable: the user configures how many kernel objects the application needs; 20 | - Unlimited kernel objects and threads (limited only by processor memory); 21 | - O(1) TLSF memory allocator, letting the kernel manage its own memory; 22 | - Written in C with as little assembly as possible (only in the context switch); 23 | 24 | # Limitations: 25 | - Please keep in mind this is an experimental project; 26 | - Intended to support popular 32-bit microcontrollers; there is no plan to support 8-bit platforms; 27 | - Most of the code is written with GCC or Clang in mind; 28 | - No C++ support; 29 | - It was designed to take advantage of existing manufacturer microcontroller abstraction libraries 30 | such as CMSIS and NRFx; 31 | - Timer callbacks are deferred from the ISR; 32 | 33 | # Get the Code! 34 | To get this repository: 35 | ``` 36 | $ git clone https://github.com/uLipe/KalangoRTOS 37 | ``` 38 | 39 | # Getting started, using CMake: 40 | You can use the CMake build system to integrate Kalango into your existing CMake project; 41 | to do so, copy this folder into your project folder and add this directory to your CMake project 42 | as below: 43 | 44 | ``` 45 | add_subdirectory(KalangoRTOS) 46 | ``` 47 | 48 | After that, link the Kalango library into your executable target using the regular CMake command (your_app below is a placeholder for your own executable target): 49 | 50 | ``` 51 | target_link_libraries(your_app KalangoRTOS) 52 | ``` 53 | 54 | The Kalango top-level include will then be available in your project and you can invoke all the Kalango-related 55 | functions. 56 | 57 | Additionally, you need to supply a kalango_config.h header file. Inside the confs folder there 58 | are some samples; copy one into your project and rename it to kalango_config.h. After that, 59 | when running your project's cmake command, supply the location of the folder that contains this file: 60 | 61 | ``` 62 | cmake -DKALANGO_CONFIG_FILE_PATH=/path/to/folder/containing/kalango_config.h 63 | ``` 64 | 65 | # Getting started, stand-alone mode: 66 | - In your embedded project, add the include folder to your include search path; 67 | - Add the src folder, the desired arch folder and the utils folder to your source search path; 68 | - From the confs folder, take a template config, put it in your project and rename it to kalango_config.h; 69 | - Add to your compiler options: 70 | 71 | ``` 72 | -include kalango_config.h 73 | ``` 74 | 75 | - Include kalango_api.h in your application code to use the RTOS features; 76 | - Inside your main function, initialize your target, then run the scheduler by calling 77 | the Kalango_CoreStart() function. See the example below.
78 | 79 | ``` 80 | #include "kalango_api.h" 81 | 82 | static TaskId task_a; 83 | static TaskId task_b; 84 | 85 | static void DemoTask1(void *arg) { 86 | uint32_t noof_wakeups = 0; 87 | 88 | for(;;) { 89 | Kalango_Sleep(250); 90 | noof_wakeups++; 91 | } 92 | } 93 | 94 | static void DemoTask2(void *arg) { 95 | uint32_t noof_wakeups = 0; 96 | 97 | for(;;) { 98 | Kalango_Sleep(25); 99 | noof_wakeups++; 100 | } 101 | } 102 | 103 | int main (void) { 104 | TaskSettings settings; 105 | 106 | settings.arg = NULL; 107 | settings.function = DemoTask1; 108 | settings.priority = 8; 109 | settings.stack_size = 512; 110 | 111 | task_a = Kalango_TaskCreate(&settings); 112 | 113 | settings.arg = NULL; 114 | settings.function = DemoTask2; 115 | settings.priority = 4; 116 | settings.stack_size = 512; 117 | 118 | task_b = Kalango_TaskCreate(&settings); 119 | 120 | (void)task_a; 121 | (void)task_b; 122 | 123 | //Start scheduling! 124 | Kalango_CoreStart(); 125 | return 0; 126 | } 127 | 128 | ``` 129 | 130 | # Support: 131 | - If you want some help with this work give a star and contact me: ryukokki.felipe@gmail.com 132 | 133 | 134 | 135 | 136 | -------------------------------------------------------------------------------- /archs/arch_arm6m_defs.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | 4 | /** \brief Structure type to access the System Timer (SysTick). 5 | */ 6 | typedef struct 7 | { 8 | volatile uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ 9 | volatile uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ 10 | volatile uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ 11 | volatile uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ 12 | } SysTick_Type; 13 | 14 | /** \brief Structure type to access the System Control Block (SCB). 15 | */ 16 | typedef struct 17 | { 18 | volatile uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ 19 | volatile uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ 20 | uint32_t RESERVED0; 21 | volatile uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ 22 | volatile uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ 23 | volatile uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ 24 | uint32_t RESERVED1; 25 | volatile uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ 26 | volatile uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ 27 | } SCB_Type; 28 | 29 | #define SysTick_BASE (0xE000E000UL + 0x0010UL) 30 | #define SCB_BASE (0xE000E000UL + 0x0D00UL) 31 | 32 | #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ 33 | #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ 34 | 35 | #define SHP_PENDSV_PRIO 0 36 | #define SHP_SYSTICK_SVCAKK_PRIO 1 37 | 38 | //These functions below were extracted from cmsis to help on avoiding dependency of full 39 | //blown cmsis stack. 
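//
// For orientation, a minimal sketch of how the port code (ArchCriticalSectionEnter()/
// ArchCriticalSectionExit() in archs/arch_armv6m.c) uses these helpers to build nestable
// critical sections: save PRIMASK on the first nesting level, mask interrupts, and restore
// the saved value once the nesting count drops back to zero.
//
//   uint32_t saved = __get_PRIMASK();  /* remember the current interrupt mask state */
//   __disable_irq();                   /* set PRIMASK, masking configurable interrupts */
//   /* ... critical region ... */
//   __set_PRIMASK(saved);              /* restore the previous mask state */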
40 | 41 | static inline __attribute__((__always_inline__)) uint32_t __get_PRIMASK(void) 42 | { 43 | uint32_t result; 44 | __asm volatile ("MRS %0, primask" : "=r" (result) ); 45 | return(result); 46 | } 47 | 48 | static inline __attribute__((__always_inline__)) void __set_PRIMASK(uint32_t priMask) 49 | { 50 | __asm volatile ("MSR primask, %0 " : "=r" (priMask) ); 51 | } 52 | 53 | static inline __attribute__((__always_inline__)) void __disable_irq(void) 54 | { 55 | __asm volatile ("cpsid i" : : : "memory"); 56 | } 57 | 58 | static inline __attribute__((__always_inline__)) uint32_t __get_IPSR(void) 59 | { 60 | uint32_t result; 61 | __asm volatile ("MRS %0, ipsr" : "=r" (result) ); 62 | return(result); 63 | } -------------------------------------------------------------------------------- /archs/arch_arm7m_defs.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | 4 | /** \brief Structure type to access the System Timer (SysTick). 5 | */ 6 | typedef struct 7 | { 8 | volatile uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ 9 | volatile uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ 10 | volatile uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ 11 | volatile uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ 12 | } SysTick_Type; 13 | 14 | 15 | /** \brief Structure type to access the System Control Block (SCB). 16 | */ 17 | typedef struct 18 | { 19 | volatile uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ 20 | volatile uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ 21 | volatile uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ 22 | volatile uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ 23 | volatile uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ 24 | volatile uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ 25 | volatile uint8_t SHP[12]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ 26 | volatile uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ 27 | volatile uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ 28 | volatile uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ 29 | volatile uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ 30 | volatile uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ 31 | volatile uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ 32 | volatile uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ 33 | volatile uint32_t PFR[2]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ 34 | volatile uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ 35 | volatile uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ 36 | volatile uint32_t MMFR[4]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ 37 | volatile uint32_t ISAR[5]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ 38 | volatile uint32_t RESERVED0[5]; 39 | volatile uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ 40 | } SCB_Type; 41 | 42 | /** 43 | \brief Structure type to access the Floating Point Unit (FPU). 
44 | */ 45 | typedef struct 46 | { 47 | uint32_t RESERVED0[1U]; 48 | volatile uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ 49 | volatile uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ 50 | volatile uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ 51 | volatile uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ 52 | volatile uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ 53 | } FPU_Type; 54 | 55 | #define SysTick_BASE (0xE000E000UL + 0x0010UL) 56 | #define SCB_BASE (0xE000E000UL + 0x0D00UL) 57 | #define FPU_BASE (0xE000E000UL + 0x0F30UL) 58 | 59 | #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ 60 | #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ 61 | 62 | #ifdef CONFIG_HAS_FLOAT 63 | #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ 64 | #endif 65 | 66 | #define SHP_SVCALL_PRIO 7 67 | #define SHP_PENDSV_PRIO 10 68 | #define SHP_SYSTICK_PRIO 11 69 | 70 | //These functions below were extracted from cmsis to help on avoiding dependency of full 71 | //blown cmsis stack. 72 | 73 | static inline __attribute__((__always_inline__)) uint32_t __get_PRIMASK(void) 74 | { 75 | uint32_t result; 76 | __asm volatile ("MRS %0, primask" : "=r" (result) ); 77 | return(result); 78 | } 79 | 80 | static inline __attribute__((__always_inline__)) void __set_PRIMASK(uint32_t priMask) 81 | { 82 | __asm volatile ("MSR primask, %0 " : "=r" (priMask) ); 83 | } 84 | 85 | static inline __attribute__((__always_inline__)) void __disable_irq(void) 86 | { 87 | __asm volatile ("cpsid i" : : : "memory"); 88 | } 89 | 90 | static inline __attribute__((__always_inline__)) uint32_t __get_IPSR(void) 91 | { 92 | uint32_t result; 93 | __asm volatile ("MRS %0, ipsr" : "=r" (result) ); 94 | return(result); 95 | } -------------------------------------------------------------------------------- /archs/arch_armv6m.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #ifdef CONFIG_ARCH_ARM_V6M 4 | 5 | #include "arch_arm6m_defs.h" 6 | 7 | 8 | static uint32_t irq_nest_level = 0; 9 | static uint32_t irq_lock_level = 0; 10 | static uint32_t irq_saved_level = 0; 11 | uint32_t isr_stack[CONFIG_ISR_STACK_SIZE/4]; 12 | uint8_t *isr_top_of_stack; 13 | 14 | typedef struct { 15 | uint32_t r4; 16 | uint32_t r5; 17 | uint32_t r6; 18 | uint32_t r7; 19 | uint32_t r8; 20 | uint32_t r9; 21 | uint32_t r10; 22 | uint32_t r11; 23 | uint32_t r0; 24 | uint32_t r1; 25 | uint32_t r2; 26 | uint32_t r3; 27 | uint32_t r12; 28 | uint32_t lr; 29 | uint32_t pc; 30 | uint32_t xpsr; 31 | }ArmCortexStackFrameInitial; 32 | 33 | 34 | void SysTick_Handler(void) { 35 | ArchIsrEnter(); 36 | ClockStep(1); 37 | ArchIsrLeave(); 38 | } 39 | 40 | void __attribute__((naked)) PendSV_Handler(void) { 41 | asm volatile( 42 | /* disable interrupts during context switch */ 43 | "mrs r2, PRIMASK \n\r" 44 | "cpsid I \n\r" 45 | 46 | /* Check if this is the first switch, skip register saving if does */ 47 | "ldr r0, =current \n\r" 48 | "ldr r0, [r0] \n\r" 49 | "mrs r1, psp \n\r" 50 | 51 | /* push all registers to stack, incluing fp ones if needed */ 52 | "adds r1, r1, #-32 \n\r" 53 | "stmia r1!, {r4 - r7} \n\r" 54 | "adds r1, r1, #16 \n\r" 55 | 56 | "mov r4, r8 \n\r" 57 | "mov r5, r9 \n\r" 58 | "mov r6, r10 \n\r" 59 | "mov r7, r11 \n\r" 60 | "stmia r1!, {r4 - r7} \n\r" 61 | "adds r1, r1, #-16 
\n\r" 62 | 63 | /* send stackpointer back to the tcb */ 64 | "str r1, [r0] \n\r" 65 | 66 | "push {lr} \n\r" 67 | "bl CoreTaskSwitch \n\r" 68 | "pop {lr} \n\r" 69 | "ldr r1, [r0] \n\r" 70 | 71 | /* restore next task callee registers */ 72 | "adds r1, r1, #16 \n\r" 73 | "ldmia r1!, {r4 - r7} \n\r" 74 | "mov r8, r4 \n\r" 75 | "mov r9, r5 \n\r" 76 | "mov r10, r6 \n\r" 77 | "mov r11, r7 \n\r" 78 | 79 | "adds r1, r1, #-16 \n\r" 80 | "ldmia r1!, {r4 - r7} \n\r" 81 | "adds r1, r1, #32 \n\r" 82 | 83 | /* horray the stack pointer is now handled to the CPU */ 84 | "msr psp, r1 \n\r" 85 | 86 | /* if the previous context saving was FP we need to tell the CPU to resume it*/ 87 | /* re-enable interrupts and ensure return in thumb mode */ 88 | "mov r2, #0x04 \n\r" 89 | "mov r1, lr \n\r" 90 | "orrs r1, r1, r2 \n\r" 91 | "mov lr, r1 \n\r" 92 | 93 | "msr PRIMASK,r2 \n\r" 94 | "bx lr \n\r" 95 | ); 96 | } 97 | 98 | void __attribute__((naked)) SVC_Handler(void) { 99 | asm volatile( 100 | "ldr r0, =isr_top_of_stack \n\r" 101 | "ldr r0, [r0] \n\r" 102 | "msr msp, r0 \n\r" 103 | 104 | "push {lr} \n\r" 105 | "bl CoreTaskSwitch \n\r" 106 | "pop {lr} \n\r" 107 | "ldr r1, [r0] \n\r" 108 | 109 | /* ...after the callee regular registers */ 110 | "adds r1, r1, #16 \n\r" 111 | "ldmia r1!, {r4 - r7} \n\r" 112 | "mov r8, r4 \n\r" 113 | "mov r9, r5 \n\r" 114 | "mov r10, r6 \n\r" 115 | "mov r11, r7 \n\r" 116 | 117 | "adds r1, r1, #-16 \n\r" 118 | "ldmia r1!, {r4 - r7} \n\r" 119 | 120 | "adds r1, r1, #32 \n\r" 121 | 122 | /* horray the stack pointer is now handled to the CPU */ 123 | "msr psp, r1 \n\r" 124 | 125 | "push {r2,lr} \n\r" 126 | "bl CoreSetRunning \n\r" 127 | "pop {r2,lr} \n\r" 128 | 129 | /* re-enable interrupts and ensure return in thumb mode */ 130 | "mov r2, #0x04 \n\r" 131 | "mov r1, lr \n\r" 132 | "orrs r1, r1, r2 \n\r" 133 | "mov lr, r1 \n\r" 134 | "bx lr \n\r" 135 | ); 136 | } 137 | 138 | KernelResult ArchInitializeSpecifics() { 139 | 140 | //Align stack to 8-byte boundary: 141 | SCB->CCR |= 0x200; 142 | 143 | //Sets priority of interrupts used by kernel: 144 | SCB->SHP[SHP_SYSTICK_SVCAKK_PRIO] = ((0xFF - CONFIG_IRQ_PRIORITY_LEVELS) << 8) | 145 | (0xFF - CONFIG_IRQ_PRIORITY_LEVELS; 146 | SCB->SHP[SHP_PENDSV_PRIO] = 0xFF - (CONFIG_IRQ_PRIORITY_LEVELS - 8); 147 | 148 | //Setup systick timer to generate interrupt at tick rate: 149 | SysTick->CTRL = 0x00; 150 | SysTick->LOAD = CONFIG_PLATFORM_SYS_CLOCK_HZ/CONFIG_TICKS_PER_SEC; 151 | SysTick->CTRL = 0x07; 152 | 153 | //Setup global isr stack pointer, align into a 8byte boundary: 154 | isr_top_of_stack = (uint8_t *)((uint32_t)(isr_stack + (CONFIG_ISR_STACK_SIZE/4)) & ~0x07); 155 | 156 | return kSuccess; 157 | } 158 | 159 | KernelResult ArchStartKernel(uint32_t to) { 160 | __asm volatile (" svc #0 \n"); 161 | return kSuccess; 162 | } 163 | 164 | KernelResult ArchNewTask(TaskControBlock *task, uint8_t *stack_base, uint32_t stack_size) { 165 | ASSERT_PARAM(task); 166 | ASSERT_PARAM(stack_base); 167 | ASSERT_PARAM(stack_size); 168 | 169 | ArchCriticalSectionEnter(); 170 | 171 | //Stack must be aligned to to 8-byte boundary 172 | uint8_t *aligned_stack = (uint8_t *)((uint32_t)(stack_base + stack_size - 1) & ~0x07); 173 | aligned_stack -= sizeof(ArmCortexStackFrameInitial); 174 | ArmCortexStackFrameInitial *frame = (ArmCortexStackFrameInitial *)(aligned_stack); 175 | 176 | frame->r0 = (uint32_t)task->arg1; 177 | frame->xpsr = 0x01000000; 178 | frame->lr = 0xFFFFFFFD; 179 | frame->pc = (uint32_t)task->entry_point; 180 | frame->r1 = 0xAAAAAAAA; 181 | frame->r2 = 
0xAAAAAAAA; 182 | frame->r3 = 0xAAAAAAAA; 183 | frame->r4 = 0xAAAAAAAA; 184 | frame->r5 = 0xAAAAAAAA; 185 | frame->r6 = 0xAAAAAAAA; 186 | frame->r7 = 0xAAAAAAAA; 187 | frame->r8 = 0xAAAAAAAA; 188 | frame->r9 = 0xAAAAAAAA; 189 | frame->r10 = 0xAAAAAAAA; 190 | frame->r11 = 0xAAAAAAAA; 191 | task->stackpointer = aligned_stack; 192 | 193 | ArchCriticalSectionExit(); 194 | return kSuccess; 195 | } 196 | 197 | KernelResult ArchCriticalSectionEnter() { 198 | if(irq_lock_level < 0xFFFFFFFF) { 199 | irq_lock_level++; 200 | } 201 | 202 | if(irq_lock_level == 1) { 203 | irq_saved_level = __get_PRIMASK(); 204 | __disable_irq(); 205 | } 206 | 207 | return kSuccess; 208 | } 209 | 210 | KernelResult ArchCriticalSectionExit() { 211 | if(irq_lock_level) { 212 | irq_lock_level--; 213 | } 214 | 215 | if(!irq_lock_level) { 216 | __set_PRIMASK(irq_saved_level); 217 | } 218 | 219 | return kSuccess; 220 | } 221 | 222 | KernelResult ArchYield() { 223 | SCB->ICSR |= (1<<28); 224 | return kSuccess; 225 | } 226 | 227 | KernelResult ArchIsrEnter() { 228 | if(irq_nest_level < 0xFFFFFFFF) { 229 | irq_nest_level++; 230 | } 231 | 232 | return kSuccess; 233 | } 234 | 235 | KernelResult ArchIsrLeave() { 236 | if(irq_nest_level) { 237 | irq_nest_level--; 238 | } 239 | 240 | if(!irq_nest_level) { 241 | CheckReschedule(); 242 | } 243 | 244 | return kSuccess; 245 | } 246 | 247 | uint32_t ArchGetIsrNesting() { 248 | return irq_nest_level; 249 | } 250 | 251 | bool ArchInIsr() { 252 | return ((__get_IPSR() != 0) ? true : false); 253 | } 254 | 255 | uint8_t ArchCountLeadZeros(uint32_t word) { 256 | return __builtin_clz(word); 257 | } 258 | 259 | #endif -------------------------------------------------------------------------------- /archs/arch_armv7m.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #ifdef CONFIG_ARCH_ARM_V7M 4 | 5 | #include "arch_arm7m_defs.h" 6 | 7 | static uint32_t irq_nest_level = 0; 8 | static uint32_t irq_lock_level = 0; 9 | static uint32_t irq_saved_level = 0; 10 | uint32_t isr_stack[CONFIG_ISR_STACK_SIZE/4 + 8]; 11 | uint8_t *isr_top_of_stack; 12 | 13 | typedef struct { 14 | #ifdef CONFIG_HAS_FLOAT 15 | uint32_t has_fpu_context; 16 | #endif 17 | uint32_t r4; 18 | uint32_t r5; 19 | uint32_t r6; 20 | uint32_t r7; 21 | uint32_t r8; 22 | uint32_t r9; 23 | uint32_t r10; 24 | uint32_t r11; 25 | #ifdef CONFIG_HAS_FLOAT 26 | uint32_t s16; 27 | uint32_t s17; 28 | uint32_t s18; 29 | uint32_t s19; 30 | uint32_t s20; 31 | uint32_t s21; 32 | uint32_t s22; 33 | uint32_t s23; 34 | uint32_t s24; 35 | uint32_t s25; 36 | uint32_t s26; 37 | uint32_t s27; 38 | uint32_t s28; 39 | uint32_t s29; 40 | uint32_t s30; 41 | uint32_t s31; 42 | #endif 43 | uint32_t r0; 44 | uint32_t r1; 45 | uint32_t r2; 46 | uint32_t r3; 47 | uint32_t r12; 48 | uint32_t lr; 49 | uint32_t pc; 50 | uint32_t xpsr; 51 | #ifdef CONFIG_HAS_FLOAT 52 | uint32_t s0; 53 | uint32_t s1; 54 | uint32_t s2; 55 | uint32_t s3; 56 | uint32_t s4; 57 | uint32_t s5; 58 | uint32_t s6; 59 | uint32_t s7; 60 | uint32_t s8; 61 | uint32_t s9; 62 | uint32_t s10; 63 | uint32_t s11; 64 | uint32_t s12; 65 | uint32_t s13; 66 | uint32_t s14; 67 | uint32_t s15; 68 | uint32_t fpcsr; 69 | uint32_t fp_reserved; 70 | #endif 71 | }ArmCortexStackFrame; 72 | 73 | typedef struct { 74 | #ifdef CONFIG_HAS_FLOAT 75 | uint32_t has_fpu_context; 76 | #endif 77 | uint32_t r4; 78 | uint32_t r5; 79 | uint32_t r6; 80 | uint32_t r7; 81 | uint32_t r8; 82 | uint32_t r9; 83 | uint32_t r10; 84 | uint32_t r11; 85 | uint32_t r0; 86 | uint32_t r1; 87 | 
uint32_t r2; 88 | uint32_t r3; 89 | uint32_t r12; 90 | uint32_t lr; 91 | uint32_t pc; 92 | uint32_t xpsr; 93 | }ArmCortexStackFrameInitial; 94 | 95 | 96 | void SysTick_Handler(void) { 97 | ArchIsrEnter(); 98 | ClockStep(1); 99 | ArchIsrLeave(); 100 | } 101 | 102 | void __attribute__((naked)) PendSV_Handler(void) { 103 | asm volatile( 104 | /* disable interrupts during context switch */ 105 | "mrs r2, PRIMASK \n\r" 106 | "cpsid I \n\r" 107 | 108 | /* Check if this is the first switch, skip register saving if does */ 109 | "ldr r0, =current \n\r" 110 | "ldr r0, [r0] \n\r" 111 | "mrs r1, psp \n\r" 112 | 113 | /* push all registers to stack, incluing fp ones if needed */ 114 | #if defined(CONFIG_HAS_FLOAT) 115 | "tst lr, #0x10 \n\r" 116 | "it eq \n\r" 117 | "vstmdbeq r1!, {d8 - d15} \n\r" 118 | #endif 119 | 120 | "stmfd r1!, {r4 - r11} \n\r" 121 | 122 | /* for fp context we need to store that there are a fp active context */ 123 | #if defined(CONFIG_HAS_FLOAT) 124 | "mov r4, #0x00 \n\r" 125 | "tst lr, #0x10 \n\r" 126 | "it eq \n\r" 127 | "moveq r4, #0x01 \n\r" 128 | "stmfd r1!, {r4} \n\r" 129 | #endif 130 | /* send stackpointer back to the tcb */ 131 | "str r1, [r0] \n\r" 132 | 133 | "push {lr} \n\r" 134 | "bl CoreTaskSwitch \n\r" 135 | "pop {lr} \n\r" 136 | "ldr r1, [r0] \n\r" 137 | 138 | /* same here, if a fp context was active, restore the fp registers */ 139 | #if defined(CONFIG_HAS_FLOAT) 140 | "ldmfd r1!, {r3} \n\r" 141 | #endif 142 | /* ...after the callee regular registers */ 143 | "ldmfd r1!, {r4 - r11} \n\r" 144 | 145 | #if defined(CONFIG_HAS_FLOAT) 146 | "cmp r3, #0x00 \n\r" 147 | "it ne \n\r" 148 | "vldmiane r1!, {d8 - d15} \n\r" 149 | #endif 150 | /* horray the stack pointer is now handled to the CPU */ 151 | "msr psp, r1 \n\r" 152 | 153 | /* if the previous context saving was FP we need to tell the CPU to resume it*/ 154 | #if defined(CONFIG_HAS_FLOAT) 155 | "orr lr, lr, #0x10 \n\r" 156 | "cmp r3, #0x00 \n\r" 157 | "it ne \n\r" 158 | "bicne lr, lr, #0x10 \n\r" 159 | #endif 160 | 161 | /* re-enable interrupts and ensure return in thumb mode */ 162 | "orr lr, lr, #0x04 \n\r" 163 | "msr PRIMASK,r2 \n\r" 164 | "bx lr \n\r" 165 | ); 166 | } 167 | 168 | void __attribute__((naked)) SVC_Handler(void) { 169 | asm volatile( 170 | "ldr r0, =isr_top_of_stack \n\r" 171 | "ldr r0, [r0] \n\r" 172 | "msr msp, r0 \n\r" 173 | 174 | "push {lr} \n\r" 175 | "bl CoreTaskSwitch \n\r" 176 | "pop {lr} \n\r" 177 | "ldr r1, [r0] \n\r" 178 | 179 | #if defined(CONFIG_HAS_FLOAT) 180 | "ldmfd r1!, {r3} \n\r" 181 | #endif 182 | /* ...after the callee regular registers */ 183 | "ldmfd r1!, {r4 - r11} \n\r" 184 | 185 | /* horray the stack pointer is now handled to the CPU */ 186 | "msr psp, r1 \n\r" 187 | 188 | "push {r2,lr} \n\r" 189 | "bl CoreSetRunning \n\r" 190 | "pop {r2,lr} \n\r" 191 | 192 | /* re-enable interrupts and ensure return in thumb mode */ 193 | "orr lr, lr, #0x04 \n\r" 194 | "bx lr \n\r" 195 | ); 196 | } 197 | 198 | KernelResult ArchInitializeSpecifics() { 199 | 200 | #ifdef CONFIG_HAS_FLOAT 201 | SCB->CPACR |= ((3UL << 10*2) | (3UL << 11*2)); 202 | FPU->FPCCR = ( 0x3UL << 30UL ); 203 | #endif 204 | 205 | //Align stack to 8-byte boundary: 206 | SCB->CCR |= 0x200; 207 | 208 | //Sets priority of interrupts used by kernel: 209 | SCB->SHP[SHP_SVCALL_PRIO] = 0xFF - CONFIG_IRQ_PRIORITY_LEVELS; 210 | SCB->SHP[SHP_PENDSV_PRIO] = 0xFF - (CONFIG_IRQ_PRIORITY_LEVELS - 8); 211 | SCB->SHP[SHP_SYSTICK_PRIO] = 0xFF - CONFIG_IRQ_PRIORITY_LEVELS; 212 | 213 | //Setup systick timer to generate interrupt at tick 
rate: 214 | SysTick->CTRL = 0x00; 215 | SysTick->LOAD = CONFIG_PLATFORM_SYS_CLOCK_HZ/CONFIG_TICKS_PER_SEC; 216 | SysTick->CTRL = 0x07; 217 | 218 | //Setup global isr stack pointer, align into a 8byte boundary: 219 | isr_top_of_stack = (uint8_t *)((uint32_t)(isr_stack + (CONFIG_ISR_STACK_SIZE/4)) & ~0x07); 220 | 221 | return kSuccess; 222 | } 223 | 224 | KernelResult ArchStartKernel(uint32_t to) { 225 | __asm volatile ("svc #0 \n"); 226 | return kSuccess; 227 | } 228 | 229 | KernelResult ArchNewTask(TaskControBlock *task, uint8_t *stack_base, uint32_t stack_size) { 230 | ASSERT_PARAM(task); 231 | ASSERT_PARAM(stack_base); 232 | ASSERT_PARAM(stack_size); 233 | 234 | ArchCriticalSectionEnter(); 235 | 236 | //Stack must be aligned to to 8-byte boundary 237 | uint8_t *aligned_stack = (uint8_t *)((uint32_t)(stack_base + stack_size - 1) & ~0x07); 238 | aligned_stack -= sizeof(ArmCortexStackFrameInitial); 239 | ArmCortexStackFrameInitial *frame = (ArmCortexStackFrameInitial *)(aligned_stack); 240 | 241 | frame->r0 = (uint32_t)task->arg1; 242 | frame->xpsr = 0x01000000; 243 | frame->lr = 0xFFFFFFFD; 244 | frame->pc = (uint32_t)task->entry_point; 245 | 246 | #if CONFIG_HAS_FLOAT 247 | frame->has_fpu_context = 0; 248 | #endif 249 | frame->r1 = 0xAAAAAAAA; 250 | frame->r2 = 0xAAAAAAAA; 251 | frame->r3 = 0xAAAAAAAA; 252 | frame->r4 = 0xAAAAAAAA; 253 | frame->r5 = 0xAAAAAAAA; 254 | frame->r6 = 0xAAAAAAAA; 255 | frame->r7 = 0xAAAAAAAA; 256 | frame->r8 = 0xAAAAAAAA; 257 | frame->r9 = 0xAAAAAAAA; 258 | frame->r10 = 0xAAAAAAAA; 259 | frame->r11 = 0xAAAAAAAA; 260 | task->stackpointer = aligned_stack; 261 | 262 | ArchCriticalSectionExit(); 263 | return kSuccess; 264 | } 265 | 266 | KernelResult ArchCriticalSectionEnter() { 267 | if(irq_lock_level < 0xFFFFFFFF) { 268 | irq_lock_level++; 269 | } 270 | 271 | if(irq_lock_level == 1) { 272 | irq_saved_level = __get_PRIMASK(); 273 | __disable_irq(); 274 | } 275 | 276 | return kSuccess; 277 | } 278 | 279 | KernelResult ArchCriticalSectionExit() { 280 | if(irq_lock_level) { 281 | irq_lock_level--; 282 | } 283 | 284 | if(!irq_lock_level) { 285 | __set_PRIMASK(irq_saved_level); 286 | } 287 | 288 | return kSuccess; 289 | } 290 | 291 | KernelResult ArchYield() { 292 | SCB->ICSR |= (1<<28); 293 | return kSuccess; 294 | } 295 | 296 | KernelResult ArchIsrEnter() { 297 | if(irq_nest_level < 0xFFFFFFFF) { 298 | irq_nest_level++; 299 | } 300 | 301 | return kSuccess; 302 | } 303 | 304 | KernelResult ArchIsrLeave() { 305 | if(irq_nest_level) { 306 | irq_nest_level--; 307 | } 308 | 309 | if(!irq_nest_level) { 310 | CheckReschedule(); 311 | } 312 | 313 | return kSuccess; 314 | } 315 | 316 | uint32_t ArchGetIsrNesting() { 317 | return irq_nest_level; 318 | } 319 | 320 | bool ArchInIsr() { 321 | return ((__get_IPSR() != 0) ? 
true : false); 322 | } 323 | 324 | uint8_t ArchCountLeadZeros(uint32_t word) { 325 | return __builtin_clz(word); 326 | } 327 | 328 | #endif -------------------------------------------------------------------------------- /confs/kalango_config.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define CONFIG_TICKS_PER_SEC 1000 4 | 5 | #define CONFIG_ENABLE_TASKS 1 6 | #define CONFIG_ENABLE_SEMAPHORES 1 7 | #define CONFIG_ENABLE_MUTEXES 1 8 | #define CONFIG_ENABLE_QUEUES 1 9 | #define CONFIG_ENABLE_TIMERS 1 10 | 11 | #define CONFIG_KERNEL_HEAP_SIZE 16 * 1024 12 | 13 | #define CONFIG_PRIORITY_LEVELS 16 14 | #define CONFIG_MUTEX_CEIL_PRIORITY (CONFIG_PRIORITY_LEVELS - 1) 15 | #define CONFIG_IDLE_TASK_STACK_SIZE 512 16 | #define CONFIG_ISR_STACK_SIZE 1024 17 | 18 | #define CONFIG_USE_PLATFORM_INIT 0 19 | 20 | #define CONFIG_ARCH_ARM_V7M 1 21 | #define CONFIG_ARCH_ARM_V7M_VARIANT_M4 1 22 | #define CONFIG_HAS_FLOAT 1 23 | #define CONFIG_IRQ_PRIORITY_LEVELS 8 24 | 25 | #define CONFIG_PLATFORM_SYS_CLOCK_HZ 64000000 26 | #define CONFIG_DEBUG_KERNEL 1 -------------------------------------------------------------------------------- /confs/kalango_config_cortexm0.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define CONFIG_TICKS_PER_SEC 1000 4 | 5 | #define CONFIG_ENABLE_TASKS 1 6 | #define CONFIG_ENABLE_SEMAPHORES 1 7 | #define CONFIG_ENABLE_MUTEXES 1 8 | #define CONFIG_ENABLE_QUEUES 1 9 | #define CONFIG_ENABLE_TIMERS 1 10 | #define CONFIG_KERNEL_HEAP_SIZE 16 * 1024 11 | 12 | #define CONFIG_PRIORITY_LEVELS 16 13 | #define CONFIG_MUTEX_CEIL_PRIORITY (CONFIG_PRIORITY_LEVELS - 1) 14 | #define CONFIG_IDLE_TASK_STACK_SIZE 512 15 | #define CONFIG_ISR_STACK_SIZE 1024 16 | 17 | #define CONFIG_USE_PLATFORM_INIT 0 18 | 19 | #define CONFIG_ARCH_ARM_V6M 1 20 | #define CONFIG_ARCH_ARM_V6M_VARIANT_M0 1 21 | #define CONFIG_HAS_FLOAT 0 22 | #define CONFIG_IRQ_PRIORITY_LEVELS 8 23 | 24 | #define CONFIG_PLATFORM_SYS_CLOCK_HZ 48000000 25 | #define CONFIG_DEBUG_KERNEL 1 -------------------------------------------------------------------------------- /confs/kalango_config_cortexm4_float.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define CONFIG_TICKS_PER_SEC 1000 4 | 5 | #define CONFIG_ENABLE_TASKS 1 6 | #define CONFIG_ENABLE_SEMAPHORES 1 7 | #define CONFIG_ENABLE_MUTEXES 1 8 | #define CONFIG_ENABLE_QUEUES 1 9 | #define CONFIG_ENABLE_TIMERS 1 10 | 11 | #define CONFIG_KERNEL_HEAP_SIZE 16 * 1024 12 | 13 | #define CONFIG_PRIORITY_LEVELS 16 14 | #define CONFIG_MUTEX_CEIL_PRIORITY (CONFIG_PRIORITY_LEVELS - 1) 15 | #define CONFIG_IDLE_TASK_STACK_SIZE 512 16 | #define CONFIG_ISR_STACK_SIZE 1024 17 | 18 | #define CONFIG_USE_PLATFORM_INIT 0 19 | 20 | #define CONFIG_ARCH_ARM_V7M 1 21 | #define CONFIG_ARCH_ARM_V7M_VARIANT_M4 1 22 | #define CONFIG_HAS_FLOAT 1 23 | #define CONFIG_IRQ_PRIORITY_LEVELS 8 24 | 25 | #define CONFIG_PLATFORM_SYS_CLOCK_HZ 64000000 26 | #define CONFIG_DEBUG_KERNEL 1 -------------------------------------------------------------------------------- /include/arch.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | 8 | KernelResult ArchInitializeSpecifics(); 9 | KernelResult ArchStartKernel(); 10 | KernelResult ArchNewTask(TaskControBlock *task, uint8_t *stack_base, uint32_t stack_size); 11 | KernelResult ArchCriticalSectionEnter(); 
12 | KernelResult ArchCriticalSectionExit(); 13 | KernelResult ArchYield(); 14 | KernelResult ArchIsrEnter(); 15 | KernelResult ArchIsrLeave(); 16 | bool ArchInIsr(); 17 | uint32_t ArchGetIsrNesting(); 18 | uint8_t ArchCountLeadZeros(uint32_t word); 19 | -------------------------------------------------------------------------------- /include/clock.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | uint32_t GetTicksPerSecond(); 8 | uint32_t GetCurrentTicks(); 9 | KernelResult Sleep(uint32_t ticks); 10 | KernelResult ClockStep(uint32_t ticks); 11 | KernelResult AddTimeout(Timeout *timeout, 12 | uint32_t value, 13 | TimerCallback timeout_callback, 14 | void *user_data, 15 | bool is_task, 16 | TaskPriorityList *optional_list_to_bind); 17 | KernelResult RemoveTimeout(Timeout *timeout); 18 | -------------------------------------------------------------------------------- /include/core.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | KernelResult CoreInit(); 9 | KernelResult CoreStart(); 10 | KernelResult CoreMakeTaskPending(TaskControBlock * task, uint32_t reason, TaskPriorityList *kobject_pending_list); 11 | KernelResult CoreUnpendNextTask(TaskPriorityList *kobject_pending_list); 12 | KernelResult CoreMakeTaskReady(TaskControBlock * task); 13 | KernelResult CoreMakeAllTasksReady(TaskPriorityList *tasks); 14 | KernelResult CheckReschedule(); 15 | KernelResult CoreManageRoundRobin(); 16 | KernelResult CoreInitializeTaskList(TaskPriorityList *list); 17 | TaskControBlock * CoreGetCurrentTask(); 18 | TaskControBlock * CoreTaskSwitch(); 19 | KernelResult CoreSchedulingSuspend(); 20 | KernelResult CoreSchedulingResume(); 21 | bool IsCoreRunning(); -------------------------------------------------------------------------------- /include/kalango_api.h: -------------------------------------------------------------------------------- 1 | /** 2 | * The Kalango project, an always-experimental RTOS 3 | */ 4 | #pragma once 5 | /** 6 | * This is the Kalango API file: a glue header that gathers all 7 | * subsystems of the Kalango RTOS in a single place. Your application 8 | * should call these functions to interact with the Kalango kernel 9 | * instead of calling a particular 10 | * subsystem function directly. 11 | * 12 | * Some definitions regarding types can be found in the kernel_types.h 13 | * file; the other headers are intended for kernel-internal use.
14 | */ 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | /** 28 | * @fn Kalango_CoreStart 29 | * @brief Starts the kalango kernel and core system 30 | * @return never returns 31 | * @note calling this function multiple times result in immediate return 32 | */ 33 | static inline KernelResult Kalango_CoreStart() { 34 | return CoreStart(); 35 | } 36 | 37 | /** 38 | * @fn Kalango_GetTicksPerSecond 39 | * @brief Get current ticks per second 40 | * @return Ticks per second 41 | * @note This function depends on kernel configuration 42 | */ 43 | static inline uint32_t Kalango_GetTicksPerSecond() { 44 | return GetTicksPerSecond(); 45 | } 46 | 47 | /** 48 | * @fn Kalango_GetCurrentTicks 49 | * @brief Return the current elapsed ticks since kernel started 50 | * @return value of ticks after the kernel started 51 | */ 52 | static inline uint32_t Kalango_GetCurrentTicks() { 53 | return GetCurrentTicks(); 54 | } 55 | 56 | /** 57 | * @fn Kalango_GetCurrentTaskId 58 | * @brief Return the Id of current executing task 59 | * @return Id of the current task 60 | */ 61 | static inline TaskId Kalango_GetCurrentTaskId() { 62 | return ((TaskId) CoreGetCurrentTask()); 63 | } 64 | 65 | /** 66 | * @fn Kalango_GetHeapFreeBytes 67 | * @brief Return how much free bytes from kernel heap 68 | * @return free space in bytes 69 | */ 70 | static inline uint32_t Kalango_GetHeapFreeBytes() { 71 | return GetKernelFreeBytesOnHeap(); 72 | } 73 | 74 | /** 75 | * @fn Kalango_Sleep 76 | * @brief Put current thread to sleep 77 | * @param ticks - ticks to keep current thread in sleep 78 | * @return kSuccess when task wakes up 79 | * @note calling this function from a ISR results in immediate 80 | * return and error 81 | */ 82 | static inline KernelResult Kalango_Sleep(uint32_t ticks) { 83 | return Sleep(ticks); 84 | } 85 | 86 | /** 87 | * @fn Kalango_TaskCreate 88 | * @brief Creates a new task and put it into the ready list 89 | * @param settings - structure that contains initial settings of task 90 | * @return a unique task_id on succesful creation 91 | * @note if the created task has the highest priority, it will put in execution 92 | * instead of placed only on ready list; 93 | * @note refer TaskSettings contents on kernel_types.h 94 | * @note calling this function from a ISR results in immediate 95 | * return and error 96 | */ 97 | static inline TaskId Kalango_TaskCreate(TaskSettings *settings) { 98 | return TaskCreate(settings); 99 | } 100 | 101 | /** 102 | * @fn Kalango_TaskSuspend 103 | * @brief Suspends the execution of an task 104 | * @param task_id - id of target task to suspend 105 | * @return kSuccess on succesful suspension 106 | * @note Suspension does not support nesting, that is it, 107 | * if this function suspend a already suspended task it will 108 | * return error 109 | * @note calling this function from a ISR results in immediate 110 | * return and error 111 | */ 112 | static inline KernelResult Kalango_TaskSuspend(TaskId task_id) { 113 | return TaskSuspend(task_id); 114 | } 115 | 116 | /** 117 | * @fn Kalango_TaskResume 118 | * @brief Resume the execution of an task 119 | * @param task_id - id of target task to resume 120 | * @return kSuccess on task placed on ready list 121 | * @note calling resume for a ready task results in error 122 | */ 123 | static inline KernelResult Kalango_TaskResume(TaskId task_id) { 124 | return TaskResume(task_id); 125 | } 126 | 127 | /** 128 | * @fn Kalango_TaskDelete 129 | * 
@brief Terminate a task in execution 130 | * @param task_id - id of target task to terminate 131 | * @return kSuccess on termination 132 | * @note this function actually does not delete a task, it simply put that 133 | * on a not runnable state, then only calling task create can put it 134 | * again on execution. 135 | * @note calling this function from a ISR results in immediate 136 | * return and error 137 | */ 138 | static inline KernelResult Kalango_TaskDelete(TaskId task_id) { 139 | return TaskDelete(task_id); 140 | } 141 | 142 | /** 143 | * @fn Kalango_TaskSetPriority 144 | * @brief Changes the priority of a task 145 | * @param task_id - id of target task to change the priority 146 | * @param new_priority - new priority of target task 147 | * @return old priority of target task or -1 on error 148 | * @note If priority changing results on target task to be the highest one, 149 | * it will placed in execution immediately if is not already. 150 | */ 151 | static inline uint32_t Kalango_TaskSetPriority(TaskId task_id, 152 | uint32_t new_priority) { 153 | return TaskSetPriority(task_id, new_priority); 154 | } 155 | 156 | /** 157 | * @fn Kalango_TaskGetPriority 158 | * @brief Gets the priority of an task 159 | * @param task_id - id of desired priority task 160 | * @return priority of that task 161 | */ 162 | static inline uint32_t Kalango_TaskGetPriority(TaskId task_id) { 163 | return TaskGetPriority(task_id); 164 | } 165 | 166 | /** 167 | * @fn Kalango_TaskYield 168 | * @brief Voluntary releases the CPU for the next task on FIFO 169 | * @return kSuccess on succesful yielding 170 | * @note The yield will only occur if there are, at least 2 tasks of same priority 171 | * on the ready list, otherwise calling this function will be ignored 172 | * @note calling this function from a ISR results in immediate 173 | * return and error 174 | */ 175 | static inline KernelResult Kalango_TaskYield() { 176 | return TaskYield(); 177 | } 178 | 179 | 180 | /** 181 | * @fn Kalango_SemaphoreCreate 182 | * @brief Creates a counting or binary semaphore 183 | * @param initial - initial count available on that semaphore 184 | * @param limit - maximum counting allowed to this semaphore 185 | * @return A unique semaphore id bonded to created object 186 | * @note To create a binary semaphore makes initial equal to 0 and 187 | * the limit equal to 1 188 | */ 189 | static inline SemaphoreId Kalango_SemaphoreCreate(uint32_t initial, 190 | uint32_t limit) { 191 | return SemaphoreCreate(initial, limit); 192 | } 193 | 194 | /** 195 | * @fn Kalango_SemaphoreTake 196 | * @brief Takes a semaphore, blocks if not available 197 | * @param semaphre - id of desired semaphore 198 | * @param timeout - amount of timeout to wait if it is not available 199 | * @return kSuccess on semaphore took, kTimeout if not after wait 200 | * @note passing KERNEL_WAIT_FOREVER as timeout will make the task block until semaphore 201 | * make it available 202 | * @note passing KERNEL_NO_WAIT as timeout causes immediate return if semaphore is not available 203 | * @note calling this function from a ISR results in immediate 204 | * return and error 205 | */ 206 | static inline KernelResult Kalango_SemaphoreTake(SemaphoreId semaphore, 207 | uint32_t timeout) { 208 | return SemaphoreTake(semaphore, timeout); 209 | } 210 | 211 | /** 212 | * @fn Kalango_SemaphoreGive 213 | * @brief Makes a semaphore available by some counts 214 | * @param semaphore - id of target semaphore 215 | * @param count - desired counts of releasing 216 | * @return kSuccess on 
succesful giving 217 | * @note passing count above the semaphore limit will cause the 218 | * semaphore to make all its counts available 219 | */ 220 | static inline KernelResult Kalango_SemaphoreGive(SemaphoreId semaphore, 221 | uint32_t count) { 222 | return SemaphoreGive(semaphore, count); 223 | } 224 | 225 | /** 226 | * @fn Kalango_SemaphoreDelete 227 | * @brief Deletes a semaphore 228 | * @param semaphore - id of semaphore to be deleted 229 | * @return kSuccess on succesful deletion 230 | * @note Deleting a semaphore will put all the waiting tasks for it in a ready state 231 | * @note calling this function from a ISR results in immediate 232 | * return and error 233 | */ 234 | static inline KernelResult Kalango_SemaphoreDelete (SemaphoreId semaphore) { 235 | return SemaphoreDelete(semaphore); 236 | } 237 | 238 | 239 | /** 240 | * @fn Kalango_MutexCreate 241 | * @brief Creates a mutual exclusion semaphore 242 | * @return A unique id bonded to Mutex object 243 | * @note New mutexes starts all in not locked state 244 | */ 245 | static inline MutexId Kalango_MutexCreate() { 246 | return MutexCreate(); 247 | } 248 | 249 | /** 250 | * @fn Kalango_MutexTryLock 251 | * @brief Tries to lock / acquire the mutex 252 | * @param mutex - id to the desired mutex 253 | * @return kSuccess on mutex locked 254 | * @note on fail to acquire a mutex this function will return 255 | * immediately 256 | * @note calling this function from a ISR results in immediate 257 | * return and error 258 | */ 259 | static inline KernelResult Kalango_MutexTryLock(MutexId mutex) { 260 | return MutexTryLock(mutex); 261 | } 262 | 263 | /** 264 | * @fn Kalango_MutexLock 265 | * @brief Try to acquire a mutex, blocks if it is not available 266 | * @param mutex - id for the desired mutex 267 | * @param timeout - timeout to wait for a locked mutex in ticks 268 | * @return kSuccess on mutex acquired / kErrorTimeout if waiting time expires 269 | * @note passing KERNEL_WAIT_FOREVER as timeout will make the task block until mutex 270 | * make it available 271 | * @note passing KERNEL_NO_WAIT as timeout causes immediate return if mutex is not available 272 | * @note Mutexes prevent deadlock by being recursive, if a particular task lock a mutex by 273 | * multiple times it will nested, mutexes need to be unlocked by the same amount to 274 | * be actually freed. 
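 * @note A minimal usage sketch (illustrative only, built from the API declared in this file):
 *         MutexId m = Kalango_MutexCreate();
 *         if (Kalango_MutexLock(m, KERNEL_WAIT_FOREVER) == kSuccess) {
 *             ... access the shared resource ...
 *             Kalango_MutexUnlock(m);
 *         }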
275 | * @note calling this function from a ISR results in immediate 276 | * return and error 277 | */ 278 | static inline KernelResult Kalango_MutexLock(MutexId mutex, uint32_t timeout) { 279 | return MutexLock(mutex, timeout); 280 | } 281 | 282 | /** 283 | * @fn Kalango_MutexUnlock 284 | * @brief Unlocks a previously locked mutex 285 | * @param mutex - id of target mutex 286 | * @return kSuccess on succesful unlocking 287 | * @note Only the mutex owner can unlock the mutex 288 | * @note Unlock a recursively locked mutex will return kErrorMutexALready taken 289 | * until all recursive lockings will be undone 290 | * @note calling this function from a ISR results in immediate 291 | * return and error 292 | */ 293 | static inline KernelResult Kalango_MutexUnlock(MutexId mutex) { 294 | return MutexUnlock(mutex); 295 | } 296 | 297 | /** 298 | * @fn Kalango_MutexDelete 299 | * @brief Deletes a mutex 300 | * @param mutex - id of desired mutex 301 | * @return kSucess on succesful deletion 302 | * @note Deleting a mutex will put all the waiting tasks for it on ready list 303 | * @note calling this function from a ISR results in immediate 304 | * return and error 305 | */ 306 | static inline KernelResult Kalango_MutexDelete(MutexId mutex) { 307 | return MutexDelete(mutex); 308 | } 309 | 310 | 311 | /** 312 | * @fn Kalango_QueueCreate 313 | * @brief Created a message queue 314 | * @param noof_slots - number of slots of this queue 315 | * @param slot_size - the size of each slot of message queue 316 | * @return A unique id bonded to created queue 317 | * @note slot_size is in bytes 318 | * @note the user allocated buffer should have a size in bytes equal to: 319 | * noof_slots * slot_size, size smaller than this value will result 320 | * in crashes when queue starts to became full 321 | */ 322 | static inline QueueId Kalango_QueueCreate(uint32_t noof_slots, 323 | uint32_t slot_size) { 324 | return QueueCreate(noof_slots, slot_size); 325 | } 326 | 327 | /** 328 | * @fn Kalango_QueueInsert 329 | * @brief Inserts a new slot on tail of queue, blocks if it is full 330 | * @param queue - id of target queue 331 | * @param data - the data to be inserted on the slot 332 | * @param data_size - size of data to be inserted 333 | * @param timeout - ticks to wait until a slot of queue become free 334 | * @return kSuccess on data copied, kErrorTimeout on waiting time expiration 335 | * @note data_size should be the same value of slot_size in bytes, lesser 336 | * values can be passed but must be handled by user application, 337 | * passing larger values than slot will return on error 338 | * @note passing KERNEL_WAIT_FOREVER as timeout will make the task block until 339 | * a slot becomes it available 340 | * @note passing KERNEL_NO_WAIT as timeout causes immediate return if queue is full 341 | * @note calling this function from a ISR is only allowed when passing 342 | * KERNEL_NO_WAIT as timeout parameter, other cases return immediately 343 | * with error. 
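 * @note A minimal producer sketch (illustrative only; q and value are hypothetical names):
 *         QueueId q = Kalango_QueueCreate(8, sizeof(uint32_t));
 *         uint32_t value = 42;
 *         Kalango_QueueInsert(q, &value, sizeof(value), KERNEL_WAIT_FOREVER);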
344 | */ 345 | static inline KernelResult Kalango_QueueInsert(QueueId queue, 346 | void *data, 347 | uint32_t data_size, 348 | uint32_t timeout) { 349 | return QueueInsert(queue, data, data_size, timeout); 350 | } 351 | 352 | /** 353 | * @fn Kalango_QueuePeek 354 | * @brief Receives data from head of the queue, blocks if it is empty 355 | * @param queue - id of target queue 356 | * @param data - user allocated area to store data received from queue 357 | * @param data_size - user pointer to store current slot size 358 | * @param timeout - ticks to wait until at least one slot arrives on queue 359 | * @return kSuccess on data copied, kErrorTimeout on waiting time expiration 360 | * @note peek a queue will receive the data, but will not update the queue head 361 | * @note data_size pointer parameter is actually not in use, can be NULL 362 | * @note passing KERNEL_WAIT_FOREVER as timeout will make the task block until 363 | * a slot becomes it available 364 | * @note passing KERNEL_NO_WAIT as timeout causes immediate return if queue is empty 365 | * @note calling this function from a ISR is only allowed when passing 366 | * KERNEL_NO_WAIT as timeout parameter, other cases return immediately 367 | * with error. 368 | */ 369 | static inline KernelResult Kalango_QueuePeek(QueueId queue, 370 | void *data, 371 | uint32_t *data_size, 372 | uint32_t timeout) { 373 | return QueuePeek(queue, data, data_size, timeout); 374 | } 375 | 376 | /** 377 | * @fn Kalango_QueueRemove 378 | * @brief Receives data from head of the queue, update it or, blocks if it is empty 379 | * @param queue - id of target queue 380 | * @param data - user allocated area to store data received from queue 381 | * @param data_size - user pointer to store current slot size 382 | * @param timeout - ticks to wait until at least one slot arrives on queue 383 | * @return kSuccess on data copied, kErrorTimeout on waiting time expiration 384 | * @note data_size pointer parameter is actually not in use, can be NULL 385 | * @note passing KERNEL_WAIT_FOREVER as timeout will make the task block until 386 | * a slot becomes it available 387 | * @note passing KERNEL_NO_WAIT as timeout causes immediate return if queue is empty 388 | * @note calling this function from a ISR is only allowed when passing 389 | * KERNEL_NO_WAIT as timeout parameter, other cases return immediately 390 | * with error. 
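 * @note A minimal consumer sketch (illustrative only, pairing with a queue created as in
 *       the Kalango_QueueInsert note above):
 *         uint32_t received;
 *         if (Kalango_QueueRemove(q, &received, NULL, KERNEL_WAIT_FOREVER) == kSuccess) {
 *             ... received now holds the oldest slot and the queue head was advanced ...
 *         }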
391 | */ 392 | static inline KernelResult Kalango_QueueRemove(QueueId queue, 393 | void *data, 394 | uint32_t *data_size, 395 | uint32_t timeout) { 396 | return QueueRemove(queue, data, data_size, timeout); 397 | } 398 | 399 | /** 400 | * @fn Kalango_QueueDelete 401 | * @brief Delete a message queue 402 | * @param queue - id to target queue 403 | * @return kSuccess on succesful deletion 404 | * @note Deleting a queue will put all waiting tasks on ready list, no valid data will 405 | * be inserted or received 406 | * @note calling this function from a ISR results in immediate 407 | * return and error 408 | */ 409 | static inline KernelResult Kalango_QueueDelete(QueueId queue) { 410 | return QueueDelete(queue); 411 | } 412 | 413 | /** 414 | * @fn Kalango_TimerCreate 415 | * @brief Creates a new timer 416 | * @param callback - function invoked by timer on expiration 417 | * @param expiry_time - ticks to count before throw the callback 418 | * @param period_time - if not 0 defines ticks to periodically throw the callback 419 | * @param user_data - pointer to a user defined data if will passed to callback as param 420 | * @return A unique id bonded to the created timer 421 | * @note after created the timer is not running yet. 422 | * @note callbacks thrown by timers actually executes on ISR context so 423 | * avoid processing inside them. 424 | */ 425 | static inline TimerId Kalango_TimerCreate(TimerCallback callback, 426 | uint32_t expiry_time, 427 | uint32_t period_time, 428 | void* user_data) { 429 | return TimerCreate(callback, expiry_time, period_time, user_data); 430 | } 431 | 432 | /** 433 | * @fn Kalango_TimerStart 434 | * @brief Starts or restarts a timer 435 | * @param timer - id of desired timer to start 436 | * @return kSuccess on timer started to count 437 | * @note calling this function for a running timer implies on 438 | * reset its counting to 0 (restart) 439 | */ 440 | static inline KernelResult Kalango_TimerStart(TimerId timer) { 441 | return TimerStart(timer); 442 | } 443 | 444 | /** 445 | * @fn Kalango_TimerStop 446 | * @brief Stops a timer to count 447 | * @param timer - id of desired timer to stop 448 | * @return kSuccess on if timer stopped 449 | */ 450 | static inline KernelResult Kalango_TimerStop(TimerId timer) { 451 | return TimerStop(timer); 452 | } 453 | 454 | /** 455 | * @fn Kalango_TimerSetValues 456 | * @brief Sets the expiration and period of a timer 457 | * @param timer - id of desired timer 458 | * @param expiry_time - ticks to count before throw the callback 459 | * @param period_time - if not 0 defines ticks to periodically throw the callback 460 | * @return kSuccess on succesful setting 461 | * @note calling this function to running timer will cause its stopping before 462 | * the new values are set, the set timer needs to be restarted by 463 | * calling Kalango_TimerStart() again. 464 | */ 465 | static inline KernelResult Kalango_TimerSetValues(TimerId timer, 466 | uint32_t expiry_time, 467 | uint32_t period_time) { 468 | return TimerSetValues(timer, expiry_time, period_time); 469 | } 470 | 471 | /** 472 | * @fn Kalango_TimerDelete 473 | * @brief Deletes a timer 474 | * @param timer - id of timer to delete 475 | * @return kSuccess on succesful deletion 476 | * @note calling this function by a running timer will cause it 477 | * to be stopped before its being deleted. 
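 * @note A minimal timer lifecycle sketch (illustrative only; OnExpiry is a hypothetical callback):
 *         static void OnExpiry(void *user_data) { (void)user_data; }
 *         TimerId t = Kalango_TimerCreate(OnExpiry, 100, 100, NULL);
 *         Kalango_TimerStart(t);
 *         ... later, when the timer is no longer needed ...
 *         Kalango_TimerDelete(t);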
478 | */ 479 | static inline KernelResult Kalango_TimerDelete(TimerId timer) { 480 | return TimerDelete(timer); 481 | } 482 | 483 | 484 | /** 485 | * @fn Kalango_CriticalEnter 486 | * @brief Globally enables the cpu interrups 487 | * @return always success 488 | * @note If IrqDisable was called recursively before by multiple times 489 | * this function should be called by the same amount to actually 490 | * enables the interrupts 491 | */ 492 | static inline KernelResult Kalango_CriticalExit() { 493 | return ArchCriticalSectionExit(); 494 | } 495 | 496 | /** 497 | * @fn Kalango_CriticalExit 498 | * @brief Disables globally the CPU interrupts 499 | * @return Always success 500 | * @note Calling this function recursively will cause the irq nesting 501 | * refer Kalango_IrqEnable() to know how to enable the interrupts 502 | * in this case. 503 | */ 504 | static inline KernelResult Kalango_CriticalEnter() { 505 | return ArchCriticalSectionEnter(); 506 | } 507 | 508 | /** 509 | * @fn Kalango_IrqEnter 510 | * @brief Starts a ISR safe region 511 | * @return always kSuccess 512 | * @note This function MUST be placed before any other instruction on a ISR if 513 | * application wants to use RTOS functions. 514 | */ 515 | static inline KernelResult Kalango_IrqEnter() { 516 | return ArchIsrEnter(); 517 | } 518 | 519 | /** 520 | * @fn Kalango_IrqLeave 521 | * @brief Ends the ISR safe place 522 | * @return always kSuccess 523 | * @note Place this function as last instruction of an ISR, combined 524 | * witth Kalango_IrqEnter() the user can call RTOS functions 525 | * safely 526 | */ 527 | static inline KernelResult Kalango_IrqLeave() { 528 | return ArchIsrLeave(); 529 | } -------------------------------------------------------------------------------- /include/kernel_objects.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #ifndef CONFIG_KERNEL_BLOCKS 4 | #define CONFIG_KERNEL_BLOCKS 16 5 | #endif 6 | 7 | typedef struct { 8 | sys_dlist_t task_list[CONFIG_PRIORITY_LEVELS]; 9 | uint32_t ready_task_bitmap; 10 | uint32_t lock_level; 11 | }TaskPriorityList; 12 | 13 | typedef struct { 14 | void (*timeout_callback) (void *); 15 | void *user_data; 16 | bool is_task; 17 | uint32_t next_wakeup_tick; 18 | bool expired; 19 | TaskPriorityList *bonded_list; 20 | sys_dnode_t timed_node; 21 | }Timeout; 22 | 23 | typedef struct { 24 | uint8_t *stackpointer; 25 | uint32_t stack_size; 26 | void (*entry_point) (void *); 27 | void *arg1; 28 | uint32_t priority; 29 | uint32_t state; 30 | Timeout timeout; 31 | sys_dnode_t ready_node; 32 | } TaskControBlock; 33 | 34 | typedef struct { 35 | uint32_t count; 36 | uint32_t limit; 37 | TaskPriorityList pending_tasks; 38 | }Semaphore; 39 | 40 | typedef struct { 41 | bool owned; 42 | void *owner; 43 | uint32_t recursive_taking_count; 44 | uint32_t old_priority; 45 | TaskPriorityList pending_tasks; 46 | }Mutex; 47 | 48 | typedef struct { 49 | void (*callback) (void *); 50 | void *user_data; 51 | bool periodic; 52 | bool expired; 53 | bool running; 54 | uint32_t expiry_time; 55 | uint32_t period_time; 56 | Timeout timeout; 57 | }Timer; 58 | 59 | typedef struct { 60 | uint8_t *buffer; 61 | uint32_t tail; 62 | uint32_t head; 63 | uint32_t slot_size; 64 | uint32_t noof_slots; 65 | uint32_t available_slots; 66 | bool full; 67 | bool empty; 68 | TaskPriorityList writer_tasks_pending; 69 | TaskPriorityList reader_tasks_pending; 70 | } Queue; 71 | -------------------------------------------------------------------------------- 
/include/kernel_types.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "kernel_objects.h" 9 | 10 | #if CONFIG_PRIORITY_LEVELS > 32 11 | #error "Maximum priority level allowed is 32" 12 | #endif 13 | 14 | #define TASK_STATE_READY 0 15 | #define TASK_STATE_PEND_SEMAPHORE 1 16 | #define TASK_STATE_SUPENDED 2 17 | #define TASK_STATE_PEND_QUEUE 4 18 | #define TASK_STATE_PEND_TIMEOUT 8 19 | #define TASK_STATE_PEND_MUTEX 16 20 | #define TASK_STATE_PEND_ALL_SIGNALS 32 21 | #define TASK_STATE_PEND_ANY_SIGNAL 64 22 | #define TASK_STATE_TERMINATED 128 23 | #define TASK_STATE_WOKEN_BY_TIMEOUT 256 24 | 25 | #define KERNEL_WAIT_FOREVER -1 26 | #define KERNEL_NO_WAIT 0 27 | 28 | typedef enum { 29 | kSuccess = 0, 30 | kErrorInvalidParam, 31 | kErrorBufferFull, 32 | kErrorBufferEmpty, 33 | kErrorTimeout, 34 | kErrorDeviceBusy, 35 | kErrorInsideIsr, 36 | kErrorNotEnoughKernelMemory, 37 | kErrorTimerIsNotRunning, 38 | kStatusNoSwitchPending, 39 | kStatusSwitchIsPending, 40 | kStatusSchedLocked, 41 | kStatusSchedUnlocked, 42 | kErrorInvalidKernelState, 43 | kStatusMutexAlreadyTaken, 44 | kStatusSemaphoreUnavailable, 45 | kErrorTaskAlreadySuspended, 46 | kErrorTaskAlreadyResumed, 47 | kErrorInvalidMutexOwner, 48 | kErrorNothingToSchedule, 49 | }KernelResult; 50 | 51 | typedef void (*TimerCallback) (void *user_data); 52 | 53 | typedef TaskControBlock * TaskId; 54 | typedef Timer* TimerId; 55 | typedef Queue* QueueId; 56 | typedef Mutex* MutexId; 57 | typedef Semaphore * SemaphoreId; 58 | 59 | typedef void (*TaskFunction) (void *arg); 60 | 61 | typedef struct { 62 | uint32_t priority; 63 | uint32_t stack_size; 64 | TaskFunction function; 65 | void *arg; 66 | } TaskSettings; 67 | 68 | #ifndef CONFIG_REMOVE_CHECKINGS 69 | #define ASSERT_KERNEL(x, ...) \ 70 | if(!(x)) { \ 71 | return __VA_ARGS__; \ 72 | } 73 | #else 74 | #define ASSERT_KERNEL(x, ...) 75 | #endif 76 | 77 | #define ASSERT_PARAM(x) ASSERT_KERNEL(x, kErrorInvalidParam) -------------------------------------------------------------------------------- /include/list.h: -------------------------------------------------------------------------------- 1 | #ifndef LIST_H 2 | #define LIST_H 3 | 4 | #include 5 | 6 | /* 7 | * Copyright (c) 2013-2015 Wind River Systems, Inc. 8 | * 9 | * SPDX-License-Identifier: Apache-2.0 10 | */ 11 | 12 | /** 13 | * @file 14 | * @brief Doubly-linked list inline implementation 15 | * 16 | * Doubly-linked list implementation. 17 | * 18 | * The lists are expected to be initialized such that both the head and tail 19 | * pointers point to the list itself. Initializing the lists in such a fashion 20 | * simplifies the adding and removing of nodes to/from the list. 
21 | */ 22 | 23 | struct _dnode { 24 | union { 25 | struct _dnode *head; /* ptr to head of list (sys_dlist_t) */ 26 | struct _dnode *next; /* ptr to next node (sys_dnode_t) */ 27 | }; 28 | union { 29 | struct _dnode *tail; /* ptr to tail of list (sys_dlist_t) */ 30 | struct _dnode *prev; /* ptr to previous node (sys_dnode_t) */ 31 | }; 32 | }; 33 | 34 | typedef struct _dnode sys_dlist_t; 35 | typedef struct _dnode sys_dnode_t; 36 | typedef struct _dnode k_list_t; 37 | 38 | /** 39 | * @brief Provide the primitive to iterate on a list 40 | * Note: the loop is unsafe and thus __dn should not be removed 41 | * 42 | * User _MUST_ add the loop statement curly braces enclosing its own code: 43 | * 44 | * SYS_DLIST_FOR_EACH_NODE(l, n) { 45 | * 46 | * } 47 | * 48 | * @param __dl A pointer on a sys_dlist_t to iterate on 49 | * @param __dn A sys_dnode_t pointer to peek each node of the list 50 | */ 51 | #define SYS_DLIST_FOR_EACH_NODE(__dl, __dn) \ 52 | for (__dn = sys_dlist_peek_head(__dl); __dn; \ 53 | __dn = sys_dlist_peek_next(__dl, __dn)) 54 | 55 | /** 56 | * @brief Provide the primitive to iterate on a list, from a node in the list 57 | * Note: the loop is unsafe and thus __dn should not be removed 58 | * 59 | * User _MUST_ add the loop statement curly braces enclosing its own code: 60 | * 61 | * SYS_DLIST_ITERATE_FROM_NODE(l, n) { 62 | * 63 | * } 64 | * 65 | * Like SYS_DLIST_FOR_EACH_NODE(), but __dn already contains a node in the list 66 | * where to start searching for the next entry from. If NULL, it starts from 67 | * the head. 68 | * 69 | * @param __dl A pointer on a sys_dlist_t to iterate on 70 | * @param __dn A sys_dnode_t pointer to peek each node of the list; 71 | * it contains the starting node, or NULL to start from the head 72 | */ 73 | #define SYS_DLIST_ITERATE_FROM_NODE(__dl, __dn) \ 74 | for (__dn = __dn ? sys_dlist_peek_next_no_check(__dl, __dn) \ 75 | : sys_dlist_peek_head(__dl); \ 76 | __dn; \ 77 | __dn = sys_dlist_peek_next(__dl, __dn)) 78 | 79 | /** 80 | * @brief Provide the primitive to safely iterate on a list 81 | * Note: __dn can be removed, it will not break the loop. 82 | * 83 | * User _MUST_ add the loop statement curly braces enclosing its own code: 84 | * 85 | * SYS_DLIST_FOR_EACH_NODE_SAFE(l, n, s) { 86 | * 87 | * } 88 | * 89 | * @param __dl A pointer on a sys_dlist_t to iterate on 90 | * @param __dn A sys_dnode_t pointer to peek each node of the list 91 | * @param __dns A sys_dnode_t pointer for the loop to run safely 92 | */ 93 | #define SYS_DLIST_FOR_EACH_NODE_SAFE(__dl, __dn, __dns) \ 94 | for (__dn = sys_dlist_peek_head(__dl), \ 95 | __dns = sys_dlist_peek_next(__dl, __dn); \ 96 | __dn; __dn = __dns, \ 97 | __dns = sys_dlist_peek_next(__dl, __dn)) 98 | 99 | /* 100 | * @brief Provide the primitive to resolve the container of a list node 101 | * Note: it is safe to use with NULL pointer nodes 102 | * 103 | * @param __dn A pointer on a sys_dnode_t to get its container 104 | * @param __cn Container struct type pointer 105 | * @param __n The field name of sys_dnode_t within the container struct 106 | */ 107 | #define SYS_DLIST_CONTAINER(__dn, __cn, __n) \ 108 | (__dn ? 
CONTAINER_OF(__dn, __typeof__(*__cn), __n) : NULL) 109 | /* 110 | * @brief Provide the primitive to peek container of the list head 111 | * 112 | * @param __dl A pointer on a sys_dlist_t to peek 113 | * @param __cn Container struct type pointer 114 | * @param __n The field name of sys_dnode_t within the container struct 115 | */ 116 | #define SYS_DLIST_PEEK_HEAD_CONTAINER(__dl, __cn, __n) \ 117 | SYS_DLIST_CONTAINER(sys_dlist_peek_head(__dl), __cn, __n) 118 | 119 | /* 120 | * @brief Provide the primitive to peek the next container 121 | * 122 | * @param __dl A pointer on a sys_dlist_t to peek 123 | * @param __cn Container struct type pointer 124 | * @param __n The field name of sys_dnode_t within the container struct 125 | */ 126 | #define SYS_DLIST_PEEK_NEXT_CONTAINER(__dl, __cn, __n) \ 127 | ((__cn) ? SYS_DLIST_CONTAINER(sys_dlist_peek_next(__dl, &(__cn->__n)), \ 128 | __cn, __n) : NULL) 129 | 130 | /** 131 | * @brief Provide the primitive to iterate on a list under a container 132 | * Note: the loop is unsafe and thus __cn should not be detached 133 | * 134 | * User _MUST_ add the loop statement curly braces enclosing its own code: 135 | * 136 | * SYS_DLIST_FOR_EACH_CONTAINER(l, c, n) { 137 | * 138 | * } 139 | * 140 | * @param __dl A pointer on a sys_dlist_t to iterate on 141 | * @param __cn A pointer to peek each entry of the list 142 | * @param __n The field name of sys_dnode_t within the container struct 143 | */ 144 | #define SYS_DLIST_FOR_EACH_CONTAINER(__dl, __cn, __n) \ 145 | for (__cn = SYS_DLIST_PEEK_HEAD_CONTAINER(__dl, __cn, __n); __cn; \ 146 | __cn = SYS_DLIST_PEEK_NEXT_CONTAINER(__dl, __cn, __n)) 147 | 148 | /** 149 | * @brief Provide the primitive to safely iterate on a list under a container 150 | * Note: __cn can be detached, it will not break the loop. 
151 | * 152 | * User _MUST_ add the loop statement curly braces enclosing its own code: 153 | * 154 | * SYS_DLIST_FOR_EACH_CONTAINER_SAFE(l, c, cn, n) { 155 | * 156 | * } 157 | * 158 | * @param __dl A pointer on a sys_dlist_t to iterate on 159 | * @param __cn A pointer to peek each entry of the list 160 | * @param __cns A pointer for the loop to run safely 161 | * @param __n The field name of sys_dnode_t within the container struct 162 | */ 163 | #define SYS_DLIST_FOR_EACH_CONTAINER_SAFE(__dl, __cn, __cns, __n) \ 164 | for (__cn = SYS_DLIST_PEEK_HEAD_CONTAINER(__dl, __cn, __n), \ 165 | __cns = SYS_DLIST_PEEK_NEXT_CONTAINER(__dl, __cn, __n); __cn; \ 166 | __cn = __cns, \ 167 | __cns = SYS_DLIST_PEEK_NEXT_CONTAINER(__dl, __cn, __n)) 168 | 169 | /** 170 | * @brief initialize list 171 | * 172 | * @param list the doubly-linked list 173 | * 174 | * @return N/A 175 | */ 176 | 177 | static inline void sys_dlist_init(sys_dlist_t *list) 178 | { 179 | list->head = (sys_dnode_t *)list; 180 | list->tail = (sys_dnode_t *)list; 181 | } 182 | 183 | #define SYS_DLIST_STATIC_INIT(ptr_to_list) {{(ptr_to_list)}, {(ptr_to_list)}} 184 | 185 | /** 186 | * @brief check if a node is the list's head 187 | * 188 | * @param list the doubly-linked list to operate on 189 | * @param node the node to check 190 | * 191 | * @return 1 if node is the head, 0 otherwise 192 | */ 193 | 194 | static inline int sys_dlist_is_head(sys_dlist_t *list, sys_dnode_t *node) 195 | { 196 | return list->head == node; 197 | } 198 | 199 | /** 200 | * @brief check if a node is the list's tail 201 | * 202 | * @param list the doubly-linked list to operate on 203 | * @param node the node to check 204 | * 205 | * @return 1 if node is the tail, 0 otherwise 206 | */ 207 | 208 | static inline int sys_dlist_is_tail(sys_dlist_t *list, sys_dnode_t *node) 209 | { 210 | return list->tail == node; 211 | } 212 | 213 | /** 214 | * @brief check if the list is empty 215 | * 216 | * @param list the doubly-linked list to operate on 217 | * 218 | * @return 1 if empty, 0 otherwise 219 | */ 220 | 221 | static inline int sys_dlist_is_empty(sys_dlist_t *list) 222 | { 223 | return list->head == list; 224 | } 225 | 226 | /** 227 | * @brief check if more than one node present 228 | * 229 | * @param list the doubly-linked list to operate on 230 | * 231 | * @return 1 if multiple nodes, 0 otherwise 232 | */ 233 | 234 | static inline int sys_dlist_has_multiple_nodes(sys_dlist_t *list) 235 | { 236 | return list->head != list->tail; 237 | } 238 | 239 | /** 240 | * @brief get a reference to the head item in the list 241 | * 242 | * @param list the doubly-linked list to operate on 243 | * 244 | * @return a pointer to the head element, NULL if list is empty 245 | */ 246 | 247 | static inline sys_dnode_t *sys_dlist_peek_head(sys_dlist_t *list) 248 | { 249 | return sys_dlist_is_empty(list) ? NULL : list->head; 250 | } 251 | 252 | /** 253 | * @brief get a reference to the head item in the list 254 | * 255 | * The list must be known to be non-empty. 256 | * 257 | * @param list the doubly-linked list to operate on 258 | * 259 | * @return a pointer to the head element 260 | */ 261 | 262 | static inline sys_dnode_t *sys_dlist_peek_head_not_empty(sys_dlist_t *list) 263 | { 264 | return list->head; 265 | } 266 | 267 | /** 268 | * @brief get a reference to the next item in the list, node is not NULL 269 | * 270 | * Faster than sys_dlist_peek_next() if node is known not to be NULL. 
271 | * 272 | * @param list the doubly-linked list to operate on 273 | * @param node the node from which to get the next element in the list 274 | * 275 | * @return a pointer to the next element from a node, NULL if node is the tail 276 | */ 277 | 278 | static inline sys_dnode_t *sys_dlist_peek_next_no_check(sys_dlist_t *list, 279 | sys_dnode_t *node) 280 | { 281 | return (node == list->tail) ? NULL : node->next; 282 | } 283 | 284 | /** 285 | * @brief get a reference to the next item in the list 286 | * 287 | * @param list the doubly-linked list to operate on 288 | * @param node the node from which to get the next element in the list 289 | * 290 | * @return a pointer to the next element from a node, NULL if node is the tail 291 | * or NULL (when node comes from reading the head of an empty list). 292 | */ 293 | 294 | static inline sys_dnode_t *sys_dlist_peek_next(sys_dlist_t *list, 295 | sys_dnode_t *node) 296 | { 297 | return node ? sys_dlist_peek_next_no_check(list, node) : NULL; 298 | } 299 | 300 | /** 301 | * @brief get a reference to the tail item in the list 302 | * 303 | * @param list the doubly-linked list to operate on 304 | * 305 | * @return a pointer to the tail element, NULL if list is empty 306 | */ 307 | 308 | static inline sys_dnode_t *sys_dlist_peek_tail(sys_dlist_t *list) 309 | { 310 | return sys_dlist_is_empty(list) ? NULL : list->tail; 311 | } 312 | 313 | /** 314 | * @brief add node to tail of list 315 | * 316 | * @param list the doubly-linked list to operate on 317 | * @param node the element to append 318 | * 319 | * @return N/A 320 | */ 321 | 322 | static inline void sys_dlist_append(sys_dlist_t *list, sys_dnode_t *node) 323 | { 324 | node->next = list; 325 | node->prev = list->tail; 326 | 327 | list->tail->next = node; 328 | list->tail = node; 329 | } 330 | 331 | /** 332 | * @brief add node to head of list 333 | * 334 | * @param list the doubly-linked list to operate on 335 | * @param node the element to append 336 | * 337 | * @return N/A 338 | */ 339 | 340 | static inline void sys_dlist_prepend(sys_dlist_t *list, sys_dnode_t *node) 341 | { 342 | node->next = list->head; 343 | node->prev = list; 344 | 345 | list->head->prev = node; 346 | list->head = node; 347 | } 348 | 349 | /** 350 | * @brief insert node after a node 351 | * 352 | * Insert a node after a specified node in a list. 353 | * 354 | * @param list the doubly-linked list to operate on 355 | * @param insert_point the insert point in the list: if NULL, insert at head 356 | * @param node the element to append 357 | * 358 | * @return N/A 359 | */ 360 | 361 | static inline void sys_dlist_insert_after(sys_dlist_t *list, 362 | sys_dnode_t *insert_point, sys_dnode_t *node) 363 | { 364 | if (!insert_point) { 365 | sys_dlist_prepend(list, node); 366 | } else { 367 | node->next = insert_point->next; 368 | node->prev = insert_point; 369 | insert_point->next->prev = node; 370 | insert_point->next = node; 371 | } 372 | } 373 | 374 | /** 375 | * @brief insert node before a node 376 | * 377 | * Insert a node before a specified node in a list. 
378 | * 379 | * @param list the doubly-linked list to operate on 380 | * @param insert_point the insert point in the list: if NULL, insert at tail 381 | * @param node the element to insert 382 | * 383 | * @return N/A 384 | */ 385 | 386 | static inline void sys_dlist_insert_before(sys_dlist_t *list, 387 | sys_dnode_t *insert_point, sys_dnode_t *node) 388 | { 389 | if (!insert_point) { 390 | sys_dlist_append(list, node); 391 | } else { 392 | node->prev = insert_point->prev; 393 | node->next = insert_point; 394 | insert_point->prev->next = node; 395 | insert_point->prev = node; 396 | } 397 | } 398 | 399 | /** 400 | * @brief insert node at position 401 | * 402 | * Insert a node in a location depending on a external condition. The cond() 403 | * function checks if the node is to be inserted _before_ the current node 404 | * against which it is checked. 405 | * 406 | * @param list the doubly-linked list to operate on 407 | * @param node the element to insert 408 | * @param cond a function that determines if the current node is the correct 409 | * insert point 410 | * @param data parameter to cond() 411 | * 412 | * @return N/A 413 | */ 414 | 415 | static inline void sys_dlist_insert_at(sys_dlist_t *list, sys_dnode_t *node, 416 | int (*cond)(sys_dnode_t *, void *), void *data) 417 | { 418 | if (sys_dlist_is_empty(list)) { 419 | sys_dlist_append(list, node); 420 | } else { 421 | sys_dnode_t *pos = sys_dlist_peek_head(list); 422 | 423 | while (pos && !cond(pos, data)) { 424 | pos = sys_dlist_peek_next(list, pos); 425 | } 426 | sys_dlist_insert_before(list, pos, node); 427 | } 428 | } 429 | 430 | /** 431 | * @brief remove a specific node from a list 432 | * 433 | * The list is implicit from the node. The node must be part of a list. 434 | * 435 | * @param node the node to remove 436 | * 437 | * @return N/A 438 | */ 439 | 440 | static inline void sys_dlist_remove(sys_dnode_t *node) 441 | { 442 | node->prev->next = node->next; 443 | node->next->prev = node->prev; 444 | } 445 | 446 | /** 447 | * @brief get the first node in a list 448 | * 449 | * @param list the doubly-linked list to operate on 450 | * 451 | * @return the first node in the list, NULL if list is empty 452 | */ 453 | 454 | static inline sys_dnode_t *sys_dlist_get(sys_dlist_t *list) 455 | { 456 | sys_dnode_t *node; 457 | 458 | if (sys_dlist_is_empty(list)) { 459 | return NULL; 460 | } 461 | 462 | node = list->head; 463 | sys_dlist_remove(node); 464 | return node; 465 | } 466 | 467 | 468 | #endif 469 | -------------------------------------------------------------------------------- /include/macros.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) 4 | #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) 5 | 6 | #define CONTAINER_OF(ptr, type, field) ((type *)(((char *)(ptr)) - offsetof(type, field))) 7 | -------------------------------------------------------------------------------- /include/mutex.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | MutexId MutexCreate(); 11 | KernelResult MutexTryLock(MutexId mutex); 12 | KernelResult MutexLock(MutexId mutex, uint32_t timeout); 13 | KernelResult MutexUnlock(MutexId mutex); 14 | KernelResult MutexDelete(MutexId mutex); 15 | -------------------------------------------------------------------------------- /include/object_pool.h: 
-------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | uint32_t GetKernelFreeBytesOnHeap(); 8 | 9 | KernelResult InitializeObjectPools(); 10 | 11 | uint8_t *AllocateRawBuffer(uint32_t size); 12 | KernelResult FreeRawBuffer(uint8_t *self); 13 | 14 | TaskControBlock *AllocateTaskObject(); 15 | KernelResult FreeTaskObject(TaskControBlock *self); 16 | 17 | Semaphore *AllocateSemaphoreObject(); 18 | KernelResult FreeSemaphoreObject(Semaphore *self); 19 | 20 | Mutex *AllocateMutexObject(); 21 | KernelResult FreeMutexObject(Mutex *self); 22 | 23 | Timer *AllocateTimerObject(); 24 | KernelResult FreeTimerObject(Timer *self); 25 | 26 | Queue *AllocateQueueObject(); 27 | KernelResult FreeQueueObject(Queue *self); 28 | -------------------------------------------------------------------------------- /include/platform.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | extern KernelResult PlatformInit(void *arg); -------------------------------------------------------------------------------- /include/queue.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | QueueId QueueCreate(uint32_t noof_slots, uint32_t slot_size); 10 | KernelResult QueueInsert(QueueId queue, void *data, uint32_t data_size, uint32_t timeout); 11 | KernelResult QueuePeek(QueueId queue, void *data, uint32_t *data_size, uint32_t timeout); 12 | KernelResult QueueRemove(QueueId queue, void *data, uint32_t *data_size, uint32_t timeout); 13 | KernelResult QueueDelete(QueueId queue); 14 | 15 | -------------------------------------------------------------------------------- /include/sched.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | bool IsSchedulerLocked(TaskPriorityList *taskset); 7 | KernelResult SchedulerLock(TaskPriorityList *taskset); 8 | KernelResult SchedulerUnlock(TaskPriorityList *taskset); 9 | TaskControBlock * ScheduleTaskSet(TaskPriorityList *taskset); 10 | void SchedulerInitTaskPriorityList(TaskPriorityList *list); 11 | bool NothingToSched(TaskPriorityList *list); 12 | KernelResult SchedulerSetPriority(TaskPriorityList *list, uint32_t priority); 13 | KernelResult SchedulerResetPriority(TaskPriorityList *list, uint32_t priority); 14 | KernelResult SchedulerDoRoundRobin(TaskPriorityList *list); -------------------------------------------------------------------------------- /include/semaphore.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | SemaphoreId SemaphoreCreate(uint32_t initial, uint32_t limit); 10 | KernelResult SemaphoreTake(SemaphoreId semaphore, uint32_t timeout); 11 | KernelResult SemaphoreGive(SemaphoreId semaphore, uint32_t count); 12 | KernelResult SemaphoreDelete (SemaphoreId semaphore); -------------------------------------------------------------------------------- /include/task.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | TaskId TaskCreate(TaskSettings *settings); 10 | KernelResult TaskSuspend(TaskId task_id); 11 | KernelResult TaskResume(TaskId task_id); 12 | KernelResult 
TaskDelete(TaskId task_id); 13 | uint32_t TaskSetPriority(TaskId task_id, uint32_t new_priority); 14 | uint32_t TaskGetPriority(TaskId task_id); 15 | KernelResult TaskYield(); 16 | -------------------------------------------------------------------------------- /include/timer.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | TimerId TimerCreate(TimerCallback callback, uint32_t expiry_time, uint32_t period_time, void *user_data); 10 | KernelResult TimerStart(TimerId timer); 11 | KernelResult TimerStop(TimerId timer); 12 | KernelResult TimerSetValues(TimerId timer, uint32_t expiry_time, uint32_t period_time); 13 | KernelResult TimerDelete(TimerId timer); 14 | 15 | 16 | -------------------------------------------------------------------------------- /src/clock.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list); 4 | static uint32_t tick_counter = 0; 5 | 6 | #if CONFIG_ENABLE_TIMERS > 0 7 | static KernelResult HandleExpiredTimers(sys_dlist_t *expired_list) { 8 | 9 | sys_dnode_t *next = sys_dlist_peek_head(expired_list); 10 | 11 | while(next) { 12 | Timeout *timeout = CONTAINER_OF(next, Timeout, timed_node); 13 | Timer *timer = CONTAINER_OF(timeout, Timer, timeout); 14 | 15 | if(timeout->timeout_callback) { 16 | timeout->timeout_callback(timeout->user_data); 17 | } 18 | 19 | if(timer->periodic) { 20 | AddTimeout(&timer->timeout, timer->period_time, timer->callback, timer->user_data, false, NULL); 21 | } else { 22 | timer->expired = true; 23 | timer->running = false; 24 | } 25 | 26 | next = sys_dlist_peek_next(expired_list, next); 27 | } 28 | 29 | return kSuccess; 30 | } 31 | #endif 32 | 33 | uint32_t GetTicksPerSecond() { 34 | return CONFIG_TICKS_PER_SEC; 35 | } 36 | 37 | uint32_t GetCurrentTicks() { 38 | return tick_counter; 39 | } 40 | 41 | KernelResult Sleep(uint32_t ticks) { 42 | ASSERT_PARAM(ticks); 43 | 44 | CoreSchedulingSuspend(); 45 | TaskControBlock *current = CoreGetCurrentTask(); 46 | CoreMakeTaskPending(current, TASK_STATE_PEND_TIMEOUT, NULL); 47 | AddTimeout(¤t->timeout, ticks, NULL, NULL, true, NULL); 48 | 49 | return (CheckReschedule()); 50 | } 51 | 52 | KernelResult ClockStep (uint32_t ticks) { 53 | if(!IsCoreRunning()) { 54 | return kErrorInvalidKernelState; 55 | } 56 | 57 | ASSERT_KERNEL(ArchInIsr(), kErrorInvalidKernelState); 58 | 59 | #if CONFIG_ENABLE_ROUND_ROBIN_SCHED 60 | CoreManageRoundRobin(); 61 | #endif 62 | 63 | sys_dlist_t expired_list; 64 | sys_dnode_t *next = sys_dlist_peek_head(&timeout_list); 65 | 66 | sys_dlist_init(&expired_list); 67 | tick_counter += ticks; 68 | 69 | while(next) { 70 | Timeout *timeout = CONTAINER_OF(next, Timeout, timed_node); 71 | if(timeout->next_wakeup_tick <= tick_counter) { 72 | next = sys_dlist_peek_next(&timeout_list, next); 73 | 74 | timeout->expired = true; 75 | sys_dlist_remove(&timeout->timed_node); 76 | 77 | if(!timeout->is_task) { 78 | sys_dlist_prepend(&expired_list, &timeout->timed_node); 79 | } else { 80 | TaskControBlock *timed_out = CONTAINER_OF(timeout, TaskControBlock,timeout); 81 | if(timeout->bonded_list != NULL) { 82 | SchedulerResetPriority(timeout->bonded_list, timed_out->priority); 83 | } 84 | CoreMakeTaskReady(timed_out); 85 | } 86 | } else { 87 | next = sys_dlist_peek_next(&timeout_list, next); 88 | } 89 | } 90 | 91 | #if CONFIG_ENABLE_TIMERS > 0 92 | 
if(!sys_dlist_is_empty(&expired_list)) { 93 | return HandleExpiredTimers(&expired_list); 94 | } 95 | #endif 96 | 97 | return kSuccess; 98 | } 99 | 100 | KernelResult AddTimeout(Timeout *timeout, 101 | uint32_t value, 102 | TimerCallback timeout_callback, 103 | void *user_data, 104 | bool is_task, 105 | TaskPriorityList *optional_list_to_bind) { 106 | ASSERT_PARAM(timeout); 107 | ASSERT_PARAM(value); 108 | 109 | if(value == KERNEL_WAIT_FOREVER){ 110 | return kSuccess; 111 | } 112 | 113 | timeout->is_task = is_task; 114 | timeout->next_wakeup_tick = tick_counter + value; 115 | timeout->timeout_callback = timeout_callback; 116 | timeout->user_data = user_data; 117 | timeout->expired = false; 118 | if(optional_list_to_bind != NULL) { 119 | timeout->bonded_list = optional_list_to_bind; 120 | } 121 | 122 | ArchCriticalSectionEnter(); 123 | sys_dlist_append(&timeout_list, &timeout->timed_node); 124 | ArchCriticalSectionExit(); 125 | 126 | return kSuccess; 127 | } 128 | 129 | KernelResult RemoveTimeout(Timeout *timeout) { 130 | ASSERT_PARAM(timeout); 131 | 132 | ArchCriticalSectionEnter(); 133 | sys_dlist_remove(&timeout->timed_node); 134 | ArchCriticalSectionExit(); 135 | 136 | return kSuccess; 137 | } -------------------------------------------------------------------------------- /src/core.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | TaskControBlock *current = NULL; 4 | static TaskControBlock *next_task = NULL; 5 | static TaskPriorityList ready_tasks_list; 6 | static bool initialized = false; 7 | static bool is_running = false; 8 | static TaskId task_idle_id; 9 | static sys_dlist_t tasks_waiting_to_delete = SYS_DLIST_STATIC_INIT(&tasks_waiting_to_delete); 10 | 11 | static void IdleTask(void *unused) { 12 | (void)unused; 13 | 14 | for(;;) { 15 | //Delete tasks waiting to termination: 16 | sys_dnode_t *task_node = sys_dlist_peek_head(&tasks_waiting_to_delete); 17 | if(task_node) { 18 | TaskControBlock *task = CONTAINER_OF(task_node, TaskControBlock, ready_node); 19 | 20 | ArchCriticalSectionEnter(); 21 | sys_dlist_remove(&task->ready_node); 22 | ArchCriticalSectionExit(); 23 | 24 | FreeRawBuffer(task->stackpointer); 25 | FreeTaskObject(task); 26 | } 27 | } 28 | } 29 | 30 | KernelResult CoreMakeTaskPending(TaskControBlock * task, uint32_t reason, TaskPriorityList *kobject_pending_list) { 31 | ASSERT_PARAM(task); 32 | ASSERT_PARAM(reason); 33 | 34 | ArchCriticalSectionEnter(); 35 | sys_dlist_remove(&task->ready_node); 36 | SchedulerResetPriority(&ready_tasks_list, task->priority); 37 | 38 | task->state = reason; 39 | 40 | if(kobject_pending_list) { 41 | sys_dlist_append(&kobject_pending_list->task_list[task->priority], &task->ready_node); 42 | SchedulerSetPriority(kobject_pending_list, task->priority); 43 | } 44 | 45 | if(reason & TASK_STATE_TERMINATED) { 46 | sys_dlist_append(&tasks_waiting_to_delete, &task->ready_node); 47 | } 48 | 49 | ArchCriticalSectionExit(); 50 | return kSuccess; 51 | } 52 | 53 | KernelResult CoreUnpendNextTask(TaskPriorityList *kobject_pending_list) { 54 | ASSERT_PARAM(kobject_pending_list); 55 | 56 | TaskControBlock *task = ScheduleTaskSet(kobject_pending_list); 57 | 58 | if(task) { 59 | ArchCriticalSectionEnter(); 60 | RemoveTimeout(&task->timeout); 61 | sys_dlist_remove(&task->ready_node); 62 | SchedulerResetPriority(kobject_pending_list, task->priority); 63 | ArchCriticalSectionExit(); 64 | 65 | return (CoreMakeTaskReady(task)); 66 | } else { 67 | return kErrorNothingToSchedule; 68 | } 69 | } 70 | 71 | 
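/* Places a task back on the global ready list: inside a critical section the
 * task state becomes TASK_STATE_READY, its node is appended to the list slot
 * matching its priority, and the corresponding bit of the ready bitmap is set
 * so the scheduler can find the highest ready priority in O(1). */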
KernelResult CoreMakeTaskReady(TaskControBlock * task) { 72 | ASSERT_PARAM(task); 73 | 74 | ArchCriticalSectionEnter(); 75 | 76 | task->state = TASK_STATE_READY; 77 | sys_dlist_append(&ready_tasks_list.task_list[task->priority], &task->ready_node); 78 | SchedulerSetPriority(&ready_tasks_list, task->priority); 79 | 80 | ArchCriticalSectionExit(); 81 | 82 | return kSuccess; 83 | } 84 | 85 | KernelResult CoreMakeAllTasksReady(TaskPriorityList *tasks) { 86 | ASSERT_PARAM(tasks); 87 | 88 | ArchCriticalSectionEnter(); 89 | 90 | while(!NothingToSched(tasks)) { 91 | CoreUnpendNextTask(tasks); 92 | } 93 | 94 | ArchCriticalSectionExit(); 95 | 96 | return kSuccess; 97 | } 98 | 99 | TaskControBlock * CoreTaskSwitch() { 100 | current = next_task; 101 | return next_task; 102 | } 103 | 104 | KernelResult CoreManageRoundRobin() { 105 | return SchedulerDoRoundRobin(&ready_tasks_list); 106 | } 107 | 108 | KernelResult CheckReschedule() { 109 | 110 | CoreSchedulingResume(); 111 | 112 | //We should not reeschedule if scheduler is still locked: 113 | if(IsSchedulerLocked(&ready_tasks_list)) { 114 | return kStatusSchedLocked; 115 | } 116 | 117 | next_task = ScheduleTaskSet(&ready_tasks_list); 118 | ASSERT_KERNEL(next_task, kErrorInvalidKernelState); 119 | 120 | //Shall we switch the context?: 121 | if(next_task != current && (is_running)) { 122 | return ArchYield(); 123 | } 124 | return kSuccess; 125 | } 126 | 127 | KernelResult CoreInitializeTaskList(TaskPriorityList *list) { 128 | ASSERT_PARAM(list); 129 | SchedulerInitTaskPriorityList(list); 130 | return kSuccess; 131 | } 132 | 133 | TaskControBlock * CoreGetCurrentTask() { 134 | return current; 135 | } 136 | 137 | KernelResult CoreInit() { 138 | 139 | if(initialized) { 140 | return kSuccess; 141 | } 142 | 143 | ArchCriticalSectionEnter(); 144 | initialized = true; 145 | InitializeObjectPools(); 146 | CoreInitializeTaskList(&ready_tasks_list); 147 | ArchCriticalSectionExit(); 148 | return kSuccess; 149 | } 150 | 151 | KernelResult CoreStart() { 152 | if(is_running) { 153 | return kSuccess; 154 | } 155 | 156 | ArchCriticalSectionEnter(); 157 | 158 | CoreInit(); 159 | 160 | #if CONFIG_USE_PLATFORM_INIT > 0 161 | PlatformInit(NULL); 162 | #endif 163 | 164 | ArchInitializeSpecifics(); 165 | 166 | TaskSettings settings; 167 | settings.arg = NULL; 168 | settings.function = IdleTask; 169 | settings.priority = 0; 170 | settings.stack_size = CONFIG_IDLE_TASK_STACK_SIZE; 171 | task_idle_id = TaskCreate(&settings); 172 | 173 | ASSERT_KERNEL(task_idle_id != NULL, kErrorInvalidParam); 174 | 175 | next_task = ScheduleTaskSet(&ready_tasks_list); 176 | ASSERT_KERNEL(next_task, kErrorInvalidKernelState); 177 | 178 | current = next_task; 179 | ArchCriticalSectionExit(); 180 | ArchStartKernel(); 181 | 182 | return kSuccess; 183 | } 184 | 185 | bool IsCoreRunning() { 186 | return is_running; 187 | } 188 | 189 | void CoreSetRunning() { 190 | 191 | if(!ArchInIsr()) { 192 | return; 193 | } 194 | 195 | is_running = true; 196 | } 197 | 198 | KernelResult CoreSchedulingSuspend() { 199 | return SchedulerLock(&ready_tasks_list); 200 | } 201 | 202 | KernelResult CoreSchedulingResume() { 203 | return SchedulerUnlock(&ready_tasks_list); 204 | } 205 | -------------------------------------------------------------------------------- /src/mutex.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #if CONFIG_ENABLE_MUTEXES > 0 4 | 5 | MutexId MutexCreate(){ 6 | 7 | CoreInit(); 8 | CoreSchedulingSuspend(); 9 | Mutex *mutex = 
AllocateMutexObject(); 10 | 11 | if(mutex == NULL) { 12 | CoreSchedulingResume(); 13 | return NULL; 14 | } 15 | 16 | mutex->old_priority = 0; 17 | mutex->owned = false; 18 | mutex->recursive_taking_count = 0; 19 | mutex->owner = NULL; 20 | 21 | KernelResult r = CoreInitializeTaskList(&mutex->pending_tasks); 22 | if(r != kSuccess) { 23 | FreeMutexObject(mutex); 24 | CoreSchedulingResume(); 25 | return NULL; 26 | } 27 | 28 | CoreSchedulingResume(); 29 | return((MutexId)mutex); 30 | } 31 | 32 | KernelResult MutexTryLock(MutexId mutex) { 33 | ASSERT_PARAM(mutex); 34 | ASSERT_KERNEL(!ArchInIsr(), kErrorInsideIsr); 35 | 36 | Mutex *m = (Mutex *)mutex; 37 | 38 | CoreSchedulingSuspend(); 39 | 40 | if(m->owned) { 41 | CoreSchedulingResume(); 42 | return kStatusMutexAlreadyTaken; 43 | } 44 | 45 | m->owned = true; 46 | 47 | if(m->recursive_taking_count < 0xFFFFFFFF) 48 | m->recursive_taking_count++; 49 | 50 | TaskControBlock *task = CoreGetCurrentTask(); 51 | m->owner = task; 52 | 53 | //Raise priority 54 | if(TaskGetPriority(task) < CONFIG_MUTEX_CEIL_PRIORITY) { 55 | m->old_priority = TaskSetPriority(task, CONFIG_MUTEX_CEIL_PRIORITY); 56 | } else { 57 | //Dont bump the priority if it already higher than mutex priority 58 | m->old_priority = TaskGetPriority(task); 59 | } 60 | 61 | return CheckReschedule(); 62 | } 63 | 64 | KernelResult MutexLock(MutexId mutex, uint32_t timeout) { 65 | ASSERT_PARAM(mutex); 66 | ASSERT_KERNEL(!ArchInIsr(), kErrorInsideIsr); 67 | 68 | Mutex *m = (Mutex *)mutex; 69 | 70 | CoreSchedulingSuspend(); 71 | 72 | if(!m->owned) { 73 | m->owned = true; 74 | 75 | if(m->recursive_taking_count < 0xFFFFFFFF) 76 | m->recursive_taking_count++; 77 | 78 | TaskControBlock *task = CoreGetCurrentTask(); 79 | m->owner = task; 80 | 81 | //Raise priority 82 | if(TaskGetPriority((TaskId)task) < CONFIG_MUTEX_CEIL_PRIORITY) { 83 | m->old_priority = TaskSetPriority((TaskId)task, CONFIG_MUTEX_CEIL_PRIORITY); 84 | } else { 85 | //Dont bump the priority if it already higher than mutex priority 86 | m->old_priority = TaskGetPriority((TaskId)task); 87 | } 88 | 89 | return CheckReschedule(); 90 | } 91 | 92 | if(timeout != KERNEL_NO_WAIT) { 93 | TaskControBlock *task = CoreGetCurrentTask(); 94 | CoreMakeTaskPending(task, TASK_STATE_PEND_MUTEX, &m->pending_tasks); 95 | AddTimeout(&task->timeout, timeout, NULL, NULL, true, &m->pending_tasks); 96 | CheckReschedule(); 97 | 98 | //Still locked? 
99 | if(task->timeout.expired) { 100 | return kErrorTimeout; 101 | } else { 102 | return kSuccess; 103 | } 104 | } else { 105 | CoreSchedulingResume(); 106 | return kStatusMutexAlreadyTaken; 107 | } 108 | } 109 | 110 | KernelResult MutexUnlock(MutexId mutex) { 111 | ASSERT_PARAM(mutex); 112 | ASSERT_KERNEL(!ArchInIsr(), kErrorInsideIsr); 113 | 114 | Mutex *m = (Mutex *)mutex; 115 | 116 | CoreSchedulingSuspend(); 117 | TaskControBlock *current = CoreGetCurrentTask(); 118 | 119 | if(current != (TaskControBlock *)m->owner) { 120 | CoreSchedulingResume(); 121 | return kErrorInvalidMutexOwner; 122 | } 123 | 124 | if(m->recursive_taking_count) { 125 | m->recursive_taking_count--; 126 | } 127 | 128 | if(m->recursive_taking_count != 0) { 129 | CoreSchedulingResume(); 130 | return kStatusMutexAlreadyTaken; 131 | } 132 | 133 | 134 | if(NothingToSched(&m->pending_tasks)) { 135 | m->owned = false; 136 | m->owner = NULL; 137 | 138 | m->old_priority = TaskSetPriority((TaskId)current, m->old_priority); 139 | CheckReschedule(); 140 | 141 | return kSuccess; 142 | } 143 | 144 | TaskControBlock *task = ScheduleTaskSet(&m->pending_tasks); 145 | m->owner = task; 146 | 147 | //Bumps the priority of next pending task: 148 | if(TaskGetPriority((TaskId)task) < CONFIG_MUTEX_CEIL_PRIORITY) { 149 | m->old_priority = TaskSetPriority((TaskId)task, CONFIG_MUTEX_CEIL_PRIORITY); 150 | } else { 151 | //Dont bump the priority if it already higher than mutex priority 152 | m->old_priority = TaskGetPriority((TaskId)task); 153 | } 154 | 155 | if(m->recursive_taking_count < 0xFFFFFFFF) 156 | m->recursive_taking_count++; 157 | 158 | CoreUnpendNextTask(&m->pending_tasks); 159 | m->old_priority = TaskSetPriority((TaskId)current, m->old_priority); 160 | 161 | return CheckReschedule(); 162 | } 163 | 164 | KernelResult MutexDelete(MutexId mutex) { 165 | ASSERT_PARAM(mutex); 166 | ASSERT_KERNEL(!ArchInIsr(), kErrorInsideIsr); 167 | 168 | Mutex *m = (Mutex *)mutex; 169 | 170 | CoreSchedulingSuspend(); 171 | CoreMakeAllTasksReady(&m->pending_tasks); 172 | FreeMutexObject(m); 173 | return CheckReschedule(); 174 | } 175 | 176 | #endif -------------------------------------------------------------------------------- /src/object_pool.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #ifndef CONFIG_KERNEL_HEAP_SIZE 5 | #warning "Heap size was not defined, defaulting to 4096 bytes" 6 | #define CONFIG_KERNEL_HEAP_SIZE 4096 7 | #endif 8 | 9 | static uint8_t kernel_heap[(CONFIG_KERNEL_HEAP_SIZE + sizeof(control_t) + ALIGN_SIZE) & ~(ALIGN_SIZE - 1)]; 10 | static tlsf_t kernel_tlsf = NULL; 11 | static uint32_t kernel_heap_free_bytes = CONFIG_KERNEL_HEAP_SIZE; 12 | 13 | KernelResult InitializeObjectPools() { 14 | ArchCriticalSectionEnter(); 15 | kernel_tlsf = tlsf_create_with_pool(&kernel_heap, sizeof(kernel_heap)); 16 | ArchCriticalSectionExit(); 17 | 18 | if(kernel_tlsf) { 19 | return kSuccess; 20 | } else { 21 | return kErrorNotEnoughKernelMemory; 22 | } 23 | } 24 | 25 | static void *KMalloc(uint32_t size) { 26 | ArchCriticalSectionEnter(); 27 | void *result = tlsf_malloc(kernel_tlsf, size); 28 | ArchCriticalSectionExit(); 29 | 30 | if(result) { 31 | kernel_heap_free_bytes -= tlsf_block_size(result); 32 | } 33 | 34 | return result; 35 | } 36 | 37 | static void KFree(void *memory) { 38 | ArchCriticalSectionEnter(); 39 | 40 | if(memory) { 41 | kernel_heap_free_bytes += tlsf_block_size(memory); 42 | } 43 | 44 | tlsf_free(kernel_tlsf, memory); 45 | ArchCriticalSectionExit(); 46 | } 
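/* KMalloc()/KFree() above wrap tlsf_malloc()/tlsf_free() in critical sections
 * and keep kernel_heap_free_bytes in sync using tlsf_block_size(), so the
 * helper below can report the free heap space without walking the TLSF pool. */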
47 | 48 | uint32_t GetKernelFreeBytesOnHeap() { 49 | if(!kernel_tlsf) { 50 | return 0; 51 | } else { 52 | return kernel_heap_free_bytes; 53 | } 54 | } 55 | 56 | uint8_t *AllocateRawBuffer(uint32_t size) { 57 | return ((uint8_t *)KMalloc(size)); 58 | } 59 | 60 | KernelResult FreeRawBuffer(uint8_t *self) { 61 | KFree(self); 62 | return kSuccess; 63 | } 64 | 65 | 66 | TaskControBlock *AllocateTaskObject() { 67 | TaskControBlock *task = KMalloc(sizeof(TaskControBlock)); 68 | return (task); 69 | } 70 | 71 | KernelResult FreeTaskObject(TaskControBlock *self) { 72 | KFree(self); 73 | return kSuccess; 74 | } 75 | 76 | Semaphore *AllocateSemaphoreObject() { 77 | 78 | Semaphore *semaphore = KMalloc(sizeof(Semaphore)); 79 | return (semaphore); 80 | } 81 | 82 | KernelResult FreeSemaphoreObject(Semaphore *self) { 83 | KFree(self); 84 | return kSuccess; 85 | } 86 | 87 | Mutex *AllocateMutexObject() { 88 | 89 | Mutex *mutex = KMalloc(sizeof(Mutex)); 90 | return (mutex); 91 | } 92 | 93 | KernelResult FreeMutexObject(Mutex *self) { 94 | KFree(self); 95 | return kSuccess; 96 | } 97 | 98 | Timer *AllocateTimerObject() { 99 | 100 | Timer *timer = KMalloc(sizeof(Timer)); 101 | return (timer); 102 | } 103 | 104 | KernelResult FreeTimerObject(Timer *self) { 105 | KFree(self); 106 | return kSuccess; 107 | } 108 | 109 | Queue *AllocateQueueObject() { 110 | Queue *queue = KMalloc(sizeof(Queue)); 111 | return (queue); 112 | } 113 | 114 | KernelResult FreeQueueObject(Queue *self) { 115 | KFree(self); 116 | return kSuccess; 117 | } 118 | -------------------------------------------------------------------------------- /src/queue.c: -------------------------------------------------------------------------------- 1 | #include 2 | #if CONFIG_ENABLE_QUEUES > 0 3 | 4 | QueueId QueueCreate(uint32_t noof_slots, uint32_t slot_size) { 5 | ASSERT_KERNEL(noof_slots, NULL); 6 | ASSERT_KERNEL(slot_size, NULL); 7 | 8 | CoreInit(); 9 | CoreSchedulingSuspend(); 10 | 11 | Queue *queue = AllocateQueueObject(); 12 | if(queue == NULL) { 13 | CoreSchedulingResume(); 14 | return NULL; 15 | } 16 | 17 | queue->empty = true; 18 | queue->noof_slots = noof_slots; 19 | queue->slot_size = slot_size; 20 | queue->full = false; 21 | queue->head = 0; 22 | queue->tail = 0; 23 | queue->available_slots = noof_slots; 24 | 25 | queue->buffer = AllocateRawBuffer(noof_slots * slot_size); 26 | if(!queue->buffer) { 27 | FreeQueueObject(queue); 28 | CoreSchedulingResume(); 29 | return NULL; 30 | } 31 | 32 | KernelResult r = CoreInitializeTaskList(&queue->reader_tasks_pending); 33 | if(r != kSuccess) { 34 | FreeRawBuffer(queue->buffer); 35 | FreeQueueObject(queue); 36 | CoreSchedulingResume(); 37 | return NULL; 38 | } 39 | 40 | r = CoreInitializeTaskList(&queue->writer_tasks_pending); 41 | if(r != kSuccess) { 42 | FreeRawBuffer(queue->buffer); 43 | FreeQueueObject(queue); 44 | CoreSchedulingResume(); 45 | return NULL; 46 | } 47 | 48 | CoreSchedulingResume(); 49 | return ((QueueId)queue); 50 | } 51 | 52 | KernelResult QueueInsert(QueueId queue, void *data, uint32_t data_size, uint32_t timeout) { 53 | ASSERT_PARAM(queue); 54 | ASSERT_PARAM(data); 55 | 56 | //If called from ISR, requires a IRQ safe block 57 | if(ArchInIsr()) { 58 | if(!ArchGetIsrNesting()){ 59 | return kErrorInvalidKernelState; 60 | } 61 | } 62 | 63 | Queue *q = (Queue *)queue; 64 | CoreSchedulingSuspend(); 65 | 66 | if(data_size != q->slot_size) { 67 | CoreSchedulingResume(); 68 | return kErrorInvalidParam; 69 | } 70 | 71 | if(!q->full) { 72 | 73 | uint32_t write_loc = q->tail * q->slot_size; 74 | 
75 | q->empty = false; 76 | memcpy(&q->buffer[write_loc], data, data_size); 77 | 78 | ArchCriticalSectionEnter(); 79 | if(q->available_slots) 80 | q->available_slots--; 81 | ArchCriticalSectionExit(); 82 | 83 | q->tail = ((q->tail + 1) % (q->noof_slots)); 84 | 85 | if(!q->available_slots){ 86 | q->full = true; 87 | } 88 | 89 | if(NothingToSched(&q->reader_tasks_pending)) { 90 | CoreSchedulingResume(); 91 | return kSuccess; 92 | } else { 93 | 94 | CoreUnpendNextTask(&q->reader_tasks_pending); 95 | 96 | //Not need to reeschedule a new unpended task in a ISR, 97 | //it will be done a single time after all ISRs 98 | //get processed 99 | if(ArchInIsr()) { 100 | CoreSchedulingResume(); 101 | return kSuccess; 102 | } else { 103 | return CheckReschedule(); 104 | } 105 | 106 | } 107 | } 108 | 109 | if(timeout == KERNEL_NO_WAIT) { 110 | CoreSchedulingResume(); 111 | return kErrorBufferFull; 112 | } 113 | 114 | if(ArchInIsr()) { 115 | CoreSchedulingResume(); 116 | return kErrorInsideIsr; 117 | } 118 | 119 | TaskControBlock *task = CoreGetCurrentTask(); 120 | CoreMakeTaskPending(task, TASK_STATE_PEND_QUEUE, &q->writer_tasks_pending); 121 | AddTimeout(&task->timeout, timeout, NULL, NULL, true, &q->writer_tasks_pending); 122 | CheckReschedule(); 123 | 124 | CoreSchedulingSuspend(); 125 | 126 | if(task->timeout.expired) { 127 | CoreSchedulingResume(); 128 | return kErrorTimeout; 129 | } else { 130 | uint32_t write_loc = q->tail * q->slot_size; 131 | 132 | q->empty = false; 133 | memcpy(&q->buffer[write_loc], data, data_size); 134 | 135 | ArchCriticalSectionEnter(); 136 | if(q->available_slots) 137 | q->available_slots--; 138 | ArchCriticalSectionExit(); 139 | 140 | q->tail = ((q->tail + 1) % (q->noof_slots)); 141 | 142 | if(!q->available_slots){ 143 | q->full = true; 144 | } 145 | 146 | CoreSchedulingResume(); 147 | return kSuccess; 148 | } 149 | } 150 | 151 | KernelResult QueuePeek(QueueId queue, void *data, uint32_t *data_size, uint32_t timeout) { 152 | ASSERT_PARAM(queue); 153 | ASSERT_PARAM(data); 154 | (void)data_size; 155 | 156 | //If called from ISR, requires a IRQ safe block 157 | if(ArchInIsr()) { 158 | if(!ArchGetIsrNesting()){ 159 | return kErrorInvalidKernelState; 160 | } 161 | } 162 | 163 | Queue *q = (Queue*)queue; 164 | 165 | CoreSchedulingSuspend(); 166 | 167 | if(!q->empty) { 168 | uint32_t read_loc = q->head * q->slot_size; 169 | memcpy(data, &q->buffer[read_loc], q->slot_size); 170 | CoreSchedulingResume(); 171 | return kSuccess; 172 | } 173 | 174 | 175 | if(timeout == KERNEL_NO_WAIT) { 176 | CoreSchedulingResume(); 177 | return kErrorBufferEmpty; 178 | } 179 | 180 | if(ArchInIsr()) { 181 | CoreSchedulingResume(); 182 | return kErrorInsideIsr; 183 | } 184 | 185 | TaskControBlock *task = CoreGetCurrentTask(); 186 | CoreMakeTaskPending(task, TASK_STATE_PEND_QUEUE, &q->reader_tasks_pending); 187 | AddTimeout(&task->timeout, timeout, NULL, NULL, true, &q->reader_tasks_pending); 188 | CheckReschedule(); 189 | 190 | CoreSchedulingSuspend(); 191 | 192 | if(task->timeout.expired) { 193 | CoreSchedulingResume(); 194 | return kErrorTimeout; 195 | } else { 196 | uint32_t read_loc = q->head; 197 | memcpy(data, &q->buffer[read_loc], q->slot_size); 198 | CoreSchedulingResume(); 199 | return kSuccess; 200 | } 201 | } 202 | 203 | KernelResult QueueRemove(QueueId queue, void *data, uint32_t *data_size, uint32_t timeout) { 204 | ASSERT_PARAM(queue); 205 | ASSERT_PARAM(data); 206 | (void)data_size; 207 | 208 | //If called from ISR, requires a IRQ safe block 209 | if(ArchInIsr()) { 210 | 
if(!ArchGetIsrNesting()){ 211 | return kErrorInvalidKernelState; 212 | } 213 | } 214 | 215 | Queue *q = (Queue*)queue; 216 | 217 | CoreSchedulingSuspend(); 218 | 219 | if(!q->empty) { 220 | uint32_t read_loc = q->head * q->slot_size; 221 | 222 | q->full = false; 223 | memcpy(data, &q->buffer[read_loc], q->slot_size); 224 | 225 | q->head = ((q->head + 1) % q->slot_size); 226 | 227 | ArchCriticalSectionEnter(); 228 | if(q->available_slots < 0xFFFFFFFF) 229 | q->available_slots++; 230 | q->head = read_loc; 231 | ArchCriticalSectionExit(); 232 | 233 | if(q->available_slots >= q->noof_slots) { 234 | q->available_slots = q->noof_slots; 235 | q->empty = true; 236 | } 237 | 238 | if(NothingToSched(&q->writer_tasks_pending)) { 239 | CoreSchedulingResume(); 240 | return kSuccess; 241 | } else { 242 | 243 | CoreUnpendNextTask(&q->writer_tasks_pending); 244 | 245 | //Not need to reeschedule a new unpended task in a ISR, 246 | //it will be done a single time after all ISRs 247 | //get processed 248 | if(ArchInIsr()) { 249 | CoreSchedulingResume(); 250 | return kSuccess; 251 | } else { 252 | return CheckReschedule(); 253 | } 254 | } 255 | 256 | } 257 | 258 | if(timeout == KERNEL_NO_WAIT) { 259 | CoreSchedulingResume(); 260 | return kErrorBufferEmpty; 261 | } 262 | 263 | if(ArchInIsr()) { 264 | CoreSchedulingResume(); 265 | return kErrorInsideIsr; 266 | } 267 | 268 | TaskControBlock *task = CoreGetCurrentTask(); 269 | CoreMakeTaskPending(task, TASK_STATE_PEND_QUEUE, &q->reader_tasks_pending); 270 | AddTimeout(&task->timeout, timeout, NULL, NULL, true, &q->reader_tasks_pending); 271 | CheckReschedule(); 272 | 273 | CoreSchedulingSuspend(); 274 | 275 | if(task->timeout.expired) { 276 | CoreSchedulingResume(); 277 | return kErrorTimeout; 278 | } else { 279 | 280 | uint32_t read_loc = q->head * q->slot_size; 281 | 282 | q->full = false; 283 | memcpy(data, &q->buffer[read_loc], q->slot_size); 284 | 285 | read_loc = ((read_loc + 1) % q->slot_size); 286 | 287 | ArchCriticalSectionEnter(); 288 | if(q->available_slots < 0xFFFFFFFF) 289 | q->available_slots++; 290 | q->head = read_loc; 291 | ArchCriticalSectionExit(); 292 | 293 | if(q->available_slots >= q->noof_slots) { 294 | q->available_slots = q->noof_slots; 295 | q->empty = true; 296 | } 297 | 298 | CoreSchedulingResume(); 299 | return kSuccess; 300 | } 301 | } 302 | 303 | KernelResult QueueDelete(QueueId queue) { 304 | ASSERT_PARAM(queue); 305 | ASSERT_KERNEL(!ArchInIsr(), kErrorInsideIsr); 306 | 307 | Queue *q = (Queue *)queue; 308 | 309 | CoreSchedulingSuspend(); 310 | CoreMakeAllTasksReady(&q->writer_tasks_pending); 311 | CoreMakeAllTasksReady(&q->reader_tasks_pending); 312 | FreeRawBuffer(q->buffer); 313 | FreeQueueObject(q); 314 | 315 | return CheckReschedule(); 316 | } 317 | 318 | #endif -------------------------------------------------------------------------------- /src/sched_fifo.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | 4 | bool IsSchedulerLocked(TaskPriorityList *taskset) { 5 | 6 | ArchCriticalSectionEnter(); 7 | bool result = ((taskset->lock_level == 0) ? 
false : true); 8 | ArchCriticalSectionExit(); 9 | 10 | return result; 11 | } 12 | 13 | KernelResult SchedulerLock(TaskPriorityList *taskset) { 14 | ASSERT_PARAM(taskset); 15 | 16 | ArchCriticalSectionEnter(); 17 | if(taskset->lock_level < 0xFFFFFFFF) 18 | taskset->lock_level++; 19 | ArchCriticalSectionExit(); 20 | 21 | return kStatusSchedLocked; 22 | } 23 | 24 | KernelResult SchedulerUnlock(TaskPriorityList *taskset) { 25 | ASSERT_PARAM(taskset); 26 | 27 | ArchCriticalSectionEnter(); 28 | if(taskset->lock_level > 0x0) 29 | taskset->lock_level--; 30 | ArchCriticalSectionExit(); 31 | 32 | return (taskset->lock_level) ? kStatusSchedLocked : kStatusSchedUnlocked; 33 | } 34 | 35 | 36 | TaskControBlock *ScheduleTaskSet(TaskPriorityList *taskset) { 37 | ASSERT_KERNEL(taskset, NULL); 38 | 39 | if(taskset->lock_level) { 40 | return NULL; 41 | } 42 | 43 | ArchCriticalSectionEnter(); 44 | 45 | uint8_t top_priority = (31 - ArchCountLeadZeros(taskset->ready_task_bitmap)); 46 | sys_dnode_t *node = NULL; 47 | TaskControBlock *top_priority_task = NULL; 48 | 49 | node = sys_dlist_peek_head(&taskset->task_list[top_priority]); 50 | top_priority_task = CONTAINER_OF(node, TaskControBlock, ready_node); 51 | 52 | ArchCriticalSectionExit(); 53 | 54 | return (top_priority_task); 55 | } 56 | 57 | void SchedulerInitTaskPriorityList(TaskPriorityList *list) { 58 | ArchCriticalSectionEnter(); 59 | 60 | for(uint32_t i = 0; i < CONFIG_PRIORITY_LEVELS; i++) { 61 | sys_dlist_init(&list->task_list[i]); 62 | } 63 | list->ready_task_bitmap = 0; 64 | list->lock_level = 0; 65 | 66 | ArchCriticalSectionExit(); 67 | } 68 | 69 | bool NothingToSched(TaskPriorityList *list) { 70 | ASSERT_PARAM(list); 71 | 72 | ArchCriticalSectionEnter(); 73 | bool result = (list->ready_task_bitmap == 0 ? true : false ); 74 | ArchCriticalSectionExit(); 75 | 76 | return result; 77 | } 78 | 79 | KernelResult SchedulerSetPriority(TaskPriorityList *list, uint32_t priority) { 80 | ASSERT_PARAM(list); 81 | ASSERT_PARAM(priority < CONFIG_PRIORITY_LEVELS); 82 | 83 | ArchCriticalSectionEnter(); 84 | list->ready_task_bitmap |= (1 << priority); 85 | ArchCriticalSectionExit(); 86 | return kSuccess; 87 | } 88 | 89 | KernelResult SchedulerResetPriority(TaskPriorityList *list, uint32_t priority) { 90 | ASSERT_PARAM(list); 91 | ASSERT_PARAM(priority < CONFIG_PRIORITY_LEVELS); 92 | 93 | ArchCriticalSectionEnter(); 94 | if(sys_dlist_is_empty(&list->task_list[priority])) { 95 | list->ready_task_bitmap &= ~(1 << priority); 96 | ArchCriticalSectionExit(); 97 | return kSuccess; 98 | } else { 99 | ArchCriticalSectionExit(); 100 | return kErrorBufferFull; 101 | } 102 | } -------------------------------------------------------------------------------- /src/sched_round_robin.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #if CONFIG_ENABLE_ROUND_ROBIN_SCHED 4 | 5 | KernelResult SchedulerDoRoundRobin(TaskPriorityList *list) { 6 | ASSERT_PARAM(list); 7 | 8 | uint8_t top_priority = (31 - ArchCountLeadZeros(list->ready_task_bitmap)); 9 | 10 | ArchCriticalSectionEnter(); 11 | sys_dnode_t *current_head = sys_dlist_peek_head(&list->task_list[top_priority]); 12 | sys_dnode_t *next = sys_dlist_peek_next(&list->task_list[top_priority], current_head); 13 | 14 | //The list has at least one more element, round robin allowed: 15 | if(next) { 16 | //move current head to the tail of the list, dont worry to 17 | //scheduling, the core module is responsible to manage 18 | //ready lists 19 | sys_dlist_remove(current_head); 20 | 
sys_dlist_append(&list->task_list[top_priority], current_head); 21 | } 22 | 23 | ArchCriticalSectionExit(); 24 | return kSuccess; 25 | } 26 | 27 | #endif -------------------------------------------------------------------------------- /src/semaphore.c: -------------------------------------------------------------------------------- 1 | #include 2 | #if CONFIG_ENABLE_SEMAPHORES > 0 3 | 4 | SemaphoreId SemaphoreCreate(uint32_t initial, uint32_t limit) { 5 | ASSERT_KERNEL(limit, NULL); 6 | 7 | CoreInit(); 8 | CoreSchedulingSuspend(); 9 | 10 | Semaphore *semaphore = AllocateSemaphoreObject(); 11 | if(semaphore == NULL) { 12 | CoreSchedulingResume(); 13 | return NULL; 14 | } 15 | 16 | 17 | semaphore->count = initial; 18 | semaphore->limit = limit; 19 | KernelResult r = CoreInitializeTaskList(&semaphore->pending_tasks); 20 | 21 | if(r != kSuccess) { 22 | FreeSemaphoreObject(semaphore); 23 | CoreSchedulingResume(); 24 | return NULL; 25 | } 26 | 27 | CoreSchedulingResume(); 28 | return ((SemaphoreId)semaphore); 29 | } 30 | 31 | KernelResult SemaphoreTake(SemaphoreId semaphore, uint32_t timeout) { 32 | ASSERT_PARAM(semaphore); 33 | ASSERT_KERNEL(!ArchInIsr(), kErrorInsideIsr); 34 | 35 | CoreSchedulingSuspend(); 36 | Semaphore * s = (Semaphore *)semaphore; 37 | 38 | if(s->count) { 39 | s->count--; 40 | CoreSchedulingResume(); 41 | return kSuccess; 42 | } 43 | 44 | if(timeout != KERNEL_NO_WAIT) { 45 | TaskControBlock *task = CoreGetCurrentTask(); 46 | CoreMakeTaskPending(task, TASK_STATE_PEND_SEMAPHORE, &s->pending_tasks); 47 | AddTimeout(&task->timeout, timeout, NULL, NULL, true, &s->pending_tasks); 48 | CheckReschedule(); 49 | 50 | //Still locked or expired: 51 | if(task->timeout.expired) { 52 | return kErrorTimeout; 53 | } 54 | return kSuccess; 55 | 56 | } else { 57 | CoreSchedulingResume(); 58 | return kStatusSemaphoreUnavailable; 59 | } 60 | } 61 | 62 | KernelResult SemaphoreGive(SemaphoreId semaphore, uint32_t count) { 63 | ASSERT_PARAM(semaphore); 64 | ASSERT_PARAM(count); 65 | 66 | //If called from ISR, requires a IRQ safe block 67 | if(ArchInIsr()) { 68 | if(!ArchGetIsrNesting()){ 69 | return kErrorInvalidKernelState; 70 | } 71 | } 72 | 73 | CoreSchedulingSuspend(); 74 | Semaphore * s = (Semaphore *)semaphore; 75 | 76 | s->count += count; 77 | (s->count > s->limit) ? 
s->count = s->limit : s->count; 78 | 79 | if(NothingToSched(&s->pending_tasks)) { 80 | CoreSchedulingResume(); 81 | return kSuccess; 82 | } else { 83 | 84 | if(s->count > 0) { 85 | s->count--; 86 | } 87 | 88 | CoreUnpendNextTask(&s->pending_tasks); 89 | 90 | if(ArchInIsr()) { 91 | CoreSchedulingResume(); 92 | return kSuccess; 93 | } else { 94 | return CheckReschedule(); 95 | } 96 | } 97 | } 98 | 99 | KernelResult SemaphoreDelete (SemaphoreId semaphore) { 100 | ASSERT_PARAM(semaphore); 101 | ASSERT_KERNEL(!ArchInIsr(), kErrorInsideIsr); 102 | Semaphore * s = (Semaphore *)semaphore; 103 | 104 | CoreSchedulingSuspend(); 105 | CoreMakeAllTasksReady(&s->pending_tasks); 106 | FreeSemaphoreObject(s); 107 | return CheckReschedule(); 108 | } 109 | 110 | #endif -------------------------------------------------------------------------------- /src/task.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | TaskId TaskCreate(TaskSettings *settings) { 4 | ASSERT_KERNEL(settings, NULL); 5 | ASSERT_KERNEL(settings->function,NULL); 6 | ASSERT_KERNEL(settings->stack_size, NULL); 7 | ASSERT_KERNEL(settings->priority <= CONFIG_PRIORITY_LEVELS, NULL); 8 | ASSERT_KERNEL(settings->priority >= 0, NULL); 9 | 10 | CoreInit(); 11 | 12 | CoreSchedulingSuspend(); 13 | TaskControBlock *task = AllocateTaskObject(); 14 | 15 | if(task == NULL) { 16 | CoreSchedulingResume(); 17 | return NULL; 18 | } 19 | 20 | task->entry_point = settings->function; 21 | task->priority = settings->priority; 22 | task->arg1 = settings->arg; 23 | task->stack_size = settings->stack_size; 24 | task->state = 0; 25 | 26 | task->stackpointer = AllocateRawBuffer(settings->stack_size); 27 | if(!task->stackpointer) { 28 | FreeTaskObject(task); 29 | CoreSchedulingResume(); 30 | return NULL; 31 | } 32 | 33 | KernelResult r = ArchNewTask(task, task->stackpointer, task->stack_size); 34 | if(r != kSuccess) { 35 | FreeRawBuffer(task->stackpointer); 36 | FreeTaskObject(task); 37 | CoreSchedulingResume(); 38 | return NULL; 39 | } 40 | 41 | CoreMakeTaskReady(task); 42 | CheckReschedule(); 43 | 44 | return ((TaskId)task); 45 | } 46 | 47 | KernelResult TaskSuspend(TaskId task_id) { 48 | ASSERT_PARAM(task_id); 49 | 50 | //If called from ISR, requires a IRQ safe block 51 | if(ArchInIsr()) { 52 | if(!ArchGetIsrNesting()){ 53 | return kErrorInvalidKernelState; 54 | } 55 | } 56 | 57 | TaskControBlock *task = (TaskControBlock *)task_id; 58 | CoreSchedulingSuspend(); 59 | 60 | if(task->state & TASK_STATE_SUPENDED) { 61 | CoreSchedulingResume(); 62 | return kErrorTaskAlreadySuspended; 63 | } 64 | 65 | CoreMakeTaskPending(task, TASK_STATE_SUPENDED, NULL); 66 | return (CheckReschedule()); 67 | } 68 | 69 | KernelResult TaskResume(TaskId task_id) { 70 | ASSERT_PARAM(task_id); 71 | 72 | //If called from ISR, requires a IRQ safe block 73 | if(ArchInIsr()) { 74 | if(!ArchGetIsrNesting()){ 75 | return kErrorInvalidKernelState; 76 | } 77 | } 78 | 79 | TaskControBlock *task = (TaskControBlock *)task_id; 80 | CoreSchedulingSuspend(); 81 | 82 | if((task->state & TASK_STATE_SUPENDED) == 0) { 83 | CoreSchedulingResume(); 84 | return kErrorTaskAlreadyResumed; 85 | } 86 | 87 | CoreMakeTaskReady(task); 88 | 89 | //Not need to reeschedule a new unpended task in a ISR, 90 | //it will be done a single time after all ISRs 91 | //get processed 92 | if(ArchInIsr()) { 93 | CoreSchedulingResume(); 94 | return kSuccess; 95 | } else { 96 | return CheckReschedule(); 97 | } 98 | } 99 | 100 | KernelResult TaskDelete(TaskId task_id) { 101 | 
ASSERT_PARAM(task_id); 102 | 103 | TaskControBlock *task = (TaskControBlock *)task_id; 104 | CoreSchedulingSuspend(); 105 | 106 | CoreMakeTaskPending(task, TASK_STATE_TERMINATED, NULL); 107 | 108 | return (CheckReschedule()); 109 | } 110 | 111 | uint32_t TaskSetPriority(TaskId task_id, uint32_t new_priority) { 112 | ASSERT_KERNEL(task_id, 0xFFFFFFFF); 113 | ASSERT_KERNEL(new_priority < CONFIG_PRIORITY_LEVELS, 0xFFFFFFFF); 114 | 115 | //If called from ISR, requires a IRQ safe block 116 | if(ArchInIsr()) { 117 | if(!ArchGetIsrNesting()){ 118 | return kErrorInvalidKernelState; 119 | } 120 | } 121 | 122 | TaskControBlock *task = (TaskControBlock *)task_id; 123 | CoreSchedulingSuspend(); 124 | 125 | uint32_t old_prio = task->priority; 126 | if(task->state == TASK_STATE_READY) { 127 | //Force ready task to be moved to correct place on ready queue; 128 | //Suspended task will be moved once the pending condition terminates 129 | CoreMakeTaskPending(task, TASK_STATE_SUPENDED, NULL); 130 | task->priority = new_priority; 131 | CoreMakeTaskReady(task); 132 | } 133 | 134 | //Not need to reeschedule a new unpended task in a ISR, 135 | //it will be done a single time after all ISRs 136 | //get processed 137 | if(ArchInIsr()) { 138 | CoreSchedulingResume(); 139 | return (old_prio); 140 | } else { 141 | CheckReschedule(); 142 | return (old_prio); 143 | } 144 | } 145 | 146 | uint32_t TaskGetPriority(TaskId task_id) { 147 | ASSERT_PARAM(task_id); 148 | TaskControBlock *task = (TaskControBlock *)task_id; 149 | 150 | return(task->priority); 151 | } 152 | 153 | KernelResult TaskYield() { 154 | ASSERT_KERNEL(!ArchInIsr(), kErrorInsideIsr); 155 | 156 | TaskControBlock *task = CoreGetCurrentTask(); 157 | CoreMakeTaskPending(task, TASK_STATE_SUPENDED, NULL); 158 | CoreMakeTaskReady(task); 159 | CheckReschedule(); 160 | return (kSuccess); 161 | } 162 | 163 | -------------------------------------------------------------------------------- /src/timer.c: -------------------------------------------------------------------------------- 1 | #include 2 | #if CONFIG_ENABLE_TIMERS > 0 3 | 4 | TimerId TimerCreate(TimerCallback callback, uint32_t expiry_time, uint32_t period_time, void *user_data) { 5 | ASSERT_KERNEL(callback, NULL); 6 | ASSERT_KERNEL(expiry_time, NULL); 7 | 8 | CoreInit(); 9 | CoreSchedulingSuspend(); 10 | Timer *timer = AllocateTimerObject(); 11 | 12 | if(timer == NULL) { 13 | CoreSchedulingResume(); 14 | return NULL; 15 | } 16 | timer->callback = callback; 17 | timer->expired=false; 18 | timer->running=false; 19 | timer->expiry_time = expiry_time; 20 | timer->user_data = user_data; 21 | 22 | if(period_time) { 23 | timer->period_time = period_time; 24 | timer->periodic = true; 25 | } else { 26 | timer->period_time = 0; 27 | timer->periodic = false; 28 | } 29 | 30 | CoreSchedulingResume(); 31 | return ((TimerId) timer); 32 | } 33 | 34 | KernelResult TimerStart(TimerId timer) { 35 | ASSERT_PARAM(timer); 36 | Timer * t = (Timer *)timer; 37 | 38 | CoreSchedulingSuspend(); 39 | KernelResult result = RemoveTimeout(&t->timeout); 40 | 41 | if(result == kSuccess) { 42 | result = AddTimeout(&t->timeout, t->expiry_time, t->callback, t->user_data, false, NULL); 43 | if(result == kSuccess) { 44 | t->expired = false; 45 | t->running = true; 46 | } 47 | } 48 | 49 | CoreSchedulingResume(); 50 | return result; 51 | } 52 | 53 | KernelResult TimerStop(TimerId timer) { 54 | ASSERT_PARAM(timer); 55 | Timer * t = (Timer *)timer; 56 | 57 | CoreSchedulingSuspend(); 58 | if(!t->running) { 59 | CoreSchedulingResume(); 60 | return 
kErrorTimerIsNotRunning; 61 | } 62 | 63 | KernelResult result = RemoveTimeout(&t->timeout); 64 | if(result == kSuccess) { 65 | t->running = false; 66 | t->expired = false; 67 | } 68 | 69 | CoreSchedulingResume(); 70 | return result; 71 | } 72 | 73 | KernelResult TimerSetValues(TimerId timer, uint32_t expiry_time, uint32_t period_time) { 74 | ASSERT_PARAM(timer); 75 | ASSERT_PARAM(expiry_time); 76 | 77 | Timer *t = (Timer *)timer; 78 | 79 | CoreSchedulingSuspend(); 80 | 81 | if(t->running) { 82 | RemoveTimeout(&t->timeout); 83 | } 84 | 85 | t->expired=false; 86 | t->running=false; 87 | t->expiry_time = expiry_time; 88 | 89 | if(period_time) { 90 | t->period_time = period_time; 91 | t->periodic = true; 92 | } else { 93 | t->period_time = 0; 94 | t->periodic = false; 95 | } 96 | 97 | CoreSchedulingResume(); 98 | return kSuccess; 99 | } 100 | 101 | KernelResult TimerDelete(TimerId timer) { 102 | ASSERT_PARAM(timer); 103 | Timer * t = (Timer *)timer; 104 | 105 | CoreSchedulingSuspend(); 106 | 107 | if(t->running) { 108 | RemoveTimeout(&t->timeout); 109 | } 110 | t->callback = NULL; 111 | FreeTimerObject(t); 112 | CoreSchedulingResume(); 113 | 114 | return kSuccess; 115 | } 116 | 117 | #endif -------------------------------------------------------------------------------- /utils/print_out.c: -------------------------------------------------------------------------------- 1 | 2 | __attribute__ ((weak)) void _putchar(char c) { 3 | (void)c; 4 | } 5 | 6 | -------------------------------------------------------------------------------- /utils/tlsf.c: -------------------------------------------------------------------------------- 1 | #include <assert.h> 2 | #include <limits.h> 3 | #include <stddef.h> 4 | #include <stdio.h> 5 | #include <stdlib.h> 6 | #include <string.h> 7 | 8 | #include "tlsf.h" 9 | 10 | /* 11 | ** Architecture-specific bit manipulation routines. 12 | ** 13 | ** TLSF achieves O(1) cost for malloc and free operations by limiting 14 | ** the search for a free block to a free list of guaranteed size 15 | ** adequate to fulfill the request, combined with efficient free list 16 | ** queries using bitmasks and architecture-specific bit-manipulation 17 | ** routines. 18 | ** 19 | ** Most modern processors provide instructions to count leading zeroes 20 | ** in a word, find the lowest and highest set bit, etc. These 21 | ** specific implementations will be used when available, falling back 22 | ** to a reasonably efficient generic implementation. 23 | ** 24 | ** NOTE: TLSF spec relies on ffs/fls returning value 0..31. 25 | ** ffs/fls return 1-32 by default, returning 0 for error. 26 | */ 27 | static inline int tlsf_ffs(unsigned int word) 28 | { 29 | return __builtin_ffs(word) - 1; 30 | } 31 | 32 | static inline int tlsf_fls(unsigned int word) 33 | { 34 | const int bit = word ? 32 - __builtin_clz(word) : 0; 35 | return bit - 1; 36 | } 37 | 38 | #define tlsf_fls_sizet tlsf_fls 39 | 40 | /* 41 | ** Cast and min/max macros. 42 | */ 43 | 44 | #define tlsf_cast(t, exp) ((t) (exp)) 45 | #define tlsf_min(a, b) ((a) < (b) ? (a) : (b)) 46 | #define tlsf_max(a, b) ((a) > (b) ? (a) : (b)) 47 | 48 | /* 49 | ** Set assert macro, if it has not been provided by the user. 50 | */ 51 | #if !defined (tlsf_assert) 52 | #define tlsf_assert assert 53 | #endif 54 | 55 | /* 56 | ** Static assertion mechanism. 57 | */ 58 | 59 | #define _tlsf_glue2(x, y) x ## y 60 | #define _tlsf_glue(x, y) _tlsf_glue2(x, y) 61 | #define tlsf_static_assert(exp) \ 62 | typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ?
1 : -1] 63 | 64 | /* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */ 65 | tlsf_static_assert(sizeof(int) * CHAR_BIT == 32); 66 | tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32); 67 | tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64); 68 | 69 | /* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */ 70 | tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT); 71 | 72 | /* Ensure we've properly tuned our sizes. */ 73 | tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT); 74 | 75 | /* 76 | ** Data structures and associated constants. 77 | */ 78 | 79 | /* 80 | ** Since block sizes are always at least a multiple of 4, the two least 81 | ** significant bits of the size field are used to store the block status: 82 | ** - bit 0: whether block is busy or free 83 | ** - bit 1: whether previous block is busy or free 84 | */ 85 | static const size_t block_header_free_bit = 1 << 0; 86 | static const size_t block_header_prev_free_bit = 1 << 1; 87 | 88 | /* 89 | ** The size of the block header exposed to used blocks is the size field. 90 | ** The prev_phys_block field is stored *inside* the previous free block. 91 | */ 92 | static const size_t block_header_overhead = sizeof(size_t); 93 | 94 | /* User data starts directly after the size field in a used block. */ 95 | static const size_t block_start_offset = 96 | offsetof(block_header_t, size) + sizeof(size_t); 97 | 98 | /* 99 | ** A free block must be large enough to store its header minus the size of 100 | ** the prev_phys_block field, and no larger than the number of addressable 101 | ** bits for FL_INDEX. 102 | */ 103 | static const size_t block_size_min = 104 | sizeof(block_header_t) - sizeof(block_header_t*); 105 | static const size_t block_size_max = tlsf_cast(size_t, 1) << FL_INDEX_MAX; 106 | 107 | /* A type used for casting when doing pointer arithmetic. */ 108 | typedef ptrdiff_t tlsfptr_t; 109 | 110 | /* 111 | ** block_header_t member functions. 
112 | */ 113 | 114 | static inline size_t block_size(const block_header_t* block) 115 | { 116 | return block->size & ~(block_header_free_bit | block_header_prev_free_bit); 117 | } 118 | 119 | static inline void block_set_size(block_header_t* block, size_t size) 120 | { 121 | const size_t oldsize = block->size; 122 | block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit)); 123 | } 124 | 125 | static inline int block_is_last(const block_header_t* block) 126 | { 127 | return block_size(block) == 0; 128 | } 129 | 130 | static inline int block_is_free(const block_header_t* block) 131 | { 132 | return tlsf_cast(int, block->size & block_header_free_bit); 133 | } 134 | 135 | static inline void block_set_free(block_header_t* block) 136 | { 137 | block->size |= block_header_free_bit; 138 | } 139 | 140 | static inline void block_set_used(block_header_t* block) 141 | { 142 | block->size &= ~block_header_free_bit; 143 | } 144 | 145 | static inline int block_is_prev_free(const block_header_t* block) 146 | { 147 | return tlsf_cast(int, block->size & block_header_prev_free_bit); 148 | } 149 | 150 | static inline void block_set_prev_free(block_header_t* block) 151 | { 152 | block->size |= block_header_prev_free_bit; 153 | } 154 | 155 | static inline void block_set_prev_used(block_header_t* block) 156 | { 157 | block->size &= ~block_header_prev_free_bit; 158 | } 159 | 160 | static inline block_header_t* block_from_ptr(const void* ptr) 161 | { 162 | return tlsf_cast(block_header_t*, 163 | tlsf_cast(unsigned char*, ptr) - block_start_offset); 164 | } 165 | 166 | static inline void* block_to_ptr(const block_header_t* block) 167 | { 168 | return tlsf_cast(void*, 169 | tlsf_cast(unsigned char*, block) + block_start_offset); 170 | } 171 | 172 | /* Return location of next block after block of given size. */ 173 | static inline block_header_t* offset_to_block(const void* ptr, size_t size) 174 | { 175 | return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size); 176 | } 177 | 178 | /* Return location of previous block. */ 179 | static inline block_header_t* block_prev(const block_header_t* block) 180 | { 181 | tlsf_assert(block_is_prev_free(block) && "previous block must be free"); 182 | return block->prev_phys_block; 183 | } 184 | 185 | /* Return location of next existing block. */ 186 | static inline block_header_t* block_next(const block_header_t* block) 187 | { 188 | block_header_t* next = offset_to_block(block_to_ptr(block), 189 | block_size(block) - block_header_overhead); 190 | tlsf_assert(!block_is_last(block)); 191 | return next; 192 | } 193 | 194 | /* Link a new block with its physical neighbor, return the neighbor. */ 195 | static inline block_header_t* block_link_next(block_header_t* block) 196 | { 197 | block_header_t* next = block_next(block); 198 | next->prev_phys_block = block; 199 | return next; 200 | } 201 | 202 | static inline void block_mark_as_free(block_header_t* block) 203 | { 204 | /* Link the block to the next block, first. 
*/ 205 | block_header_t* next = block_link_next(block); 206 | block_set_prev_free(next); 207 | block_set_free(block); 208 | } 209 | 210 | static inline void block_mark_as_used(block_header_t* block) 211 | { 212 | block_header_t* next = block_next(block); 213 | block_set_prev_used(next); 214 | block_set_used(block); 215 | } 216 | 217 | static inline size_t align_up(size_t x, size_t align) 218 | { 219 | tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two"); 220 | return (x + (align - 1)) & ~(align - 1); 221 | } 222 | 223 | static inline size_t align_down(size_t x, size_t align) 224 | { 225 | tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two"); 226 | return x - (x & (align - 1)); 227 | } 228 | 229 | static inline void* align_ptr(const void* ptr, size_t align) 230 | { 231 | const tlsfptr_t aligned = 232 | (tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1); 233 | tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two"); 234 | return tlsf_cast(void*, aligned); 235 | } 236 | 237 | /* 238 | ** Adjust an allocation size to be aligned to word size, and no smaller 239 | ** than internal minimum. 240 | */ 241 | static inline size_t adjust_request_size(size_t size, size_t align) 242 | { 243 | size_t adjust = 0; 244 | if (size) 245 | { 246 | const size_t aligned = align_up(size, align); 247 | 248 | /* aligned sized must not exceed block_size_max or we'll go out of bounds on sl_bitmap */ 249 | if (aligned < block_size_max) 250 | { 251 | adjust = tlsf_max(aligned, block_size_min); 252 | } 253 | } 254 | return adjust; 255 | } 256 | 257 | /* 258 | ** TLSF utility functions. In most cases, these are direct translations of 259 | ** the documentation found in the white paper. 260 | */ 261 | static void mapping_insert(size_t size, int* fli, int* sli) 262 | { 263 | int fl, sl; 264 | if (size < SMALL_BLOCK_SIZE) 265 | { 266 | /* Store small blocks in first list. */ 267 | fl = 0; 268 | sl = tlsf_cast(int, size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT); 269 | } 270 | else 271 | { 272 | fl = tlsf_fls_sizet(size); 273 | sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2); 274 | fl -= (FL_INDEX_SHIFT - 1); 275 | } 276 | *fli = fl; 277 | *sli = sl; 278 | } 279 | 280 | /* This version rounds up to the next block size (for allocations) */ 281 | static void mapping_search(size_t size, int* fli, int* sli) 282 | { 283 | if (size >= SMALL_BLOCK_SIZE) 284 | { 285 | const size_t round = (1 << (tlsf_fls_sizet(size) - SL_INDEX_COUNT_LOG2)) - 1; 286 | size += round; 287 | } 288 | mapping_insert(size, fli, sli); 289 | } 290 | 291 | static block_header_t* search_suitable_block(control_t* control, int* fli, int* sli) 292 | { 293 | int fl = *fli; 294 | int sl = *sli; 295 | 296 | /* 297 | ** First, search for a block in the list associated with the given 298 | ** fl/sl index. 299 | */ 300 | unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl); 301 | if (!sl_map) 302 | { 303 | /* No block exists. Search in the next largest first-level list. */ 304 | const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1)); 305 | if (!fl_map) 306 | { 307 | /* No free blocks available, memory has been exhausted. */ 308 | return 0; 309 | } 310 | 311 | fl = tlsf_ffs(fl_map); 312 | *fli = fl; 313 | sl_map = control->sl_bitmap[fl]; 314 | } 315 | tlsf_assert(sl_map && "internal error - second level bitmap is null"); 316 | sl = tlsf_ffs(sl_map); 317 | *sli = sl; 318 | 319 | /* Return the first block in the free list. 
*/ 320 | return control->blocks[fl][sl]; 321 | } 322 | 323 | /* Remove a free block from the free list.*/ 324 | static void remove_free_block(control_t* control, block_header_t* block, int fl, int sl) 325 | { 326 | block_header_t* prev = block->prev_free; 327 | block_header_t* next = block->next_free; 328 | tlsf_assert(prev && "prev_free field can not be null"); 329 | tlsf_assert(next && "next_free field can not be null"); 330 | next->prev_free = prev; 331 | prev->next_free = next; 332 | 333 | /* If this block is the head of the free list, set new head. */ 334 | if (control->blocks[fl][sl] == block) 335 | { 336 | control->blocks[fl][sl] = next; 337 | 338 | /* If the new head is null, clear the bitmap. */ 339 | if (next == &control->block_null) 340 | { 341 | control->sl_bitmap[fl] &= ~(1 << sl); 342 | 343 | /* If the second bitmap is now empty, clear the fl bitmap. */ 344 | if (!control->sl_bitmap[fl]) 345 | { 346 | control->fl_bitmap &= ~(1 << fl); 347 | } 348 | } 349 | } 350 | } 351 | 352 | /* Insert a free block into the free block list. */ 353 | static void insert_free_block(control_t* control, block_header_t* block, int fl, int sl) 354 | { 355 | block_header_t* current = control->blocks[fl][sl]; 356 | tlsf_assert(current && "free list cannot have a null entry"); 357 | tlsf_assert(block && "cannot insert a null entry into the free list"); 358 | block->next_free = current; 359 | block->prev_free = &control->block_null; 360 | current->prev_free = block; 361 | 362 | tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE) 363 | && "block not aligned properly"); 364 | /* 365 | ** Insert the new block at the head of the list, and mark the first- 366 | ** and second-level bitmaps appropriately. 367 | */ 368 | control->blocks[fl][sl] = block; 369 | control->fl_bitmap |= (1 << fl); 370 | control->sl_bitmap[fl] |= (1 << sl); 371 | } 372 | 373 | /* Remove a given block from the free list. */ 374 | static void block_remove(control_t* control, block_header_t* block) 375 | { 376 | int fl, sl; 377 | mapping_insert(block_size(block), &fl, &sl); 378 | remove_free_block(control, block, fl, sl); 379 | } 380 | 381 | /* Insert a given block into the free list. */ 382 | static void block_insert(control_t* control, block_header_t* block) 383 | { 384 | int fl, sl; 385 | mapping_insert(block_size(block), &fl, &sl); 386 | insert_free_block(control, block, fl, sl); 387 | } 388 | 389 | static inline int block_can_split(block_header_t* block, size_t size) 390 | { 391 | return block_size(block) >= sizeof(block_header_t) + size; 392 | } 393 | 394 | /* Split a block into two, the second of which is free. */ 395 | static block_header_t* block_split(block_header_t* block, size_t size) 396 | { 397 | /* Calculate the amount of space left in the remaining block. 
*/ 398 | block_header_t* remaining = 399 | offset_to_block(block_to_ptr(block), size - block_header_overhead); 400 | 401 | const size_t remain_size = block_size(block) - (size + block_header_overhead); 402 | 403 | tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE) 404 | && "remaining block not aligned properly"); 405 | 406 | tlsf_assert(block_size(block) == remain_size + size + block_header_overhead); 407 | block_set_size(remaining, remain_size); 408 | tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size"); 409 | 410 | block_set_size(block, size); 411 | block_mark_as_free(remaining); 412 | 413 | return remaining; 414 | } 415 | 416 | /* Absorb a free block's storage into an adjacent previous free block. */ 417 | static block_header_t* block_absorb(block_header_t* prev, block_header_t* block) 418 | { 419 | tlsf_assert(!block_is_last(prev) && "previous block can't be last"); 420 | /* Note: Leaves flags untouched. */ 421 | prev->size += block_size(block) + block_header_overhead; 422 | block_link_next(prev); 423 | return prev; 424 | } 425 | 426 | /* Merge a just-freed block with an adjacent previous free block. */ 427 | static block_header_t* block_merge_prev(control_t* control, block_header_t* block) 428 | { 429 | if (block_is_prev_free(block)) 430 | { 431 | block_header_t* prev = block_prev(block); 432 | tlsf_assert(prev && "prev physical block can't be null"); 433 | tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such"); 434 | block_remove(control, prev); 435 | block = block_absorb(prev, block); 436 | } 437 | 438 | return block; 439 | } 440 | 441 | /* Merge a just-freed block with an adjacent free block. */ 442 | static block_header_t* block_merge_next(control_t* control, block_header_t* block) 443 | { 444 | block_header_t* next = block_next(block); 445 | tlsf_assert(next && "next physical block can't be null"); 446 | 447 | if (block_is_free(next)) 448 | { 449 | tlsf_assert(!block_is_last(block) && "previous block can't be last"); 450 | block_remove(control, next); 451 | block = block_absorb(block, next); 452 | } 453 | 454 | return block; 455 | } 456 | 457 | /* Trim any trailing block space off the end of a block, return to pool. */ 458 | static void block_trim_free(control_t* control, block_header_t* block, size_t size) 459 | { 460 | tlsf_assert(block_is_free(block) && "block must be free"); 461 | if (block_can_split(block, size)) 462 | { 463 | block_header_t* remaining_block = block_split(block, size); 464 | block_link_next(block); 465 | block_set_prev_free(remaining_block); 466 | block_insert(control, remaining_block); 467 | } 468 | } 469 | 470 | /* Trim any trailing block space off the end of a used block, return to pool. */ 471 | static void block_trim_used(control_t* control, block_header_t* block, size_t size) 472 | { 473 | tlsf_assert(!block_is_free(block) && "block must be used"); 474 | if (block_can_split(block, size)) 475 | { 476 | /* If the next block is free, we must coalesce. */ 477 | block_header_t* remaining_block = block_split(block, size); 478 | block_set_prev_used(remaining_block); 479 | 480 | remaining_block = block_merge_next(control, remaining_block); 481 | block_insert(control, remaining_block); 482 | } 483 | } 484 | 485 | static block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size) 486 | { 487 | block_header_t* remaining_block = block; 488 | if (block_can_split(block, size)) 489 | { 490 | /* We want the 2nd block. 
*/ 491 | remaining_block = block_split(block, size - block_header_overhead); 492 | block_set_prev_free(remaining_block); 493 | 494 | block_link_next(block); 495 | block_insert(control, block); 496 | } 497 | 498 | return remaining_block; 499 | } 500 | 501 | static block_header_t* block_locate_free(control_t* control, size_t size) 502 | { 503 | int fl = 0, sl = 0; 504 | block_header_t* block = 0; 505 | 506 | if (size) 507 | { 508 | mapping_search(size, &fl, &sl); 509 | 510 | /* 511 | ** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up 512 | ** with indices that are off the end of the block array. 513 | ** So, we protect against that here, since this is the only callsite of mapping_search. 514 | ** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range. 515 | */ 516 | if (fl < FL_INDEX_COUNT) 517 | { 518 | block = search_suitable_block(control, &fl, &sl); 519 | } 520 | } 521 | 522 | if (block) 523 | { 524 | tlsf_assert(block_size(block) >= size); 525 | remove_free_block(control, block, fl, sl); 526 | } 527 | 528 | return block; 529 | } 530 | 531 | static void* block_prepare_used(control_t* control, block_header_t* block, size_t size) 532 | { 533 | void* p = 0; 534 | if (block) 535 | { 536 | tlsf_assert(size && "size must be non-zero"); 537 | block_trim_free(control, block, size); 538 | block_mark_as_used(block); 539 | p = block_to_ptr(block); 540 | } 541 | return p; 542 | } 543 | 544 | /* Clear structure and point all empty lists at the null block. */ 545 | static void control_construct(control_t* control) 546 | { 547 | int i, j; 548 | 549 | control->block_null.next_free = &control->block_null; 550 | control->block_null.prev_free = &control->block_null; 551 | 552 | control->fl_bitmap = 0; 553 | for (i = 0; i < FL_INDEX_COUNT; ++i) 554 | { 555 | control->sl_bitmap[i] = 0; 556 | for (j = 0; j < SL_INDEX_COUNT; ++j) 557 | { 558 | control->blocks[i][j] = &control->block_null; 559 | } 560 | } 561 | } 562 | 563 | size_t tlsf_block_size(void* ptr) 564 | { 565 | size_t size = 0; 566 | if (ptr) 567 | { 568 | const block_header_t* block = block_from_ptr(ptr); 569 | size = block_size(block); 570 | } 571 | return size; 572 | } 573 | 574 | /* 575 | ** Size of the TLSF structures in a given memory block passed to 576 | ** tlsf_create, equal to the size of a control_t 577 | */ 578 | size_t tlsf_size(void) 579 | { 580 | return sizeof(control_t); 581 | } 582 | 583 | size_t tlsf_align_size(void) 584 | { 585 | return ALIGN_SIZE; 586 | } 587 | 588 | size_t tlsf_block_size_min(void) 589 | { 590 | return block_size_min; 591 | } 592 | 593 | size_t tlsf_block_size_max(void) 594 | { 595 | return block_size_max; 596 | } 597 | 598 | /* 599 | ** Overhead of the TLSF structures in a given memory block passed to 600 | ** tlsf_add_pool, equal to the overhead of a free block and the 601 | ** sentinel block. 
602 | */ 603 | size_t tlsf_pool_overhead(void) 604 | { 605 | return 2 * block_header_overhead; 606 | } 607 | 608 | size_t tlsf_alloc_overhead(void) 609 | { 610 | return block_header_overhead; 611 | } 612 | 613 | static pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes) 614 | { 615 | block_header_t* block; 616 | block_header_t* next; 617 | 618 | const size_t pool_overhead = tlsf_pool_overhead(); 619 | const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE); 620 | 621 | if (((ptrdiff_t)mem % ALIGN_SIZE) != 0) 622 | { 623 | return 0; 624 | } 625 | 626 | if (pool_bytes < block_size_min || pool_bytes > block_size_max) 627 | { 628 | return 0; 629 | } 630 | 631 | /* 632 | ** Create the main free block. Offset the start of the block slightly 633 | ** so that the prev_phys_block field falls outside of the pool - 634 | ** it will never be used. 635 | */ 636 | block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead); 637 | block_set_size(block, pool_bytes); 638 | block_set_free(block); 639 | block_set_prev_used(block); 640 | block_insert(tlsf_cast(control_t*, tlsf), block); 641 | 642 | /* Split the block to create a zero-size sentinel block. */ 643 | next = block_link_next(block); 644 | block_set_size(next, 0); 645 | block_set_used(next); 646 | block_set_prev_free(next); 647 | 648 | return mem; 649 | } 650 | 651 | tlsf_t tlsf_create(void* mem) 652 | { 653 | if (((tlsfptr_t)mem % ALIGN_SIZE) != 0) 654 | { 655 | return 0; 656 | } 657 | 658 | control_construct(tlsf_cast(control_t*, mem)); 659 | 660 | return tlsf_cast(tlsf_t, mem); 661 | } 662 | 663 | tlsf_t tlsf_create_with_pool(void* mem, size_t bytes) 664 | { 665 | tlsf_t tlsf = tlsf_create(mem); 666 | tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size()); 667 | return tlsf; 668 | } 669 | 670 | void* tlsf_malloc(tlsf_t tlsf, size_t size) 671 | { 672 | control_t* control = tlsf_cast(control_t*, tlsf); 673 | const size_t adjust = adjust_request_size(size, ALIGN_SIZE); 674 | block_header_t* block = block_locate_free(control, adjust); 675 | return block_prepare_used(control, block, adjust); 676 | } 677 | 678 | void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size) 679 | { 680 | control_t* control = tlsf_cast(control_t*, tlsf); 681 | const size_t adjust = adjust_request_size(size, ALIGN_SIZE); 682 | 683 | /* 684 | ** We must allocate an additional minimum block size bytes so that if 685 | ** our free block will leave an alignment gap which is smaller, we can 686 | ** trim a leading free block and release it back to the pool. We must 687 | ** do this because the previous physical block is in use, therefore 688 | ** the prev_phys_block field is not valid, and we can't simply adjust 689 | ** the size of that block. 690 | */ 691 | const size_t gap_minimum = sizeof(block_header_t); 692 | const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align); 693 | 694 | /* 695 | ** If alignment is less than or equals base alignment, we're done. 696 | ** If we requested 0 bytes, return null, as tlsf_malloc(0) does. 697 | */ 698 | const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust; 699 | 700 | block_header_t* block = block_locate_free(control, aligned_size); 701 | 702 | /* This can't be a static assert. 
*/ 703 | tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead); 704 | 705 | if (block) 706 | { 707 | void* ptr = block_to_ptr(block); 708 | void* aligned = align_ptr(ptr, align); 709 | size_t gap = tlsf_cast(size_t, 710 | tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr)); 711 | 712 | /* If gap size is too small, offset to next aligned boundary. */ 713 | if (gap && gap < gap_minimum) 714 | { 715 | const size_t gap_remain = gap_minimum - gap; 716 | const size_t offset = tlsf_max(gap_remain, align); 717 | const void* next_aligned = tlsf_cast(void*, 718 | tlsf_cast(tlsfptr_t, aligned) + offset); 719 | 720 | aligned = align_ptr(next_aligned, align); 721 | gap = tlsf_cast(size_t, 722 | tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr)); 723 | } 724 | 725 | if (gap) 726 | { 727 | tlsf_assert(gap >= gap_minimum && "gap size too small"); 728 | block = block_trim_free_leading(control, block, gap); 729 | } 730 | } 731 | 732 | return block_prepare_used(control, block, adjust); 733 | } 734 | 735 | void tlsf_free(tlsf_t tlsf, void* ptr) 736 | { 737 | /* Don't attempt to free a NULL pointer. */ 738 | if (ptr) 739 | { 740 | control_t* control = tlsf_cast(control_t*, tlsf); 741 | block_header_t* block = block_from_ptr(ptr); 742 | tlsf_assert(!block_is_free(block) && "block already marked as free"); 743 | block_mark_as_free(block); 744 | block = block_merge_prev(control, block); 745 | block = block_merge_next(control, block); 746 | block_insert(control, block); 747 | } 748 | } 749 | 750 | /* 751 | ** The TLSF block information provides us with enough information to 752 | ** provide a reasonably intelligent implementation of realloc, growing or 753 | ** shrinking the currently allocated block as required. 754 | ** 755 | ** This routine handles the somewhat esoteric edge cases of realloc: 756 | ** - a non-zero size with a null pointer will behave like malloc 757 | ** - a zero size with a non-null pointer will behave like free 758 | ** - a request that cannot be satisfied will leave the original buffer 759 | ** untouched 760 | ** - an extended buffer size will leave the newly-allocated area with 761 | ** contents undefined 762 | */ 763 | void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size) 764 | { 765 | control_t* control = tlsf_cast(control_t*, tlsf); 766 | void* p = 0; 767 | 768 | /* Zero-size requests are treated as free. */ 769 | if (ptr && size == 0) 770 | { 771 | tlsf_free(tlsf, ptr); 772 | } 773 | /* Requests with NULL pointers are treated as malloc. */ 774 | else if (!ptr) 775 | { 776 | p = tlsf_malloc(tlsf, size); 777 | } 778 | else 779 | { 780 | block_header_t* block = block_from_ptr(ptr); 781 | block_header_t* next = block_next(block); 782 | 783 | const size_t cursize = block_size(block); 784 | const size_t combined = cursize + block_size(next) + block_header_overhead; 785 | const size_t adjust = adjust_request_size(size, ALIGN_SIZE); 786 | 787 | tlsf_assert(!block_is_free(block) && "block already marked as free"); 788 | 789 | /* 790 | ** If the next block is used, or when combined with the current 791 | ** block, does not offer enough space, we must reallocate and copy. 792 | */ 793 | if (adjust > cursize && (!block_is_free(next) || adjust > combined)) 794 | { 795 | p = tlsf_malloc(tlsf, size); 796 | if (p) 797 | { 798 | const size_t minsize = tlsf_min(cursize, size); 799 | memcpy(p, ptr, minsize); 800 | tlsf_free(tlsf, ptr); 801 | } 802 | } 803 | else 804 | { 805 | /* Do we need to expand to the next block? 
*/ 806 | if (adjust > cursize) 807 | { 808 | block_merge_next(control, block); 809 | block_mark_as_used(block); 810 | } 811 | 812 | /* Trim the resulting block and return the original pointer. */ 813 | block_trim_used(control, block, adjust); 814 | p = ptr; 815 | } 816 | } 817 | 818 | return p; 819 | } 820 | -------------------------------------------------------------------------------- /utils/tlsf.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | /* 4 | ** Two Level Segregated Fit memory allocator, version 3.1. 5 | ** Written by Matthew Conte 6 | ** http://tlsf.baisoku.org 7 | ** 8 | ** Based on the original documentation by Miguel Masmano: 9 | ** http://www.gii.upv.es/tlsf/main/docs 10 | ** 11 | ** This implementation was written to the specification 12 | ** of the document, therefore no GPL restrictions apply. 13 | ** 14 | ** Copyright (c) 2006-2016, Matthew Conte 15 | ** All rights reserved. 16 | ** 17 | ** Redistribution and use in source and binary forms, with or without 18 | ** modification, are permitted provided that the following conditions are met: 19 | ** * Redistributions of source code must retain the above copyright 20 | ** notice, this list of conditions and the following disclaimer. 21 | ** * Redistributions in binary form must reproduce the above copyright 22 | ** notice, this list of conditions and the following disclaimer in the 23 | ** documentation and/or other materials provided with the distribution. 24 | ** * Neither the name of the copyright holder nor the 25 | ** names of its contributors may be used to endorse or promote products 26 | ** derived from this software without specific prior written permission. 27 | ** 28 | ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 29 | ** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 30 | ** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 31 | ** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY 32 | ** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 33 | ** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 34 | ** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 35 | ** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 | ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 37 | ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 | */ 39 | 40 | #include 41 | 42 | /* 43 | ** Constants. 44 | */ 45 | 46 | /* Public constants: may be modified. */ 47 | enum tlsf_public 48 | { 49 | /* log2 of number of linear subdivisions of block sizes. Larger 50 | ** values require more memory in the control structure. Values of 51 | ** 4 or 5 are typical. 52 | */ 53 | SL_INDEX_COUNT_LOG2 = 4, 54 | }; 55 | 56 | /* Private constants: do not modify. */ 57 | enum tlsf_private 58 | { 59 | /* All allocation sizes and addresses are aligned to 4 bytes. */ 60 | ALIGN_SIZE_LOG2 = 2, 61 | ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2), 62 | 63 | /* 64 | ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits. 
65 | ** However, because we linearly subdivide the second-level lists, and 66 | ** our minimum size granularity is 4 bytes, it doesn't make sense to 67 | ** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4, 68 | ** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be 69 | ** trying to split size ranges into more slots than we have available. 70 | ** Instead, we calculate the minimum threshold size, and place all 71 | ** blocks below that size into the 0th first-level list. 72 | */ 73 | 74 | FL_INDEX_MAX = 20, //handle up to 1MB of Heap, acceptable to microcontrollers 75 | SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2), 76 | FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2), 77 | FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1), 78 | SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT), 79 | }; 80 | 81 | /* 82 | ** Block header structure. 83 | ** 84 | ** There are several implementation subtleties involved: 85 | ** - The prev_phys_block field is only valid if the previous block is free. 86 | ** - The prev_phys_block field is actually stored at the end of the 87 | ** previous block. It appears at the beginning of this structure only to 88 | ** simplify the implementation. 89 | ** - The next_free / prev_free fields are only valid if the block is free. 90 | */ 91 | typedef struct block_header_t 92 | { 93 | /* Points to the previous physical block. */ 94 | struct block_header_t* prev_phys_block; 95 | 96 | /* The size of this block, excluding the block header. */ 97 | size_t size; 98 | 99 | /* Next and previous free blocks. */ 100 | struct block_header_t* next_free; 101 | struct block_header_t* prev_free; 102 | } block_header_t; 103 | 104 | /* The TLSF control structure. */ 105 | typedef struct control_t 106 | { 107 | /* Empty lists point at this block to indicate they are free. */ 108 | block_header_t block_null; 109 | 110 | /* Bitmaps for free lists. */ 111 | unsigned int fl_bitmap; 112 | unsigned int sl_bitmap[FL_INDEX_COUNT]; 113 | 114 | /* Head of free lists. */ 115 | block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT]; 116 | } control_t; 117 | 118 | /* tlsf_t: a TLSF structure. Can contain 1 to N pools. */ 119 | /* pool_t: a block of memory that TLSF can manage. */ 120 | typedef void* tlsf_t; 121 | typedef void* pool_t; 122 | 123 | /* Create/destroy a memory pool. */ 124 | tlsf_t tlsf_create(void* mem); 125 | tlsf_t tlsf_create_with_pool(void* mem, size_t bytes); 126 | 127 | /* malloc/memalign/realloc/free replacements. */ 128 | void* tlsf_malloc(tlsf_t tlsf, size_t bytes); 129 | void tlsf_free(tlsf_t tlsf, void* ptr); 130 | 131 | /* Returns internal block size, not original request size */ 132 | size_t tlsf_block_size(void* ptr); 133 | 134 | /* Overheads/limits of internal structures. */ 135 | size_t tlsf_size(void); 136 | size_t tlsf_align_size(void); 137 | size_t tlsf_block_size_min(void); 138 | size_t tlsf_block_size_max(void); 139 | size_t tlsf_pool_overhead(void); 140 | size_t tlsf_alloc_overhead(void); 141 | 142 | --------------------------------------------------------------------------------
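For quick reference, here is a minimal usage sketch of the task and semaphore primitives shown above, assembled only from the signatures visible in src/task.c and src/semaphore.c. It is illustrative rather than lifted from the repository: the kalango_api.h aggregate include, the void (*)(void *) entry signature, the stack size, the priority value and the names data_ready, worker and app_start are assumptions to adapt to your own kalango_config.h.

```
#include "kalango_api.h" /* assumption: top-level header exposing the task and semaphore APIs */

static SemaphoreId data_ready;

/* Assumed entry signature: TaskCreate() stores settings->function and
   settings->arg, so a void (*)(void *) style entry is used here. */
static void worker(void *arg) {
    (void)arg;
    for (;;) {
        /* Block up to 100 ticks until a producer or ISR calls SemaphoreGive(data_ready, 1);
           passing KERNEL_NO_WAIT instead polls without sleeping. */
        if (SemaphoreTake(data_ready, 100) == kSuccess) {
            /* ... consume the event ... */
        }
    }
}

void app_start(void) {
    TaskSettings settings;
    settings.function   = worker;
    settings.arg        = 0;
    settings.stack_size = 512;   /* illustrative size, passed straight to AllocateRawBuffer() */
    settings.priority   = 4;     /* must respect CONFIG_PRIORITY_LEVELS */

    data_ready = SemaphoreCreate(0, 8);  /* starts empty, count saturates at 8 */
    (void)TaskCreate(&settings);
}
```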
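The TLSF allocator bundled in utils/ can also be exercised on its own, using only the functions declared in tlsf.h above. The 8 KiB static buffer and the tlsf_demo name below are assumptions; tlsf_create_with_pool() places the control structure at the start of the buffer and turns the remainder into a single free block.

```
#include <stdint.h>
#include "tlsf.h"

/* Illustrative pool: any buffer aligned to tlsf_align_size() and large enough
   for tlsf_size() plus tlsf_pool_overhead() plus the requested blocks works. */
static uint8_t heap_area[8 * 1024] __attribute__((aligned(4)));

void tlsf_demo(void) {
    tlsf_t heap = tlsf_create_with_pool(heap_area, sizeof(heap_area));

    void *p = tlsf_malloc(heap, 128);  /* O(1) two-level bitmap lookup */
    tlsf_free(heap, p);                /* O(1) free with neighbor coalescing */
}
```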