├── LICENSE ├── Makefile ├── README.md ├── device_headers ├── cmsis_compiler.h ├── cmsis_gcc.h ├── cmsis_version.h ├── core_cm7.h ├── mpu_armv7.h ├── stm32f723xx.h ├── stm32f7xx.h └── system_stm32f7xx.h ├── generate_vt.py ├── ld ├── STM32F723IE.ld └── sections.ld └── src ├── global.c ├── global.h ├── main.c ├── main.h ├── qspi.c └── qspi.h /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License: 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TARGET = main 2 | 3 | # Default target chip. 4 | MCU ?= STM32F723IE 5 | 6 | # Define target chip information. 
7 | ifeq ($(MCU), STM32F723IE) 8 | MCU_FILES = STM32F723IE 9 | ST_MCU_DEF = STM32F723xx 10 | MCU_CLASS = F7 11 | endif 12 | 13 | # 14 | # Generic STM32 makefile: 15 | # 16 | ifeq ($(MCU_CLASS), F0) 17 | MCU_SPEC = cortex-m0 18 | else ifeq ($(MCU_CLASS), $(filter $(MCU_CLASS), L0 G0)) 19 | MCU_SPEC = cortex-m0plus 20 | else ifeq ($(MCU_CLASS), $(filter $(MCU_CLASS), F1 L1)) 21 | MCU_SPEC = cortex-m3 22 | else ifeq ($(MCU_CLASS), $(filter $(MCU_CLASS), L4 G4 WB)) 23 | MCU_SPEC = cortex-m4 24 | else ifeq ($(MCU_CLASS), F7) 25 | MCU_SPEC = cortex-m7 26 | endif 27 | 28 | # Toolchain definitions (ARM bare metal defaults) 29 | TOOLCHAIN = /usr 30 | CC = $(TOOLCHAIN)/bin/arm-none-eabi-gcc 31 | AS = $(TOOLCHAIN)/bin/arm-none-eabi-as 32 | LD = $(TOOLCHAIN)/bin/arm-none-eabi-ld 33 | OC = $(TOOLCHAIN)/bin/arm-none-eabi-objcopy 34 | OD = $(TOOLCHAIN)/bin/arm-none-eabi-objdump 35 | OS = $(TOOLCHAIN)/bin/arm-none-eabi-size 36 | 37 | # Assembly directives. 38 | ASFLAGS += -c 39 | ASFLAGS += -O0 40 | ASFLAGS += -mcpu=$(MCU_SPEC) 41 | ASFLAGS += -mthumb 42 | ASFLAGS += -Wall 43 | # (Set error messages to appear on a single line.) 44 | ASFLAGS += -fmessage-length=0 45 | ASFLAGS += -DVVC_$(MCU_CLASS) 46 | 47 | # C compilation directives 48 | CFLAGS += -mcpu=$(MCU_SPEC) 49 | CFLAGS += -mthumb 50 | ifeq ($(MCU_CLASS), $(filter $(MCU_CLASS), F0 F1 L0 L1 G0)) 51 | CFLAGS += -msoft-float 52 | CFLAGS += -mfloat-abi=soft 53 | else ifeq ($(MCU_CLASS), F7) 54 | CFLAGS += -mhard-float 55 | CFLAGS += -mfloat-abi=hard 56 | CFLAGS += -mfpu=fpv5-sp-d16 57 | else 58 | CFLAGS += -mhard-float 59 | CFLAGS += -mfloat-abi=hard 60 | CFLAGS += -mfpu=fpv4-sp-d16 61 | endif 62 | CFLAGS += -Wall 63 | CFLAGS += -g 64 | CFLAGS += -fmessage-length=0 65 | CFLAGS += -ffunction-sections 66 | CFLAGS += -fdata-sections 67 | CFLAGS += --specs=nosys.specs 68 | CFLAGS += -D$(ST_MCU_DEF) 69 | CFLAGS += -D$(MCU_FILES) 70 | CFLAGS += -DVVC_$(MCU_CLASS) 71 | 72 | # Linker directives. 
73 | LSCRIPT = ./ld/$(MCU_FILES).ld 74 | LFLAGS += -mcpu=$(MCU_SPEC) 75 | LFLAGS += -mthumb 76 | ifeq ($(MCU_CLASS), $(filter $(MCU_CLASS), F0 F1 L0 L1 G0)) 77 | LFLAGS += -msoft-float 78 | LFLAGS += -mfloat-abi=soft 79 | else ifeq ($(MCU_CLASS), F7) 80 | LFLAGS += -mhard-float 81 | LFLAGS += -mfloat-abi=hard 82 | LFLAGS += -mfpu=fpv5-sp-d16 83 | else 84 | LFLAGS += -mhard-float 85 | LFLAGS += -mfloat-abi=hard 86 | LFLAGS += -mfpu=fpv4-sp-d16 87 | endif 88 | LFLAGS += -Wall 89 | LFLAGS += --specs=nosys.specs 90 | LFLAGS += -lgcc 91 | LFLAGS += -Wl,--gc-sections 92 | LFLAGS += -Wl,-L./ld 93 | LFLAGS += -T$(LSCRIPT) 94 | 95 | AS_SRC = ./$(ST_MCU_DEF)_vt.S 96 | C_SRC = ./src/main.c 97 | C_SRC += ./src/global.c 98 | C_SRC += ./src/qspi.c 99 | 100 | INCLUDE = -I./ 101 | INCLUDE += -I./device_headers 102 | 103 | OBJS = $(AS_SRC:.S=.o) 104 | OBJS += $(C_SRC:.c=.o) 105 | 106 | .PHONY: all 107 | all: $(TARGET).bin 108 | 109 | ./$(ST_MCU_DEF)_vt.S: 110 | python generate_vt.py $(ST_MCU_DEF) $(MCU_SPEC) 111 | 112 | %.o: %.S 113 | $(CC) -x assembler-with-cpp $(ASFLAGS) $< -o $@ 114 | 115 | %.o: %.c 116 | $(CC) -c $(CFLAGS) $(INCLUDE) $< -o $@ 117 | 118 | $(TARGET).elf: $(OBJS) 119 | $(CC) $^ $(LFLAGS) -o $@ 120 | 121 | $(TARGET).bin: $(TARGET).elf 122 | $(OC) -S -O binary $< $@ 123 | $(OS) $< 124 | 125 | .PHONY: clean 126 | clean: 127 | rm -f $(OBJS) 128 | rm -f $(ST_MCU_DEF)_vt.S 129 | rm -f $(TARGET).elf 130 | rm -f $(TARGET).bin 131 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # STM32F723E Discovery Kit Quad-SPI Flash Test 2 | 3 | This is a bare-metal example demonstrating how to use the STM32's QSPI peripheral to transparently map an external Quad-SPI Flash chip to internal memory space. It's meant to go with a companion blog post about the peripheral. 
4 | 5 | It targets an STM32F723E Discovery Kit board, which includes a 64MB QSPI Flash chip along with a few other external memories. 6 | -------------------------------------------------------------------------------- /device_headers/cmsis_compiler.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************//** 2 | * @file cmsis_compiler.h 3 | * @brief CMSIS compiler generic header file 4 | * @version V5.0.4 5 | * @date 10. January 2018 6 | ******************************************************************************/ 7 | /* 8 | * Copyright (c) 2009-2018 Arm Limited. All rights reserved. 9 | * 10 | * SPDX-License-Identifier: Apache-2.0 11 | * 12 | * Licensed under the Apache License, Version 2.0 (the License); you may 13 | * not use this file except in compliance with the License. 14 | * You may obtain a copy of the License at 15 | * 16 | * www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT 20 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 
23 | */ 24 | 25 | #ifndef __CMSIS_COMPILER_H 26 | #define __CMSIS_COMPILER_H 27 | 28 | #include 29 | 30 | /* 31 | * Arm Compiler 4/5 32 | */ 33 | #if defined ( __CC_ARM ) 34 | #include "cmsis_armcc.h" 35 | 36 | 37 | /* 38 | * Arm Compiler 6 (armclang) 39 | */ 40 | #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) 41 | #include "cmsis_armclang.h" 42 | 43 | 44 | /* 45 | * GNU Compiler 46 | */ 47 | #elif defined ( __GNUC__ ) 48 | #include "cmsis_gcc.h" 49 | 50 | 51 | /* 52 | * IAR Compiler 53 | */ 54 | #elif defined ( __ICCARM__ ) 55 | #include 56 | 57 | 58 | /* 59 | * TI Arm Compiler 60 | */ 61 | #elif defined ( __TI_ARM__ ) 62 | #include 63 | 64 | #ifndef __ASM 65 | #define __ASM __asm 66 | #endif 67 | #ifndef __INLINE 68 | #define __INLINE inline 69 | #endif 70 | #ifndef __STATIC_INLINE 71 | #define __STATIC_INLINE static inline 72 | #endif 73 | #ifndef __STATIC_FORCEINLINE 74 | #define __STATIC_FORCEINLINE __STATIC_INLINE 75 | #endif 76 | #ifndef __NO_RETURN 77 | #define __NO_RETURN __attribute__((noreturn)) 78 | #endif 79 | #ifndef __USED 80 | #define __USED __attribute__((used)) 81 | #endif 82 | #ifndef __WEAK 83 | #define __WEAK __attribute__((weak)) 84 | #endif 85 | #ifndef __PACKED 86 | #define __PACKED __attribute__((packed)) 87 | #endif 88 | #ifndef __PACKED_STRUCT 89 | #define __PACKED_STRUCT struct __attribute__((packed)) 90 | #endif 91 | #ifndef __PACKED_UNION 92 | #define __PACKED_UNION union __attribute__((packed)) 93 | #endif 94 | #ifndef __UNALIGNED_UINT32 /* deprecated */ 95 | struct __attribute__((packed)) T_UINT32 { uint32_t v; }; 96 | #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) 97 | #endif 98 | #ifndef __UNALIGNED_UINT16_WRITE 99 | __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; 100 | #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val)) 101 | #endif 102 | #ifndef __UNALIGNED_UINT16_READ 103 | __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; 104 | #define 
__UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) 105 | #endif 106 | #ifndef __UNALIGNED_UINT32_WRITE 107 | __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; 108 | #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) 109 | #endif 110 | #ifndef __UNALIGNED_UINT32_READ 111 | __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; 112 | #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) 113 | #endif 114 | #ifndef __ALIGNED 115 | #define __ALIGNED(x) __attribute__((aligned(x))) 116 | #endif 117 | #ifndef __RESTRICT 118 | #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. 119 | #define __RESTRICT 120 | #endif 121 | 122 | 123 | /* 124 | * TASKING Compiler 125 | */ 126 | #elif defined ( __TASKING__ ) 127 | /* 128 | * The CMSIS functions have been implemented as intrinsics in the compiler. 129 | * Please use "carm -?i" to get an up to date list of all intrinsics, 130 | * Including the CMSIS ones. 
131 | */ 132 | 133 | #ifndef __ASM 134 | #define __ASM __asm 135 | #endif 136 | #ifndef __INLINE 137 | #define __INLINE inline 138 | #endif 139 | #ifndef __STATIC_INLINE 140 | #define __STATIC_INLINE static inline 141 | #endif 142 | #ifndef __STATIC_FORCEINLINE 143 | #define __STATIC_FORCEINLINE __STATIC_INLINE 144 | #endif 145 | #ifndef __NO_RETURN 146 | #define __NO_RETURN __attribute__((noreturn)) 147 | #endif 148 | #ifndef __USED 149 | #define __USED __attribute__((used)) 150 | #endif 151 | #ifndef __WEAK 152 | #define __WEAK __attribute__((weak)) 153 | #endif 154 | #ifndef __PACKED 155 | #define __PACKED __packed__ 156 | #endif 157 | #ifndef __PACKED_STRUCT 158 | #define __PACKED_STRUCT struct __packed__ 159 | #endif 160 | #ifndef __PACKED_UNION 161 | #define __PACKED_UNION union __packed__ 162 | #endif 163 | #ifndef __UNALIGNED_UINT32 /* deprecated */ 164 | struct __packed__ T_UINT32 { uint32_t v; }; 165 | #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) 166 | #endif 167 | #ifndef __UNALIGNED_UINT16_WRITE 168 | __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; 169 | #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) 170 | #endif 171 | #ifndef __UNALIGNED_UINT16_READ 172 | __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; 173 | #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) 174 | #endif 175 | #ifndef __UNALIGNED_UINT32_WRITE 176 | __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; 177 | #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) 178 | #endif 179 | #ifndef __UNALIGNED_UINT32_READ 180 | __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; 181 | #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) 182 | #endif 183 | #ifndef __ALIGNED 184 | #define __ALIGNED(x) __align(x) 185 | #endif 186 | #ifndef __RESTRICT 187 | #warning No compiler specific solution for 
__RESTRICT. __RESTRICT is ignored. 188 | #define __RESTRICT 189 | #endif 190 | 191 | 192 | /* 193 | * COSMIC Compiler 194 | */ 195 | #elif defined ( __CSMC__ ) 196 | #include 197 | 198 | #ifndef __ASM 199 | #define __ASM _asm 200 | #endif 201 | #ifndef __INLINE 202 | #define __INLINE inline 203 | #endif 204 | #ifndef __STATIC_INLINE 205 | #define __STATIC_INLINE static inline 206 | #endif 207 | #ifndef __STATIC_FORCEINLINE 208 | #define __STATIC_FORCEINLINE __STATIC_INLINE 209 | #endif 210 | #ifndef __NO_RETURN 211 | // NO RETURN is automatically detected hence no warning here 212 | #define __NO_RETURN 213 | #endif 214 | #ifndef __USED 215 | #warning No compiler specific solution for __USED. __USED is ignored. 216 | #define __USED 217 | #endif 218 | #ifndef __WEAK 219 | #define __WEAK __weak 220 | #endif 221 | #ifndef __PACKED 222 | #define __PACKED @packed 223 | #endif 224 | #ifndef __PACKED_STRUCT 225 | #define __PACKED_STRUCT @packed struct 226 | #endif 227 | #ifndef __PACKED_UNION 228 | #define __PACKED_UNION @packed union 229 | #endif 230 | #ifndef __UNALIGNED_UINT32 /* deprecated */ 231 | @packed struct T_UINT32 { uint32_t v; }; 232 | #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) 233 | #endif 234 | #ifndef __UNALIGNED_UINT16_WRITE 235 | __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; 236 | #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) 237 | #endif 238 | #ifndef __UNALIGNED_UINT16_READ 239 | __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; 240 | #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) 241 | #endif 242 | #ifndef __UNALIGNED_UINT32_WRITE 243 | __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; 244 | #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) 245 | #endif 246 | #ifndef __UNALIGNED_UINT32_READ 247 | __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; 248 | #define 
__UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) 249 | #endif 250 | #ifndef __ALIGNED 251 | #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored. 252 | #define __ALIGNED(x) 253 | #endif 254 | #ifndef __RESTRICT 255 | #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. 256 | #define __RESTRICT 257 | #endif 258 | 259 | 260 | #else 261 | #error Unknown compiler. 262 | #endif 263 | 264 | 265 | #endif /* __CMSIS_COMPILER_H */ 266 | 267 | -------------------------------------------------------------------------------- /device_headers/cmsis_gcc.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************//** 2 | * @file cmsis_gcc.h 3 | * @brief CMSIS compiler GCC header file 4 | * @version V5.0.4 5 | * @date 09. April 2018 6 | ******************************************************************************/ 7 | /* 8 | * Copyright (c) 2009-2018 Arm Limited. All rights reserved. 9 | * 10 | * SPDX-License-Identifier: Apache-2.0 11 | * 12 | * Licensed under the Apache License, Version 2.0 (the License); you may 13 | * not use this file except in compliance with the License. 14 | * You may obtain a copy of the License at 15 | * 16 | * www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT 20 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 
23 | */ 24 | 25 | #ifndef __CMSIS_GCC_H 26 | #define __CMSIS_GCC_H 27 | 28 | /* ignore some GCC warnings */ 29 | #pragma GCC diagnostic push 30 | #pragma GCC diagnostic ignored "-Wsign-conversion" 31 | #pragma GCC diagnostic ignored "-Wconversion" 32 | #pragma GCC diagnostic ignored "-Wunused-parameter" 33 | 34 | /* Fallback for __has_builtin */ 35 | #ifndef __has_builtin 36 | #define __has_builtin(x) (0) 37 | #endif 38 | 39 | /* CMSIS compiler specific defines */ 40 | #ifndef __ASM 41 | #define __ASM __asm 42 | #endif 43 | #ifndef __INLINE 44 | #define __INLINE inline 45 | #endif 46 | #ifndef __STATIC_INLINE 47 | #define __STATIC_INLINE static inline 48 | #endif 49 | #ifndef __STATIC_FORCEINLINE 50 | #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline 51 | #endif 52 | #ifndef __NO_RETURN 53 | #define __NO_RETURN __attribute__((__noreturn__)) 54 | #endif 55 | #ifndef __USED 56 | #define __USED __attribute__((used)) 57 | #endif 58 | #ifndef __WEAK 59 | #define __WEAK __attribute__((weak)) 60 | #endif 61 | #ifndef __PACKED 62 | #define __PACKED __attribute__((packed, aligned(1))) 63 | #endif 64 | #ifndef __PACKED_STRUCT 65 | #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) 66 | #endif 67 | #ifndef __PACKED_UNION 68 | #define __PACKED_UNION union __attribute__((packed, aligned(1))) 69 | #endif 70 | #ifndef __UNALIGNED_UINT32 /* deprecated */ 71 | #pragma GCC diagnostic push 72 | #pragma GCC diagnostic ignored "-Wpacked" 73 | #pragma GCC diagnostic ignored "-Wattributes" 74 | struct __attribute__((packed)) T_UINT32 { uint32_t v; }; 75 | #pragma GCC diagnostic pop 76 | #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) 77 | #endif 78 | #ifndef __UNALIGNED_UINT16_WRITE 79 | #pragma GCC diagnostic push 80 | #pragma GCC diagnostic ignored "-Wpacked" 81 | #pragma GCC diagnostic ignored "-Wattributes" 82 | __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; 83 | #pragma GCC diagnostic pop 84 | #define 
__UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) 85 | #endif 86 | #ifndef __UNALIGNED_UINT16_READ 87 | #pragma GCC diagnostic push 88 | #pragma GCC diagnostic ignored "-Wpacked" 89 | #pragma GCC diagnostic ignored "-Wattributes" 90 | __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; 91 | #pragma GCC diagnostic pop 92 | #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) 93 | #endif 94 | #ifndef __UNALIGNED_UINT32_WRITE 95 | #pragma GCC diagnostic push 96 | #pragma GCC diagnostic ignored "-Wpacked" 97 | #pragma GCC diagnostic ignored "-Wattributes" 98 | __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; 99 | #pragma GCC diagnostic pop 100 | #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) 101 | #endif 102 | #ifndef __UNALIGNED_UINT32_READ 103 | #pragma GCC diagnostic push 104 | #pragma GCC diagnostic ignored "-Wpacked" 105 | #pragma GCC diagnostic ignored "-Wattributes" 106 | __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; 107 | #pragma GCC diagnostic pop 108 | #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) 109 | #endif 110 | #ifndef __ALIGNED 111 | #define __ALIGNED(x) __attribute__((aligned(x))) 112 | #endif 113 | #ifndef __RESTRICT 114 | #define __RESTRICT __restrict 115 | #endif 116 | 117 | 118 | /* ########################### Core Function Access ########################### */ 119 | /** \ingroup CMSIS_Core_FunctionInterface 120 | \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions 121 | @{ 122 | */ 123 | 124 | /** 125 | \brief Enable IRQ Interrupts 126 | \details Enables IRQ interrupts by clearing the I-bit in the CPSR. 127 | Can only be executed in Privileged modes. 
128 | */ 129 | __STATIC_FORCEINLINE void __enable_irq(void) 130 | { 131 | __ASM volatile ("cpsie i" : : : "memory"); 132 | } 133 | 134 | 135 | /** 136 | \brief Disable IRQ Interrupts 137 | \details Disables IRQ interrupts by setting the I-bit in the CPSR. 138 | Can only be executed in Privileged modes. 139 | */ 140 | __STATIC_FORCEINLINE void __disable_irq(void) 141 | { 142 | __ASM volatile ("cpsid i" : : : "memory"); 143 | } 144 | 145 | 146 | /** 147 | \brief Get Control Register 148 | \details Returns the content of the Control Register. 149 | \return Control Register value 150 | */ 151 | __STATIC_FORCEINLINE uint32_t __get_CONTROL(void) 152 | { 153 | uint32_t result; 154 | 155 | __ASM volatile ("MRS %0, control" : "=r" (result) ); 156 | return(result); 157 | } 158 | 159 | 160 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 161 | /** 162 | \brief Get Control Register (non-secure) 163 | \details Returns the content of the non-secure Control Register when in secure mode. 164 | \return non-secure Control Register value 165 | */ 166 | __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) 167 | { 168 | uint32_t result; 169 | 170 | __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); 171 | return(result); 172 | } 173 | #endif 174 | 175 | 176 | /** 177 | \brief Set Control Register 178 | \details Writes the given value to the Control Register. 179 | \param [in] control Control Register value to set 180 | */ 181 | __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) 182 | { 183 | __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); 184 | } 185 | 186 | 187 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 188 | /** 189 | \brief Set Control Register (non-secure) 190 | \details Writes the given value to the non-secure Control Register when in secure state. 
191 | \param [in] control Control Register value to set 192 | */ 193 | __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) 194 | { 195 | __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); 196 | } 197 | #endif 198 | 199 | 200 | /** 201 | \brief Get IPSR Register 202 | \details Returns the content of the IPSR Register. 203 | \return IPSR Register value 204 | */ 205 | __STATIC_FORCEINLINE uint32_t __get_IPSR(void) 206 | { 207 | uint32_t result; 208 | 209 | __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); 210 | return(result); 211 | } 212 | 213 | 214 | /** 215 | \brief Get APSR Register 216 | \details Returns the content of the APSR Register. 217 | \return APSR Register value 218 | */ 219 | __STATIC_FORCEINLINE uint32_t __get_APSR(void) 220 | { 221 | uint32_t result; 222 | 223 | __ASM volatile ("MRS %0, apsr" : "=r" (result) ); 224 | return(result); 225 | } 226 | 227 | 228 | /** 229 | \brief Get xPSR Register 230 | \details Returns the content of the xPSR Register. 231 | \return xPSR Register value 232 | */ 233 | __STATIC_FORCEINLINE uint32_t __get_xPSR(void) 234 | { 235 | uint32_t result; 236 | 237 | __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); 238 | return(result); 239 | } 240 | 241 | 242 | /** 243 | \brief Get Process Stack Pointer 244 | \details Returns the current value of the Process Stack Pointer (PSP). 245 | \return PSP Register value 246 | */ 247 | __STATIC_FORCEINLINE uint32_t __get_PSP(void) 248 | { 249 | uint32_t result; 250 | 251 | __ASM volatile ("MRS %0, psp" : "=r" (result) ); 252 | return(result); 253 | } 254 | 255 | 256 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 257 | /** 258 | \brief Get Process Stack Pointer (non-secure) 259 | \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. 
260 | \return PSP Register value 261 | */ 262 | __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) 263 | { 264 | uint32_t result; 265 | 266 | __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); 267 | return(result); 268 | } 269 | #endif 270 | 271 | 272 | /** 273 | \brief Set Process Stack Pointer 274 | \details Assigns the given value to the Process Stack Pointer (PSP). 275 | \param [in] topOfProcStack Process Stack Pointer value to set 276 | */ 277 | __STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) 278 | { 279 | __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); 280 | } 281 | 282 | 283 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 284 | /** 285 | \brief Set Process Stack Pointer (non-secure) 286 | \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. 287 | \param [in] topOfProcStack Process Stack Pointer value to set 288 | */ 289 | __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) 290 | { 291 | __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); 292 | } 293 | #endif 294 | 295 | 296 | /** 297 | \brief Get Main Stack Pointer 298 | \details Returns the current value of the Main Stack Pointer (MSP). 299 | \return MSP Register value 300 | */ 301 | __STATIC_FORCEINLINE uint32_t __get_MSP(void) 302 | { 303 | uint32_t result; 304 | 305 | __ASM volatile ("MRS %0, msp" : "=r" (result) ); 306 | return(result); 307 | } 308 | 309 | 310 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 311 | /** 312 | \brief Get Main Stack Pointer (non-secure) 313 | \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. 
314 | \return MSP Register value 315 | */ 316 | __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) 317 | { 318 | uint32_t result; 319 | 320 | __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); 321 | return(result); 322 | } 323 | #endif 324 | 325 | 326 | /** 327 | \brief Set Main Stack Pointer 328 | \details Assigns the given value to the Main Stack Pointer (MSP). 329 | \param [in] topOfMainStack Main Stack Pointer value to set 330 | */ 331 | __STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) 332 | { 333 | __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); 334 | } 335 | 336 | 337 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 338 | /** 339 | \brief Set Main Stack Pointer (non-secure) 340 | \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. 341 | \param [in] topOfMainStack Main Stack Pointer value to set 342 | */ 343 | __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) 344 | { 345 | __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); 346 | } 347 | #endif 348 | 349 | 350 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 351 | /** 352 | \brief Get Stack Pointer (non-secure) 353 | \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. 354 | \return SP Register value 355 | */ 356 | __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) 357 | { 358 | uint32_t result; 359 | 360 | __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); 361 | return(result); 362 | } 363 | 364 | 365 | /** 366 | \brief Set Stack Pointer (non-secure) 367 | \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. 
368 | \param [in] topOfStack Stack Pointer value to set 369 | */ 370 | __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) 371 | { 372 | __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); 373 | } 374 | #endif 375 | 376 | 377 | /** 378 | \brief Get Priority Mask 379 | \details Returns the current state of the priority mask bit from the Priority Mask Register. 380 | \return Priority Mask value 381 | */ 382 | __STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) 383 | { 384 | uint32_t result; 385 | 386 | __ASM volatile ("MRS %0, primask" : "=r" (result) :: "memory"); 387 | return(result); 388 | } 389 | 390 | 391 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 392 | /** 393 | \brief Get Priority Mask (non-secure) 394 | \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 395 | \return Priority Mask value 396 | */ 397 | __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) 398 | { 399 | uint32_t result; 400 | 401 | __ASM volatile ("MRS %0, primask_ns" : "=r" (result) :: "memory"); 402 | return(result); 403 | } 404 | #endif 405 | 406 | 407 | /** 408 | \brief Set Priority Mask 409 | \details Assigns the given value to the Priority Mask Register. 410 | \param [in] priMask Priority Mask 411 | */ 412 | __STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) 413 | { 414 | __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); 415 | } 416 | 417 | 418 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 419 | /** 420 | \brief Set Priority Mask (non-secure) 421 | \details Assigns the given value to the non-secure Priority Mask Register when in secure state. 
422 | \param [in] priMask Priority Mask 423 | */ 424 | __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) 425 | { 426 | __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); 427 | } 428 | #endif 429 | 430 | 431 | #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ 432 | (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ 433 | (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) 434 | /** 435 | \brief Enable FIQ 436 | \details Enables FIQ interrupts by clearing the F-bit in the CPSR. 437 | Can only be executed in Privileged modes. 438 | */ 439 | __STATIC_FORCEINLINE void __enable_fault_irq(void) 440 | { 441 | __ASM volatile ("cpsie f" : : : "memory"); 442 | } 443 | 444 | 445 | /** 446 | \brief Disable FIQ 447 | \details Disables FIQ interrupts by setting the F-bit in the CPSR. 448 | Can only be executed in Privileged modes. 449 | */ 450 | __STATIC_FORCEINLINE void __disable_fault_irq(void) 451 | { 452 | __ASM volatile ("cpsid f" : : : "memory"); 453 | } 454 | 455 | 456 | /** 457 | \brief Get Base Priority 458 | \details Returns the current value of the Base Priority register. 459 | \return Base Priority register value 460 | */ 461 | __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) 462 | { 463 | uint32_t result; 464 | 465 | __ASM volatile ("MRS %0, basepri" : "=r" (result) ); 466 | return(result); 467 | } 468 | 469 | 470 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 471 | /** 472 | \brief Get Base Priority (non-secure) 473 | \details Returns the current value of the non-secure Base Priority register when in secure state. 
474 | \return Base Priority register value 475 | */ 476 | __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) 477 | { 478 | uint32_t result; 479 | 480 | __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); 481 | return(result); 482 | } 483 | #endif 484 | 485 | 486 | /** 487 | \brief Set Base Priority 488 | \details Assigns the given value to the Base Priority register. 489 | \param [in] basePri Base Priority value to set 490 | */ 491 | __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) 492 | { 493 | __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); 494 | } 495 | 496 | 497 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 498 | /** 499 | \brief Set Base Priority (non-secure) 500 | \details Assigns the given value to the non-secure Base Priority register when in secure state. 501 | \param [in] basePri Base Priority value to set 502 | */ 503 | __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) 504 | { 505 | __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); 506 | } 507 | #endif 508 | 509 | 510 | /** 511 | \brief Set Base Priority with condition 512 | \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, 513 | or the new value increases the BASEPRI priority level. 514 | \param [in] basePri Base Priority value to set 515 | */ 516 | __STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) 517 | { 518 | __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); 519 | } 520 | 521 | 522 | /** 523 | \brief Get Fault Mask 524 | \details Returns the current value of the Fault Mask register. 
525 | \return Fault Mask register value 526 | */ 527 | __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) 528 | { 529 | uint32_t result; 530 | 531 | __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); 532 | return(result); 533 | } 534 | 535 | 536 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 537 | /** 538 | \brief Get Fault Mask (non-secure) 539 | \details Returns the current value of the non-secure Fault Mask register when in secure state. 540 | \return Fault Mask register value 541 | */ 542 | __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) 543 | { 544 | uint32_t result; 545 | 546 | __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); 547 | return(result); 548 | } 549 | #endif 550 | 551 | 552 | /** 553 | \brief Set Fault Mask 554 | \details Assigns the given value to the Fault Mask register. 555 | \param [in] faultMask Fault Mask value to set 556 | */ 557 | __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) 558 | { 559 | __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); 560 | } 561 | 562 | 563 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 564 | /** 565 | \brief Set Fault Mask (non-secure) 566 | \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
567 | \param [in] faultMask Fault Mask value to set 568 | */ 569 | __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) 570 | { 571 | __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); 572 | } 573 | #endif 574 | 575 | #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ 576 | (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ 577 | (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ 578 | 579 | 580 | #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ 581 | (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) 582 | 583 | /** 584 | \brief Get Process Stack Pointer Limit 585 | Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure 586 | Stack Pointer Limit register hence zero is returned always in non-secure 587 | mode. 588 | 589 | \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). 590 | \return PSPLIM Register value 591 | */ 592 | __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) 593 | { 594 | #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ 595 | (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) 596 | // without main extensions, the non-secure PSPLIM is RAZ/WI 597 | return 0U; 598 | #else 599 | uint32_t result; 600 | __ASM volatile ("MRS %0, psplim" : "=r" (result) ); 601 | return result; 602 | #endif 603 | } 604 | 605 | #if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) 606 | /** 607 | \brief Get Process Stack Pointer Limit (non-secure) 608 | Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure 609 | Stack Pointer Limit register hence zero is returned always. 610 | 611 | \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
612 | \return PSPLIM Register value 613 | */ 614 | __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) 615 | { 616 | #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) 617 | // without main extensions, the non-secure PSPLIM is RAZ/WI 618 | return 0U; 619 | #else 620 | uint32_t result; 621 | __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); 622 | return result; 623 | #endif 624 | } 625 | #endif 626 | 627 | 628 | /** 629 | \brief Set Process Stack Pointer Limit 630 | Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure 631 | Stack Pointer Limit register hence the write is silently ignored in non-secure 632 | mode. 633 | 634 | \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 635 | \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set 636 | */ 637 | __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) 638 | { 639 | #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ 640 | (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) 641 | // without main extensions, the non-secure PSPLIM is RAZ/WI 642 | (void)ProcStackPtrLimit; 643 | #else 644 | __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); 645 | #endif 646 | } 647 | 648 | 649 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 650 | /** 651 | \brief Set Process Stack Pointer (non-secure) 652 | Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure 653 | Stack Pointer Limit register hence the write is silently ignored. 654 | 655 | \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
656 | \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set 657 | */ 658 | __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) 659 | { 660 | #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) 661 | // without main extensions, the non-secure PSPLIM is RAZ/WI 662 | (void)ProcStackPtrLimit; 663 | #else 664 | __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); 665 | #endif 666 | } 667 | #endif 668 | 669 | 670 | /** 671 | \brief Get Main Stack Pointer Limit 672 | Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure 673 | Stack Pointer Limit register hence zero is returned always in non-secure 674 | mode. 675 | 676 | \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). 677 | \return MSPLIM Register value 678 | */ 679 | __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) 680 | { 681 | #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ 682 | (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) 683 | // without main extensions, the non-secure MSPLIM is RAZ/WI 684 | return 0U; 685 | #else 686 | uint32_t result; 687 | __ASM volatile ("MRS %0, msplim" : "=r" (result) ); 688 | return result; 689 | #endif 690 | } 691 | 692 | 693 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 694 | /** 695 | \brief Get Main Stack Pointer Limit (non-secure) 696 | Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure 697 | Stack Pointer Limit register hence zero is returned always. 698 | 699 | \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. 
700 | \return MSPLIM Register value 701 | */ 702 | __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) 703 | { 704 | #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) 705 | // without main extensions, the non-secure MSPLIM is RAZ/WI 706 | return 0U; 707 | #else 708 | uint32_t result; 709 | __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); 710 | return result; 711 | #endif 712 | } 713 | #endif 714 | 715 | 716 | /** 717 | \brief Set Main Stack Pointer Limit 718 | Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure 719 | Stack Pointer Limit register hence the write is silently ignored in non-secure 720 | mode. 721 | 722 | \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 723 | \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set 724 | */ 725 | __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) 726 | { 727 | #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ 728 | (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) 729 | // without main extensions, the non-secure MSPLIM is RAZ/WI 730 | (void)MainStackPtrLimit; 731 | #else 732 | __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); 733 | #endif 734 | } 735 | 736 | 737 | #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) 738 | /** 739 | \brief Set Main Stack Pointer Limit (non-secure) 740 | Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure 741 | Stack Pointer Limit register hence the write is silently ignored. 742 | 743 | \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
744 | \param [in] MainStackPtrLimit Main Stack Pointer value to set 745 | */ 746 | __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) 747 | { 748 | #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) 749 | // without main extensions, the non-secure MSPLIM is RAZ/WI 750 | (void)MainStackPtrLimit; 751 | #else 752 | __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); 753 | #endif 754 | } 755 | #endif 756 | 757 | #endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ 758 | (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ 759 | 760 | 761 | /** 762 | \brief Get FPSCR 763 | \details Returns the current value of the Floating Point Status/Control register. 764 | \return Floating Point Status/Control register value 765 | */ 766 | __STATIC_FORCEINLINE uint32_t __get_FPSCR(void) 767 | { 768 | #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ 769 | (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) 770 | #if __has_builtin(__builtin_arm_get_fpscr) 771 | // Re-enable using built-in when GCC has been fixed 772 | // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) 773 | /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ 774 | return __builtin_arm_get_fpscr(); 775 | #else 776 | uint32_t result; 777 | 778 | __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); 779 | return(result); 780 | #endif 781 | #else 782 | return(0U); 783 | #endif 784 | } 785 | 786 | 787 | /** 788 | \brief Set FPSCR 789 | \details Assigns the given value to the Floating Point Status/Control register. 
790 | \param [in] fpscr Floating Point Status/Control value to set 791 | */ 792 | __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) 793 | { 794 | #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ 795 | (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) 796 | #if __has_builtin(__builtin_arm_set_fpscr) 797 | // Re-enable using built-in when GCC has been fixed 798 | // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) 799 | /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ 800 | __builtin_arm_set_fpscr(fpscr); 801 | #else 802 | __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); 803 | #endif 804 | #else 805 | (void)fpscr; 806 | #endif 807 | } 808 | 809 | 810 | /*@} end of CMSIS_Core_RegAccFunctions */ 811 | 812 | 813 | /* ########################## Core Instruction Access ######################### */ 814 | /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface 815 | Access to dedicated instructions 816 | @{ 817 | */ 818 | 819 | /* Define macros for porting to both thumb1 and thumb2. 820 | * For thumb1, use low register (r0-r7), specified by constraint "l" 821 | * Otherwise, use general registers, specified by constraint "r" */ 822 | #if defined (__thumb__) && !defined (__thumb2__) 823 | #define __CMSIS_GCC_OUT_REG(r) "=l" (r) 824 | #define __CMSIS_GCC_RW_REG(r) "+l" (r) 825 | #define __CMSIS_GCC_USE_REG(r) "l" (r) 826 | #else 827 | #define __CMSIS_GCC_OUT_REG(r) "=r" (r) 828 | #define __CMSIS_GCC_RW_REG(r) "+r" (r) 829 | #define __CMSIS_GCC_USE_REG(r) "r" (r) 830 | #endif 831 | 832 | /** 833 | \brief No Operation 834 | \details No Operation does nothing. This instruction can be used for code alignment purposes. 835 | */ 836 | #define __NOP() __ASM volatile ("nop") 837 | 838 | /** 839 | \brief Wait For Interrupt 840 | \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. 
 */
#define __WFI()                             __ASM volatile ("wfi")


/**
  \brief   Wait For Event
  \details Wait For Event is a hint instruction that permits the processor to enter
           a low-power state until one of a number of events occurs.
 */
#define __WFE()                             __ASM volatile ("wfe")


/**
  \brief   Send Event
  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
 */
#define __SEV()                             __ASM volatile ("sev")


/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__STATIC_FORCEINLINE void __ISB(void)
{
  /* The "memory" clobber keeps the compiler from reordering memory accesses
     across the barrier.  "0xF" is the instruction's option field (presumably
     full-system scope — NOTE(review): confirm against the ARMv7-M barrier
     option encoding). */
  __ASM volatile ("isb 0xF":::"memory");
}


/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__STATIC_FORCEINLINE void __DSB(void)
{
  /* "memory" clobber: compiler-level ordering to match the hardware barrier. */
  __ASM volatile ("dsb 0xF":::"memory");
}


/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__STATIC_FORCEINLINE void __DMB(void)
{
  /* "memory" clobber: compiler-level ordering to match the hardware barrier. */
  __ASM volatile ("dmb 0xF":::"memory");
}


/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
897 | \param [in] value Value to reverse 898 | \return Reversed value 899 | */ 900 | __STATIC_FORCEINLINE uint32_t __REV(uint32_t value) 901 | { 902 | #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) 903 | return __builtin_bswap32(value); 904 | #else 905 | uint32_t result; 906 | 907 | __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); 908 | return result; 909 | #endif 910 | } 911 | 912 | 913 | /** 914 | \brief Reverse byte order (16 bit) 915 | \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. 916 | \param [in] value Value to reverse 917 | \return Reversed value 918 | */ 919 | __STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) 920 | { 921 | uint32_t result; 922 | 923 | __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); 924 | return result; 925 | } 926 | 927 | 928 | /** 929 | \brief Reverse byte order (16 bit) 930 | \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. 931 | \param [in] value Value to reverse 932 | \return Reversed value 933 | */ 934 | __STATIC_FORCEINLINE int16_t __REVSH(int16_t value) 935 | { 936 | #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) 937 | return (int16_t)__builtin_bswap16(value); 938 | #else 939 | int16_t result; 940 | 941 | __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); 942 | return result; 943 | #endif 944 | } 945 | 946 | 947 | /** 948 | \brief Rotate Right in unsigned value (32 bit) 949 | \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. 
  \param [in]    op1  Value to rotate
  \param [in]    op2  Number of Bits to rotate
  \return               Rotated value
 */
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 %= 32U;          /* reduce first: the 32 - op2 shift below would be UB in C for op2 == 0 */
  if (op2 == 0U)
  {
    return op1;        /* rotation by 0 (or any multiple of 32) is the identity */
  }
  return (op1 >> op2) | (op1 << (32U - op2));
}


/**
  \brief   Breakpoint
  \details Causes the processor to enter Debug state.
           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
 */
/* The argument is pasted into the instruction text via '#', so it must be a
   compile-time literal, not a runtime expression. */
#define __BKPT(value)                       __ASM volatile ("bkpt "#value)


/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
   __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  /* Cores without the RBIT instruction: reverse in software, one bit per
     iteration, stopping early once the remaining value is zero. */
  uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value != 0U; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return result;
}


/**
  \brief   Count leading zeros
  \details Counts the number of leading zeros of a data value.
1008 | \param [in] value Value to count the leading zeros 1009 | \return number of leading zeros in value 1010 | */ 1011 | #define __CLZ (uint8_t)__builtin_clz 1012 | 1013 | 1014 | #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ 1015 | (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ 1016 | (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ 1017 | (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) 1018 | /** 1019 | \brief LDR Exclusive (8 bit) 1020 | \details Executes a exclusive LDR instruction for 8 bit value. 1021 | \param [in] ptr Pointer to data 1022 | \return value of type uint8_t at (*ptr) 1023 | */ 1024 | __STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) 1025 | { 1026 | uint32_t result; 1027 | 1028 | #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) 1029 | __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); 1030 | #else 1031 | /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not 1032 | accepted by assembler. So has to use following less efficient pattern. 1033 | */ 1034 | __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); 1035 | #endif 1036 | return ((uint8_t) result); /* Add explicit type cast here */ 1037 | } 1038 | 1039 | 1040 | /** 1041 | \brief LDR Exclusive (16 bit) 1042 | \details Executes a exclusive LDR instruction for 16 bit values. 1043 | \param [in] ptr Pointer to data 1044 | \return value of type uint16_t at (*ptr) 1045 | */ 1046 | __STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) 1047 | { 1048 | uint32_t result; 1049 | 1050 | #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) 1051 | __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); 1052 | #else 1053 | /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not 1054 | accepted by assembler. So has to use following less efficient pattern. 
1055 | */ 1056 | __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); 1057 | #endif 1058 | return ((uint16_t) result); /* Add explicit type cast here */ 1059 | } 1060 | 1061 | 1062 | /** 1063 | \brief LDR Exclusive (32 bit) 1064 | \details Executes a exclusive LDR instruction for 32 bit values. 1065 | \param [in] ptr Pointer to data 1066 | \return value of type uint32_t at (*ptr) 1067 | */ 1068 | __STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) 1069 | { 1070 | uint32_t result; 1071 | 1072 | __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); 1073 | return(result); 1074 | } 1075 | 1076 | 1077 | /** 1078 | \brief STR Exclusive (8 bit) 1079 | \details Executes a exclusive STR instruction for 8 bit values. 1080 | \param [in] value Value to store 1081 | \param [in] ptr Pointer to location 1082 | \return 0 Function succeeded 1083 | \return 1 Function failed 1084 | */ 1085 | __STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) 1086 | { 1087 | uint32_t result; 1088 | 1089 | __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); 1090 | return(result); 1091 | } 1092 | 1093 | 1094 | /** 1095 | \brief STR Exclusive (16 bit) 1096 | \details Executes a exclusive STR instruction for 16 bit values. 1097 | \param [in] value Value to store 1098 | \param [in] ptr Pointer to location 1099 | \return 0 Function succeeded 1100 | \return 1 Function failed 1101 | */ 1102 | __STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) 1103 | { 1104 | uint32_t result; 1105 | 1106 | __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); 1107 | return(result); 1108 | } 1109 | 1110 | 1111 | /** 1112 | \brief STR Exclusive (32 bit) 1113 | \details Executes a exclusive STR instruction for 32 bit values. 
1114 | \param [in] value Value to store 1115 | \param [in] ptr Pointer to location 1116 | \return 0 Function succeeded 1117 | \return 1 Function failed 1118 | */ 1119 | __STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) 1120 | { 1121 | uint32_t result; 1122 | 1123 | __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); 1124 | return(result); 1125 | } 1126 | 1127 | 1128 | /** 1129 | \brief Remove the exclusive lock 1130 | \details Removes the exclusive lock which is created by LDREX. 1131 | */ 1132 | __STATIC_FORCEINLINE void __CLREX(void) 1133 | { 1134 | __ASM volatile ("clrex" ::: "memory"); 1135 | } 1136 | 1137 | #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ 1138 | (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ 1139 | (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ 1140 | (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ 1141 | 1142 | 1143 | #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ 1144 | (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ 1145 | (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) 1146 | /** 1147 | \brief Signed Saturate 1148 | \details Saturates a signed value. 1149 | \param [in] ARG1 Value to be saturated 1150 | \param [in] ARG2 Bit position to saturate to (1..32) 1151 | \return Saturated value 1152 | */ 1153 | #define __SSAT(ARG1,ARG2) \ 1154 | __extension__ \ 1155 | ({ \ 1156 | int32_t __RES, __ARG1 = (ARG1); \ 1157 | __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ 1158 | __RES; \ 1159 | }) 1160 | 1161 | 1162 | /** 1163 | \brief Unsigned Saturate 1164 | \details Saturates an unsigned value. 
1165 | \param [in] ARG1 Value to be saturated 1166 | \param [in] ARG2 Bit position to saturate to (0..31) 1167 | \return Saturated value 1168 | */ 1169 | #define __USAT(ARG1,ARG2) \ 1170 | __extension__ \ 1171 | ({ \ 1172 | uint32_t __RES, __ARG1 = (ARG1); \ 1173 | __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ 1174 | __RES; \ 1175 | }) 1176 | 1177 | 1178 | /** 1179 | \brief Rotate Right with Extend (32 bit) 1180 | \details Moves each bit of a bitstring right by one bit. 1181 | The carry input is shifted in at the left end of the bitstring. 1182 | \param [in] value Value to rotate 1183 | \return Rotated value 1184 | */ 1185 | __STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) 1186 | { 1187 | uint32_t result; 1188 | 1189 | __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); 1190 | return(result); 1191 | } 1192 | 1193 | 1194 | /** 1195 | \brief LDRT Unprivileged (8 bit) 1196 | \details Executes a Unprivileged LDRT instruction for 8 bit value. 1197 | \param [in] ptr Pointer to data 1198 | \return value of type uint8_t at (*ptr) 1199 | */ 1200 | __STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) 1201 | { 1202 | uint32_t result; 1203 | 1204 | #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) 1205 | __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); 1206 | #else 1207 | /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not 1208 | accepted by assembler. So has to use following less efficient pattern. 1209 | */ 1210 | __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); 1211 | #endif 1212 | return ((uint8_t) result); /* Add explicit type cast here */ 1213 | } 1214 | 1215 | 1216 | /** 1217 | \brief LDRT Unprivileged (16 bit) 1218 | \details Executes a Unprivileged LDRT instruction for 16 bit values. 
1219 | \param [in] ptr Pointer to data 1220 | \return value of type uint16_t at (*ptr) 1221 | */ 1222 | __STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) 1223 | { 1224 | uint32_t result; 1225 | 1226 | #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) 1227 | __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); 1228 | #else 1229 | /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not 1230 | accepted by assembler. So has to use following less efficient pattern. 1231 | */ 1232 | __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); 1233 | #endif 1234 | return ((uint16_t) result); /* Add explicit type cast here */ 1235 | } 1236 | 1237 | 1238 | /** 1239 | \brief LDRT Unprivileged (32 bit) 1240 | \details Executes a Unprivileged LDRT instruction for 32 bit values. 1241 | \param [in] ptr Pointer to data 1242 | \return value of type uint32_t at (*ptr) 1243 | */ 1244 | __STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) 1245 | { 1246 | uint32_t result; 1247 | 1248 | __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); 1249 | return(result); 1250 | } 1251 | 1252 | 1253 | /** 1254 | \brief STRT Unprivileged (8 bit) 1255 | \details Executes a Unprivileged STRT instruction for 8 bit values. 1256 | \param [in] value Value to store 1257 | \param [in] ptr Pointer to location 1258 | */ 1259 | __STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) 1260 | { 1261 | __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); 1262 | } 1263 | 1264 | 1265 | /** 1266 | \brief STRT Unprivileged (16 bit) 1267 | \details Executes a Unprivileged STRT instruction for 16 bit values. 
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
{
   __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (32 bit)
  \details Executes a Unprivileged STRT instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
{
   __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
}

#else  /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */

/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
/* C fallback for cores without the SSAT instruction. */
__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
{
  if ((sat >= 1U) && (sat <= 32U))
  {
    /* Unsigned arithmetic so sat == 32 ((1U << 31) - 1U) does not overflow. */
    const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
    const int32_t min = -1 - max ;   /* two's-complement minimum is -(max + 1) */
    if (val > max)
    {
      return max;
    }
    else if (val < min)
    {
      return min;
    }
  }
  /* Out-of-range sat values leave the input unchanged. */
  return val;
}

/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
1320 | \param [in] value Value to be saturated 1321 | \param [in] sat Bit position to saturate to (0..31) 1322 | \return Saturated value 1323 | */ 1324 | __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) 1325 | { 1326 | if (sat <= 31U) 1327 | { 1328 | const uint32_t max = ((1U << sat) - 1U); 1329 | if (val > (int32_t)max) 1330 | { 1331 | return max; 1332 | } 1333 | else if (val < 0) 1334 | { 1335 | return 0U; 1336 | } 1337 | } 1338 | return (uint32_t)val; 1339 | } 1340 | 1341 | #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ 1342 | (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ 1343 | (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ 1344 | 1345 | 1346 | #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ 1347 | (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) 1348 | /** 1349 | \brief Load-Acquire (8 bit) 1350 | \details Executes a LDAB instruction for 8 bit value. 1351 | \param [in] ptr Pointer to data 1352 | \return value of type uint8_t at (*ptr) 1353 | */ 1354 | __STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) 1355 | { 1356 | uint32_t result; 1357 | 1358 | __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) ); 1359 | return ((uint8_t) result); 1360 | } 1361 | 1362 | 1363 | /** 1364 | \brief Load-Acquire (16 bit) 1365 | \details Executes a LDAH instruction for 16 bit values. 1366 | \param [in] ptr Pointer to data 1367 | \return value of type uint16_t at (*ptr) 1368 | */ 1369 | __STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) 1370 | { 1371 | uint32_t result; 1372 | 1373 | __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) ); 1374 | return ((uint16_t) result); 1375 | } 1376 | 1377 | 1378 | /** 1379 | \brief Load-Acquire (32 bit) 1380 | \details Executes a LDA instruction for 32 bit values. 
1381 | \param [in] ptr Pointer to data 1382 | \return value of type uint32_t at (*ptr) 1383 | */ 1384 | __STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) 1385 | { 1386 | uint32_t result; 1387 | 1388 | __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) ); 1389 | return(result); 1390 | } 1391 | 1392 | 1393 | /** 1394 | \brief Store-Release (8 bit) 1395 | \details Executes a STLB instruction for 8 bit values. 1396 | \param [in] value Value to store 1397 | \param [in] ptr Pointer to location 1398 | */ 1399 | __STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) 1400 | { 1401 | __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); 1402 | } 1403 | 1404 | 1405 | /** 1406 | \brief Store-Release (16 bit) 1407 | \details Executes a STLH instruction for 16 bit values. 1408 | \param [in] value Value to store 1409 | \param [in] ptr Pointer to location 1410 | */ 1411 | __STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) 1412 | { 1413 | __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); 1414 | } 1415 | 1416 | 1417 | /** 1418 | \brief Store-Release (32 bit) 1419 | \details Executes a STL instruction for 32 bit values. 1420 | \param [in] value Value to store 1421 | \param [in] ptr Pointer to location 1422 | */ 1423 | __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) 1424 | { 1425 | __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); 1426 | } 1427 | 1428 | 1429 | /** 1430 | \brief Load-Acquire Exclusive (8 bit) 1431 | \details Executes a LDAB exclusive instruction for 8 bit value. 
1432 | \param [in] ptr Pointer to data 1433 | \return value of type uint8_t at (*ptr) 1434 | */ 1435 | __STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) 1436 | { 1437 | uint32_t result; 1438 | 1439 | __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) ); 1440 | return ((uint8_t) result); 1441 | } 1442 | 1443 | 1444 | /** 1445 | \brief Load-Acquire Exclusive (16 bit) 1446 | \details Executes a LDAH exclusive instruction for 16 bit values. 1447 | \param [in] ptr Pointer to data 1448 | \return value of type uint16_t at (*ptr) 1449 | */ 1450 | __STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) 1451 | { 1452 | uint32_t result; 1453 | 1454 | __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) ); 1455 | return ((uint16_t) result); 1456 | } 1457 | 1458 | 1459 | /** 1460 | \brief Load-Acquire Exclusive (32 bit) 1461 | \details Executes a LDA exclusive instruction for 32 bit values. 1462 | \param [in] ptr Pointer to data 1463 | \return value of type uint32_t at (*ptr) 1464 | */ 1465 | __STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) 1466 | { 1467 | uint32_t result; 1468 | 1469 | __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) ); 1470 | return(result); 1471 | } 1472 | 1473 | 1474 | /** 1475 | \brief Store-Release Exclusive (8 bit) 1476 | \details Executes a STLB exclusive instruction for 8 bit values. 1477 | \param [in] value Value to store 1478 | \param [in] ptr Pointer to location 1479 | \return 0 Function succeeded 1480 | \return 1 Function failed 1481 | */ 1482 | __STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) 1483 | { 1484 | uint32_t result; 1485 | 1486 | __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); 1487 | return(result); 1488 | } 1489 | 1490 | 1491 | /** 1492 | \brief Store-Release Exclusive (16 bit) 1493 | \details Executes a STLH exclusive instruction for 16 bit values. 
1494 | \param [in] value Value to store 1495 | \param [in] ptr Pointer to location 1496 | \return 0 Function succeeded 1497 | \return 1 Function failed 1498 | */ 1499 | __STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) 1500 | { 1501 | uint32_t result; 1502 | 1503 | __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); 1504 | return(result); 1505 | } 1506 | 1507 | 1508 | /** 1509 | \brief Store-Release Exclusive (32 bit) 1510 | \details Executes a STL exclusive instruction for 32 bit values. 1511 | \param [in] value Value to store 1512 | \param [in] ptr Pointer to location 1513 | \return 0 Function succeeded 1514 | \return 1 Function failed 1515 | */ 1516 | __STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) 1517 | { 1518 | uint32_t result; 1519 | 1520 | __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); 1521 | return(result); 1522 | } 1523 | 1524 | #endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ 1525 | (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ 1526 | 1527 | /*@}*/ /* end of group CMSIS_Core_InstructionInterface */ 1528 | 1529 | 1530 | /* ################### Compiler specific Intrinsics ########################### */ 1531 | /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics 1532 | Access to dedicated SIMD instructions 1533 | @{ 1534 | */ 1535 | 1536 | #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) 1537 | 1538 | __STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) 1539 | { 1540 | uint32_t result; 1541 | 1542 | __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1543 | return(result); 1544 | } 1545 | 1546 | __STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) 1547 | { 1548 | uint32_t result; 1549 | 1550 | __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1551 | return(result); 
1552 | } 1553 | 1554 | __STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) 1555 | { 1556 | uint32_t result; 1557 | 1558 | __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1559 | return(result); 1560 | } 1561 | 1562 | __STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) 1563 | { 1564 | uint32_t result; 1565 | 1566 | __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1567 | return(result); 1568 | } 1569 | 1570 | __STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) 1571 | { 1572 | uint32_t result; 1573 | 1574 | __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1575 | return(result); 1576 | } 1577 | 1578 | __STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) 1579 | { 1580 | uint32_t result; 1581 | 1582 | __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1583 | return(result); 1584 | } 1585 | 1586 | 1587 | __STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) 1588 | { 1589 | uint32_t result; 1590 | 1591 | __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1592 | return(result); 1593 | } 1594 | 1595 | __STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) 1596 | { 1597 | uint32_t result; 1598 | 1599 | __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1600 | return(result); 1601 | } 1602 | 1603 | __STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) 1604 | { 1605 | uint32_t result; 1606 | 1607 | __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1608 | return(result); 1609 | } 1610 | 1611 | __STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) 1612 | { 1613 | uint32_t result; 1614 | 1615 | __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1616 | return(result); 1617 | } 1618 | 1619 | __STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) 1620 | 
{ 1621 | uint32_t result; 1622 | 1623 | __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1624 | return(result); 1625 | } 1626 | 1627 | __STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) 1628 | { 1629 | uint32_t result; 1630 | 1631 | __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1632 | return(result); 1633 | } 1634 | 1635 | 1636 | __STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) 1637 | { 1638 | uint32_t result; 1639 | 1640 | __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1641 | return(result); 1642 | } 1643 | 1644 | __STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) 1645 | { 1646 | uint32_t result; 1647 | 1648 | __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1649 | return(result); 1650 | } 1651 | 1652 | __STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) 1653 | { 1654 | uint32_t result; 1655 | 1656 | __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1657 | return(result); 1658 | } 1659 | 1660 | __STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) 1661 | { 1662 | uint32_t result; 1663 | 1664 | __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1665 | return(result); 1666 | } 1667 | 1668 | __STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) 1669 | { 1670 | uint32_t result; 1671 | 1672 | __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1673 | return(result); 1674 | } 1675 | 1676 | __STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) 1677 | { 1678 | uint32_t result; 1679 | 1680 | __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1681 | return(result); 1682 | } 1683 | 1684 | __STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) 1685 | { 1686 | uint32_t result; 1687 | 1688 | __ASM volatile ("ssub16 %0, %1, %2" : "=r" 
(result) : "r" (op1), "r" (op2) ); 1689 | return(result); 1690 | } 1691 | 1692 | __STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) 1693 | { 1694 | uint32_t result; 1695 | 1696 | __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1697 | return(result); 1698 | } 1699 | 1700 | __STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) 1701 | { 1702 | uint32_t result; 1703 | 1704 | __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1705 | return(result); 1706 | } 1707 | 1708 | __STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) 1709 | { 1710 | uint32_t result; 1711 | 1712 | __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1713 | return(result); 1714 | } 1715 | 1716 | __STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) 1717 | { 1718 | uint32_t result; 1719 | 1720 | __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1721 | return(result); 1722 | } 1723 | 1724 | __STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) 1725 | { 1726 | uint32_t result; 1727 | 1728 | __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1729 | return(result); 1730 | } 1731 | 1732 | __STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) 1733 | { 1734 | uint32_t result; 1735 | 1736 | __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1737 | return(result); 1738 | } 1739 | 1740 | __STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) 1741 | { 1742 | uint32_t result; 1743 | 1744 | __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1745 | return(result); 1746 | } 1747 | 1748 | __STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) 1749 | { 1750 | uint32_t result; 1751 | 1752 | __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1753 | return(result); 1754 | } 1755 | 1756 | 
__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) 1757 | { 1758 | uint32_t result; 1759 | 1760 | __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1761 | return(result); 1762 | } 1763 | 1764 | __STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) 1765 | { 1766 | uint32_t result; 1767 | 1768 | __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1769 | return(result); 1770 | } 1771 | 1772 | __STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) 1773 | { 1774 | uint32_t result; 1775 | 1776 | __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1777 | return(result); 1778 | } 1779 | 1780 | __STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) 1781 | { 1782 | uint32_t result; 1783 | 1784 | __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1785 | return(result); 1786 | } 1787 | 1788 | __STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) 1789 | { 1790 | uint32_t result; 1791 | 1792 | __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1793 | return(result); 1794 | } 1795 | 1796 | __STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) 1797 | { 1798 | uint32_t result; 1799 | 1800 | __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1801 | return(result); 1802 | } 1803 | 1804 | __STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) 1805 | { 1806 | uint32_t result; 1807 | 1808 | __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1809 | return(result); 1810 | } 1811 | 1812 | __STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) 1813 | { 1814 | uint32_t result; 1815 | 1816 | __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1817 | return(result); 1818 | } 1819 | 1820 | __STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) 1821 | { 1822 | uint32_t result; 1823 | 1824 | __ASM 
volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1825 | return(result); 1826 | } 1827 | 1828 | __STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) 1829 | { 1830 | uint32_t result; 1831 | 1832 | __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1833 | return(result); 1834 | } 1835 | 1836 | __STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) 1837 | { 1838 | uint32_t result; 1839 | 1840 | __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); 1841 | return(result); 1842 | } 1843 | 1844 | #define __SSAT16(ARG1,ARG2) \ 1845 | ({ \ 1846 | int32_t __RES, __ARG1 = (ARG1); \ 1847 | __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ 1848 | __RES; \ 1849 | }) 1850 | 1851 | #define __USAT16(ARG1,ARG2) \ 1852 | ({ \ 1853 | uint32_t __RES, __ARG1 = (ARG1); \ 1854 | __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ 1855 | __RES; \ 1856 | }) 1857 | 1858 | __STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) 1859 | { 1860 | uint32_t result; 1861 | 1862 | __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); 1863 | return(result); 1864 | } 1865 | 1866 | __STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) 1867 | { 1868 | uint32_t result; 1869 | 1870 | __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1871 | return(result); 1872 | } 1873 | 1874 | __STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) 1875 | { 1876 | uint32_t result; 1877 | 1878 | __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); 1879 | return(result); 1880 | } 1881 | 1882 | __STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) 1883 | { 1884 | uint32_t result; 1885 | 1886 | __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1887 | return(result); 1888 | } 1889 | 1890 | __STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) 1891 | 
{ 1892 | uint32_t result; 1893 | 1894 | __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1895 | return(result); 1896 | } 1897 | 1898 | __STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) 1899 | { 1900 | uint32_t result; 1901 | 1902 | __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1903 | return(result); 1904 | } 1905 | 1906 | __STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) 1907 | { 1908 | uint32_t result; 1909 | 1910 | __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); 1911 | return(result); 1912 | } 1913 | 1914 | __STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) 1915 | { 1916 | uint32_t result; 1917 | 1918 | __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); 1919 | return(result); 1920 | } 1921 | 1922 | __STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) 1923 | { 1924 | union llreg_u{ 1925 | uint32_t w32[2]; 1926 | uint64_t w64; 1927 | } llr; 1928 | llr.w64 = acc; 1929 | 1930 | #ifndef __ARMEB__ /* Little endian */ 1931 | __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); 1932 | #else /* Big endian */ 1933 | __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); 1934 | #endif 1935 | 1936 | return(llr.w64); 1937 | } 1938 | 1939 | __STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) 1940 | { 1941 | union llreg_u{ 1942 | uint32_t w32[2]; 1943 | uint64_t w64; 1944 | } llr; 1945 | llr.w64 = acc; 1946 | 1947 | #ifndef __ARMEB__ /* Little endian */ 1948 | __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); 1949 | #else /* Big endian 
*/ 1950 | __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); 1951 | #endif 1952 | 1953 | return(llr.w64); 1954 | } 1955 | 1956 | __STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) 1957 | { 1958 | uint32_t result; 1959 | 1960 | __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1961 | return(result); 1962 | } 1963 | 1964 | __STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) 1965 | { 1966 | uint32_t result; 1967 | 1968 | __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 1969 | return(result); 1970 | } 1971 | 1972 | __STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) 1973 | { 1974 | uint32_t result; 1975 | 1976 | __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); 1977 | return(result); 1978 | } 1979 | 1980 | __STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) 1981 | { 1982 | uint32_t result; 1983 | 1984 | __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); 1985 | return(result); 1986 | } 1987 | 1988 | __STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) 1989 | { 1990 | union llreg_u{ 1991 | uint32_t w32[2]; 1992 | uint64_t w64; 1993 | } llr; 1994 | llr.w64 = acc; 1995 | 1996 | #ifndef __ARMEB__ /* Little endian */ 1997 | __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); 1998 | #else /* Big endian */ 1999 | __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); 2000 | #endif 2001 | 2002 | return(llr.w64); 2003 | } 2004 | 2005 | __STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) 2006 | { 2007 | union llreg_u{ 2008 | uint32_t 
w32[2]; 2009 | uint64_t w64; 2010 | } llr; 2011 | llr.w64 = acc; 2012 | 2013 | #ifndef __ARMEB__ /* Little endian */ 2014 | __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); 2015 | #else /* Big endian */ 2016 | __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); 2017 | #endif 2018 | 2019 | return(llr.w64); 2020 | } 2021 | 2022 | __STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) 2023 | { 2024 | uint32_t result; 2025 | 2026 | __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 2027 | return(result); 2028 | } 2029 | 2030 | __STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) 2031 | { 2032 | int32_t result; 2033 | 2034 | __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 2035 | return(result); 2036 | } 2037 | 2038 | __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) 2039 | { 2040 | int32_t result; 2041 | 2042 | __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); 2043 | return(result); 2044 | } 2045 | 2046 | #if 0 2047 | #define __PKHBT(ARG1,ARG2,ARG3) \ 2048 | ({ \ 2049 | uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ 2050 | __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ 2051 | __RES; \ 2052 | }) 2053 | 2054 | #define __PKHTB(ARG1,ARG2,ARG3) \ 2055 | ({ \ 2056 | uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ 2057 | if (ARG3 == 0) \ 2058 | __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ 2059 | else \ 2060 | __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ 2061 | __RES; \ 2062 | }) 2063 | #endif 2064 | 2065 | #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ 2066 | ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) 2067 | 2068 | 
#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ 2069 | ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) 2070 | 2071 | __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) 2072 | { 2073 | int32_t result; 2074 | 2075 | __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); 2076 | return(result); 2077 | } 2078 | 2079 | #endif /* (__ARM_FEATURE_DSP == 1) */ 2080 | /*@} end of group CMSIS_SIMD_intrinsics */ 2081 | 2082 | 2083 | #pragma GCC diagnostic pop 2084 | 2085 | #endif /* __CMSIS_GCC_H */ 2086 | -------------------------------------------------------------------------------- /device_headers/cmsis_version.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************//** 2 | * @file cmsis_version.h 3 | * @brief CMSIS Core(M) Version definitions 4 | * @version V5.0.2 5 | * @date 19. April 2017 6 | ******************************************************************************/ 7 | /* 8 | * Copyright (c) 2009-2017 ARM Limited. All rights reserved. 9 | * 10 | * SPDX-License-Identifier: Apache-2.0 11 | * 12 | * Licensed under the Apache License, Version 2.0 (the License); you may 13 | * not use this file except in compliance with the License. 14 | * You may obtain a copy of the License at 15 | * 16 | * www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT 20 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 
23 | */ 24 | 25 | #if defined ( __ICCARM__ ) 26 | #pragma system_include /* treat file as system include file for MISRA check */ 27 | #elif defined (__clang__) 28 | #pragma clang system_header /* treat file as system include file */ 29 | #endif 30 | 31 | #ifndef __CMSIS_VERSION_H 32 | #define __CMSIS_VERSION_H 33 | 34 | /* CMSIS Version definitions */ 35 | #define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */ 36 | #define __CM_CMSIS_VERSION_SUB ( 1U) /*!< [15:0] CMSIS Core(M) sub version */ 37 | #define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ 38 | __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */ 39 | #endif 40 | -------------------------------------------------------------------------------- /device_headers/mpu_armv7.h: -------------------------------------------------------------------------------- 1 | /****************************************************************************** 2 | * @file mpu_armv7.h 3 | * @brief CMSIS MPU API for Armv7-M MPU 4 | * @version V5.0.4 5 | * @date 10. January 2018 6 | ******************************************************************************/ 7 | /* 8 | * Copyright (c) 2017-2018 Arm Limited. All rights reserved. 9 | * 10 | * SPDX-License-Identifier: Apache-2.0 11 | * 12 | * Licensed under the Apache License, Version 2.0 (the License); you may 13 | * not use this file except in compliance with the License. 14 | * You may obtain a copy of the License at 15 | * 16 | * www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT 20 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 
23 | */ 24 | 25 | #if defined ( __ICCARM__ ) 26 | #pragma system_include /* treat file as system include file for MISRA check */ 27 | #elif defined (__clang__) 28 | #pragma clang system_header /* treat file as system include file */ 29 | #endif 30 | 31 | #ifndef ARM_MPU_ARMV7_H 32 | #define ARM_MPU_ARMV7_H 33 | 34 | #define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U) ///!< MPU Region Size 32 Bytes 35 | #define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U) ///!< MPU Region Size 64 Bytes 36 | #define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U) ///!< MPU Region Size 128 Bytes 37 | #define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U) ///!< MPU Region Size 256 Bytes 38 | #define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U) ///!< MPU Region Size 512 Bytes 39 | #define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U) ///!< MPU Region Size 1 KByte 40 | #define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) ///!< MPU Region Size 2 KBytes 41 | #define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) ///!< MPU Region Size 4 KBytes 42 | #define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) ///!< MPU Region Size 8 KBytes 43 | #define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) ///!< MPU Region Size 16 KBytes 44 | #define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) ///!< MPU Region Size 32 KBytes 45 | #define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) ///!< MPU Region Size 64 KBytes 46 | #define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U) ///!< MPU Region Size 128 KBytes 47 | #define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U) ///!< MPU Region Size 256 KBytes 48 | #define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U) ///!< MPU Region Size 512 KBytes 49 | #define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U) ///!< MPU Region Size 1 MByte 50 | #define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U) ///!< MPU Region Size 2 MBytes 51 | #define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U) ///!< MPU Region Size 4 MBytes 52 | #define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U) ///!< MPU Region Size 8 MBytes 53 | #define ARM_MPU_REGION_SIZE_16MB 
((uint8_t)0x17U) ///!< MPU Region Size 16 MBytes 54 | #define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U) ///!< MPU Region Size 32 MBytes 55 | #define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U) ///!< MPU Region Size 64 MBytes 56 | #define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) ///!< MPU Region Size 128 MBytes 57 | #define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) ///!< MPU Region Size 256 MBytes 58 | #define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) ///!< MPU Region Size 512 MBytes 59 | #define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) ///!< MPU Region Size 1 GByte 60 | #define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) ///!< MPU Region Size 2 GBytes 61 | #define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) ///!< MPU Region Size 4 GBytes 62 | 63 | #define ARM_MPU_AP_NONE 0U ///!< MPU Access Permission no access 64 | #define ARM_MPU_AP_PRIV 1U ///!< MPU Access Permission privileged access only 65 | #define ARM_MPU_AP_URO 2U ///!< MPU Access Permission unprivileged access read-only 66 | #define ARM_MPU_AP_FULL 3U ///!< MPU Access Permission full access 67 | #define ARM_MPU_AP_PRO 5U ///!< MPU Access Permission privileged access read-only 68 | #define ARM_MPU_AP_RO 6U ///!< MPU Access Permission read-only access 69 | 70 | /** MPU Region Base Address Register Value 71 | * 72 | * \param Region The region to be configured, number 0 to 15. 73 | * \param BaseAddress The base address for the region. 74 | */ 75 | #define ARM_MPU_RBAR(Region, BaseAddress) \ 76 | (((BaseAddress) & MPU_RBAR_ADDR_Msk) | \ 77 | ((Region) & MPU_RBAR_REGION_Msk) | \ 78 | (MPU_RBAR_VALID_Msk)) 79 | 80 | /** 81 | * MPU Memory Access Attributes 82 | * 83 | * \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral. 84 | * \param IsShareable Region is shareable between multiple bus masters. 85 | * \param IsCacheable Region is cacheable, i.e. its value may be kept in cache. 86 | * \param IsBufferable Region is bufferable, i.e. 
using write-back caching. Cacheable but non-bufferable regions use write-through policy. 87 | */ 88 | #define ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable) \ 89 | ((((TypeExtField ) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \ 90 | (((IsShareable ) << MPU_RASR_S_Pos) & MPU_RASR_S_Msk) | \ 91 | (((IsCacheable ) << MPU_RASR_C_Pos) & MPU_RASR_C_Msk) | \ 92 | (((IsBufferable ) << MPU_RASR_B_Pos) & MPU_RASR_B_Msk)) 93 | 94 | /** 95 | * MPU Region Attribute and Size Register Value 96 | * 97 | * \param DisableExec Instruction access disable bit, 1= disable instruction fetches. 98 | * \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode. 99 | * \param AccessAttributes Memory access attribution, see \ref ARM_MPU_ACCESS_. 100 | * \param SubRegionDisable Sub-region disable field. 101 | * \param Size Region size of the region to be configured, for example 4K, 8K. 102 | */ 103 | #define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \ 104 | ((((DisableExec ) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \ 105 | (((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \ 106 | (((AccessAttributes) ) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) 107 | 108 | /** 109 | * MPU Region Attribute and Size Register Value 110 | * 111 | * \param DisableExec Instruction access disable bit, 1= disable instruction fetches. 112 | * \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode. 113 | * \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral. 114 | * \param IsShareable Region is shareable between multiple bus masters. 115 | * \param IsCacheable Region is cacheable, i.e. its value may be kept in cache. 116 | * \param IsBufferable Region is bufferable, i.e. using write-back caching. 
Cacheable but non-bufferable regions use write-through policy. 117 | * \param SubRegionDisable Sub-region disable field. 118 | * \param Size Region size of the region to be configured, for example 4K, 8K. 119 | */ 120 | #define ARM_MPU_RASR(DisableExec, AccessPermission, TypeExtField, IsShareable, IsCacheable, IsBufferable, SubRegionDisable, Size) \ 121 | ARM_MPU_RASR_EX(DisableExec, AccessPermission, ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable), SubRegionDisable, Size) 122 | 123 | /** 124 | * MPU Memory Access Attribute for strongly ordered memory. 125 | * - TEX: 000b 126 | * - Shareable 127 | * - Non-cacheable 128 | * - Non-bufferable 129 | */ 130 | #define ARM_MPU_ACCESS_ORDERED ARM_MPU_ACCESS_(0U, 1U, 0U, 0U) 131 | 132 | /** 133 | * MPU Memory Access Attribute for device memory. 134 | * - TEX: 000b (if non-shareable) or 010b (if shareable) 135 | * - Shareable or non-shareable 136 | * - Non-cacheable 137 | * - Bufferable (if shareable) or non-bufferable (if non-shareable) 138 | * 139 | * \param IsShareable Configures the device memory as shareable or non-shareable. 140 | */ 141 | #define ARM_MPU_ACCESS_DEVICE(IsShareable) ((IsShareable) ? ARM_MPU_ACCESS_(0U, 1U, 0U, 1U) : ARM_MPU_ACCESS_(2U, 0U, 0U, 0U)) 142 | 143 | /** 144 | * MPU Memory Access Attribute for normal memory. 145 | * - TEX: 1BBb (reflecting outer cacheability rules) 146 | * - Shareable or non-shareable 147 | * - Cacheable or non-cacheable (reflecting inner cacheability rules) 148 | * - Bufferable or non-bufferable (reflecting inner cacheability rules) 149 | * 150 | * \param OuterCp Configures the outer cache policy. 151 | * \param InnerCp Configures the inner cache policy. 152 | * \param IsShareable Configures the memory as shareable or non-shareable. 
153 | */ 154 | #define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) & 2U), ((InnerCp) & 1U)) 155 | 156 | /** 157 | * MPU Memory Access Attribute non-cacheable policy. 158 | */ 159 | #define ARM_MPU_CACHEP_NOCACHE 0U 160 | 161 | /** 162 | * MPU Memory Access Attribute write-back, write and read allocate policy. 163 | */ 164 | #define ARM_MPU_CACHEP_WB_WRA 1U 165 | 166 | /** 167 | * MPU Memory Access Attribute write-through, no write allocate policy. 168 | */ 169 | #define ARM_MPU_CACHEP_WT_NWA 2U 170 | 171 | /** 172 | * MPU Memory Access Attribute write-back, no write allocate policy. 173 | */ 174 | #define ARM_MPU_CACHEP_WB_NWA 3U 175 | 176 | 177 | /** 178 | * Struct for a single MPU Region 179 | */ 180 | typedef struct { 181 | uint32_t RBAR; //!< The region base address register value (RBAR) 182 | uint32_t RASR; //!< The region attribute and size register value (RASR) \ref MPU_RASR 183 | } ARM_MPU_Region_t; 184 | 185 | /** Enable the MPU. 186 | * \param MPU_Control Default access permissions for unconfigured regions. 187 | */ 188 | __STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) 189 | { 190 | __DSB(); 191 | __ISB(); 192 | MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; 193 | #ifdef SCB_SHCSR_MEMFAULTENA_Msk 194 | SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; 195 | #endif 196 | } 197 | 198 | /** Disable the MPU. 199 | */ 200 | __STATIC_INLINE void ARM_MPU_Disable(void) 201 | { 202 | __DSB(); 203 | __ISB(); 204 | #ifdef SCB_SHCSR_MEMFAULTENA_Msk 205 | SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; 206 | #endif 207 | MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; 208 | } 209 | 210 | /** Clear and disable the given MPU region. 211 | * \param rnr Region number to be cleared. 212 | */ 213 | __STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) 214 | { 215 | MPU->RNR = rnr; 216 | MPU->RASR = 0U; 217 | } 218 | 219 | /** Configure an MPU region. 220 | * \param rbar Value for RBAR register. 
* \param rasr Value for RASR register.
 */
__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr)
{
  MPU->RBAR = rbar;
  MPU->RASR = rasr;
}

/** Configure the given MPU region.
 * \param rnr Region number to be configured.
 * \param rbar Value for RBAR register.
 * \param rasr Value for RASR register.
 */
/* (doc fix: the parameter is 'rasr' / the RASR register -- the old comments
 *  said 'rsar' / 'RSAR', which matches nothing in the code.) */
__STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr)
{
  MPU->RNR = rnr;
  MPU->RBAR = rbar;
  MPU->RASR = rasr;
}

/** Memcopy with strictly ordered memory access, e.g. for register targets.
 * \param dst Destination data is copied to.
 * \param src Source data is copied from.
 * \param len Amount of data words to be copied.
 */
/* The volatile destination forces one 32-bit write per element, in order --
 * this is what lets ARM_MPU_Load stream rows into the MPU register aliases. */
__STATIC_INLINE void orderedCpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len)
{
  uint32_t i;
  for (i = 0U; i < len; ++i)
  {
    dst[i] = src[i];
  }
}

/** Load the given number of MPU regions from a table.
 * \param table Pointer to the MPU configuration table.
 * \param cnt Amount of regions to be configured.
258 | */ 259 | __STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt) 260 | { 261 | const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; 262 | while (cnt > MPU_TYPE_RALIASES) { 263 | orderedCpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize); 264 | table += MPU_TYPE_RALIASES; 265 | cnt -= MPU_TYPE_RALIASES; 266 | } 267 | orderedCpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize); 268 | } 269 | 270 | #endif 271 | -------------------------------------------------------------------------------- /device_headers/stm32f723xx.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRansohoff/STM32F723E_QSPI_Example/451350d140572754e36909ffe6cea879dd68cf14/device_headers/stm32f723xx.h -------------------------------------------------------------------------------- /device_headers/stm32f7xx.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRansohoff/STM32F723E_QSPI_Example/451350d140572754e36909ffe6cea879dd68cf14/device_headers/stm32f7xx.h -------------------------------------------------------------------------------- /device_headers/system_stm32f7xx.h: -------------------------------------------------------------------------------- 1 | #ifndef _VVC_DUMMY_STM32_SYSTEM 2 | #define _VVC_DUMMY_STM32_SYSTEM 3 | 4 | /* ST seems to use this file for keeping track of clock settings 5 | * in their applications. But we're managing that ourselves. 6 | * This file only exists so we don't have to comment out a line 7 | * in all of the device header files which contain macro 8 | * definitions for all of the peripheral registers. 9 | */ 10 | 11 | #endif 12 | -------------------------------------------------------------------------------- /generate_vt.py: -------------------------------------------------------------------------------- 1 | # I am *so. done.* writing vector tables by hand. Help me out, Python? 
2 | import sys 3 | 4 | # Don't run the script if the wrong # of arguments are passed in. 5 | if not len( sys.argv ) == 3: 6 | print( 'Usage: python generate_vt.py \n' + 7 | 'Example: python generate_vt.py STM32WBxx cortex-m4' ) 8 | sys.exit( 1 ) 9 | 10 | # Array to hold relevant lines in the device header file. 11 | irq_lines = [] 12 | # State-tracking variable for the header file parsing. 13 | irq_appending = False 14 | # Filename to write the vector table to. 15 | vt_fn = sys.argv[ 1 ] + '_vt.S' 16 | # Device header filename to read the interrupt information from. 17 | dev_hdr_fp = 'device_headers/' + sys.argv[1].lower() + '.h' 18 | 19 | # Open the device header file, and read its interrupt definition 20 | # enum into the 'irq_lines' array. 21 | with open( dev_hdr_fp, 'r' ) as dev_hdr: 22 | for line in dev_hdr: 23 | if irq_appending: 24 | irq_lines.append( line ) 25 | if 'IRQn_Type' in line: 26 | # End of the interrupt enum; no need to keep reading. 27 | break 28 | elif 'Interrupt Number Definition' in line: 29 | # Near the start of the interrupt enum; start reading. 30 | irq_appending = True 31 | 32 | # Dictionary to hold [ interrupt_index : interrupt_name ] pairs. 33 | irq_dict = { } 34 | 35 | # Run through the interrupt definitions enum, and parse each relevant 36 | # line into a (key, value) pair in the 'irq_dict' variable. 37 | for line in irq_lines: 38 | if '=' in line: 39 | while ' ' in line: 40 | line = line.replace( ' ', ' ' ) 41 | line = line.replace( ',', '' ) 42 | blocks = line.split( ' ' ) 43 | irq_dict[ int( blocks[ 3 ] ) ] = blocks[ 1 ] 44 | 45 | # Create a sorted list from the dictionary, so that interrupts 46 | # will be in the right order in the vector table. 47 | irq_list = sorted( irq_dict.items() ) 48 | 49 | # Open the target file and write a vector table. 50 | with open( vt_fn, 'w+' ) as vt_as: 51 | # Header comment. 52 | vt_as.write( '/* Autogenerated vector table for ' + 53 | sys.argv[ 1 ] + ' */\n\n' ) 54 | # Assembler directives. 
55 | vt_as.write( '.syntax unified\n' ) 56 | vt_as.write( '.cpu ' + sys.argv[ 2 ] + '\n' ) 57 | vt_as.write( '.thumb\n\n' ) 58 | # Labels to export to the wider program context. 59 | vt_as.write( '.global vtable\n' ) 60 | vt_as.write( '.global default_interrupt_handler\n\n' ) 61 | 62 | # The vector table definition. Goes into a special 'vector_table' 63 | # section so that the linker script can ensure it is placed at the 64 | # very beginning of the chip's memory space. 65 | vt_as.write( '.type vtable, %object\n' ) 66 | vt_as.write( '.section .vector_table,"a",%progbits\n' ) 67 | vt_as.write( 'vtable:\n' ) 68 | # The first two entries are the 'end of stack' address and the 69 | # reset handler - neither are present in the device header file. 70 | vt_as.write( ' .word _estack\n' ) 71 | vt_as.write( ' .word reset_handler\n' ) 72 | # For each number in the range of interrupt indices, check if 73 | # an interrupt exists at that index. If so, make an entry for it. 74 | # If not, write a '0' to keep the correct ordering. 75 | for i in range( irq_list[ 0 ][ 0 ], ( irq_list[ -1 ][ 0 ] + 1 ) ): 76 | if i in irq_dict: 77 | vt_as.write( ' .word ' + irq_dict[ i ] + '_handler\n' ) 78 | else: 79 | vt_as.write( ' .word 0\n' ) 80 | vt_as.write( '\n' ) 81 | 82 | # Create weak references for each interrupt handler, so that they 83 | # point to a dummy handler if the application writer does not 84 | # define an interrupt handler in their program. 85 | for i in range( irq_list[ 0 ][ 0 ], ( irq_list[ -1 ][ 0 ] + 1 ) ): 86 | if i in irq_dict: 87 | vt_as.write( ' .weak ' + irq_dict[ i ] + '_handler\n' ) 88 | vt_as.write( ' .thumb_set ' + irq_dict[ i ] + '_handler,default_interrupt_handler\n' ) 89 | 90 | # Close the definition of the vector table. 91 | vt_as.write( '.size vtable, .-vtable\n\n' ) 92 | vt_as.write( '.section .text.default_interrupt_handler,"ax",%progbits\n' ) 93 | 94 | # Define a default 'dummy' interrupt handler. 
This is about the same 95 | # as `while(1) {};`, it's an infinite loop. So if an interrupt is 96 | # triggered while a handler is not defined, the program will freeze. 97 | # That is better than accidentally corrupting memory, though. 98 | vt_as.write( 'default_interrupt_handler:\n' ) 99 | vt_as.write( ' default_interrupt_loop:\n' ) 100 | vt_as.write( ' B default_interrupt_loop\n' ) 101 | vt_as.write( '.size default_interrupt_handler, .-default_interrupt_handler\n' ) 102 | 103 | # Done. 104 | sys.exit( 0 ) 105 | -------------------------------------------------------------------------------- /ld/STM32F723IE.ld: -------------------------------------------------------------------------------- 1 | /* Label for the program's entry point */ 2 | ENTRY(reset_handler) 3 | 4 | /* End of RAM/Start of stack */ 5 | /* (176KB in SRAM1) TODO: Use SRAM2 for stack? */ 6 | _estack = 0x2003C000; 7 | 8 | /* Set minimum size for stack and dynamic memory. */ 9 | /* (The linker will generate an error if there is 10 | * less than this much RAM leftover.) */ 11 | /* (1KB) */ 12 | _Min_Leftover_RAM = 0x400; 13 | 14 | MEMORY 15 | { 16 | FLASH ( rx ) : ORIGIN = 0x08000000, LENGTH = 512K 17 | RAM2 ( rxw ) : ORIGIN = 0x2003C000, LENGTH = 16K 18 | RAM ( rxw ) : ORIGIN = 0x20010000, LENGTH = 176K 19 | DTCMRAM ( rxw ) : ORIGIN = 0x20000000, LENGTH = 64K 20 | ITCMRAM ( rxw ) : ORIGIN = 0x00000000, LENGTH = 16K 21 | } 22 | 23 | INCLUDE "sections.ld" 24 | -------------------------------------------------------------------------------- /ld/sections.ld: -------------------------------------------------------------------------------- 1 | SECTIONS 2 | { 3 | /* The vector table goes to the start of flash. */ 4 | .vector_table : 5 | { 6 | . = ALIGN(4); 7 | KEEP (*(.vector_table)) 8 | . = ALIGN(4); 9 | } >FLASH 10 | 11 | /* The 'text' section contains the main program code. */ 12 | .text : 13 | { 14 | . 
= ALIGN(4); 15 | *(.text) 16 | *(.text*) 17 | KEEP (*(.init)) 18 | KEEP (*(.fini)) 19 | KEEP (*(.eh_frame)) 20 | . = ALIGN(4); 21 | } >FLASH 22 | 23 | /* Sections required by the standard libraries. */ 24 | .ARM.extab : 25 | { 26 | *(.ARM.extab* .gnu.linkonce.armextab.*) 27 | } >FLASH 28 | .ARM : 29 | { 30 | *(.ARM.exidx*) 31 | } >FLASH 32 | 33 | /* The 'rodata' section contains read-only data, 34 | * constants, strings, information that won't change. */ 35 | .rodata : 36 | { 37 | . = ALIGN(4); 38 | *(.rodata) 39 | *(.rodata*) 40 | . = ALIGN(4); 41 | } >FLASH 42 | 43 | /* The 'data' section is space set aside in RAM for 44 | * things like variables, which can change. */ 45 | _sidata = .; 46 | .data : AT(_sidata) 47 | { 48 | . = ALIGN(4); 49 | /* Mark start/end locations for the 'data' section. */ 50 | _sdata = .; 51 | *(.data) 52 | *(.data*) 53 | _edata = .; 54 | . = ALIGN(4); 55 | } >RAM 56 | 57 | /* The 'bss' section is similar to the 'data' section, 58 | * but its space is initialized to all 0s at the 59 | * start of the program. */ 60 | .bss : 61 | { 62 | . = ALIGN(4); 63 | /* Also mark the start/end of the BSS section. */ 64 | _sbss = .; 65 | __bss_start__ = _sbss; 66 | *(.bss) 67 | *(.bss*) 68 | *(COMMON) 69 | . = ALIGN(4); 70 | _ebss = .; 71 | __bss_end__ = _ebss; 72 | } >RAM 73 | /* Mark the end of statically-allocated RAM. */ 74 | end = .; 75 | _end = end; 76 | __end = end; 77 | 78 | /* Space set aside for the application's heap/stack. */ 79 | .dynamic_allocations : 80 | { 81 | . = ALIGN(4); 82 | _ssystem_ram = .; 83 | . = . + _Min_Leftover_RAM; 84 | . = ALIGN(4); 85 | _esystem_ram = .; 86 | } >RAM 87 | } 88 | -------------------------------------------------------------------------------- /src/global.c: -------------------------------------------------------------------------------- 1 | #include "global.h" 2 | 3 | uint32_t SystemCoreClock = 16000000; 4 | volatile uint32_t systick = 0; 5 | 6 | // System call to support standard library print functions. 
7 | int _write( int handle, char* data, int size ) { 8 | int count = size; 9 | while( count-- ) { 10 | while( !( USART6->ISR & USART_ISR_TXE ) ) {}; 11 | USART6->TDR = *data++; 12 | } 13 | return size; 14 | } 15 | 16 | // Delay for a specified number of milliseconds. 17 | // TODO: Prevent rollover bug on the 'systick' value. 18 | void delay_ms( uint32_t ms ) { 19 | // Calculate the 'end of delay' tick value, then wait for it. 20 | uint32_t next = systick + ms; 21 | while ( systick < next ) { __WFI(); } 22 | } 23 | -------------------------------------------------------------------------------- /src/global.h: -------------------------------------------------------------------------------- 1 | #ifndef VVC_GLOBAL_H 2 | #define VVC_GLOBAL_H 3 | 4 | // Standard library includes. 5 | #include 6 | #include 7 | #include 8 | 9 | // Device header file. 10 | #include "stm32f7xx.h" 11 | 12 | // Global program variables. 13 | uint32_t SystemCoreClock; 14 | volatile uint32_t systick; 15 | // Memory section boundaries which are defined in the linker script. 16 | extern uint32_t _sidata, _sdata, _edata, _sbss, _ebss; 17 | 18 | // System call to enable standard library print functions. 19 | int _write( int handle, char* data, int size ); 20 | 21 | // Helper method to perform blocking millisecond delays. 22 | void delay_ms( uint32_t ms ); 23 | 24 | #endif 25 | -------------------------------------------------------------------------------- /src/main.c: -------------------------------------------------------------------------------- 1 | #include "main.h" 2 | 3 | // Reset handler: set the stack pointer and branch to main(). 4 | __attribute__( ( naked ) ) void reset_handler( void ) { 5 | // Set the stack pointer to the 'end of stack' value. 6 | __asm__( "LDR r0, =_estack\n\t" 7 | "MOV sp, r0" ); 8 | // Branch to main(). 9 | __asm__( "B main" ); 10 | } 11 | 12 | /** 13 | * Main program. 
14 | */ 15 | int main( void ) { 16 | // Copy initialized data from .sidata (Flash) to .data (RAM) 17 | memcpy( &_sdata, &_sidata, ( ( void* )&_edata - ( void* )&_sdata ) ); 18 | // Clear the .bss section in RAM. 19 | memset( &_sbss, 0x00, ( ( void* )&_ebss - ( void* )&_sbss ) ); 20 | 21 | // Enable floating-point unit. 22 | SCB->CPACR |= ( 0xF << 20 ); 23 | 24 | // Set clock speed to 216MHz (each tick is a bit less than 5ns) 25 | // PLL out = ( 16MHz * ( N / M ) / P ). P = 2, N = 54, M = 2. 26 | FLASH->ACR |= ( 7 << FLASH_ACR_LATENCY_Pos ); 27 | RCC->PLLCFGR &= ~( RCC_PLLCFGR_PLLN | 28 | RCC_PLLCFGR_PLLM ); 29 | RCC->PLLCFGR |= ( ( 54 << RCC_PLLCFGR_PLLN_Pos ) | 30 | ( 2 << RCC_PLLCFGR_PLLM_Pos ) ); 31 | RCC->CR |= ( RCC_CR_PLLON ); 32 | while ( !( RCC->CR & RCC_CR_PLLRDY ) ) {}; 33 | RCC->CFGR |= ( 2 << RCC_CFGR_SW_Pos ); 34 | while ( ( RCC->CFGR & RCC_CFGR_SWS ) != ( 2 << RCC_CFGR_SWS_Pos ) ) {}; 35 | SystemCoreClock = 216000000; 36 | 37 | // Enable peripheral clocks: GPIOB-E, QSPI, USART6. 38 | RCC->AHB1ENR |= ( RCC_AHB1ENR_GPIOBEN | 39 | RCC_AHB1ENR_GPIOCEN | 40 | RCC_AHB1ENR_GPIODEN | 41 | RCC_AHB1ENR_GPIOEEN ); 42 | RCC->AHB3ENR |= ( RCC_AHB3ENR_QSPIEN ); 43 | RCC->APB2ENR |= ( RCC_APB2ENR_USART6EN ); 44 | 45 | // Initialize pins C6 and C7 for USART6. 46 | GPIOC->MODER |= ( ( 2 << ( 6 * 2 ) ) | 47 | ( 2 << ( 7 * 2 ) ) ); 48 | GPIOC->OSPEEDR |= ( ( 2 << ( 6 * 2 ) ) | 49 | ( 2 << ( 7 * 2 ) ) ); 50 | GPIOC->AFR[ 0 ] |= ( ( 8 << ( 6 * 4 ) ) | 51 | ( 8 << ( 7 * 4 ) ) ); 52 | // Initialize pins B2, B6, C9, C10, D13, E2 for QSPI. 
53 | GPIOB->MODER |= ( ( 2 << ( 2 * 2 ) ) | 54 | ( 2 << ( 6 * 2 ) ) ); 55 | GPIOB->OSPEEDR |= ( ( 3 << ( 2 * 2 ) ) | 56 | ( 3 << ( 6 * 2 ) ) ); 57 | GPIOB->PUPDR |= ( 1 << ( 6 * 2 ) ); 58 | GPIOB->AFR[ 0 ] |= ( ( 9 << ( 2 * 4 ) ) | 59 | ( 10 << ( 6 * 4 ) ) ); 60 | GPIOC->MODER |= ( ( 2 << ( 9 * 2 ) ) | 61 | ( 2 << ( 10 * 2 ) ) ); 62 | GPIOC->OSPEEDR |= ( ( 3 << ( 9 * 2 ) ) | 63 | ( 3 << ( 10 * 2 ) ) ); 64 | GPIOC->AFR[ 1 ] |= ( ( 9 << ( ( 9 - 8 ) * 4 ) ) | 65 | ( 9 << ( ( 10 - 8 ) * 4 ) ) ); 66 | GPIOD->MODER |= ( 2 << ( 13 * 2 ) ); 67 | GPIOD->OSPEEDR |= ( 3 << ( 13 * 2 ) ); 68 | GPIOD->AFR[ 1 ] |= ( 9 << ( ( 13 - 8 ) * 4 ) ); 69 | GPIOE->MODER |= ( 2 << ( 2 * 2 ) ); 70 | GPIOE->OSPEEDR |= ( 3 << ( 2 * 2 ) ); 71 | GPIOE->AFR[ 0 ] |= ( 9 << ( 2 * 4 ) ); 72 | 73 | // Setup USART6 for 115200-baud TX. 74 | USART6->BRR = ( SystemCoreClock / 115200 ); 75 | USART6->CR1 |= ( USART_CR1_UE | USART_CR1_TE ); 76 | 77 | // QSPI peripheral initialization. 78 | // Set Flash size; 512Mb = 64MB = 2^(25+1) bytes. 79 | QUADSPI->DCR |= ( 25 << QUADSPI_DCR_FSIZE_Pos ); 80 | // Set 1-wire data mode with 32-bit addressing. 81 | QUADSPI->CCR |= ( ( 3 << QUADSPI_CCR_ADSIZE_Pos ) | 82 | ( 1 << QUADSPI_CCR_IMODE_Pos ) ); 83 | // Wait an extra half-cycle to read, and set a clock prescaler. 84 | QUADSPI->CR |= ( QUADSPI_CR_SSHIFT | 85 | ( 2 << QUADSPI_CR_PRESCALER_Pos ) ); 86 | 87 | // Flash chip initialization. 88 | // Send 'enter QSPI mode' command. 89 | // Enable the peripheral. 90 | QUADSPI->CR |= ( QUADSPI_CR_EN ); 91 | // Set the 'enter QSPI mode' instruction. 92 | QUADSPI->CCR |= ( 0x35 << QUADSPI_CCR_INSTRUCTION_Pos ); 93 | // Wait for the transaction to complete, and disable the peripheral. 94 | while ( QUADSPI->SR & QUADSPI_SR_BUSY ) {}; 95 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 96 | // Wait for the 'QSPI mode enabled' bit. 97 | qspi_reg_wait( 0x05, 0x41, 0x40 ); 98 | 99 | // Send 'enable 4-byte addressing' command. 
100 | // The peripheral may start a new transfer as soon as the 101 | // 'instruction' field is written, so it is safest to disable 102 | // the peripheral before clearing that field. 103 | while ( QUADSPI->SR & QUADSPI_SR_BUSY ) {}; 104 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 105 | QUADSPI->CCR &= ~( QUADSPI_CCR_INSTRUCTION ); 106 | // Use all 4 data lines to send the instruction. 107 | QUADSPI->CCR |= ( 3 << QUADSPI_CCR_IMODE_Pos ); 108 | // Enable the peripheral and send the 'enable 4B addresses' command. 109 | QUADSPI->CR |= ( QUADSPI_CR_EN ); 110 | QUADSPI->CCR |= ( 0xB7 << QUADSPI_CCR_INSTRUCTION_Pos ); 111 | // Wait for the transaction to complete, and disable the peripheral. 112 | while ( QUADSPI->SR & QUADSPI_SR_BUSY ) {}; 113 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 114 | // Wait for the '4-byte addressing enabled' bit to be set. 115 | qspi_reg_wait( 0x15, 0x20, 0x20 ); 116 | 117 | // Test writing some data. 118 | // No need to run this every time; Flash is non-volatile, but it 119 | // has limited "write endurance" on the order of ~10k-100k cycles. 120 | qspi_erase_sector( 0 ); 121 | qspi_write_word( 0, 0x01234567 ); 122 | qspi_write_word( 4, 0x89ABCDEF ); 123 | 124 | // Enable memory-mapped mode. MX25L512 Flash chips use 125 | // 6 "dummy cycles" with Quad I/O "fast read" instructions by 126 | // default, which allows up to 84MHz communication speed. 127 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 128 | QUADSPI->CCR &= ~( QUADSPI_CCR_INSTRUCTION ); 129 | QUADSPI->CCR |= ( 3 << QUADSPI_CCR_FMODE_Pos | 130 | 3 << QUADSPI_CCR_ADMODE_Pos | 131 | 3 << QUADSPI_CCR_DMODE_Pos | 132 | 3 << QUADSPI_CCR_IMODE_Pos | 133 | 0xEC << QUADSPI_CCR_INSTRUCTION_Pos | 134 | 6 << QUADSPI_CCR_DCYC_Pos ); 135 | QUADSPI->CR |= ( QUADSPI_CR_EN ); 136 | 137 | // Add a dummy cycle; if memory-mapped access is attempted 138 | // immediately after enabling the peripheral, it seems to fail. 139 | // I'm not sure why, but adding one nop instruction seems to fix it. 
140 | __asm( "NOP" ); 141 | 142 | // Test reading values from memory-mapped Flash. 143 | int val = *( ( uint32_t* ) 0x90000000 ); 144 | printf( "QSPI[0]: 0x%08X\r\n", val ); 145 | val = *( ( uint32_t* ) 0x90000002 ); 146 | printf( "QSPI[2]: 0x%08X\r\n", val ); 147 | val = *( ( uint32_t* ) 0x90000008 ); 148 | printf( "QSPI[8]: 0x%08X\r\n", val ); 149 | 150 | // Done; empty main loop. 151 | while( 1 ) {}; 152 | return 0; // lol 153 | } 154 | -------------------------------------------------------------------------------- /src/main.h: -------------------------------------------------------------------------------- 1 | #ifndef VVC_MAIN_H 2 | #define VVC_MAIN_H 3 | 4 | // Project header files. 5 | #include "global.h" 6 | #include "qspi.h" 7 | 8 | #endif 9 | -------------------------------------------------------------------------------- /src/qspi.c: -------------------------------------------------------------------------------- 1 | #include "qspi.h" 2 | 3 | // Use 'status-polling' mode to wait for Flash register status. 4 | void qspi_reg_wait( uint8_t reg, uint32_t msk, uint32_t mat ) { 5 | // Disable the peripheral. 6 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 7 | // Set the 'mask', 'match', and 'polling interval' values. 8 | QUADSPI->PSMKR = msk; 9 | QUADSPI->PSMAR = mat; 10 | QUADSPI->PIR = 0x10; 11 | // Set the 'auto-stop' bit to end the transaction after a match. 12 | QUADSPI->CR |= ( QUADSPI_CR_APMS ); 13 | // Clear instruction, mode and transaction phases. 14 | QUADSPI->CCR &= ~( QUADSPI_CCR_INSTRUCTION | 15 | QUADSPI_CCR_FMODE | 16 | QUADSPI_CCR_IMODE | 17 | QUADSPI_CCR_DMODE | 18 | QUADSPI_CCR_ADMODE ); 19 | // Set 4-wire instruction and data modes, and auto-polling mode. 20 | QUADSPI->CCR |= ( ( 3 << QUADSPI_CCR_IMODE_Pos ) | 21 | ( 3 << QUADSPI_CCR_DMODE_Pos ) | 22 | ( 2 << QUADSPI_CCR_FMODE_Pos ) ); 23 | // Enable the peripheral. 24 | QUADSPI->CR |= ( QUADSPI_CR_EN ); 25 | // Set the given 'read register' instruction to start polling. 
26 | QUADSPI->CCR |= ( reg << QUADSPI_CCR_INSTRUCTION_Pos ); 27 | // Wait for a match. 28 | while ( QUADSPI->SR & QUADSPI_SR_BUSY ) {}; 29 | // Acknowledge the 'status match flag.' 30 | QUADSPI->FCR |= ( QUADSPI_FCR_CSMF ); 31 | // Un-set the data mode and disable auto-polling. 32 | QUADSPI->CCR &= ~( QUADSPI_CCR_FMODE | 33 | QUADSPI_CCR_DMODE ); 34 | // Disable the peripheral. 35 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 36 | } 37 | 38 | // Enable writes on the QSPI Flash. Must be done before every 39 | // erase / program operation. 40 | void qspi_wen() { 41 | // Disable the peripheral. 42 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 43 | // Clear the instruction, mode, and transaction phases. 44 | QUADSPI->CCR &= ~( QUADSPI_CCR_INSTRUCTION | 45 | QUADSPI_CCR_FMODE | 46 | QUADSPI_CCR_IMODE | 47 | QUADSPI_CCR_DMODE | 48 | QUADSPI_CCR_ADMODE ); 49 | // Set 4-wire instruction mode. 50 | QUADSPI->CCR |= ( 3 << QUADSPI_CCR_IMODE_Pos ); 51 | // Enable the peripheral and send the 'write enable' command. 52 | QUADSPI->CR |= ( QUADSPI_CR_EN ); 53 | QUADSPI->CCR |= ( 0x06 << QUADSPI_CCR_INSTRUCTION_Pos ); 54 | // Wait for the transaction to finish. 55 | while ( QUADSPI->SR & QUADSPI_SR_BUSY ) {}; 56 | // Disable the peripheral. 57 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 58 | // Wait until 'writes enabled' is set in the config register. 59 | qspi_reg_wait( 0x05, 0x43, 0x42 ); 60 | } 61 | 62 | // Erase a 4KB sector. Sector address = ( snum * 0x1000 ) 63 | void qspi_erase_sector( uint32_t snum ) { 64 | // Send 'enable writes' command. 65 | qspi_wen(); 66 | // Erase the sector, and wait for the operation to complete. 67 | while ( QUADSPI->SR & QUADSPI_SR_BUSY ) {}; 68 | QUADSPI->CCR &= ~( QUADSPI_CCR_INSTRUCTION | 69 | QUADSPI_CCR_FMODE | 70 | QUADSPI_CCR_IMODE | 71 | QUADSPI_CCR_DMODE | 72 | QUADSPI_CCR_ADMODE ); 73 | QUADSPI->CCR |= ( ( 3 << QUADSPI_CCR_IMODE_Pos ) | 74 | ( 3 << QUADSPI_CCR_ADMODE_Pos ) ); 75 | QUADSPI->CR |= ( QUADSPI_CR_EN ); 76 | // 0x20 is the "sector erase" command. 
77 | QUADSPI->CCR |= ( 0x20 << QUADSPI_CCR_INSTRUCTION_Pos ); 78 | // The address is equal to the sector number * 4KB. 79 | QUADSPI->AR = ( snum * 0x1000 ); 80 | while ( QUADSPI->SR & QUADSPI_SR_BUSY ) {}; 81 | // Disable the peripheral once the transaction is complete. 82 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 83 | // Wait for the 'write in progress' bit to clear. 84 | qspi_reg_wait( 0x05, 0x43, 0x40 ); 85 | } 86 | 87 | // Write one word of data (4 bytes) to a QSPI Flash chip. 88 | void qspi_write_word( uint32_t addr, uint32_t data ) { 89 | // Send 'enable writes' command. 90 | qspi_wen(); 91 | // Write the word of data. 92 | while ( QUADSPI->SR & QUADSPI_SR_BUSY ) {}; 93 | QUADSPI->CCR &= ~( QUADSPI_CCR_INSTRUCTION | 94 | QUADSPI_CCR_FMODE | 95 | QUADSPI_CCR_IMODE | 96 | QUADSPI_CCR_DMODE | 97 | QUADSPI_CCR_ADMODE ); 98 | QUADSPI->CCR |= ( ( 3 << QUADSPI_CCR_IMODE_Pos ) | 99 | ( 3 << QUADSPI_CCR_ADMODE_Pos ) | 100 | ( 3 << QUADSPI_CCR_DMODE_Pos ) ); 101 | // Set data length (3 + 1 = 4 bytes). 102 | QUADSPI->DLR = 3; 103 | // Enable the peripheral and set instruction, address, data. 104 | QUADSPI->CR |= ( QUADSPI_CR_EN ); 105 | QUADSPI->CCR |= ( 0x12 << QUADSPI_CCR_INSTRUCTION_Pos ); 106 | QUADSPI->AR = ( addr ); 107 | QUADSPI->DR = ( data ); 108 | // Wait for the transaction to complete, and disable the peripheral. 109 | while ( QUADSPI->SR & QUADSPI_SR_BUSY ) {}; 110 | QUADSPI->CR &= ~( QUADSPI_CR_EN ); 111 | // Clear the data length register. 112 | QUADSPI->DLR = 0; 113 | // Wait for the 'write in progress' bit to clear. 
114 | qspi_reg_wait( 0x05, 0x41, 0x40 ); 115 | } 116 | -------------------------------------------------------------------------------- /src/qspi.h: -------------------------------------------------------------------------------- 1 | #ifndef _VVC_QSPI_H 2 | #define _VVC_QSPI_H 3 | 4 | #include "global.h" 5 | 6 | void qspi_reg_wait( uint8_t reg, uint32_t msk, uint32_t mat ); 7 | void qspi_wen(); 8 | void qspi_erase_sector( uint32_t snum ); 9 | void qspi_write_word( uint32_t addr, uint32_t data ); 10 | 11 | #endif 12 | --------------------------------------------------------------------------------