├── .gitignore ├── Makefile ├── umodsi3.S ├── umoddi3.S ├── lcmp.S ├── uwrite4.S ├── uread8.S ├── uread4.S ├── ulcmp.S ├── modsi3.S ├── uwrite8.S ├── llsl.S ├── llsr.S ├── lasr.S ├── crt.S ├── moddi3.S ├── lmul.S ├── idiv.S ├── divdi3.S ├── README.md ├── idivmod.S ├── memset.S ├── memmove.S └── ldivmod.S /.gitignore: -------------------------------------------------------------------------------- 1 | *.a 2 | *.o 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | AS=arm-none-eabi-as -mthumb 2 | 3 | SRC_FILES=$(wildcard *.S) 4 | 5 | %.o: %.S 6 | $(AS) $< -o $@ 7 | 8 | libaeabi-cortexm0.a: $(SRC_FILES:%.S=%.o) 9 | ar rcs libaeabi-cortexm0.a $(SRC_FILES:%.S=%.o) 10 | 11 | 12 | 13 | .PHONY: all clean 14 | 15 | all: libaeabi-cortexm0.a 16 | 17 | clean: 18 | -rm -f *.o *.a 19 | -------------------------------------------------------------------------------- /umodsi3.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * umodsi3.S: wrapper for unsigned 32 bit division remainder 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ unsigned __umodsi3(unsigned num, unsigned denom) 29 | @ 30 | @ libgcc wrapper: use __aeabi_uidivmod() and return remainder 31 | @ 32 | .thumb_func 33 | .global __umodsi3 34 | __umodsi3: 35 | push {lr} 36 | bl __aeabi_uidivmod 37 | mov r0, r1 38 | pop {pc} 39 | -------------------------------------------------------------------------------- /umoddi3.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * uldivmod.S: unsigned 64 bit division 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | @ unsigned long long __umoddi3(unsigned long long num, unsigned long long denom) 28 | @ 29 | @ libgcc wrapper: use __aeabi_uldivmod() and return remainder 30 | @ 31 | .thumb_func 32 | .global __umoddi3 33 | __umoddi3: 34 | push {lr} 35 | bl __aeabi_uldivmod 36 | mov r0, r2 37 | mov r1, r3 38 | pop {pc} 39 | -------------------------------------------------------------------------------- /lcmp.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * lcmp.S: signed 64 bit comparison 3 | * 4 | * Copyright (c) 2017 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ int __aeabi_lcmp(long long r1:r0, long long r3:r2) 29 | @ 30 | @ Compare signed integers and return -1 if lower, 0 if equal or +1 if greater 31 | @ 32 | .thumb_func 33 | .global __aeabi_lcmp 34 | __aeabi_lcmp: 35 | 36 | cmp r1, r3 37 | blt 1f 38 | bgt 2f 39 | cmp r0, r2 40 | bhi 2f 41 | sbcs r0, r0, r0 @ 0 if cs and -1 if cc 42 | bx lr 43 | 1: movs r0, #1 44 | rsbs r0, r0, #0 45 | bx lr 46 | 2: movs r0, #1 47 | bx lr 48 | -------------------------------------------------------------------------------- /uwrite4.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * uread4.S: unaligned memory read 3 | * 4 | * Copyright (c) 2017 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ int __aeabi_uwrite4(int r0, void *r1) 29 | @ 30 | @ Write 4 little endian bytes to unaligned memory address 31 | @ 32 | .thumb_func 33 | .global __aeabi_uwrite4 34 | __aeabi_uwrite4: 35 | 36 | lsrs r2, r1, #1 37 | bcc .Lhalfword_aligned 38 | 39 | strb r0, [r1] 40 | adds r1, r1, #1 41 | lsrs r0, r0, #8 42 | strh r0, [r1] 43 | lsrs r0, r0, #16 44 | strb r0, [r1, #2] 45 | bx lr 46 | 47 | .Lhalfword_aligned: 48 | strh r0, [r1] 49 | lsrs r0, r0, #16 50 | strh r0, [r1, #2] 51 | bx lr 52 | -------------------------------------------------------------------------------- /uread8.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * uread8.S: unaligned memory read 3 | * 4 | * Copyright (c) 2017 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ long long __aeabi_uread8(void *r0) 29 | @ 30 | @ Read 8 little endian bytes from unaligned memory address 31 | @ 32 | .thumb_func 33 | .global __aeabi_uread8 34 | __aeabi_uread8: 35 | 36 | 37 | push {r4} 38 | lsrs r1, r0, #2 39 | lsls r1, r1, #2 40 | lsls r2, r0, #30 41 | lsrs r2, r2, #27 42 | ldm r1, {r0, r1, r3} 43 | lsrs r0, r2 44 | mov r4, r1 45 | lsrs r1, r2 46 | subs r2, #32 47 | negs r2, r2 48 | lsls r4, r2 49 | lsls r3, r2 50 | orrs r0, r4 51 | orrs r1, r3 52 | pop {r4} 53 | bx lr 54 | -------------------------------------------------------------------------------- /uread4.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * uread4.S: unaligned memory read 3 | * 4 | * Copyright (c) 2017 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ int __aeabi_uread4(void *r0) 29 | @ 30 | @ Read 4 little endian bytes from unaligned memory address 31 | @ 32 | .thumb_func 33 | .global __aeabi_uread4 34 | __aeabi_uread4: 35 | 36 | 37 | lsls r1, r0, #30 38 | lsrs r1, r1, #27 @ r1 = bit offset relative to aligned address 39 | lsrs r0, r0, #2 40 | lsls r0, r0, #2 @ r0 = round down address 41 | ldm r0, {r0,r2} @ r2:r0 = 8 bytes including the unaligned word 42 | lsrs r0, r1 43 | subs r1, #32 44 | negs r1, r1 45 | lsls r2, r1 46 | orrs r0, r2 @ r0 = (r0>>r1) | (r2<<(32-r1)) 47 | bx lr 48 | -------------------------------------------------------------------------------- /ulcmp.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * ulcmp.S: unsigned 64 bit comparison 3 | * 4 | * Copyright (c) 2017 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ int __aeabi_ulcmp(unsigned long long r1:r0, unsigned long long r3:r2) 29 | @ 30 | @ Compare unsigned integers and return -1 if lower, 0 if equal or +1 if greater 31 | @ 32 | .thumb_func 33 | .global __aeabi_ulcmp 34 | __aeabi_ulcmp: 35 | 36 | cmp r1, r3 37 | blo 1f @ cc 38 | bhi 2f 39 | cmp r0, r2 40 | blo 1f 41 | bhi 2f 42 | eors r0, r0 43 | bx lr 44 | 1: movs r0, #1 45 | rsbs r0, r0, #0 46 | bx lr 47 | 2: movs r0, #1 48 | bx lr 49 | 50 | 51 | cmp r1, r3 52 | blo 1f 53 | bhi 2f 54 | cmp r0, r2 55 | bhi 2f 56 | 1: sbcs r0, r0, r0 @ 0 if cs and -1 if cc 57 | bx lr 58 | 2: movs r0, #1 59 | bx lr 60 | -------------------------------------------------------------------------------- /modsi3.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * idivmod.S: signed 32 bit division remainder 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ int __modsi3(int numerator:r0, int denominator:r1) 29 | @ 30 | @ Divide r0 by r1 and return the remainder in r0 31 | @ Special case of __aeabi_idivmod() that is a lot simpler and moves the 32 | @ remainder to r0. 33 | @ 34 | .thumb_func 35 | .global __modsi3 36 | __modsi3: 37 | 38 | cmp r1, #0 39 | bge L_dont_neg_den 40 | rsbs r1, r1, #0 @ den = -den 41 | L_dont_neg_den: 42 | 43 | cmp r0, #0 44 | bge L_pos_result 45 | 46 | rsbs r0, r0, #0 @ num = -num 47 | push {lr} 48 | bl __aeabi_uidivmod 49 | rsbs r0, r1, #0 @ return -rem 50 | pop {pc} 51 | 52 | L_pos_result: 53 | push {lr} 54 | bl __aeabi_uidivmod 55 | movs r0, r1 @ return rem 56 | pop {pc} 57 | -------------------------------------------------------------------------------- /uwrite8.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * uread4.S: unaligned memory read 3 | * 4 | * Copyright (c) 2017 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ int __aeabi_uwrite8(long longt r1:r0, void *r2) 29 | @ 30 | @ Write 8 little endian bytes to unaligned memory address 31 | @ 32 | .thumb_func 33 | .global __aeabi_uwrite8 34 | __aeabi_uwrite8: 35 | 36 | lsrs r3, r2, #1 37 | bcc .Lhalfword_aligned 38 | 39 | strb r0, [r2] 40 | adds r2, #1 41 | lsrs r0, r0, #8 42 | strh r0, [r2] 43 | lsrs r0, r0, #16 44 | lsls r3, r1, #8 45 | orrs r0, r3 46 | strh r0, [r2, #2] 47 | lsrs r1, r1, #8 48 | strh r1, [r2, #4] 49 | lsrs r1, r1, #16 50 | strb r1, [r2, #6] 51 | bx lr 52 | 53 | .Lhalfword_aligned: 54 | strh r0, [r2] 55 | lsrs r0, r0, #16 56 | strh r0, [r2, #2] 57 | strh r1, [r2, #4] 58 | lsrs r1, r1, #16 59 | strh r1, [r2, #6] 60 | bx lr 61 | -------------------------------------------------------------------------------- /llsl.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * llsl.S: 64 bit shift left 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ long long __ashldi3(long long r1:r0, int r2) 29 | @ 30 | @ libgcc wrapper: just an alias for __aeabi_llsl() 31 | @ 32 | .thumb_func 33 | .global __ashldi3 34 | __ashldi3: 35 | 36 | 37 | 38 | @ long long __aeabi_llsl(long long r1:r0, int r2) 39 | @ 40 | @ Sshift r1:r0 left by r2 bits 41 | @ 42 | .thumb_func 43 | .global __aeabi_llsl 44 | __aeabi_llsl: 45 | 46 | cmp r2, #31 47 | bhi 1f 48 | 49 | movs r3, r1 @ n < 32: 50 | lsls r0, r2 @ lo = lo << n 51 | lsls r1, r2 52 | rsbs r2, r2, #0 53 | adds r2, #32 54 | lsrs r3, r2 55 | orrs r1, r3 @ hi = hi << n | lo >> (32-n) 56 | bx lr 57 | 58 | 1: subs r2, #32 @ n >= 32: 59 | movs r1, r0 60 | lsls r1, r2 @ hi = lo << (n-32) 61 | movs r0, #0 @ lo = 0 62 | bx lr 63 | -------------------------------------------------------------------------------- /llsr.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * llsr.S: 64 bit logical shift right 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ long long __lshrdi3(long long r1:r0, int r2) 29 | @ 30 | @ libgcc wrapper: just an alias for __aeabi_llsr() 31 | @ 32 | .thumb_func 33 | .global __lshrdi3 34 | __lshrdi3: 35 | 36 | 37 | 38 | @ long long __aeabi_llsr(long long r1:r0, int r2) 39 | @ 40 | @ Logical shift r1:r0 right by r2 bits 41 | @ 42 | .thumb_func 43 | .global __aeabi_llsr 44 | __aeabi_llsr: 45 | 46 | cmp r2, #31 47 | bhi 1f 48 | 49 | movs r3, r1 @ n < 32: 50 | lsrs r0, r2 51 | lsrs r1, r2 @ hi = hi >> n 52 | rsbs r2, r2, #0 53 | adds r2, #32 54 | lsls r3, r2 55 | orrs r0, r3 @ lo = lo >> n | hi << (32-n) 56 | bx lr 57 | 58 | 1: subs r2, #32 @ n >= 32: 59 | movs r0, r1 60 | lsrs r0, r2 @ lo = hi >> (n-32) 61 | movs r1, #0 @ hi = 0 62 | bx lr 63 | -------------------------------------------------------------------------------- /lasr.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * lasr.S: 64 bit arithmetic shift right 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ long long __ashrdi3(long long r1:r0, int r2) 29 | @ 30 | @ libgcc wrapper: just an alias for __aeabi_lasr() 31 | @ 32 | .thumb_func 33 | .global __ashrdi3 34 | __ashrdi3: 35 | 36 | 37 | 38 | @ long long __aeabi_lasr(long long r1:r0, int r2) 39 | @ 40 | @ Arithmetic shift r1:r0 right by r2 bits 41 | @ 42 | .thumb_func 43 | .global __aeabi_lasr 44 | __aeabi_lasr: 45 | 46 | cmp r2, #31 47 | bhi 1f 48 | 49 | movs r3, r1 @ n < 32: 50 | lsrs r0, r2 51 | asrs r1, r2 @ hi = hi >> n 52 | rsbs r2, r2, #0 53 | adds r2, #32 54 | lsls r3, r2 55 | orrs r0, r3 @ lo = lo >> n | hi << (32-n) 56 | bx lr 57 | 58 | 1: subs r2, #32 @ n >= 32: 59 | movs r0, r1 60 | asrs r0, r2 @ lo = hi >> (n-32) 61 | asrs r1, #31 @ hi = hi >> 31 62 | bx lr 63 | -------------------------------------------------------------------------------- /crt.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * crt.S: C runtime environment 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ _start 29 | @ 30 | @ Program entry point: call main(), then exit() 31 | @ 32 | .thumb_func 33 | .global _start 34 | _start: 35 | bl main 36 | @ fallthrough to exit() 37 | 38 | 39 | 40 | @ void exit(int status) 41 | @ 42 | @ Exit from program: breakpoint 0 43 | @ 44 | .thumb_func 45 | .global exit 46 | exit: 47 | bkpt #0 48 | 49 | 50 | 51 | @ void abort(void) 52 | @ 53 | @ Abnormal program termination: breakpoint 1 54 | @ 55 | .thumb_func 56 | .global abort 57 | abort: 58 | bkpt #1 59 | 60 | 61 | 62 | @ int __aeabi_idiv0(int r) 63 | @ 64 | @ Handler for 32 bit division by zero 65 | @ 66 | .thumb_func 67 | .global __aeabi_idiv0 68 | __aeabi_idiv0: 69 | 70 | 71 | 72 | @ long long __aeabi_ldiv0(long long r) 73 | @ 74 | @ Handler for 64 bit division by zero 75 | @ 76 | .thumb_func 77 | .global __aeabi_ldiv0 78 | __aeabi_ldiv0: 79 | bx lr 80 | -------------------------------------------------------------------------------- /moddi3.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * ldivmod.S: signed 64 bit division (only remainder) 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ long long __moddi3(long long numerator, long long denominator) 29 | @ 30 | @ Divide r1:r0 by r3:r2 and return the remainder in r1:r0 (all signed) 31 | @ Special case of __aeabi_ldivmod() that is a lot simpler and moves the 32 | @ remainder to r1:r0. 33 | @ 34 | .thumb_func 35 | .global __moddi3 36 | __moddi3: 37 | 38 | push {r4, lr} 39 | cmp r3, #0 40 | bge L_dont_neg_den 41 | 42 | movs r4, #0 @ den = -den 43 | rsbs r2, r2, #0 44 | sbcs r4, r3 45 | mov r3, r4 46 | L_dont_neg_den: 47 | 48 | cmp r1, #0 49 | bge L_pos_result 50 | 51 | movs r4, #0 @ num = -num 52 | rsbs r0, r0, #0 53 | sbcs r4, r1 54 | mov r1, r4 55 | 56 | bl __aeabi_uldivmod 57 | movs r0, #0 @ rem = -rem 58 | movs r1, #0 59 | subs r0, r2 60 | sbcs r1, r3 61 | pop {r4, pc} 62 | 63 | L_pos_result: 64 | bl __aeabi_uldivmod 65 | mov r1, r3 66 | mov r0, r2 67 | pop {r4, pc} 68 | -------------------------------------------------------------------------------- /lmul.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * lmul.S: 64 bit multiplication 3 | * 4 | * Copyright (c) 2013 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 
9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ long long __muldi3(long long, long long) 29 | @ 30 | @ libgcc wrapper: just an alias for __aeabi_lmul() 31 | @ 32 | .thumb_func 33 | .global __muldi3 34 | __muldi3: 35 | 36 | 37 | 38 | @ long long __aeabi_lmul(long long r1:r0, long long r3:r2) 39 | @ 40 | @ Multiply r1:r0 and r3:r2 and return the product in r1:r0 41 | @ Can also be used for unsigned long product 42 | @ 43 | .thumb_func 44 | .global __aeabi_lmul 45 | __aeabi_lmul: 46 | 47 | push {r4, lr} 48 | muls r1, r2 49 | muls r3, r0 50 | adds r1, r3 51 | 52 | lsrs r3, r0, #16 53 | lsrs r4, r2, #16 54 | muls r3, r4 55 | adds r1, r3 56 | 57 | lsrs r3, r0, #16 58 | uxth r0, r0 59 | uxth r2, r2 60 | muls r3, r2 61 | muls r4, r0 62 | muls r0, r2 63 | 64 | movs r2, #0 65 | adds r3, r4 66 | adcs r2, r2 67 | lsls r2, #16 68 | adds r1, r2 69 | 70 | lsls r2, r3, #16 71 | lsrs r3, #16 72 | adds r0, r2 73 | adcs r1, r3 74 | pop {r4, pc} 75 | -------------------------------------------------------------------------------- /idiv.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * idiv.S: signed 32 bit division (only quotient) 3 | * 4 | * Copyright (c) 2012-2017 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, 
provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ int __divsi3(int num, int denom) 29 | @ 30 | @ libgcc wrapper: just an alias for __aeabi_idivmod(), the remainder is ignored 31 | @ 32 | .thumb_func 33 | .global __divsi3 34 | __divsi3: 35 | 36 | 37 | 38 | @ int __aeabi_idiv(int num:r0, int denom:r1) 39 | @ 40 | @ Divide r0 by r1 and return quotient in r0 (all signed). 41 | @ Use __aeabi_uidivmod() but check signs before and change signs afterwards. 
42 | @ 43 | .thumb_func 44 | .global __aeabi_idiv 45 | __aeabi_idiv: 46 | 47 | cmp r0, #0 48 | bge .Lnumerator_pos 49 | rsbs r0, r0, #0 @ num = -num 50 | cmp r1, #0 51 | bge .Lneg_result 52 | rsbs r1, r1, #0 @ den = -den 53 | 54 | .Luidivmod: 55 | b __aeabi_uidivmod 56 | 57 | .Lnumerator_pos: 58 | cmp r1, #0 59 | bge .Luidivmod 60 | rsbs r1, r1, #0 @ den = -den 61 | 62 | .Lneg_result: 63 | push {lr} 64 | bl __aeabi_uidivmod 65 | rsbs r0, r0, #0 @ quot = -quot 66 | pop {pc} 67 | -------------------------------------------------------------------------------- /divdi3.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * ldivmod.S: signed 64 bit division (only quotient) 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 | */ 18 | 19 | 20 | 21 | .syntax unified 22 | .text 23 | .thumb 24 | .cpu cortex-m0 25 | 26 | 27 | 28 | @ long long __divdi3(long long numerator, long long denominator) 29 | @ 30 | @ Divide r1:r0 by r3:r2 and return the quotient in r1:r0. 31 | @ Same as _aeabi_ldivmod(), but ignore remainder. 
32 | @ 33 | .thumb_func 34 | .global __divdi3 35 | __divdi3: 36 | 37 | cmp r1, #0 38 | bge L_num_pos 39 | 40 | push {r4, lr} 41 | movs r4, #0 @ num = -num 42 | rsbs r0, r0, #0 43 | sbcs r4, r1 44 | mov r1, r4 45 | 46 | cmp r3, #0 47 | bge L_neg_result 48 | 49 | movs r4, #0 @ den = -den 50 | rsbs r2, r2, #0 51 | sbcs r4, r3 52 | mov r3, r4 53 | bl __aeabi_uldivmod 54 | pop {r4, pc} 55 | 56 | L_num_pos: 57 | cmp r3, #0 58 | bge __aeabi_uldivmod 59 | 60 | push {r4, lr} 61 | movs r4, #0 @ den = -den 62 | rsbs r2, r2, #0 63 | sbcs r4, r3 64 | mov r3, r4 65 | 66 | L_neg_result: 67 | bl __aeabi_uldivmod 68 | movs r4, #0 @ quot = -quot 69 | rsbs r0, r0, #0 70 | sbcs r4, r1 71 | mov r1, r4 72 | pop {r4, pc} 73 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ARM Run-Time ABI for the Cortex-M0 processor 2 | ============================================ 3 | 4 | This library implements the Run-time ABI for the ARM architecture as defined in 5 | document [ARM IHI 0043](https://developer.arm.com/documentation/ihi0043/latest) 6 | for the Thumb-2 ISA subset of the Cortex-M0. 


So Far Implemented
------------------

~~~~
__aeabi_lmul()
__aeabi_ldivmod()
__aeabi_uldivmod()
__aeabi_llsl()
__aeabi_llsr()
__aeabi_lasr()
__aeabi_lcmp()
__aeabi_ulcmp()
__aeabi_idiv()
__aeabi_uidiv()
__aeabi_idivmod()
__aeabi_uidivmod()
__aeabi_uread4()
__aeabi_uwrite4()
__aeabi_uread8()
__aeabi_uwrite8()
__aeabi_memcpy8()
__aeabi_memcpy4()
__aeabi_memcpy()
__aeabi_memmove8()
__aeabi_memmove4()
__aeabi_memmove()
__aeabi_memset8()
__aeabi_memset4()
__aeabi_memset()
__aeabi_memclr8()
__aeabi_memclr4()
__aeabi_memclr()
~~~~


Additional libgcc wrapper functions
-----------------------------------
Older LLVM versions required them; now LLVM also uses the ARM EABI, so
these wrappers will be removed soon.

~~~~
__muldi3()
__moddi3()
__divdi3()
__umoddi3()
__udivdi3()
__ashldi3()
__lshrdi3()
__ashrdi3()
__modsi3()
__divsi3()
__umodsi3()
__udivsi3()
~~~~



Cross compiling
---------------

### GNU binutils / gcc

Ubuntu provides the package `gcc-arm-none-eabi` with the ARM cross compiler
based on gcc. To cross compile for the ARM Cortex-M0 use

    arm-none-eabi-gcc -mcpu=cortex-m0 -mthumb -o foo.arm foo.c -nostdlib -laeabi-cortexm0

### clang / LLVM

Clang is a native cross compiler, but the standard linker `/usr/bin/ld` is not
able to link ARM binaries. Therefore use LLD (ubuntu package `lld-7`):

    clang -target armv6m-none-eabi -fuse-ld=lld-7 -o foo.arm foo.c -nostdlib -laeabi-cortexm0


License
-------
Licensed under the ISC license (similar to the MIT/Expat license).
87 | -------------------------------------------------------------------------------- /idivmod.S: -------------------------------------------------------------------------------- 1 | /* Runtime ABI for the ARM Cortex-M0 2 | * idivmod.S: signed 32 bit division (quotient and remainder) 3 | * 4 | * Copyright (c) 2012 Jörg Mische 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any 7 | * purpose with or without fee is hereby granted, provided that the above 8 | * copyright notice and this permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 16 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 */



.syntax unified
.text
.thumb
.cpu cortex-m0



@ {int quotient:r0, int remainder:r1}
@ __aeabi_idivmod(int numerator:r0, int denominator:r1)
@
@ Divide r0 by r1 and return the quotient in r0 and the remainder in r1.
@ The work is done by the unsigned division below, this entry only fixes
@ up the signs: the remainder takes the sign of the numerator.
@
.thumb_func
.global __aeabi_idivmod
__aeabi_idivmod:

	cmp	r0, #0
	bge	.Lnumerator_pos
	rsbs	r0, r0, #0		@ num = -num
	cmp	r1, #0
	bge	.Lboth_neg		@ NOTE: despite the label name this is the
					@ num < 0, den >= 0 case

	rsbs	r1, r1, #0		@ den = -den (num < 0, den < 0)
	push	{lr}
	bl	__aeabi_uidivmod
	rsbs	r1, r1, #0		@ rem = -rem (quotient stays positive)
	pop	{pc}

.Lboth_neg:				@ num < 0, den >= 0: negate quotient and remainder
	push	{lr}
	bl	__aeabi_uidivmod
	rsbs	r0, r0, #0		@ quot = -quot
	rsbs	r1, r1, #0		@ rem = -rem
	pop	{pc}

.Lnumerator_pos:
	cmp	r1, #0
	bge	.Luidivmod		@ both non-negative: plain unsigned division
	rsbs	r1, r1, #0		@ den = -den (num >= 0, den < 0)
	push	{lr}
	bl	__aeabi_uidivmod
	rsbs	r0, r0, #0		@ quot = -quot (remainder stays positive)
	pop	{pc}





@ unsigned __udivsi3(unsigned num, unsigned denom)
@
@ libgcc wrapper: just an alias for __aeabi_uidivmod(), the remainder is ignored
@
.thumb_func
.global __udivsi3
__udivsi3:				@ no code: falls through to __aeabi_uidivmod



@ unsigned __aeabi_uidiv(unsigned num, unsigned denom)
@
@ Just an alias for __aeabi_uidivmod(), the remainder is ignored
@
.thumb_func
.global __aeabi_uidiv
__aeabi_uidiv:				@ no code: falls through to __aeabi_uidivmod



@ {unsigned quotient:r0, unsigned remainder:r1}
@ __aeabi_uidivmod(unsigned numerator:r0, unsigned denominator:r1)
@
@ Divide r0 by r1 and return the quotient in r0 and the remainder in r1.
@ Classic restoring shift-and-subtract division.
@
.thumb_func
.global __aeabi_uidivmod
__aeabi_uidivmod:



.Luidivmod:
	cmp	r1, #0
	bne	1f
	b	__aeabi_idiv0		@ division by zero: defer to the handler
1:

	@ Shift left the denominator until it is greater than the numerator
	movs	r2, #1			@ counter (bitmask for the current quotient bit)
	movs	r3, #0			@ result
	cmp	r0, r1
	bls	.Lsub_loop
	adds	r1, #0			@ don't shift if denominator would overflow
	bmi	.Lsub_loop		@ (adds #0 sets N from the denominator's top bit)

.Ldenom_shift_loop:
	lsls	r2, #1
	lsls	r1, #1
	bmi	.Lsub_loop		@ top bit reached: shifting further would overflow
	cmp	r0, r1
	bhi	.Ldenom_shift_loop

.Lsub_loop:
	cmp	r0, r1
	bcc	.Ldont_sub		@ subtract only if num >= denom

	subs	r0, r1			@ numerator -= denom
	orrs	r3, r2			@ result(r3) |= bitmask(r2)
.Ldont_sub:

	lsrs	r1, #1			@ denom(r1) >>= 1
	lsrs	r2, #1			@ bitmask(r2) >>= 1
	bne	.Lsub_loop		@ loop until the bitmask is shifted out

	mov	r1, r0			@ remainder(r1) = numerator(r0)
	mov	r0, r3			@ quotient(r0) = result(r3)
	bx	lr
--------------------------------------------------------------------------------
/memset.S:
--------------------------------------------------------------------------------
/* Runtime ABI for the ARM Cortex-M0
 * memset.S: set memory region
 *
 * Copyright (c) 2013 Jörg Mische
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */



.syntax unified
.text
.thumb
.cpu cortex-m0



@ void __aeabi_memclr(void *r0, size_t r1)
@
@ Set the r1 bytes beginning with *r0 to 0.
@
.thumb_func
.global __aeabi_memclr
__aeabi_memclr:


	eors	r2, r2			@ fill value = 0, fall through to memset


@ void __aeabi_memset(void *r0, size_t r1, int r2)
@
@ Set the r1 bytes beginning with *r0 to r2
@
.thumb_func
.global __aeabi_memset
__aeabi_memset:

	@ check if length=0
	cmp	r1, #0
	beq	.L_return1

	movs	r3, #1			@ set one byte if odd address
	tst	r0, r3
	beq	.L_align2
	strb	r2, [r0]
	adds	r0, #1
	subs	r1, #1
	beq	.L_return1

.L_align2:
	movs	r3, #2			@ set one halfword if address is not 32 bit aligned
	tst	r0, r3
	beq	__aeabi_memset4
	strb	r2, [r0]
	cmp	r1, #1			@ if length is 1 copy only 1 byte
	beq	.L_return1
	strb	r2, [r0, #1]
	adds	r0, #2
	subs	r1, #2
	bne	__aeabi_memset4

.L_return1:
	bx	lr




@ void __aeabi_memclr4(void *r0, size_t r1)
@
@ Set the r1 bytes beginning with *r0 to 0.
@ r0 must be 4-byte-aligned
@
.thumb_func
.global __aeabi_memclr4
__aeabi_memclr4:


@ void __aeabi_memclr8(void *r0, size_t r1)
@
@ Set the r1 bytes beginning with *r0 to 0.
@ r0 must be 8-byte-aligned
@
.thumb_func
.global __aeabi_memclr8
__aeabi_memclr8:


	eors	r2, r2			@ fill value = 0, fall through to memset4


@ void __aeabi_memset4(void *r0, size_t r1, int r2)
@
@ Set the r1 bytes beginning with *r0 to r2.
@ r0 must be 4-byte-aligned
@
.thumb_func
.global __aeabi_memset4
__aeabi_memset4:


@ void __aeabi_memset8(void *r0, size_t r1, int r2)
@
@ Set the r1 bytes beginning with *r0 to r2.
@ r0 must be 8-byte-aligned
@
.thumb_func
.global __aeabi_memset8
__aeabi_memset8:


	lsls	r2, r2, #24		@ copy lowest byte of r2 to all other bytes in r2
	lsrs	r3, r2, #8
	orrs	r2, r3
	lsrs	r3, r2, #16
	orrs	r2, r3

	subs	r1, #4
	blo	.L_last_3bytes

.L_loop:				@ store one word per iteration
	str	r2, [r0]
	adds	r0, #4
	subs	r1, #4
	bhs	.L_loop

.L_last_3bytes:				@ r1 = remaining len - 4
	adds	r1, #2
	blo	.L_one_left		@ branch if r1 was -4 or -3
	strh	r2, [r0]
	beq	.L_return2		@ finished if r1 was -2
	strb	r2, [r0, #2]

.L_return2:
	bx	lr

.L_one_left:
	adds	r1, #1
	bne	.L_return3
	strb	r2, [r0]

.L_return3:
	bx	lr
--------------------------------------------------------------------------------
/memmove.S:
--------------------------------------------------------------------------------
/* Runtime ABI for the ARM Cortex-M0
 * memmove.S: move memory block
 *
 * Copyright (c) 2017 Jörg Mische
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */



.syntax unified
.text
.thumb
.cpu cortex-m0



@ void __aeabi_memmove8(void *r0, const void *r1, size_t r2);
@
@ Move r2 bytes from r1 to r0 and check for overlap.
@ r1 and r0 must be aligned to 8 bytes.
@
.thumb_func
.global __aeabi_memmove8
__aeabi_memmove8:



@ void __aeabi_memmove4(void *r0, const void *r1, size_t r2);
@
@ Move r2 bytes from r1 to r0 and check for overlap.
@ r1 and r0 must be aligned to 4 bytes.
@
.thumb_func
.global __aeabi_memmove4
__aeabi_memmove4:



	cmp	r0, r1
	bls	__aeabi_memcpy4		@ dest <= src: forward copy is always safe
	adds	r3, r1, r2
	cmp	r0, r3
	bhs	__aeabi_memcpy4		@ dest >= src+len: regions do not overlap

	b	.Lbackward_entry	@ overlapping: copy backwards, byte by byte

.Lbackward_loop:
	ldrb	r3, [r1, r2]
	strb	r3, [r0, r2]

.Lbackward_entry:
	subs	r2, #1			@ index runs from len-1 down to 0
	bhs	.Lbackward_loop

	bx	lr



@ void __aeabi_memmove(void *r0, const void *r1, size_t r2);
@
@ Move r2 bytes from r1 to r0 and check for overlap.
@ r0 and r1 need not be aligned.
@
.thumb_func
.global __aeabi_memmove
__aeabi_memmove:



	cmp	r0, r1
	bls	__aeabi_memcpy		@ dest <= src: forward copy is always safe
	adds	r3, r1, r2
	cmp	r0, r3
	blo	.Lbackward_entry	@ overlapping: copy backwards,
					@ otherwise fall through to memcpy



@ void __aeabi_memcpy(void *r0, const void *r1, size_t r2);
@
@ Move r2 bytes from r1 to r0. No overlap allowed.
@ r0 and r1 need not be aligned.
@
.thumb_func
.global __aeabi_memcpy
__aeabi_memcpy:



	cmp	r2, #8
	blo	.Lforward1		@ short block: plain byte copy
	mov	r3, r0
	eors	r3, r1
	lsls	r3, r3, #30		@ compare the two lowest address bits
	bne	.Lforward1		@ src/dest alignment differs: byte copy

	lsrs	r3, r0, #1		@ bit 0 of dest goes to carry
	bcc	.Lalign2
	ldrb	r3, [r1]		@ copy one byte to reach halfword alignment
	strb	r3, [r0]
	adds	r0, #1
	adds	r1, #1
	subs	r2, #1
.Lalign2:
	lsrs	r3, r0, #2		@ bit 1 of dest goes to carry
	bcc	.Lalign4
	ldrh	r3, [r1]		@ copy one halfword to reach word alignment
	strh	r3, [r0]
	adds	r0, #2
	adds	r1, #2
	subs	r2, #2
.Lalign4:
					@ fall through to the word-aligned copy



@ void __aeabi_memcpy8(void *r0, const void *r1, size_t r2);
@
@ Move r2 bytes from r1 to r0. No overlap allowed.
@ r0 and r1 must be aligned to 8 bytes.
@
.thumb_func
.global __aeabi_memcpy8
__aeabi_memcpy8:



@ void __aeabi_memcpy4(void *r0, const void *r1, size_t r2);
@
@ Move r2 bytes from r1 to r0. No overlap allowed.
@ r0 and r1 must be aligned to 4 bytes.
@
.thumb_func
.global __aeabi_memcpy4
__aeabi_memcpy4:



	subs	r2, #20
	blo	.Lforward4
	push	{r4, r5, r6, r7}
.Lforward20_loop:			@ copy 20 bytes per iteration
	ldm	r1!, {r3, r4, r5, r6, r7}
	stm	r0!, {r3, r4, r5, r6, r7}
	subs	r2, #20
	bhs	.Lforward20_loop
	pop	{r4, r5, r6, r7}

.Lforward4:
	adds	r2, #16			@ r2 = remaining len - 4
	blo	.Lforward4_corr
.Lforward4_loop:			@ copy one word per iteration
	ldm	r1!, {r3}
	stm	r0!, {r3}
	subs	r2, #4
	bhs	.Lforward4_loop

.Lforward4_corr:
	adds	r2, #4			@ r2 = remaining len (0..3)

.Lforward1:
	orrs	r2, r2
	beq	9f			@ nothing left to copy
	push	{r4}
	eors	r4, r4			@ byte index

.Lforward1_loop:
	ldrb	r3, [r1, r4]
	strb	r3, [r0, r4]
	adds	r4, #1
	cmp	r4, r2
	blo	.Lforward1_loop
	pop	{r4}
9:	bx	lr
--------------------------------------------------------------------------------
/ldivmod.S:
--------------------------------------------------------------------------------
/* Runtime ABI
for the ARM Cortex-M0
 * ldivmod.S: 64 bit division (quotient and remainder)
 *
 * Copyright (c) 2012-2017 Jörg Mische
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */



.syntax unified
.text
.thumb
.cpu cortex-m0



@ {long long quotient, long long remainder}
@ __aeabi_ldivmod(long long numerator, long long denominator)
@
@ Divide r1:r0 by r3:r2 and return the quotient in r1:r0 and the remainder in
@ r3:r2 (all signed). The remainder takes the sign of the numerator.
@
.thumb_func
.global __aeabi_ldivmod
__aeabi_ldivmod:

	cmp	r1, #0
	bge	.Lnumerator_pos

	push	{r4, lr}
	movs	r4, #0			@ num = -num  (64 bit negation: 0 - num)
	rsbs	r0, r0, #0
	sbcs	r4, r1
	mov	r1, r4

	cmp	r3, #0
	bge	.Lboth_neg		@ NOTE: despite the label name this is the
					@ num < 0, den >= 0 case

	movs	r4, #0			@ den = -den (num < 0, den < 0)
	rsbs	r2, r2, #0
	sbcs	r4, r3
	mov	r3, r4
	bl	__aeabi_uldivmod	@ quotient stays positive
	movs	r4, #0			@ rem = -rem
	rsbs	r2, r2, #0
	sbcs	r4, r3
	mov	r3, r4
	pop	{r4, pc}

.Lboth_neg:				@ num < 0, den >= 0: negate quotient and remainder
	bl	__aeabi_uldivmod
	movs	r4, #0			@ quot = -quot
	rsbs	r0, r0, #0
	sbcs	r4, r1
	mov	r1, r4
	movs	r4, #0			@ rem = -rem
	rsbs	r2, r2, #0
	sbcs	r4, r3
	mov	r3, r4
	pop	{r4, pc}

.Lnumerator_pos:
	cmp	r3, #0
	bge	.Luldivmod		@ both non-negative: plain unsigned division

	push	{r4, lr}
	movs	r4, #0			@ den = -den (num >= 0, den < 0)
	rsbs	r2, r2, #0
	sbcs	r4, r3
	mov	r3, r4
	bl	__aeabi_uldivmod
	movs	r4, #0			@ quot = -quot (remainder stays positive)
	rsbs	r0, r0, #0
	sbcs	r4, r1
	mov	r1, r4
	pop	{r4, pc}




@ unsigned long long __udivdi3(unsigned long long num, unsigned long long denom)
@
@ libgcc wrapper: just an alias for __aeabi_uldivmod(), the remainder is ignored
@
.thumb_func
.global __udivdi3
__udivdi3:				@ no code: falls through to __aeabi_uldivmod



@ {unsigned long long quotient, unsigned long long remainder}
@ __aeabi_uldivmod(unsigned long long numerator, unsigned long long denominator)
@
@ Divide r1:r0 by r3:r2 and return the quotient in r1:r0 and the remainder
@ in r3:r2 (all unsigned)
@
.thumb_func
.global __aeabi_uldivmod
__aeabi_uldivmod:



.Luldivmod:
	cmp	r3, #0
	bne	.L_large_denom		@ case 4: denom >= 2^32
	cmp	r2, #0
	beq	.L_divison_by_0		@ case 2 (label keeps its historic spelling)
	cmp	r1, #0
	beq	.L_fallback_32bits	@ case 3: num < 2^32 and denom < 2^32



	@ case 1: num >= 2^32 and denom < 2^32
	@ Result might be > 2^32, therefore we first calculate the upper 32
	@ bits of the result. It is done similar to the calculation of the
	@ lower 32 bits, but with a denominator that is shifted by 32.
	@ Hence the lower 32 bits of the denominator are always 0 and the
	@ costly 64 bit shift and sub operations can be replaced by cheap 32
	@ bit operations.

	push	{r4, r5, r6, r7, lr}

	@ shift left the denominator until it is greater than the numerator
	@ denom(r7:r6) = r3:r2 << 32

	movs	r5, #1			@ bitmask
	adds	r7, r2, #0		@ don't shift if denominator would overflow
	bmi	.L_upper_result
	cmp	r1, r7
	blo	.L_upper_result

.L_denom_shift_loop1:
	lsls	r5, #1
	lsls	r7, #1
	bmi	.L_upper_result		@ don't shift if overflow
	cmp	r1, r7
	bhs	.L_denom_shift_loop1

.L_upper_result:
	mov	r3, r1			@ num now in r3:r2
	mov	r2, r0
	movs	r1, #0			@ upper 32 bits of the quotient = 0
	b	.L_sub_entry1

.L_sub_loop1:
	lsrs	r7, #1			@ denom(r7:r6) >>= 1 (r6 is still all 0)

.L_sub_entry1:
	cmp	r3, r7
	bcc	.L_dont_sub1		@ subtract only if num >= denom

	subs	r3, r7			@ num -= denom
	orrs	r1, r5			@ upper result(r1) |= bitmask(r5)
.L_dont_sub1:

	lsrs	r5, #1			@ bitmask(r5) >>= 1
	bne	.L_sub_loop1

	movs	r5, #1			@ restart with bitmask = 0x80000000
	lsls	r5, #31
	lsls	r6, r7, #31		@ denom(r7:r6) = (r7:0) >> 1
	lsrs	r7, #1			@ don't forget least significant bit!
	b	.L_lower_result



	@ case 2: division by 0
	@ call __aeabi_ldiv0

.L_divison_by_0:
	b	__aeabi_ldiv0



	@ case 3: num < 2^32 and denom < 2^32
	@ fallback to 32 bit division

.L_fallback_32bits:
	mov	r1, r2
	push	{lr}
	bl	__aeabi_uidivmod
	mov	r2, r1			@ remainder to low word of r3:r2
	movs	r1, #0			@ upper words are 0
	movs	r3, #0
	pop	{pc}



	@ case 4: denom >= 2^32
	@ result is smaller than 2^32

.L_large_denom:
	push	{r4, r5, r6, r7, lr}

	mov	r7, r3			@ denom now in r7:r6
	mov	r6, r2
	mov	r3, r1			@ num now in r3:r2
	mov	r2, r0

	@ Shift left the denominator until it is greater than the numerator

	movs	r1, #0			@ high word of result is 0
	movs	r5, #1			@ bitmask
	adds	r7, #0			@ don't shift if denominator would overflow
	bmi	.L_lower_result
	cmp	r3, r7
	blo	.L_lower_result

.L_denom_shift_loop4:
	lsls	r5, #1
	lsls	r7, #1			@ denom(r7:r6) <<= 1
	lsls	r6, #1
	adcs	r7, r1			@ carry from r6 into r7 (r1=0)
	bmi	.L_lower_result		@ don't shift if overflow
	cmp	r3, r7
	bhs	.L_denom_shift_loop4



.L_lower_result:
	eors	r0, r0			@ low word of result = 0

.L_sub_loop4:
	mov	r4, r3			@ 64 bit compare: num(r3:r2) - denom(r7:r6)
	cmp	r2, r6
	sbcs	r4, r7
	bcc	.L_dont_sub4		@ subtract only if num >= denom

	subs	r2, r6			@ numerator -= denom (64 bit)
	sbcs	r3, r7
	orrs	r0, r5			@ lower result(r0) |= bitmask(r5)
.L_dont_sub4:

	lsls	r4, r7, #31		@ denom(r7:r6) >>= 1
	lsrs	r6, #1
	lsrs	r7, #1
	orrs	r6, r4
	lsrs	r5, #1			@ bitmask(r5) >>= 1
	bne	.L_sub_loop4

	pop	{r4, r5, r6, r7, pc}
--------------------------------------------------------------------------------