├── .gitignore ├── arch └── csky │ ├── Kconfig │ ├── Kconfig.debug │ ├── Kconfig.platforms │ ├── Makefile │ ├── abiv1 │ ├── Makefile │ ├── alignment.c │ ├── bswapdi.c │ ├── bswapsi.c │ ├── cacheflush.c │ ├── inc │ │ └── abi │ │ │ ├── cacheflush.h │ │ │ ├── ckmmu.h │ │ │ ├── elf.h │ │ │ ├── entry.h │ │ │ ├── page.h │ │ │ ├── pgtable-bits.h │ │ │ ├── reg_ops.h │ │ │ ├── regdef.h │ │ │ ├── string.h │ │ │ ├── switch_context.h │ │ │ └── vdso.h │ ├── memcmp.c │ ├── memcopy.h │ ├── mmap.c │ ├── strcpy.c │ ├── strksyms.c │ ├── strlen.c │ └── wordcopy.c │ ├── abiv2 │ ├── Makefile │ ├── cacheflush.c │ ├── fpu.c │ ├── inc │ │ └── abi │ │ │ ├── cacheflush.h │ │ │ ├── ckmmu.h │ │ │ ├── elf.h │ │ │ ├── entry.h │ │ │ ├── fpu.h │ │ │ ├── page.h │ │ │ ├── pgtable-bits.h │ │ │ ├── reg_ops.h │ │ │ ├── regdef.h │ │ │ ├── string.h │ │ │ ├── switch_context.h │ │ │ └── vdso.h │ ├── mcount.S │ ├── memcmp.S │ ├── memcpy.S │ ├── memmove.S │ ├── memset.S │ ├── strcmp.S │ ├── strcpy.S │ ├── strksyms.c │ ├── strlen.S │ └── sysdep.h │ ├── boot │ ├── Makefile │ └── dts │ │ ├── Makefile │ │ └── include │ │ └── dt-bindings │ ├── configs │ └── defconfig │ ├── include │ ├── asm │ │ ├── Kbuild │ │ ├── addrspace.h │ │ ├── asid.h │ │ ├── barrier.h │ │ ├── bitops.h │ │ ├── bug.h │ │ ├── cache.h │ │ ├── cacheflush.h │ │ ├── checksum.h │ │ ├── cmpxchg.h │ │ ├── elf.h │ │ ├── fixmap.h │ │ ├── ftrace.h │ │ ├── futex.h │ │ ├── highmem.h │ │ ├── io.h │ │ ├── irqflags.h │ │ ├── kprobes.h │ │ ├── memory.h │ │ ├── mmu.h │ │ ├── mmu_context.h │ │ ├── page.h │ │ ├── pci.h │ │ ├── perf_event.h │ │ ├── pgalloc.h │ │ ├── pgtable.h │ │ ├── probes.h │ │ ├── processor.h │ │ ├── ptrace.h │ │ ├── reg_ops.h │ │ ├── segment.h │ │ ├── shmparam.h │ │ ├── smp.h │ │ ├── spinlock.h │ │ ├── spinlock_types.h │ │ ├── stackprotector.h │ │ ├── string.h │ │ ├── switch_to.h │ │ ├── syscall.h │ │ ├── syscalls.h │ │ ├── tcm.h │ │ ├── thread_info.h │ │ ├── tlb.h │ │ ├── tlbflush.h │ │ ├── traps.h │ │ ├── uaccess.h │ │ ├── unistd.h │ │ ├── uprobes.h │ │ └── vdso.h │ └── uapi │ │ └── asm │ │ ├── Kbuild │ │ ├── byteorder.h │ │ ├── cachectl.h │ │ ├── perf_regs.h │ │ ├── ptrace.h │ │ ├── sigcontext.h │ │ └── unistd.h │ ├── kernel │ ├── Makefile │ ├── asm-offsets.c │ ├── atomic.S │ ├── cpu-probe.c │ ├── entry.S │ ├── ftrace.c │ ├── head.S │ ├── io.c │ ├── irq.c │ ├── module.c │ ├── pci.c │ ├── perf_callchain.c │ ├── perf_event.c │ ├── perf_regs.c │ ├── power.c │ ├── probes │ │ ├── Makefile │ │ ├── decode-insn.c │ │ ├── decode-insn.h │ │ ├── ftrace.c │ │ ├── kprobes.c │ │ ├── kprobes_trampoline.S │ │ ├── simulate-insn.c │ │ ├── simulate-insn.h │ │ └── uprobes.c │ ├── process.c │ ├── ptrace.c │ ├── setup.c │ ├── signal.c │ ├── smp.c │ ├── stacktrace.c │ ├── syscall.c │ ├── syscall_table.c │ ├── time.c │ ├── traps.c │ ├── vdso.c │ └── vmlinux.lds.S │ ├── lib │ ├── Makefile │ ├── delay.c │ ├── string.c │ └── usercopy.c │ └── mm │ ├── Makefile │ ├── asid.c │ ├── cachev1.c │ ├── cachev2.c │ ├── context.c │ ├── dma-mapping.c │ ├── dma │ ├── Makefile │ └── dma-mapping.c │ ├── fault.c │ ├── highmem.c │ ├── init.c │ ├── ioremap.c │ ├── syscache.c │ ├── tcm.c │ └── tlb.c ├── drivers ├── Makefile ├── clocksource │ ├── Makefile │ ├── timer-gx6605s.c │ ├── timer-mp-csky.c │ └── timer-of.h ├── irqchip │ ├── Makefile │ ├── irq-csky-apb-intc.c │ └── irq-csky-mpintc.c ├── prfl-csky.c └── qemu-exit.c ├── merge.sh └── patch ├── 0001-scripts-recordmcount.pl-Add-csky-support.patch ├── 0002-csky-Add-support-for-libdw.patch └── 0003-perf-evsel-Use-hweight64-instead-of-hweight_long-att.patch 
/.gitignore: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE! Don't add files that are generated in specific 3 | # subdirectories here. Add them in the ".gitignore" file 4 | # in that subdirectory instead. 5 | # 6 | # NOTE! Please use 'git ls-files -i --exclude-standard' 7 | # command after changing this file, to see if there are 8 | # any tracked files which get ignored after the change. 9 | # 10 | # Normal rules 11 | # 12 | .* 13 | *.o 14 | *.o.* 15 | *.a 16 | *.s 17 | *.dtb 18 | *.lds 19 | *.ko 20 | *.so 21 | *.so.dbg 22 | *.mod.c 23 | *.i 24 | *.lst 25 | *.symtypes 26 | *.order 27 | *.elf 28 | *.bin 29 | *.tar 30 | *.gz 31 | *.bz2 32 | *.lzma 33 | *.xz 34 | *.lz4 35 | *.lzo 36 | *.gcno 37 | modules.builtin 38 | Module.symvers 39 | *.dwo 40 | *.su 41 | *.c.[012]*.* 42 | eragon* 43 | # 44 | # Top-level generic files 45 | # 46 | /tags 47 | /TAGS 48 | /linux 49 | /vmlinux 50 | /vmlinux.32 51 | /vmlinux-gdb.py 52 | /vmlinuz 53 | /System.map 54 | /Module.markers 55 | 56 | # 57 | # Debian directory (make deb-pkg) 58 | # 59 | /debian/ 60 | 61 | # 62 | # tar directory (make tar*-pkg) 63 | # 64 | /tar-install/ 65 | 66 | # 67 | # git files that we don't want to ignore even if they are dot-files 68 | # 69 | !.gitignore 70 | !.mailmap 71 | !.cocciconfig 72 | 73 | # 74 | # Generated include files 75 | # 76 | include/config 77 | include/generated 78 | arch/*/include/generated 79 | 80 | # stgit generated dirs 81 | patches-* 82 | 83 | # quilt's files 84 | patches 85 | series 86 | 87 | # cscope files 88 | cscope.* 89 | ncscope.* 90 | 91 | # gnu global files 92 | GPATH 93 | GRTAGS 94 | GSYMS 95 | GTAGS 96 | 97 | # id-utils files 98 | ID 99 | 100 | *.orig 101 | *~ 102 | \#*# 103 | 104 | # 105 | # Leavings from module signing 106 | # 107 | extra_certificates 108 | signing_key.pem 109 | signing_key.priv 110 | signing_key.x509 111 | x509.genkey 112 | 113 | # Kconfig presets 114 | all.config 115 | 116 | # Kdevelop4 117 | *.kdev4 118 | -------------------------------------------------------------------------------- /arch/csky/Kconfig.debug: -------------------------------------------------------------------------------- 1 | menu "C-SKY Debug Options" 2 | config CSKY_BUILTIN_DTB 3 | string "Use kernel builtin dtb" 4 | help 5 | User could define the dtb instead of the one which is passed from 6 | bootloader. 7 | Sometimes for debug, we want to use a built-in dtb and then we needn't 8 | modify bootloader at all. 
9 | endmenu 10 | -------------------------------------------------------------------------------- /arch/csky/Kconfig.platforms: -------------------------------------------------------------------------------- 1 | menu "Platform drivers selection" 2 | 3 | config ARCH_CSKY_DW_APB_ICTL 4 | bool "Select dw-apb interrupt controller" 5 | select DW_APB_ICTL 6 | default y 7 | help 8 | This enables support for snps dw-apb-ictl 9 | endmenu 10 | -------------------------------------------------------------------------------- /arch/csky/Makefile: -------------------------------------------------------------------------------- 1 | OBJCOPYFLAGS :=-O binary 2 | GZFLAGS :=-9 3 | KBUILD_DEFCONFIG := defconfig 4 | 5 | ifdef CONFIG_CPU_HAS_FPU 6 | FPUEXT = f 7 | endif 8 | 9 | ifdef CONFIG_CPU_HAS_VDSP 10 | VDSPEXT = v 11 | endif 12 | 13 | ifdef CONFIG_CPU_HAS_TEE 14 | TEEEXT = t 15 | endif 16 | 17 | ifdef CONFIG_CPU_CK610 18 | CPUTYPE = ck610 19 | CSKYABI = abiv1 20 | endif 21 | 22 | ifdef CONFIG_CPU_CK810 23 | CPUTYPE = ck810 24 | CSKYABI = abiv2 25 | endif 26 | 27 | ifdef CONFIG_CPU_CK807 28 | CPUTYPE = ck807 29 | CSKYABI = abiv2 30 | endif 31 | 32 | ifdef CONFIG_CPU_CK860 33 | CPUTYPE = ck860 34 | CSKYABI = abiv2 35 | endif 36 | 37 | ifneq ($(CSKYABI),) 38 | MCPU_STR = $(CPUTYPE)$(FPUEXT)$(VDSPEXT)$(TEEEXT) 39 | KBUILD_CFLAGS += -mcpu=$(CPUTYPE) -Wa,-mcpu=$(MCPU_STR) 40 | KBUILD_CFLAGS += -DCSKYCPU_DEF_NAME=\"$(MCPU_STR)\" 41 | KBUILD_CFLAGS += -msoft-float -mdiv 42 | KBUILD_CFLAGS += -fno-tree-vectorize 43 | endif 44 | 45 | KBUILD_CFLAGS += -pipe 46 | ifeq ($(CSKYABI),abiv2) 47 | KBUILD_CFLAGS += -mno-stack-size 48 | endif 49 | 50 | ifdef CONFIG_FRAME_POINTER 51 | KBUILD_CFLAGS += -mbacktrace 52 | endif 53 | 54 | abidirs := $(patsubst %,arch/csky/%/,$(CSKYABI)) 55 | KBUILD_CFLAGS += $(patsubst %,-I$(srctree)/%inc,$(abidirs)) 56 | 57 | KBUILD_CPPFLAGS += -mlittle-endian 58 | LDFLAGS += -EL 59 | 60 | KBUILD_AFLAGS += $(KBUILD_CFLAGS) 61 | 62 | head-y := arch/csky/kernel/head.o 63 | 64 | core-y += arch/csky/kernel/ 65 | core-y += arch/csky/mm/ 66 | core-y += arch/csky/$(CSKYABI)/ 67 | 68 | libs-y += arch/csky/lib/ \ 69 | $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) 70 | 71 | boot := arch/csky/boot 72 | ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""' 73 | core-y += $(boot)/dts/ 74 | endif 75 | 76 | all: zImage 77 | 78 | 79 | dtbs: scripts 80 | $(Q)$(MAKE) $(build)=$(boot)/dts 81 | 82 | %.dtb %.dtb.S %.dtb.o: scripts 83 | $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ 84 | 85 | zImage Image uImage: vmlinux dtbs 86 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 87 | 88 | archclean: 89 | $(Q)$(MAKE) $(clean)=$(boot) 90 | $(Q)$(MAKE) $(clean)=$(boot)/dts 91 | rm -rf arch/csky/include/generated 92 | 93 | define archhelp 94 | echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' 95 | echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' 96 | echo ' uImage - U-Boot wrapped zImage' 97 | endef 98 | -------------------------------------------------------------------------------- /arch/csky/abiv1/Makefile: -------------------------------------------------------------------------------- 1 | obj-$(CONFIG_CPU_NEED_SOFTALIGN) += alignment.o 2 | obj-y += bswapdi.o 3 | obj-y += bswapsi.o 4 | obj-y += cacheflush.o 5 | obj-y += mmap.o 6 | obj-y += strksyms.o 7 | obj-y += memcmp.o 8 | obj-y += strcpy.o 9 | obj-y += strlen.o 10 | obj-y += wordcopy.o 11 | -------------------------------------------------------------------------------- /arch/csky/abiv1/bswapdi.c: 
-------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | unsigned long long notrace __bswapdi2(unsigned long long u) 9 | { 10 | return ___constant_swab64(u); 11 | } 12 | EXPORT_SYMBOL(__bswapdi2); 13 | -------------------------------------------------------------------------------- /arch/csky/abiv1/bswapsi.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | unsigned int notrace __bswapsi2(unsigned int u) 9 | { 10 | return ___constant_swab32(u); 11 | } 12 | EXPORT_SYMBOL(__bswapsi2); 13 | -------------------------------------------------------------------------------- /arch/csky/abiv1/cacheflush.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #define PG_dcache_clean PG_arch_1 16 | 17 | void flush_dcache_page(struct page *page) 18 | { 19 | struct address_space *mapping; 20 | 21 | if (page == ZERO_PAGE(0)) 22 | return; 23 | 24 | mapping = page_mapping_file(page); 25 | 26 | if (mapping && !page_mapcount(page)) 27 | clear_bit(PG_dcache_clean, &page->flags); 28 | else { 29 | dcache_wbinv_all(); 30 | if (mapping) 31 | icache_inv_all(); 32 | set_bit(PG_dcache_clean, &page->flags); 33 | } 34 | } 35 | EXPORT_SYMBOL(flush_dcache_page); 36 | 37 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, 38 | pte_t *ptep) 39 | { 40 | unsigned long pfn = pte_pfn(*ptep); 41 | struct page *page; 42 | 43 | flush_tlb_page(vma, addr); 44 | 45 | if (!pfn_valid(pfn)) 46 | return; 47 | 48 | page = pfn_to_page(pfn); 49 | if (page == ZERO_PAGE(0)) 50 | return; 51 | 52 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) 53 | dcache_wbinv_all(); 54 | 55 | if (page_mapping_file(page)) { 56 | if (vma->vm_flags & VM_EXEC) 57 | icache_inv_all(); 58 | } 59 | } 60 | 61 | void flush_kernel_dcache_page(struct page *page) 62 | { 63 | struct address_space *mapping; 64 | 65 | mapping = page_mapping_file(page); 66 | 67 | if (!mapping || mapping_mapped(mapping)) 68 | dcache_wbinv_all(); 69 | } 70 | EXPORT_SYMBOL(flush_kernel_dcache_page); 71 | 72 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, 73 | unsigned long end) 74 | { 75 | dcache_wbinv_all(); 76 | 77 | if (vma->vm_flags & VM_EXEC) 78 | icache_inv_all(); 79 | } 80 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/cacheflush.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
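//
// Note: the abiv1 data cache is virtually indexed and can alias (see the
// colouring helpers in abiv1/inc/abi/page.h and abiv1/mmap.c), so most of
// the helpers below conservatively write back and invalidate the whole
// cache instead of operating on an address range.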
3 | 4 | #ifndef __ABI_CSKY_CACHEFLUSH_H 5 | #define __ABI_CSKY_CACHEFLUSH_H 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 12 | extern void flush_dcache_page(struct page *); 13 | 14 | #define flush_cache_mm(mm) dcache_wbinv_all() 15 | #define flush_cache_page(vma, page, pfn) cache_wbinv_all() 16 | #define flush_cache_dup_mm(mm) cache_wbinv_all() 17 | 18 | #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 19 | extern void flush_kernel_dcache_page(struct page *); 20 | 21 | #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) 22 | #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) 23 | 24 | static inline void flush_kernel_vmap_range(void *addr, int size) 25 | { 26 | dcache_wbinv_all(); 27 | } 28 | static inline void invalidate_kernel_vmap_range(void *addr, int size) 29 | { 30 | dcache_wbinv_all(); 31 | } 32 | 33 | #define ARCH_HAS_FLUSH_ANON_PAGE 34 | static inline void flush_anon_page(struct vm_area_struct *vma, 35 | struct page *page, unsigned long vmaddr) 36 | { 37 | if (PageAnon(page)) 38 | cache_wbinv_all(); 39 | } 40 | 41 | /* 42 | * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken. 43 | * Use cache_wbinv_all() here and need to be improved in future. 44 | */ 45 | extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); 46 | #define flush_cache_vmap(start, end) cache_wbinv_all() 47 | #define flush_cache_vunmap(start, end) cache_wbinv_all() 48 | 49 | #define flush_icache_page(vma, page) do {} while (0); 50 | #define flush_icache_range(start, end) cache_wbinv_range(start, end) 51 | #define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end) 52 | #define flush_icache_deferred(mm) do {} while (0); 53 | 54 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 55 | do { \ 56 | memcpy(dst, src, len); \ 57 | } while (0) 58 | 59 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 60 | do { \ 61 | memcpy(dst, src, len); \ 62 | cache_wbinv_all(); \ 63 | } while (0) 64 | 65 | #endif /* __ABI_CSKY_CACHEFLUSH_H */ 66 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/ckmmu.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
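//
// The abiv1 MMU registers live in coprocessor 15 (selected with
// "cpseti cp15" in abi/entry.h) and are accessed through the cprcr/cpwcr
// helpers from abi/reg_ops.h; the abiv2 variant of this header reaches the
// equivalent registers as cr<n, 15> via mfcr/mtcr instead.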
3 | 4 | #ifndef __ASM_CSKY_CKMMUV1_H 5 | #define __ASM_CSKY_CKMMUV1_H 6 | #include 7 | 8 | static inline int read_mmu_index(void) 9 | { 10 | return cprcr("cpcr0"); 11 | } 12 | 13 | static inline void write_mmu_index(int value) 14 | { 15 | cpwcr("cpcr0", value); 16 | } 17 | 18 | static inline int read_mmu_entrylo0(void) 19 | { 20 | return cprcr("cpcr2") << 6; 21 | } 22 | 23 | static inline int read_mmu_entrylo1(void) 24 | { 25 | return cprcr("cpcr3") << 6; 26 | } 27 | 28 | static inline void write_mmu_pagemask(int value) 29 | { 30 | cpwcr("cpcr6", value); 31 | } 32 | 33 | static inline int read_mmu_entryhi(void) 34 | { 35 | return cprcr("cpcr4"); 36 | } 37 | 38 | static inline void write_mmu_entryhi(int value) 39 | { 40 | cpwcr("cpcr4", value); 41 | } 42 | 43 | static inline unsigned long read_mmu_msa0(void) 44 | { 45 | return cprcr("cpcr30"); 46 | } 47 | 48 | static inline void write_mmu_msa0(unsigned long value) 49 | { 50 | cpwcr("cpcr30", value); 51 | } 52 | 53 | static inline unsigned long read_mmu_msa1(void) 54 | { 55 | return cprcr("cpcr31"); 56 | } 57 | 58 | static inline void write_mmu_msa1(unsigned long value) 59 | { 60 | cpwcr("cpcr31", value); 61 | } 62 | 63 | /* 64 | * TLB operations. 65 | */ 66 | static inline void tlb_probe(void) 67 | { 68 | cpwcr("cpcr8", 0x80000000); 69 | } 70 | 71 | static inline void tlb_read(void) 72 | { 73 | cpwcr("cpcr8", 0x40000000); 74 | } 75 | 76 | static inline void tlb_invalid_all(void) 77 | { 78 | cpwcr("cpcr8", 0x04000000); 79 | } 80 | 81 | 82 | static inline void local_tlb_invalid_all(void) 83 | { 84 | tlb_invalid_all(); 85 | } 86 | 87 | static inline void tlb_invalid_indexed(void) 88 | { 89 | cpwcr("cpcr8", 0x02000000); 90 | } 91 | 92 | static inline void setup_pgd(pgd_t *pgd, int asid) 93 | { 94 | cpwcr("cpcr29", __pa(pgd) | BIT(0)); 95 | write_mmu_entryhi(asid); 96 | } 97 | 98 | static inline pgd_t *get_pgd(void) 99 | { 100 | return __va(cprcr("cpcr29") & ~BIT(0)); 101 | } 102 | #endif /* __ASM_CSKY_CKMMUV1_H */ 103 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/elf.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ABI_CSKY_ELF_H 4 | #define __ABI_CSKY_ELF_H 5 | 6 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ 7 | pr_reg[0] = regs->pc; \ 8 | pr_reg[1] = regs->regs[9]; \ 9 | pr_reg[2] = regs->usp; \ 10 | pr_reg[3] = regs->sr; \ 11 | pr_reg[4] = regs->a0; \ 12 | pr_reg[5] = regs->a1; \ 13 | pr_reg[6] = regs->a2; \ 14 | pr_reg[7] = regs->a3; \ 15 | pr_reg[8] = regs->regs[0]; \ 16 | pr_reg[9] = regs->regs[1]; \ 17 | pr_reg[10] = regs->regs[2]; \ 18 | pr_reg[11] = regs->regs[3]; \ 19 | pr_reg[12] = regs->regs[4]; \ 20 | pr_reg[13] = regs->regs[5]; \ 21 | pr_reg[14] = regs->regs[6]; \ 22 | pr_reg[15] = regs->regs[7]; \ 23 | pr_reg[16] = regs->regs[8]; \ 24 | pr_reg[17] = regs->lr; \ 25 | } while (0); 26 | #endif /* __ABI_CSKY_ELF_H */ 27 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/entry.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef __ASM_CSKY_ENTRY_H 5 | #define __ASM_CSKY_ENTRY_H 6 | 7 | #include 8 | #include 9 | 10 | #define LSAVE_PC 8 11 | #define LSAVE_PSR 12 12 | #define LSAVE_A0 24 13 | #define LSAVE_A1 28 14 | #define LSAVE_A2 32 15 | #define LSAVE_A3 36 16 | #define LSAVE_A4 40 17 | #define LSAVE_A5 44 18 | 19 | #define usp ss1 20 | 21 | .macro USPTOKSP 22 | mtcr sp, usp 23 | mfcr sp, ss0 24 | .endm 25 | 26 | .macro KSPTOUSP 27 | mtcr sp, ss0 28 | mfcr sp, usp 29 | .endm 30 | 31 | .macro SAVE_ALL epc_inc 32 | mtcr r13, ss2 33 | mfcr r13, epsr 34 | btsti r13, 31 35 | bt 1f 36 | USPTOKSP 37 | 1: 38 | subi sp, 32 39 | subi sp, 32 40 | subi sp, 16 41 | stw r13, (sp, 12) 42 | 43 | stw lr, (sp, 4) 44 | 45 | mfcr lr, epc 46 | movi r13, \epc_inc 47 | add lr, r13 48 | stw lr, (sp, 8) 49 | 50 | mov lr, sp 51 | addi lr, 32 52 | addi lr, 32 53 | addi lr, 16 54 | bt 2f 55 | mfcr lr, ss1 56 | 2: 57 | stw lr, (sp, 16) 58 | 59 | stw a0, (sp, 20) 60 | stw a0, (sp, 24) 61 | stw a1, (sp, 28) 62 | stw a2, (sp, 32) 63 | stw a3, (sp, 36) 64 | 65 | addi sp, 32 66 | addi sp, 8 67 | mfcr r13, ss2 68 | stw r6, (sp) 69 | stw r7, (sp, 4) 70 | stw r8, (sp, 8) 71 | stw r9, (sp, 12) 72 | stw r10, (sp, 16) 73 | stw r11, (sp, 20) 74 | stw r12, (sp, 24) 75 | stw r13, (sp, 28) 76 | stw r14, (sp, 32) 77 | stw r1, (sp, 36) 78 | subi sp, 32 79 | subi sp, 8 80 | .endm 81 | 82 | .macro RESTORE_ALL 83 | ldw lr, (sp, 4) 84 | ldw a0, (sp, 8) 85 | mtcr a0, epc 86 | ldw a0, (sp, 12) 87 | mtcr a0, epsr 88 | btsti a0, 31 89 | bt 1f 90 | ldw a0, (sp, 16) 91 | mtcr a0, ss1 92 | 1: 93 | ldw a0, (sp, 24) 94 | ldw a1, (sp, 28) 95 | ldw a2, (sp, 32) 96 | ldw a3, (sp, 36) 97 | 98 | addi sp, 32 99 | addi sp, 8 100 | ldw r6, (sp) 101 | ldw r7, (sp, 4) 102 | ldw r8, (sp, 8) 103 | ldw r9, (sp, 12) 104 | ldw r10, (sp, 16) 105 | ldw r11, (sp, 20) 106 | ldw r12, (sp, 24) 107 | ldw r13, (sp, 28) 108 | ldw r14, (sp, 32) 109 | ldw r1, (sp, 36) 110 | addi sp, 32 111 | addi sp, 8 112 | 113 | bt 2f 114 | KSPTOUSP 115 | 2: 116 | rte 117 | .endm 118 | 119 | .macro SAVE_SWITCH_STACK 120 | subi sp, 32 121 | stm r8-r15, (sp) 122 | .endm 123 | 124 | .macro RESTORE_SWITCH_STACK 125 | ldm r8-r15, (sp) 126 | addi sp, 32 127 | .endm 128 | 129 | /* MMU registers operators. */ 130 | .macro RD_MIR rx 131 | cprcr \rx, cpcr0 132 | .endm 133 | 134 | .macro RD_MEH rx 135 | cprcr \rx, cpcr4 136 | .endm 137 | 138 | .macro RD_MCIR rx 139 | cprcr \rx, cpcr8 140 | .endm 141 | 142 | .macro RD_PGDR rx 143 | cprcr \rx, cpcr29 144 | .endm 145 | 146 | .macro WR_MEH rx 147 | cpwcr \rx, cpcr4 148 | .endm 149 | 150 | .macro WR_MCIR rx 151 | cpwcr \rx, cpcr8 152 | .endm 153 | 154 | .macro SETUP_MMU 155 | /* Init psr and enable ee */ 156 | lrw r6, DEFAULT_PSR_VALUE 157 | mtcr r6, psr 158 | psrset ee 159 | 160 | /* Select MMU as co-processor */ 161 | cpseti cp15 162 | 163 | /* 164 | * cpcr30 format: 165 | * 31 - 29 | 28 - 4 | 3 | 2 | 1 | 0 166 | * BA Reserved C D V 167 | */ 168 | cprcr r6, cpcr30 169 | lsri r6, 29 170 | lsli r6, 29 171 | addi r6, 0xe 172 | cpwcr r6, cpcr30 173 | 174 | movi r6, 0 175 | cpwcr r6, cpcr31 176 | .endm 177 | #endif /* __ASM_CSKY_ENTRY_H */ 178 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/page.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
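//
// Helpers for the aliasing abiv1 data cache: a user page only needs an
// explicit flush when the kernel and user mappings fall into different
// cache colours. Illustration, assuming SHMLBA covers four 4 KiB pages:
// pages_do_alias(0x1000, 0x5000) == 0 (same colour, no flush), while
// pages_do_alias(0x1000, 0x2000) == 0x3000 (different colour, so
// flush_dcache_page() is called).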
3 | 4 | #include 5 | 6 | extern void flush_dcache_page(struct page *page); 7 | 8 | static inline unsigned long pages_do_alias(unsigned long addr1, 9 | unsigned long addr2) 10 | { 11 | return (addr1 ^ addr2) & (SHMLBA-1); 12 | } 13 | 14 | static inline void clear_user_page(void *addr, unsigned long vaddr, 15 | struct page *page) 16 | { 17 | clear_page(addr); 18 | if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK)) 19 | flush_dcache_page(page); 20 | } 21 | 22 | static inline void copy_user_page(void *to, void *from, unsigned long vaddr, 23 | struct page *page) 24 | { 25 | copy_page(to, from); 26 | if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK)) 27 | flush_dcache_page(page); 28 | } 29 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/pgtable-bits.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_PGTABLE_BITS_H 5 | #define __ASM_CSKY_PGTABLE_BITS_H 6 | 7 | /* implemented in software */ 8 | #define _PAGE_ACCESSED (1<<3) 9 | #define PAGE_ACCESSED_BIT (3) 10 | 11 | #define _PAGE_READ (1<<1) 12 | #define _PAGE_WRITE (1<<2) 13 | #define _PAGE_PRESENT (1<<0) 14 | 15 | #define _PAGE_MODIFIED (1<<4) 16 | #define PAGE_MODIFIED_BIT (4) 17 | 18 | /* implemented in hardware */ 19 | #define _PAGE_GLOBAL (1<<6) 20 | 21 | #define _PAGE_VALID (1<<7) 22 | #define PAGE_VALID_BIT (7) 23 | 24 | #define _PAGE_DIRTY (1<<8) 25 | #define PAGE_DIRTY_BIT (8) 26 | 27 | #define _PAGE_CACHE (3<<9) 28 | #define _PAGE_UNCACHE (2<<9) 29 | #define _PAGE_SO _PAGE_UNCACHE 30 | 31 | #define _CACHE_MASK (7<<9) 32 | 33 | #define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE) 34 | #define _CACHE_UNCACHED (_PAGE_VALID | _PAGE_UNCACHE) 35 | 36 | #define HAVE_ARCH_UNMAPPED_AREA 37 | 38 | #endif /* __ASM_CSKY_PGTABLE_BITS_H */ 39 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/reg_ops.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ABI_REG_OPS_H 5 | #define __ABI_REG_OPS_H 6 | #include 7 | 8 | #define cprcr(reg) \ 9 | ({ \ 10 | unsigned int tmp; \ 11 | asm volatile("cprcr %0, "reg"\n":"=b"(tmp)); \ 12 | tmp; \ 13 | }) 14 | 15 | #define cpwcr(reg, val) \ 16 | ({ \ 17 | asm volatile("cpwcr %0, "reg"\n"::"b"(val)); \ 18 | }) 19 | 20 | static inline unsigned int mfcr_hint(void) 21 | { 22 | return mfcr("cr30"); 23 | } 24 | 25 | static inline unsigned int mfcr_ccr2(void) { return 0; } 26 | 27 | #endif /* __ABI_REG_OPS_H */ 28 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/regdef.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef __ASM_CSKY_REGDEF_H 5 | #define __ASM_CSKY_REGDEF_H 6 | 7 | #define syscallid r1 8 | #define regs_syscallid(regs) regs->regs[9] 9 | #define regs_fp(regs) regs->regs[2] 10 | 11 | /* 12 | * PSR format: 13 | * | 31 | 30-24 | 23-16 | 15 14 | 13-0 | 14 | * S CPID VEC TM 15 | * 16 | * S: Super Mode 17 | * CPID: Coprocessor id, only 15 for MMU 18 | * VEC: Exception Number 19 | * TM: Trace Mode 20 | */ 21 | #define DEFAULT_PSR_VALUE 0x8f000000 22 | 23 | #define SYSTRACE_SAVENUM 2 24 | 25 | #define TRAP0_SIZE 2 26 | 27 | #endif /* __ASM_CSKY_REGDEF_H */ 28 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/string.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ABI_CSKY_STRING_H 5 | #define __ABI_CSKY_STRING_H 6 | 7 | #define __HAVE_ARCH_MEMCMP 8 | extern int memcmp(const void *, const void *, __kernel_size_t); 9 | 10 | #define __HAVE_ARCH_MEMCPY 11 | extern void *memcpy(void *, const void *, __kernel_size_t); 12 | 13 | #define __HAVE_ARCH_MEMMOVE 14 | extern void *memmove(void *, const void *, __kernel_size_t); 15 | 16 | #define __HAVE_ARCH_MEMSET 17 | extern void *memset(void *, int, __kernel_size_t); 18 | 19 | #define __HAVE_ARCH_STRCPY 20 | extern char *strcpy(char *, const char *); 21 | 22 | #define __HAVE_ARCH_STRLEN 23 | extern __kernel_size_t strlen(const char *); 24 | 25 | #endif /* __ABI_CSKY_STRING_H */ 26 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/switch_context.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ABI_CSKY_PTRACE_H 5 | #define __ABI_CSKY_PTRACE_H 6 | 7 | struct switch_stack { 8 | unsigned long r8; 9 | unsigned long r9; 10 | unsigned long r10; 11 | unsigned long r11; 12 | unsigned long r12; 13 | unsigned long r13; 14 | unsigned long r14; 15 | unsigned long r15; 16 | }; 17 | #endif /* __ABI_CSKY_PTRACE_H */ 18 | -------------------------------------------------------------------------------- /arch/csky/abiv1/inc/abi/vdso.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #include 4 | 5 | static inline int setup_vdso_page(unsigned short *ptr) 6 | { 7 | int err = 0; 8 | 9 | /* movi r1, 127 */ 10 | err |= __put_user(0x67f1, ptr + 0); 11 | /* addi r1, (139 - 127) */ 12 | err |= __put_user(0x20b1, ptr + 1); 13 | /* trap 0 */ 14 | err |= __put_user(0x0008, ptr + 2); 15 | 16 | return err; 17 | } 18 | -------------------------------------------------------------------------------- /arch/csky/abiv1/mmap.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #define COLOUR_ALIGN(addr,pgoff) \ 13 | ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ 14 | (((pgoff)<mm; 29 | struct vm_area_struct *vma; 30 | int do_align = 0; 31 | struct vm_unmapped_area_info info; 32 | 33 | /* 34 | * We only need to do colour alignment if either the I or D 35 | * caches alias. 
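 * Colour alignment means returning an address whose offset inside an
 * SHMLBA window matches the mapping's file offset, so that every
 * mapping of a given page lands in the same cache colour.
 * Illustration, assuming SHMLBA covers four 4 KiB pages:
 * COLOUR_ALIGN(0x1235000, 3) rounds the address up to 0x1238000 and
 * adds the colour offset 0x3000, returning 0x123b000.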
36 | */ 37 | do_align = filp || (flags & MAP_SHARED); 38 | 39 | /* 40 | * We enforce the MAP_FIXED case. 41 | */ 42 | if (flags & MAP_FIXED) { 43 | if (flags & MAP_SHARED && 44 | (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) 45 | return -EINVAL; 46 | return addr; 47 | } 48 | 49 | if (len > TASK_SIZE) 50 | return -ENOMEM; 51 | 52 | if (addr) { 53 | if (do_align) 54 | addr = COLOUR_ALIGN(addr, pgoff); 55 | else 56 | addr = PAGE_ALIGN(addr); 57 | 58 | vma = find_vma(mm, addr); 59 | if (TASK_SIZE - len >= addr && 60 | (!vma || addr + len <= vm_start_gap(vma))) 61 | return addr; 62 | } 63 | 64 | info.flags = 0; 65 | info.length = len; 66 | info.low_limit = mm->mmap_base; 67 | info.high_limit = TASK_SIZE; 68 | info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; 69 | info.align_offset = pgoff << PAGE_SHIFT; 70 | return vm_unmapped_area(&info); 71 | } 72 | -------------------------------------------------------------------------------- /arch/csky/abiv1/strcpy.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 1991-2018 Free Software Foundation, Inc. 3 | 4 | #include "memcopy.h" 5 | 6 | #undef strcpy 7 | 8 | #ifndef STRCPY 9 | # define STRCPY strcpy 10 | #endif 11 | 12 | /* Copy SRC to DEST. */ 13 | char * 14 | STRCPY (char *dest, const char *src) 15 | { 16 | return memcpy (dest, src, strlen (src) + 1); 17 | } 18 | -------------------------------------------------------------------------------- /arch/csky/abiv1/strksyms.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | 6 | EXPORT_SYMBOL(memcpy); 7 | EXPORT_SYMBOL(memcmp); 8 | EXPORT_SYMBOL(memset); 9 | EXPORT_SYMBOL(memmove); 10 | EXPORT_SYMBOL(strcpy); 11 | EXPORT_SYMBOL(strlen); 12 | -------------------------------------------------------------------------------- /arch/csky/abiv1/strlen.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 1991-2018 Free Software Foundation, Inc. 3 | 4 | #include "memcopy.h" 5 | 6 | #undef strlen 7 | 8 | #ifndef STRLEN 9 | # define STRLEN strlen 10 | #endif 11 | 12 | /* Return the length of the null-terminated string STR. Scan for 13 | the null terminator quickly by testing four bytes at a time. */ 14 | size_t 15 | STRLEN (const char *str) 16 | { 17 | const char *char_ptr; 18 | const unsigned long int *longword_ptr; 19 | unsigned long int longword, himagic, lomagic; 20 | 21 | /* Handle the first few characters by reading one character at a time. 22 | Do this until CHAR_PTR is aligned on a longword boundary. */ 23 | for (char_ptr = str; ((unsigned long int) char_ptr 24 | & (sizeof (longword) - 1)) != 0; 25 | ++char_ptr) 26 | if (*char_ptr == '\0') 27 | return char_ptr - str; 28 | 29 | /* All these elucidatory comments refer to 4-byte longwords, 30 | but the theory applies equally well to 8-byte longwords. */ 31 | 32 | longword_ptr = (unsigned long int *) char_ptr; 33 | 34 | /* Bits 31, 24, 16, and 8 of this number are zero. Call these bits 35 | the "holes." Note that there is a hole just to the left of 36 | each byte, with an extra at the end: 37 | 38 | bits: 01111110 11111110 11111110 11111111 39 | bytes: AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD 40 | 41 | The 1-bits make sure that carries propagate to the next 0-bit. 42 | The 0-bits provide holes for carries to fall into. 
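     Worked example with 32-bit longwords: 0x41420043 contains a zero byte;
     (0x41420043 - 0x01010101) = 0x4040ff42, and
     0x4040ff42 & ~0x41420043 & 0x80808080 = 0x00008000, which is non-zero,
     so the loop below drops out and examines the bytes individually.  For
     0x41424344, which has no zero byte, the same expression yields 0.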
*/ 43 | himagic = 0x80808080L; 44 | lomagic = 0x01010101L; 45 | if (sizeof (longword) > 4) 46 | { 47 | /* 64-bit version of the magic. */ 48 | /* Do the shift in two steps to avoid a warning if long has 32 bits. */ 49 | himagic = ((himagic << 16) << 16) | himagic; 50 | lomagic = ((lomagic << 16) << 16) | lomagic; 51 | } 52 | BUG_ON(sizeof (longword) > 8); 53 | 54 | /* Instead of the traditional loop which tests each character, 55 | we will test a longword at a time. The tricky part is testing 56 | if *any of the four* bytes in the longword in question are zero. */ 57 | for (;;) 58 | { 59 | longword = *longword_ptr++; 60 | 61 | if (((longword - lomagic) & ~longword & himagic) != 0) 62 | { 63 | /* Which of the bytes was the zero? If none of them were, it was 64 | a misfire; continue the search. */ 65 | 66 | const char *cp = (const char *) (longword_ptr - 1); 67 | 68 | if (cp[0] == 0) 69 | return cp - str; 70 | if (cp[1] == 0) 71 | return cp - str + 1; 72 | if (cp[2] == 0) 73 | return cp - str + 2; 74 | if (cp[3] == 0) 75 | return cp - str + 3; 76 | if (sizeof (longword) > 4) 77 | { 78 | if (cp[4] == 0) 79 | return cp - str + 4; 80 | if (cp[5] == 0) 81 | return cp - str + 5; 82 | if (cp[6] == 0) 83 | return cp - str + 6; 84 | if (cp[7] == 0) 85 | return cp - str + 7; 86 | } 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /arch/csky/abiv2/Makefile: -------------------------------------------------------------------------------- 1 | obj-y += cacheflush.o 2 | obj-$(CONFIG_CPU_HAS_FPU) += fpu.o 3 | obj-y += memcmp.o 4 | ifeq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), y) 5 | obj-y += memcpy.o 6 | obj-y += memmove.o 7 | obj-y += memset.o 8 | endif 9 | obj-y += strcmp.o 10 | obj-y += strcpy.o 11 | obj-y += strlen.o 12 | obj-y += strksyms.o 13 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o 14 | -------------------------------------------------------------------------------- /arch/csky/abiv2/cacheflush.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, 11 | pte_t *pte) 12 | { 13 | unsigned long addr; 14 | struct page *page; 15 | 16 | flush_tlb_page(vma, address); 17 | 18 | if (!(vma->vm_flags & VM_EXEC)) 19 | return; 20 | 21 | if (!pfn_valid(pte_pfn(*pte))) 22 | return; 23 | 24 | page = pfn_to_page(pte_pfn(*pte)); 25 | if (page == ZERO_PAGE(0)) 26 | return; 27 | 28 | if (test_and_set_bit(PG_dcache_clean, &page->flags)) 29 | goto out; 30 | 31 | addr = (unsigned long) kmap_atomic(page); 32 | 33 | dcache_wb_range(addr, addr + PAGE_SIZE); 34 | 35 | kunmap_atomic((void *) addr); 36 | out: 37 | icache_inv_range(address, address + PAGE_SIZE); 38 | } 39 | 40 | void flush_icache_deferred(struct mm_struct *mm) 41 | { 42 | unsigned int cpu = smp_processor_id(); 43 | cpumask_t *mask = &mm->context.icache_stale_mask; 44 | 45 | if (cpumask_test_cpu(cpu, mask)) { 46 | cpumask_clear_cpu(cpu, mask); 47 | /* 48 | * Ensure the remote hart's writes are visible to this hart. 49 | * This pairs with a barrier in flush_icache_mm. 
50 | */ 51 | smp_mb(); 52 | local_icache_inv_all(NULL); 53 | } 54 | } 55 | 56 | void flush_icache_mm_range(struct mm_struct *mm, 57 | unsigned long start, unsigned long end) 58 | { 59 | unsigned int cpu; 60 | cpumask_t others, *mask; 61 | 62 | preempt_disable(); 63 | 64 | #ifdef CONFIG_CPU_HAS_ICACHE_INS 65 | if (mm == current->mm) { 66 | icache_inv_range(start, end); 67 | preempt_enable(); 68 | return; 69 | } 70 | #endif 71 | 72 | /* Mark every hart's icache as needing a flush for this MM. */ 73 | mask = &mm->context.icache_stale_mask; 74 | cpumask_setall(mask); 75 | 76 | /* Flush this hart's I$ now, and mark it as flushed. */ 77 | cpu = smp_processor_id(); 78 | cpumask_clear_cpu(cpu, mask); 79 | local_icache_inv_all(NULL); 80 | 81 | /* 82 | * Flush the I$ of other harts concurrently executing, and mark them as 83 | * flushed. 84 | */ 85 | cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); 86 | 87 | if (mm != current->active_mm || !cpumask_empty(&others)) { 88 | on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1); 89 | cpumask_clear(mask); 90 | } 91 | 92 | preempt_enable(); 93 | } 94 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/cacheflush.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ABI_CSKY_CACHEFLUSH_H 4 | #define __ABI_CSKY_CACHEFLUSH_H 5 | 6 | /* Keep includes the same across arches. */ 7 | #include 8 | 9 | /* 10 | * The cache doesn't need to be flushed when TLB entries change when 11 | * the cache is mapped to physical memory, not virtual memory 12 | */ 13 | #define flush_cache_all() do { } while (0) 14 | #define flush_cache_mm(mm) do { } while (0) 15 | #define flush_cache_dup_mm(mm) do { } while (0) 16 | #define flush_cache_range(vma, start, end) do { } while (0) 17 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 18 | 19 | #define PG_dcache_clean PG_arch_1 20 | 21 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 22 | static inline void flush_dcache_page(struct page *page) 23 | { 24 | if (test_bit(PG_dcache_clean, &page->flags)) 25 | clear_bit(PG_dcache_clean, &page->flags); 26 | } 27 | 28 | #define flush_dcache_mmap_lock(mapping) do { } while (0) 29 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) 30 | #define flush_icache_page(vma, page) do { } while (0) 31 | 32 | #define flush_icache_range(start, end) cache_wbinv_range(start, end) 33 | 34 | void flush_icache_mm_range(struct mm_struct *mm, 35 | unsigned long start, unsigned long end); 36 | void flush_icache_deferred(struct mm_struct *mm); 37 | 38 | #define flush_cache_vmap(start, end) do { } while (0) 39 | #define flush_cache_vunmap(start, end) do { } while (0) 40 | 41 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 42 | do { \ 43 | memcpy(dst, src, len); \ 44 | if (vma->vm_flags & VM_EXEC) { \ 45 | dcache_wb_range((unsigned long)dst, \ 46 | (unsigned long)dst + len); \ 47 | flush_icache_mm_range(current->mm, \ 48 | (unsigned long)dst, \ 49 | (unsigned long)dst + len); \ 50 | } \ 51 | } while (0) 52 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 53 | memcpy(dst, src, len) 54 | 55 | #endif /* __ABI_CSKY_CACHEFLUSH_H */ 56 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/ckmmu.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY 
Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_CKMMUV2_H 5 | #define __ASM_CSKY_CKMMUV2_H 6 | 7 | #include 8 | #include 9 | 10 | static inline int read_mmu_index(void) 11 | { 12 | return mfcr("cr<0, 15>"); 13 | } 14 | 15 | static inline void write_mmu_index(int value) 16 | { 17 | mtcr("cr<0, 15>", value); 18 | } 19 | 20 | static inline int read_mmu_entrylo0(void) 21 | { 22 | return mfcr("cr<2, 15>"); 23 | } 24 | 25 | static inline int read_mmu_entrylo1(void) 26 | { 27 | return mfcr("cr<3, 15>"); 28 | } 29 | 30 | static inline void write_mmu_pagemask(int value) 31 | { 32 | mtcr("cr<6, 15>", value); 33 | } 34 | 35 | static inline int read_mmu_entryhi(void) 36 | { 37 | return mfcr("cr<4, 15>"); 38 | } 39 | 40 | static inline void write_mmu_entryhi(int value) 41 | { 42 | #ifdef CONFIG_CPU_HAS_TLBI 43 | sync_is(); 44 | #endif 45 | mtcr("cr<4, 15>", value); 46 | } 47 | 48 | static inline unsigned long read_mmu_msa0(void) 49 | { 50 | return mfcr("cr<30, 15>"); 51 | } 52 | 53 | static inline void write_mmu_msa0(unsigned long value) 54 | { 55 | mtcr("cr<30, 15>", value); 56 | } 57 | 58 | static inline unsigned long read_mmu_msa1(void) 59 | { 60 | return mfcr("cr<31, 15>"); 61 | } 62 | 63 | static inline void write_mmu_msa1(unsigned long value) 64 | { 65 | mtcr("cr<31, 15>", value); 66 | } 67 | 68 | /* 69 | * TLB operations. 70 | */ 71 | static inline void tlb_probe(void) 72 | { 73 | mtcr("cr<8, 15>", 0x80000000); 74 | } 75 | 76 | static inline void tlb_read(void) 77 | { 78 | mtcr("cr<8, 15>", 0x40000000); 79 | } 80 | 81 | static inline void tlb_invalid_all(void) 82 | { 83 | #ifdef CONFIG_CPU_HAS_TLBI 84 | sync_is(); 85 | asm volatile( 86 | "tlbi.alls \n" 87 | "sync.i \n" 88 | : 89 | : 90 | : "memory"); 91 | #else 92 | mtcr("cr<8, 15>", 0x04000000); 93 | #endif 94 | } 95 | 96 | static inline void local_tlb_invalid_all(void) 97 | { 98 | #ifdef CONFIG_CPU_HAS_TLBI 99 | sync_is(); 100 | asm volatile( 101 | "tlbi.all \n" 102 | "sync.i \n" 103 | : 104 | : 105 | : "memory"); 106 | #else 107 | tlb_invalid_all(); 108 | #endif 109 | } 110 | 111 | static inline void tlb_invalid_indexed(void) 112 | { 113 | mtcr("cr<8, 15>", 0x02000000); 114 | } 115 | 116 | #define NOP32 ".long 0x4820c400\n" 117 | 118 | static inline void setup_pgd(pgd_t *pgd, int asid) 119 | { 120 | #ifdef CONFIG_CPU_HAS_TLBI 121 | sync_is(); 122 | #else 123 | mb(); 124 | #endif 125 | asm volatile( 126 | #ifdef CONFIG_CPU_HAS_TLBI 127 | "mtcr %1, cr<28, 15> \n" 128 | #endif 129 | "mtcr %1, cr<29, 15> \n" 130 | "mtcr %0, cr< 4, 15> \n" 131 | ".rept 64 \n" 132 | NOP32 133 | ".endr \n" 134 | : 135 | :"r"(asid), "r"(__pa(pgd) | BIT(0)) 136 | :"memory"); 137 | } 138 | 139 | static inline pgd_t *get_pgd(void) 140 | { 141 | return __va(mfcr("cr<29, 15>") & ~BIT(0)); 142 | } 143 | #endif /* __ASM_CSKY_CKMMUV2_H */ 144 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/elf.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ABI_CSKY_ELF_H 4 | #define __ABI_CSKY_ELF_H 5 | 6 | /* The member sort in array pr_reg[x] is defined by GDB. 
*/ 7 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ 8 | pr_reg[0] = regs->pc; \ 9 | pr_reg[1] = regs->a1; \ 10 | pr_reg[2] = regs->a0; \ 11 | pr_reg[3] = regs->sr; \ 12 | pr_reg[4] = regs->a2; \ 13 | pr_reg[5] = regs->a3; \ 14 | pr_reg[6] = regs->regs[0]; \ 15 | pr_reg[7] = regs->regs[1]; \ 16 | pr_reg[8] = regs->regs[2]; \ 17 | pr_reg[9] = regs->regs[3]; \ 18 | pr_reg[10] = regs->regs[4]; \ 19 | pr_reg[11] = regs->regs[5]; \ 20 | pr_reg[12] = regs->regs[6]; \ 21 | pr_reg[13] = regs->regs[7]; \ 22 | pr_reg[14] = regs->regs[8]; \ 23 | pr_reg[15] = regs->regs[9]; \ 24 | pr_reg[16] = regs->usp; \ 25 | pr_reg[17] = regs->lr; \ 26 | pr_reg[18] = regs->exregs[0]; \ 27 | pr_reg[19] = regs->exregs[1]; \ 28 | pr_reg[20] = regs->exregs[2]; \ 29 | pr_reg[21] = regs->exregs[3]; \ 30 | pr_reg[22] = regs->exregs[4]; \ 31 | pr_reg[23] = regs->exregs[5]; \ 32 | pr_reg[24] = regs->exregs[6]; \ 33 | pr_reg[25] = regs->exregs[7]; \ 34 | pr_reg[26] = regs->exregs[8]; \ 35 | pr_reg[27] = regs->exregs[9]; \ 36 | pr_reg[28] = regs->exregs[10]; \ 37 | pr_reg[29] = regs->exregs[11]; \ 38 | pr_reg[30] = regs->exregs[12]; \ 39 | pr_reg[31] = regs->exregs[13]; \ 40 | pr_reg[32] = regs->exregs[14]; \ 41 | pr_reg[33] = regs->tls; \ 42 | } while (0); 43 | #endif /* __ABI_CSKY_ELF_H */ 44 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/fpu.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_FPU_H 5 | #define __ASM_CSKY_FPU_H 6 | 7 | #include 8 | #include 9 | 10 | int fpu_libc_helper(struct pt_regs *regs); 11 | void fpu_fpe(struct pt_regs *regs); 12 | 13 | static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); } 14 | 15 | void save_to_user_fp(struct user_fp *user_fp); 16 | void restore_from_user_fp(struct user_fp *user_fp); 17 | 18 | /* 19 | * Define the fesr bit for fpe handle. 
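 * A fesr value of 0x12 (bits 4 and 1 set), for example, reports an inexact
 * result (FPE_IXC) together with a divide by zero (FPE_DZC).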
20 | */ 21 | #define FPE_ILLE (1 << 16) /* Illegal instruction */ 22 | #define FPE_FEC (1 << 7) /* Input float-point arithmetic exception */ 23 | #define FPE_IDC (1 << 5) /* Input denormalized exception */ 24 | #define FPE_IXC (1 << 4) /* Inexact exception */ 25 | #define FPE_UFC (1 << 3) /* Underflow exception */ 26 | #define FPE_OFC (1 << 2) /* Overflow exception */ 27 | #define FPE_DZC (1 << 1) /* Divide by zero exception */ 28 | #define FPE_IOC (1 << 0) /* Invalid operation exception */ 29 | #define FPE_REGULAR_EXCEPTION (FPE_IXC | FPE_UFC | FPE_OFC | FPE_DZC | FPE_IOC) 30 | 31 | #ifdef CONFIG_OPEN_FPU_IDE 32 | #define IDE_STAT (1 << 5) 33 | #else 34 | #define IDE_STAT 0 35 | #endif 36 | 37 | #ifdef CONFIG_OPEN_FPU_IXE 38 | #define IXE_STAT (1 << 4) 39 | #else 40 | #define IXE_STAT 0 41 | #endif 42 | 43 | #ifdef CONFIG_OPEN_FPU_UFE 44 | #define UFE_STAT (1 << 3) 45 | #else 46 | #define UFE_STAT 0 47 | #endif 48 | 49 | #ifdef CONFIG_OPEN_FPU_OFE 50 | #define OFE_STAT (1 << 2) 51 | #else 52 | #define OFE_STAT 0 53 | #endif 54 | 55 | #ifdef CONFIG_OPEN_FPU_DZE 56 | #define DZE_STAT (1 << 1) 57 | #else 58 | #define DZE_STAT 0 59 | #endif 60 | 61 | #ifdef CONFIG_OPEN_FPU_IOE 62 | #define IOE_STAT (1 << 0) 63 | #else 64 | #define IOE_STAT 0 65 | #endif 66 | 67 | #endif /* __ASM_CSKY_FPU_H */ 68 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/page.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | static inline void clear_user_page(void *addr, unsigned long vaddr, 5 | struct page *page) 6 | { 7 | clear_page(addr); 8 | } 9 | 10 | static inline void copy_user_page(void *to, void *from, unsigned long vaddr, 11 | struct page *page) 12 | { 13 | copy_page(to, from); 14 | } 15 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/pgtable-bits.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_PGTABLE_BITS_H 5 | #define __ASM_CSKY_PGTABLE_BITS_H 6 | 7 | /* implemented in software */ 8 | #define _PAGE_ACCESSED (1<<7) 9 | #define PAGE_ACCESSED_BIT (7) 10 | 11 | #define _PAGE_READ (1<<8) 12 | #define _PAGE_WRITE (1<<9) 13 | #define _PAGE_PRESENT (1<<10) 14 | 15 | #define _PAGE_MODIFIED (1<<11) 16 | #define PAGE_MODIFIED_BIT (11) 17 | 18 | /* implemented in hardware */ 19 | #define _PAGE_GLOBAL (1<<0) 20 | 21 | #define _PAGE_VALID (1<<1) 22 | #define PAGE_VALID_BIT (1) 23 | 24 | #define _PAGE_DIRTY (1<<2) 25 | #define PAGE_DIRTY_BIT (2) 26 | 27 | #define _PAGE_SO (1<<5) 28 | #define _PAGE_BUF (1<<6) 29 | 30 | #define _PAGE_CACHE (1<<3) 31 | 32 | #define _CACHE_MASK _PAGE_CACHE 33 | 34 | #define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF) 35 | #define _CACHE_UNCACHED (_PAGE_VALID) 36 | 37 | #endif /* __ASM_CSKY_PGTABLE_BITS_H */ 38 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/reg_ops.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef __ABI_REG_OPS_H 5 | #define __ABI_REG_OPS_H 6 | #include 7 | 8 | static inline unsigned int mfcr_hint(void) 9 | { 10 | return mfcr("cr31"); 11 | } 12 | 13 | static inline unsigned int mfcr_ccr2(void) 14 | { 15 | return mfcr("cr23"); 16 | } 17 | #endif /* __ABI_REG_OPS_H */ 18 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/regdef.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_REGDEF_H 5 | #define __ASM_CSKY_REGDEF_H 6 | 7 | #define syscallid r7 8 | #define regs_syscallid(regs) regs->regs[3] 9 | #define regs_fp(regs) regs->regs[4] 10 | 11 | /* 12 | * PSR format: 13 | * | 31 | 30-24 | 23-16 | 15 14 | 13-10 | 9 | 8-0 | 14 | * S VEC TM MM 15 | * 16 | * S: Super Mode 17 | * VEC: Exception Number 18 | * TM: Trace Mode 19 | * MM: Memory unaligned addr access 20 | */ 21 | #define DEFAULT_PSR_VALUE 0x80000200 22 | 23 | #define SYSTRACE_SAVENUM 5 24 | 25 | #define TRAP0_SIZE 4 26 | 27 | #endif /* __ASM_CSKY_REGDEF_H */ 28 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/string.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ABI_CSKY_STRING_H 4 | #define __ABI_CSKY_STRING_H 5 | 6 | #define __HAVE_ARCH_MEMCMP 7 | extern int memcmp(const void *, const void *, __kernel_size_t); 8 | 9 | #define __HAVE_ARCH_MEMCPY 10 | extern void *memcpy(void *, const void *, __kernel_size_t); 11 | 12 | #define __HAVE_ARCH_MEMMOVE 13 | extern void *memmove(void *, const void *, __kernel_size_t); 14 | 15 | #define __HAVE_ARCH_MEMSET 16 | extern void *memset(void *, int, __kernel_size_t); 17 | 18 | #define __HAVE_ARCH_STRCMP 19 | extern int strcmp(const char *, const char *); 20 | 21 | #define __HAVE_ARCH_STRCPY 22 | extern char *strcpy(char *, const char *); 23 | 24 | #define __HAVE_ARCH_STRLEN 25 | extern __kernel_size_t strlen(const char *); 26 | 27 | #endif /* __ABI_CSKY_STRING_H */ 28 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/switch_context.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef __ABI_CSKY_PTRACE_H 5 | #define __ABI_CSKY_PTRACE_H 6 | 7 | struct switch_stack { 8 | #ifdef CONFIG_CPU_HAS_HILO 9 | unsigned long rhi; 10 | unsigned long rlo; 11 | unsigned long cr14; 12 | unsigned long pad; 13 | #endif 14 | unsigned long r4; 15 | unsigned long r5; 16 | unsigned long r6; 17 | unsigned long r7; 18 | unsigned long r8; 19 | unsigned long r9; 20 | unsigned long r10; 21 | unsigned long r11; 22 | 23 | unsigned long r15; 24 | unsigned long r16; 25 | unsigned long r17; 26 | unsigned long r26; 27 | unsigned long r27; 28 | unsigned long r28; 29 | unsigned long r29; 30 | unsigned long r30; 31 | }; 32 | #endif /* __ABI_CSKY_PTRACE_H */ 33 | -------------------------------------------------------------------------------- /arch/csky/abiv2/inc/abi/vdso.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ABI_CSKY_VDSO_H 4 | #define __ABI_CSKY_VDSO_H 5 | 6 | #include 7 | 8 | static inline int setup_vdso_page(unsigned short *ptr) 9 | { 10 | int err = 0; 11 | 12 | /* movi r7, 173 */ 13 | err |= __put_user(0xea07, ptr); 14 | err |= __put_user(0x008b, ptr+1); 15 | 16 | /* trap 0 */ 17 | err |= __put_user(0xc000, ptr+2); 18 | err |= __put_user(0x2020, ptr+3); 19 | 20 | return err; 21 | } 22 | 23 | #endif /* __ABI_CSKY_STRING_H */ 24 | -------------------------------------------------------------------------------- /arch/csky/abiv2/memcmp.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include "sysdep.h" 6 | 7 | ENTRY(memcmp) 8 | /* Test if len less than 4 bytes. */ 9 | mov r3, r0 10 | movi r0, 0 11 | mov r12, r4 12 | cmplti r2, 4 13 | bt .L_compare_by_byte 14 | 15 | andi r13, r0, 3 16 | movi r19, 4 17 | 18 | /* Test if s1 is not 4 bytes aligned. */ 19 | bnez r13, .L_s1_not_aligned 20 | 21 | LABLE_ALIGN 22 | .L_s1_aligned: 23 | /* If dest is aligned, then copy. */ 24 | zext r18, r2, 31, 4 25 | /* Test if len less than 16 bytes. */ 26 | bez r18, .L_compare_by_word 27 | 28 | .L_compare_by_4word: 29 | /* If aligned, load word each time. */ 30 | ldw r20, (r3, 0) 31 | ldw r21, (r1, 0) 32 | /* If s1[i] != s2[i], goto .L_byte_check. */ 33 | cmpne r20, r21 34 | bt .L_byte_check 35 | 36 | ldw r20, (r3, 4) 37 | ldw r21, (r1, 4) 38 | cmpne r20, r21 39 | bt .L_byte_check 40 | 41 | ldw r20, (r3, 8) 42 | ldw r21, (r1, 8) 43 | cmpne r20, r21 44 | bt .L_byte_check 45 | 46 | ldw r20, (r3, 12) 47 | ldw r21, (r1, 12) 48 | cmpne r20, r21 49 | bt .L_byte_check 50 | 51 | PRE_BNEZAD (r18) 52 | addi a3, 16 53 | addi a1, 16 54 | 55 | BNEZAD (r18, .L_compare_by_4word) 56 | 57 | .L_compare_by_word: 58 | zext r18, r2, 3, 2 59 | bez r18, .L_compare_by_byte 60 | .L_compare_by_word_loop: 61 | ldw r20, (r3, 0) 62 | ldw r21, (r1, 0) 63 | addi r3, 4 64 | PRE_BNEZAD (r18) 65 | cmpne r20, r21 66 | addi r1, 4 67 | bt .L_byte_check 68 | BNEZAD (r18, .L_compare_by_word_loop) 69 | 70 | .L_compare_by_byte: 71 | zext r18, r2, 1, 0 72 | bez r18, .L_return 73 | .L_compare_by_byte_loop: 74 | ldb r0, (r3, 0) 75 | ldb r4, (r1, 0) 76 | addi r3, 1 77 | subu r0, r4 78 | PRE_BNEZAD (r18) 79 | addi r1, 1 80 | bnez r0, .L_return 81 | BNEZAD (r18, .L_compare_by_byte_loop) 82 | 83 | .L_return: 84 | mov r4, r12 85 | rts 86 | 87 | # ifdef __CSKYBE__ 88 | /* d[i] != s[i] in word, so we check byte 0. 
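 * On a big-endian core the lowest-addressed byte of a word is its most
 * significant byte, and memcmp's sign is decided by the first differing
 * byte in memory order, so the recheck starts from byte 0 here; the
 * little-endian path below starts from byte 3 for the same reason.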
*/ 89 | .L_byte_check: 90 | xtrb0 r0, r20 91 | xtrb0 r2, r21 92 | subu r0, r2 93 | bnez r0, .L_return 94 | 95 | /* check byte 1 */ 96 | xtrb1 r0, r20 97 | xtrb1 r2, r21 98 | subu r0, r2 99 | bnez r0, .L_return 100 | 101 | /* check byte 2 */ 102 | xtrb2 r0, r20 103 | xtrb2 r2, r21 104 | subu r0, r2 105 | bnez r0, .L_return 106 | 107 | /* check byte 3 */ 108 | xtrb3 r0, r20 109 | xtrb3 r2, r21 110 | subu r0, r2 111 | # else 112 | /* s1[i] != s2[i] in word, so we check byte 3. */ 113 | .L_byte_check: 114 | xtrb3 r0, r20 115 | xtrb3 r2, r21 116 | subu r0, r2 117 | bnez r0, .L_return 118 | 119 | /* check byte 2 */ 120 | xtrb2 r0, r20 121 | xtrb2 r2, r21 122 | subu r0, r2 123 | bnez r0, .L_return 124 | 125 | /* check byte 1 */ 126 | xtrb1 r0, r20 127 | xtrb1 r2, r21 128 | subu r0, r2 129 | bnez r0, .L_return 130 | 131 | /* check byte 0 */ 132 | xtrb0 r0, r20 133 | xtrb0 r2, r21 134 | subu r0, r2 135 | br .L_return 136 | # endif /* !__CSKYBE__ */ 137 | 138 | /* Compare when s1 is not aligned. */ 139 | .L_s1_not_aligned: 140 | sub r13, r19, r13 141 | sub r2, r13 142 | .L_s1_not_aligned_loop: 143 | ldb r0, (r3, 0) 144 | ldb r4, (r1, 0) 145 | addi r3, 1 146 | subu r0, r4 147 | PRE_BNEZAD (r13) 148 | addi r1, 1 149 | bnez r0, .L_return 150 | BNEZAD (r13, .L_s1_not_aligned_loop) 151 | br .L_s1_aligned 152 | ENDPROC(memcmp) 153 | -------------------------------------------------------------------------------- /arch/csky/abiv2/memcpy.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include "sysdep.h" 6 | 7 | ENTRY(__memcpy) 8 | ENTRY(memcpy) 9 | /* Test if len less than 4 bytes. */ 10 | mov r12, r0 11 | cmplti r2, 4 12 | bt .L_copy_by_byte 13 | 14 | andi r13, r0, 3 15 | movi r19, 4 16 | /* Test if dest is not 4 bytes aligned. */ 17 | bnez r13, .L_dest_not_aligned 18 | 19 | /* Hardware can handle unaligned access directly. */ 20 | .L_dest_aligned: 21 | /* If dest is aligned, then copy. */ 22 | zext r18, r2, 31, 4 23 | 24 | /* Test if len less than 16 bytes. */ 25 | bez r18, .L_len_less_16bytes 26 | movi r19, 0 27 | 28 | LABLE_ALIGN 29 | .L_len_larger_16bytes: 30 | #if defined(__CK860__) 31 | ldw r3, (r1, 0) 32 | stw r3, (r0, 0) 33 | ldw r3, (r1, 4) 34 | stw r3, (r0, 4) 35 | ldw r3, (r1, 8) 36 | stw r3, (r0, 8) 37 | ldw r3, (r1, 12) 38 | addi r1, 16 39 | stw r3, (r0, 12) 40 | addi r0, 16 41 | #else 42 | ldw r20, (r1, 0) 43 | ldw r21, (r1, 4) 44 | ldw r22, (r1, 8) 45 | ldw r23, (r1, 12) 46 | stw r20, (r0, 0) 47 | stw r21, (r0, 4) 48 | stw r22, (r0, 8) 49 | stw r23, (r0, 12) 50 | PRE_BNEZAD (r18) 51 | addi r1, 16 52 | addi r0, 16 53 | #endif 54 | BNEZAD (r18, .L_len_larger_16bytes) 55 | 56 | .L_len_less_16bytes: 57 | zext r18, r2, 3, 2 58 | bez r18, .L_copy_by_byte 59 | .L_len_less_16bytes_loop: 60 | ldw r3, (r1, 0) 61 | PRE_BNEZAD (r18) 62 | addi r1, 4 63 | stw r3, (r0, 0) 64 | addi r0, 4 65 | BNEZAD (r18, .L_len_less_16bytes_loop) 66 | 67 | /* Test if len less than 4 bytes. */ 68 | .L_copy_by_byte: 69 | zext r18, r2, 1, 0 70 | bez r18, .L_return 71 | .L_copy_by_byte_loop: 72 | ldb r3, (r1, 0) 73 | PRE_BNEZAD (r18) 74 | addi r1, 1 75 | stb r3, (r0, 0) 76 | addi r0, 1 77 | BNEZAD (r18, .L_copy_by_byte_loop) 78 | 79 | .L_return: 80 | mov r0, r12 81 | rts 82 | 83 | /* 84 | * If dest is not aligned, just copying some bytes makes the 85 | * dest align. 
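 * At most 3 bytes are copied one at a time until dest reaches a 4-byte
 * boundary; any remaining misalignment of src is left to the hardware,
 * which (as noted above) handles unaligned accesses directly.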
86 | */ 87 | .L_dest_not_aligned: 88 | sub r13, r19, r13 89 | sub r2, r13 90 | 91 | /* Makes the dest align. */ 92 | .L_dest_not_aligned_loop: 93 | ldb r3, (r1, 0) 94 | PRE_BNEZAD (r13) 95 | addi r1, 1 96 | stb r3, (r0, 0) 97 | addi r0, 1 98 | BNEZAD (r13, .L_dest_not_aligned_loop) 99 | cmplti r2, 4 100 | bt .L_copy_by_byte 101 | 102 | /* Check whether the src is aligned. */ 103 | jbr .L_dest_aligned 104 | ENDPROC(__memcpy) 105 | -------------------------------------------------------------------------------- /arch/csky/abiv2/memmove.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include "sysdep.h" 6 | 7 | .weak memmove 8 | ENTRY(__memmove) 9 | ENTRY(memmove) 10 | subu r3, r0, r1 11 | cmphs r3, r2 12 | bt memcpy 13 | 14 | mov r12, r0 15 | addu r0, r0, r2 16 | addu r1, r1, r2 17 | 18 | /* Test if len less than 4 bytes. */ 19 | cmplti r2, 4 20 | bt .L_copy_by_byte 21 | 22 | andi r13, r0, 3 23 | /* Test if dest is not 4 bytes aligned. */ 24 | bnez r13, .L_dest_not_aligned 25 | /* Hardware can handle unaligned access directly. */ 26 | .L_dest_aligned: 27 | /* If dest is aligned, then copy. */ 28 | zext r18, r2, 31, 4 29 | /* Test if len less than 16 bytes. */ 30 | bez r18, .L_len_less_16bytes 31 | movi r19, 0 32 | 33 | /* len > 16 bytes */ 34 | LABLE_ALIGN 35 | .L_len_larger_16bytes: 36 | subi r1, 16 37 | subi r0, 16 38 | #if defined(__CK860__) 39 | ldw r3, (r1, 12) 40 | stw r3, (r0, 12) 41 | ldw r3, (r1, 8) 42 | stw r3, (r0, 8) 43 | ldw r3, (r1, 4) 44 | stw r3, (r0, 4) 45 | ldw r3, (r1, 0) 46 | stw r3, (r0, 0) 47 | #else 48 | ldw r20, (r1, 0) 49 | ldw r21, (r1, 4) 50 | ldw r22, (r1, 8) 51 | ldw r23, (r1, 12) 52 | stw r20, (r0, 0) 53 | stw r21, (r0, 4) 54 | stw r22, (r0, 8) 55 | stw r23, (r0, 12) 56 | PRE_BNEZAD (r18) 57 | #endif 58 | BNEZAD (r18, .L_len_larger_16bytes) 59 | 60 | .L_len_less_16bytes: 61 | zext r18, r2, 3, 2 62 | bez r18, .L_copy_by_byte 63 | .L_len_less_16bytes_loop: 64 | subi r1, 4 65 | subi r0, 4 66 | ldw r3, (r1, 0) 67 | PRE_BNEZAD (r18) 68 | stw r3, (r0, 0) 69 | BNEZAD (r18, .L_len_less_16bytes_loop) 70 | 71 | /* Test if len less than 4 bytes. */ 72 | .L_copy_by_byte: 73 | zext r18, r2, 1, 0 74 | bez r18, .L_return 75 | .L_copy_by_byte_loop: 76 | subi r1, 1 77 | subi r0, 1 78 | ldb r3, (r1, 0) 79 | PRE_BNEZAD (r18) 80 | stb r3, (r0, 0) 81 | BNEZAD (r18, .L_copy_by_byte_loop) 82 | 83 | .L_return: 84 | mov r0, r12 85 | rts 86 | 87 | /* If dest is not aligned, just copy some bytes makes the dest 88 | align. */ 89 | .L_dest_not_aligned: 90 | sub r2, r13 91 | .L_dest_not_aligned_loop: 92 | subi r1, 1 93 | subi r0, 1 94 | /* Makes the dest align. */ 95 | ldb r3, (r1, 0) 96 | PRE_BNEZAD (r13) 97 | stb r3, (r0, 0) 98 | BNEZAD (r13, .L_dest_not_aligned_loop) 99 | cmplti r2, 4 100 | bt .L_copy_by_byte 101 | /* Check whether the src is aligned. */ 102 | jbr .L_dest_aligned 103 | ENDPROC(memmove) 104 | ENDPROC(__memmove) 105 | -------------------------------------------------------------------------------- /arch/csky/abiv2/memset.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include "sysdep.h" 6 | 7 | .weak memset 8 | ENTRY(__memset) 9 | ENTRY(memset) 10 | /* Test if len less than 4 bytes. 
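 * (The compare below actually branches to the byte loop for any length
 * under 8, not 4.)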
*/ 11 | mov r12, r0 12 | cmplti r2, 8 13 | bt .L_set_by_byte 14 | 15 | andi r13, r0, 3 16 | movi r19, 4 17 | /* Test if dest is not 4 bytes aligned. */ 18 | bnez r13, .L_dest_not_aligned 19 | /* Hardware can handle unaligned access directly. */ 20 | .L_dest_aligned: 21 | zextb r3, r1 22 | lsli r1, 8 23 | or r1, r3 24 | lsli r3, r1, 16 25 | or r3, r1 26 | 27 | /* If dest is aligned, then copy. */ 28 | zext r18, r2, 31, 4 29 | /* Test if len less than 16 bytes. */ 30 | bez r18, .L_len_less_16bytes 31 | 32 | LABLE_ALIGN 33 | .L_len_larger_16bytes: 34 | stw r3, (r0, 0) 35 | stw r3, (r0, 4) 36 | stw r3, (r0, 8) 37 | stw r3, (r0, 12) 38 | PRE_BNEZAD (r18) 39 | addi r0, 16 40 | BNEZAD (r18, .L_len_larger_16bytes) 41 | 42 | .L_len_less_16bytes: 43 | zext r18, r2, 3, 2 44 | andi r2, 3 45 | bez r18, .L_set_by_byte 46 | .L_len_less_16bytes_loop: 47 | stw r3, (r0, 0) 48 | PRE_BNEZAD (r18) 49 | addi r0, 4 50 | BNEZAD (r18, .L_len_less_16bytes_loop) 51 | 52 | /* Test if len less than 4 bytes. */ 53 | .L_set_by_byte: 54 | zext r18, r2, 2, 0 55 | bez r18, .L_return 56 | .L_set_by_byte_loop: 57 | stb r1, (r0, 0) 58 | PRE_BNEZAD (r18) 59 | addi r0, 1 60 | BNEZAD (r18, .L_set_by_byte_loop) 61 | 62 | .L_return: 63 | mov r0, r12 64 | rts 65 | 66 | /* If dest is not aligned, just set some bytes makes the dest 67 | align. */ 68 | 69 | .L_dest_not_aligned: 70 | sub r13, r19, r13 71 | sub r2, r13 72 | .L_dest_not_aligned_loop: 73 | /* Makes the dest align. */ 74 | stb r1, (r0, 0) 75 | PRE_BNEZAD (r13) 76 | addi r0, 1 77 | BNEZAD (r13, .L_dest_not_aligned_loop) 78 | cmplti r2, 8 79 | bt .L_set_by_byte 80 | /* Check whether the src is aligned. */ 81 | jbr .L_dest_aligned 82 | ENDPROC(memset) 83 | ENDPROC(__memset) 84 | -------------------------------------------------------------------------------- /arch/csky/abiv2/strcmp.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include "sysdep.h" 6 | 7 | ENTRY(strcmp) 8 | mov a3, a0 9 | /* Check if the s1 addr is aligned. */ 10 | xor a2, a3, a1 11 | andi a2, 0x3 12 | bnez a2, 7f 13 | andi t1, a0, 0x3 14 | bnez t1, 5f 15 | 16 | 1: 17 | /* If aligned, load word each time. */ 18 | ldw t0, (a3, 0) 19 | ldw t1, (a1, 0) 20 | /* If s1[i] != s2[i], goto 2f. */ 21 | cmpne t0, t1 22 | bt 2f 23 | /* If s1[i] == s2[i], check if s1 or s2 is at the end. */ 24 | tstnbz t0 25 | /* If at the end, goto 3f (finish comparing). */ 26 | bf 3f 27 | 28 | ldw t0, (a3, 4) 29 | ldw t1, (a1, 4) 30 | cmpne t0, t1 31 | bt 2f 32 | tstnbz t0 33 | bf 3f 34 | 35 | ldw t0, (a3, 8) 36 | ldw t1, (a1, 8) 37 | cmpne t0, t1 38 | bt 2f 39 | tstnbz t0 40 | bf 3f 41 | 42 | ldw t0, (a3, 12) 43 | ldw t1, (a1, 12) 44 | cmpne t0, t1 45 | bt 2f 46 | tstnbz t0 47 | bf 3f 48 | 49 | ldw t0, (a3, 16) 50 | ldw t1, (a1, 16) 51 | cmpne t0, t1 52 | bt 2f 53 | tstnbz t0 54 | bf 3f 55 | 56 | ldw t0, (a3, 20) 57 | ldw t1, (a1, 20) 58 | cmpne t0, t1 59 | bt 2f 60 | tstnbz t0 61 | bf 3f 62 | 63 | ldw t0, (a3, 24) 64 | ldw t1, (a1, 24) 65 | cmpne t0, t1 66 | bt 2f 67 | tstnbz t0 68 | bf 3f 69 | 70 | ldw t0, (a3, 28) 71 | ldw t1, (a1, 28) 72 | cmpne t0, t1 73 | bt 2f 74 | tstnbz t0 75 | bf 3f 76 | 77 | addi a3, 32 78 | addi a1, 32 79 | 80 | br 1b 81 | 82 | # ifdef __CSKYBE__ 83 | /* d[i] != s[i] in word, so we check byte 0. 
*/ 84 | 2: 85 | xtrb0 a0, t0 86 | xtrb0 a2, t1 87 | subu a0, a2 88 | bez a2, 4f 89 | bnez a0, 4f 90 | 91 | /* check byte 1 */ 92 | xtrb1 a0, t0 93 | xtrb1 a2, t1 94 | subu a0, a2 95 | bez a2, 4f 96 | bnez a0, 4f 97 | 98 | /* check byte 2 */ 99 | xtrb2 a0, t0 100 | xtrb2 a2, t1 101 | subu a0, a2 102 | bez a2, 4f 103 | bnez a0, 4f 104 | 105 | /* check byte 3 */ 106 | xtrb3 a0, t0 107 | xtrb3 a2, t1 108 | subu a0, a2 109 | # else 110 | /* s1[i] != s2[i] in word, so we check byte 3. */ 111 | 2: 112 | xtrb3 a0, t0 113 | xtrb3 a2, t1 114 | subu a0, a2 115 | bez a2, 4f 116 | bnez a0, 4f 117 | 118 | /* check byte 2 */ 119 | xtrb2 a0, t0 120 | xtrb2 a2, t1 121 | subu a0, a2 122 | bez a2, 4f 123 | bnez a0, 4f 124 | 125 | /* check byte 1 */ 126 | xtrb1 a0, t0 127 | xtrb1 a2, t1 128 | subu a0, a2 129 | bez a2, 4f 130 | bnez a0, 4f 131 | 132 | /* check byte 0 */ 133 | xtrb0 a0, t0 134 | xtrb0 a2, t1 135 | subu a0, a2 136 | 137 | # endif /* !__CSKYBE__ */ 138 | jmp lr 139 | 3: 140 | movi a0, 0 141 | 4: 142 | jmp lr 143 | 144 | /* Compare when s1 or s2 is not aligned. */ 145 | 5: 146 | subi t1, 4 147 | 6: 148 | ldb a0, (a3, 0) 149 | ldb a2, (a1, 0) 150 | subu a0, a2 151 | bez a2, 4b 152 | bnez a0, 4b 153 | addi t1, 1 154 | addi a1, 1 155 | addi a3, 1 156 | bnez t1, 6b 157 | br 1b 158 | 159 | 7: 160 | ldb a0, (a3, 0) 161 | addi a3, 1 162 | ldb a2, (a1, 0) 163 | addi a1, 1 164 | subu a0, a2 165 | bnez a0, 4b 166 | bnez a2, 7b 167 | jmp r15 168 | ENDPROC(strcmp) 169 | -------------------------------------------------------------------------------- /arch/csky/abiv2/strcpy.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include "sysdep.h" 6 | 7 | ENTRY(strcpy) 8 | mov a3, a0 9 | /* Check if the src addr is aligned. */ 10 | andi t0, a1, 3 11 | bnez t0, 11f 12 | 1: 13 | /* Check if all the bytes in the word are not zero. 
*/ 14 | ldw a2, (a1) 15 | tstnbz a2 16 | bf 9f 17 | stw a2, (a3) 18 | 19 | ldw a2, (a1, 4) 20 | tstnbz a2 21 | bf 2f 22 | stw a2, (a3, 4) 23 | 24 | ldw a2, (a1, 8) 25 | tstnbz a2 26 | bf 3f 27 | stw a2, (a3, 8) 28 | 29 | ldw a2, (a1, 12) 30 | tstnbz a2 31 | bf 4f 32 | stw a2, (a3, 12) 33 | 34 | ldw a2, (a1, 16) 35 | tstnbz a2 36 | bf 5f 37 | stw a2, (a3, 16) 38 | 39 | ldw a2, (a1, 20) 40 | tstnbz a2 41 | bf 6f 42 | stw a2, (a3, 20) 43 | 44 | ldw a2, (a1, 24) 45 | tstnbz a2 46 | bf 7f 47 | stw a2, (a3, 24) 48 | 49 | ldw a2, (a1, 28) 50 | tstnbz a2 51 | bf 8f 52 | stw a2, (a3, 28) 53 | 54 | addi a3, 32 55 | addi a1, 32 56 | br 1b 57 | 58 | 59 | 2: 60 | addi a3, 4 61 | br 9f 62 | 63 | 3: 64 | addi a3, 8 65 | br 9f 66 | 67 | 4: 68 | addi a3, 12 69 | br 9f 70 | 71 | 5: 72 | addi a3, 16 73 | br 9f 74 | 75 | 6: 76 | addi a3, 20 77 | br 9f 78 | 79 | 7: 80 | addi a3, 24 81 | br 9f 82 | 83 | 8: 84 | addi a3, 28 85 | 9: 86 | # ifdef __CSKYBE__ 87 | xtrb0 t0, a2 88 | st.b t0, (a3) 89 | bez t0, 10f 90 | xtrb1 t0, a2 91 | st.b t0, (a3, 1) 92 | bez t0, 10f 93 | xtrb2 t0, a2 94 | st.b t0, (a3, 2) 95 | bez t0, 10f 96 | stw a2, (a3) 97 | # else 98 | xtrb3 t0, a2 99 | st.b t0, (a3) 100 | bez t0, 10f 101 | xtrb2 t0, a2 102 | st.b t0, (a3, 1) 103 | bez t0, 10f 104 | xtrb1 t0, a2 105 | st.b t0, (a3, 2) 106 | bez t0, 10f 107 | stw a2, (a3) 108 | # endif /* !__CSKYBE__ */ 109 | 10: 110 | jmp lr 111 | 112 | 11: 113 | subi t0, 4 114 | 12: 115 | ld.b a2, (a1) 116 | st.b a2, (a3) 117 | bez a2, 10b 118 | addi t0, 1 119 | addi a1, a1, 1 120 | addi a3, a3, 1 121 | bnez t0, 12b 122 | jbr 1b 123 | ENDPROC(strcpy) 124 | -------------------------------------------------------------------------------- /arch/csky/abiv2/strksyms.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | 6 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 7 | EXPORT_SYMBOL(memcpy); 8 | EXPORT_SYMBOL(memset); 9 | EXPORT_SYMBOL(memmove); 10 | #endif 11 | EXPORT_SYMBOL(memcmp); 12 | EXPORT_SYMBOL(strcmp); 13 | EXPORT_SYMBOL(strcpy); 14 | EXPORT_SYMBOL(strlen); 15 | -------------------------------------------------------------------------------- /arch/csky/abiv2/strlen.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include "sysdep.h" 6 | 7 | ENTRY(strlen) 8 | /* Check if the start addr is aligned. */ 9 | mov r3, r0 10 | andi r1, r0, 3 11 | movi r2, 4 12 | movi r0, 0 13 | bnez r1, .L_start_not_aligned 14 | 15 | LABLE_ALIGN 16 | .L_start_addr_aligned: 17 | /* Check if all the bytes in the word are not zero. 
*/ 18 | ldw r1, (r3) 19 | tstnbz r1 20 | bf .L_string_tail 21 | 22 | ldw r1, (r3, 4) 23 | addi r0, 4 24 | tstnbz r1 25 | bf .L_string_tail 26 | 27 | ldw r1, (r3, 8) 28 | addi r0, 4 29 | tstnbz r1 30 | bf .L_string_tail 31 | 32 | ldw r1, (r3, 12) 33 | addi r0, 4 34 | tstnbz r1 35 | bf .L_string_tail 36 | 37 | ldw r1, (r3, 16) 38 | addi r0, 4 39 | tstnbz r1 40 | bf .L_string_tail 41 | 42 | ldw r1, (r3, 20) 43 | addi r0, 4 44 | tstnbz r1 45 | bf .L_string_tail 46 | 47 | ldw r1, (r3, 24) 48 | addi r0, 4 49 | tstnbz r1 50 | bf .L_string_tail 51 | 52 | ldw r1, (r3, 28) 53 | addi r0, 4 54 | tstnbz r1 55 | bf .L_string_tail 56 | 57 | addi r0, 4 58 | addi r3, 32 59 | br .L_start_addr_aligned 60 | 61 | .L_string_tail: 62 | # ifdef __CSKYBE__ 63 | xtrb0 r3, r1 64 | bez r3, .L_return 65 | addi r0, 1 66 | xtrb1 r3, r1 67 | bez r3, .L_return 68 | addi r0, 1 69 | xtrb2 r3, r1 70 | bez r3, .L_return 71 | addi r0, 1 72 | # else 73 | xtrb3 r3, r1 74 | bez r3, .L_return 75 | addi r0, 1 76 | xtrb2 r3, r1 77 | bez r3, .L_return 78 | addi r0, 1 79 | xtrb1 r3, r1 80 | bez r3, .L_return 81 | addi r0, 1 82 | # endif /* !__CSKYBE__ */ 83 | 84 | .L_return: 85 | rts 86 | 87 | .L_start_not_aligned: 88 | sub r2, r2, r1 89 | .L_start_not_aligned_loop: 90 | ldb r1, (r3) 91 | PRE_BNEZAD (r2) 92 | addi r3, 1 93 | bez r1, .L_return 94 | addi r0, 1 95 | BNEZAD (r2, .L_start_not_aligned_loop) 96 | br .L_start_addr_aligned 97 | ENDPROC(strlen) 98 | -------------------------------------------------------------------------------- /arch/csky/abiv2/sysdep.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __SYSDEP_H 5 | #define __SYSDEP_H 6 | 7 | #ifdef __ASSEMBLER__ 8 | 9 | #if defined(__CK860__) 10 | #define LABLE_ALIGN \ 11 | .balignw 16, 0x6c03 12 | 13 | #define PRE_BNEZAD(R) 14 | 15 | #define BNEZAD(R, L) \ 16 | bnezad R, L 17 | #else 18 | #define LABLE_ALIGN \ 19 | .balignw 8, 0x6c03 20 | 21 | #define PRE_BNEZAD(R) \ 22 | subi R, 1 23 | 24 | #define BNEZAD(R, L) \ 25 | bnez R, L 26 | #endif 27 | 28 | #endif 29 | 30 | #endif 31 | -------------------------------------------------------------------------------- /arch/csky/boot/Makefile: -------------------------------------------------------------------------------- 1 | targets := Image zImage uImage 2 | targets += $(dtb-y) 3 | 4 | $(obj)/Image: vmlinux FORCE 5 | $(call if_changed,objcopy) 6 | @echo ' Kernel: $@ is ready' 7 | 8 | compress-$(CONFIG_KERNEL_GZIP) = gzip 9 | compress-$(CONFIG_KERNEL_LZO) = lzo 10 | compress-$(CONFIG_KERNEL_LZMA) = lzma 11 | compress-$(CONFIG_KERNEL_XZ) = xzkern 12 | compress-$(CONFIG_KERNEL_LZ4) = lz4 13 | 14 | $(obj)/zImage: $(obj)/Image FORCE 15 | $(call if_changed,$(compress-y)) 16 | @echo ' Kernel: $@ is ready' 17 | 18 | UIMAGE_ARCH = sandbox 19 | UIMAGE_COMPRESSION = $(compress-y) 20 | UIMAGE_LOADADDR = $(shell $(NM) vmlinux | awk '$$NF == "_start" {print $$1}') 21 | 22 | $(obj)/uImage: $(obj)/zImage 23 | $(call if_changed,uimage) 24 | @echo 'Image: $@ is ready' 25 | -------------------------------------------------------------------------------- /arch/csky/boot/dts/Makefile: -------------------------------------------------------------------------------- 1 | dtstree := $(srctree)/$(src) 2 | 3 | ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""' 4 | builtindtb-y := $(patsubst "%",%,$(CONFIG_CSKY_BUILTIN_DTB)) 5 | dtb-y += $(builtindtb-y).dtb 6 | obj-y += $(builtindtb-y).dtb.o 7 | .SECONDARY: 
$(obj)/$(builtindtb-y).dtb.S 8 | else 9 | dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) 10 | endif 11 | 12 | always += $(dtb-y) 13 | clean-files += *.dtb *.dtb.S 14 | -------------------------------------------------------------------------------- /arch/csky/boot/dts/include/dt-bindings: -------------------------------------------------------------------------------- 1 | ../../../../../include/dt-bindings -------------------------------------------------------------------------------- /arch/csky/configs/defconfig: -------------------------------------------------------------------------------- 1 | # CONFIG_LOCALVERSION_AUTO is not set 2 | CONFIG_DEFAULT_HOSTNAME="csky" 3 | CONFIG_KPROBES=y 4 | CONFIG_STACKTRACE_SUPPORT=y 5 | CONFIG_FUNCTION_TRACER=y 6 | CONFIG_FTRACE_SYSCALLS=y 7 | CONFIG_UPROBE_EVENT=y 8 | # CONFIG_SWAP is not set 9 | CONFIG_SYSVIPC=y 10 | CONFIG_POSIX_MQUEUE=y 11 | CONFIG_AUDIT=y 12 | CONFIG_NO_HZ_IDLE=y 13 | CONFIG_HIGH_RES_TIMERS=y 14 | CONFIG_BSD_PROCESS_ACCT=y 15 | CONFIG_BSD_PROCESS_ACCT_V3=y 16 | CONFIG_MODULES=y 17 | CONFIG_MODULE_UNLOAD=y 18 | CONFIG_NET=y 19 | CONFIG_PACKET=y 20 | CONFIG_UNIX=y 21 | CONFIG_INET=y 22 | CONFIG_DEVTMPFS=y 23 | CONFIG_DEVTMPFS_MOUNT=y 24 | CONFIG_BLK_DEV_LOOP=y 25 | CONFIG_BLK_DEV_RAM=y 26 | CONFIG_BLK_DEV_RAM_SIZE=65536 27 | CONFIG_VT_HW_CONSOLE_BINDING=y 28 | CONFIG_SERIAL_NONSTANDARD=y 29 | CONFIG_SERIAL_8250=y 30 | CONFIG_SERIAL_8250_CONSOLE=y 31 | CONFIG_SERIAL_OF_PLATFORM=y 32 | # CONFIG_VGA_CONSOLE is not set 33 | CONFIG_PM_DEVFREQ=y 34 | CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y 35 | CONFIG_DEVFREQ_GOV_PERFORMANCE=y 36 | CONFIG_DEVFREQ_GOV_POWERSAVE=y 37 | CONFIG_DEVFREQ_GOV_USERSPACE=y 38 | CONFIG_GENERIC_PHY=y 39 | CONFIG_EXT4_FS=y 40 | CONFIG_FANOTIFY=y 41 | CONFIG_QUOTA=y 42 | CONFIG_FSCACHE=m 43 | CONFIG_FSCACHE_STATS=y 44 | CONFIG_CACHEFILES=m 45 | CONFIG_MSDOS_FS=y 46 | CONFIG_VFAT_FS=y 47 | CONFIG_FAT_DEFAULT_UTF8=y 48 | CONFIG_NTFS_FS=y 49 | CONFIG_PROC_KCORE=y 50 | CONFIG_PROC_CHILDREN=y 51 | CONFIG_TMPFS=y 52 | CONFIG_TMPFS_POSIX_ACL=y 53 | CONFIG_CONFIGFS_FS=y 54 | CONFIG_CRAMFS=y 55 | CONFIG_ROMFS_FS=y 56 | CONFIG_NFS_FS=y 57 | CONFIG_PRINTK_TIME=y 58 | CONFIG_MAGIC_SYSRQ=y 59 | CONFIG_PREEMPT=y 60 | CONFIG_PREEMPT_TRACER=y 61 | CONFIG_DEBUG_FS=y 62 | -------------------------------------------------------------------------------- /arch/csky/include/asm/Kbuild: -------------------------------------------------------------------------------- 1 | generic-y += asm-offsets.h 2 | generic-y += atomic.h 3 | generic-y += bugs.h 4 | generic-y += clkdev.h 5 | generic-y += compat.h 6 | generic-y += current.h 7 | generic-y += delay.h 8 | generic-y += device.h 9 | generic-y += div64.h 10 | generic-y += dma.h 11 | generic-y += dma-contiguous.h 12 | generic-y += dma-mapping.h 13 | generic-y += emergency-restart.h 14 | generic-y += exec.h 15 | generic-y += fb.h 16 | generic-y += futex.h 17 | generic-y += gpio.h 18 | generic-y += hardirq.h 19 | generic-y += hw_irq.h 20 | generic-y += irq.h 21 | generic-y += irq_regs.h 22 | generic-y += irq_work.h 23 | generic-y += kdebug.h 24 | generic-y += kmap_types.h 25 | generic-y += kvm_para.h 26 | generic-y += linkage.h 27 | generic-y += local.h 28 | generic-y += local64.h 29 | generic-y += mm-arch-hooks.h 30 | generic-y += module.h 31 | generic-y += mutex.h 32 | generic-y += pci.h 33 | generic-y += percpu.h 34 | generic-y += preempt.h 35 | generic-y += qrwlock.h 36 | generic-y += scatterlist.h 37 | generic-y += sections.h 38 | generic-y += serial.h 39 | generic-y += 
shm.h 40 | generic-y += timex.h 41 | generic-y += topology.h 42 | generic-y += trace_clock.h 43 | generic-y += unaligned.h 44 | generic-y += user.h 45 | generic-y += vga.h 46 | generic-y += vmlinux.lds.h 47 | generic-y += word-at-a-time.h 48 | generic-y += xor.h 49 | generic-y += msi.h 50 | -------------------------------------------------------------------------------- /arch/csky/include/asm/addrspace.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_ADDRSPACE_H 5 | #define __ASM_CSKY_ADDRSPACE_H 6 | 7 | #define KSEG0 0x80000000ul 8 | #define KSEG0ADDR(a) (((unsigned long)a & 0x1fffffff) | KSEG0) 9 | 10 | #endif /* __ASM_CSKY_ADDRSPACE_H */ 11 | -------------------------------------------------------------------------------- /arch/csky/include/asm/asid.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | #ifndef __ASM_ASM_ASID_H 3 | #define __ASM_ASM_ASID_H 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | struct asid_info 12 | { 13 | atomic64_t generation; 14 | unsigned long *map; 15 | atomic64_t __percpu *active; 16 | u64 __percpu *reserved; 17 | u32 bits; 18 | /* Lock protecting the structure */ 19 | raw_spinlock_t lock; 20 | /* Which CPU requires context flush on next call */ 21 | cpumask_t flush_pending; 22 | /* Number of ASID allocated by context (shift value) */ 23 | unsigned int ctxt_shift; 24 | /* Callback to locally flush the context. */ 25 | void (*flush_cpu_ctxt_cb)(void); 26 | }; 27 | 28 | #define NUM_ASIDS(info) (1UL << ((info)->bits)) 29 | #define NUM_CTXT_ASIDS(info) (NUM_ASIDS(info) >> (info)->ctxt_shift) 30 | 31 | #define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu) 32 | 33 | void asid_new_context(struct asid_info *info, atomic64_t *pasid, 34 | unsigned int cpu, struct mm_struct *mm); 35 | 36 | /* 37 | * Check the ASID is still valid for the context. If not generate a new ASID. 38 | * 39 | * @pasid: Pointer to the current ASID batch 40 | * @cpu: current CPU ID. Must have been acquired throught get_cpu() 41 | */ 42 | static inline void asid_check_context(struct asid_info *info, 43 | atomic64_t *pasid, unsigned int cpu, 44 | struct mm_struct *mm) 45 | { 46 | u64 asid, old_active_asid; 47 | 48 | asid = atomic64_read(pasid); 49 | 50 | /* 51 | * The memory ordering here is subtle. 52 | * If our active_asid is non-zero and the ASID matches the current 53 | * generation, then we update the active_asid entry with a relaxed 54 | * cmpxchg. Racing with a concurrent rollover means that either: 55 | * 56 | * - We get a zero back from the cmpxchg and end up waiting on the 57 | * lock. Taking the lock synchronises with the rollover and so 58 | * we are forced to see the updated generation. 59 | * 60 | * - We get a valid ASID back from the cmpxchg, which means the 61 | * relaxed xchg in flush_context will treat us as reserved 62 | * because atomic RmWs are totally ordered for a given location. 
63 | */ 64 | old_active_asid = atomic64_read(&active_asid(info, cpu)); 65 | if (old_active_asid && 66 | !((asid ^ atomic64_read(&info->generation)) >> info->bits) && 67 | atomic64_cmpxchg_relaxed(&active_asid(info, cpu), 68 | old_active_asid, asid)) 69 | return; 70 | 71 | asid_new_context(info, pasid, cpu, mm); 72 | } 73 | 74 | int asid_allocator_init(struct asid_info *info, 75 | u32 bits, unsigned int asid_per_ctxt, 76 | void (*flush_cpu_ctxt_cb)(void)); 77 | 78 | #endif 79 | -------------------------------------------------------------------------------- /arch/csky/include/asm/barrier.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_BARRIER_H 5 | #define __ASM_CSKY_BARRIER_H 6 | 7 | #ifndef __ASSEMBLY__ 8 | 9 | #define nop() asm volatile ("nop\n":::"memory") 10 | 11 | #ifdef CONFIG_SMP 12 | 13 | /* 14 | * bar.brwarws: ordering barrier for all load/store instructions 15 | * before/after 16 | * 17 | * |31|30 26|25 21|20 16|15 10|9 5|4 0| 18 | * 1 10000 00000 00000 100001 00001 0 bw br aw ar 19 | * 20 | * b: before 21 | * a: after 22 | * r: read 23 | * w: write 24 | * 25 | * Here are all combinations: 26 | * 27 | * bar.brw 28 | * bar.br 29 | * bar.bw 30 | * bar.arw 31 | * bar.ar 32 | * bar.aw 33 | * bar.brwarw 34 | * bar.brarw 35 | * bar.bwarw 36 | * bar.brwar 37 | * bar.brwaw 38 | * bar.brar 39 | * bar.bwaw 40 | */ 41 | #define __bar_brw() asm volatile (".long 0x842cc000\n":::"memory") 42 | #define __bar_br() asm volatile (".long 0x8424c000\n":::"memory") 43 | #define __bar_bw() asm volatile (".long 0x8428c000\n":::"memory") 44 | #define __bar_arw() asm volatile (".long 0x8423c000\n":::"memory") 45 | #define __bar_ar() asm volatile (".long 0x8421c000\n":::"memory") 46 | #define __bar_aw() asm volatile (".long 0x8422c000\n":::"memory") 47 | #define __bar_brwarw() asm volatile (".long 0x842fc000\n":::"memory") 48 | #define __bar_brarw() asm volatile (".long 0x8427c000\n":::"memory") 49 | #define __bar_bwarw() asm volatile (".long 0x842bc000\n":::"memory") 50 | #define __bar_brwar() asm volatile (".long 0x842dc000\n":::"memory") 51 | #define __bar_brwaw() asm volatile (".long 0x842ec000\n":::"memory") 52 | #define __bar_brar() asm volatile (".long 0x8425c000\n":::"memory") 53 | #define __bar_brar() asm volatile (".long 0x8425c000\n":::"memory") 54 | #define __bar_bwaw() asm volatile (".long 0x842ac000\n":::"memory") 55 | 56 | #define __smp_mb() __bar_brwarw() 57 | #define __smp_rmb() __bar_brar() 58 | #define __smp_wmb() __bar_bwaw() 59 | 60 | #define ACQUIRE_FENCE ".long 0x8427c000\n" 61 | #define __smp_acquire_fence() __bar_brarw() 62 | #define __smp_release_fence() __bar_brwaw() 63 | 64 | #endif /* CONFIG_SMP */ 65 | 66 | /* 67 | * sync: completion barrier, all sync.xx instructions 68 | * guarantee the last response recieved by bus transaction 69 | * made by ld/st instructions before sync.s 70 | * sync.s: inherit from sync, but also shareable to other cores 71 | * sync.i: inherit from sync, but also flush cpu pipeline 72 | * sync.is: the same with sync.i + sync.s 73 | */ 74 | #define mb() asm volatile ("sync\n":::"memory") 75 | 76 | #ifdef CONFIG_CPU_HAS_CACHEV2 77 | /* 78 | * Using three sync.is to prevent speculative PTW 79 | */ 80 | #define sync_is() asm volatile ("sync.is\nsync.is\nsync.is\n":::"memory") 81 | #endif 82 | 83 | #include 84 | 85 | #endif /* __ASSEMBLY__ */ 86 | #endif /* __ASM_CSKY_BARRIER_H */ 87 | 
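Note on the barrier encodings above: on SMP builds, __smp_wmb()/__smp_rmb() map to the bar.bwaw/bar.brar encodings and are reached through the generic smp_wmb()/smp_rmb() wrappers pulled in at the end of the header. A minimal, illustrative C sketch of the classic message-passing pairing these encodings serve follows; the publish()/consume() functions and the data/ready fields are hypothetical and not part of this tree:

/*
 * Illustrative sketch only: pairing of smp_wmb()/smp_rmb() as defined
 * above. 'data' and 'ready' are hypothetical shared variables.
 */
static int data;
static int ready;

void publish(int v)
{
	WRITE_ONCE(data, v);
	smp_wmb();		/* bar.bwaw: order the data store before the ready store */
	WRITE_ONCE(ready, 1);
}

int consume(void)
{
	if (!READ_ONCE(ready))
		return -1;	/* nothing published yet */
	smp_rmb();		/* bar.brar: order the ready load before the data load */
	return READ_ONCE(data);
}

Without this pairing, a reader on another core could observe ready == 1 while still loading a stale data value.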
-------------------------------------------------------------------------------- /arch/csky/include/asm/bitops.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_BITOPS_H 5 | #define __ASM_CSKY_BITOPS_H 6 | 7 | #include 8 | #include 9 | 10 | /* 11 | * asm-generic/bitops/ffs.h 12 | */ 13 | static inline int ffs(int x) 14 | { 15 | if (!x) 16 | return 0; 17 | 18 | asm volatile ( 19 | "brev %0\n" 20 | "ff1 %0\n" 21 | "addi %0, 1\n" 22 | : "=&r"(x) 23 | : "0"(x)); 24 | return x; 25 | } 26 | 27 | /* 28 | * asm-generic/bitops/__ffs.h 29 | */ 30 | static __always_inline unsigned long __ffs(unsigned long x) 31 | { 32 | asm volatile ( 33 | "brev %0\n" 34 | "ff1 %0\n" 35 | : "=&r"(x) 36 | : "0"(x)); 37 | return x; 38 | } 39 | 40 | /* 41 | * asm-generic/bitops/fls.h 42 | */ 43 | static __always_inline int fls(int x) 44 | { 45 | asm volatile( 46 | "ff1 %0\n" 47 | : "=&r"(x) 48 | : "0"(x)); 49 | 50 | return (32 - x); 51 | } 52 | 53 | /* 54 | * asm-generic/bitops/__fls.h 55 | */ 56 | static __always_inline unsigned long __fls(unsigned long x) 57 | { 58 | return fls(x) - 1; 59 | } 60 | 61 | #include 62 | #include 63 | #include 64 | 65 | #ifndef _LINUX_BITOPS_H 66 | #error only can be included directly 67 | #endif 68 | 69 | #include 70 | #include 71 | #include 72 | #include 73 | 74 | /* 75 | * bug fix, why only could use atomic!!!! 76 | */ 77 | #include 78 | #define __clear_bit(nr, vaddr) clear_bit(nr, vaddr) 79 | 80 | #include 81 | #include 82 | #endif /* __ASM_CSKY_BITOPS_H */ 83 | -------------------------------------------------------------------------------- /arch/csky/include/asm/bug.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef __ASM_CSKY_BUG_H 5 | #define __ASM_CSKY_BUG_H 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | #define BUG() \ 12 | do { \ 13 | asm volatile ("bkpt\n"); \ 14 | unreachable(); \ 15 | } while (0) 16 | 17 | #define HAVE_ARCH_BUG 18 | 19 | #include 20 | 21 | struct pt_regs; 22 | 23 | void die(struct pt_regs *regs, const char *str); 24 | void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr); 25 | 26 | void show_regs(struct pt_regs *regs); 27 | void show_code(struct pt_regs *regs); 28 | 29 | #endif /* __ASM_CSKY_BUG_H */ 30 | -------------------------------------------------------------------------------- /arch/csky/include/asm/cache.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_CACHE_H 4 | #define __ASM_CSKY_CACHE_H 5 | 6 | /* bytes per L1 cache line */ 7 | #define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT 8 | 9 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 10 | 11 | #define ARCH_DMA_MINALIGN L1_CACHE_BYTES 12 | 13 | #ifndef __ASSEMBLY__ 14 | 15 | void dcache_wb_line(unsigned long start); 16 | 17 | void icache_inv_range(unsigned long start, unsigned long end); 18 | void icache_inv_all(void); 19 | void local_icache_inv_all(void *priv); 20 | 21 | void dcache_wb_range(unsigned long start, unsigned long end); 22 | void dcache_wbinv_all(void); 23 | 24 | void cache_wbinv_range(unsigned long start, unsigned long end); 25 | void cache_wbinv_all(void); 26 | 27 | void dma_wbinv_range(unsigned long start, unsigned long end); 28 | void dma_inv_range(unsigned long start, unsigned long end); 29 | void dma_wb_range(unsigned long start, unsigned long end); 30 | 31 | #endif 32 | #endif /* __ASM_CSKY_CACHE_H */ 33 | -------------------------------------------------------------------------------- /arch/csky/include/asm/cacheflush.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_CACHEFLUSH_H 5 | #define __ASM_CSKY_CACHEFLUSH_H 6 | 7 | #include 8 | #include 9 | 10 | #endif /* __ASM_CSKY_CACHEFLUSH_H */ 11 | -------------------------------------------------------------------------------- /arch/csky/include/asm/checksum.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef __ASM_CSKY_CHECKSUM_H 5 | #define __ASM_CSKY_CHECKSUM_H 6 | 7 | #include 8 | #include 9 | 10 | static inline __sum16 csum_fold(__wsum csum) 11 | { 12 | u32 tmp; 13 | 14 | asm volatile( 15 | "mov %1, %0\n" 16 | "rori %0, 16\n" 17 | "addu %0, %1\n" 18 | "lsri %0, 16\n" 19 | : "=r"(csum), "=r"(tmp) 20 | : "0"(csum)); 21 | 22 | return (__force __sum16) ~csum; 23 | } 24 | #define csum_fold csum_fold 25 | 26 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 27 | unsigned short len, unsigned short proto, __wsum sum) 28 | { 29 | asm volatile( 30 | "clrc\n" 31 | "addc %0, %1\n" 32 | "addc %0, %2\n" 33 | "addc %0, %3\n" 34 | "inct %0\n" 35 | : "=r"(sum) 36 | : "r"((__force u32)saddr), "r"((__force u32)daddr), 37 | #ifdef __BIG_ENDIAN 38 | "r"(proto + len), 39 | #else 40 | "r"((proto + len) << 8), 41 | #endif 42 | "0" ((__force unsigned long)sum) 43 | : "cc"); 44 | return sum; 45 | } 46 | #define csum_tcpudp_nofold csum_tcpudp_nofold 47 | 48 | #include 49 | 50 | #endif /* __ASM_CSKY_CHECKSUM_H */ 51 | -------------------------------------------------------------------------------- /arch/csky/include/asm/cmpxchg.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_CMPXCHG_H 4 | #define __ASM_CSKY_CMPXCHG_H 5 | 6 | #ifdef CONFIG_CPU_HAS_LDSTEX 7 | #include 8 | 9 | extern void __bad_xchg(void); 10 | 11 | #define __xchg(new, ptr, size) \ 12 | ({ \ 13 | __typeof__(ptr) __ptr = (ptr); \ 14 | __typeof__(new) __new = (new); \ 15 | __typeof__(*(ptr)) __ret; \ 16 | unsigned long tmp; \ 17 | switch (size) { \ 18 | case 4: \ 19 | smp_mb(); \ 20 | asm volatile ( \ 21 | "1: ldex.w %0, (%3) \n" \ 22 | " mov %1, %2 \n" \ 23 | " stex.w %1, (%3) \n" \ 24 | " bez %1, 1b \n" \ 25 | : "=&r" (__ret), "=&r" (tmp) \ 26 | : "r" (__new), "r"(__ptr) \ 27 | :); \ 28 | smp_mb(); \ 29 | break; \ 30 | default: \ 31 | __bad_xchg(); \ 32 | } \ 33 | __ret; \ 34 | }) 35 | 36 | #define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr)))) 37 | 38 | #define __cmpxchg(ptr, old, new, size) \ 39 | ({ \ 40 | __typeof__(ptr) __ptr = (ptr); \ 41 | __typeof__(new) __new = (new); \ 42 | __typeof__(new) __tmp; \ 43 | __typeof__(old) __old = (old); \ 44 | __typeof__(*(ptr)) __ret; \ 45 | switch (size) { \ 46 | case 4: \ 47 | smp_mb(); \ 48 | asm volatile ( \ 49 | "1: ldex.w %0, (%3) \n" \ 50 | " cmpne %0, %4 \n" \ 51 | " bt 2f \n" \ 52 | " mov %1, %2 \n" \ 53 | " stex.w %1, (%3) \n" \ 54 | " bez %1, 1b \n" \ 55 | "2: \n" \ 56 | : "=&r" (__ret), "=&r" (__tmp) \ 57 | : "r" (__new), "r"(__ptr), "r"(__old) \ 58 | :); \ 59 | smp_mb(); \ 60 | break; \ 61 | default: \ 62 | __bad_xchg(); \ 63 | } \ 64 | __ret; \ 65 | }) 66 | 67 | #define cmpxchg(ptr, o, n) \ 68 | (__cmpxchg((ptr), (o), (n), sizeof(*(ptr)))) 69 | #else 70 | #include 71 | #endif 72 | 73 | #endif /* __ASM_CSKY_CMPXCHG_H */ 74 | -------------------------------------------------------------------------------- /arch/csky/include/asm/elf.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef __ASM_CSKY_ELF_H 5 | #define __ASM_CSKY_ELF_H 6 | 7 | #include 8 | #include 9 | 10 | #define EM_CSKY_OLD 39 11 | #define EM_CSKY 252 12 | #define ELF_ARCH EM_CSKY_OLD 13 | 14 | /* CSKY Relocations */ 15 | #define R_CSKY_NONE 0 16 | #define R_CSKY_32 1 17 | #define R_CSKY_PCIMM8BY4 2 18 | #define R_CSKY_PCIMM11BY2 3 19 | #define R_CSKY_PCIMM4BY2 4 20 | #define R_CSKY_PC32 5 21 | #define R_CSKY_PCRELJSR_IMM11BY2 6 22 | #define R_CSKY_GNU_VTINHERIT 7 23 | #define R_CSKY_GNU_VTENTRY 8 24 | #define R_CSKY_RELATIVE 9 25 | #define R_CSKY_COPY 10 26 | #define R_CSKY_GLOB_DAT 11 27 | #define R_CSKY_JUMP_SLOT 12 28 | #define R_CSKY_ADDR_HI16 24 29 | #define R_CSKY_ADDR_LO16 25 30 | #define R_CSKY_PCRELJSR_IMM26BY2 40 31 | 32 | typedef unsigned long elf_greg_t; 33 | 34 | typedef struct user_fp elf_fpregset_t; 35 | 36 | /* 37 | * In gdb/bfd elf32-csky.c, csky_elf_grok_prstatus() use fixed size of 38 | * elf_prstatus. It's 148 for abiv1 and 220 for abiv2, the size is enough 39 | * for coredump and no need full sizeof(struct pt_regs). 40 | */ 41 | #define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 2) 42 | 43 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 44 | 45 | /* 46 | * This is used to ensure we don't load something for the wrong architecture. 47 | */ 48 | #define elf_check_arch(x) (((x)->e_machine == EM_CSKY) || \ 49 | ((x)->e_machine == EM_CSKY_OLD)) 50 | 51 | /* 52 | * These are used to set parameters in the core dumps. 53 | */ 54 | #define USE_ELF_CORE_DUMP 55 | #define ELF_EXEC_PAGESIZE 4096 56 | #define ELF_CLASS ELFCLASS32 57 | #define ELF_PLAT_INIT(_r, load_addr) { _r->a0 = 0; } 58 | 59 | #ifdef __cskyBE__ 60 | #define ELF_DATA ELFDATA2MSB 61 | #else 62 | #define ELF_DATA ELFDATA2LSB 63 | #endif 64 | 65 | /* 66 | * This is the location that an ET_DYN program is loaded if exec'ed. Typical 67 | * use of this is to invoke "./ld.so someprog" to test out a new version of 68 | * the loader. We need to make sure that it is out of the way of the program 69 | * that it will "exec", and that there is sufficient room for the brk. 70 | */ 71 | #define ELF_ET_DYN_BASE 0x0UL 72 | #include 73 | 74 | /* Similar, but for a thread other than current. */ 75 | struct task_struct; 76 | extern int dump_task_regs(struct task_struct *tsk, elf_gregset_t *elf_regs); 77 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) 78 | 79 | #define ELF_HWCAP (0) 80 | 81 | /* 82 | * This yields a string that ld.so will use to load implementation specific 83 | * libraries for optimization. This is more specific in intent than poking 84 | * at uname or /proc/cpuinfo. 85 | */ 86 | #define ELF_PLATFORM (NULL) 87 | #define SET_PERSONALITY(ex) set_personality(PER_LINUX) 88 | 89 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 90 | struct linux_binprm; 91 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, 92 | int uses_interp); 93 | #endif /* __ASM_CSKY_ELF_H */ 94 | -------------------------------------------------------------------------------- /arch/csky/include/asm/fixmap.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef __ASM_CSKY_FIXMAP_H 5 | #define __ASM_CSKY_FIXMAP_H 6 | 7 | #include 8 | #include 9 | #ifdef CONFIG_HIGHMEM 10 | #include 11 | #include 12 | #endif 13 | 14 | enum fixed_addresses { 15 | #ifdef CONFIG_HAVE_TCM 16 | FIX_TCM = TCM_NR_PAGES, 17 | #endif 18 | #ifdef CONFIG_HIGHMEM 19 | FIX_KMAP_BEGIN, 20 | FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, 21 | #endif 22 | __end_of_fixed_addresses 23 | }; 24 | 25 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 26 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 27 | 28 | #include 29 | 30 | extern void fixrange_init(unsigned long start, unsigned long end, 31 | pgd_t *pgd_base); 32 | extern void __init fixaddr_init(void); 33 | 34 | #endif /* __ASM_CSKY_FIXMAP_H */ 35 | -------------------------------------------------------------------------------- /arch/csky/include/asm/ftrace.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_FTRACE_H 5 | #define __ASM_CSKY_FTRACE_H 6 | 7 | #define MCOUNT_INSN_SIZE 14 8 | 9 | #define HAVE_FUNCTION_GRAPH_FP_TEST 10 | 11 | #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 12 | 13 | #define ARCH_SUPPORTS_FTRACE_OPS 1 14 | 15 | #define MCOUNT_ADDR ((unsigned long)_mcount) 16 | 17 | #ifndef __ASSEMBLY__ 18 | 19 | extern void _mcount(unsigned long); 20 | 21 | extern void ftrace_graph_call(void); 22 | 23 | static inline unsigned long ftrace_call_adjust(unsigned long addr) 24 | { 25 | return addr; 26 | } 27 | 28 | struct dyn_arch_ftrace { 29 | }; 30 | #endif /* !__ASSEMBLY__ */ 31 | #endif /* __ASM_CSKY_FTRACE_H */ 32 | -------------------------------------------------------------------------------- /arch/csky/include/asm/futex.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_FUTEX_H 4 | #define __ASM_CSKY_FUTEX_H 5 | 6 | #ifndef CONFIG_SMP 7 | #include 8 | #else 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 15 | { \ 16 | u32 tmp; \ 17 | \ 18 | __atomic_pre_full_fence(); \ 19 | \ 20 | __asm__ __volatile__ ( \ 21 | "1: ldex.w %[ov], %[u] \n" \ 22 | " "insn" \n" \ 23 | "2: stex.w %[t], %[u] \n" \ 24 | " bez %[t], 1b \n" \ 25 | " br 4f \n" \ 26 | "3: mov %[r], %[e] \n" \ 27 | "4: \n" \ 28 | " .section __ex_table,\"a\" \n" \ 29 | " .balign 4 \n" \ 30 | " .long 1b, 3b \n" \ 31 | " .long 2b, 3b \n" \ 32 | " .previous \n" \ 33 | : [r] "+r" (ret), [ov] "=&r" (oldval), \ 34 | [u] "+m" (*uaddr), [t] "=&r" (tmp) \ 35 | : [op] "Jr" (oparg), [e] "jr" (-EFAULT) \ 36 | : "memory"); \ 37 | \ 38 | __atomic_post_full_fence(); \ 39 | } 40 | 41 | static inline int 42 | arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) 43 | { 44 | int oldval = 0, ret = 0; 45 | 46 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 47 | return -EFAULT; 48 | 49 | switch (op) { 50 | case FUTEX_OP_SET: 51 | __futex_atomic_op("mov %[t], %[ov]", 52 | ret, oldval, uaddr, oparg); 53 | break; 54 | case FUTEX_OP_ADD: 55 | __futex_atomic_op("add %[t], %[ov], %[op]", 56 | ret, oldval, uaddr, oparg); 57 | break; 58 | case FUTEX_OP_OR: 59 | __futex_atomic_op("or %[t], %[ov], %[op]", 60 | ret, oldval, uaddr, oparg); 61 | break; 62 | case FUTEX_OP_ANDN: 63 | __futex_atomic_op("and %[t], %[ov], %[op]", 64 | ret, oldval, uaddr, ~oparg); 65 | break; 66 | case 
FUTEX_OP_XOR: 67 | __futex_atomic_op("xor %[t], %[ov], %[op]", 68 | ret, oldval, uaddr, oparg); 69 | break; 70 | default: 71 | ret = -ENOSYS; 72 | } 73 | 74 | if (!ret) 75 | *oval = oldval; 76 | 77 | return ret; 78 | } 79 | 80 | 81 | 82 | static inline int 83 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 84 | u32 oldval, u32 newval) 85 | { 86 | int ret = 0; 87 | u32 val, tmp; 88 | 89 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 90 | return -EFAULT; 91 | 92 | __atomic_pre_full_fence(); 93 | 94 | __asm__ __volatile__ ( 95 | "1: ldex.w %[v], %[u] \n" 96 | " cmpne %[v], %[ov] \n" 97 | " bt 4f \n" 98 | " mov %[t], %[nv] \n" 99 | "2: stex.w %[t], %[u] \n" 100 | " bez %[t], 1b \n" 101 | " br 4f \n" 102 | "3: mov %[r], %[e] \n" 103 | "4: \n" 104 | " .section __ex_table,\"a\" \n" 105 | " .balign 4 \n" 106 | " .long 1b, 3b \n" 107 | " .long 2b, 3b \n" 108 | " .previous \n" 109 | : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), 110 | [t] "=&r" (tmp) 111 | : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "Jr" (-EFAULT) 112 | : "memory"); 113 | 114 | __atomic_post_full_fence(); 115 | 116 | *uval = val; 117 | return ret; 118 | } 119 | 120 | #endif /* CONFIG_SMP */ 121 | #endif /* __ASM_CSKY_FUTEX_H */ 122 | -------------------------------------------------------------------------------- /arch/csky/include/asm/highmem.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_HIGHMEM_H 5 | #define __ASM_CSKY_HIGHMEM_H 6 | 7 | #ifdef __KERNEL__ 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | /* undef for production */ 16 | #define HIGHMEM_DEBUG 1 17 | 18 | /* declarations for highmem.c */ 19 | extern unsigned long highstart_pfn, highend_pfn; 20 | 21 | extern pte_t *pkmap_page_table; 22 | 23 | /* 24 | * Right now we initialize only a single pte table. It can be extended 25 | * easily, subsequent pte tables have to be allocated in one physical 26 | * chunk of RAM. 27 | */ 28 | #define LAST_PKMAP 1024 29 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) 30 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) 31 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) 32 | 33 | extern void *kmap_high(struct page *page); 34 | extern void kunmap_high(struct page *page); 35 | 36 | extern void *kmap(struct page *page); 37 | extern void kunmap(struct page *page); 38 | extern void *kmap_atomic(struct page *page); 39 | extern void __kunmap_atomic(void *kvaddr); 40 | extern void *kmap_atomic_pfn(unsigned long pfn); 41 | extern struct page *kmap_atomic_to_page(void *ptr); 42 | 43 | #define flush_cache_kmaps() do {} while (0) 44 | 45 | extern void kmap_init(void); 46 | 47 | #define kmap_prot PAGE_KERNEL 48 | 49 | #endif /* __KERNEL__ */ 50 | 51 | #endif /* __ASM_CSKY_HIGHMEM_H */ 52 | -------------------------------------------------------------------------------- /arch/csky/include/asm/io.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_IO_H 5 | #define __ASM_CSKY_IO_H 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | /* 12 | * I/O memory access primitives. Reads are ordered relative to any 13 | * following Normal memory access. Writes are ordered relative to any prior 14 | * Normal memory access. 
15 | * 16 | * For CACHEV1 (807, 810), store instruction could fast retire, so we need 17 | * another mb() to prevent st fast retire. 18 | * 19 | * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't 20 | * fast retire. 21 | */ 22 | 23 | #define readb_relaxed readb_relaxed 24 | static inline u8 readb_relaxed(const volatile void __iomem *addr) 25 | { 26 | return *(const volatile u8 __force *)addr; 27 | } 28 | 29 | #define readw_relaxed readw_relaxed 30 | static inline u16 readw_relaxed(const volatile void __iomem *addr) 31 | { 32 | return *(const volatile u16 __force *)addr; 33 | } 34 | 35 | #define readl_relaxed readl_relaxed 36 | static inline u32 readl_relaxed(const volatile void __iomem *addr) 37 | { 38 | return *(const volatile u32 __force *)addr; 39 | } 40 | 41 | #define writeb_relaxed writeb_relaxed 42 | static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) 43 | { 44 | *(volatile u8 __force *)addr = value; 45 | } 46 | 47 | #define writew_relaxed writew_relaxed 48 | static inline void writew_relaxed(u16 value, volatile void __iomem *addr) 49 | { 50 | *(volatile u16 __force *)addr = value; 51 | } 52 | 53 | #define writel_relaxed writel_relaxed 54 | static inline void writel_relaxed(u32 value, volatile void __iomem *addr) 55 | { 56 | *(volatile u32 __force *)addr = value; 57 | } 58 | 59 | #define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; }) 60 | #define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; }) 61 | #define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; }) 62 | 63 | #ifdef CONFIG_CPU_HAS_CACHEV2 64 | #define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); }) 65 | #define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); }) 66 | #define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); }) 67 | #else 68 | #define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); }) 69 | #define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); }) 70 | #define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); }) 71 | #endif 72 | 73 | /* 74 | * String version of I/O memory access operations. 75 | */ 76 | extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t); 77 | extern void __memcpy_toio(volatile void __iomem *, const void *, size_t); 78 | extern void __memset_io(volatile void __iomem *, int, size_t); 79 | 80 | #define memset_io(c,v,l) __memset_io((c),(v),(l)) 81 | #define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l)) 82 | #define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l)) 83 | 84 | /* 85 | * I/O memory mapping functions. 
86 | */ 87 | extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size); 88 | extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot); 89 | extern void iounmap(void *addr); 90 | 91 | #define ioremap(addr, size) __ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL)) 92 | #define ioremap_wc(addr, size) __ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL)) 93 | #define ioremap_nocache(addr, size) ioremap((addr), (size)) 94 | #define ioremap_wt(addr, size) ioremap((addr), (size)) 95 | #define ioremap_cache ioremap_cache 96 | 97 | #include 98 | 99 | #endif /* __ASM_CSKY_IO_H */ 100 | -------------------------------------------------------------------------------- /arch/csky/include/asm/irqflags.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_IRQFLAGS_H 4 | #define __ASM_CSKY_IRQFLAGS_H 5 | #include 6 | 7 | static inline unsigned long arch_local_irq_save(void) 8 | { 9 | unsigned long flags; 10 | 11 | flags = mfcr("psr"); 12 | asm volatile("psrclr ie\n":::"memory"); 13 | return flags; 14 | } 15 | #define arch_local_irq_save arch_local_irq_save 16 | 17 | static inline void arch_local_irq_enable(void) 18 | { 19 | asm volatile("psrset ee, ie\n":::"memory"); 20 | } 21 | #define arch_local_irq_enable arch_local_irq_enable 22 | 23 | static inline void arch_local_irq_disable(void) 24 | { 25 | asm volatile("psrclr ie\n":::"memory"); 26 | } 27 | #define arch_local_irq_disable arch_local_irq_disable 28 | 29 | static inline unsigned long arch_local_save_flags(void) 30 | { 31 | return mfcr("psr"); 32 | } 33 | #define arch_local_save_flags arch_local_save_flags 34 | 35 | static inline void arch_local_irq_restore(unsigned long flags) 36 | { 37 | mtcr("psr", flags); 38 | } 39 | #define arch_local_irq_restore arch_local_irq_restore 40 | 41 | static inline int arch_irqs_disabled_flags(unsigned long flags) 42 | { 43 | return !(flags & (1<<6)); 44 | } 45 | #define arch_irqs_disabled_flags arch_irqs_disabled_flags 46 | 47 | #include 48 | 49 | #endif /* __ASM_CSKY_IRQFLAGS_H */ 50 | -------------------------------------------------------------------------------- /arch/csky/include/asm/kprobes.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | 3 | #ifndef __ASM_CSKY_KPROBES_H 4 | #define __ASM_CSKY_KPROBES_H 5 | 6 | #include 7 | 8 | #ifdef CONFIG_KPROBES 9 | #include 10 | #include 11 | #include 12 | 13 | #define __ARCH_WANT_KPROBES_INSN_SLOT 14 | #define MAX_INSN_SIZE 1 15 | 16 | #define flush_insn_slot(p) do { } while (0) 17 | #define kretprobe_blacklist_size 0 18 | 19 | #include 20 | 21 | struct prev_kprobe { 22 | struct kprobe *kp; 23 | unsigned int status; 24 | }; 25 | 26 | /* Single step context for kprobe */ 27 | struct kprobe_step_ctx { 28 | unsigned long ss_pending; 29 | unsigned long match_addr; 30 | }; 31 | 32 | /* per-cpu kprobe control block */ 33 | struct kprobe_ctlblk { 34 | unsigned int kprobe_status; 35 | unsigned long saved_sr; 36 | struct prev_kprobe prev_kprobe; 37 | struct kprobe_step_ctx ss_ctx; 38 | }; 39 | 40 | void arch_remove_kprobe(struct kprobe *p); 41 | int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); 42 | int kprobe_breakpoint_handler(struct pt_regs *regs); 43 | int kprobe_single_step_handler(struct pt_regs *regs); 44 | void kretprobe_trampoline(void); 45 | void __kprobes *trampoline_probe_handler(struct pt_regs *regs); 46 | 47 | #endif /* 
CONFIG_KPROBES */ 48 | #endif /* __ASM_CSKY_KPROBES_H */ 49 | -------------------------------------------------------------------------------- /arch/csky/include/asm/memory.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_MEMORY_H 4 | #define __ASM_CSKY_MEMORY_H 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #define FIXADDR_TOP _AC(0xffffc000, UL) 12 | #define PKMAP_BASE _AC(0xff800000, UL) 13 | #define VMALLOC_START (PAGE_OFFSET + LOWMEM_LIMIT + (PAGE_SIZE * 8)) 14 | #define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2)) 15 | 16 | #ifdef CONFIG_HAVE_TCM 17 | #ifdef CONFIG_HAVE_DTCM 18 | #define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES) 19 | #else 20 | #define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES) 21 | #endif 22 | #define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL) 23 | #endif 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /arch/csky/include/asm/mmu.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_MMU_H 5 | #define __ASM_CSKY_MMU_H 6 | 7 | typedef struct { 8 | atomic64_t asid; 9 | void *vdso; 10 | cpumask_t icache_stale_mask; 11 | } mm_context_t; 12 | 13 | #endif /* __ASM_CSKY_MMU_H */ 14 | -------------------------------------------------------------------------------- /arch/csky/include/asm/mmu_context.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef __ASM_CSKY_MMU_CONTEXT_H 5 | #define __ASM_CSKY_MMU_CONTEXT_H 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | #define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1) 18 | #define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK) 19 | 20 | #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; }) 21 | #define activate_mm(prev,next) switch_mm(prev, next, current) 22 | 23 | #define destroy_context(mm) do {} while (0) 24 | #define enter_lazy_tlb(mm, tsk) do {} while (0) 25 | #define deactivate_mm(tsk, mm) do {} while (0) 26 | 27 | void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); 28 | 29 | static inline void 30 | switch_mm(struct mm_struct *prev, struct mm_struct *next, 31 | struct task_struct *tsk) 32 | { 33 | unsigned int cpu = smp_processor_id(); 34 | 35 | if (prev != next) 36 | check_and_switch_context(next, cpu); 37 | 38 | setup_pgd(next->pgd, next->context.asid.counter); 39 | 40 | flush_icache_deferred(next); 41 | } 42 | #endif /* __ASM_CSKY_MMU_CONTEXT_H */ 43 | -------------------------------------------------------------------------------- /arch/csky/include/asm/page.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_PAGE_H 4 | #define __ASM_CSKY_PAGE_H 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | /* 11 | * PAGE_SHIFT determines the page size: 4KB 12 | */ 13 | #define PAGE_SHIFT 12 14 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) 15 | #define PAGE_MASK (~(PAGE_SIZE - 1)) 16 | #define THREAD_SIZE (PAGE_SIZE * 2) 17 | #define THREAD_MASK (~(THREAD_SIZE - 1)) 18 | #define THREAD_SHIFT (PAGE_SHIFT + 1) 19 | 20 | 21 | /* 22 | * For C-SKY "User-space:Kernel-space" is "2GB:2GB" fixed by hardware and there 23 | * are two segment registers (MSA0 + MSA1) to mapping 512MB + 512MB physical 24 | * address region. We use them mapping kernel 1GB direct-map address area and 25 | * for more than 1GB of memory we use highmem. 
26 | */ 27 | #define PAGE_OFFSET CONFIG_PAGE_OFFSET 28 | #define SSEG_SIZE 0x20000000 29 | #define LOWMEM_LIMIT (SSEG_SIZE * 2) 30 | 31 | #define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1)) 32 | 33 | #ifndef __ASSEMBLY__ 34 | 35 | #include 36 | 37 | #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 38 | #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) 39 | 40 | #define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \ 41 | (void *)(kaddr) < high_memory) 42 | #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) 43 | 44 | extern void *memset(void *dest, int c, size_t l); 45 | extern void *memcpy(void *to, const void *from, size_t l); 46 | 47 | #define clear_page(page) memset((page), 0, PAGE_SIZE) 48 | #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) 49 | 50 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 51 | #define phys_to_page(paddr) (pfn_to_page(PFN_DOWN(paddr))) 52 | 53 | struct page; 54 | 55 | #include 56 | 57 | struct vm_area_struct; 58 | 59 | typedef struct { unsigned long pte_low; } pte_t; 60 | #define pte_val(x) ((x).pte_low) 61 | 62 | typedef struct { unsigned long pgd; } pgd_t; 63 | typedef struct { unsigned long pgprot; } pgprot_t; 64 | typedef struct page *pgtable_t; 65 | 66 | #define pgd_val(x) ((x).pgd) 67 | #define pgprot_val(x) ((x).pgprot) 68 | 69 | #define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) 70 | 71 | #define __pte(x) ((pte_t) { (x) }) 72 | #define __pgd(x) ((pgd_t) { (x) }) 73 | #define __pgprot(x) ((pgprot_t) { (x) }) 74 | 75 | extern unsigned long va_pa_offset; 76 | 77 | #define ARCH_PFN_OFFSET PFN_DOWN(va_pa_offset + PHYS_OFFSET_OFFSET) 78 | 79 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + va_pa_offset) 80 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - va_pa_offset)) 81 | 82 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 83 | 84 | #define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \ 85 | PHYS_OFFSET_OFFSET) 86 | #define virt_to_page(x) (mem_map + MAP_NR(x)) 87 | 88 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 89 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 90 | 91 | #define pfn_to_kaddr(x) __va(PFN_PHYS(x)) 92 | 93 | #include 94 | #include 95 | 96 | #endif /* !__ASSEMBLY__ */ 97 | #endif /* __ASM_CSKY_PAGE_H */ 98 | -------------------------------------------------------------------------------- /arch/csky/include/asm/pci.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_PCI_H 4 | #define __ASM_CSKY_PCI_H 5 | 6 | #define PCIBIOS_MIN_IO 0 7 | #define PCIBIOS_MIN_MEM 0 8 | 9 | /* C-SKY shim does not initialize PCI bus */ 10 | #define pcibios_assign_all_busses() 1 11 | 12 | extern int isa_dma_bridge_buggy; 13 | 14 | #ifdef CONFIG_PCI 15 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 16 | { 17 | /* no legacy IRQ on csky */ 18 | return -ENODEV; 19 | } 20 | 21 | static inline int pci_proc_domain(struct pci_bus *bus) 22 | { 23 | /* always show the domain in /proc */ 24 | return 1; 25 | } 26 | #endif /* CONFIG_PCI */ 27 | 28 | #define PCI_DMA_BUS_IS_PHYS (1) 29 | 30 | #endif /* __ASM_CSKY_PCI_H */ 31 | -------------------------------------------------------------------------------- /arch/csky/include/asm/perf_event.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // 
Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_PERF_EVENT_H 5 | #define __ASM_CSKY_PERF_EVENT_H 6 | 7 | #include 8 | 9 | #define perf_arch_fetch_caller_regs(regs, __ip) { \ 10 | (regs)->pc = (__ip); \ 11 | regs_fp(regs) = (unsigned long) __builtin_frame_address(0); \ 12 | asm volatile("mov %0, sp\n":"=r"((regs)->usp)); \ 13 | } 14 | 15 | #endif /* __ASM_PERF_EVENT_ELF_H */ 16 | -------------------------------------------------------------------------------- /arch/csky/include/asm/pgalloc.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_PGALLOC_H 5 | #define __ASM_CSKY_PGALLOC_H 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, 12 | pte_t *pte) 13 | { 14 | set_pmd(pmd, __pmd(__pa(pte))); 15 | } 16 | 17 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, 18 | pgtable_t pte) 19 | { 20 | set_pmd(pmd, __pmd(__pa(page_address(pte)))); 21 | } 22 | 23 | #define pmd_pgtable(pmd) pmd_page(pmd) 24 | 25 | extern void pgd_init(unsigned long *p); 26 | 27 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) 28 | { 29 | pte_t *pte; 30 | unsigned long i; 31 | 32 | pte = (pte_t *) __get_free_page(GFP_KERNEL); 33 | if (!pte) 34 | return NULL; 35 | 36 | for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++) 37 | (pte + i)->pte_low = _PAGE_GLOBAL; 38 | 39 | return pte; 40 | } 41 | 42 | static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr) 43 | { 44 | struct page *pte; 45 | 46 | pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); 47 | if (!pte) 48 | return NULL; 49 | 50 | if (!pgtable_page_ctor(pte)) { 51 | __free_page(pte); 52 | return NULL; 53 | } 54 | 55 | return pte; 56 | } 57 | 58 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 59 | { 60 | free_pages((unsigned long)pte, PTE_ORDER); 61 | } 62 | 63 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) 64 | { 65 | pgtable_page_dtor(pte); 66 | __free_pages(pte, PTE_ORDER); 67 | } 68 | 69 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 70 | { 71 | free_pages((unsigned long)pgd, PGD_ORDER); 72 | } 73 | 74 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) 75 | { 76 | pgd_t *ret; 77 | pgd_t *init; 78 | 79 | ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); 80 | if (ret) { 81 | init = pgd_offset(&init_mm, 0UL); 82 | pgd_init((unsigned long *)ret); 83 | memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, 84 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); 85 | /* prevent out of order excute */ 86 | smp_mb(); 87 | #ifdef CONFIG_CPU_NEED_TLBSYNC 88 | dcache_wb_range((unsigned int)ret, 89 | (unsigned int)(ret + PTRS_PER_PGD)); 90 | #endif 91 | } 92 | 93 | return ret; 94 | } 95 | 96 | #define __pte_free_tlb(tlb, pte, address) \ 97 | do { \ 98 | pgtable_page_dtor(pte); \ 99 | tlb_remove_page(tlb, pte); \ 100 | } while (0) 101 | 102 | #define check_pgt_cache() do {} while (0) 103 | 104 | extern void pagetable_init(void); 105 | extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn); 106 | extern void pre_trap_init(void); 107 | 108 | #endif /* __ASM_CSKY_PGALLOC_H */ 109 | -------------------------------------------------------------------------------- /arch/csky/include/asm/probes.h: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_PROBES_H 4 | #define __ASM_CSKY_PROBES_H 5 | 6 | typedef u32 probe_opcode_t; 7 | typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *); 8 | 9 | /* architecture specific copy of original instruction */ 10 | struct arch_probe_insn { 11 | probe_opcode_t *insn; 12 | probes_handler_t *handler; 13 | /* restore address after simulation */ 14 | unsigned long restore; 15 | }; 16 | 17 | #ifdef CONFIG_KPROBES 18 | typedef u32 kprobe_opcode_t; 19 | struct arch_specific_insn { 20 | struct arch_probe_insn api; 21 | }; 22 | #endif 23 | 24 | #endif /* __ASM_CSKY_PROBES_H */ 25 | -------------------------------------------------------------------------------- /arch/csky/include/asm/processor.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_PROCESSOR_H 5 | #define __ASM_CSKY_PROCESSOR_H 6 | 7 | /* 8 | * Default implementation of macro that returns current 9 | * instruction pointer ("program counter"). 10 | */ 11 | #define current_text_addr() ({ __label__ _l; _l: &&_l; }) 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #ifdef CONFIG_CPU_HAS_FPU 22 | #include 23 | #endif 24 | 25 | struct cpuinfo_csky { 26 | unsigned long asid_cache; 27 | } __aligned(SMP_CACHE_BYTES); 28 | 29 | extern struct cpuinfo_csky cpu_data[]; 30 | 31 | /* 32 | * User space process size: 2GB. This is hardcoded into a few places, 33 | * so don't change it unless you know what you are doing. TASK_SIZE 34 | * for a 64 bit kernel expandable to 8192EB, of which the current CSKY 35 | * implementations will "only" be able to use 1TB ... 36 | */ 37 | #define TASK_SIZE (PAGE_OFFSET - (PAGE_SIZE * 8)) 38 | 39 | #ifdef __KERNEL__ 40 | #define STACK_TOP TASK_SIZE 41 | #define STACK_TOP_MAX STACK_TOP 42 | #endif 43 | 44 | /* This decides where the kernel will search for a free chunk of vm 45 | * space during mmap's. 46 | */ 47 | #define TASK_UNMAPPED_BASE (TASK_SIZE / 4) 48 | 49 | struct thread_struct { 50 | unsigned long sp; /* kernel stack pointer */ 51 | unsigned long trap_no; /* saved status register */ 52 | 53 | /* FPU regs */ 54 | struct user_fp __aligned(16) user_fp; 55 | }; 56 | 57 | #define INIT_THREAD { \ 58 | .sp = sizeof(init_stack) + (unsigned long) &init_stack, \ 59 | } 60 | 61 | /* 62 | * Do necessary setup to start up a newly executed thread. 63 | * 64 | * pass the data segment into user programs if it exists, 65 | * it can't hurt anything as far as I can tell 66 | */ 67 | #define start_thread(_regs, _pc, _usp) \ 68 | do { \ 69 | set_fs(USER_DS); /* reads from user space */ \ 70 | (_regs)->pc = (_pc); \ 71 | (_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */ \ 72 | (_regs)->regs[2] = 0; \ 73 | (_regs)->regs[3] = 0; /* ABIV2 is R7, use it? */ \ 74 | (_regs)->sr &= ~PS_S; \ 75 | (_regs)->usp = (_usp); \ 76 | } while (0) 77 | 78 | /* Forward declaration, a strange C thing */ 79 | struct task_struct; 80 | 81 | /* Free all resources held by a thread. 
*/ 82 | static inline void release_thread(struct task_struct *dead_task) 83 | { 84 | } 85 | 86 | /* Prepare to copy thread state - unlazy all lazy status */ 87 | #define prepare_to_copy(tsk) do { } while (0) 88 | 89 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 90 | 91 | #define copy_segments(tsk, mm) do { } while (0) 92 | #define release_segments(mm) do { } while (0) 93 | #define forget_segments() do { } while (0) 94 | 95 | extern unsigned long thread_saved_pc(struct task_struct *tsk); 96 | 97 | unsigned long get_wchan(struct task_struct *p); 98 | 99 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) 100 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) 101 | 102 | #define task_pt_regs(p) \ 103 | ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) 104 | 105 | #define cpu_relax() barrier() 106 | 107 | #endif /* __ASM_CSKY_PROCESSOR_H */ 108 | -------------------------------------------------------------------------------- /arch/csky/include/asm/ptrace.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_PTRACE_H 5 | #define __ASM_CSKY_PTRACE_H 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #ifndef __ASSEMBLY__ 13 | 14 | #define PS_S 0x80000000 /* Supervisor Mode */ 15 | 16 | #define USR_BKPT 0x1464 17 | 18 | #define arch_has_single_step() (1) 19 | #define current_pt_regs() \ 20 | ({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; }) 21 | 22 | #define user_stack_pointer(regs) ((regs)->usp) 23 | 24 | #define user_mode(regs) (!((regs)->sr & PS_S)) 25 | #define instruction_pointer(regs) ((regs)->pc) 26 | #define profile_pc(regs) instruction_pointer(regs) 27 | #define trap_no(regs) ((regs->sr >> 16) & 0xff) 28 | 29 | static inline void instruction_pointer_set(struct pt_regs *regs, 30 | unsigned long val) 31 | { 32 | regs->pc = val; 33 | } 34 | 35 | #if defined(__CSKYABIV2__) 36 | #define MAX_REG_OFFSET offsetof(struct pt_regs, dcsr) 37 | #else 38 | #define MAX_REG_OFFSET offsetof(struct pt_regs, regs[9]) 39 | #endif 40 | 41 | static inline bool in_syscall(struct pt_regs const *regs) 42 | { 43 | return ((regs->sr >> 16) & 0xff) == VEC_TRAP0; 44 | } 45 | 46 | static inline void forget_syscall(struct pt_regs *regs) 47 | { 48 | regs->sr &= ~(0xff << 16); 49 | } 50 | 51 | static inline unsigned long regs_return_value(struct pt_regs *regs) 52 | { 53 | return regs->a0; 54 | } 55 | 56 | /* Valid only for Kernel mode traps. */ 57 | static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) 58 | { 59 | return regs->usp; 60 | } 61 | 62 | static inline unsigned long frame_pointer(struct pt_regs *regs) 63 | { 64 | return regs->regs[4]; 65 | } 66 | static inline void frame_pointer_set(struct pt_regs *regs, 67 | unsigned long val) 68 | { 69 | regs->regs[4] = val; 70 | } 71 | 72 | extern int regs_query_register_offset(const char *name); 73 | extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, 74 | unsigned int n); 75 | 76 | /* 77 | * regs_get_register() - get register value from its offset 78 | * @regs: pt_regs from which register value is gotten 79 | * @offset: offset of the register. 80 | * 81 | * regs_get_register returns the value of a register whose offset from @regs. 82 | * The @offset is the offset of the register in struct pt_regs. 83 | * If @offset is bigger than MAX_REG_OFFSET, this returns 0. 
84 | */ 85 | static inline unsigned long regs_get_register(struct pt_regs *regs, 86 | unsigned int offset) 87 | { 88 | if (unlikely(offset > MAX_REG_OFFSET)) 89 | return 0; 90 | 91 | return *(unsigned long *)((unsigned long)regs + offset); 92 | } 93 | 94 | #endif /* __ASSEMBLY__ */ 95 | #endif /* __ASM_CSKY_PTRACE_H */ 96 | -------------------------------------------------------------------------------- /arch/csky/include/asm/reg_ops.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_REGS_OPS_H 4 | #define __ASM_REGS_OPS_H 5 | 6 | #define mfcr(reg) \ 7 | ({ \ 8 | unsigned int tmp; \ 9 | asm volatile( \ 10 | "mfcr %0, "reg"\n" \ 11 | : "=r"(tmp) \ 12 | : \ 13 | : "memory"); \ 14 | tmp; \ 15 | }) 16 | 17 | #define mtcr(reg, val) \ 18 | ({ \ 19 | asm volatile( \ 20 | "mtcr %0, "reg"\n" \ 21 | : \ 22 | : "r"(val) \ 23 | : "memory"); \ 24 | }) 25 | 26 | #endif /* __ASM_REGS_OPS_H */ 27 | -------------------------------------------------------------------------------- /arch/csky/include/asm/segment.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_SEGMENT_H 5 | #define __ASM_CSKY_SEGMENT_H 6 | 7 | typedef struct { 8 | unsigned long seg; 9 | } mm_segment_t; 10 | 11 | #define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) 12 | #define get_ds() KERNEL_DS 13 | 14 | #define USER_DS ((mm_segment_t) { PAGE_OFFSET }) 15 | #define get_fs() (current_thread_info()->addr_limit) 16 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) 17 | #define segment_eq(a, b) ((a).seg == (b).seg) 18 | 19 | #endif /* __ASM_CSKY_SEGMENT_H */ 20 | -------------------------------------------------------------------------------- /arch/csky/include/asm/shmparam.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
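/*
 * A minimal usage sketch for the segment limits defined in segment.h above;
 * it follows the generic get_fs()/set_fs() idiom, and "ubuf", "kbuf" and
 * "len" are placeholder names, not symbols from this tree:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);	// uaccess checks now accept kernel addresses
 *	ret = copy_to_user(ubuf, kbuf, len);
 *	set_fs(old_fs);		// always restore the previous limit
 */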
3 | 4 | #ifndef __ASM_CSKY_SHMPARAM_H 5 | #define __ASM_CSKY_SHMPARAM_H 6 | 7 | #define SHMLBA (4 * PAGE_SIZE) 8 | 9 | #define __ARCH_FORCE_SHMLBA 10 | 11 | #endif /* __ASM_CSKY_SHMPARAM_H */ 12 | -------------------------------------------------------------------------------- /arch/csky/include/asm/smp.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_SMP_H 4 | #define __ASM_CSKY_SMP_H 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #ifdef CONFIG_SMP 11 | 12 | void __init setup_smp(void); 13 | 14 | void __init setup_smp_ipi(void); 15 | 16 | void arch_send_call_function_ipi_mask(struct cpumask *mask); 17 | 18 | void arch_send_call_function_single_ipi(int cpu); 19 | 20 | void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq); 21 | 22 | #define raw_smp_processor_id() (current_thread_info()->cpu) 23 | 24 | int __cpu_disable(void); 25 | 26 | void __cpu_die(unsigned int cpu); 27 | 28 | #endif /* CONFIG_SMP */ 29 | 30 | #endif /* __ASM_CSKY_SMP_H */ 31 | -------------------------------------------------------------------------------- /arch/csky/include/asm/spinlock_types.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_SPINLOCK_TYPES_H 4 | #define __ASM_CSKY_SPINLOCK_TYPES_H 5 | 6 | #ifndef __LINUX_SPINLOCK_TYPES_H 7 | # error "please don't include this file directly" 8 | #endif 9 | 10 | #define TICKET_NEXT 16 11 | 12 | typedef struct { 13 | union { 14 | u32 lock; 15 | struct __raw_tickets { 16 | /* little endian */ 17 | u16 owner; 18 | u16 next; 19 | } tickets; 20 | }; 21 | } arch_spinlock_t; 22 | 23 | #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } 24 | 25 | #ifdef CONFIG_QUEUED_RWLOCKS 26 | #include 27 | 28 | #else /* CONFIG_NR_CPUS > 2 */ 29 | 30 | typedef struct { 31 | u32 lock; 32 | } arch_rwlock_t; 33 | 34 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } 35 | 36 | #endif /* CONFIG_QUEUED_RWLOCKS */ 37 | #endif /* __ASM_CSKY_SPINLOCK_TYPES_H */ 38 | -------------------------------------------------------------------------------- /arch/csky/include/asm/stackprotector.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | #ifndef _ASM_STACKPROTECTOR_H 3 | #define _ASM_STACKPROTECTOR_H 1 4 | 5 | #include 6 | #include 7 | 8 | extern unsigned long __stack_chk_guard; 9 | 10 | /* 11 | * Initialize the stackprotector canary value. 12 | * 13 | * NOTE: this must only be called from functions that never return, 14 | * and it must always be inlined. 15 | */ 16 | static __always_inline void boot_init_stack_canary(void) 17 | { 18 | unsigned long canary; 19 | 20 | /* Try to get a semi random initial value. */ 21 | get_random_bytes(&canary, sizeof(canary)); 22 | canary ^= LINUX_VERSION_CODE; 23 | canary &= CANARY_MASK; 24 | 25 | current->stack_canary = canary; 26 | __stack_chk_guard = current->stack_canary; 27 | } 28 | 29 | #endif /* __ASM_SH_STACKPROTECTOR_H */ 30 | -------------------------------------------------------------------------------- /arch/csky/include/asm/string.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
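/*
 * Illustration of the ticket-lock encoding in spinlock_types.h above
 * (little-endian: owner in the low 16 bits, next in the high 16 bits);
 * the sequence below is a made-up example, not a captured trace:
 *
 *	lock = 0x00000000	// owner 0, next 0: unlocked
 *	lock = 0x00010000	// owner 0, next 1: held, no waiters
 *	lock = 0x00020000	// owner 0, next 2: held, one CPU spinning
 *	lock = 0x00020001	// owner 1, next 2: released, waiter may enter
 */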
3 | 4 | #ifndef _CSKY_STRING_MM_H_ 5 | #define _CSKY_STRING_MM_H_ 6 | 7 | #ifndef __ASSEMBLY__ 8 | #include 9 | #include 10 | #include 11 | #endif 12 | 13 | #endif /* _CSKY_STRING_MM_H_ */ 14 | -------------------------------------------------------------------------------- /arch/csky/include/asm/switch_to.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_SWITCH_TO_H 5 | #define __ASM_CSKY_SWITCH_TO_H 6 | 7 | #include 8 | #ifdef CONFIG_CPU_HAS_FPU 9 | #include 10 | static inline void __switch_to_fpu(struct task_struct *prev, 11 | struct task_struct *next) 12 | { 13 | save_to_user_fp(&prev->thread.user_fp); 14 | restore_from_user_fp(&next->thread.user_fp); 15 | } 16 | #else 17 | static inline void __switch_to_fpu(struct task_struct *prev, 18 | struct task_struct *next) 19 | {} 20 | #endif 21 | 22 | /* 23 | * Context switching is now performed out-of-line in switch_to.S 24 | */ 25 | extern struct task_struct *__switch_to(struct task_struct *, 26 | struct task_struct *); 27 | 28 | #define switch_to(prev, next, last) \ 29 | do { \ 30 | struct task_struct *__prev = (prev); \ 31 | struct task_struct *__next = (next); \ 32 | __switch_to_fpu(__prev, __next); \ 33 | ((last) = __switch_to((prev), (next))); \ 34 | } while (0) 35 | 36 | #endif /* __ASM_CSKY_SWITCH_TO_H */ 37 | -------------------------------------------------------------------------------- /arch/csky/include/asm/syscall.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_SYSCALL_H 4 | #define __ASM_SYSCALL_H 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | extern void *sys_call_table[]; 11 | 12 | static inline int 13 | syscall_get_nr(struct task_struct *task, struct pt_regs *regs) 14 | { 15 | return regs_syscallid(regs); 16 | } 17 | 18 | static inline void 19 | syscall_rollback(struct task_struct *task, struct pt_regs *regs) 20 | { 21 | regs->a0 = regs->orig_a0; 22 | } 23 | 24 | static inline long 25 | syscall_get_error(struct task_struct *task, struct pt_regs *regs) 26 | { 27 | unsigned long error = regs->a0; 28 | 29 | return IS_ERR_VALUE(error) ? 
error : 0; 30 | } 31 | 32 | static inline long 33 | syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) 34 | { 35 | return regs->a0; 36 | } 37 | 38 | static inline void 39 | syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, 40 | int error, long val) 41 | { 42 | regs->a0 = (long) error ?: val; 43 | } 44 | 45 | static inline void 46 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, 47 | unsigned int i, unsigned int n, unsigned long *args) 48 | { 49 | BUG_ON(i + n > 6); 50 | if (i == 0) { 51 | args[0] = regs->orig_a0; 52 | args++; 53 | n--; 54 | } else { 55 | i--; 56 | } 57 | memcpy(args, ®s->a1 + i, n * sizeof(args[0])); 58 | } 59 | 60 | static inline void 61 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, 62 | unsigned int i, unsigned int n, const unsigned long *args) 63 | { 64 | BUG_ON(i + n > 6); 65 | if (i == 0) { 66 | regs->orig_a0 = args[0]; 67 | args++; 68 | n--; 69 | } else { 70 | i--; 71 | } 72 | memcpy(®s->a1 + i, args, n * sizeof(regs->a1)); 73 | } 74 | 75 | #endif /* __ASM_SYSCALL_H */ 76 | -------------------------------------------------------------------------------- /arch/csky/include/asm/syscalls.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_SYSCALLS_H 5 | #define __ASM_CSKY_SYSCALLS_H 6 | 7 | #include 8 | 9 | long sys_cacheflush(void __user *, unsigned long, int); 10 | 11 | long sys_set_thread_area(unsigned long addr); 12 | 13 | long sys_csky_fadvise64_64(int fd, int advice, loff_t offset, loff_t len); 14 | 15 | #endif /* __ASM_CSKY_SYSCALLS_H */ 16 | -------------------------------------------------------------------------------- /arch/csky/include/asm/tcm.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_TCM_H 4 | #define __ASM_CSKY_TCM_H 5 | 6 | #ifndef CONFIG_HAVE_TCM 7 | #error "You should not be including tcm.h unless you have a TCM!" 8 | #endif 9 | 10 | #include 11 | 12 | /* Tag variables with this */ 13 | #define __tcmdata __section(.tcm.data) 14 | /* Tag constants with this */ 15 | #define __tcmconst __section(.tcm.rodata) 16 | /* Tag functions inside TCM called from outside TCM with this */ 17 | #define __tcmfunc __section(.tcm.text) noinline 18 | /* Tag function inside TCM called from inside TCM with this */ 19 | #define __tcmlocalfunc __section(.tcm.text) 20 | 21 | void *tcm_alloc(size_t len); 22 | void tcm_free(void *addr, size_t len); 23 | 24 | #endif 25 | -------------------------------------------------------------------------------- /arch/csky/include/asm/thread_info.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
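/*
 * How syscall_get_arguments() in syscall.h above resolves the common
 * i == 0, n == 6 case (a sketch of the resulting copies, not extra code):
 *
 *	args[0] = regs->orig_a0;	// a0 itself is overwritten by the return
 *					// value, so the entry code keeps the
 *					// original first argument in orig_a0
 *	args[1..5] <- regs->a1, a2, a3, regs[0], regs[1]
 *					// a single memcpy() works because a1..a3
 *					// and regs[] are contiguous in pt_regs
 */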
3 | 4 | #ifndef _ASM_CSKY_THREAD_INFO_H 5 | #define _ASM_CSKY_THREAD_INFO_H 6 | 7 | #ifndef __ASSEMBLY__ 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | struct thread_info { 16 | struct task_struct *task; 17 | void *dump_exec_domain; 18 | unsigned long flags; 19 | int preempt_count; 20 | unsigned long tp_value; 21 | mm_segment_t addr_limit; 22 | struct restart_block restart_block; 23 | struct pt_regs *regs; 24 | unsigned int cpu; 25 | }; 26 | 27 | #define INIT_THREAD_INFO(tsk) \ 28 | { \ 29 | .task = &tsk, \ 30 | .preempt_count = INIT_PREEMPT_COUNT, \ 31 | .addr_limit = KERNEL_DS, \ 32 | .cpu = 0, \ 33 | .restart_block = { \ 34 | .fn = do_no_restart_syscall, \ 35 | }, \ 36 | } 37 | 38 | #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) 39 | 40 | #define thread_saved_fp(tsk) \ 41 | ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r8)) 42 | 43 | #define thread_saved_sp(tsk) \ 44 | ((unsigned long)(tsk->thread.sp)) 45 | 46 | #define thread_saved_lr(tsk) \ 47 | ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r15)) 48 | 49 | static inline struct thread_info *current_thread_info(void) 50 | { 51 | unsigned long sp; 52 | 53 | asm volatile("mov %0, sp\n":"=r"(sp)); 54 | 55 | return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); 56 | } 57 | 58 | #endif /* !__ASSEMBLY__ */ 59 | 60 | #define TIF_SIGPENDING 0 /* signal pending */ 61 | #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ 62 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 63 | #define TIF_UPROBE 3 /* uprobe breakpoint or singlestep */ 64 | #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ 65 | #define TIF_SYSCALL_TRACEPOINT 5 /* syscall tracepoint instrumentation */ 66 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing */ 67 | #define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */ 68 | #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 69 | #define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ 70 | #define TIF_SECCOMP 21 /* secure computing */ 71 | 72 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 73 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 74 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 75 | #define _TIF_UPROBE (1 << TIF_UPROBE) 76 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 77 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) 78 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 79 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 80 | #define _TIF_MEMDIE (1 << TIF_MEMDIE) 81 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 82 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) 83 | 84 | #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ 85 | _TIF_NOTIFY_RESUME | _TIF_UPROBE) 86 | 87 | #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ 88 | _TIF_SYSCALL_TRACEPOINT) 89 | 90 | #endif /* _ASM_CSKY_THREAD_INFO_H */ 91 | -------------------------------------------------------------------------------- /arch/csky/include/asm/tlb.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
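/*
 * Worked example for current_thread_info() in thread_info.h above, assuming
 * an 8 KiB THREAD_SIZE (the real value comes from THREAD_SIZE_ORDER):
 *
 *	sp                 = 0xc0a03e58
 *	sp & ~(0x2000 - 1) = 0xc0a02000		// base of the kernel stack area
 *
 * struct thread_info sits at the bottom of the kernel stack, so any
 * in-stack pointer reaches it with a single mask.
 */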
3 | 4 | #ifndef __ASM_CSKY_TLB_H 5 | #define __ASM_CSKY_TLB_H 6 | 7 | #include 8 | 9 | #define tlb_start_vma(tlb, vma) \ 10 | do { \ 11 | if (!tlb->fullmm) \ 12 | flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 13 | } while (0) 14 | 15 | #define tlb_end_vma(tlb, vma) \ 16 | do { \ 17 | if (!tlb->fullmm) \ 18 | flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ 19 | } while (0) 20 | 21 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) 22 | 23 | #include 24 | 25 | #endif /* __ASM_CSKY_TLB_H */ 26 | -------------------------------------------------------------------------------- /arch/csky/include/asm/tlbflush.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_TLBFLUSH_H 5 | #define __ASM_TLBFLUSH_H 6 | 7 | /* 8 | * TLB flushing: 9 | * 10 | * - flush_tlb_all() flushes all processes TLB entries 11 | * - flush_tlb_mm(mm) flushes the specified mm context TLB entries 12 | * - flush_tlb_page(vma, vmaddr) flushes one page 13 | * - flush_tlb_range(vma, start, end) flushes a range of pages 14 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 15 | */ 16 | extern void flush_tlb_all(void); 17 | extern void flush_tlb_mm(struct mm_struct *mm); 18 | extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 19 | extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 20 | unsigned long end); 21 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 22 | 23 | extern void flush_tlb_one(unsigned long vaddr); 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /arch/csky/include/asm/traps.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_TRAPS_H 5 | #define __ASM_CSKY_TRAPS_H 6 | 7 | #define VEC_RESET 0 8 | #define VEC_ALIGN 1 9 | #define VEC_ACCESS 2 10 | #define VEC_ZERODIV 3 11 | #define VEC_ILLEGAL 4 12 | #define VEC_PRIV 5 13 | #define VEC_TRACE 6 14 | #define VEC_BREAKPOINT 7 15 | #define VEC_UNRECOVER 8 16 | #define VEC_SOFTRESET 9 17 | #define VEC_AUTOVEC 10 18 | #define VEC_FAUTOVEC 11 19 | #define VEC_HWACCEL 12 20 | #define VEC_TFATAL 13 21 | #define VEC_TLBMISS 14 22 | #define VEC_TLBMODIFIED 15 23 | 24 | #define VEC_TRAP0 16 25 | #define VEC_TRAP1 17 26 | #define VEC_TRAP2 18 27 | #define VEC_TRAP3 19 28 | 29 | #define VEC_TLBINVALIDL 20 30 | #define VEC_TLBINVALIDS 21 31 | 32 | #define VEC_PRFL 29 33 | #define VEC_FPE 30 34 | 35 | extern void *vec_base[]; 36 | 37 | #define VEC_INIT(i, func) \ 38 | do { \ 39 | vec_base[i] = (void *)func; \ 40 | } while (0) 41 | 42 | void csky_alignment(struct pt_regs *regs); 43 | 44 | #endif /* __ASM_CSKY_TRAPS_H */ 45 | -------------------------------------------------------------------------------- /arch/csky/include/asm/unistd.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
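/*
 * Usage sketch for the VEC_INIT() helper in traps.h above; the handler name
 * is hypothetical, only the installation pattern is shown:
 *
 *	extern void handle_tlb_miss(void);
 *
 *	VEC_INIT(VEC_TLBMISS, handle_tlb_miss);	// vec_base[14] = handle_tlb_miss
 */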
3 | 4 | #include 5 | 6 | #define NR_syscalls (__NR_syscalls) 7 | -------------------------------------------------------------------------------- /arch/csky/include/asm/uprobes.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | 3 | #ifndef __ASM_CSKY_UPROBES_H 4 | #define __ASM_CSKY_UPROBES_H 5 | 6 | #include 7 | 8 | #define MAX_UINSN_BYTES 4 9 | 10 | #define UPROBE_SWBP_INSN USR_BKPT 11 | #define UPROBE_SWBP_INSN_SIZE 2 12 | #define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES 13 | 14 | typedef u32 uprobe_opcode_t; 15 | 16 | struct arch_uprobe_task { 17 | unsigned long saved_trap_no; 18 | }; 19 | 20 | struct arch_uprobe { 21 | union { 22 | u8 insn[MAX_UINSN_BYTES]; 23 | u8 ixol[MAX_UINSN_BYTES]; 24 | }; 25 | struct arch_probe_insn api; 26 | unsigned long insn_size; 27 | bool simulate; 28 | }; 29 | 30 | int uprobe_breakpoint_handler(struct pt_regs *regs); 31 | int uprobe_single_step_handler(struct pt_regs *regs); 32 | 33 | #endif /* __ASM_CSKY_UPROBES_H */ 34 | -------------------------------------------------------------------------------- /arch/csky/include/asm/vdso.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_VDSO_H 4 | #define __ASM_CSKY_VDSO_H 5 | 6 | #include 7 | 8 | struct csky_vdso { 9 | unsigned short rt_signal_retcode[4]; 10 | }; 11 | 12 | #endif /* __ASM_CSKY_VDSO_H */ 13 | -------------------------------------------------------------------------------- /arch/csky/include/uapi/asm/Kbuild: -------------------------------------------------------------------------------- 1 | include include/uapi/asm-generic/Kbuild.asm 2 | 3 | header-y += cachectl.h 4 | 5 | generic-y += auxvec.h 6 | generic-y += param.h 7 | generic-y += bpf_perf_event.h 8 | generic-y += errno.h 9 | generic-y += fcntl.h 10 | generic-y += ioctl.h 11 | generic-y += ioctls.h 12 | generic-y += ipcbuf.h 13 | generic-y += shmbuf.h 14 | generic-y += bitsperlong.h 15 | generic-y += mman.h 16 | generic-y += msgbuf.h 17 | generic-y += poll.h 18 | generic-y += posix_types.h 19 | generic-y += resource.h 20 | generic-y += sembuf.h 21 | generic-y += siginfo.h 22 | generic-y += signal.h 23 | generic-y += socket.h 24 | generic-y += sockios.h 25 | generic-y += statfs.h 26 | generic-y += stat.h 27 | generic-y += setup.h 28 | generic-y += swab.h 29 | generic-y += termbits.h 30 | generic-y += termios.h 31 | generic-y += types.h 32 | generic-y += ucontext.h 33 | -------------------------------------------------------------------------------- /arch/csky/include/uapi/asm/byteorder.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
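/*
 * Note on the uprobe constants in asm/uprobes.h above: the breakpoint that
 * is actually planted is the 16-bit USR_BKPT pattern (0x1464, from
 * asm/ptrace.h), hence UPROBE_SWBP_INSN_SIZE == 2, while the out-of-line
 * slot still reserves MAX_UINSN_BYTES (4) so it can hold a full 32-bit
 * instruction.
 */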
3 | 4 | #ifndef __ASM_CSKY_BYTEORDER_H 5 | #define __ASM_CSKY_BYTEORDER_H 6 | 7 | #include 8 | 9 | #endif /* __ASM_CSKY_BYTEORDER_H */ 10 | -------------------------------------------------------------------------------- /arch/csky/include/uapi/asm/cachectl.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef __ASM_CSKY_CACHECTL_H 4 | #define __ASM_CSKY_CACHECTL_H 5 | 6 | /* 7 | * See "man cacheflush" 8 | */ 9 | #define ICACHE (1<<0) 10 | #define DCACHE (1<<1) 11 | #define BCACHE (ICACHE|DCACHE) 12 | 13 | #endif /* __ASM_CSKY_CACHECTL_H */ 14 | -------------------------------------------------------------------------------- /arch/csky/include/uapi/asm/perf_regs.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef _ASM_CSKY_PERF_REGS_H 5 | #define _ASM_CSKY_PERF_REGS_H 6 | 7 | /* Index of struct pt_regs */ 8 | enum perf_event_csky_regs { 9 | PERF_REG_CSKY_TLS, 10 | PERF_REG_CSKY_LR, 11 | PERF_REG_CSKY_PC, 12 | PERF_REG_CSKY_SR, 13 | PERF_REG_CSKY_SP, 14 | PERF_REG_CSKY_ORIG_A0, 15 | PERF_REG_CSKY_A0, 16 | PERF_REG_CSKY_A1, 17 | PERF_REG_CSKY_A2, 18 | PERF_REG_CSKY_A3, 19 | PERF_REG_CSKY_REGS0, 20 | PERF_REG_CSKY_REGS1, 21 | PERF_REG_CSKY_REGS2, 22 | PERF_REG_CSKY_REGS3, 23 | PERF_REG_CSKY_REGS4, 24 | PERF_REG_CSKY_REGS5, 25 | PERF_REG_CSKY_REGS6, 26 | PERF_REG_CSKY_REGS7, 27 | PERF_REG_CSKY_REGS8, 28 | PERF_REG_CSKY_REGS9, 29 | #if defined(__CSKYABIV2__) 30 | PERF_REG_CSKY_EXREGS0, 31 | PERF_REG_CSKY_EXREGS1, 32 | PERF_REG_CSKY_EXREGS2, 33 | PERF_REG_CSKY_EXREGS3, 34 | PERF_REG_CSKY_EXREGS4, 35 | PERF_REG_CSKY_EXREGS5, 36 | PERF_REG_CSKY_EXREGS6, 37 | PERF_REG_CSKY_EXREGS7, 38 | PERF_REG_CSKY_EXREGS8, 39 | PERF_REG_CSKY_EXREGS9, 40 | PERF_REG_CSKY_EXREGS10, 41 | PERF_REG_CSKY_EXREGS11, 42 | PERF_REG_CSKY_EXREGS12, 43 | PERF_REG_CSKY_EXREGS13, 44 | PERF_REG_CSKY_EXREGS14, 45 | PERF_REG_CSKY_HI, 46 | PERF_REG_CSKY_LO, 47 | PERF_REG_CSKY_DCSR, 48 | #endif 49 | PERF_REG_CSKY_MAX, 50 | }; 51 | #endif /* _ASM_CSKY_PERF_REGS_H */ 52 | -------------------------------------------------------------------------------- /arch/csky/include/uapi/asm/ptrace.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #ifndef _CSKY_PTRACE_H 5 | #define _CSKY_PTRACE_H 6 | 7 | #ifndef __ASSEMBLY__ 8 | 9 | struct pt_regs { 10 | unsigned long tls; 11 | unsigned long lr; 12 | unsigned long pc; 13 | unsigned long sr; 14 | unsigned long usp; 15 | 16 | /* 17 | * a0, a1, a2, a3: 18 | * abiv1: r2, r3, r4, r5 19 | * abiv2: r0, r1, r2, r3 20 | */ 21 | unsigned long orig_a0; 22 | unsigned long a0; 23 | unsigned long a1; 24 | unsigned long a2; 25 | unsigned long a3; 26 | 27 | /* 28 | * ABIV2: r4 ~ r13 29 | * ABIV1: r6 ~ r14, r1 30 | */ 31 | unsigned long regs[10]; 32 | 33 | #if defined(__CSKYABIV2__) 34 | /* r16 ~ r30 */ 35 | unsigned long exregs[15]; 36 | 37 | unsigned long rhi; 38 | unsigned long rlo; 39 | unsigned long dcsr; 40 | #endif 41 | }; 42 | 43 | struct user_fp { 44 | unsigned long vr[96]; 45 | unsigned long fcr; 46 | unsigned long fesr; 47 | unsigned long fid; 48 | unsigned long reserved; 49 | }; 50 | 51 | #endif /* __ASSEMBLY__ */ 52 | #endif /* _CSKY_PTRACE_H */ 53 | -------------------------------------------------------------------------------- /arch/csky/include/uapi/asm/sigcontext.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #ifndef __ASM_CSKY_SIGCONTEXT_H 5 | #define __ASM_CSKY_SIGCONTEXT_H 6 | 7 | #include 8 | 9 | struct sigcontext { 10 | struct pt_regs sc_pt_regs; 11 | struct user_fp sc_user_fp; 12 | }; 13 | 14 | #endif /* __ASM_CSKY_SIGCONTEXT_H */ 15 | -------------------------------------------------------------------------------- /arch/csky/include/uapi/asm/unistd.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #define __ARCH_WANT_SYS_CLONE 5 | #include 6 | 7 | #define __NR_set_thread_area (__NR_arch_specific_syscall + 0) 8 | __SYSCALL(__NR_set_thread_area, sys_set_thread_area) 9 | #define __NR_cacheflush (__NR_arch_specific_syscall + 1) 10 | __SYSCALL(__NR_cacheflush, sys_cacheflush) 11 | -------------------------------------------------------------------------------- /arch/csky/kernel/Makefile: -------------------------------------------------------------------------------- 1 | extra-y := head.o vmlinux.lds 2 | 3 | obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o 4 | obj-y += power.o syscall.o syscall_table.o setup.o io.o 5 | obj-y += process.o cpu-probe.o ptrace.o stacktrace.o 6 | obj-y += probes/ 7 | 8 | obj-$(CONFIG_MODULES) += module.o 9 | obj-$(CONFIG_SMP) += smp.o 10 | obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o 11 | obj-$(CONFIG_STACKTRACE) += stacktrace.o 12 | obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o 13 | obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o 14 | obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o 15 | obj-$(CONFIG_PCI) += pci.o 16 | 17 | ifdef CONFIG_FUNCTION_TRACER 18 | CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) 19 | endif 20 | -------------------------------------------------------------------------------- /arch/csky/kernel/asm-offsets.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
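/*
 * Userspace calling sketch for the two arch-specific syscalls defined above;
 * tls_block, jit_buf and jit_len are placeholders, only the argument order
 * (matching the prototypes in asm/syscalls.h) is the point:
 *
 *	syscall(__NR_set_thread_area, (unsigned long)tls_block);
 *	syscall(__NR_cacheflush, jit_buf, jit_len, BCACHE);
 *					// flush both I- and D-cache after
 *					// generating code in jit_buf
 */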
3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | int main(void) 10 | { 11 | /* offsets into the task struct */ 12 | DEFINE(TASK_STATE, offsetof(struct task_struct, state)); 13 | DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); 14 | DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); 15 | DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); 16 | DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); 17 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); 18 | DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); 19 | 20 | /* offsets into the thread struct */ 21 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, sp)); 22 | DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr)); 23 | DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr)); 24 | DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr)); 25 | 26 | /* offsets into the thread_info struct */ 27 | DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); 28 | DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count)); 29 | DEFINE(TINFO_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); 30 | DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value)); 31 | DEFINE(TINFO_TASK, offsetof(struct thread_info, task)); 32 | 33 | /* offsets into the pt_regs */ 34 | DEFINE(PT_PC, offsetof(struct pt_regs, pc)); 35 | DEFINE(PT_ORIG_AO, offsetof(struct pt_regs, orig_a0)); 36 | DEFINE(PT_SR, offsetof(struct pt_regs, sr)); 37 | 38 | DEFINE(PT_A0, offsetof(struct pt_regs, a0)); 39 | DEFINE(PT_A1, offsetof(struct pt_regs, a1)); 40 | DEFINE(PT_A2, offsetof(struct pt_regs, a2)); 41 | DEFINE(PT_A3, offsetof(struct pt_regs, a3)); 42 | DEFINE(PT_REGS0, offsetof(struct pt_regs, regs[0])); 43 | DEFINE(PT_REGS1, offsetof(struct pt_regs, regs[1])); 44 | DEFINE(PT_REGS2, offsetof(struct pt_regs, regs[2])); 45 | DEFINE(PT_REGS3, offsetof(struct pt_regs, regs[3])); 46 | DEFINE(PT_REGS4, offsetof(struct pt_regs, regs[4])); 47 | DEFINE(PT_REGS5, offsetof(struct pt_regs, regs[5])); 48 | DEFINE(PT_REGS6, offsetof(struct pt_regs, regs[6])); 49 | DEFINE(PT_REGS7, offsetof(struct pt_regs, regs[7])); 50 | DEFINE(PT_REGS8, offsetof(struct pt_regs, regs[8])); 51 | DEFINE(PT_REGS9, offsetof(struct pt_regs, regs[9])); 52 | DEFINE(PT_R15, offsetof(struct pt_regs, lr)); 53 | #if defined(__CSKYABIV2__) 54 | DEFINE(PT_R16, offsetof(struct pt_regs, exregs[0])); 55 | DEFINE(PT_R17, offsetof(struct pt_regs, exregs[1])); 56 | DEFINE(PT_R18, offsetof(struct pt_regs, exregs[2])); 57 | DEFINE(PT_R19, offsetof(struct pt_regs, exregs[3])); 58 | DEFINE(PT_R20, offsetof(struct pt_regs, exregs[4])); 59 | DEFINE(PT_R21, offsetof(struct pt_regs, exregs[5])); 60 | DEFINE(PT_R22, offsetof(struct pt_regs, exregs[6])); 61 | DEFINE(PT_R23, offsetof(struct pt_regs, exregs[7])); 62 | DEFINE(PT_R24, offsetof(struct pt_regs, exregs[8])); 63 | DEFINE(PT_R25, offsetof(struct pt_regs, exregs[9])); 64 | DEFINE(PT_R26, offsetof(struct pt_regs, exregs[10])); 65 | DEFINE(PT_R27, offsetof(struct pt_regs, exregs[11])); 66 | DEFINE(PT_R28, offsetof(struct pt_regs, exregs[12])); 67 | DEFINE(PT_R29, offsetof(struct pt_regs, exregs[13])); 68 | DEFINE(PT_R30, offsetof(struct pt_regs, exregs[14])); 69 | DEFINE(PT_R31, offsetof(struct pt_regs, exregs[15])); 70 | DEFINE(PT_RHI, offsetof(struct pt_regs, rhi)); 71 | DEFINE(PT_RLO, offsetof(struct pt_regs, rlo)); 72 | #endif 73 | DEFINE(PT_USP, offsetof(struct pt_regs, usp)); 74 | DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs)); 75 | 76 | /* offsets into the irq_cpustat_t 
struct */ 77 | DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, 78 | __softirq_pending)); 79 | 80 | /* signal defines */ 81 | DEFINE(SIGSEGV, SIGSEGV); 82 | DEFINE(SIGTRAP, SIGTRAP); 83 | 84 | return 0; 85 | } 86 | -------------------------------------------------------------------------------- /arch/csky/kernel/atomic.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | 7 | .text 8 | 9 | /* 10 | * int csky_cmpxchg(int oldval, int newval, int *ptr) 11 | * 12 | * If *ptr != oldval && return 1, 13 | * else *ptr = newval return 0. 14 | */ 15 | ENTRY(csky_cmpxchg) 16 | USPTOKSP 17 | 18 | RD_MEH a3 19 | WR_MEH a3 20 | 21 | mfcr a3, epc 22 | addi a3, TRAP0_SIZE 23 | 24 | subi sp, 16 25 | stw a3, (sp, 0) 26 | mfcr a3, epsr 27 | stw a3, (sp, 4) 28 | mfcr a3, usp 29 | stw a3, (sp, 8) 30 | 31 | psrset ee 32 | #ifdef CONFIG_CPU_HAS_LDSTEX 33 | 1: 34 | ldex a3, (a2) 35 | cmpne a0, a3 36 | bt16 2f 37 | mov a3, a1 38 | stex a3, (a2) 39 | bez a3, 1b 40 | 2: 41 | sync.is 42 | #else 43 | .globl csky_cmpxchg_ldw 44 | csky_cmpxchg_ldw: 45 | ldw a3, (a2) 46 | cmpne a0, a3 47 | bt16 3f 48 | .globl csky_cmpxchg_stw 49 | csky_cmpxchg_stw: 50 | stw a1, (a2) 51 | 3: 52 | #endif 53 | mvc a0 54 | ldw a3, (sp, 0) 55 | mtcr a3, epc 56 | ldw a3, (sp, 4) 57 | mtcr a3, epsr 58 | ldw a3, (sp, 8) 59 | mtcr a3, usp 60 | addi sp, 16 61 | KSPTOUSP 62 | rte 63 | END(csky_cmpxchg) 64 | -------------------------------------------------------------------------------- /arch/csky/kernel/cpu-probe.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | static void percpu_print(void *arg) 12 | { 13 | struct seq_file *m = (struct seq_file *)arg; 14 | unsigned int cur, next, i; 15 | 16 | seq_printf(m, "processor : %d\n", smp_processor_id()); 17 | seq_printf(m, "C-SKY CPU model : %s\n", CSKYCPU_DEF_NAME); 18 | 19 | /* read processor id, max is 100 */ 20 | cur = mfcr("cr13"); 21 | for (i = 0; i < 100; i++) { 22 | seq_printf(m, "product info[%d] : 0x%08x\n", i, cur); 23 | 24 | next = mfcr("cr13"); 25 | 26 | /* some CPU only has one id reg */ 27 | if (cur == next) 28 | break; 29 | 30 | cur = next; 31 | 32 | /* cpid index is 31-28, reset */ 33 | if (!(next >> 28)) { 34 | while ((mfcr("cr13") >> 28) != i); 35 | break; 36 | } 37 | } 38 | 39 | /* CPU feature regs, setup by bootloader or gdbinit */ 40 | seq_printf(m, "hint (CPU funcs): 0x%08x\n", mfcr_hint()); 41 | seq_printf(m, "ccr (L1C & MMU): 0x%08x\n", mfcr("cr18")); 42 | seq_printf(m, "ccr2 (L2C) : 0x%08x\n", mfcr_ccr2()); 43 | seq_printf(m, "\n"); 44 | } 45 | 46 | static int c_show(struct seq_file *m, void *v) 47 | { 48 | int cpu; 49 | 50 | for_each_online_cpu(cpu) 51 | smp_call_function_single(cpu, percpu_print, m, true); 52 | 53 | #ifdef CSKY_ARCH_VERSION 54 | seq_printf(m, "arch-version : %s\n", CSKY_ARCH_VERSION); 55 | seq_printf(m, "\n"); 56 | #endif 57 | 58 | return 0; 59 | } 60 | 61 | static void *c_start(struct seq_file *m, loff_t *pos) 62 | { 63 | return *pos < 1 ? 
(void *)1 : NULL; 64 | } 65 | 66 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) 67 | { 68 | ++*pos; 69 | return NULL; 70 | } 71 | 72 | static void c_stop(struct seq_file *m, void *v) {} 73 | 74 | const struct seq_operations cpuinfo_op = { 75 | .start = c_start, 76 | .next = c_next, 77 | .stop = c_stop, 78 | .show = c_show, 79 | }; 80 | -------------------------------------------------------------------------------- /arch/csky/kernel/head.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | __HEAD 9 | ENTRY(_start) 10 | SETUP_MMU 11 | 12 | /* set stack point */ 13 | lrw r6, init_thread_union + THREAD_SIZE 14 | mov sp, r6 15 | 16 | jmpi csky_start 17 | END(_start) 18 | 19 | #ifdef CONFIG_SMP 20 | .align 10 21 | ENTRY(_start_smp_secondary) 22 | SETUP_MMU 23 | 24 | #ifdef CONFIG_PAGE_OFFSET_80000000 25 | lrw r6, secondary_msa1 26 | ld.w r6, (r6, 0) 27 | mtcr r6, cr<31, 15> 28 | #endif 29 | 30 | lrw r6, secondary_pgd 31 | ld.w r6, (r6, 0) 32 | mtcr r6, cr<28, 15> 33 | mtcr r6, cr<29, 15> 34 | 35 | /* set stack point */ 36 | lrw r6, secondary_stack 37 | ld.w r6, (r6, 0) 38 | mov sp, r6 39 | 40 | jmpi csky_start_secondary 41 | END(_start_smp_secondary) 42 | #endif 43 | -------------------------------------------------------------------------------- /arch/csky/kernel/io.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | /* 8 | * Copy data from IO memory space to "real" memory space. 9 | */ 10 | void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) 11 | { 12 | while (count && !IS_ALIGNED((unsigned long)from, 4)) { 13 | *(u8 *)to = __raw_readb(from); 14 | from++; 15 | to++; 16 | count--; 17 | } 18 | 19 | while (count >= 4) { 20 | *(u32 *)to = __raw_readl(from); 21 | from += 4; 22 | to += 4; 23 | count -= 4; 24 | } 25 | 26 | while (count) { 27 | *(u8 *)to = __raw_readb(from); 28 | from++; 29 | to++; 30 | count--; 31 | } 32 | } 33 | EXPORT_SYMBOL(__memcpy_fromio); 34 | 35 | /* 36 | * Copy data from "real" memory space to IO memory space. 37 | */ 38 | void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) 39 | { 40 | while (count && !IS_ALIGNED((unsigned long)to, 4)) { 41 | __raw_writeb(*(u8 *)from, to); 42 | from++; 43 | to++; 44 | count--; 45 | } 46 | 47 | while (count >= 4) { 48 | __raw_writel(*(u32 *)from, to); 49 | from += 4; 50 | to += 4; 51 | count -= 4; 52 | } 53 | 54 | while (count) { 55 | __raw_writeb(*(u8 *)from, to); 56 | from++; 57 | to++; 58 | count--; 59 | } 60 | } 61 | EXPORT_SYMBOL(__memcpy_toio); 62 | 63 | /* 64 | * "memset" on IO memory space. 
65 | */ 66 | void __memset_io(volatile void __iomem *dst, int c, size_t count) 67 | { 68 | u32 qc = (u8)c; 69 | 70 | qc |= qc << 8; 71 | qc |= qc << 16; 72 | 73 | while (count && !IS_ALIGNED((unsigned long)dst, 4)) { 74 | __raw_writeb(c, dst); 75 | dst++; 76 | count--; 77 | } 78 | 79 | while (count >= 4) { 80 | __raw_writel(qc, dst); 81 | dst += 4; 82 | count -= 4; 83 | } 84 | 85 | while (count) { 86 | __raw_writeb(c, dst); 87 | dst++; 88 | count--; 89 | } 90 | } 91 | EXPORT_SYMBOL(__memset_io); 92 | -------------------------------------------------------------------------------- /arch/csky/kernel/irq.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | void __init init_IRQ(void) 12 | { 13 | irqchip_init(); 14 | #ifdef CONFIG_SMP 15 | setup_smp_ipi(); 16 | #endif 17 | } 18 | 19 | asmlinkage void __irq_entry csky_do_IRQ(struct pt_regs *regs) 20 | { 21 | handle_arch_irq(regs); 22 | } 23 | -------------------------------------------------------------------------------- /arch/csky/kernel/module.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #ifdef CONFIG_CPU_CK810 16 | #define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000) 17 | #define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0) 18 | 19 | #define CHANGE_JSRI_TO_LRW(addr) do { \ 20 | *(uint16_t *)(addr) = (*(uint16_t *)(addr) & 0xFF9F) | 0x001a; \ 21 | *((uint16_t *)(addr) + 1) = *((uint16_t *)(addr) + 1) & 0xFFFF; \ 22 | } while (0) 23 | 24 | #define SET_JSR32_R26(addr) do { \ 25 | *(uint16_t *)(addr) = 0xE8Fa; \ 26 | *((uint16_t *)(addr) + 1) = 0x0000; \ 27 | } while (0) 28 | 29 | static void jsri_2_lrw_jsr(uint32_t *location) 30 | { 31 | uint16_t *location_tmp = (uint16_t *)location; 32 | 33 | if (IS_BSR32(*location_tmp, *(location_tmp + 1))) 34 | return; 35 | 36 | if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) { 37 | /* jsri 0x... --> lrw r26, 0x... 
*/ 38 | CHANGE_JSRI_TO_LRW(location); 39 | /* lsli r0, r0 --> jsr r26 */ 40 | SET_JSR32_R26(location + 1); 41 | } 42 | } 43 | #else 44 | static void inline jsri_2_lrw_jsr(uint32_t *location) 45 | { 46 | return; 47 | } 48 | #endif 49 | 50 | int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, 51 | unsigned int symindex, unsigned int relsec, struct module *me) 52 | { 53 | unsigned int i; 54 | Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr; 55 | Elf32_Sym *sym; 56 | uint32_t *location; 57 | short *temp; 58 | 59 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 60 | /* This is where to make the change */ 61 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr 62 | + rel[i].r_offset; 63 | sym = (Elf32_Sym *)sechdrs[symindex].sh_addr 64 | + ELF32_R_SYM(rel[i].r_info); 65 | 66 | switch (ELF32_R_TYPE(rel[i].r_info)) { 67 | case R_CSKY_32: 68 | /* We add the value into the location given */ 69 | *location = rel[i].r_addend + sym->st_value; 70 | break; 71 | case R_CSKY_PC32: 72 | /* Add the value, subtract its postition */ 73 | *location = rel[i].r_addend + sym->st_value 74 | - (uint32_t)location; 75 | break; 76 | case R_CSKY_PCRELJSR_IMM11BY2: 77 | break; 78 | case R_CSKY_PCRELJSR_IMM26BY2: 79 | jsri_2_lrw_jsr(location); 80 | break; 81 | case R_CSKY_ADDR_HI16: 82 | temp = ((short *)location) + 1; 83 | *temp = (short) 84 | ((rel[i].r_addend + sym->st_value) >> 16); 85 | break; 86 | case R_CSKY_ADDR_LO16: 87 | temp = ((short *)location) + 1; 88 | *temp = (short) 89 | ((rel[i].r_addend + sym->st_value) & 0xffff); 90 | break; 91 | default: 92 | pr_err("module %s: Unknown relocation: %u\n", 93 | me->name, ELF32_R_TYPE(rel[i].r_info)); 94 | return -ENOEXEC; 95 | } 96 | } 97 | return 0; 98 | } 99 | -------------------------------------------------------------------------------- /arch/csky/kernel/pci.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | 3 | #include 4 | 5 | void pcibios_fixup_bus(struct pci_bus *bus) 6 | { 7 | } 8 | EXPORT_SYMBOL(pcibios_fixup_bus); 9 | 10 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, 11 | resource_size_t size, resource_size_t align) 12 | { 13 | return res->start; 14 | } 15 | EXPORT_SYMBOL(pcibios_align_resource); 16 | -------------------------------------------------------------------------------- /arch/csky/kernel/perf_callchain.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 
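/*
 * Worked example for the R_CSKY_ADDR_HI16/LO16 cases in module.c above,
 * using a made-up symbol value and a zero addend:
 *
 *	sym->st_value + r_addend = 0xc0123456
 *	ADDR_HI16 patches its immediate with 0xc012	// value >> 16
 *	ADDR_LO16 patches its immediate with 0x3456	// value & 0xffff
 *
 * together the pair rebuilds the full 32-bit address at module load time.
 */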
3 | 4 | #include 5 | #include 6 | 7 | /* Kernel callchain */ 8 | struct stackframe { 9 | unsigned long fp; 10 | unsigned long lr; 11 | }; 12 | 13 | static int unwind_frame_kernel(struct stackframe *frame) 14 | { 15 | unsigned long low = (unsigned long)task_stack_page(current); 16 | unsigned long high = low + THREAD_SIZE; 17 | 18 | if (unlikely(frame->fp < low || frame->fp > high)) 19 | return -EPERM; 20 | 21 | if (kstack_end((void *)frame->fp) || frame->fp & 0x3) 22 | return -EPERM; 23 | 24 | *frame = *(struct stackframe *)frame->fp; 25 | 26 | if (__kernel_text_address(frame->lr)) { 27 | int graph = 0; 28 | 29 | frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr, 30 | NULL); 31 | } 32 | return 0; 33 | } 34 | 35 | static void notrace walk_stackframe(struct stackframe *fr, 36 | struct perf_callchain_entry_ctx *entry) 37 | { 38 | do { 39 | perf_callchain_store(entry, fr->lr); 40 | } while (unwind_frame_kernel(fr) >= 0); 41 | } 42 | 43 | /* 44 | * Get the return address for a single stackframe and return a pointer to the 45 | * next frame tail. 46 | */ 47 | static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, 48 | unsigned long fp, unsigned long reg_lr) 49 | { 50 | struct stackframe buftail; 51 | unsigned long lr = 0; 52 | unsigned long *user_frame_tail = (unsigned long *)fp; 53 | 54 | /* Check accessibility of one struct frame_tail beyond */ 55 | if (!access_ok(VERIFY_READ, user_frame_tail, sizeof(buftail))) 56 | return 0; 57 | if (__copy_from_user_inatomic(&buftail, user_frame_tail, 58 | sizeof(buftail))) 59 | return 0; 60 | 61 | if (reg_lr != 0) 62 | lr = reg_lr; 63 | else 64 | lr = buftail.lr; 65 | 66 | fp = buftail.fp; 67 | perf_callchain_store(entry, lr); 68 | 69 | return fp; 70 | } 71 | 72 | /* 73 | * This will be called when the target is in user mode 74 | * This function will only be called when we use 75 | * "PERF_SAMPLE_CALLCHAIN" in 76 | * kernel/events/core.c:perf_prepare_sample() 77 | * 78 | * How to trigger perf_callchain_[user/kernel] : 79 | * $ perf record -e cpu-clock --call-graph fp ./program 80 | * $ perf report --call-graph 81 | * 82 | * On C-SKY platform, the program being sampled and the C library 83 | * need to be compiled with * -mbacktrace, otherwise the user 84 | * stack will not contain function frame. 85 | */ 86 | void perf_callchain_user(struct perf_callchain_entry_ctx *entry, 87 | struct pt_regs *regs) 88 | { 89 | unsigned long fp = 0; 90 | 91 | /* C-SKY does not support virtualization. */ 92 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) 93 | return; 94 | 95 | fp = regs->regs[4]; 96 | perf_callchain_store(entry, regs->pc); 97 | 98 | /* 99 | * While backtrace from leaf function, lr is normally 100 | * not saved inside frame on C-SKY, so get lr from pt_regs 101 | * at the sample point. However, lr value can be incorrect if 102 | * lr is used as temp register 103 | */ 104 | fp = user_backtrace(entry, fp, regs->lr); 105 | 106 | while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) 107 | fp = user_backtrace(entry, fp, 0); 108 | } 109 | 110 | void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, 111 | struct pt_regs *regs) 112 | { 113 | struct stackframe fr; 114 | 115 | /* C-SKY does not support virtualization. 
*/ 116 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { 117 | pr_warn("C-SKY does not support perf in guest mode!"); 118 | return; 119 | } 120 | 121 | fr.fp = regs->regs[4]; 122 | fr.lr = regs->lr; 123 | walk_stackframe(&fr, entry); 124 | } 125 | -------------------------------------------------------------------------------- /arch/csky/kernel/perf_regs.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | u64 perf_reg_value(struct pt_regs *regs, int idx) 12 | { 13 | if (WARN_ON_ONCE((u32)idx >= PERF_REG_CSKY_MAX)) 14 | return 0; 15 | 16 | return (u64)*((u32 *)regs + idx); 17 | } 18 | 19 | #define REG_RESERVED (~((1ULL << PERF_REG_CSKY_MAX) - 1)) 20 | 21 | int perf_reg_validate(u64 mask) 22 | { 23 | if (!mask || mask & REG_RESERVED) 24 | return -EINVAL; 25 | 26 | return 0; 27 | } 28 | 29 | u64 perf_reg_abi(struct task_struct *task) 30 | { 31 | return PERF_SAMPLE_REGS_ABI_32; 32 | } 33 | 34 | void perf_get_regs_user(struct perf_regs *regs_user, 35 | struct pt_regs *regs, 36 | struct pt_regs *regs_user_copy) 37 | { 38 | regs_user->regs = task_pt_regs(current); 39 | regs_user->abi = perf_reg_abi(current); 40 | } 41 | -------------------------------------------------------------------------------- /arch/csky/kernel/power.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | 6 | void (*pm_power_off)(void); 7 | EXPORT_SYMBOL(pm_power_off); 8 | 9 | void machine_power_off(void) 10 | { 11 | local_irq_disable(); 12 | if (pm_power_off) 13 | pm_power_off(); 14 | asm volatile ("bkpt"); 15 | } 16 | 17 | void machine_halt(void) 18 | { 19 | local_irq_disable(); 20 | if (pm_power_off) 21 | pm_power_off(); 22 | asm volatile ("bkpt"); 23 | } 24 | 25 | void machine_restart(char *cmd) 26 | { 27 | local_irq_disable(); 28 | do_kernel_restart(cmd); 29 | asm volatile ("bkpt"); 30 | } 31 | -------------------------------------------------------------------------------- /arch/csky/kernel/probes/Makefile: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0 2 | obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o 3 | obj-$(CONFIG_KPROBES) += kprobes_trampoline.o 4 | obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o 5 | obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o 6 | 7 | CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) 8 | -------------------------------------------------------------------------------- /arch/csky/kernel/probes/decode-insn.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0+ 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "decode-insn.h" 10 | #include "simulate-insn.h" 11 | 12 | /* Return: 13 | * INSN_REJECTED If instruction is one not allowed to kprobe, 14 | * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. 
15 | */ 16 | enum probe_insn __kprobes 17 | csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api) 18 | { 19 | probe_opcode_t insn = le32_to_cpu(*addr); 20 | 21 | CSKY_INSN_SET_SIMULATE(br16, insn); 22 | CSKY_INSN_SET_SIMULATE(bt16, insn); 23 | CSKY_INSN_SET_SIMULATE(bf16, insn); 24 | CSKY_INSN_SET_SIMULATE(jmp16, insn); 25 | CSKY_INSN_SET_SIMULATE(jsr16, insn); 26 | CSKY_INSN_SET_SIMULATE(lrw16, insn); 27 | CSKY_INSN_SET_SIMULATE(pop16, insn); 28 | 29 | CSKY_INSN_SET_SIMULATE(br32, insn); 30 | CSKY_INSN_SET_SIMULATE(bt32, insn); 31 | CSKY_INSN_SET_SIMULATE(bf32, insn); 32 | CSKY_INSN_SET_SIMULATE(jmp32, insn); 33 | CSKY_INSN_SET_SIMULATE(jsr32, insn); 34 | CSKY_INSN_SET_SIMULATE(lrw32, insn); 35 | CSKY_INSN_SET_SIMULATE(pop32, insn); 36 | 37 | CSKY_INSN_SET_SIMULATE(bez32, insn); 38 | CSKY_INSN_SET_SIMULATE(bnez32, insn); 39 | CSKY_INSN_SET_SIMULATE(bnezad32, insn); 40 | CSKY_INSN_SET_SIMULATE(bhsz32, insn); 41 | CSKY_INSN_SET_SIMULATE(bhz32, insn); 42 | CSKY_INSN_SET_SIMULATE(blsz32, insn); 43 | CSKY_INSN_SET_SIMULATE(blz32, insn); 44 | CSKY_INSN_SET_SIMULATE(bsr32, insn); 45 | CSKY_INSN_SET_SIMULATE(jmpi32, insn); 46 | CSKY_INSN_SET_SIMULATE(jsri32, insn); 47 | 48 | return INSN_GOOD; 49 | } 50 | -------------------------------------------------------------------------------- /arch/csky/kernel/probes/decode-insn.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0+ */ 2 | 3 | #ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H 4 | #define __CSKY_KERNEL_KPROBES_DECODE_INSN_H 5 | 6 | #include 7 | #include 8 | 9 | enum probe_insn { 10 | INSN_REJECTED, 11 | INSN_GOOD_NO_SLOT, 12 | INSN_GOOD, 13 | }; 14 | 15 | #define is_insn32(insn) ((insn & 0xc000) == 0xc000) 16 | 17 | enum probe_insn __kprobes 18 | csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi); 19 | 20 | #endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */ 21 | -------------------------------------------------------------------------------- /arch/csky/kernel/probes/ftrace.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | 3 | #include 4 | 5 | int arch_check_ftrace_location(struct kprobe *p) 6 | { 7 | if (ftrace_location((unsigned long)p->addr)) 8 | p->flags |= KPROBE_FLAG_FTRACE; 9 | return 0; 10 | } 11 | 12 | /* Ftrace callback handler for kprobes -- called under preepmt disabed */ 13 | void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 14 | struct ftrace_ops *ops, struct pt_regs *regs) 15 | { 16 | bool lr_saver = false; 17 | struct kprobe *p; 18 | struct kprobe_ctlblk *kcb; 19 | 20 | /* Preempt is disabled by ftrace */ 21 | p = get_kprobe((kprobe_opcode_t *)ip); 22 | if (!p) { 23 | p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE)); 24 | if (unlikely(!p) || kprobe_disabled(p)) 25 | return; 26 | lr_saver = true; 27 | } 28 | 29 | kcb = get_kprobe_ctlblk(); 30 | if (kprobe_running()) { 31 | kprobes_inc_nmissed_count(p); 32 | } else { 33 | unsigned long orig_ip = instruction_pointer(regs); 34 | 35 | if (lr_saver) 36 | ip -= MCOUNT_INSN_SIZE; 37 | instruction_pointer_set(regs, ip); 38 | __this_cpu_write(current_kprobe, p); 39 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; 40 | if (!p->pre_handler || !p->pre_handler(p, regs)) { 41 | /* 42 | * Emulate singlestep (and also recover regs->pc) 43 | * as if there is a nop 44 | */ 45 | instruction_pointer_set(regs, 46 | (unsigned long)p->addr + MCOUNT_INSN_SIZE); 47 | if (unlikely(p->post_handler)) 
{ 48 | kcb->kprobe_status = KPROBE_HIT_SSDONE; 49 | p->post_handler(p, regs, 0); 50 | } 51 | instruction_pointer_set(regs, orig_ip); 52 | } 53 | /* 54 | * If pre_handler returns !0, it changes regs->pc. We have to 55 | * skip emulating post_handler. 56 | */ 57 | __this_cpu_write(current_kprobe, NULL); 58 | } 59 | } 60 | NOKPROBE_SYMBOL(kprobe_ftrace_handler); 61 | 62 | int arch_prepare_kprobe_ftrace(struct kprobe *p) 63 | { 64 | p->ainsn.api.insn = NULL; 65 | return 0; 66 | } 67 | -------------------------------------------------------------------------------- /arch/csky/kernel/probes/kprobes_trampoline.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0+ */ 2 | 3 | #include 4 | 5 | #include 6 | 7 | ENTRY(kretprobe_trampoline) 8 | SAVE_REGS_FTRACE 9 | 10 | mov a0, sp /* pt_regs */ 11 | 12 | jbsr trampoline_probe_handler 13 | 14 | /* use the result as the return-address */ 15 | mov lr, a0 16 | 17 | RESTORE_REGS_FTRACE 18 | rts 19 | ENDPROC(kretprobe_trampoline) 20 | -------------------------------------------------------------------------------- /arch/csky/kernel/probes/simulate-insn.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0+ */ 2 | 3 | #ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H 4 | #define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H 5 | 6 | #define __CSKY_INSN_FUNCS(name, mask, val) \ 7 | static __always_inline bool csky_insn_is_##name(probe_opcode_t code) \ 8 | { \ 9 | BUILD_BUG_ON(~(mask) & (val)); \ 10 | return (code & (mask)) == (val); \ 11 | } \ 12 | void simulate_##name(u32 opcode, long addr, struct pt_regs *regs); 13 | 14 | #define CSKY_INSN_SET_SIMULATE(name, code) \ 15 | do { \ 16 | if (csky_insn_is_##name(code)) { \ 17 | api->handler = simulate_##name; \ 18 | return INSN_GOOD_NO_SLOT; \ 19 | } \ 20 | } while (0) 21 | 22 | __CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400) 23 | __CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800) 24 | __CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00) 25 | __CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800) 26 | __CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801) 27 | __CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000) 28 | __CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480) 29 | 30 | __CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800) 31 | __CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860) 32 | __CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840) 33 | __CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0) 34 | __CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0) 35 | __CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80) 36 | __CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0) 37 | 38 | __CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900) 39 | __CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920) 40 | __CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820) 41 | __CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0) 42 | __CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940) 43 | __CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960) 44 | __CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980) 45 | __CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000) 46 | __CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0) 47 | __CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0) 48 | 49 | #endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */ 50 | -------------------------------------------------------------------------------- /arch/csky/kernel/probes/uprobes.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | /* 3 | * Copyright (C) 2014-2016 Pratyush Anand 4 | */ 5 | #include 6 | #include 7 | 
#include 8 | #include 9 | 10 | #include "decode-insn.h" 11 | 12 | #define UPROBE_TRAP_NR UINT_MAX 13 | 14 | bool is_swbp_insn(uprobe_opcode_t *insn) 15 | { 16 | return (*insn & 0xffff) == UPROBE_SWBP_INSN; 17 | } 18 | 19 | unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) 20 | { 21 | return instruction_pointer(regs); 22 | } 23 | 24 | int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, 25 | unsigned long addr) 26 | { 27 | probe_opcode_t insn; 28 | 29 | insn = *(probe_opcode_t *)(&auprobe->insn[0]); 30 | 31 | auprobe->insn_size = is_insn32(insn) ? 4 : 2; 32 | 33 | switch (csky_probe_decode_insn(&insn, &auprobe->api)) { 34 | case INSN_REJECTED: 35 | return -EINVAL; 36 | 37 | case INSN_GOOD_NO_SLOT: 38 | auprobe->simulate = true; 39 | break; 40 | 41 | default: 42 | break; 43 | } 44 | 45 | return 0; 46 | } 47 | 48 | int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) 49 | { 50 | struct uprobe_task *utask = current->utask; 51 | 52 | utask->autask.saved_trap_no = current->thread.trap_no; 53 | current->thread.trap_no = UPROBE_TRAP_NR; 54 | 55 | instruction_pointer_set(regs, utask->xol_vaddr); 56 | 57 | user_enable_single_step(current); 58 | 59 | return 0; 60 | } 61 | 62 | int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) 63 | { 64 | struct uprobe_task *utask = current->utask; 65 | 66 | WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR); 67 | 68 | instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size); 69 | 70 | user_disable_single_step(current); 71 | 72 | return 0; 73 | } 74 | 75 | bool arch_uprobe_xol_was_trapped(struct task_struct *t) 76 | { 77 | if (t->thread.trap_no != UPROBE_TRAP_NR) 78 | return true; 79 | 80 | return false; 81 | } 82 | 83 | bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) 84 | { 85 | probe_opcode_t insn; 86 | unsigned long addr; 87 | 88 | if (!auprobe->simulate) 89 | return false; 90 | 91 | insn = *(probe_opcode_t *)(&auprobe->insn[0]); 92 | addr = instruction_pointer(regs); 93 | 94 | if (auprobe->api.handler) 95 | auprobe->api.handler(insn, addr, regs); 96 | 97 | return true; 98 | } 99 | 100 | void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) 101 | { 102 | struct uprobe_task *utask = current->utask; 103 | 104 | /* 105 | * Task has received a fatal signal, so reset back to probbed 106 | * address. 
107 | */ 108 | instruction_pointer_set(regs, utask->vaddr); 109 | 110 | user_disable_single_step(current); 111 | } 112 | 113 | bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, 114 | struct pt_regs *regs) 115 | { 116 | if (ctx == RP_CHECK_CHAIN_CALL) 117 | return regs->usp <= ret->stack; 118 | else 119 | return regs->usp < ret->stack; 120 | } 121 | 122 | unsigned long 123 | arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, 124 | struct pt_regs *regs) 125 | { 126 | unsigned long ra; 127 | 128 | ra = regs->lr; 129 | 130 | regs->lr = trampoline_vaddr; 131 | 132 | return ra; 133 | } 134 | 135 | int arch_uprobe_exception_notify(struct notifier_block *self, 136 | unsigned long val, void *data) 137 | { 138 | return NOTIFY_DONE; 139 | } 140 | 141 | int uprobe_breakpoint_handler(struct pt_regs *regs) 142 | { 143 | if (uprobe_pre_sstep_notifier(regs)) 144 | return 1; 145 | 146 | return 0; 147 | } 148 | 149 | int uprobe_single_step_handler(struct pt_regs *regs) 150 | { 151 | if (uprobe_post_sstep_notifier(regs)) 152 | return 1; 153 | 154 | return 0; 155 | } 156 | -------------------------------------------------------------------------------- /arch/csky/kernel/process.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | #include 16 | 17 | struct cpuinfo_csky cpu_data[NR_CPUS]; 18 | 19 | #ifdef CONFIG_STACKPROTECTOR 20 | #include 21 | unsigned long __stack_chk_guard __read_mostly; 22 | EXPORT_SYMBOL(__stack_chk_guard); 23 | #endif 24 | 25 | asmlinkage void ret_from_fork(void); 26 | asmlinkage void ret_from_kernel_thread(void); 27 | 28 | /* 29 | * Some archs flush debug and FPU info here 30 | */ 31 | void flush_thread(void){} 32 | 33 | /* 34 | * Return saved PC from a blocked thread 35 | */ 36 | unsigned long thread_saved_pc(struct task_struct *tsk) 37 | { 38 | struct switch_stack *sw = (struct switch_stack *)tsk->thread.sp; 39 | 40 | return sw->r15; 41 | } 42 | 43 | int copy_thread(unsigned long clone_flags, 44 | unsigned long usp, 45 | unsigned long kthread_arg, 46 | struct task_struct *p) 47 | { 48 | struct switch_stack *childstack; 49 | struct pt_regs *childregs = task_pt_regs(p); 50 | 51 | #ifdef CONFIG_CPU_HAS_FPU 52 | save_to_user_fp(&p->thread.user_fp); 53 | #endif 54 | 55 | childstack = ((struct switch_stack *) childregs) - 1; 56 | memset(childstack, 0, sizeof(struct switch_stack)); 57 | 58 | /* setup thread.sp for switch_to !!! */ 59 | p->thread.sp = (unsigned long)childstack; 60 | 61 | if (unlikely(p->flags & PF_KTHREAD)) { 62 | memset(childregs, 0, sizeof(struct pt_regs)); 63 | childstack->r15 = (unsigned long) ret_from_kernel_thread; 64 | childstack->r10 = kthread_arg; 65 | childstack->r9 = usp; 66 | childregs->sr = mfcr("psr"); 67 | } else { 68 | *childregs = *(current_pt_regs()); 69 | if (usp) 70 | childregs->usp = usp; 71 | if (clone_flags & CLONE_SETTLS) 72 | task_thread_info(p)->tp_value = childregs->tls 73 | = childregs->regs[0]; 74 | 75 | childregs->a0 = 0; 76 | childstack->r15 = (unsigned long) ret_from_fork; 77 | } 78 | 79 | return 0; 80 | } 81 | 82 | /* Fill in the fpu structure for a core dump. 
*/ 83 | int dump_fpu(struct pt_regs *regs, struct user_fp *fpu) 84 | { 85 | memcpy(fpu, ¤t->thread.user_fp, sizeof(*fpu)); 86 | return 1; 87 | } 88 | EXPORT_SYMBOL(dump_fpu); 89 | 90 | int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs) 91 | { 92 | struct pt_regs *regs = task_pt_regs(tsk); 93 | 94 | /* NOTE: usp is error value. */ 95 | ELF_CORE_COPY_REGS((*pr_regs), regs) 96 | 97 | return 1; 98 | } 99 | 100 | #ifndef CONFIG_CPU_PM_NONE 101 | void arch_cpu_idle(void) 102 | { 103 | dcache_wbinv_all(); 104 | #ifdef CONFIG_CPU_PM_WAIT 105 | asm volatile("wait\n"); 106 | #endif 107 | 108 | #ifdef CONFIG_CPU_PM_DOZE 109 | asm volatile("doze\n"); 110 | #endif 111 | 112 | #ifdef CONFIG_CPU_PM_STOP 113 | asm volatile("stop\n"); 114 | #endif 115 | local_irq_enable(); 116 | } 117 | #endif 118 | -------------------------------------------------------------------------------- /arch/csky/kernel/setup.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #ifdef CONFIG_DUMMY_CONSOLE 18 | struct screen_info screen_info = { 19 | .orig_video_lines = 30, 20 | .orig_video_cols = 80, 21 | .orig_video_mode = 0, 22 | .orig_video_ega_bx = 0, 23 | .orig_video_isVGA = 1, 24 | .orig_video_points = 8 25 | }; 26 | #endif 27 | 28 | static void __init csky_memblock_init(void) 29 | { 30 | unsigned long zone_size[MAX_NR_ZONES]; 31 | signed long size; 32 | 33 | memblock_reserve(__pa(_stext), _end - _stext); 34 | 35 | early_init_fdt_reserve_self(); 36 | early_init_fdt_scan_reserved_mem(); 37 | 38 | memblock_dump_all(); 39 | 40 | memset(zone_size, 0, sizeof(zone_size)); 41 | 42 | min_low_pfn = PFN_UP(memblock_start_of_DRAM()); 43 | max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM()); 44 | 45 | size = max_pfn - min_low_pfn; 46 | 47 | if (size <= PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET)) 48 | zone_size[ZONE_NORMAL] = size; 49 | else if (size < PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) { 50 | zone_size[ZONE_NORMAL] = 51 | PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET); 52 | max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL]; 53 | } else { 54 | zone_size[ZONE_NORMAL] = 55 | PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); 56 | max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL]; 57 | #ifdef CONFIG_PAGE_OFFSET_80000000 58 | write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE); 59 | #endif 60 | } 61 | 62 | mmu_init(min_low_pfn, max_low_pfn); 63 | #ifdef CONFIG_HIGHMEM 64 | zone_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; 65 | 66 | highstart_pfn = max_low_pfn; 67 | highend_pfn = max_pfn; 68 | #endif 69 | memblock_set_current_limit(PFN_PHYS(max_low_pfn)); 70 | 71 | dma_contiguous_reserve(0); 72 | 73 | free_area_init_node(0, zone_size, min_low_pfn, NULL); 74 | } 75 | 76 | void __init setup_arch(char **cmdline_p) 77 | { 78 | *cmdline_p = boot_command_line; 79 | 80 | console_verbose(); 81 | 82 | pr_info("Phys. 
mem: %ldMB\n", 83 | (unsigned long) memblock_phys_mem_size()/1024/1024); 84 | 85 | init_mm.start_code = (unsigned long) _stext; 86 | init_mm.end_code = (unsigned long) _etext; 87 | init_mm.end_data = (unsigned long) _edata; 88 | init_mm.brk = (unsigned long) _end; 89 | 90 | parse_early_param(); 91 | 92 | csky_memblock_init(); 93 | 94 | unflatten_and_copy_device_tree(); 95 | 96 | #ifdef CONFIG_SMP 97 | setup_smp(); 98 | #endif 99 | 100 | sparse_init(); 101 | 102 | fixaddr_init(); 103 | 104 | #ifdef CONFIG_HIGHMEM 105 | kmap_init(); 106 | #endif 107 | 108 | #if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE) 109 | conswitchp = &dummy_con; 110 | #endif 111 | } 112 | 113 | unsigned long va_pa_offset; 114 | EXPORT_SYMBOL(va_pa_offset); 115 | 116 | static inline unsigned long read_mmu_msa(void) 117 | { 118 | #ifdef CONFIG_PAGE_OFFSET_80000000 119 | return read_mmu_msa0(); 120 | #endif 121 | 122 | #ifdef CONFIG_PAGE_OFFSET_A0000000 123 | return read_mmu_msa1(); 124 | #endif 125 | } 126 | 127 | asmlinkage __visible void __init csky_start(unsigned int unused, 128 | void *dtb_start) 129 | { 130 | /* Clean up bss section */ 131 | memset(__bss_start, 0, __bss_stop - __bss_start); 132 | 133 | va_pa_offset = read_mmu_msa() & ~(SSEG_SIZE - 1); 134 | 135 | pre_trap_init(); 136 | 137 | if (dtb_start == NULL) 138 | early_init_dt_scan(__dtb_start); 139 | else 140 | early_init_dt_scan(dtb_start); 141 | 142 | start_kernel(); 143 | 144 | asm volatile("br .\n"); 145 | } 146 | -------------------------------------------------------------------------------- /arch/csky/kernel/stacktrace.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #ifdef CONFIG_FRAME_POINTER 10 | 11 | struct stackframe { 12 | unsigned long fp; 13 | unsigned long ra; 14 | }; 15 | 16 | void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, 17 | bool (*fn)(unsigned long, void *), void *arg) 18 | { 19 | unsigned long fp, sp, pc; 20 | 21 | if (regs) { 22 | fp = frame_pointer(regs); 23 | sp = user_stack_pointer(regs); 24 | pc = instruction_pointer(regs); 25 | } else if (task == NULL || task == current) { 26 | const register unsigned long current_sp __asm__ ("sp"); 27 | const register unsigned long current_fp __asm__ ("r8"); 28 | fp = current_fp; 29 | sp = current_sp; 30 | pc = (unsigned long)walk_stackframe; 31 | } else { 32 | /* task blocked in __switch_to */ 33 | fp = thread_saved_fp(task); 34 | sp = thread_saved_sp(task); 35 | pc = thread_saved_lr(task); 36 | } 37 | 38 | for (;;) { 39 | unsigned long low, high; 40 | struct stackframe *frame; 41 | 42 | if (unlikely(!__kernel_text_address(pc) || fn(pc, arg))) 43 | break; 44 | 45 | /* Validate frame pointer */ 46 | low = sp; 47 | high = ALIGN(sp, THREAD_SIZE); 48 | if (unlikely(fp < low || fp > high || fp & 0x3)) 49 | break; 50 | /* Unwind stack frame */ 51 | frame = (struct stackframe *)fp; 52 | sp = fp; 53 | fp = frame->fp; 54 | pc = ftrace_graph_ret_addr(current, NULL, frame->ra, 55 | (unsigned long *)(fp - 8)); 56 | } 57 | } 58 | 59 | #else /* !CONFIG_FRAME_POINTER */ 60 | 61 | static void notrace walk_stackframe(struct task_struct *task, 62 | struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) 63 | { 64 | unsigned long sp, pc; 65 | unsigned long *ksp; 66 | 67 | if (regs) { 68 | sp = user_stack_pointer(regs); 69 | pc = instruction_pointer(regs); 70 | } else if (task == NULL || task == current) { 71 | const 
register unsigned long current_sp __asm__ ("sp"); 72 | sp = current_sp; 73 | pc = (unsigned long)walk_stackframe; 74 | } else { 75 | /* task blocked in __switch_to */ 76 | sp = thread_saved_sp(task); 77 | pc = thread_saved_lr(task); 78 | } 79 | 80 | if (unlikely(sp & 0x3)) 81 | return; 82 | 83 | ksp = (unsigned long *)sp; 84 | while (!kstack_end(ksp)) { 85 | if (__kernel_text_address(pc) && unlikely(fn(pc, arg))) 86 | break; 87 | pc = (*ksp++) - 0x4; 88 | } 89 | } 90 | #endif /* CONFIG_FRAME_POINTER */ 91 | 92 | static bool print_trace_address(unsigned long pc, void *arg) 93 | { 94 | print_ip_sym(pc); 95 | return false; 96 | } 97 | 98 | void show_stack(struct task_struct *task, unsigned long *sp) 99 | { 100 | pr_cont("Call Trace:\n"); 101 | walk_stackframe(task, NULL, print_trace_address, NULL); 102 | } 103 | 104 | static bool save_wchan(unsigned long pc, void *arg) 105 | { 106 | if (!in_sched_functions(pc)) { 107 | unsigned long *p = arg; 108 | *p = pc; 109 | return true; 110 | } 111 | return false; 112 | } 113 | 114 | unsigned long get_wchan(struct task_struct *task) 115 | { 116 | unsigned long pc = 0; 117 | 118 | if (likely(task && task != current && task->state != TASK_RUNNING)) 119 | walk_stackframe(task, NULL, save_wchan, &pc); 120 | return pc; 121 | } 122 | 123 | #ifdef CONFIG_STACKTRACE 124 | static bool __save_trace(unsigned long pc, void *arg, bool nosched) 125 | { 126 | struct stack_trace *trace = arg; 127 | 128 | if (unlikely(nosched && in_sched_functions(pc))) 129 | return false; 130 | if (unlikely(trace->skip > 0)) { 131 | trace->skip--; 132 | return false; 133 | } 134 | 135 | trace->entries[trace->nr_entries++] = pc; 136 | return (trace->nr_entries >= trace->max_entries); 137 | } 138 | 139 | static bool save_trace(unsigned long pc, void *arg) 140 | { 141 | return __save_trace(pc, arg, false); 142 | } 143 | 144 | /* 145 | * Save stack-backtrace addresses into a stack_trace buffer. 146 | */ 147 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 148 | { 149 | walk_stackframe(tsk, NULL, save_trace, trace); 150 | } 151 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 152 | 153 | void save_stack_trace(struct stack_trace *trace) 154 | { 155 | save_stack_trace_tsk(NULL, trace); 156 | } 157 | EXPORT_SYMBOL_GPL(save_stack_trace); 158 | 159 | #endif /* CONFIG_STACKTRACE */ 160 | -------------------------------------------------------------------------------- /arch/csky/kernel/syscall.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | 6 | SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) 7 | { 8 | struct thread_info *ti = task_thread_info(current); 9 | struct pt_regs *reg = current_pt_regs(); 10 | 11 | reg->tls = addr; 12 | ti->tp_value = addr; 13 | 14 | return 0; 15 | } 16 | 17 | SYSCALL_DEFINE6(mmap2, 18 | unsigned long, addr, 19 | unsigned long, len, 20 | unsigned long, prot, 21 | unsigned long, flags, 22 | unsigned long, fd, 23 | off_t, offset) 24 | { 25 | if (unlikely(offset & (~PAGE_MASK >> 12))) 26 | return -EINVAL; 27 | 28 | return ksys_mmap_pgoff(addr, len, prot, flags, fd, 29 | offset >> (PAGE_SHIFT - 12)); 30 | } 31 | 32 | /* 33 | * for abiv1 the 64bits args should be even th, So we need mov the advice 34 | * forward. 
35 | */ 36 | SYSCALL_DEFINE4(csky_fadvise64_64, 37 | int, fd, 38 | int, advice, 39 | loff_t, offset, 40 | loff_t, len) 41 | { 42 | return ksys_fadvise64_64(fd, offset, len, advice); 43 | } 44 | -------------------------------------------------------------------------------- /arch/csky/kernel/syscall_table.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | 7 | #undef __SYSCALL 8 | #define __SYSCALL(nr, call)[nr] = (call), 9 | 10 | #define sys_fadvise64_64 sys_csky_fadvise64_64 11 | void * const sys_call_table[__NR_syscalls] __page_aligned_data = { 12 | [0 ... __NR_syscalls - 1] = sys_ni_syscall, 13 | #include 14 | }; 15 | -------------------------------------------------------------------------------- /arch/csky/kernel/time.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | 7 | void __init time_init(void) 8 | { 9 | of_clk_init(NULL); 10 | timer_probe(); 11 | } 12 | -------------------------------------------------------------------------------- /arch/csky/kernel/vdso.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | #include 17 | 18 | static struct page *vdso_page; 19 | 20 | static int __init init_vdso(void) 21 | { 22 | struct csky_vdso *vdso; 23 | int err = 0; 24 | 25 | vdso_page = alloc_page(GFP_KERNEL); 26 | if (!vdso_page) 27 | panic("Cannot allocate vdso"); 28 | 29 | vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL); 30 | if (!vdso) 31 | panic("Cannot map vdso"); 32 | 33 | clear_page(vdso); 34 | 35 | err = setup_vdso_page(vdso->rt_signal_retcode); 36 | if (err) 37 | panic("Cannot set signal return code, err: %x.", err); 38 | 39 | dcache_wb_range((unsigned long)vdso, (unsigned long)vdso + 16); 40 | 41 | vunmap(vdso); 42 | 43 | return 0; 44 | } 45 | subsys_initcall(init_vdso); 46 | 47 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 48 | { 49 | int ret; 50 | unsigned long addr; 51 | struct mm_struct *mm = current->mm; 52 | 53 | down_write(&mm->mmap_sem); 54 | 55 | addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0); 56 | if (IS_ERR_VALUE(addr)) { 57 | ret = addr; 58 | goto up_fail; 59 | } 60 | 61 | ret = install_special_mapping( 62 | mm, 63 | addr, 64 | PAGE_SIZE, 65 | VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 66 | &vdso_page); 67 | if (ret) 68 | goto up_fail; 69 | 70 | mm->context.vdso = (void *)addr; 71 | 72 | up_fail: 73 | up_write(&mm->mmap_sem); 74 | return ret; 75 | } 76 | 77 | const char *arch_vma_name(struct vm_area_struct *vma) 78 | { 79 | if (vma->vm_mm == NULL) 80 | return NULL; 81 | 82 | if (vma->vm_start == (long)vma->vm_mm->context.vdso) 83 | return "[vdso]"; 84 | else 85 | return NULL; 86 | } 87 | -------------------------------------------------------------------------------- /arch/csky/kernel/vmlinux.lds.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | OUTPUT_ARCH(csky) 8 | 
ENTRY(_start) 9 | 10 | #ifndef __cskyBE__ 11 | jiffies = jiffies_64; 12 | #else 13 | jiffies = jiffies_64 + 4; 14 | #endif 15 | 16 | #define VBR_BASE \ 17 | . = ALIGN(1024); \ 18 | vec_base = .; \ 19 | . += 512; 20 | 21 | SECTIONS 22 | { 23 | . = PAGE_OFFSET + PHYS_OFFSET_OFFSET; 24 | 25 | _stext = .; 26 | __init_begin = .; 27 | HEAD_TEXT_SECTION 28 | INIT_TEXT_SECTION(PAGE_SIZE) 29 | INIT_DATA_SECTION(PAGE_SIZE) 30 | PERCPU_SECTION(L1_CACHE_BYTES) 31 | . = ALIGN(PAGE_SIZE); 32 | __init_end = .; 33 | 34 | .text : AT(ADDR(.text) - LOAD_OFFSET) { 35 | _text = .; 36 | VBR_BASE 37 | IRQENTRY_TEXT 38 | SOFTIRQENTRY_TEXT 39 | TEXT_TEXT 40 | SCHED_TEXT 41 | CPUIDLE_TEXT 42 | LOCK_TEXT 43 | KPROBES_TEXT 44 | *(.fixup) 45 | *(.gnu.warning) 46 | } = 0 47 | _etext = .; 48 | 49 | /* __init_begin __init_end must be page aligned for free_initmem */ 50 | . = ALIGN(PAGE_SIZE); 51 | 52 | 53 | _sdata = .; 54 | RO_DATA_SECTION(PAGE_SIZE) 55 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) 56 | _edata = .; 57 | 58 | #ifdef CONFIG_HAVE_TCM 59 | .tcm_start : { 60 | . = ALIGN(PAGE_SIZE); 61 | __tcm_start = .; 62 | } 63 | 64 | .text_data_tcm FIXADDR_TCM : AT(__tcm_start) 65 | { 66 | . = ALIGN(4); 67 | __stcm_text_data = .; 68 | *(.tcm.text) 69 | *(.tcm.rodata) 70 | #ifndef CONFIG_HAVE_DTCM 71 | *(.tcm.data) 72 | #endif 73 | . = ALIGN(4); 74 | __etcm_text_data = .; 75 | } 76 | 77 | . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm); 78 | 79 | #ifdef CONFIG_HAVE_DTCM 80 | #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE 81 | 82 | .dtcm_start : { 83 | __dtcm_start = .; 84 | } 85 | 86 | .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start) 87 | { 88 | . = ALIGN(4); 89 | __stcm_data = .; 90 | *(.tcm.data) 91 | . = ALIGN(4); 92 | __etcm_data = .; 93 | } 94 | 95 | . = ADDR(.dtcm_start) + SIZEOF(.data_tcm); 96 | 97 | .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) { 98 | #else 99 | .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) { 100 | #endif 101 | . = ALIGN(PAGE_SIZE); 102 | __tcm_end = .; 103 | } 104 | #endif 105 | 106 | NOTES 107 | EXCEPTION_TABLE(L1_CACHE_BYTES) 108 | BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) 109 | _end = . ; 110 | 111 | STABS_DEBUG 112 | DWARF_DEBUG 113 | 114 | DISCARDS 115 | } 116 | -------------------------------------------------------------------------------- /arch/csky/lib/Makefile: -------------------------------------------------------------------------------- 1 | lib-y := usercopy.o delay.o 2 | ifneq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), y) 3 | lib-y += string.o 4 | endif 5 | -------------------------------------------------------------------------------- /arch/csky/lib/delay.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | void __delay(unsigned long loops) 9 | { 10 | asm volatile ( 11 | "mov r0, r0\n" 12 | "1:declt %0\n" 13 | "bf 1b" 14 | : "=r"(loops) 15 | : "0"(loops)); 16 | } 17 | EXPORT_SYMBOL(__delay); 18 | 19 | void __const_udelay(unsigned long xloops) 20 | { 21 | unsigned long long loops; 22 | 23 | loops = (unsigned long long)xloops * loops_per_jiffy * HZ; 24 | 25 | __delay(loops >> 32); 26 | } 27 | EXPORT_SYMBOL(__const_udelay); 28 | 29 | void __udelay(unsigned long usecs) 30 | { 31 | __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */ 32 | } 33 | EXPORT_SYMBOL(__udelay); 34 | 35 | void __ndelay(unsigned long nsecs) 36 | { 37 | __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */ 38 | } 39 | EXPORT_SYMBOL(__ndelay); 40 | -------------------------------------------------------------------------------- /arch/csky/lib/string.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | /* 3 | * String functions optimized for hardware which doesn't 4 | * handle unaligned memory accesses efficiently. 5 | * 6 | * Copyright (C) 2021 Matteo Croce 7 | */ 8 | 9 | #include 10 | #include 11 | 12 | /* Minimum size for a word copy to be convenient */ 13 | #define BYTES_LONG sizeof(long) 14 | #define WORD_MASK (BYTES_LONG - 1) 15 | #define MIN_THRESHOLD (BYTES_LONG * 2) 16 | 17 | /* convenience union to avoid cast between different pointer types */ 18 | union types { 19 | u8 *as_u8; 20 | unsigned long *as_ulong; 21 | uintptr_t as_uptr; 22 | }; 23 | 24 | union const_types { 25 | const u8 *as_u8; 26 | unsigned long *as_ulong; 27 | uintptr_t as_uptr; 28 | }; 29 | 30 | void *memcpy(void *dest, const void *src, size_t count) 31 | { 32 | union const_types s = { .as_u8 = src }; 33 | union types d = { .as_u8 = dest }; 34 | int distance = 0; 35 | 36 | if (count < MIN_THRESHOLD) 37 | goto copy_remainder; 38 | 39 | /* Copy a byte at time until destination is aligned. */ 40 | for (; d.as_uptr & WORD_MASK; count--) 41 | *d.as_u8++ = *s.as_u8++; 42 | 43 | distance = s.as_uptr & WORD_MASK; 44 | 45 | if (distance) { 46 | unsigned long last, next; 47 | 48 | /* 49 | * s is distance bytes ahead of d, and d just reached 50 | * the alignment boundary. Move s backward to word align it 51 | * and shift data to compensate for distance, in order to do 52 | * word-by-word copy. 53 | */ 54 | s.as_u8 -= distance; 55 | 56 | next = s.as_ulong[0]; 57 | for (; count >= BYTES_LONG; count -= BYTES_LONG) { 58 | last = next; 59 | next = s.as_ulong[1]; 60 | 61 | d.as_ulong[0] = last >> (distance * 8) | 62 | next << ((BYTES_LONG - distance) * 8); 63 | 64 | d.as_ulong++; 65 | s.as_ulong++; 66 | } 67 | 68 | /* Restore s with the original offset. */ 69 | s.as_u8 += distance; 70 | } else { 71 | /* 72 | * If the source and dest lower bits are the same, do a simple 73 | * 32/64 bit wide copy. 74 | */ 75 | for (; count >= BYTES_LONG; count -= BYTES_LONG) 76 | *d.as_ulong++ = *s.as_ulong++; 77 | } 78 | 79 | copy_remainder: 80 | while (count--) 81 | *d.as_u8++ = *s.as_u8++; 82 | 83 | return dest; 84 | } 85 | EXPORT_SYMBOL(memcpy); 86 | 87 | /* 88 | * Simply check if the buffer overlaps an call memcpy() in case, 89 | * otherwise do a simple one byte at time backward copy. 
90 | */ 91 | void *memmove(void *dest, const void *src, size_t count) 92 | { 93 | if (dest < src || src + count <= dest) 94 | return memcpy(dest, src, count); 95 | 96 | if (dest > src) { 97 | const char *s = src + count; 98 | char *tmp = dest + count; 99 | 100 | while (count--) 101 | *--tmp = *--s; 102 | } 103 | return dest; 104 | } 105 | EXPORT_SYMBOL(memmove); 106 | 107 | void *memset(void *s, int c, size_t count) 108 | { 109 | union types dest = { .as_u8 = s }; 110 | 111 | if (count >= MIN_THRESHOLD) { 112 | unsigned long cu = (unsigned long)c; 113 | 114 | /* Compose an ulong with 'c' repeated 4/8 times */ 115 | cu |= cu << 8; 116 | cu |= cu << 16; 117 | /* Suppress warning on 32 bit machines */ 118 | cu |= (cu << 16) << 16; 119 | 120 | for (; count && dest.as_uptr & WORD_MASK; count--) 121 | *dest.as_u8++ = c; 122 | 123 | /* Copy using the largest size allowed */ 124 | for (; count >= BYTES_LONG; count -= BYTES_LONG) 125 | *dest.as_ulong++ = cu; 126 | } 127 | 128 | /* copy the remainder */ 129 | while (count--) 130 | *dest.as_u8++ = c; 131 | 132 | return s; 133 | } 134 | EXPORT_SYMBOL(memset); 135 | -------------------------------------------------------------------------------- /arch/csky/mm/Makefile: -------------------------------------------------------------------------------- 1 | ifeq ($(CONFIG_CPU_HAS_CACHEV2),y) 2 | obj-y += cachev2.o 3 | CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE) 4 | else 5 | obj-y += cachev1.o 6 | CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE) 7 | endif 8 | obj-y += dma/ 9 | obj-y += dma-mapping.o 10 | obj-y += fault.o 11 | obj-$(CONFIG_HIGHMEM) += highmem.o 12 | obj-y += init.o 13 | obj-y += ioremap.o 14 | obj-y += syscache.o 15 | obj-y += tlb.o 16 | obj-y += asid.o 17 | obj-y += context.o 18 | obj-$(CONFIG_HAVE_TCM) += tcm.o 19 | -------------------------------------------------------------------------------- /arch/csky/mm/cachev1.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #include 5 | #include 6 | #include 7 | 8 | /* for L1-cache */ 9 | #define INS_CACHE (1 << 0) 10 | #define DATA_CACHE (1 << 1) 11 | #define CACHE_INV (1 << 4) 12 | #define CACHE_CLR (1 << 5) 13 | #define CACHE_OMS (1 << 6) 14 | #define CACHE_ITS (1 << 7) 15 | #define CACHE_LICF (1 << 31) 16 | 17 | /* for L2-cache */ 18 | #define CR22_LEVEL_SHIFT (1) 19 | #define CR22_SET_SHIFT (7) 20 | #define CR22_WAY_SHIFT (30) 21 | #define CR22_WAY_SHIFT_L2 (29) 22 | 23 | static DEFINE_SPINLOCK(cache_lock); 24 | 25 | static inline void cache_op_line(unsigned long i, unsigned int val) 26 | { 27 | mtcr("cr22", i); 28 | mtcr("cr17", val); 29 | } 30 | 31 | #define CCR2_L2E (1 << 3) 32 | static void cache_op_all(unsigned int value, unsigned int l2) 33 | { 34 | mtcr("cr17", value | CACHE_CLR); 35 | mb(); 36 | 37 | if (l2 && (mfcr_ccr2() & CCR2_L2E)) { 38 | mtcr("cr24", value | CACHE_CLR); 39 | mb(); 40 | } 41 | } 42 | 43 | static void cache_op_range( 44 | unsigned int start, 45 | unsigned int end, 46 | unsigned int value, 47 | unsigned int l2) 48 | { 49 | unsigned long i, flags; 50 | unsigned int val = value | CACHE_CLR | CACHE_OMS; 51 | bool l2_sync; 52 | 53 | if (unlikely((end - start) >= PAGE_SIZE) || 54 | unlikely(start < PAGE_OFFSET) || 55 | unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) { 56 | cache_op_all(value, l2); 57 | return; 58 | } 59 | 60 | if ((mfcr_ccr2() & CCR2_L2E) && l2) 61 | l2_sync = 1; 62 | else 63 | l2_sync = 0; 64 | 65 | spin_lock_irqsave(&cache_lock, flags); 66 | 67 | i = start & ~(L1_CACHE_BYTES - 1); 68 | for (; i < end; i += L1_CACHE_BYTES) { 69 | cache_op_line(i, val); 70 | if (l2_sync) { 71 | mb(); 72 | mtcr("cr24", val); 73 | } 74 | } 75 | spin_unlock_irqrestore(&cache_lock, flags); 76 | 77 | mb(); 78 | } 79 | 80 | void dcache_wb_line(unsigned long start) 81 | { 82 | asm volatile("idly4\n":::"memory"); 83 | cache_op_line(start, DATA_CACHE|CACHE_CLR); 84 | mb(); 85 | } 86 | 87 | void icache_inv_range(unsigned long start, unsigned long end) 88 | { 89 | cache_op_range(start, end, INS_CACHE|CACHE_INV, 0); 90 | } 91 | 92 | void icache_inv_all(void) 93 | { 94 | cache_op_all(INS_CACHE|CACHE_INV, 0); 95 | } 96 | 97 | void local_icache_inv_all(void *priv) 98 | { 99 | cache_op_all(INS_CACHE|CACHE_INV, 0); 100 | } 101 | 102 | void dcache_wb_range(unsigned long start, unsigned long end) 103 | { 104 | cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0); 105 | } 106 | 107 | void dcache_wbinv_all(void) 108 | { 109 | cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0); 110 | } 111 | 112 | void cache_wbinv_range(unsigned long start, unsigned long end) 113 | { 114 | cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); 115 | } 116 | EXPORT_SYMBOL(cache_wbinv_range); 117 | 118 | void cache_wbinv_all(void) 119 | { 120 | cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); 121 | } 122 | 123 | void dma_wbinv_range(unsigned long start, unsigned long end) 124 | { 125 | cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); 126 | } 127 | 128 | void dma_inv_range(unsigned long start, unsigned long end) 129 | { 130 | cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); 131 | } 132 | 133 | void dma_wb_range(unsigned long start, unsigned long end) 134 | { 135 | cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); 136 | } 137 | -------------------------------------------------------------------------------- /arch/csky/mm/cachev2.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | 
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | /* for L1-cache */ 11 | #define INS_CACHE (1 << 0) 12 | #define DATA_CACHE (1 << 1) 13 | #define CACHE_INV (1 << 4) 14 | #define CACHE_CLR (1 << 5) 15 | #define CACHE_OMS (1 << 6) 16 | 17 | void local_icache_inv_all(void *priv) 18 | { 19 | mtcr("cr17", INS_CACHE|CACHE_INV); 20 | sync_is(); 21 | } 22 | 23 | #ifdef CONFIG_CPU_HAS_ICACHE_INS 24 | void icache_inv_range(unsigned long start, unsigned long end) 25 | { 26 | unsigned long i = start & ~(L1_CACHE_BYTES - 1); 27 | 28 | for (; i < end; i += L1_CACHE_BYTES) 29 | asm volatile("icache.iva %0\n"::"r"(i):"memory"); 30 | sync_is(); 31 | } 32 | #else 33 | struct cache_range { 34 | unsigned long start; 35 | unsigned long end; 36 | }; 37 | 38 | static DEFINE_SPINLOCK(cache_lock); 39 | 40 | static inline void cache_op_line(unsigned long i, unsigned int val) 41 | { 42 | mtcr("cr22", i); 43 | mtcr("cr17", val); 44 | } 45 | 46 | void local_icache_inv_range(void *priv) 47 | { 48 | struct cache_range *param = priv; 49 | unsigned long i = param->start & ~(L1_CACHE_BYTES - 1); 50 | unsigned long flags; 51 | 52 | spin_lock_irqsave(&cache_lock, flags); 53 | 54 | for (; i < param->end; i += L1_CACHE_BYTES) 55 | cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS); 56 | 57 | spin_unlock_irqrestore(&cache_lock, flags); 58 | 59 | sync_is(); 60 | } 61 | 62 | void icache_inv_range(unsigned long start, unsigned long end) 63 | { 64 | struct cache_range param = { start, end }; 65 | 66 | if (irqs_disabled()) 67 | local_icache_inv_range(¶m); 68 | else 69 | on_each_cpu(local_icache_inv_range, ¶m, 1); 70 | } 71 | #endif 72 | 73 | inline void dcache_wb_line(unsigned long start) 74 | { 75 | asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); 76 | sync_is(); 77 | } 78 | 79 | void dcache_wb_range(unsigned long start, unsigned long end) 80 | { 81 | unsigned long i = start & ~(L1_CACHE_BYTES - 1); 82 | 83 | for (; i < end; i += L1_CACHE_BYTES) 84 | asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); 85 | sync_is(); 86 | } 87 | 88 | void dcache_wbinv_all(void) 89 | { 90 | asm volatile("dcache.ciall\n":::"memory"); 91 | sync_is(); 92 | } 93 | 94 | void cache_wbinv_range(unsigned long start, unsigned long end) 95 | { 96 | dcache_wb_range(start, end); 97 | icache_inv_range(start, end); 98 | } 99 | EXPORT_SYMBOL(cache_wbinv_range); 100 | 101 | void dma_wbinv_range(unsigned long start, unsigned long end) 102 | { 103 | unsigned long i = start & ~(L1_CACHE_BYTES - 1); 104 | 105 | for (; i < end; i += L1_CACHE_BYTES) 106 | asm volatile("dcache.civa %0\n"::"r"(i):"memory"); 107 | sync_is(); 108 | } 109 | 110 | void dma_inv_range(unsigned long start, unsigned long end) 111 | { 112 | unsigned long i = start & ~(L1_CACHE_BYTES - 1); 113 | 114 | for (; i < end; i += L1_CACHE_BYTES) 115 | asm volatile("dcache.iva %0\n"::"r"(i):"memory"); 116 | sync_is(); 117 | } 118 | 119 | void dma_wb_range(unsigned long start, unsigned long end) 120 | { 121 | unsigned long i = start & ~(L1_CACHE_BYTES - 1); 122 | 123 | for (; i < end; i += L1_CACHE_BYTES) 124 | asm volatile("dcache.cva %0\n"::"r"(i):"memory"); 125 | sync_is(); 126 | } 127 | -------------------------------------------------------------------------------- /arch/csky/mm/context.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | static DEFINE_PER_CPU(atomic64_t, active_asids); 15 | static DEFINE_PER_CPU(u64, reserved_asids); 16 | 17 | struct asid_info asid_info; 18 | 19 | void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) 20 | { 21 | asid_check_context(&asid_info, &mm->context.asid, cpu, mm); 22 | } 23 | 24 | static void asid_flush_cpu_ctxt(void) 25 | { 26 | local_tlb_invalid_all(); 27 | } 28 | 29 | static int asids_init(void) 30 | { 31 | BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus()); 32 | 33 | if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1, 34 | asid_flush_cpu_ctxt)) 35 | panic("Unable to initialize ASID allocator for %lu ASIDs\n", 36 | NUM_ASIDS(&asid_info)); 37 | 38 | asid_info.active = &active_asids; 39 | asid_info.reserved = &reserved_asids; 40 | 41 | pr_info("ASID allocator initialised with %lu entries\n", 42 | NUM_CTXT_ASIDS(&asid_info)); 43 | 44 | return 0; 45 | } 46 | early_initcall(asids_init); 47 | -------------------------------------------------------------------------------- /arch/csky/mm/dma-mapping.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | extern int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot); 17 | static int __init atomic_pool_init(void) 18 | { 19 | return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL)); 20 | } 21 | arch_initcall(atomic_pool_init); 22 | 23 | static inline void cache_op(phys_addr_t paddr, size_t size, 24 | void (*fn)(unsigned long start, unsigned long end)) 25 | { 26 | struct page *page = phys_to_page(paddr); 27 | void *start = __va(page_to_phys(page)); 28 | unsigned long offset = offset_in_page(paddr); 29 | size_t left = size; 30 | 31 | do { 32 | size_t len = left; 33 | 34 | if (offset + len > PAGE_SIZE) 35 | len = PAGE_SIZE - offset; 36 | 37 | if (PageHighMem(page)) { 38 | start = kmap_atomic(page); 39 | 40 | fn((unsigned long)start + offset, 41 | (unsigned long)start + offset + len); 42 | 43 | kunmap_atomic(start); 44 | } else { 45 | fn((unsigned long)start + offset, 46 | (unsigned long)start + offset + len); 47 | } 48 | offset = 0; 49 | 50 | page++; 51 | start += PAGE_SIZE; 52 | left -= len; 53 | } while (left); 54 | } 55 | 56 | static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end) 57 | { 58 | memset((void *)start, 0, end - start); 59 | dma_wbinv_range(start, end); 60 | } 61 | 62 | void arch_dma_prep_coherent(struct page *page, size_t size) 63 | { 64 | cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range); 65 | } 66 | 67 | void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, 68 | size_t size, enum dma_data_direction dir) 69 | { 70 | switch (dir) { 71 | case DMA_TO_DEVICE: 72 | cache_op(paddr, size, dma_wb_range); 73 | break; 74 | case DMA_FROM_DEVICE: 75 | case DMA_BIDIRECTIONAL: 76 | cache_op(paddr, size, dma_wbinv_range); 77 | break; 78 | default: 79 | BUG(); 80 | } 81 | } 82 | 83 | void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, 84 | size_t size, enum dma_data_direction dir) 85 | { 86 | switch (dir) { 87 | case DMA_TO_DEVICE: 88 | return; 89 | case DMA_FROM_DEVICE: 90 | case DMA_BIDIRECTIONAL: 91 | 
cache_op(paddr, size, dma_inv_range); 92 | break; 93 | default: 94 | BUG(); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /arch/csky/mm/dma/Makefile: -------------------------------------------------------------------------------- 1 | obj-y += dma-mapping.o 2 | -------------------------------------------------------------------------------- /arch/csky/mm/highmem.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | static pte_t *kmap_pte; 13 | 14 | unsigned long highstart_pfn, highend_pfn; 15 | 16 | void *kmap(struct page *page) 17 | { 18 | void *addr; 19 | 20 | might_sleep(); 21 | if (!PageHighMem(page)) 22 | return page_address(page); 23 | addr = kmap_high(page); 24 | flush_tlb_one((unsigned long)addr); 25 | 26 | return addr; 27 | } 28 | EXPORT_SYMBOL(kmap); 29 | 30 | void kunmap(struct page *page) 31 | { 32 | BUG_ON(in_interrupt()); 33 | if (!PageHighMem(page)) 34 | return; 35 | kunmap_high(page); 36 | } 37 | EXPORT_SYMBOL(kunmap); 38 | 39 | void *kmap_atomic(struct page *page) 40 | { 41 | unsigned long vaddr; 42 | int idx, type; 43 | 44 | preempt_disable(); 45 | pagefault_disable(); 46 | if (!PageHighMem(page)) 47 | return page_address(page); 48 | 49 | type = kmap_atomic_idx_push(); 50 | idx = type + KM_TYPE_NR*smp_processor_id(); 51 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 52 | #ifdef CONFIG_DEBUG_HIGHMEM 53 | BUG_ON(!pte_none(*(kmap_pte - idx))); 54 | #endif 55 | set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL)); 56 | flush_tlb_one((unsigned long)vaddr); 57 | 58 | return (void *)vaddr; 59 | } 60 | EXPORT_SYMBOL(kmap_atomic); 61 | 62 | void __kunmap_atomic(void *kvaddr) 63 | { 64 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; 65 | int idx; 66 | 67 | if (vaddr < FIXADDR_START) 68 | goto out; 69 | 70 | #ifdef CONFIG_DEBUG_HIGHMEM 71 | idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx(); 72 | 73 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 74 | 75 | pte_clear(&init_mm, vaddr, kmap_pte - idx); 76 | flush_tlb_one(vaddr); 77 | #else 78 | (void) idx; /* to kill a warning */ 79 | #endif 80 | kmap_atomic_idx_pop(); 81 | out: 82 | pagefault_enable(); 83 | preempt_enable(); 84 | } 85 | EXPORT_SYMBOL(__kunmap_atomic); 86 | 87 | /* 88 | * This is the same as kmap_atomic() but can map memory that doesn't 89 | * have a struct page associated with it. 
90 | */ 91 | void *kmap_atomic_pfn(unsigned long pfn) 92 | { 93 | unsigned long vaddr; 94 | int idx, type; 95 | 96 | pagefault_disable(); 97 | 98 | type = kmap_atomic_idx_push(); 99 | idx = type + KM_TYPE_NR*smp_processor_id(); 100 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 101 | set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL)); 102 | flush_tlb_one(vaddr); 103 | 104 | return (void *) vaddr; 105 | } 106 | 107 | struct page *kmap_atomic_to_page(void *ptr) 108 | { 109 | unsigned long idx, vaddr = (unsigned long)ptr; 110 | pte_t *pte; 111 | 112 | if (vaddr < FIXADDR_START) 113 | return virt_to_page(ptr); 114 | 115 | idx = virt_to_fix(vaddr); 116 | pte = kmap_pte - (idx - FIX_KMAP_BEGIN); 117 | return pte_page(*pte); 118 | } 119 | 120 | static void __init kmap_pages_init(void) 121 | { 122 | unsigned long vaddr; 123 | pgd_t *pgd; 124 | pmd_t *pmd; 125 | pud_t *pud; 126 | pte_t *pte; 127 | 128 | vaddr = PKMAP_BASE; 129 | fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir); 130 | 131 | pgd = swapper_pg_dir + __pgd_offset(vaddr); 132 | pud = (pud_t *)pgd; 133 | pmd = pmd_offset(pud, vaddr); 134 | pte = pte_offset_kernel(pmd, vaddr); 135 | pkmap_page_table = pte; 136 | } 137 | 138 | void __init kmap_init(void) 139 | { 140 | unsigned long vaddr; 141 | 142 | kmap_pages_init(); 143 | 144 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN); 145 | 146 | kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr); 147 | } 148 | -------------------------------------------------------------------------------- /arch/csky/mm/ioremap.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size, 12 | pgprot_t prot, void *caller) 13 | { 14 | phys_addr_t last_addr; 15 | unsigned long offset, vaddr; 16 | struct vm_struct *area; 17 | 18 | last_addr = addr + size - 1; 19 | if (!size || last_addr < addr) 20 | return NULL; 21 | 22 | offset = addr & (~PAGE_MASK); 23 | addr &= PAGE_MASK; 24 | size = PAGE_ALIGN(size + offset); 25 | 26 | area = get_vm_area_caller(size, VM_IOREMAP, caller); 27 | if (!area) 28 | return NULL; 29 | 30 | vaddr = (unsigned long)area->addr; 31 | 32 | if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) { 33 | free_vm_area(area); 34 | return NULL; 35 | } 36 | 37 | return (void __iomem *)(vaddr + offset); 38 | } 39 | 40 | void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot) 41 | { 42 | return __ioremap_caller(phys_addr, size, prot, 43 | __builtin_return_address(0)); 44 | } 45 | EXPORT_SYMBOL(__ioremap); 46 | 47 | void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size) 48 | { 49 | return __ioremap_caller(phys_addr, size, PAGE_KERNEL, 50 | __builtin_return_address(0)); 51 | } 52 | EXPORT_SYMBOL(ioremap_cache); 53 | 54 | void iounmap(void __iomem *addr) 55 | { 56 | vunmap((void *)((unsigned long)addr & PAGE_MASK)); 57 | } 58 | EXPORT_SYMBOL(iounmap); 59 | 60 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 61 | unsigned long size, pgprot_t vma_prot) 62 | { 63 | if (!pfn_valid(pfn)) { 64 | return pgprot_noncached(vma_prot); 65 | } else if (file->f_flags & O_SYNC) { 66 | return pgprot_writecombine(vma_prot); 67 | } 68 | 69 | return vma_prot; 70 | } 71 | EXPORT_SYMBOL(phys_mem_access_prot); 72 | 
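A minimal usage sketch for the __ioremap()/iounmap() pair defined above (not part of the tree; the device base address, register offset, and function name are hypothetical, chosen only for illustration):

#include <linux/io.h>
#include <linux/mm.h>

#define EXAMPLE_PHYS_BASE	0xffc02000UL	/* hypothetical MMIO block */
#define EXAMPLE_STATUS_REG	0x04		/* hypothetical register offset */

static u32 example_read_status(void)
{
	/* Map one page of device registers with an uncached protection,
	 * matching the pgprot_t argument __ioremap() takes for MMIO. */
	void __iomem *regs = __ioremap(EXAMPLE_PHYS_BASE, PAGE_SIZE,
				       pgprot_noncached(PAGE_KERNEL));
	u32 val;

	if (!regs)
		return 0;

	val = readl(regs + EXAMPLE_STATUS_REG);
	iounmap(regs);

	return val;
}

The mapping is backed by get_vm_area_caller()/ioremap_page_range() in __ioremap_caller() above, so iounmap() must be called on the same cookie to release the vmalloc area.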
-------------------------------------------------------------------------------- /arch/csky/mm/syscache.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | SYSCALL_DEFINE3(cacheflush, 10 | void __user *, addr, 11 | unsigned long, bytes, 12 | int, cache) 13 | { 14 | switch (cache) { 15 | case ICACHE: 16 | case BCACHE: 17 | flush_icache_mm_range(current->mm, 18 | (unsigned long)addr, 19 | (unsigned long)addr + bytes); 20 | case DCACHE: 21 | dcache_wb_range((unsigned long)addr, 22 | (unsigned long)addr + bytes); 23 | break; 24 | default: 25 | return -EINVAL; 26 | } 27 | 28 | return 0; 29 | } 30 | -------------------------------------------------------------------------------- /drivers/Makefile: -------------------------------------------------------------------------------- 1 | obj-y += irqchip/ 2 | obj-y += clocksource/ 3 | 4 | ifneq ($(CONFIG_CPU_CK610),y) 5 | obj-y += prfl-csky.o 6 | endif 7 | 8 | obj-y += qemu-exit.o 9 | -------------------------------------------------------------------------------- /drivers/clocksource/Makefile: -------------------------------------------------------------------------------- 1 | obj-y += timer-gx6605s.o 2 | ifeq ($(CONFIG_CPU_CK860),y) 3 | obj-y += timer-mp-csky.o 4 | endif 5 | -------------------------------------------------------------------------------- /drivers/clocksource/timer-of.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | #ifndef __TIMER_OF_H__ 3 | #define __TIMER_OF_H__ 4 | 5 | #include 6 | 7 | #define TIMER_OF_BASE 0x1 8 | #define TIMER_OF_CLOCK 0x2 9 | #define TIMER_OF_IRQ 0x4 10 | 11 | struct of_timer_irq { 12 | int irq; 13 | int index; 14 | int percpu; 15 | const char *name; 16 | unsigned long flags; 17 | irq_handler_t handler; 18 | }; 19 | 20 | struct of_timer_base { 21 | void __iomem *base; 22 | const char *name; 23 | int index; 24 | }; 25 | 26 | struct of_timer_clk { 27 | struct clk *clk; 28 | const char *name; 29 | int index; 30 | unsigned long rate; 31 | unsigned long period; 32 | }; 33 | 34 | struct timer_of { 35 | unsigned int flags; 36 | struct device_node *np; 37 | struct clock_event_device clkevt; 38 | struct of_timer_base of_base; 39 | struct of_timer_irq of_irq; 40 | struct of_timer_clk of_clk; 41 | void *private_data; 42 | }; 43 | 44 | static inline struct timer_of *to_timer_of(struct clock_event_device *clkevt) 45 | { 46 | return container_of(clkevt, struct timer_of, clkevt); 47 | } 48 | 49 | static inline void __iomem *timer_of_base(struct timer_of *to) 50 | { 51 | return to->of_base.base; 52 | } 53 | 54 | static inline int timer_of_irq(struct timer_of *to) 55 | { 56 | return to->of_irq.irq; 57 | } 58 | 59 | static inline unsigned long timer_of_rate(struct timer_of *to) 60 | { 61 | return to->of_clk.rate; 62 | } 63 | 64 | static inline unsigned long timer_of_period(struct timer_of *to) 65 | { 66 | return to->of_clk.period; 67 | } 68 | 69 | extern int __init timer_of_init(struct device_node *np, 70 | struct timer_of *to); 71 | 72 | extern void __init timer_of_cleanup(struct timer_of *to); 73 | 74 | #endif 75 | -------------------------------------------------------------------------------- /drivers/irqchip/Makefile: -------------------------------------------------------------------------------- 1 | obj-y += irq-csky-apb-intc.o 2 | ifeq 
($(CONFIG_CPU_CK860),y) 3 | obj-y += irq-csky-mpintc.o 4 | endif 5 | -------------------------------------------------------------------------------- /drivers/qemu-exit.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | static volatile void *pmaddr; 12 | 13 | static void qemu_pm_power_off(void) 14 | { 15 | *(unsigned int *)pmaddr = 0; 16 | } 17 | 18 | static int qemuexit_platform_probe(struct platform_device *dev) 19 | { 20 | struct resource *res_mem; 21 | void * __pmaddr; 22 | int err; 23 | 24 | res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0); 25 | __pmaddr = devm_ioremap_resource(&dev->dev, res_mem); 26 | if (IS_ERR(__pmaddr)) { 27 | err = PTR_ERR(__pmaddr); 28 | return err; 29 | } 30 | 31 | pmaddr = __pmaddr; 32 | pm_power_off = qemu_pm_power_off; 33 | 34 | return 0; 35 | } 36 | 37 | static const struct of_device_id qemuexit_ids[] = { 38 | { .compatible = "csky,qemu-exit", }, 39 | {} 40 | }; 41 | MODULE_DEVICE_TABLE(of, qemuexit_ids); 42 | 43 | static struct platform_driver qemuexit_platform_driver = { 44 | .probe = qemuexit_platform_probe, 45 | .driver = { 46 | .name = "qemu-exit", 47 | .of_match_table = qemuexit_ids, 48 | } 49 | }; 50 | 51 | static int __init qemuexit_platform_init(void) 52 | { 53 | return platform_driver_register(&qemuexit_platform_driver); 54 | } 55 | module_init(qemuexit_platform_init); 56 | 57 | static void __exit qemuexit_cleanup(void) 58 | { 59 | platform_driver_unregister(&qemuexit_platform_driver); 60 | } 61 | module_exit(qemuexit_cleanup); 62 | 63 | MODULE_DESCRIPTION("C-SKY QMEU exit"); 64 | MODULE_LICENSE("GPL"); 65 | -------------------------------------------------------------------------------- /merge.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo "Merge $1" 3 | 4 | cp -raf ./arch/csky $1/arch/ 5 | 6 | cp -raf ./drivers $1/arch-csky-drivers 7 | awk '/:= drivers/{print $$0,"arch-csky-drivers/";next}{print $$0}' $1/Makefile 1<>$1/Makefile 8 | 9 | mkdir -p $1/tools/arch/csky/include/uapi/asm/ 10 | cp $1/tools/arch/arm/include/uapi/asm/mman.h $1/tools/arch/csky/include/uapi/asm/mman.h 11 | 12 | cat ./patch/*.patch | patch -g0 -p1 -E -d "$1" -t -N -s 13 | -------------------------------------------------------------------------------- /patch/0001-scripts-recordmcount.pl-Add-csky-support.patch: -------------------------------------------------------------------------------- 1 | From 389c00b0565c78a7ca8bc8a13c6e7a5cad6af0a4 Mon Sep 17 00:00:00 2001 2 | From: Guo Ren 3 | Date: Tue, 26 Mar 2019 12:12:03 +0800 4 | Subject: [PATCH] scripts/recordmcount.pl: Add csky support 5 | 6 | Signed-off-by: Guo Ren 7 | --- 8 | scripts/recordmcount.pl | 3 +++ 9 | 1 file changed, 3 insertions(+) 10 | 11 | diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl 12 | index f599031..81d8af9 100755 13 | --- a/scripts/recordmcount.pl 14 | +++ b/scripts/recordmcount.pl 15 | @@ -392,6 +392,9 @@ if ($arch eq "x86_64") { 16 | } elsif ($arch eq "nds32") { 17 | $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$"; 18 | $alignment = 2; 19 | +} elsif ($arch eq "csky") { 20 | + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_CKCORE_PCREL_JSR_IMM26BY2\\s+_mcount\$"; 21 | + $alignment = 2; 22 | } else { 23 | die "Arch $arch is not supported with 
CONFIG_FTRACE_MCOUNT_RECORD"; 24 | } 25 | -- 26 | 2.7.4 27 | 28 | -------------------------------------------------------------------------------- /patch/0003-perf-evsel-Use-hweight64-instead-of-hweight_long-att.patch: -------------------------------------------------------------------------------- 1 | From 7879c364411d2b170ce88b30dcd79a59d736a45e Mon Sep 17 00:00:00 2001 2 | Message-Id: <7879c364411d2b170ce88b30dcd79a59d736a45e.1558422394.git.han_mao@c-sky.com> 3 | From: Mao Han 4 | Date: Tue, 21 May 2019 15:05:47 +0800 5 | Subject: [PATCH 1/1] perf evsel: Use hweight64() instead of 6 | hweight_long(attr.sample_regs_user) 7 | 8 | On 32-bits platform with more than 32 registers, the 64 bits mask is 9 | truncate to the lower 32 bits and the return value of hweight_long will 10 | always smaller than 32. When kernel outputs more than 32 registers, but 11 | the user perf program only counts 32, there will be a data mismatch 12 | result to overflow check fail. 13 | 14 | Signed-off-by: Mao Han 15 | --- 16 | tools/perf/util/evsel.c | 12 ++++++------ 17 | 1 file changed, 6 insertions(+), 6 deletions(-) 18 | 19 | diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c 20 | index e7dbdcc..91ba950 100644 21 | --- a/tools/perf/util/evsel.c 22 | +++ b/tools/perf/util/evsel.c 23 | @@ -2290,7 +2290,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, 24 | if (data->user_regs.abi) { 25 | u64 mask = evsel->attr.sample_regs_user; 26 | 27 | - sz = hweight_long(mask) * sizeof(u64); 28 | + sz = hweight64(mask) * sizeof(u64); 29 | OVERFLOW_CHECK(array, sz, max_size); 30 | data->user_regs.mask = mask; 31 | data->user_regs.regs = (u64 *)array; 32 | @@ -2346,7 +2346,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, 33 | if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { 34 | u64 mask = evsel->attr.sample_regs_intr; 35 | 36 | - sz = hweight_long(mask) * sizeof(u64); 37 | + sz = hweight64(mask) * sizeof(u64); 38 | OVERFLOW_CHECK(array, sz, max_size); 39 | data->intr_regs.mask = mask; 40 | data->intr_regs.regs = (u64 *)array; 41 | @@ -2474,7 +2474,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, 42 | if (type & PERF_SAMPLE_REGS_USER) { 43 | if (sample->user_regs.abi) { 44 | result += sizeof(u64); 45 | - sz = hweight_long(sample->user_regs.mask) * sizeof(u64); 46 | + sz = hweight64(sample->user_regs.mask) * sizeof(u64); 47 | result += sz; 48 | } else { 49 | result += sizeof(u64); 50 | @@ -2502,7 +2502,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, 51 | if (type & PERF_SAMPLE_REGS_INTR) { 52 | if (sample->intr_regs.abi) { 53 | result += sizeof(u64); 54 | - sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); 55 | + sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 56 | result += sz; 57 | } else { 58 | result += sizeof(u64); 59 | @@ -2632,7 +2632,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, 60 | if (type & PERF_SAMPLE_REGS_USER) { 61 | if (sample->user_regs.abi) { 62 | *array++ = sample->user_regs.abi; 63 | - sz = hweight_long(sample->user_regs.mask) * sizeof(u64); 64 | + sz = hweight64(sample->user_regs.mask) * sizeof(u64); 65 | memcpy(array, sample->user_regs.regs, sz); 66 | array = (void *)array + sz; 67 | } else { 68 | @@ -2668,7 +2668,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, 69 | if (type & PERF_SAMPLE_REGS_INTR) { 70 | if (sample->intr_regs.abi) { 71 | *array++ = sample->intr_regs.abi; 72 | - 
sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); 73 | + sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 74 | memcpy(array, sample->intr_regs.regs, sz); 75 | array = (void *)array + sz; 76 | } else { 77 | -- 78 | 2.7.4 79 | 80 | --------------------------------------------------------------------------------
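A small host-side sketch of the truncation described in the patch above (illustrative only; it models the 32-bit hweight_long() behaviour with an explicit u32 cast and uses GCC popcount builtins in place of the kernel helpers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The kernel sampled 34 registers, so the mask has 34 bits set. */
	uint64_t sample_regs_mask = (1ULL << 34) - 1;

	/* hweight_long() on a 32-bit platform only sees the low 32 bits... */
	unsigned int truncated = __builtin_popcount((uint32_t)sample_regs_mask);
	/* ...while hweight64() counts the whole 64-bit mask. */
	unsigned int full = __builtin_popcountll(sample_regs_mask);

	/* 32 * 8 bytes vs 34 * 8 bytes: the undersized estimate is what trips
	 * the OVERFLOW_CHECK() in perf_evsel__parse_sample(). */
	printf("truncated: %u regs (%u bytes)\n", truncated, truncated * 8);
	printf("full:      %u regs (%u bytes)\n", full, full * 8);

	return 0;
}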