├── vmm ├── physical_memory.h ├── hart_trap.h ├── physical_memory.c ├── mmu_tlb.c ├── debug.h ├── ini.h ├── config.h ├── hart_interrupt.h ├── util.c ├── Makefile ├── vmm_trap.S ├── main.c ├── csr.h ├── emulate_clint.c ├── pm_region.h ├── vm.h ├── log.h ├── hart_interrupt.c ├── hart_exception.h ├── csr_misc.c ├── debug_breakpoint.c ├── hart_def.h ├── fdt.h ├── search.h ├── hart_exception.c ├── pm_region.c ├── mmu_tlb.h ├── hart_util.h ├── log.c ├── emulate_rom.c ├── vm.c ├── mmu.c ├── util.h ├── translate_fence.c ├── emulate_uart.c ├── mmu_pagewalker.c ├── translate_unconditional_jump_intrs.c ├── csr_machine_level_pmp.c ├── hart.h ├── emulate_ram.c ├── sort.h ├── hart_trap.c ├── csr_representation.c ├── translate_memory_store.c ├── ini.c ├── mmu.h ├── translate_amo.c ├── hart.c ├── translate_memory_load.c ├── csr_supervisor_level.c ├── debug.c └── translate_privileged_instr.c ├── bootrom ├── entry.S ├── printk.h ├── sha1.h ├── init.c ├── linker.ld ├── fdt.h ├── uart.h ├── fdt.c ├── Makefile ├── uart.c ├── unittest.h ├── printk.c └── sha1.c ├── Makefile ├── .gitignore ├── LICENSE └── test.vm.ini /vmm/physical_memory.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #ifndef _PHYSICAL_MEMORY_H 5 | #define _PHYSICAL_MEMORY_H 6 | 7 | #include 8 | 9 | void * 10 | preallocate_physical_memory(int64_t nr_bytes); 11 | 12 | #endif 13 | -------------------------------------------------------------------------------- /vmm/hart_trap.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #ifndef _HART_TRAP_H 5 | #define _HART_TRAP_H 6 | #include 7 | #include 8 | 9 | void 10 | raise_trap_raw(struct hart * hartptr, uint8_t target_privilege_level, 11 | uint32_t cause, uint32_t tval); 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /vmm/physical_memory.c: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | #define VMM_BASE_PAGE_SIZE 4096 10 | 11 | void * 12 | preallocate_physical_memory(int64_t nr_bytes) 13 | { 14 | return aligned_alloc(VMM_BASE_PAGE_SIZE, nr_bytes); 15 | } 16 | 17 | -------------------------------------------------------------------------------- /bootrom/entry.S: -------------------------------------------------------------------------------- 1 | .section .rom_init_entry 2 | 3 | 4 | .extern rom_init 5 | 6 | .global rom_entry 7 | rom_entry: 8 | 9 | la sp, __init_stack 10 | auipc a0, 0x1 11 | //ebreak 12 | jal ra, rom_init 13 | 14 | 1: 15 | wfi 16 | j 1b 17 | 18 | 19 | 20 | .section .bss 21 | .space 4096 * 2 22 | .align 16 23 | __init_stack: 24 | 25 | -------------------------------------------------------------------------------- /vmm/mmu_tlb.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #include 5 | 6 | void 7 | invalidate_tlb(struct tlb_entry * tlb_base, int tlb_cap) 8 | { 9 | int idx = 0; 10 | struct tlb_entry * entry; 11 | for (; idx < tlb_cap; idx++) { 12 | entry = tlb_base + idx; 13 | entry->entry_valid = 0; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /bootrom/printk.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #ifndef _PRINTK_H 5 | #define _PRINTK_H 6 | #include 7 | 8 | #define ANSI_COLOR_RED "\x1b[31m" 9 | #define ANSI_COLOR_GREEN "\x1b[32m" 10 | #define ANSI_COLOR_YELLOW "\x1b[33m" 11 | #define ANSI_COLOR_BLUE "\x1b[34m" 12 | #define ANSI_COLOR_MAGENTA "\x1b[35m" 13 | #define ANSI_COLOR_CYAN "\x1b[36m" 14 | #define ANSI_COLOR_RESET "\x1b[0m" 15 | 16 | void 17 | printk_no_prefix(const char *fmt, ...); 18 | 19 | #define printk(fmt, 
...) printk_no_prefix("BOOTROM: "fmt, ##__VA_ARGS__) 20 | #endif 21 | -------------------------------------------------------------------------------- /vmm/debug.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | 5 | #ifndef _DEBUG_H 6 | #define _DEBUG_H 7 | 8 | #include 9 | #include 10 | 11 | enum ACTIONS { 12 | ACTION_CONTINUE, 13 | ACTION_STOP 14 | }; 15 | 16 | void 17 | enter_vmm_dbg_shell(struct hart * hartptr, int check_bps); 18 | 19 | 20 | int 21 | add_breakpoint(uint32_t guest_addr); 22 | 23 | int 24 | is_address_breakpoint(uint32_t guest_addr); 25 | 26 | void 27 | dump_breakpoints(void); 28 | 29 | int 30 | add_breakpoint_command(struct hart * hartptr, int argc, char *argv[]); 31 | 32 | #endif 33 | -------------------------------------------------------------------------------- /vmm/ini.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2016 rxi 3 | * Copyright (c) 2020 Jie Zheng 4 | * 5 | * This library is free software; you can redistribute it and/or modify it 6 | * under the terms of the MIT license. See `ini.c` for details. 
7 | */ 8 | 9 | #ifndef INI_H 10 | #define INI_H 11 | 12 | #define INI_VERSION "0.1.1" 13 | 14 | typedef struct ini_t ini_t; 15 | 16 | ini_t* ini_load(const char *filename); 17 | void ini_free(ini_t *ini); 18 | const char* ini_get(ini_t *ini, const char *section, const char *key); 19 | int ini_sget(ini_t *ini, const char *section, const char *key, const char *scanfmt, void *dst); 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | @make --no-print-directory -C bootrom 3 | #@make --no-print-directory -C guest 4 | @make --no-print-directory -C vmm 5 | 6 | clean: 7 | @make --no-print-directory clean -C bootrom 8 | #@make --no-print-directory clean -C guest 9 | @make --no-print-directory clean -C vmm 10 | run:all 11 | @./vmm/vmx test.vm.ini 12 | 13 | 14 | # to start qemmu and make it blocking, SET QEMU_STOP=-S environmental variable 15 | #guest_qemu: 16 | # @qemu-system-riscv32 -monitor null -nographic -machine virt -m 128M -kernel guest/guest.rv32.elf -gdb tcp::5070 ${QEMU_STOP} 17 | #guest_gdb: 18 | # @riscv32-unknown-elf-gdb guest/guest.rv32.elf --eval-command "target remote 0:5070" 19 | -------------------------------------------------------------------------------- /vmm/config.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | 5 | 6 | #ifndef _CONFIG_H 7 | #define _CONFIG_H 8 | #define BUILD_TYPE_DEBUG 0x1 9 | #define BUILD_TYPE_RELEASE 0x2 10 | 11 | #define BUILD_TYPE BUILD_TYPE_DEBUG 12 | 13 | #define XLEN 32 14 | // The maximum number of harts a vm can support 15 | #define MAX_NR_HARTS 64 16 | 17 | //#define DEBUG_TRACE 18 | //#define DEBUG_TRANSLATION 19 | #define NATIVE_DEBUGER 20 | 21 | #define COLORED_OUTPUT 22 | 23 | 24 | /* 25 | * initial logging level 26 | * LOG_TRACE, LOG_DEBUG, LOG_INFO,LOG_WARN, LOG_ERROR, 
LOG_FATAL, LOG_UART 27 | * it may be overriden by per-vm debug.verbosity 28 | */ 29 | #define LOGGING_LEVEL LOG_TRACE 30 | #endif 31 | -------------------------------------------------------------------------------- /bootrom/sha1.h: -------------------------------------------------------------------------------- 1 | #ifndef SHA1_H 2 | #define SHA1_H 3 | 4 | /* 5 | SHA-1 in C 6 | By Steve Reid 7 | 100% Public Domain 8 | */ 9 | 10 | #include "stdint.h" 11 | 12 | typedef struct 13 | { 14 | uint32_t state[5]; 15 | uint32_t count[2]; 16 | unsigned char buffer[64]; 17 | } SHA1_CTX; 18 | 19 | void SHA1Transform( 20 | uint32_t state[5], 21 | const unsigned char buffer[64] 22 | ); 23 | 24 | void SHA1Init( 25 | SHA1_CTX * context 26 | ); 27 | 28 | void SHA1Update( 29 | SHA1_CTX * context, 30 | const unsigned char *data, 31 | uint32_t len 32 | ); 33 | 34 | void SHA1Final( 35 | unsigned char digest[20], 36 | SHA1_CTX * context 37 | ); 38 | 39 | void SHA1( 40 | char *hash_out, 41 | const char *str, 42 | int len); 43 | 44 | #endif 45 | -------------------------------------------------------------------------------- /bootrom/init.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019-2020 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | 12 | uint32_t 13 | hartid(void) 14 | { 15 | uint32_t hartid = 0; 16 | __asm__ volatile("csrr %[RD], mhartid;" 17 | :[RD]"=r"(hartid) 18 | : 19 | :"memory"); 20 | return hartid; 21 | } 22 | 23 | void 24 | rom_init(void) 25 | { 26 | uart16550_init(); 27 | // perform basic unit tests 28 | unit_test(); 29 | validate_dtb(); 30 | 31 | // at last we jump to guest image, and never be back 32 | uint32_t hart_id = hartid(); 33 | void * dtb = (void *)0x1000; 34 | ((void (*)(uint32_t, void *))0x80000000)(hart_id, dtb); 35 | } 36 | 37 | -------------------------------------------------------------------------------- /bootrom/linker.ld: 
-------------------------------------------------------------------------------- 1 | OUTPUT_FORMAT("elf32-littleriscv") 2 | OUTPUT_ARCH(riscv) 3 | ENTRY(rom_entry) 4 | 5 | 6 | SECTIONS 7 | { 8 | /* 9 | * ROM actually begins at 0x1000, but first 3 pages are reserved for 10 | * device tree blob. 11 | * XXX: that's to say, size of DTB must not exceed 4096 * 3 bytes. 12 | * . = 0x1000; 13 | */ 14 | . = 0x4000; 15 | .text : 16 | { 17 | *(.rom_init_entry*) 18 | *(.text*) 19 | *(.rodata*) 20 | } 21 | .data : 22 | { 23 | KEEP(*( .init_array )); 24 | KEEP(*(SORT_BY_INIT_PRIORITY( .init_array.* ))); 25 | *(.data*) 26 | } 27 | 28 | .bss : 29 | { 30 | *(.bss*) 31 | } 32 | 33 | /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) } 34 | } 35 | 36 | 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | 4 | # Object files 5 | *.o 6 | *.ko 7 | *.obj 8 | *.elf 9 | 10 | # Linker output 11 | *.ilk 12 | *.map 13 | *.exp 14 | 15 | # Precompiled Headers 16 | *.gch 17 | *.pch 18 | 19 | # Libraries 20 | *.lib 21 | *.a 22 | *.la 23 | *.lo 24 | 25 | # Shared objects (inc. 
Windows DLLs) 26 | *.dll 27 | *.so 28 | *.so.* 29 | *.dylib 30 | 31 | # Executables 32 | *.exe 33 | *.out 34 | *.app 35 | *.i*86 36 | *.x86_64 37 | *.hex 38 | 39 | # Debug files 40 | *.dSYM/ 41 | *.su 42 | *.idb 43 | *.pdb 44 | 45 | # Kernel Module Compile Results 46 | *.mod* 47 | *.cmd 48 | .tmp_versions/ 49 | modules.order 50 | Module.symvers 51 | Mkfile.old 52 | dkms.conf 53 | #kernel bin 54 | *.bin 55 | *.iso 56 | *.map 57 | *.o.plain 58 | *.img 59 | *.qcow2 60 | Zelda.drive 61 | !runtime/libc/* 62 | qemu.log 63 | zelda.dtb 64 | guest/guest.rv32.img 65 | vmm/vmx 66 | -------------------------------------------------------------------------------- /vmm/hart_interrupt.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | * 4 | * 5 | * Hart Interrupt is a wrapper layer for interrupt delivery in hart 6 | */ 7 | 8 | #ifndef _HART_INTERRUPT_H 9 | #define _HART_INTERRUPT_H 10 | #include 11 | 12 | #define INTERRUPT_USER_SOFTWARE 0x0 13 | #define INTERRUPT_SUPERVISOR_SOFTWARE 0x1 14 | #define INTERRUPT_MACHINE_SOFTWARE 0x3 15 | #define INTERRUPT_USER_TIMER 0x4 16 | #define INTERRUPT_SUPERVISOR_TIMER 0x5 17 | #define INTERRUPT_MACHINE_TIMER 0x7 18 | #define INTERRUPT_USER_EXTERNAL 0x8 19 | #define INTERRUPT_SUPERVISOR_EXTERNAL 0x9 20 | #define INTERRUPT_MACHINE_EXTERNAL 0xb 21 | 22 | 23 | uint8_t 24 | is_interrupt_deliverable(struct hart * hartptr, uint8_t vector); 25 | 26 | void 27 | deliver_interrupt(struct hart * hartptr, uint8_t vector); 28 | 29 | #endif 30 | 31 | -------------------------------------------------------------------------------- /vmm/util.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | 8 | int 9 | preload_binary_image(void * addr, int64_t length, const char * image_path) 10 | { 11 | int fd = open(image_path, O_RDONLY); 12 | if (fd < 0) { 13 | return 
fd; 14 | } 15 | int nr_image_length = lseek(fd, 0, SEEK_END); 16 | lseek(fd, 0, SEEK_SET); 17 | int nr_left = nr_image_length; 18 | int nr_read = 0; 19 | while (nr_left > 0) { 20 | int nr_to_read = MIN(nr_left, length - nr_read); 21 | if (!nr_to_read) { 22 | break; 23 | } 24 | int tmp = read(fd, addr + nr_read, nr_to_read); 25 | if (tmp <= 0) { 26 | break; 27 | } 28 | nr_left -= tmp; 29 | nr_read += tmp; 30 | } 31 | close(fd); 32 | return !(nr_left == 0); 33 | } 34 | 35 | -------------------------------------------------------------------------------- /bootrom/fdt.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | 5 | #ifndef _FDT_H 6 | #define _FDT_H 7 | #include 8 | 9 | #define FDT_MAGIC 0xd00dfeed 10 | 11 | struct fdt_header { 12 | uint32_t magic; 13 | uint32_t totalsize; 14 | uint32_t off_dt_struct; 15 | uint32_t off_dt_strings; 16 | uint32_t off_mem_rsvmap; 17 | uint32_t version; 18 | uint32_t last_comp_version; 19 | uint32_t boot_cpuid_phys; 20 | uint32_t size_dt_strings; 21 | uint32_t size_dt_struct; 22 | }__attribute__((packed)); 23 | 24 | #define LITTLE_ENDIAN32(v) ((((v) & 0xff) << 24) | \ 25 | (((v) & 0xff00) << 8) | \ 26 | (((v) & 0xff0000) >> 8) | \ 27 | (((v) & 0xff000000) >> 24)) 28 | 29 | #define BIG_ENDIAN32(v) LITTLE_ENDIAN32(v) 30 | 31 | void 32 | validate_dtb(void); 33 | #endif 34 | -------------------------------------------------------------------------------- /bootrom/uart.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | 5 | #ifndef _UART16550_H 6 | #define _UART16550_H 7 | #include 8 | 9 | #define UART_REG_QUEUE 0 // rx/tx fifo data 10 | #define UART_REG_DLL 0 // divisor latch (LSB) 11 | #define UART_REG_IER 1 // interrupt enable register 12 | #define UART_REG_DLM 1 // divisor latch (MSB) 13 | #define UART_REG_FCR 2 // fifo control register 14 | #define UART_REG_LCR 3 // 
line control register 15 | #define UART_REG_MCR 4 // modem control register 16 | #define UART_REG_LSR 5 // line status register 17 | #define UART_REG_MSR 6 // modem status register 18 | #define UART_REG_SCR 7 // scratch register 19 | #define UART_REG_STATUS_RX 0x01 20 | #define UART_REG_STATUS_TX 0x20 21 | 22 | 23 | void 24 | uart16550_init(void); 25 | 26 | void 27 | uart16550_putchar(uint8_t ch); 28 | 29 | int 30 | uart16550_getchar(void); 31 | 32 | #endif 33 | -------------------------------------------------------------------------------- /vmm/Makefile: -------------------------------------------------------------------------------- 1 | item = . 2 | C_FILES = $(foreach item,.,$(wildcard $(item)/*.c)) 3 | C_OBJS = $(patsubst %.c,%.o,$(C_FILES)) 4 | 5 | AS_FILES = $(foreach item, .,$(wildcard $(item)/*.S)) 6 | AS_OBJS = $(patsubst %.S,%.o,$(AS_FILES)) 7 | 8 | 9 | # Use gnu11. . . 10 | CCPARAMS = -m64 -std=gnu11 -O0 -g3 -Werror -Wall -Wstrict-prototypes 11 | CCPARAMS += -include config.h 12 | 13 | GUEST_ELF = vmx 14 | GUEST_MAP = vmx.map 15 | 16 | %.o: %.S 17 | @echo "[AS] $<" 18 | @$(CC) $(CCPARAMS) -I. -o $@ -c $< 19 | 20 | %.o: %.c 21 | @echo "[CC] $<" 22 | @$(CC) $(CCPARAMS) -I. 
-o $@ -c $< 23 | 24 | 25 | $(GUEST_ELF):$(AS_OBJS) $(C_OBJS) 26 | @echo "[LD] $@" 27 | @$(CC) $(LDPARAMS) -Wl,-Map=$(GUEST_MAP) -lm -o $(GUEST_ELF) $(AS_OBJS) $(C_OBJS) 28 | 29 | clean: 30 | @echo "[Cleaning] $(GUEST_ELF)" 31 | @rm -f *.o $(GUEST_ELF) $(GUEST_IMG) $(GUEST_MAP) 32 | run:$(GUEST_ELF) 33 | @./$(GUEST_ELF) ../rom/rom.rv32.img 34 | 35 | debug:$(GUEST_ELF) 36 | @gdb ./$(GUEST_ELF) ../rom/rom.rv32.img 37 | -------------------------------------------------------------------------------- /vmm/vmm_trap.S: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | 5 | .extern offset_of_vmm_stack 6 | .section .text 7 | .global vmm_entry_point 8 | vmm_entry_point: 9 | // FIXED: switch stack during context switching, or the stack of the vmm 10 | // may be potentially exhausted. 11 | movq $offset_of_vmm_stack, %rsi 12 | movq (%rsi), %rdx 13 | addq %r12, %rdx 14 | movq (%rdx), %rdx 15 | movq %rdx, %rsp 16 | 17 | .extern vmexit 18 | pushq %r12 19 | movq %r12, %rdi 20 | call vmexit 21 | 22 | .extern vmpanic 23 | popq %r12 24 | movq %r12, %rdi 25 | call vmpanic 26 | 27 | 28 | 29 | 30 | .global vmm_jumper_begin 31 | .global vmm_jumper_end 32 | vmm_jumper_begin: 33 | movq $vmm_entry_point, %rax 34 | jmpq *%rax 35 | vmm_jumper_end: 36 | 37 | 38 | 39 | 40 | .global translation_slow_path 41 | translation_slow_path: 42 | pushq %r15 43 | pushq %r14 44 | pushq %r13 45 | pushq %r12 46 | pushq %r11 47 | pushq %r10 48 | pushq %r9 49 | pushq %r8 50 | 51 | -------------------------------------------------------------------------------- /bootrom/fdt.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | 8 | void 9 | validate_dtb(void) 10 | { 11 | uint8_t * dtb_base = (uint8_t *)0x1000; 12 | struct fdt_header * fdt = (struct fdt_header *)dtb_base; 13 | if (fdt->magic == 
BIG_ENDIAN32(FDT_MAGIC)) { 14 | printk("device tree detected at 0x%x\n", 0x1000); 15 | printk("device tree blob size: %d bytes\n", LITTLE_ENDIAN32(fdt->totalsize)); 16 | int dtb_size = LITTLE_ENDIAN32(fdt->totalsize); 17 | char * index_tbl="0123456789abcdef"; 18 | int idx = 0; 19 | char result[21]; 20 | char hex_result[41]; 21 | SHA1(result, (const char *)dtb_base, dtb_size); 22 | for (idx = 0; idx < 20; idx++) { 23 | hex_result[idx * 2 + 1] = index_tbl[result[idx] & 0xf]; 24 | hex_result[idx * 2] = index_tbl[(result[idx] >> 4) & 0xf]; 25 | } 26 | hex_result[40] = '\0'; 27 | printk("device tree blob sha1 checksum: %s\n", hex_result); 28 | } else { 29 | printk(ANSI_COLOR_RED"device tree not detected\n"ANSI_COLOR_RESET); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /vmm/main.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019-2020 Jie Zheng 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | 16 | static void 17 | vmm_misc_init(ini_t * ini) 18 | { 19 | const char * verbosity_string = ini_get(ini, "debug", "verbosity"); 20 | if (verbosity_string) { 21 | int verbosity = strtol(verbosity_string, NULL, 10); 22 | log_set_level(verbosity); 23 | } 24 | } 25 | int main(int argc, char ** argv) 26 | { 27 | if (argc <= 1) { 28 | log_fatal("please specify the vm config file\n"); 29 | exit(1); 30 | } 31 | // once config file is loaded, don't release it ever. 
32 | ini_t * ini_config = ini_load(argv[1]); 33 | if (!ini_config) { 34 | log_fatal("can not load ini file:%s\n", argv[1]); 35 | exit(2); 36 | } 37 | // vmm platform init 38 | vmm_misc_init(ini_config); 39 | // Boot a VM 40 | struct virtual_machine vm; 41 | virtual_machine_init(&vm, ini_config); 42 | vmresume(hart_by_id(&vm, vm.boot_hart)); 43 | __not_reach(); 44 | return 0; 45 | } 46 | -------------------------------------------------------------------------------- /vmm/csr.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #ifndef _CSR_H 5 | #define _CSR_H 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #define WPRI_MASK_ALL 0xffffffff 12 | 13 | struct csr_entry { 14 | uint32_t is_valid:1; 15 | uint32_t csr_addr:16; 16 | uint32_t csr_blob; 17 | 18 | // on write: csr_blob &= wpri_mask 19 | // on read: rd &= wpri_mask 20 | uint32_t wpri_mask; 21 | 22 | void (*write)(struct hart *hartptr, struct csr_entry * csr, uint32_t value); 23 | uint32_t (*read)(struct hart *hartptr, struct csr_entry *csr); 24 | void (*reset)(struct hart *hartptr, struct csr_entry *csr); 25 | }; 26 | 27 | #define NR_CSRS 4096 28 | 29 | 30 | struct csr_registery_entry { 31 | struct csr_registery_entry * next; 32 | int csr_addr; 33 | // the fields are legal to register: 34 | // wpri_mask, write, read, reset 35 | struct csr_entry csr_registery; 36 | }; 37 | 38 | extern struct csr_registery_entry * csr_registery_head; 39 | 40 | 41 | static inline void 42 | register_csr_entry(struct csr_registery_entry * entry) 43 | { 44 | entry->next = csr_registery_head; 45 | csr_registery_head = entry; 46 | log_debug("registering csr 0x%x\n", entry->csr_addr); 47 | } 48 | 49 | 50 | #endif 51 | -------------------------------------------------------------------------------- /bootrom/Makefile: -------------------------------------------------------------------------------- 1 | item = . 
2 | C_FILES = $(foreach item,.,$(wildcard $(item)/*.c)) 3 | C_OBJS = $(patsubst %.c,%.o,$(C_FILES)) 4 | 5 | AS_FILES = $(foreach item, .,$(wildcard $(item)/*.S)) 6 | AS_OBJS = $(patsubst %.S,%.o,$(AS_FILES)) 7 | 8 | ASPARAMS = -m64 -mno-red-zone 9 | #CCPARAMS = -O0 -g3 -ffreestanding -mno-fdiv -mno-div -march=rv32ima -mabi=ilp32 -nostdlib -fno-builtin -fno-exceptions -Werror -Wall -Wstrict-prototypes 10 | CCPARAMS = -O0 -g3 -ffreestanding -mno-fdiv -march=rv32imafd -mabi=ilp32 -nostdlib -fno-builtin -fno-exceptions -Werror -Wall -Wstrict-prototypes 11 | LDPARAMS = -m elf32lriscv -static 12 | 13 | ROM_ELF = bootrom.rv32.elf 14 | ROM_IMG = bootrom.rv32.img 15 | ROM_MAP = bootrom.rv32.map 16 | 17 | CROSS_COMPILE = riscv32-unknown-linux-gnu- 18 | %.o: %.S 19 | @echo "[AS] $<" 20 | @$(CROSS_COMPILE)gcc $(CCPARAMS) -I. -o $@ -c $< 21 | 22 | %.o: %.c 23 | @echo "[CC] $<" 24 | @$(CROSS_COMPILE)gcc $(CCPARAMS) -I. -g -Wa,-adhls -o $@ -c $< > $@.plain 25 | 26 | $(ROM_IMG):$(ROM_ELF) 27 | @echo "[CT] $@" 28 | @cp $< $@ 29 | @$(CROSS_COMPILE)objcopy $@ -O binary 30 | 31 | $(ROM_ELF):$(AS_OBJS) $(C_OBJS) 32 | @echo "[LD] $@" 33 | @$(CROSS_COMPILE)ld $(LDPARAMS) -Map=$(ROM_MAP) -T linker.ld -o $(ROM_ELF) $(AS_OBJS) $(C_OBJS) 34 | 35 | clean: 36 | @echo "[Cleaning] bootrom" 37 | @rm -f *.o $(ROM_ELF) $(ROM_IMG) $(ROM_MAP) 38 | @rm -f *.o.plain 39 | 40 | -------------------------------------------------------------------------------- /vmm/emulate_clint.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | static uint64_t 10 | clint_read(uint64_t addr, int access_size, struct hart * hartptr, 11 | struct pm_region_operation * pmr) 12 | { 13 | 14 | __not_reach(); 15 | return 0; 16 | } 17 | 18 | 19 | 20 | static void 21 | clint_write(uint64_t addr, int access_size, uint64_t value, 22 | struct hart * hartptr, 23 | struct pm_region_operation * pmr) 
24 | { 25 | //__not_reach(); 26 | } 27 | 28 | 29 | void 30 | clint_init(struct virtual_machine * vm) 31 | { 32 | const char * clint_base_string = ini_get(vm->ini_config, "cpu", "clint_base"); 33 | const char * clint_size_string = ini_get(vm->ini_config, "cpu", "clint_size"); 34 | ASSERT(clint_base_string); 35 | ASSERT(clint_size_string); 36 | uint32_t clint_base = strtol(clint_base_string, NULL, 16); 37 | uint32_t clint_size = strtol(clint_size_string, NULL, 16); 38 | struct pm_region_operation clint_pmr = { 39 | .addr_low = clint_base, 40 | .addr_high = clint_base + clint_size, 41 | .pmr_read = clint_read, 42 | .pmr_write = clint_write, 43 | .pmr_desc = "clint.mmio" 44 | }; 45 | register_pm_region_operation(&clint_pmr); 46 | } 47 | 48 | -------------------------------------------------------------------------------- /vmm/pm_region.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #ifndef _PM_REGION_H 5 | #define _PM_REGION_H 6 | #include 7 | #include 8 | #include 9 | 10 | #define MAX_NR_PM_REGIONS 256 11 | 12 | struct pm_region_operation; 13 | 14 | typedef uint64_t pm_region_read_callback(uint64_t addr, int access_size, 15 | struct hart * hartptr, 16 | struct pm_region_operation * pmr); 17 | 18 | typedef void pm_region_write_callback(uint64_t addr, int access_size, uint64_t value, 19 | struct hart * hartptr, 20 | struct pm_region_operation * pmr); 21 | 22 | typedef void * pm_region_direct_address(uint64_t addr, 23 | struct hart * hartptr, 24 | struct pm_region_operation * pmr); 25 | 26 | struct pm_region_operation { 27 | uint32_t addr_low; 28 | uint32_t addr_high; 29 | pm_region_read_callback * pmr_read; 30 | pm_region_write_callback * pmr_write; 31 | pm_region_direct_address * pmr_direct; 32 | char * pmr_desc; 33 | }; 34 | 35 | void 36 | register_pm_region_operation(const struct pm_region_operation * pro); 37 | 38 | struct pm_region_operation * 39 | 
search_pm_region_callback(uint64_t guest_pa); 40 | 41 | void 42 | dump_memory_regions(void); 43 | 44 | #endif 45 | -------------------------------------------------------------------------------- /bootrom/uart.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | 5 | #include 6 | 7 | 8 | volatile uint8_t* uart16550; 9 | static uint32_t uart16550_reg_shift; 10 | //static uint32_t uart16550_clock = 1843200; 11 | 12 | void 13 | uart16550_init(void) 14 | { 15 | uart16550 = (uint8_t *)0x10000000; 16 | uart16550_reg_shift = 0; 17 | 18 | uart16550[UART_REG_IER << uart16550_reg_shift] = 0x00; // Disable all interrupts 19 | uart16550[UART_REG_LCR << uart16550_reg_shift] = 0x80; // Enable DLAB (set baud rate divisor) 20 | uart16550[UART_REG_DLL << uart16550_reg_shift] = 0x03; // Set divisor (lo byte) 21 | uart16550[UART_REG_DLM << uart16550_reg_shift] = 0x00; // (hi byte) 22 | uart16550[UART_REG_LCR << uart16550_reg_shift] = 0x03; // 8 bits, no parity, one stop bit 23 | uart16550[UART_REG_FCR << uart16550_reg_shift] = 0xC7; // Enable FIFO, clear them, with 14-byte threshold 24 | } 25 | 26 | void 27 | uart16550_putchar(uint8_t ch) 28 | { 29 | while ((uart16550[UART_REG_LSR << uart16550_reg_shift] & UART_REG_STATUS_TX) == 0); 30 | uart16550[UART_REG_QUEUE << uart16550_reg_shift] = ch; 31 | } 32 | 33 | int 34 | uart16550_getchar() 35 | { 36 | if (uart16550[UART_REG_LSR << uart16550_reg_shift] & UART_REG_STATUS_RX) 37 | return uart16550[UART_REG_QUEUE << uart16550_reg_shift]; 38 | return -1; 39 | } 40 | -------------------------------------------------------------------------------- /vmm/vm.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | 5 | #ifndef _VM_H 6 | #define _VM_H 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | struct virtual_machine { 15 | 16 | int nr_harts; 
17 | int boot_hart; 18 | struct hart * harts; 19 | 20 | // main memory 21 | int64_t main_mem_size; 22 | int64_t main_mem_base; 23 | void * main_mem_host_base; 24 | 25 | // bootrom 26 | int64_t bootrom_size; 27 | int64_t bootrom_base; 28 | void * bootrom_host_base; 29 | 30 | // the build buffer of FDT 31 | struct fdt_build_blob fdt; 32 | 33 | // the ini conifguration 34 | ini_t * ini_config; 35 | }; 36 | 37 | __attribute__((always_inline)) 38 | static inline struct hart * 39 | hart_by_id(struct virtual_machine * vm, int hart_id) 40 | { 41 | ASSERT(hart_id >= 0 && hart_id < vm->nr_harts); 42 | return &vm->harts[hart_id]; 43 | } 44 | 45 | 46 | #define MEGA(nr_mega) (1024 * 1024 * (nr_mega)) 47 | 48 | #define IMAGE_TYPE_BINARY 0x1 49 | #define IMAGE_TYPE_ELF 0x2 50 | 51 | void 52 | virtual_machine_init(struct virtual_machine * vm, ini_t *); 53 | 54 | void 55 | bootrom_init(struct virtual_machine * vm); 56 | 57 | void 58 | ram_init(struct virtual_machine * vm); 59 | 60 | void 61 | uart_init(void); 62 | 63 | void 64 | clint_init(struct virtual_machine * vm); 65 | 66 | #endif 67 | -------------------------------------------------------------------------------- /vmm/log.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2017 rxi 3 | * Copyright (c) 2019 Jie Zheng 4 | * 5 | * This library is free software; you can redistribute it and/or modify it 6 | * under the terms of the MIT license. See `log.c` for details. 7 | */ 8 | 9 | #ifndef LOG_H 10 | #define LOG_H 11 | 12 | #include 13 | #include 14 | 15 | #if defined(COLORED_OUTPUT) 16 | #define LOG_USE_COLOR 17 | #endif 18 | 19 | #define LOG_VERSION "0.1.0" 20 | 21 | typedef void (*log_LockFn)(void *udata, int lock); 22 | 23 | enum { 24 | LOG_TRACE, LOG_DEBUG, LOG_INFO, 25 | LOG_WARN, LOG_ERROR, LOG_FATAL, 26 | LOG_UART 27 | }; 28 | 29 | #define log_uart(...) log_log(LOG_UART, __FILE__, __LINE__, __VA_ARGS__) 30 | #define log_trace(...) 
log_log(LOG_TRACE, __FILE__, __LINE__, __VA_ARGS__) 31 | #define log_debug(...) log_log(LOG_DEBUG, __FILE__, __LINE__, __VA_ARGS__) 32 | #define log_info(...) log_log(LOG_INFO, __FILE__, __LINE__, __VA_ARGS__) 33 | #define log_warn(...) log_log(LOG_WARN, __FILE__, __LINE__, __VA_ARGS__) 34 | #define log_error(...) log_log(LOG_ERROR, __FILE__, __LINE__, __VA_ARGS__) 35 | #define log_fatal(...) log_log(LOG_FATAL, __FILE__, __LINE__, __VA_ARGS__) 36 | 37 | void log_set_udata(void *udata); 38 | void log_set_lock(log_LockFn fn); 39 | void log_set_fp(FILE *fp); 40 | void log_set_level(int level); 41 | void log_set_quiet(int enable); 42 | 43 | void log_log(int level, const char *file, int line, const char *fmt, ...); 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /vmm/hart_interrupt.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #include 5 | 6 | 7 | uint8_t 8 | interrupt_target_privilege_level(struct hart * hartptr, uint8_t vector) 9 | { 10 | uint8_t target_privilege_level = PRIVILEGE_LEVEL_MACHINE; 11 | ASSERT(vector < 12); 12 | if (hartptr->idelegation.dword & (1 << vector)) { 13 | target_privilege_level = PRIVILEGE_LEVEL_SUPERVISOR; 14 | } 15 | return target_privilege_level; 16 | } 17 | 18 | uint8_t 19 | is_interrupt_deliverable(struct hart * hartptr, uint8_t vector) 20 | { 21 | uint8_t target_pl = interrupt_target_privilege_level(hartptr, vector); 22 | uint8_t current_pl = hartptr->privilege_level; 23 | return (current_pl <= target_pl) && 24 | (hartptr->ipending.dword & (1 << vector)) && 25 | (hartptr->ienable.dword & (1 << vector)) && 26 | (((target_pl == PRIVILEGE_LEVEL_MACHINE) && hartptr->status.mie) || 27 | ((target_pl == PRIVILEGE_LEVEL_SUPERVISOR) && hartptr->status.sie)); 28 | } 29 | 30 | void 31 | deliver_interrupt(struct hart * hartptr, uint8_t vector) 32 | { 33 | // No need to check whether the interrupt is 
deliverable. 34 | // the caller should make sure the interrupt window open and then call this 35 | // to deliver the interrupt. 36 | //ASSERT(is_interrupt_deliverable(hartptr, vector)); 37 | uint8_t target_pl = interrupt_target_privilege_level(hartptr, vector); 38 | // use an unsigned shift: 1 << 31 overflows a signed 32-bit int (UB in C); 39 | // the interrupt bit of mcause/scause must be set without invoking UB. 40 | raise_trap_raw(hartptr, target_pl, ((uint32_t)1 << 31) | vector, 0); 41 | } 42 | -------------------------------------------------------------------------------- /vmm/hart_exception.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | * 4 | * 5 | * Hart Exception is a wrapper layer for exception delivery in hart 6 | */ 7 | 8 | #ifndef _HART_EXCEPTION_H 9 | #define _HART_EXCEPTION_H 10 | #include 11 | 12 | #define EXCEPTION_INSTRUCTION_ADDRESS_MISALIGN 0x0 13 | #define EXCEPTION_INSTRUCTION_ACCESS_FAULT 0x1 14 | #define EXCEPTION_ILLEEGAL_INSTRUCTION 0x2 15 | #define EXCEPTION_BREAKPOINT 0x3 16 | #define EXCEPTION_LOAD_ADDRESS_MISALIGN 0x4 17 | #define EXCEPTION_LOAD_ACCESS_FAULT 0x5 18 | #define EXCEPTION_STORE_ADDRESS_MISALIGN 0x6 19 | #define EXCEPTION_STORE_ACCESS_FAULT 0x7 20 | #define EXCEPTION_ECALL_FROM_UMODE 0x8 21 | #define EXCEPTION_ECALL_FROM_SMODE 0x9 22 | #define EXCEPTION_ECALL_FROM_MMODE 0xb 23 | #define EXCEPTION_INSTRUCTION_PAGE_FAULT 0xc 24 | #define EXCEPTION_LOAD_PAGE_FAULT 0xd 25 | #define EXCEPTION_STORE_PAGE_FAULT 0xf 26 | 27 | void 28 | raise_exception_internal(struct hart * hartptr, uint8_t exception_cause, 29 | uint32_t trap_value); 30 | 31 | #define raise_exception(hart, cause) { \ 32 | raise_exception_internal(hart, cause, 0); \ 33 | } 34 | 35 | 36 | #define raise_exception_with_tvalue(hart, cause, tval) { \ 37 | raise_exception_internal(hart, cause, tval); \ 38 | } 39 | 40 | #endif 41 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, Jie 
(Link) Zheng 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | -------------------------------------------------------------------------------- /vmm/csr_misc.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | 5 | #include 6 | 7 | static uint64_t 8 | get_host_tsc(void) 9 | { 10 | uint32_t eax = 0; 11 | uint32_t edx = 0; 12 | __asm__ volatile("rdtsc;" 13 | :"=a"(eax), "=d"(edx) 14 | : 15 | :"memory"); 16 | return ((uint64_t)eax) | (((uint64_t)edx) << 32); 17 | } 18 | 19 | static void 20 | csr_time_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 21 | { 22 | } 23 | 24 | static uint32_t 25 | csr_time_read(struct hart *hartptr, struct csr_entry *csr) 26 | { 27 | hartptr->tsc = get_host_tsc(); 28 | log_trace("hart id:%d, csr:time read:0x%x\n", 29 | hartptr->hart_id, (uint32_t)hartptr->tsc); 30 | return (uint32_t)hartptr->tsc; 31 | } 32 | 33 | static struct csr_registery_entry time_csr_entry = { 34 | .csr_addr = CSR_ADDRESS_TIME, 35 | .csr_registery = { 36 | .wpri_mask = WPRI_MASK_ALL, 37 | .read = csr_time_read, 38 | .write = csr_time_write 39 | } 40 | }; 41 | 42 | static void 43 | csr_timeh_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 44 | { 45 | } 46 | 47 | static uint32_t 48 | csr_timeh_read(struct hart *hartptr, struct csr_entry *csr) 49 | { 50 | log_trace("hart id:%d, csr:timeh read:0x%x\n", 51 | hartptr->hart_id, (uint32_t)(hartptr->tsc >> 32)); 52 | return (uint32_t)(hartptr->tsc >> 32); 53 | } 54 | 55 | static struct csr_registery_entry timeh_csr_entry = { 56 | .csr_addr = CSR_ADDRESS_TIMEH, 57 | .csr_registery = { 58 | .wpri_mask = WPRI_MASK_ALL, 59 | .read = csr_timeh_read, 60 | .write = csr_timeh_write 61 | } 62 | }; 63 | 64 | __attribute__((constructor)) static void 65 | csr_misc_init(void) 66 | { 67 | register_csr_entry(&time_csr_entry); 68 | register_csr_entry(&timeh_csr_entry); 69 | } 70 | 71 | -------------------------------------------------------------------------------- 
/vmm/debug_breakpoint.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #define MAX_BREAKPOINTS 256 11 | static uint32_t breakpoints[MAX_BREAKPOINTS]; 12 | static int nr_breakpoints = 0; 13 | 14 | static int 15 | addr_cmp(uint32_t * paddr0, uint32_t *paddr1) 16 | { 17 | return (int)(*paddr0 - *paddr1); 18 | } 19 | 20 | int 21 | add_breakpoint(uint32_t guest_addr) 22 | { 23 | if (guest_addr & 0x3) { 24 | return -2; 25 | } 26 | if (nr_breakpoints >= MAX_BREAKPOINTS) { 27 | return -1; 28 | } 29 | if (is_address_breakpoint(guest_addr)) { 30 | return 0; 31 | } 32 | breakpoints[nr_breakpoints] = guest_addr; 33 | nr_breakpoints += 1; 34 | INSERTION_SORT(uint32_t, breakpoints, nr_breakpoints, addr_cmp); 35 | return 0; 36 | } 37 | 38 | int 39 | is_address_breakpoint(uint32_t guest_addr) 40 | { 41 | uint32_t * pbp = SEARCH(uint32_t, breakpoints, nr_breakpoints, addr_cmp, 42 | &guest_addr); 43 | return !!pbp; 44 | } 45 | 46 | void 47 | dump_breakpoints(void) 48 | { 49 | printf(ANSI_COLOR_MAGENTA"There are %d breakpoints:\n", nr_breakpoints); 50 | int idx = 0; 51 | for (; idx < nr_breakpoints; idx++) { 52 | printf("0x%08x ", breakpoints[idx]); 53 | if ((idx + 1) % 8 == 0) { 54 | printf("\n"); 55 | } 56 | } 57 | if (idx % 8 != 0) { 58 | printf("\n"); 59 | } 60 | printf(ANSI_COLOR_RESET); 61 | } 62 | 63 | int 64 | add_breakpoint_command(struct hart * hartptr, int argc, char *argv[]) 65 | { 66 | if (argc == 0) { 67 | goto out; 68 | } 69 | uint32_t addr = strtol(argv[0], NULL, 16); 70 | int rc = add_breakpoint(addr); 71 | printf("adding breakpoint: 0x%x %s\n", addr, rc ? 
"fails" : "succeeds"); 72 | out: 73 | return ACTION_CONTINUE; 74 | } 75 | -------------------------------------------------------------------------------- /vmm/hart_def.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | 5 | #ifndef _HART_DEF_H 6 | #define _HART_DEF_H 7 | 8 | #if XLEN == 32 9 | #define REGISTER_TYPE uint32_t 10 | #elif XLEN == 64 11 | #define REGISTER_TYPE uint64_t 12 | #endif 13 | 14 | 15 | // XXX: make it big, so it doesn't need to be flushed when debuging the TC 16 | #define TRANSLATION_CACHE_SIZE (1024 * 64) 17 | // XXX: make it not that big, because it takes too much to search translated instruction. 18 | #define MAX_INSTRUCTIONS_TOTRANSLATE 512 19 | 20 | // reserve a small trunk of space to transfer control to vmm 21 | #define VMM_STACK_SIZE (1024 * 8) 22 | 23 | // for debug reason, put a magic word in each hart. 24 | #define HART_MAGIC_WORD 0xdeadbeef 25 | 26 | #define PRIVILEGE_LEVEL_USER 0x0 27 | #define PRIVILEGE_LEVEL_SUPERVISOR 0x1 28 | #define PRIVILEGE_LEVEL_RESERVED 0x2 29 | #define PRIVILEGE_LEVEL_MACHINE 0x3 30 | 31 | 32 | 33 | #define CSR_ADDRESS_MSCRATCH 0x340 34 | #define CSR_ADDRESS_MEPC 0x341 35 | #define CSR_ADDRESS_MSTATUS 0x300 36 | #define CSR_ADDRESS_MIDELEG 0x303 37 | #define CSR_ADDRESS_MEDELEG 0x302 38 | #define CSR_ADDRESS_MIP 0x344 39 | #define CSR_ADDRESS_MIE 0x304 40 | #define CSR_ADDRESS_MCAUSE 0x342 41 | #define CSR_ADDRESS_MTVAL 0x343 42 | #define CSR_ADDRESS_MTVEC 0x305 43 | 44 | 45 | #define CSR_ADDRESS_SIE 0x104 46 | #define CSR_ADDRESS_SIP 0x144 47 | #define CSR_ADDRESS_SSTATUS 0x100 48 | #define CSR_ADDRESS_STVEC 0x105 49 | #define CSR_ADDRESS_SATP 0x180 50 | 51 | #define CSR_ADDRESS_SCAUSE 0x142 52 | #define CSR_ADDRESS_STVAL 0x143 53 | #define CSR_ADDRESS_SEPC 0x141 54 | #define CSR_ADDRESS_SSCRATCH 0x140 55 | 56 | #define CSR_ADDRESS_TIME 0xC01 57 | #define CSR_ADDRESS_TIMEH 0xC81 58 | 59 | #endif 60 | 
-------------------------------------------------------------------------------- /test.vm.ini: -------------------------------------------------------------------------------- 1 | ; This is the config to boot a vm. 2 | [image] 3 | name = test.guest 4 | ; image path, alway required. 5 | ;path = guest/guest.rv32.img 6 | kernel = /root/workspace/riscv-pk-master/build/bbl.img 7 | 8 | ; type must be in [binary, elf], required, no default value 9 | ;type = binary 10 | ; the memory start to load the image into. if type == binary, load_offset must be provided. 11 | ; it must be in hexadecimal 12 | kernel_load_base = 0x80000000 13 | 14 | ; the boot argument delivered to Linux kernel 15 | ; earlycon or console must be specified here 16 | bootarg = console=uart8250,mmio,0x10000000 17 | 18 | ; init ramdisk arguments. 19 | initrd = /root/workspace/busybox-1.31.1/initrd.cpio 20 | initrd_load_base = 0x84000000 21 | 22 | [rom] 23 | rom_image = bootrom/bootrom.rv32.img 24 | rom_start = 0x1000 25 | rom_size = 0x1000000 26 | 27 | 28 | [cpu] 29 | ; number of cpus, always required. 30 | nr_cpus = 1 31 | ; the cpu index as the primary cpu to boot: optional, default:0 32 | boot_cpu = 0 33 | ; the program counter will be set to the value when the hart is reset or poweron 34 | ; also the rom image will be loaded to the address 35 | pc_on_reset = 0x4000 36 | ; the Core Local Interrupt Controller which generates and delivers machine 37 | ; and supervisor timer interrupts to HLIC 38 | clint_base = 0x02000000 39 | clint_size = 0x00010000 40 | ; the capacity of TLB, MUST BE POWER OF 2, and 16K is quite enough. 41 | itlb_size = 0x4000 42 | dtlb_size = 0x4000 43 | 44 | [mem] 45 | ; required 46 | ; in hexadecimal 47 | main_memory_start = 0x80000000 48 | ; required 49 | ; in decimal 50 | main_memory_size_in_mega = 1024 51 | 52 | [misc] 53 | ; dump the device tree blob, optional 54 | dump_dtb = ./zelda.dtb 55 | 56 | [debug] 57 | ; the log verbosity is an integer. 
58 | ;LOG_TRACE = 0, LOG_DEBUG = 1, LOG_INFO = 2, LOG_WARN = 3, LOG_ERROR = 4, 59 | ;LOG_FATAL = 5, LOG_UART = 6 60 | verbosity = 2 61 | ; initial breakpoints, optional. 62 | ;breakpoints = c011de8c 63 | ;breakpoints = c0031e00 64 | ;breakpoints = c00d0a9c 65 | ;breakpoints = c0089f68 66 | ;breakpoints = c0089658 67 | ;breakpoints = 0xc0002aac, 0xc000012c, 0xc0000130 68 | ;breakpoints = 0xc00000d0 69 | ;breakpoints = 0x80400138, 0x80400134 70 | ;breakpoints = 0x8040011c 71 | ;breakpoints = 0x80000040 0x80000044 72 | -------------------------------------------------------------------------------- /vmm/fdt.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #ifndef _FDT_H 5 | #define _FDT_H 6 | 7 | #include 8 | #include 9 | 10 | #define FDT_MAGIC 0xd00dfeed 11 | struct fdt_header { 12 | uint32_t magic; 13 | uint32_t totalsize; 14 | uint32_t off_dt_struct; 15 | uint32_t off_dt_strings; 16 | uint32_t off_mem_rsvmap; 17 | uint32_t version; 18 | uint32_t last_comp_version; 19 | uint32_t boot_cpuid_phys; 20 | uint32_t size_dt_strings; 21 | uint32_t size_dt_struct; 22 | }__attribute__((packed)); 23 | 24 | // the tokens of structure 25 | #define FDT_BEGIN_NODE 0x00000001 26 | #define FDT_END_NODE 0x00000002 27 | #define FDT_PROP 0x00000003 28 | #define FDT_NOP 0x00000004 29 | #define FDT_END 0x00000009 30 | 31 | struct fdt_property { 32 | uint32_t len; 33 | uint32_t nameoff; 34 | }__attribute__((packed)); 35 | 36 | 37 | struct fdt_build_blob { 38 | uint8_t * buffer; 39 | uint32_t buffer_size; 40 | uint32_t buffer_iptr; 41 | 42 | uint8_t * string_buffer; 43 | uint32_t string_buffer_size; 44 | uint32_t string_buffer_iptr; 45 | 46 | // auxiliary variables. 
47 | int harts_phandles[MAX_NR_HARTS]; 48 | int hart_interrupt_controllers_phandles[MAX_NR_HARTS]; 49 | int phandle_generator; 50 | }; 51 | 52 | static inline int 53 | generate_phandle(struct fdt_build_blob * blob) 54 | { 55 | int phandle = blob->phandle_generator; 56 | blob->phandle_generator += 1; 57 | return phandle; 58 | } 59 | 60 | void 61 | fdt_build_init(struct fdt_build_blob * blob, int buffer_size, 62 | int string_buffer_size); 63 | 64 | 65 | void 66 | fdt_begin_node(struct fdt_build_blob * blob, const char * node_name); 67 | 68 | void 69 | fdt_prop(struct fdt_build_blob * blob, const char * prop_name, 70 | uint32_t prop_len, void * prop_value); 71 | 72 | void 73 | fdt_end_node(struct fdt_build_blob * blob); 74 | 75 | 76 | void 77 | fdt_nop(struct fdt_build_blob * blob); 78 | 79 | 80 | void 81 | fdt_end(struct fdt_build_blob * blob); 82 | 83 | 84 | void 85 | fdt_blob_destroy(struct fdt_build_blob * blob); 86 | 87 | 88 | void 89 | fdt_init(struct fdt_build_blob * blob); 90 | 91 | void 92 | dump_fdt(uint8_t * dtb, int size); 93 | 94 | 95 | // DEVICES FDT BUILD ROUTINES 96 | void 97 | build_uart_fdt_node(struct fdt_build_blob * blob); 98 | 99 | #endif 100 | -------------------------------------------------------------------------------- /vmm/search.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #ifndef _SEARCH_H 5 | #define _SEARCH_H 6 | #include 7 | #include 8 | 9 | // this is to search the target in a binary way. 
10 | #define SEARCH(type, base, num, comp, target) ({ \ 11 | type * elem_found = (void *)0; \ 12 | int idx_low = 0; \ 13 | int idx_high = (num) - 1; \ 14 | for (; idx_low <= idx_high;) { \ 15 | type * elem_low = ELEM(type, base, idx_low); \ 16 | type * elem_high = ELEM(type, base, idx_high); \ 17 | if (!comp(target, elem_low)) { \ 18 | elem_found = elem_low; \ 19 | break; \ 20 | } else if (!comp(target, elem_high)) { \ 21 | elem_found = elem_high; \ 22 | break; \ 23 | } else { \ 24 | int idx_mid = (idx_low + idx_high) / 2; \ 25 | if (idx_mid == idx_low || idx_mid == idx_high) { \ 26 | break; \ 27 | } \ 28 | type * elem_mid = ELEM(type, base, idx_mid); \ 29 | if (comp(target, elem_mid) < 0) { \ 30 | idx_high = idx_mid; \ 31 | } else { \ 32 | idx_low = idx_mid; \ 33 | } \ 34 | } \ 35 | } \ 36 | elem_found; \ 37 | }) 38 | #endif 39 | -------------------------------------------------------------------------------- /vmm/hart_exception.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | /* 9 | * exception is one kind of synchronous trap. 10 | * the exception trap is finally taken at the privilege level by following the 11 | * selection logic: 12 | * 1). exception occurs in m-mode, trap taken in m-mode. 13 | * 2). exception occurs in s-mode, the exception is not delegated, trap taken in m-mode. 14 | * 3). exception occurs in s-mode, the exception is delegated, trap taken in s-mode. 15 | * 4). exception occurs in u-mode, the exception is not delegated, trap taken in m-mdoe. 16 | * 5). exception occurs in u-mode, the exception is delegated, trap taken in s-mode. 
17 | */ 18 | static uint8_t 19 | exception_target_privilege_level(struct hart * hartptr, uint8_t exception_number) 20 | { 21 | uint8_t current_pl = hartptr->privilege_level; 22 | uint8_t target_pl = PRIVILEGE_LEVEL_MACHINE; 23 | struct csr_entry * csr_medeleg = 24 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_MEDELEG]; 25 | if ((current_pl != PRIVILEGE_LEVEL_MACHINE) && 26 | (csr_medeleg->csr_blob & (1 << exception_number))) { 27 | target_pl = PRIVILEGE_LEVEL_SUPERVISOR; 28 | } 29 | return target_pl; 30 | } 31 | 32 | 33 | static uint8_t trap_value_map[16]; 34 | 35 | void 36 | raise_exception_internal(struct hart * hartptr, uint8_t exception_cause, 37 | uint32_t trap_value) 38 | { 39 | uint8_t target_pl = 40 | exception_target_privilege_level(hartptr, exception_cause); 41 | ASSERT(exception_cause < 16); 42 | raise_trap_raw(hartptr, target_pl, exception_cause, trap_value); 43 | 44 | } 45 | 46 | __attribute__((unused)) void 47 | trap_value_map_init(void) 48 | { 49 | memset(trap_value_map, 0x0, sizeof(trap_value_map)); 50 | trap_value_map[EXCEPTION_BREAKPOINT] = 1; 51 | trap_value_map[EXCEPTION_INSTRUCTION_ADDRESS_MISALIGN] = 1; 52 | trap_value_map[EXCEPTION_INSTRUCTION_ACCESS_FAULT] = 1; 53 | trap_value_map[EXCEPTION_ILLEEGAL_INSTRUCTION] = 1; 54 | trap_value_map[EXCEPTION_LOAD_ADDRESS_MISALIGN] = 1; 55 | trap_value_map[EXCEPTION_LOAD_ACCESS_FAULT] = 1; 56 | trap_value_map[EXCEPTION_STORE_ADDRESS_MISALIGN] = 1; 57 | trap_value_map[EXCEPTION_STORE_ACCESS_FAULT] = 1; 58 | trap_value_map[EXCEPTION_INSTRUCTION_PAGE_FAULT] = 1; 59 | trap_value_map[EXCEPTION_LOAD_PAGE_FAULT] = 1; 60 | trap_value_map[EXCEPTION_STORE_PAGE_FAULT] = 1; 61 | } 62 | -------------------------------------------------------------------------------- /vmm/pm_region.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | 7 | // XXX: Define MMIO operations globally on a per-vm basis. 
for simpicity purpose 8 | // I don't put it in a VM's blob. 9 | static struct pm_region_operation pmr_ops[MAX_NR_PM_REGIONS]; 10 | static int nr_pmr_ops = 0; 11 | 12 | void 13 | dump_memory_regions(void) 14 | { 15 | int idx = 0; 16 | log_info("dump memory layout\n"); 17 | for(idx = 0; idx < nr_pmr_ops; idx++) { 18 | log_info("\t[0x%08x - 0x%08x] %s\n", pmr_ops[idx].addr_low, 19 | pmr_ops[idx].addr_high, pmr_ops[idx].pmr_desc); 20 | } 21 | } 22 | int 23 | pm_region_operation_compare(const struct pm_region_operation * pmr1, 24 | const struct pm_region_operation * pmr2) 25 | { 26 | ASSERT(pmr1->addr_low <= pmr1->addr_high); 27 | ASSERT(pmr2->addr_low <= pmr2->addr_high); 28 | if (pmr1->addr_high <= pmr2->addr_low) { 29 | return -1; 30 | } 31 | if (pmr1->addr_low >= pmr2->addr_high) { 32 | return 1; 33 | } 34 | 35 | #if BUILD_TYPE == BUILD_TYPE_DEBUG 36 | if ((pmr1->addr_low <= pmr2->addr_low && 37 | pmr1->addr_high >= pmr2->addr_high) || 38 | (pmr2->addr_low <= pmr1->addr_low && 39 | pmr2->addr_high >= pmr1->addr_high)) { 40 | return 0; 41 | } 42 | __not_reach(); 43 | return 0; 44 | #else 45 | return 0; 46 | #endif 47 | } 48 | 49 | 50 | struct pm_region_operation * 51 | search_pm_region_callback(uint64_t guest_pa) 52 | { 53 | struct pm_region_operation target = { 54 | .addr_low = guest_pa, 55 | .addr_high = guest_pa + 1 56 | }; 57 | struct pm_region_operation * rc; 58 | rc = SEARCH(struct pm_region_operation, pmr_ops, nr_pmr_ops, 59 | pm_region_operation_compare, &target); 60 | #if BUILD_TYPE == BUILD_TYPE_DEBUG 61 | if (!rc) { 62 | log_debug("can not find a memory for address: 0x%x\n", guest_pa); 63 | } 64 | #endif 65 | return rc; 66 | } 67 | 68 | void 69 | register_pm_region_operation(const struct pm_region_operation * pmr) 70 | { 71 | ASSERT(pmr->pmr_read && pmr->pmr_write); 72 | ASSERT(!search_pm_region_callback(pmr->addr_low)) 73 | ASSERT(nr_pmr_ops < MAX_NR_PM_REGIONS); 74 | memcpy(&pmr_ops[nr_pmr_ops], pmr, sizeof(struct pm_region_operation)); 75 | nr_pmr_ops 
+= 1; 76 | SORT(struct pm_region_operation, pmr_ops, nr_pmr_ops, pm_region_operation_compare); 77 | { 78 | ASSERT(search_pm_region_callback(pmr->addr_low)); 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /vmm/mmu_tlb.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | 5 | #ifndef _MMU_TLB_H 6 | #define _MMU_TLB_H 7 | #include 8 | #include 9 | 10 | #define PAGE_SHIFT_4K 12 11 | #define PAGE_SHIFT_4M 22 12 | 13 | #define PAGE_MASK_4K 0xfffff000 14 | #define PAGE_MASK_4M 0xffc00000 15 | 16 | struct sv32_pte { 17 | uint32_t valid:1; 18 | uint32_t read:1; 19 | uint32_t write:1; 20 | uint32_t execute:1; 21 | uint32_t user_access:1; 22 | uint32_t global:1; 23 | uint32_t accessed:1; 24 | uint32_t dirty:1; 25 | uint32_t rsw:2; 26 | uint32_t ppn0:10; 27 | uint32_t ppn1:10; 28 | uint32_t ppn2:2; // we don't physical address space beyond 4GB, so ignore these two bits 29 | }__attribute__((packed)); 30 | // WE DESIGN TLB AS ONE WAY SET ASSOCIATIVE ADDRESS TRANSLATION CACHE 31 | // FIXME: WE DON'T SUPPORT ASID, WE MAY SUPPORT IT ONEDAY 32 | 33 | struct tlb_entry { 34 | // PAGE_SHIFT == 12(4K page) or 22() 35 | uint32_t va_tag; // va << PAGE_SHIFT 36 | uint32_t pa_tag; // pa << PAGE_SHIFT 37 | uint32_t page_mask; 38 | uint32_t entry_valid:1; 39 | struct sv32_pte * level1_pte; 40 | struct sv32_pte * level2_pte; 41 | struct pm_region_operation * pmr; 42 | }; 43 | 44 | 45 | void 46 | invalidate_tlb(struct tlb_entry * tlb_base, int tlb_cap); 47 | 48 | 49 | #define VA_TO_4K_TLB_ENTRY(tlb_base, tlb_cap, va) ({ \ 50 | uint32_t __offset = ((uint32_t)(va)) >> PAGE_SHIFT_4K; \ 51 | __offset &= ((tlb_cap) - 1); \ 52 | ((struct tlb_entry *)tlb_base) + __offset; \ 53 | }) 54 | 55 | 56 | #define VA_TO_4M_TLB_ENTRY(tlb_base, tlb_cap, va) ({ \ 57 | uint32_t __offset = ((uint32_t)(va)) >> PAGE_SHIFT_4M; \ 58 | __offset &= ((tlb_cap) - 1); \ 59 | ((struct 
tlb_entry *)tlb_base) + __offset; \ 60 | }) 61 | 62 | 63 | #define VA_TO_TLB_ENTRY(tlb_base, tlb_cap, va) ({ \ 64 | struct tlb_entry * __entry = VA_TO_4K_TLB_ENTRY(tlb_base, tlb_cap, va); \ 65 | if (!__entry->entry_valid) { \ 66 | __entry = VA_TO_4M_TLB_ENTRY(tlb_base, tlb_cap, va); \ 67 | } \ 68 | __entry->entry_valid ? __entry : NULL; \ 69 | }) 70 | 71 | 72 | #endif 73 | -------------------------------------------------------------------------------- /vmm/hart_util.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | * 4 | * This file defines the wrapper to better access hart CSRs and other hart 5 | * local resource 6 | */ 7 | 8 | #ifndef _HART_UTIL_H 9 | #define _HART_UTIL_H 10 | #include 11 | #include 12 | #include 13 | 14 | static inline uint32_t 15 | get_hart_mepc(struct hart * hartptr) 16 | { 17 | struct csr_entry * csr = 18 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_MEPC]; 19 | return csr->csr_blob; 20 | } 21 | 22 | static inline void 23 | adjust_pc_upon_mret(struct hart * hartptr) 24 | { 25 | uint32_t mepc = get_hart_mepc(hartptr); 26 | hartptr->pc = mepc; 27 | log_debug("machine mode returns to:0x%x\n", mepc); 28 | } 29 | 30 | static inline void 31 | adjust_pc_upon_sret(struct hart * hartptr) 32 | { 33 | struct csr_entry * csr = 34 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_SEPC]; 35 | uint32_t sepc = csr->csr_blob; 36 | hartptr->pc = sepc; 37 | log_debug("supervisor mode returns to:0x%x\n", sepc); 38 | } 39 | 40 | static inline void 41 | adjust_mstatus_upon_mret(struct hart * hartptr) 42 | { 43 | uint8_t mpp = hartptr->status.mpp; 44 | ASSERT(mpp == PRIVILEGE_LEVEL_USER || 45 | mpp == PRIVILEGE_LEVEL_SUPERVISOR || 46 | mpp == PRIVILEGE_LEVEL_MACHINE); 47 | hartptr->privilege_level = mpp; 48 | hartptr->status.mpp = PRIVILEGE_LEVEL_USER; 49 | hartptr->status.mie = hartptr->status.mpie; 50 | hartptr->status.mpie = 1; 51 | #if 0 52 | struct csr_entry * csr 
= 53 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_MSTATUS]; 54 | uint8_t mpp = (csr->csr_blob >> 11) & 0x3; 55 | ASSERT(mpp == PRIVILEGE_LEVEL_USER || 56 | mpp == PRIVILEGE_LEVEL_SUPERVISOR || 57 | mpp == PRIVILEGE_LEVEL_MACHINE); 58 | csr->csr_blob &= ~(3 << 11); 59 | csr->csr_blob |= PRIVILEGE_LEVEL_USER << 11; 60 | uint8_t mpie = (csr->csr_blob >> 7) & 0x1; 61 | csr->csr_blob |= 1 << 7; 62 | csr->csr_blob &= ~(1 << 3); 63 | csr->csr_blob |= mpie << 3; 64 | #endif 65 | } 66 | 67 | static inline void 68 | adjust_mstatus_upon_sret(struct hart * hartptr) 69 | { 70 | uint8_t spp = hartptr->status.spp; 71 | ASSERT(spp == PRIVILEGE_LEVEL_USER || 72 | spp == PRIVILEGE_LEVEL_SUPERVISOR); 73 | hartptr->privilege_level = spp; 74 | hartptr->status.spp = PRIVILEGE_LEVEL_USER; 75 | hartptr->status.sie = hartptr->status.spie; 76 | hartptr->status.spie = 1; 77 | } 78 | 79 | static inline void 80 | assert_hart_running_in_mmode(struct hart * hartptr) 81 | { 82 | if (hartptr->privilege_level != PRIVILEGE_LEVEL_MACHINE) { 83 | raise_exception(hartptr, EXCEPTION_ILLEEGAL_INSTRUCTION); 84 | } 85 | } 86 | 87 | static inline void 88 | assert_hart_running_in_smode(struct hart * hartptr) 89 | { 90 | if (hartptr->privilege_level != PRIVILEGE_LEVEL_SUPERVISOR) { 91 | raise_exception(hartptr, EXCEPTION_ILLEEGAL_INSTRUCTION); 92 | } 93 | } 94 | 95 | #endif 96 | -------------------------------------------------------------------------------- /vmm/log.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017 rxi 3 | * Copyright (c) 2019 Jie Zheng 4 | * 5 | * Permission is hereby granted, free of charge, to any person obtaining a copy 6 | * of this software and associated documentation files (the "Software"), to 7 | * deal in the Software without restriction, including without limitation the 8 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | * sell copies of the Software, and to permit 
persons to whom the Software is 10 | * furnished to do so, subject to the following conditions: 11 | * 12 | * The above copyright notice and this permission notice shall be included in 13 | * all copies or substantial portions of the Software. 14 | * 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 | * IN THE SOFTWARE. 22 | */ 23 | 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | #include "log.h" 31 | 32 | static struct { 33 | void *udata; 34 | log_LockFn lock; 35 | FILE *fp; 36 | int level; 37 | int quiet; 38 | } L; 39 | 40 | 41 | __attribute__((unused)) 42 | static const char *level_names[] = { 43 | "TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL", "UART16550" 44 | }; 45 | 46 | #ifdef LOG_USE_COLOR 47 | static const char *level_colors[] = { 48 | "\x1b[94m", "\x1b[36m", "\x1b[32m", 49 | "\x1b[33m", "\x1b[31m", "\x1b[35m", 50 | "\x1b[37m" 51 | }; 52 | #endif 53 | 54 | void log_set_udata(void *udata) { 55 | L.udata = udata; 56 | } 57 | 58 | 59 | void log_set_lock(log_LockFn fn) { 60 | L.lock = fn; 61 | } 62 | 63 | 64 | void log_set_fp(FILE *fp) { 65 | L.fp = fp; 66 | } 67 | 68 | 69 | void log_set_level(int level) { 70 | L.level = level; 71 | } 72 | 73 | 74 | void log_set_quiet(int enable) { 75 | L.quiet = enable ? 1 : 0; 76 | } 77 | 78 | 79 | void log_log(int level, const char *file, int line, const char *fmt, ...) 
{ 80 | if (level < L.level) { 81 | return; 82 | } 83 | /* Log to stdout */ 84 | if (!L.quiet) { 85 | va_list args; 86 | #ifdef LOG_USE_COLOR 87 | if (level == LOG_UART) { 88 | fprintf(stdout, "%s[%s]\x1b[0m ", 89 | level_colors[level], level_names[level]); 90 | } else { 91 | fprintf(stdout, "%s%-5s\x1b[0m \x1b[90m%s:%d:\x1b[0m ", 92 | level_colors[level], level_names[level], file, line); 93 | } 94 | #else 95 | if (level == LOG_UART) { 96 | fprintf(stdout, "[%s] ", level_names[level]); 97 | } else { 98 | fprintf(stdout, "%-5s %s:%d: ", level_names[level], file, line); 99 | } 100 | #endif 101 | va_start(args, fmt); 102 | vfprintf(stdout, fmt, args); 103 | va_end(args); 104 | fflush(stdout); 105 | } 106 | 107 | } 108 | 109 | __attribute__((constructor)) static void 110 | logging_init(void) 111 | { 112 | log_set_level(LOGGING_LEVEL); 113 | } 114 | -------------------------------------------------------------------------------- /vmm/emulate_rom.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | static uint64_t 10 | bootrom_read(uint64_t addr, int access_size, struct hart * hartptr, 11 | struct pm_region_operation * pmr) 12 | { 13 | uint64_t val = 0; 14 | struct virtual_machine * vm = hartptr->vmptr; 15 | void * memory_access_base = vm->bootrom_host_base + (addr - pmr->addr_low); 16 | switch (access_size) 17 | { 18 | #define _(size, type) \ 19 | case size: \ 20 | val = *(type *)memory_access_base; \ 21 | break 22 | _(1, uint8_t); 23 | _(2, uint16_t); 24 | _(4, uint32_t); 25 | _(8, uint64_t); 26 | default: 27 | __not_reach(); 28 | break; 29 | #undef _ 30 | } 31 | return val; 32 | } 33 | 34 | 35 | static void 36 | bootrom_write(uint64_t addr, int access_size, uint64_t value, 37 | struct hart * hartptr, 38 | struct pm_region_operation * pmr) 39 | { 40 | struct virtual_machine * vm = hartptr->vmptr; 41 | void * memory_access_base = 
vm->bootrom_host_base + (addr - pmr->addr_low); 42 | switch (access_size) 43 | { 44 | #define _(size, type) \ 45 | case size: \ 46 | *(type *)memory_access_base = (type)value; \ 47 | break 48 | _(1, uint8_t); 49 | _(2, uint16_t); 50 | _(4, uint32_t); 51 | _(8, uint64_t); 52 | default: 53 | __not_reach(); 54 | break; 55 | #undef _ 56 | } 57 | } 58 | 59 | void 60 | bootrom_init(struct virtual_machine * vm) 61 | { 62 | // Preallocate the memory area of BOOTROM 63 | const char * rom_start_string = ini_get(vm->ini_config, "rom", "rom_start"); 64 | const char * rom_size_string = ini_get(vm->ini_config, "rom", "rom_size"); 65 | ASSERT(rom_start_string); 66 | ASSERT(rom_size_string); 67 | vm->bootrom_base = strtol(rom_start_string, NULL, 16); 68 | vm->bootrom_size = strtol(rom_size_string, NULL, 16); 69 | vm->bootrom_host_base = preallocate_physical_memory(vm->bootrom_size); 70 | ASSERT(vm->bootrom_host_base); 71 | 72 | // register physical memory region 73 | struct pm_region_operation bootrom_pmr = { 74 | .addr_low = vm->bootrom_base, 75 | .addr_high = vm->bootrom_base + vm->bootrom_size, 76 | .pmr_read = bootrom_read, 77 | .pmr_write = bootrom_write, 78 | .pmr_desc = "bootrom" 79 | }; 80 | register_pm_region_operation(&bootrom_pmr); 81 | 82 | // load the rom image to 0x4000. 
83 | const char * rom_image = ini_get(vm->ini_config, "rom", "rom_image"); 84 | const char * pc_on_reset_string = ini_get(vm->ini_config, "cpu", "pc_on_reset"); 85 | ASSERT(rom_image); 86 | ASSERT(pc_on_reset_string); 87 | uint32_t pc_on_reset = strtol(pc_on_reset_string, NULL, 16); 88 | ASSERT(!preload_binary_image(vm->bootrom_host_base + pc_on_reset - vm->bootrom_base, 89 | vm->bootrom_size - pc_on_reset + vm->bootrom_base, 90 | rom_image)); 91 | } 92 | -------------------------------------------------------------------------------- /vmm/vm.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | static void 16 | device_init(struct virtual_machine * vm) 17 | { 18 | clint_init(vm); 19 | uart_init(); 20 | } 21 | 22 | static void 23 | hart_tlb_init(struct hart * hartptr) 24 | { 25 | hartptr->itlb = aligned_alloc(64, sizeof(struct tlb_entry) * hartptr->itlb_cap); 26 | hartptr->dtlb = aligned_alloc(64, sizeof(struct tlb_entry) * hartptr->dtlb_cap); 27 | ASSERT(hartptr->itlb && hartptr->dtlb); 28 | invalidate_tlb(hartptr->itlb, hartptr->itlb_cap); 29 | invalidate_tlb(hartptr->dtlb, hartptr->dtlb_cap); 30 | } 31 | 32 | static void 33 | cpu_init(struct virtual_machine * vm) 34 | { 35 | const char * nr_cpus_string = ini_get(vm->ini_config, "cpu", "nr_cpus"); 36 | const char * boot_cpu_string = ini_get(vm->ini_config, "cpu", "boot_cpu"); 37 | ASSERT(nr_cpus_string); 38 | vm->nr_harts = strtol(nr_cpus_string, NULL, 10); 39 | vm->boot_hart = 0; 40 | if (boot_cpu_string) { 41 | vm->boot_hart = strtol(boot_cpu_string, NULL, 10); 42 | } 43 | vm->harts = aligned_alloc(64, vm->nr_harts * sizeof(struct hart)); 44 | ASSERT(vm->harts); 45 | 46 | const char * pc_on_reset_string = ini_get(vm->ini_config, "cpu", "pc_on_reset"); 47 | ASSERT(pc_on_reset_string); 48 | 
uint32_t pc_on_reset = strtol(pc_on_reset_string, NULL, 16); 49 | 50 | int idx = 0; 51 | for (; idx < vm->nr_harts; idx++) { 52 | struct hart * hartptr = hart_by_id(vm, idx); 53 | hart_init(hartptr, idx); 54 | hartptr->pc = pc_on_reset; 55 | hartptr->vmptr = vm; 56 | } 57 | 58 | const char * itlb_size_string = ini_get(vm->ini_config, "cpu", "itlb_size"); 59 | const char * dtlb_size_string = ini_get(vm->ini_config, "cpu", "dtlb_size"); 60 | uint32_t itlb_size = strtol(itlb_size_string, NULL, 16); 61 | uint32_t dtlb_size = strtol(dtlb_size_string, NULL, 16); 62 | log_debug("itlb size:%d dtlb size:%d\n", itlb_size, dtlb_size); 63 | for (idx = 0; idx < vm->nr_harts; idx++) { 64 | struct hart * hartptr = hart_by_id(vm, idx); 65 | hartptr->itlb_cap = itlb_size; 66 | hartptr->dtlb_cap = dtlb_size; 67 | hart_tlb_init(hartptr); 68 | } 69 | } 70 | 71 | static void 72 | misc_init(struct virtual_machine * vm) 73 | { 74 | // Load breakpoints if there is any. 75 | char * breakpoints = (char *)ini_get(vm->ini_config, "debug", "breakpoints"); 76 | char delimiter[] = " "; 77 | if (breakpoints) { 78 | char * bp = strtok(breakpoints, delimiter); 79 | while (bp) { 80 | add_breakpoint(strtol(bp, NULL, 16)); 81 | bp = strtok(NULL, delimiter); 82 | } 83 | } 84 | } 85 | 86 | void 87 | virtual_machine_init(struct virtual_machine * vm, ini_t * ini) 88 | { 89 | memset(vm, 0x0, sizeof(struct virtual_machine)); 90 | // nothing can precede ini configuration 91 | vm->ini_config = ini; 92 | 93 | bootrom_init(vm); 94 | ram_init(vm); 95 | cpu_init(vm); 96 | device_init(vm); 97 | misc_init(vm); 98 | fdt_init(&vm->fdt); 99 | 100 | dump_memory_regions(); 101 | } 102 | 103 | -------------------------------------------------------------------------------- /vmm/mmu.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | uint8_t 10 | mmu_read8(struct hart * hartptr, 
uint32_t location) 11 | { 12 | return vmread8(hartptr, location); 13 | } 14 | 15 | uint16_t 16 | mmu_read16(struct hart * hartptr, uint32_t location) 17 | { 18 | return vmread16(hartptr, location); 19 | } 20 | 21 | 22 | uint32_t 23 | mmu_read32(struct hart * hartptr, uint32_t location) 24 | { 25 | return vmread32(hartptr, location); 26 | } 27 | 28 | uint32_t 29 | mmu_read32_aligned(struct hart * hartptr, uint32_t location) 30 | { 31 | if (location & 0x3) { 32 | raise_exception_with_tvalue(hartptr, EXCEPTION_LOAD_ADDRESS_MISALIGN, 33 | location); 34 | } 35 | return vmread32(hartptr, location); 36 | } 37 | 38 | 39 | 40 | void 41 | mmu_write8(struct hart * hartptr, uint32_t location, uint8_t value) 42 | { 43 | vmwrite8(hartptr, location, value); 44 | } 45 | 46 | void 47 | mmu_write16(struct hart * hartptr, uint32_t location, uint16_t value) 48 | { 49 | vmwrite16(hartptr, location, value); 50 | } 51 | 52 | 53 | void 54 | mmu_write32(struct hart * hartptr, uint32_t location, uint32_t value) 55 | { 56 | vmwrite32(hartptr, location, value); 57 | } 58 | 59 | 60 | void 61 | mmu_write32_aligned(struct hart * hartptr, uint32_t location, uint32_t value) 62 | { 63 | if (location & 0x3) { 64 | raise_exception_with_tvalue(hartptr, EXCEPTION_LOAD_ADDRESS_MISALIGN, 65 | location); 66 | } 67 | vmwrite32(hartptr, location, value); 68 | } 69 | 70 | /* 71 | * CAVEATS: 72 | * https://github.com/riscv/riscv-isa-manual/issues/486 73 | * c0000134: 18061073 csrw satp,a2 74 | * c0000138: 12000073 sfence.vma 75 | * c000013c: 00008067 ret 76 | * 77 | */ 78 | uint32_t 79 | mmu_instruction_read32(struct hart * hartptr, uint32_t instruction_va) 80 | { 81 | struct csr_entry * csr = &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_SATP]; 82 | // MAKE SURE PAGING IS ENABLED IN CURRENT MODE 83 | if (hartptr->privilege_level < PRIVILEGE_LEVEL_MACHINE && 84 | csr->csr_blob & 0x80000000) { 85 | struct tlb_entry * entry = VA_TO_TLB_ENTRY(hartptr->itlb, 86 | hartptr->itlb_cap, 87 | instruction_va); 
88 | if (!entry) { 89 | // HIT ITLB MISS, PERFORME A PAGE TABLE WALK AND LOAD THE TLB CACHE 90 | walk_page_table(hartptr, instruction_va, hartptr->itlb, 91 | hartptr->itlb_cap); 92 | entry = VA_TO_TLB_ENTRY(hartptr->itlb, hartptr->itlb_cap, 93 | instruction_va); 94 | } 95 | if (!entry) { 96 | raise_exception_with_tvalue(hartptr, 97 | EXCEPTION_INSTRUCTION_PAGE_FAULT, 98 | instruction_va); 99 | __not_reach(); 100 | } 101 | return entry->pmr->pmr_read(entry->pa_tag | ((instruction_va & ~(entry->page_mask))), 102 | 4, hartptr, entry->pmr); 103 | } else { 104 | return direct_read32(hartptr, instruction_va); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /vmm/util.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #ifndef _UTIL_H 5 | #define _UTIL_H 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #define OFFSET_OF(structure, field) ((int64_t)(&(((structure *)0)->field))) 14 | #define CONTAINER_OF(ptr, structure, field) \ 15 | (structure *)(((uint64_t)(ptr)) - OFFSET_OF(structure, field)) 16 | 17 | #define MAX(a, b) (((a) > (b)) ? (a) : (b)) 18 | #define MIN(a, b) (((a) < (b)) ? (a) : (b)) 19 | 20 | #define LITTLE_ENDIAN32(v) ((((v) & 0xff) << 24) | \ 21 | (((v) & 0xff00) << 8) | \ 22 | (((v) & 0xff0000) >> 8) | \ 23 | (((v) & 0xff000000) >> 24)) 24 | 25 | #define BIG_ENDIAN32(v) LITTLE_ENDIAN32(v) 26 | 27 | 28 | #define ALIGNADDR(addr, align) \ 29 | ((void *)((((uint64_t)(addr)) & (align - 1)) ? ((((uint64_t)(addr)) & ~(align - 1)) + align) : (uint64_t)(addr))) 30 | 31 | #define ALIGNINT(var, align, size) \ 32 | ((size)((((size)(var)) & (align - 1)) ? 
static inline int32_t
sign_extend32(uint32_t data, int sign_bit)
{
    /*
     * Sign-extend 'data' from bit position 'sign_bit' (0..31) to a full
     * signed 32-bit value.
     *
     * FIX/idiom: the original looped over every bit from sign_bit to 31,
     * OR-ing the sign in one position at a time. A single mask OR produces
     * the identical result in O(1): when the sign bit is set, force all
     * bits at and above it to one; otherwise the value is already its own
     * extension.
     */
    if ((data >> sign_bit) & 1) {
        return (int32_t)(data | (0xFFFFFFFFu << sign_bit));
    }
    return (int32_t)data;
}
issue a FENCE.I instruction at its end of self-modifying code 10 | // LIKE JAVA. 11 | static void 12 | riscv_fence_i_translator(struct decoding * dec, struct prefetch_blob * blob, 13 | uint32_t instruction) 14 | { 15 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 16 | struct hart * hartptr = (struct hart *)blob->opaque; 17 | PRECHECK_TRANSLATION_CACHE(fence_i_instruction, blob); 18 | BEGIN_TRANSLATION(fence_i_instruction); 19 | __asm__ volatile("movq %%r12, %%rdi;" 20 | "movq $flush_translation_cache, %%rax;" 21 | SAVE_GUEST_CONTEXT_SWITCH_REGS() 22 | "call *%%rax;" 23 | RESTORE_GUEST_CONTEXT_SWITCH_REGS() 24 | PROCEED_TO_NEXT_INSTRUCTION() 25 | TRAP_TO_VMM(fence_i_instruction) 26 | : 27 | : 28 | :"memory"); 29 | BEGIN_PARAM_SCHEMA() 30 | PARAM32() 31 | END_PARAM_SCHEMA() 32 | END_TRANSLATION(fence_i_instruction); 33 | BEGIN_PARAM(fence_i_instruction) 34 | instruction_linear_address 35 | END_PARAM() 36 | COMMIT_TRANSLATION(fence_i_instruction, hartptr, instruction_linear_address); 37 | // Stop translation because after the fence.i instruction, the 38 | // translation cache will be flushed 39 | blob->is_to_stop = 1; 40 | } 41 | 42 | 43 | // FENCE instruction is to order memeroy Read/Write and Device Input/Ouput 44 | // in our hart implementation(emulation), all instructions are exactly in order. 
45 | static void 46 | riscv_fence_translator(struct decoding * dec, struct prefetch_blob * blob, 47 | uint32_t instruction) 48 | { 49 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 50 | struct hart * hartptr = (struct hart *)blob->opaque; 51 | PRECHECK_TRANSLATION_CACHE(fence_instruction, blob); 52 | BEGIN_TRANSLATION(fence_instruction); 53 | __asm__ volatile(PROCEED_TO_NEXT_INSTRUCTION() 54 | END_INSTRUCTION(fence_instruction) 55 | : 56 | : 57 | :"memory"); 58 | BEGIN_PARAM_SCHEMA() 59 | PARAM32() 60 | END_PARAM_SCHEMA() 61 | END_TRANSLATION(fence_instruction); 62 | BEGIN_PARAM(fence_instruction) 63 | instruction_linear_address 64 | END_PARAM() 65 | COMMIT_TRANSLATION(fence_instruction, hartptr, instruction_linear_address); 66 | blob->next_instruction_to_fetch += 4; 67 | } 68 | 69 | static instruction_sub_translator per_funct3_handlers[8]; 70 | 71 | 72 | void 73 | riscv_fence_instructions_translation_entry(struct prefetch_blob * blob, 74 | uint32_t instruction) 75 | { 76 | struct decoding dec; 77 | instruction_decoding_per_type(&dec, instruction, ENCODING_TYPE_S); 78 | if (!per_funct3_handlers[dec.funct3]) { 79 | log_fatal("no handler for fence_instructions:%d @0x%x\n", 80 | dec.funct3, blob->next_instruction_to_fetch); 81 | } 82 | ASSERT(per_funct3_handlers[dec.funct3]); 83 | per_funct3_handlers[dec.funct3](&dec, blob, instruction); 84 | } 85 | 86 | 87 | __attribute__((constructor)) static void 88 | fence_constructor(void) 89 | { 90 | memset(per_funct3_handlers, 0x0, sizeof(per_funct3_handlers)); 91 | per_funct3_handlers[0x0] = riscv_fence_translator; 92 | per_funct3_handlers[0x1] = riscv_fence_i_translator; 93 | } 94 | 95 | -------------------------------------------------------------------------------- /bootrom/unittest.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | 5 | #ifndef _UNITTEST_H 6 | #define _UNITTEST_H 7 | #include 8 | #include 9 | 10 | 
#define TEST_R_R_R(intr, rd, rs1, rs2) \ 11 | { \ 12 | uint32_t __output_operand = 0; \ 13 | __asm__ volatile(#intr " %[RD], %[RS1], %[RS2];" \ 14 | :[RD]"=r"(__output_operand) \ 15 | :[RS1]"r"(rs1), [RS2]"r"(rs2) \ 16 | :"memory"); \ 17 | if (__output_operand == rd) { \ 18 | printk(ANSI_COLOR_GREEN"PASS"ANSI_COLOR_RESET" instruction [" #intr " "\ 19 | #rd ", " #rs1 ", " #rs2 "]\n"); \ 20 | } else { \ 21 | printk(ANSI_COLOR_RED"FAIL"ANSI_COLOR_RESET" instruction [" #intr " " \ 22 | #rd ", " #rs1 ", " #rs2 "]\n"); \ 23 | } \ 24 | } 25 | 26 | 27 | #define TEST_R_R_I(intr, rd, rs1, imm) \ 28 | { \ 29 | uint32_t __output_operand = 0; \ 30 | __asm__ volatile(#intr " %[RD], %[RS1], " #imm ";" \ 31 | :[RD]"=r"(__output_operand) \ 32 | :[RS1]"r"(rs1) \ 33 | :"memory"); \ 34 | if (__output_operand == rd) { \ 35 | printk(ANSI_COLOR_GREEN"PASS"ANSI_COLOR_RESET" instruction [" #intr " "\ 36 | #rd ", " #rs1 ", " #imm "]\n"); \ 37 | } else { \ 38 | printk(ANSI_COLOR_RED"FAIL"ANSI_COLOR_RESET" instruction [" #intr " " \ 39 | #rd ", " #rs1 ", " #imm "]\n"); \ 40 | } \ 41 | } 42 | 43 | 44 | #define MEM_LOAD_COMMON(instr, addr, ret) \ 45 | __asm__ volatile(#instr " %[RET], (%[ADDR]);" \ 46 | :[RET]"=r"(ret) \ 47 | :[ADDR]"r"(addr) \ 48 | :"memory") 49 | 50 | #define LB(addr, ret) MEM_LOAD_COMMON(lb, addr, ret) 51 | #define LBU(addr, ret) MEM_LOAD_COMMON(lbu, addr, ret) 52 | #define LH(addr, ret) MEM_LOAD_COMMON(lh, addr, ret) 53 | #define LHU(addr, ret) MEM_LOAD_COMMON(lhu, addr, ret) 54 | #define LW(addr, ret) MEM_LOAD_COMMON(lw, addr, ret) 55 | 56 | #define MEM_STORE_COMMON(instr, addr, value) \ 57 | __asm__ volatile(#instr " %[VALUE], (%[ADDR]);" \ 58 | : \ 59 | :[ADDR]"r"(addr), [VALUE]"r"(value) \ 60 | :"memory") 61 | 62 | #define SB(addr, value) MEM_STORE_COMMON(sb, addr, value) 63 | #define SH(addr, value) MEM_STORE_COMMON(sh, addr, value) 64 | #define SW(addr, value) MEM_STORE_COMMON(sw, addr, value) 65 | 66 | void 67 | unit_test(void); 68 | #endif 69 | 
-------------------------------------------------------------------------------- /vmm/emulate_uart.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | 8 | #define UART_16550_BASE 0x10000000 9 | 10 | 11 | #define UART_REG_QUEUE 0 // rx/tx fifo data 12 | #define UART_REG_DLL 0 // divisor latch (LSB) 13 | #define UART_REG_IER 1 // interrupt enable register 14 | #define UART_REG_DLM 1 // divisor latch (MSB) 15 | #define UART_REG_FCR 2 // fifo control register 16 | #define UART_REG_LCR 3 // line control register 17 | #define UART_REG_MCR 4 // modem control register 18 | #define UART_REG_LSR 5 // line status register 19 | #define UART_REG_MSR 6 // modem status register 20 | #define UART_REG_SCR 7 // scratch register 21 | #define UART_REG_STATUS_RX 0x01 22 | #define UART_REG_STATUS_TX 0x20 23 | 24 | #define UART_LSR_TEMT 0x40 25 | #define UART_LSR_THRE 0x20 26 | #define UART_BUFFER_SIZE 1024 27 | 28 | static uint8_t uart_buffer[UART_BUFFER_SIZE + 1]; 29 | static int32_t uart_buffer_ptr = 0; 30 | 31 | static uint64_t 32 | uart_mmio_read(uint64_t addr, int access_size, 33 | struct hart * hartptr, 34 | struct pm_region_operation * pmr) 35 | { 36 | uint8_t ret = 0; 37 | ASSERT(access_size == 1); 38 | switch (addr - UART_16550_BASE) 39 | { 40 | case UART_REG_LSR: 41 | // always ready to transmit 42 | ret = UART_LSR_TEMT | UART_LSR_THRE; 43 | break; 44 | default: 45 | //__not_reach(); 46 | break; 47 | } 48 | return ret; 49 | } 50 | 51 | static void 52 | uart_mmio_write(uint64_t addr, int access_size, uint64_t value, 53 | struct hart * hartptr, struct pm_region_operation * pmr) 54 | { 55 | if (access_size != 1) { 56 | return; 57 | } 58 | ASSERT(access_size == 1); 59 | switch (addr - UART_16550_BASE) 60 | { 61 | case UART_REG_FCR: 62 | case UART_REG_LCR: 63 | uart_buffer_ptr = 0; 64 | break; 65 | case UART_REG_QUEUE: 66 | uart_buffer[uart_buffer_ptr++] = 
(uint8_t)value; 67 | if (uart_buffer_ptr == UART_BUFFER_SIZE || '\n' == (uint8_t)value) { 68 | uart_buffer[uart_buffer_ptr] = '\x0'; 69 | log_uart("%s", uart_buffer); 70 | uart_buffer_ptr = 0x0; 71 | } 72 | break; 73 | default: 74 | //__not_reach(); 75 | break; 76 | } 77 | } 78 | 79 | void 80 | build_uart_fdt_node(struct fdt_build_blob * blob) 81 | { 82 | char node_name[64]; 83 | sprintf(node_name, "uart@%x", UART_16550_BASE); 84 | fdt_begin_node(blob, node_name); 85 | uint32_t clock_frequency = BIG_ENDIAN32(0x00384000); 86 | uint32_t interrupts = BIG_ENDIAN32(0x0000000a); 87 | uint32_t interrupts_parent = BIG_ENDIAN32(0x0000000d); 88 | fdt_prop(blob, "interrupts", 4, &interrupts); 89 | fdt_prop(blob, "interrupt-parent", 4, &interrupts_parent); 90 | fdt_prop(blob, "clock-frequency", 4, &clock_frequency); 91 | fdt_prop(blob, "compatible", strlen("ns16550a") + 1, "ns16550a"); 92 | uint32_t regs[4] = {BIG_ENDIAN32(0), BIG_ENDIAN32(UART_16550_BASE), 93 | BIG_ENDIAN32(0), BIG_ENDIAN32(0x100)}; 94 | fdt_prop(blob, "reg", 16, regs); 95 | fdt_end_node(blob); 96 | } 97 | 98 | void 99 | uart_init(void) 100 | { 101 | struct pm_region_operation uart_mmio_region = { 102 | .addr_low = UART_16550_BASE, 103 | .addr_high = UART_16550_BASE + 0x100, 104 | .pmr_read = uart_mmio_read, 105 | .pmr_write = uart_mmio_write, 106 | .pmr_desc = "uart.mmio" 107 | }; 108 | register_pm_region_operation(&uart_mmio_region); 109 | } 110 | -------------------------------------------------------------------------------- /vmm/mmu_pagewalker.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #define MAX_PTES 1024 11 | 12 | int 13 | pa_to_va(struct hart * hartptr, uint32_t pa, struct tlb_entry * tlb, 14 | int tlb_cap, uint32_t * va) 15 | { 16 | int idx = 0; 17 | struct csr_entry * csr = &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_SATP]; 18 | 
uint32_t satp = csr->csr_blob; 19 | ASSERT(satp & 0x80000000); 20 | uint32_t level1_base = satp << 12; 21 | struct pm_region_operation * level1_pmr; 22 | ASSERT((level1_pmr = search_pm_region_callback(level1_base))); 23 | struct sv32_pte * level1_pte = level1_pmr->pmr_direct(level1_base, hartptr, level1_pmr); 24 | for (idx = 0; idx < MAX_PTES; idx++) { 25 | struct sv32_pte * pte = level1_pte + idx; 26 | if (!pte->valid) { 27 | continue; 28 | } 29 | if (pte->read || pte->write || pte->execute) { 30 | // 4M paging matching 31 | uint32_t ppn_4m = ((*(uint32_t *)pte) >> 10) << 12; 32 | if ((ppn_4m & PAGE_MASK_4M) == (pa & PAGE_MASK_4M)) { 33 | *va = (pa & ~PAGE_MASK_4M) | (((uint32_t)idx) << 22); 34 | return 0; 35 | } 36 | } else { 37 | //dump_hart(hartptr); 38 | __not_reach(); 39 | } 40 | } 41 | return -1; 42 | } 43 | 44 | int 45 | walk_page_table(struct hart * hartptr, uint32_t va, struct tlb_entry * tlb, 46 | int tlb_cap) 47 | { 48 | struct csr_entry * csr = &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_SATP]; 49 | uint32_t satp = csr->csr_blob; 50 | ASSERT(satp & 0x80000000); 51 | uint32_t level1_base = satp << 12; 52 | uint32_t level1_entry_offset = ((va >> 22) & 0x3ff) * sizeof(struct sv32_pte); 53 | struct pm_region_operation * level1_pmr; 54 | ASSERT((level1_pmr = search_pm_region_callback(level1_base + level1_entry_offset))); 55 | struct sv32_pte * level1_pte = 56 | level1_pmr->pmr_direct(level1_base + level1_entry_offset, hartptr, level1_pmr); 57 | if (!level1_pte->valid) { 58 | return -1; 59 | } 60 | uint32_t level1_ppn = (*(const uint32_t *)level1_pte) >> 10; 61 | if (level1_pte->read || level1_pte->write || level1_pte->execute) { 62 | // this is a 4M hugepage 63 | struct tlb_entry * entry_4m = VA_TO_4M_TLB_ENTRY(tlb, tlb_cap, va); 64 | entry_4m->page_mask = PAGE_MASK_4M; 65 | entry_4m->va_tag = va & PAGE_MASK_4M; 66 | entry_4m->pa_tag = (level1_ppn << 12) & PAGE_MASK_4M; 67 | entry_4m->entry_valid = 1; 68 | entry_4m->level1_pte = level1_pte; 69 | 
entry_4m->level2_pte = NULL; 70 | entry_4m->pmr = search_pm_region_callback(entry_4m->pa_tag); 71 | ASSERT(entry_4m->pmr); 72 | } else { 73 | // This is a 4K page 74 | uint32_t level2_base = level1_ppn << 12; 75 | uint32_t level2_entry_offset = ((va >> 12) & 0x3ff) * sizeof(struct sv32_pte); 76 | struct pm_region_operation * level2_pmr; 77 | ASSERT((level2_pmr = search_pm_region_callback(level2_base + level2_entry_offset))); 78 | struct sv32_pte * level2_pte = 79 | level2_pmr->pmr_direct(level2_base + level2_entry_offset, hartptr, level2_pmr); 80 | if (!level2_pte->valid) { 81 | return -2; 82 | } 83 | uint32_t level2_ppn = (*(const uint32_t *)level2_pte) >> 10; 84 | 85 | struct tlb_entry * entry_4k = VA_TO_4K_TLB_ENTRY(tlb, tlb_cap, va); 86 | entry_4k->page_mask = PAGE_MASK_4K; 87 | entry_4k->va_tag = va & PAGE_MASK_4K; 88 | entry_4k->pa_tag = (level2_ppn << 12) & PAGE_MASK_4K; 89 | entry_4k->entry_valid = 1; 90 | entry_4k->level1_pte = level1_pte; 91 | entry_4k->level2_pte = level2_pte; 92 | entry_4k->pmr = search_pm_region_callback(entry_4k->pa_tag); 93 | ASSERT(entry_4k->pmr); 94 | } 95 | 96 | return 0; 97 | } 98 | 99 | 100 | 101 | -------------------------------------------------------------------------------- /vmm/translate_unconditional_jump_intrs.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | void 10 | riscv_jal_translator(struct prefetch_blob * blob, uint32_t instruction) 11 | { 12 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 13 | struct hart * hartptr = (struct hart *)blob->opaque; 14 | struct decoding dec; 15 | instruction_decoding_per_type(&dec, instruction, ENCODING_TYPE_UJ); 16 | int jump_target = instruction_linear_address + 17 | sign_extend32(dec.imm << 1, 20); 18 | 19 | { 20 | PRECHECK_TRANSLATION_CACHE(jal_instruction_without_target, blob); 21 | 
BEGIN_TRANSLATION(jal_instruction_without_target); 22 | __asm__ volatile("movl "PIC_PARAM(0)", %%edx;" 23 | "shl $2, %%edx;" 24 | "addq %%r15, %%rdx;" 25 | "movl "PIC_PARAM(1)", %%eax;" 26 | "movl %%eax, (%%rdx);" 27 | "movl "PIC_PARAM(2)", %%eax;" 28 | "movl %%eax, (%%r14);" 29 | RESET_ZERO_REGISTER() 30 | // FIXED: insert instructions to trap to VMM 31 | TRAP_TO_VMM(jal_instruction_without_target) 32 | : 33 | : 34 | :"memory", "%rax", "%rdx"); 35 | BEGIN_PARAM_SCHEMA() 36 | PARAM32() /*rd*/ 37 | PARAM32() /*pc + 4*/ 38 | PARAM32() /*unconditional jump_target of guest*/ 39 | END_PARAM_SCHEMA() 40 | END_TRANSLATION(jal_instruction_without_target); 41 | BEGIN_PARAM(jal_instruction_without_target) 42 | dec.rd_index, 43 | instruction_linear_address + 4, 44 | jump_target 45 | END_PARAM() 46 | COMMIT_TRANSLATION(jal_instruction_without_target, hartptr, 47 | instruction_linear_address); 48 | } 49 | 50 | blob->is_to_stop = 1; 51 | } 52 | 53 | void 54 | riscv_jalr_translator(struct prefetch_blob * blob, uint32_t instruction) 55 | { 56 | // for riscv jalr instruction, the jump target is calculated only at runtime 57 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 58 | struct hart * hartptr = (struct hart *)blob->opaque; 59 | struct decoding dec; 60 | instruction_decoding_per_type(&dec, instruction, ENCODING_TYPE_I); 61 | int32_t signed_offset = sign_extend32(dec.imm, 11); 62 | PRECHECK_TRANSLATION_CACHE(jalr_instruction, blob); 63 | BEGIN_TRANSLATION(jalr_instruction); 64 | __asm__ volatile("movl "PIC_PARAM(2)", %%edx;" 65 | "shl $2, %%edx;" 66 | "addq %%r15, %%rdx;" 67 | "movl (%%rdx), %%ebx;" 68 | "movl "PIC_PARAM(3)", %%eax;" 69 | "addl %%eax, %%ebx;" // <=== the jump target 70 | "btr $0x0, %%ebx;" 71 | "movl "PIC_PARAM(0)", %%edx;" 72 | "shl $2, %%edx;" 73 | "addq %%r15, %%rdx;" 74 | "movl "PIC_PARAM(1)", %%eax;" 75 | "movl %%eax, (%%rdx);" 76 | "movl %%ebx, (%%r14);" // Update the hart PC 77 | RESET_ZERO_REGISTER() 78 | 
TRAP_TO_VMM(jalr_instruction) 79 | : 80 | : 81 | :"memory", "%eax", "%ebx", "%ecx", "%edx"); 82 | BEGIN_PARAM_SCHEMA() 83 | PARAM32() /*rd index*/ 84 | PARAM32() /*pc + 4*/ 85 | PARAM32() /*rs1 index*/ 86 | PARAM32() /*imm: signed*/ 87 | END_PARAM_SCHEMA() 88 | END_TRANSLATION(jalr_instruction); 89 | BEGIN_PARAM(jalr_instruction) 90 | dec.rd_index, 91 | instruction_linear_address + 4, 92 | dec.rs1_index, 93 | signed_offset 94 | END_PARAM() 95 | COMMIT_TRANSLATION(jalr_instruction, hartptr, instruction_linear_address); 96 | blob->is_to_stop = 1; 97 | } 98 | -------------------------------------------------------------------------------- /bootrom/printk.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019-2020 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | 8 | #define UART_TX_BASE 0x10000000 9 | 10 | #define DEFAULT_RESOLVE_STACK 128 11 | 12 | static void 13 | uart_put_char(uint8_t value) 14 | { 15 | uart16550_putchar(value); 16 | } 17 | 18 | #define EBREAK() __asm__ volatile("ebreak;":::"memory") 19 | static void 20 | resolve_int32(int32_t qword) 21 | { 22 | uint8_t stack[DEFAULT_RESOLVE_STACK]; 23 | int iptr = 0; 24 | uint8_t mod = 0x0; 25 | if (qword < 0) { 26 | return; 27 | } 28 | while (qword && iptr < DEFAULT_RESOLVE_STACK) { 29 | mod = qword % 10; 30 | stack[iptr++] = '0' + mod; 31 | qword /= 10; 32 | } 33 | if (!iptr) { 34 | stack[iptr++] = '0'; 35 | } 36 | while (iptr > 0) { 37 | uart_put_char(stack[--iptr]); 38 | } 39 | } 40 | 41 | static void 42 | resolve_hex_uint32(uint32_t qword, uint8_t is_lowercase) 43 | { 44 | uint8_t stack[DEFAULT_RESOLVE_STACK]; 45 | uint8_t lower[] = "0123456789abcdef"; 46 | uint8_t upper[] = "0123456789ABCDEF"; 47 | int iptr = 0; 48 | int mod; 49 | while (qword && iptr < DEFAULT_RESOLVE_STACK) { 50 | mod = qword & 0xf; 51 | stack[iptr++] = is_lowercase ? 
lower[mod] : upper[mod]; 52 | qword = qword >> 4; 53 | } 54 | 55 | if (!iptr) { 56 | stack[iptr++] = '0'; 57 | } 58 | while (iptr > 0) { 59 | uart_put_char(stack[--iptr]); 60 | } 61 | } 62 | 63 | 64 | static void 65 | printk_mp_raw(const char * fmt, va_list arg_ptr) 66 | { 67 | const char * ptr = fmt; 68 | 69 | for (; *ptr; ptr++) { 70 | if (*ptr != '%') { 71 | uart_put_char(*ptr); 72 | } else { 73 | ptr++; 74 | switch (*ptr) 75 | { 76 | case 's': 77 | { 78 | char * string_ptr = va_arg(arg_ptr, char *); 79 | for (; *string_ptr; string_ptr++) { 80 | uart_put_char(*string_ptr); 81 | } 82 | } 83 | break; 84 | case 'c': 85 | { 86 | char char_arg = (char)va_arg(arg_ptr, uint32_t); 87 | uart_put_char(char_arg); 88 | } 89 | break; 90 | case 'd': 91 | { 92 | int32_t dword_arg = va_arg(arg_ptr, uint32_t); 93 | if (dword_arg < 0) { 94 | uart_put_char('-'); 95 | dword_arg = - dword_arg; 96 | } 97 | resolve_int32(dword_arg); 98 | } 99 | break; 100 | #if 0 101 | case 'q': // This is a new notation for quad word integer type 102 | { 103 | int64_t qword_arg = va_arg(arg_ptr, uint64_t); 104 | if (qword_arg < 0) { 105 | uart_put_char('-'); 106 | qword_arg = -qword_arg; 107 | } 108 | resolve_int32(qword_arg); 109 | } 110 | break; 111 | #endif 112 | case 'x': 113 | { 114 | uint32_t qword_arg = va_arg(arg_ptr, uint32_t); 115 | resolve_hex_uint32(qword_arg, 1); 116 | } 117 | break; 118 | case 'X': 119 | { 120 | uint32_t qword_arg = va_arg(arg_ptr, uint32_t); 121 | resolve_hex_uint32(qword_arg, 0); 122 | } 123 | break; 124 | default: 125 | break; 126 | } 127 | } 128 | } 129 | } 130 | 131 | 132 | void 133 | printk_no_prefix(const char *fmt, ...) 
134 | { 135 | va_list args; 136 | va_start(args, fmt); 137 | printk_mp_raw(fmt, args); 138 | va_end(args); 139 | } 140 | 141 | 142 | 143 | 144 | -------------------------------------------------------------------------------- /vmm/csr_machine_level_pmp.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #include 5 | 6 | #define PMPADDR_BASE 0x3b0 7 | #define NR_PMPADDR 16 8 | 9 | #define PMPCFG_BASE 0x3a0 10 | #define NR_PMPCFG 4 11 | 12 | static void 13 | csr_pmpaddr_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 14 | { 15 | csr->csr_blob = value; 16 | log_debug("hart id:%d, csr:pmpaddr%d write 0x:%x\n", 17 | hartptr->hart_id, 18 | csr->csr_addr - PMPADDR_BASE, 19 | csr->csr_blob); 20 | } 21 | 22 | static uint32_t 23 | csr_pmpaddr_read(struct hart *hartptr, struct csr_entry *csr) 24 | { 25 | log_debug("hart id:%d, csr:pmpaddr%d read:0x%x\n", 26 | hartptr->hart_id, 27 | csr->csr_addr - PMPADDR_BASE, 28 | csr->csr_blob); 29 | return csr->csr_blob; 30 | } 31 | 32 | static void 33 | csr_pmpaddr_reset(struct hart *hartptr, struct csr_entry * csr) 34 | { 35 | csr->csr_blob = 0x0; 36 | } 37 | 38 | #define DEFINE_PMPADDR_ENTRY(idx) \ 39 | static struct csr_registery_entry pmpaddr##idx##_csr_entry = { \ 40 | .csr_addr = PMPADDR_BASE + idx, \ 41 | .csr_registery = { \ 42 | .wpri_mask = WPRI_MASK_ALL, \ 43 | .reset = csr_pmpaddr_reset, \ 44 | .read = csr_pmpaddr_read, \ 45 | .write = csr_pmpaddr_write \ 46 | } \ 47 | } 48 | 49 | DEFINE_PMPADDR_ENTRY(0); 50 | DEFINE_PMPADDR_ENTRY(1); 51 | DEFINE_PMPADDR_ENTRY(2); 52 | DEFINE_PMPADDR_ENTRY(3); 53 | DEFINE_PMPADDR_ENTRY(4); 54 | DEFINE_PMPADDR_ENTRY(5); 55 | DEFINE_PMPADDR_ENTRY(6); 56 | DEFINE_PMPADDR_ENTRY(7); 57 | DEFINE_PMPADDR_ENTRY(8); 58 | DEFINE_PMPADDR_ENTRY(9); 59 | DEFINE_PMPADDR_ENTRY(10); 60 | DEFINE_PMPADDR_ENTRY(11); 61 | DEFINE_PMPADDR_ENTRY(12); 62 | DEFINE_PMPADDR_ENTRY(13); 63 | DEFINE_PMPADDR_ENTRY(14); 
64 | DEFINE_PMPADDR_ENTRY(15); 65 | 66 | static void 67 | csr_pmpcfg_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 68 | { 69 | csr->csr_blob = value; 70 | log_debug("hart id:%d, csr:pmpcfg%d write 0x:%x\n", 71 | hartptr->hart_id, 72 | csr->csr_addr - PMPCFG_BASE, 73 | csr->csr_blob); 74 | } 75 | 76 | static uint32_t 77 | csr_pmpcfg_read(struct hart *hartptr, struct csr_entry *csr) 78 | { 79 | log_debug("hart id:%d, csr:pmpcfg%d read:0x%x\n", 80 | hartptr->hart_id, 81 | csr->csr_addr - PMPCFG_BASE, 82 | csr->csr_blob); 83 | return csr->csr_blob; 84 | } 85 | 86 | static void 87 | csr_pmpcfg_reset(struct hart *hartptr, struct csr_entry * csr) 88 | { 89 | csr->csr_blob = 0x0; 90 | } 91 | 92 | #define DEFINE_PMPCFG_ENTRY(idx) \ 93 | static struct csr_registery_entry pmpcfg##idx##_csr_entry = { \ 94 | .csr_addr = PMPCFG_BASE + idx, \ 95 | .csr_registery = { \ 96 | .wpri_mask = WPRI_MASK_ALL, \ 97 | .reset = csr_pmpcfg_reset, \ 98 | .read = csr_pmpcfg_read, \ 99 | .write = csr_pmpcfg_write \ 100 | } \ 101 | } 102 | 103 | DEFINE_PMPCFG_ENTRY(0); 104 | DEFINE_PMPCFG_ENTRY(1); 105 | DEFINE_PMPCFG_ENTRY(2); 106 | DEFINE_PMPCFG_ENTRY(3); 107 | 108 | __attribute__((constructor)) static void 109 | csr_machine_level_init(void) 110 | { 111 | #define _(idx) register_csr_entry(&pmpaddr##idx##_csr_entry) 112 | _(0); 113 | _(1); 114 | _(2); 115 | _(3); 116 | _(4); 117 | _(5); 118 | _(6); 119 | _(7); 120 | _(8); 121 | _(9); 122 | _(10); 123 | _(11); 124 | _(12); 125 | _(13); 126 | _(14); 127 | _(15); 128 | #undef _ 129 | 130 | #define _(idx) register_csr_entry(&pmpcfg##idx##_csr_entry) 131 | _(0); 132 | _(1); 133 | _(2); 134 | _(3); 135 | #undef _ 136 | } 137 | -------------------------------------------------------------------------------- /vmm/hart.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019-2020 Jie Zheng 3 | */ 4 | #ifndef _HART_H 5 | #define _HART_H 6 | #include 7 | #include 8 | #include 
9 | 10 | struct integer_register_profile { 11 | REGISTER_TYPE zero; 12 | REGISTER_TYPE ra; 13 | REGISTER_TYPE sp; 14 | REGISTER_TYPE gp; 15 | REGISTER_TYPE tp; 16 | REGISTER_TYPE t0, t1, t2; 17 | REGISTER_TYPE s0, s1; 18 | REGISTER_TYPE a0, a1, a2, a3, a4, a5, a6, a7; 19 | REGISTER_TYPE s2, s3, s4, s5, s6, s7, s8, s9, s10, s11; 20 | REGISTER_TYPE t3, t4, t5, t6; 21 | }__attribute__((packed)); 22 | 23 | 24 | 25 | struct virtual_machine; 26 | 27 | struct program_counter_mapping_item { 28 | uint32_t guest_pc; 29 | uint32_t tc_offset; 30 | }__attribute__((packed)); 31 | 32 | union interrupt_control_blob { 33 | struct { 34 | uint32_t usi:1; 35 | uint32_t ssi:1; 36 | uint32_t wpri0:1; 37 | uint32_t msi:1; 38 | uint32_t uti:1; 39 | uint32_t sti:1; 40 | uint32_t wpri1:1; 41 | uint32_t mti:1; 42 | uint32_t uei:1; 43 | uint32_t sei:1; 44 | uint32_t wpri2:1; 45 | uint32_t mei:1; 46 | uint32_t wpri3:20; 47 | } bits; 48 | uint32_t dword; 49 | }__attribute__((packed)); 50 | 51 | struct status_control_blob { 52 | uint32_t uie:1; 53 | uint32_t sie:1; 54 | uint32_t mie:1; 55 | uint32_t upie:1; 56 | uint32_t spie:1; 57 | uint32_t mpie:1; 58 | uint32_t spp:1; 59 | uint32_t mpp:2; 60 | }; 61 | 62 | struct tlb_entry; 63 | struct hart { 64 | struct integer_register_profile registers __attribute__((aligned(64))); 65 | REGISTER_TYPE pc; 66 | int hart_id; 67 | struct virtual_machine * vmptr; 68 | 69 | int nr_translated_instructions; 70 | void * pc_mappings; 71 | 72 | void * translation_cache; 73 | int translation_cache_ptr; 74 | 75 | void * vmm_stack_ptr; 76 | 77 | void * csrs_base; 78 | uint32_t hart_magic; 79 | 80 | union interrupt_control_blob idelegation; 81 | union interrupt_control_blob ienable; 82 | union interrupt_control_blob ipending; 83 | struct status_control_blob status; 84 | 85 | uint32_t privilege_level:2; 86 | 87 | struct tlb_entry * itlb; 88 | struct tlb_entry * dtlb; 89 | uint32_t itlb_cap; 90 | uint32_t dtlb_cap; 91 | 92 | uint64_t tsc; 93 | 
}__attribute__((aligned(64))); 94 | 95 | 96 | #define HART_REG(hartptr, index) \ 97 | (((uint32_t *)&((hartptr)->registers))[index]) 98 | struct prefetch_blob { 99 | // The guest address of instruction to fetch and translate in the next round 100 | uint32_t next_instruction_to_fetch; 101 | // indicating whether to stop fetch, there are several reasons to stop: 102 | // 1. translation cache is full 103 | // 2. encounter a jump/branch instruction which is considered as a terminator 104 | // of a translation unit. 105 | // 3. the target instruction is already in the translation cache 106 | uint8_t is_to_stop; 107 | uint8_t is_flushable; 108 | // The pointer of current hart. 109 | void * opaque; 110 | }; 111 | 112 | static inline int 113 | unoccupied_cache_size(struct hart * hart_instance) 114 | { 115 | extern void * vmm_jumper_begin; 116 | extern void * vmm_jumper_end; 117 | uint8_t * jumper_code_begin = (uint8_t *)&vmm_jumper_begin; 118 | uint8_t * jumper_code_end = (uint8_t *)&vmm_jumper_end; 119 | 120 | int ret = 0; 121 | if (hart_instance->nr_translated_instructions < 122 | MAX_INSTRUCTIONS_TOTRANSLATE) { 123 | ret = TRANSLATION_CACHE_SIZE - hart_instance->translation_cache_ptr - 124 | (jumper_code_end - jumper_code_begin); 125 | } 126 | //ASSERT(ret >= 0); 127 | return ret; 128 | } 129 | 130 | void 131 | hart_init(struct hart * hart_instance, int hart_id); 132 | 133 | void 134 | flush_translation_cache(struct hart * hart_instance); 135 | 136 | 137 | int 138 | add_translation_item(struct hart * hart_instance, 139 | uint32_t guest_instruction_address, 140 | const void * translation_instruction_block, 141 | int instruction_block_length); 142 | 143 | struct program_counter_mapping_item * 144 | search_translation_item(struct hart * hart_instance, 145 | uint32_t guest_instruction_address); 146 | 147 | void 148 | dump_hart(struct hart * hartptr); 149 | 150 | void 151 | dump_translation_cache(struct hart *hartptr); 152 | 153 | #endif 154 | 
-------------------------------------------------------------------------------- /vmm/emulate_ram.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | 5 | #include 6 | #include 7 | 8 | static void * 9 | ram_direct(uint64_t addr, struct hart * hartptr, 10 | struct pm_region_operation * pmr) 11 | { 12 | struct virtual_machine * vm = hartptr->vmptr; 13 | void * memory_access_base = vm->main_mem_host_base + (addr - pmr->addr_low); 14 | return memory_access_base; 15 | } 16 | 17 | static uint64_t 18 | ram_read(uint64_t addr, int access_size, struct hart * hartptr, 19 | struct pm_region_operation * pmr) 20 | { 21 | uint64_t val = 0; 22 | struct virtual_machine * vm = hartptr->vmptr; 23 | void * memory_access_base = vm->main_mem_host_base + (addr - pmr->addr_low); 24 | switch (access_size) 25 | { 26 | #define _(size, type) \ 27 | case size: \ 28 | val = *(type *)memory_access_base; \ 29 | break 30 | _(1, uint8_t); 31 | _(2, uint16_t); 32 | _(4, uint32_t); 33 | _(8, uint64_t); 34 | default: 35 | __not_reach(); 36 | break; 37 | #undef _ 38 | } 39 | return val; 40 | } 41 | 42 | static void 43 | ram_write(uint64_t addr, int access_size, uint64_t value, struct hart * hartptr, 44 | struct pm_region_operation * pmr) 45 | { 46 | struct virtual_machine * vm = hartptr->vmptr; 47 | void * memory_access_base = vm->main_mem_host_base + (addr - pmr->addr_low); 48 | switch (access_size) 49 | { 50 | #define _(size, type) \ 51 | case size: \ 52 | *(type *)memory_access_base = (type)value; \ 53 | break 54 | _(1, uint8_t); 55 | _(2, uint16_t); 56 | _(4, uint32_t); 57 | _(8, uint64_t); 58 | default: 59 | __not_reach(); 60 | break; 61 | #undef _ 62 | } 63 | } 64 | 65 | void 66 | ram_init(struct virtual_machine * vm) 67 | { 68 | // preallocate physical memory for emulated RAM 69 | const char * ram_start_string = ini_get(vm->ini_config, "mem", "main_memory_start"); 70 | const char * ram_size_string = 
ini_get(vm->ini_config, "mem", "main_memory_size_in_mega"); 71 | ASSERT(ram_start_string); 72 | ASSERT(ram_size_string); 73 | uint32_t ram_start = strtol(ram_start_string, NULL, 16); 74 | uint32_t ram_size = strtol(ram_size_string, NULL, 10); 75 | vm->main_mem_base = ram_start; 76 | vm->main_mem_size = MEGA(ram_size); 77 | vm->main_mem_host_base = preallocate_physical_memory(vm->main_mem_size); 78 | ASSERT(vm->main_mem_host_base); 79 | 80 | // register the physical memory region for RAM 81 | struct pm_region_operation main_memory_pmr = { 82 | .addr_low = vm->main_mem_base, 83 | .addr_high = vm->main_mem_base + vm->main_mem_size, 84 | .pmr_read = ram_read, 85 | .pmr_write = ram_write, 86 | .pmr_direct = ram_direct, 87 | .pmr_desc = "main.memory" 88 | }; 89 | register_pm_region_operation(&main_memory_pmr); 90 | 91 | // load the image(maybe a Linux kernel) into ram here 92 | // FIXME: we need a more sophisticated way to load image later. rigt now we 93 | // just load a binary image. 94 | const char * image_path = ini_get(vm->ini_config, "image", "kernel"); 95 | const char * load_base_string = ini_get(vm->ini_config, "image", "kernel_load_base"); 96 | ASSERT(image_path); 97 | ASSERT(load_base_string); 98 | uint32_t load_base = strtol(load_base_string, NULL, 16); 99 | ASSERT(!preload_binary_image(vm->main_mem_host_base + load_base - vm->main_mem_base, 100 | vm->main_mem_size, image_path)); 101 | 102 | // Load the init ramdisk 103 | const char * initrd_path = ini_get(vm->ini_config, "image", "initrd"); 104 | const char * initrd_load_base_string = 105 | ini_get(vm->ini_config, "image", "initrd_load_base"); 106 | if (initrd_path && initrd_load_base_string) { 107 | uint32_t initrd_load_base = strtol(initrd_load_base_string, NULL, 16); 108 | ASSERT(!preload_binary_image(vm->main_mem_host_base + initrd_load_base - 109 | vm->main_mem_base, 110 | vm->main_mem_size - initrd_load_base + 111 | vm->main_mem_base, initrd_path)); 112 | } 113 | } 114 | 
-------------------------------------------------------------------------------- /vmm/sort.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #ifndef _SORT_H 5 | #define _SORT_H 6 | #include 7 | #include 8 | 9 | #define ELEM(type, base, index) ((index)+ (type *)((uint64_t)(base))) 10 | 11 | // Here I use the simplest bubble sort in an ascending order 12 | #define SORT(type, base, num, comp) { \ 13 | int idx_out = (num) - 1; \ 14 | for (; idx_out > 0; idx_out--) { \ 15 | int idx_in = 0; \ 16 | for (; idx_in < idx_out; idx_in++) { \ 17 | type * prev_elem = ELEM(type, base, idx_in); \ 18 | type * next_elem = ELEM(type, base, idx_in + 1); \ 19 | if (comp(prev_elem, next_elem) > 0) { \ 20 | type tmp_elem; \ 21 | memcpy(&tmp_elem, prev_elem, sizeof(type)); \ 22 | memcpy(prev_elem, next_elem, sizeof(type)); \ 23 | memcpy(next_elem, &tmp_elem, sizeof(type)); \ 24 | } \ 25 | } \ 26 | } \ 27 | } 28 | 29 | 30 | // FIXED: there must be a fix to improve the effenciy to sort the items. 31 | // it's perforance critical 32 | // the last element is the one to be inserted 33 | // XXX: THE PERFORMANCE IS GREATLY IMPROVED 34 | // FIXME: supersede glibc memcpy. 
35 | #define INSERTION_SORT(type, base, num, comp) { \ 36 | if (num) { \ 37 | type * elem_target = ELEM(type, base, ((num) - 1)); \ 38 | int __idx_low = 0; \ 39 | int __idx_high = (num) - 2; \ 40 | int __idx_move = 0; \ 41 | for (; __idx_low <= __idx_high; ) { \ 42 | type * elem_low = ELEM(type, base, __idx_low); \ 43 | type * elem_high = ELEM(type, base, __idx_high); \ 44 | if (comp(elem_target, elem_low) <= 0) { \ 45 | __idx_move = __idx_low; \ 46 | break; \ 47 | } else if (comp(elem_high, elem_target) <= 0) { \ 48 | __idx_move = __idx_high + 1; \ 49 | break; \ 50 | } else { \ 51 | int __idx_mid = (__idx_low + __idx_high) / 2; \ 52 | if (__idx_mid == __idx_low || __idx_mid == __idx_high) { \ 53 | __idx_move = __idx_mid + 1; \ 54 | break; \ 55 | } \ 56 | type * elem_mid = ELEM(type, base, __idx_mid); \ 57 | if (comp(elem_target, elem_mid) < 0) { \ 58 | __idx_high = __idx_mid; \ 59 | } else { \ 60 | __idx_low = __idx_mid; \ 61 | } \ 62 | } \ 63 | } \ 64 | if (__idx_move < ((num) - 1)) { \ 65 | type elem_tmp; \ 66 | memcpy(&elem_tmp, elem_target, sizeof(type)); \ 67 | int __idx = (num) - 1; \ 68 | for (; __idx > __idx_move; __idx--) { \ 69 | memcpy(ELEM(type, base, __idx), \ 70 | ELEM(type, base, __idx - 1), \ 71 | sizeof(type)); \ 72 | } \ 73 | memcpy(ELEM(type, base, __idx_move), &elem_tmp, sizeof(type)); \ 74 | } \ 75 | } \ 76 | } 77 | 78 | #endif 79 | -------------------------------------------------------------------------------- /vmm/hart_trap.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | 7 | static void 8 | setup_mmode_trap(struct hart * hartptr, uint32_t cause, uint32_t tval) 9 | { 10 | struct csr_entry * csr_mcause = 11 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_MCAUSE]; 12 | csr_mcause->csr_blob = cause; 13 | struct csr_entry * csr_mtval = 14 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_MTVAL]; 15 | 
csr_mtval->csr_blob = tval; 16 | struct csr_entry * csr_mepc = 17 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_MEPC]; 18 | csr_mepc->csr_blob = hartptr->pc; // PC is kept in MEPC 19 | 20 | // MPP is set to the privilege level at the time of trap... 21 | // and current privilege level is set to machine mode. 22 | //uint32_t mpp = hartptr->privilege_level; 23 | //struct csr_entry * csr_mstatus = 24 | // &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_MSTATUS]; 25 | //csr_mstatus->csr_blob &= ~(3 << 11); 26 | //csr_mstatus->csr_blob |= mpp << 11; 27 | //hartptr->privilege_level = PRIVILEGE_LEVEL_MACHINE; 28 | hartptr->status.mpp = hartptr->privilege_level; 29 | hartptr->privilege_level = PRIVILEGE_LEVEL_MACHINE; 30 | 31 | 32 | // MPIE is set to MIE at the time of trap... 33 | // MIE is set to 0. 34 | //uint32_t mie = (csr_mstatus->csr_blob >> 3) & 0x1; 35 | //csr_mstatus->csr_blob &= ~(1 << 7); 36 | //csr_mstatus->csr_blob |= mie << 7; 37 | //csr_mstatus->csr_blob &= ~(1 << 3); 38 | hartptr->status.mpie = hartptr->status.mie; 39 | hartptr->status.mie = 0; 40 | 41 | // PC is set to the trap vector. 
42 | struct csr_entry * csr_mtvec = 43 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_MTVEC]; 44 | uint32_t mtvec = csr_mtvec->csr_blob; 45 | uint32_t trap_mode = mtvec & 0x1; 46 | if (!trap_mode || !(cause & 0x80000000)) { 47 | hartptr->pc = mtvec & (~3); 48 | } else { 49 | // vectored interrupt delivery 50 | uint32_t vector = cause & 0x7fffffff; 51 | hartptr->pc = (mtvec & (~3)) + vector * 4; 52 | } 53 | } 54 | 55 | static void 56 | setup_smode_trap(struct hart * hartptr, uint32_t cause, uint32_t tval) 57 | { 58 | struct csr_entry * csr_scause = 59 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_SCAUSE]; 60 | csr_scause->csr_blob = cause; 61 | struct csr_entry * csr_stval = 62 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_STVAL]; 63 | csr_stval->csr_blob = tval; 64 | struct csr_entry * csr_sepc = 65 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_SEPC]; 66 | csr_sepc->csr_blob = hartptr->pc; 67 | 68 | hartptr->status.spp = hartptr->privilege_level; 69 | hartptr->privilege_level = PRIVILEGE_LEVEL_SUPERVISOR; 70 | hartptr->status.spie = hartptr->status.sie; 71 | hartptr->status.sie = 0; 72 | 73 | struct csr_entry * csr_stvec = 74 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_STVEC]; 75 | uint32_t stvec = csr_stvec->csr_blob; 76 | uint32_t trap_mode = stvec & 0x1; 77 | if (!trap_mode || !(cause & 0x80000000)) { 78 | hartptr->pc = stvec & (~3); 79 | } else { 80 | uint32_t vector = cause & 0x7fffffff; 81 | hartptr->pc = (stvec & (~3)) + vector * 4; 82 | } 83 | } 84 | 85 | extern void vmm_entry_point(void); 86 | 87 | static void 88 | do_trap(struct hart * hartptr) 89 | { 90 | __asm__ volatile("movq %%rax, %%r15;" 91 | "movq %%rbx, %%r14;" 92 | "movq %%rcx, %%r13;" 93 | "movq %%rdx, %%r12;" 94 | "jmpq *%%rdi;" 95 | : 96 | :"a"(&hartptr->registers), "b"(&hartptr->pc), 97 | "c"(hartptr->translation_cache), 98 | "d"(hartptr), 99 | "D"(vmm_entry_point) 100 | :"memory"); 101 | } 102 | 103 | /* 104 | * raise_trap_raw 105 | * 106 | 
* The function is to raise a trap synchronously. 107 | * the wrapper(Interrupt/Exception) should themselves make decision whether 108 | * it's the right time to deliver a trap. 109 | */ 110 | void 111 | raise_trap_raw(struct hart * hartptr, uint8_t target_privilege_level, 112 | uint32_t cause, uint32_t tval) 113 | { 114 | uint32_t previous_pc = hartptr->pc; 115 | uint8_t previous_pl = hartptr->privilege_level; 116 | if (target_privilege_level == PRIVILEGE_LEVEL_MACHINE) { 117 | setup_mmode_trap(hartptr, cause, tval); 118 | } else { 119 | // WE DO NOT SUPPORT USER MODE INTERRUPT 120 | ASSERT(target_privilege_level == PRIVILEGE_LEVEL_SUPERVISOR); 121 | setup_smode_trap(hartptr, cause, tval);; 122 | } 123 | log_debug("trap to privilege:%d cause:0x%08x, tval:0x%08x previous:{pc:%x pl:%d} current:{pc:%x pl:%d}\n", 124 | target_privilege_level, cause, tval, 125 | previous_pc, previous_pl, 126 | hartptr->pc, hartptr->privilege_level); 127 | // XXX: when trap is taken, the addressing manner may chnage, so 128 | // the translation cache must be flushed. 
129 | flush_translation_cache(hartptr); 130 | do_trap(hartptr); 131 | } 132 | 133 | -------------------------------------------------------------------------------- /vmm/csr_representation.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | * 4 | * This file extends supervisor-level instructions translation 5 | */ 6 | #include 7 | #include 8 | #include 9 | 10 | void 11 | riscv_generic_csr_callback(struct hart * hartptr, uint64_t instruction) 12 | { 13 | ASSERT(hartptr->hart_magic == HART_MAGIC_WORD); 14 | ASSERT((instruction & 0x7f) == RISCV_OPCODE_SUPERVISOR_LEVEL); 15 | struct decoding dec; 16 | instruction_decoding_per_type(&dec, (uint32_t)instruction, ENCODING_TYPE_I); 17 | struct csr_entry * csr = &((struct csr_entry *)hartptr->csrs_base)[dec.imm & 0xfff]; 18 | 19 | if (!csr->is_valid) { 20 | // This must be a panic in case we miss some CSRs 21 | log_fatal("csr 0x%x is not implemented\n", dec.imm & 0xfff); 22 | PANIC(hartptr); 23 | } 24 | 25 | switch (dec.funct3) 26 | { 27 | case 0x01: 28 | case 0x05: 29 | // CSRRW and CSRRWI 30 | { 31 | // store rs1 reg in case rs1_index == rd_index 32 | uint32_t rs1_reg = HART_REG(hartptr, dec.rs1_index); 33 | if (dec.rd_index) { 34 | // if RD register is zero(x0), don't read the csr to avoid read side effect 35 | uint32_t value_to_read = 0; 36 | if (csr->read) { 37 | value_to_read = csr->read(hartptr, csr); 38 | } 39 | value_to_read &= csr->wpri_mask; 40 | HART_REG(hartptr, dec.rd_index) = value_to_read; 41 | } 42 | uint32_t value_to_write = dec.funct3 == 0x01 ? 
rs1_reg: dec.rs1_index; 43 | value_to_write &= csr->wpri_mask; 44 | if (csr->write) { 45 | csr->write(hartptr, csr, value_to_write); 46 | } 47 | } 48 | break; 49 | case 0x2: 50 | case 0x6: 51 | // csrrs and csrrsi 52 | { 53 | uint32_t rs1_reg = HART_REG(hartptr, dec.rs1_index); 54 | uint32_t value_to_read = 0; 55 | if (csr->read) { 56 | value_to_read = csr->read(hartptr, csr); 57 | } 58 | value_to_read &= csr->wpri_mask; 59 | HART_REG(hartptr, dec.rd_index) = value_to_read; 60 | 61 | uint32_t value_to_set = 0; 62 | uint8_t proceed_to_write = 0; 63 | if (dec.funct3 == 0x2 && dec.rs1_index) { 64 | value_to_set = rs1_reg; 65 | proceed_to_write = 1; 66 | } else if(dec.funct3 == 0x6 && dec.rs1_index) { 67 | value_to_set = dec.rs1_index & 0x1f; 68 | proceed_to_write = 1; 69 | } 70 | if (proceed_to_write && csr->write) { 71 | // if RS1 is zero or rs1(imm)'s value is zero, don't write csr 72 | csr->write(hartptr, csr, (value_to_read | value_to_set) & 73 | csr->wpri_mask); 74 | } 75 | } 76 | break; 77 | case 0x3: 78 | case 0x7: 79 | // csrrc and csrrci 80 | { 81 | uint32_t rs1_reg = HART_REG(hartptr, dec.rs1_index); 82 | uint32_t value_to_read = 0; 83 | if (csr->read) { 84 | value_to_read = csr->read(hartptr, csr); 85 | } 86 | value_to_read &= csr->wpri_mask; 87 | HART_REG(hartptr, dec.rd_index) = value_to_read; 88 | 89 | uint32_t value_to_clear = 0; 90 | uint8_t proceed_to_write = 0; 91 | 92 | if (dec.funct3 == 0x3 && dec.rs1_index) { 93 | value_to_clear = rs1_reg; 94 | proceed_to_write = 1; 95 | } else if(dec.funct3 == 0x7 && dec.rs1_index){ 96 | value_to_clear = dec.rs1_index & 0x1f; 97 | proceed_to_write = 1; 98 | } 99 | if (proceed_to_write && csr->write) { 100 | csr->write(hartptr, csr, value_to_read & ~value_to_clear & 101 | csr->wpri_mask); 102 | } 103 | } 104 | break; 105 | default: 106 | __not_reach(); 107 | break; 108 | } 109 | } 110 | 111 | void 112 | riscv_generic_csr_instructions_translator(struct decoding * dec, 113 | struct prefetch_blob * blob, 114 | 
uint32_t instruction) 115 | { 116 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 117 | struct hart * hartptr = (struct hart *)blob->opaque; 118 | PRECHECK_TRANSLATION_CACHE(csr_instructions, blob); 119 | BEGIN_TRANSLATION(csr_instructions); 120 | __asm__ volatile("movq %%r12, %%rdi;" 121 | "movl "PIC_PARAM(0)", %%esi;" 122 | "movq $riscv_generic_csr_callback, %%rax;" 123 | SAVE_GUEST_CONTEXT_SWITCH_REGS() 124 | "call *%%rax;" 125 | RESTORE_GUEST_CONTEXT_SWITCH_REGS() 126 | RESET_ZERO_REGISTER() 127 | PROCEED_TO_NEXT_INSTRUCTION() 128 | TRAP_TO_VMM(csr_instructions) 129 | //END_INSTRUCTION(csr_instructions) 130 | : 131 | : 132 | :"memory"); 133 | BEGIN_PARAM_SCHEMA() 134 | PARAM32() //the instruction itself 135 | END_PARAM_SCHEMA() 136 | END_TRANSLATION(csr_instructions); 137 | BEGIN_PARAM(csr_instructions) 138 | instruction 139 | END_PARAM() 140 | COMMIT_TRANSLATION(csr_instructions, hartptr, instruction_linear_address); 141 | //blob->next_instruction_to_fetch += 4; 142 | blob->is_to_stop = 1; 143 | } 144 | 145 | -------------------------------------------------------------------------------- /vmm/translate_memory_store.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | 11 | static void 12 | riscv_sb_translator(struct decoding * dec, struct prefetch_blob * blob, 13 | uint32_t instruction) 14 | { 15 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 16 | struct hart * hartptr = (struct hart *)blob->opaque; 17 | int32_t signed_offset = sign_extend32(dec->imm, 11); 18 | PRECHECK_TRANSLATION_CACHE(sb_instruction, blob); 19 | BEGIN_TRANSLATION(sb_instruction); 20 | __asm__ volatile("movl "PIC_PARAM(1)", %%edx;" 21 | "shl $2, %%edx;" 22 | "addq %%r15, %%rdx;" 23 | "movl (%%rdx), %%esi;" 24 | "movl "PIC_PARAM(0)", %%edx;" 25 | "addl %%edx, %%esi;" // ESI: memory store target 
location 26 | "movl "PIC_PARAM(2)", %%edx;" 27 | "shl $2, %%edx;" 28 | "addq %%r15, %%rdx;" 29 | "movl (%%rdx), %%edx;" // EDX: mmeory store source value 30 | "andl $0xff, %%edx;" 31 | "movq %%r12, %%rdi;" 32 | "movq $mmu_write8, %%rax;" 33 | SAVE_GUEST_CONTEXT_SWITCH_REGS() 34 | "call *%%rax;" 35 | RESTORE_GUEST_CONTEXT_SWITCH_REGS() 36 | PROCEED_TO_NEXT_INSTRUCTION() 37 | END_INSTRUCTION(sb_instruction) 38 | : 39 | : 40 | :"memory"); 41 | BEGIN_PARAM_SCHEMA() 42 | PARAM32() /*imm: signed offset*/ 43 | PARAM32() /*rs1_index: memory base register*/ 44 | PARAM32() /*rs2_index: source operand register*/ 45 | END_PARAM_SCHEMA() 46 | END_TRANSLATION(sb_instruction); 47 | BEGIN_PARAM(sb_instruction) 48 | signed_offset, 49 | dec->rs1_index, 50 | dec->rs2_index 51 | END_PARAM() 52 | COMMIT_TRANSLATION(sb_instruction, hartptr, instruction_linear_address); 53 | blob->next_instruction_to_fetch += 4; 54 | } 55 | 56 | 57 | static void 58 | riscv_sh_translator(struct decoding * dec, struct prefetch_blob * blob, 59 | uint32_t instruction) 60 | { 61 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 62 | struct hart * hartptr = (struct hart *)blob->opaque; 63 | int32_t signed_offset = sign_extend32(dec->imm, 11); 64 | PRECHECK_TRANSLATION_CACHE(sh_instruction, blob); 65 | BEGIN_TRANSLATION(sh_instruction); 66 | __asm__ volatile("movl "PIC_PARAM(1)", %%edx;" 67 | "shl $2, %%edx;" 68 | "addq %%r15, %%rdx;" 69 | "movl (%%rdx), %%esi;" 70 | "movl "PIC_PARAM(0)", %%edx;" 71 | "addl %%edx, %%esi;" // ESI: memory store target location 72 | "movl "PIC_PARAM(2)", %%edx;" 73 | "shl $2, %%edx;" 74 | "addq %%r15, %%rdx;" 75 | "movl (%%rdx), %%edx;" // EDX: mmeory store source value 76 | "andl $0xffff, %%edx;" 77 | "movq %%r12, %%rdi;" 78 | "movq $mmu_write16, %%rax;" 79 | SAVE_GUEST_CONTEXT_SWITCH_REGS() 80 | "call *%%rax;" 81 | RESTORE_GUEST_CONTEXT_SWITCH_REGS() 82 | PROCEED_TO_NEXT_INSTRUCTION() 83 | END_INSTRUCTION(sh_instruction) 84 | : 85 | : 86 | :"memory"); 87 | 
BEGIN_PARAM_SCHEMA() 88 | PARAM32() /*imm: signed offset*/ 89 | PARAM32() /*rs1_index: memory base register*/ 90 | PARAM32() /*rs2_index: source operand register*/ 91 | END_PARAM_SCHEMA() 92 | END_TRANSLATION(sh_instruction); 93 | BEGIN_PARAM(sh_instruction) 94 | signed_offset, 95 | dec->rs1_index, 96 | dec->rs2_index 97 | END_PARAM() 98 | COMMIT_TRANSLATION(sh_instruction, hartptr, instruction_linear_address); 99 | blob->next_instruction_to_fetch += 4; 100 | } 101 | 102 | 103 | static void 104 | riscv_sw_translator(struct decoding * dec, struct prefetch_blob * blob, 105 | uint32_t instruction) 106 | { 107 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 108 | struct hart * hartptr = (struct hart *)blob->opaque; 109 | int32_t signed_offset = sign_extend32(dec->imm, 11); 110 | PRECHECK_TRANSLATION_CACHE(sw_instruction, blob); 111 | BEGIN_TRANSLATION(sw_instruction); 112 | __asm__ volatile("movl "PIC_PARAM(1)", %%edx;" 113 | "shl $2, %%edx;" 114 | "addq %%r15, %%rdx;" 115 | "movl (%%rdx), %%esi;" 116 | "movl "PIC_PARAM(0)", %%edx;" 117 | "addl %%edx, %%esi;" // ESI: memory store target location 118 | "movl "PIC_PARAM(2)", %%edx;" 119 | "shl $2, %%edx;" 120 | "addq %%r15, %%rdx;" 121 | "movl (%%rdx), %%edx;" // EDX: mmeory store source value 122 | "movq %%r12, %%rdi;" 123 | "movq $mmu_write32, %%rax;" 124 | SAVE_GUEST_CONTEXT_SWITCH_REGS() 125 | "call *%%rax;" 126 | RESTORE_GUEST_CONTEXT_SWITCH_REGS() 127 | PROCEED_TO_NEXT_INSTRUCTION() 128 | END_INSTRUCTION(sw_instruction) 129 | : 130 | : 131 | :"memory"); 132 | BEGIN_PARAM_SCHEMA() 133 | PARAM32() /*imm: signed offset*/ 134 | PARAM32() /*rs1_index: memory base register*/ 135 | PARAM32() /*rs2_index: source operand register*/ 136 | END_PARAM_SCHEMA() 137 | END_TRANSLATION(sw_instruction); 138 | BEGIN_PARAM(sw_instruction) 139 | signed_offset, 140 | dec->rs1_index, 141 | dec->rs2_index 142 | END_PARAM() 143 | COMMIT_TRANSLATION(sw_instruction, hartptr, instruction_linear_address); 144 | 
blob->next_instruction_to_fetch += 4; 145 | } 146 | 147 | static instruction_sub_translator per_funct3_handlers[8]; 148 | 149 | void 150 | riscv_memory_store_instructions_translation_entry(struct prefetch_blob * blob, 151 | uint32_t instruction) 152 | { 153 | struct decoding dec; 154 | instruction_decoding_per_type(&dec, instruction, ENCODING_TYPE_S); 155 | ASSERT(per_funct3_handlers[dec.funct3]); 156 | per_funct3_handlers[dec.funct3](&dec, blob, instruction); 157 | } 158 | 159 | 160 | __attribute__((constructor)) static void 161 | memory_store_constructor(void) 162 | { 163 | memset(per_funct3_handlers, 0x0, sizeof(per_funct3_handlers)); 164 | per_funct3_handlers[0] = riscv_sb_translator; 165 | per_funct3_handlers[1] = riscv_sh_translator; 166 | per_funct3_handlers[2] = riscv_sw_translator; 167 | } 168 | -------------------------------------------------------------------------------- /vmm/ini.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2016 rxi 3 | * Copyright (c) 2020 Jie Zheng 4 | * 5 | * Permission is hereby granted, free of charge, to any person obtaining a copy 6 | * of this software and associated documentation files (the "Software"), to deal 7 | * in the Software without restriction, including without limitation the rights 8 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | * copies of the Software, and to permit persons to whom the Software is 10 | * furnished to do so, subject to the following conditions: 11 | * 12 | * The above copyright notice and this permission notice shall be included in 13 | * all copies or substantial portions of the Software. 14 | * 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | * SOFTWARE. 22 | */ 23 | 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | #include "ini.h" 30 | 31 | struct ini_t { 32 | char *data; 33 | char *end; 34 | }; 35 | 36 | 37 | /* Case insensitive string compare */ 38 | static int strcmpci(const char *a, const char *b) { 39 | for (;;) { 40 | int d = tolower(*a) - tolower(*b); 41 | if (d != 0 || !*a) { 42 | return d; 43 | } 44 | a++, b++; 45 | } 46 | } 47 | 48 | /* Returns the next string in the split data */ 49 | static char* next(ini_t *ini, char *p) { 50 | p += strlen(p); 51 | while (p < ini->end && *p == '\0') { 52 | p++; 53 | } 54 | return p; 55 | } 56 | 57 | static void trim_back(ini_t *ini, char *p) { 58 | while (p >= ini->data && (*p == ' ' || *p == '\t' || *p == '\r')) { 59 | *p-- = '\0'; 60 | } 61 | } 62 | 63 | static char* discard_line(ini_t *ini, char *p) { 64 | while (p < ini->end && *p != '\n') { 65 | *p++ = '\0'; 66 | } 67 | return p; 68 | } 69 | 70 | 71 | static char *unescape_quoted_value(ini_t *ini, char *p) { 72 | /* Use `q` as write-head and `p` as read-head, `p` is always ahead of `q` 73 | * as escape sequences are always larger than their resultant data */ 74 | char *q = p; 75 | p++; 76 | while (p < ini->end && *p != '"' && *p != '\r' && *p != '\n') { 77 | if (*p == '\\') { 78 | /* Handle escaped char */ 79 | p++; 80 | switch (*p) { 81 | default : *q = *p; break; 82 | case 'r' : *q = '\r'; break; 83 | case 'n' : *q = '\n'; break; 84 | case 't' : *q = '\t'; break; 85 | case '\r' : 86 | case '\n' : 87 | case '\0' : goto end; 88 | } 89 | 90 | } else { 91 | /* Handle normal char */ 92 | *q = *p; 93 | } 94 | q++, p++; 95 | } 96 | end: 97 | return q; 98 | } 99 | 100 | 101 | /* Splits data in place into strings 
containing section-headers, keys and 102 | * values using one or more '\0' as a delimiter. Unescapes quoted values */ 103 | static void split_data(ini_t *ini) { 104 | char *value_start, *line_start; 105 | char *p = ini->data; 106 | 107 | while (p < ini->end) { 108 | switch (*p) { 109 | case '\r': 110 | case '\n': 111 | case '\t': 112 | case ' ': 113 | *p = '\0'; 114 | /* Fall through */ 115 | 116 | case '\0': 117 | p++; 118 | break; 119 | 120 | case '[': 121 | p += strcspn(p, "]\n"); 122 | *p = '\0'; 123 | break; 124 | 125 | case ';': 126 | p = discard_line(ini, p); 127 | break; 128 | 129 | default: 130 | line_start = p; 131 | p += strcspn(p, "=\n"); 132 | 133 | /* Is line missing a '='? */ 134 | if (*p != '=') { 135 | p = discard_line(ini, line_start); 136 | break; 137 | } 138 | trim_back(ini, p - 1); 139 | 140 | /* Replace '=' and whitespace after it with '\0' */ 141 | do { 142 | *p++ = '\0'; 143 | } while (*p == ' ' || *p == '\r' || *p == '\t'); 144 | 145 | /* Is a value after '=' missing? */ 146 | if (*p == '\n' || *p == '\0') { 147 | p = discard_line(ini, line_start); 148 | break; 149 | } 150 | 151 | if (*p == '"') { 152 | /* Handle quoted string value */ 153 | value_start = p; 154 | p = unescape_quoted_value(ini, p); 155 | 156 | /* Was the string empty? 
*/ 157 | if (p == value_start) { 158 | p = discard_line(ini, line_start); 159 | break; 160 | } 161 | 162 | /* Discard the rest of the line after the string value */ 163 | p = discard_line(ini, p); 164 | 165 | } else { 166 | /* Handle normal value */ 167 | p += strcspn(p, "\n"); 168 | trim_back(ini, p - 1); 169 | } 170 | break; 171 | } 172 | } 173 | } 174 | 175 | 176 | 177 | ini_t* ini_load(const char *filename) { 178 | ini_t *ini = NULL; 179 | FILE *fp = NULL; 180 | int n, sz; 181 | 182 | /* Init ini struct */ 183 | ini = malloc(sizeof(*ini)); 184 | if (!ini) { 185 | goto fail; 186 | } 187 | memset(ini, 0, sizeof(*ini)); 188 | 189 | /* Open file */ 190 | fp = fopen(filename, "rb"); 191 | if (!fp) { 192 | goto fail; 193 | } 194 | 195 | /* Get file size */ 196 | fseek(fp, 0, SEEK_END); 197 | sz = ftell(fp); 198 | rewind(fp); 199 | 200 | /* Load file content into memory, null terminate, init end var */ 201 | ini->data = malloc(sz + 1); 202 | ini->data[sz] = '\0'; 203 | ini->end = ini->data + sz; 204 | n = fread(ini->data, 1, sz, fp); 205 | if (n != sz) { 206 | goto fail; 207 | } 208 | 209 | /* Prepare data */ 210 | split_data(ini); 211 | 212 | /* Clean up and return */ 213 | fclose(fp); 214 | return ini; 215 | 216 | fail: 217 | if (fp) fclose(fp); 218 | if (ini) ini_free(ini); 219 | return NULL; 220 | } 221 | 222 | 223 | void ini_free(ini_t *ini) { 224 | free(ini->data); 225 | free(ini); 226 | } 227 | 228 | 229 | const char* ini_get(ini_t *ini, const char *section, const char *key) { 230 | char *current_section = ""; 231 | char *val; 232 | char *p = ini->data; 233 | 234 | if (*p == '\0') { 235 | p = next(ini, p); 236 | } 237 | 238 | while (p < ini->end) { 239 | if (*p == '[') { 240 | /* Handle section */ 241 | current_section = p + 1; 242 | 243 | } else { 244 | /* Handle key */ 245 | val = next(ini, p); 246 | if (!section || !strcmpci(section, current_section)) { 247 | if (!strcmpci(p, key)) { 248 | return val; 249 | } 250 | } 251 | p = val; 252 | } 253 | 254 | p = 
next(ini, p); 255 | } 256 | 257 | return NULL; 258 | } 259 | 260 | 261 | int ini_sget( 262 | ini_t *ini, const char *section, const char *key, 263 | const char *scanfmt, void *dst 264 | ) { 265 | const char *val = ini_get(ini, section, key); 266 | if (!val) { 267 | return 0; 268 | } 269 | if (scanfmt) { 270 | sscanf(val, scanfmt, dst); 271 | } else { 272 | *((const char**) dst) = val; 273 | } 274 | return 1; 275 | } 276 | -------------------------------------------------------------------------------- /vmm/mmu.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #ifndef _MMU_H 5 | #define _MMU_H 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #define _(_size) \ 13 | __attribute__((always_inline)) static inline uint##_size##_t \ 14 | direct_read##_size (struct hart * hartptr, uint32_t linear_address) \ 15 | { \ 16 | struct pm_region_operation * pmr; \ 17 | pmr = search_pm_region_callback(linear_address); \ 18 | if (!pmr) { \ 19 | dump_hart(hartptr); \ 20 | __not_reach(); \ 21 | } \ 22 | return pmr->pmr_read(linear_address, _size / 8, hartptr, pmr); \ 23 | } 24 | 25 | 26 | 27 | _(8) 28 | _(16) 29 | _(32) 30 | _(64) 31 | #undef _ 32 | 33 | 34 | #define _(_size) \ 35 | __attribute__((always_inline)) static inline void \ 36 | direct_write##_size (struct hart * hartptr, uint32_t linear_address, \ 37 | uint##_size##_t value) \ 38 | { \ 39 | struct pm_region_operation * pmr; \ 40 | pmr = search_pm_region_callback(linear_address); \ 41 | if (!pmr) { \ 42 | dump_hart(hartptr); \ 43 | __not_reach(); \ 44 | } \ 45 | pmr->pmr_write(linear_address, _size / 8, value, hartptr, pmr); \ 46 | } 47 | 48 | 49 | 50 | _(8) 51 | _(16) 52 | _(32) 53 | _(64) 54 | #undef _ 55 | 56 | int 57 | walk_page_table(struct hart * hartptr, uint32_t va, struct tlb_entry * tlb, 58 | int tlb_cap); 59 | 60 | #include 61 | 62 | // FIXME: raise exception if address is not naturally aligned. 
63 | #define _(_size) \ 64 | __attribute__((always_inline)) static inline uint##_size##_t \ 65 | vmread##_size (struct hart * hartptr, uint32_t linear_address) \ 66 | { \ 67 | struct csr_entry * csr = \ 68 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_SATP]; \ 69 | if (hartptr->privilege_level < PRIVILEGE_LEVEL_MACHINE && \ 70 | csr->csr_blob & 0x80000000) { \ 71 | struct tlb_entry * entry = VA_TO_TLB_ENTRY(hartptr->dtlb, \ 72 | hartptr->dtlb_cap, \ 73 | linear_address); \ 74 | if (!entry) { \ 75 | walk_page_table(hartptr, linear_address, hartptr->dtlb, \ 76 | hartptr->dtlb_cap); \ 77 | entry = VA_TO_TLB_ENTRY(hartptr->dtlb, hartptr->dtlb_cap, \ 78 | linear_address); \ 79 | } \ 80 | if (!entry) { \ 81 | raise_exception_with_tvalue(hartptr, EXCEPTION_LOAD_PAGE_FAULT, \ 82 | linear_address); \ 83 | __not_reach(); \ 84 | } \ 85 | return entry->pmr->pmr_read(entry->pa_tag | ((linear_address & ~(entry->page_mask))),\ 86 | _size / 8, hartptr, entry->pmr); \ 87 | } \ 88 | return direct_read##_size(hartptr, linear_address); \ 89 | } 90 | 91 | 92 | 93 | _(8) 94 | _(16) 95 | _(32) 96 | _(64) 97 | #undef _ 98 | 99 | 100 | 101 | #define _(_size) \ 102 | __attribute__((always_inline)) static inline void \ 103 | vmwrite##_size (struct hart * hartptr, uint32_t linear_address, \ 104 | uint##_size##_t value) \ 105 | { \ 106 | struct csr_entry * csr = \ 107 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_SATP]; \ 108 | if (hartptr->privilege_level < PRIVILEGE_LEVEL_MACHINE && \ 109 | csr->csr_blob & 0x80000000) { \ 110 | struct tlb_entry * entry = VA_TO_TLB_ENTRY(hartptr->dtlb, \ 111 | hartptr->dtlb_cap, \ 112 | linear_address); \ 113 | if (!entry) { \ 114 | walk_page_table(hartptr, linear_address, hartptr->dtlb, \ 115 | hartptr->dtlb_cap); \ 116 | entry = VA_TO_TLB_ENTRY(hartptr->dtlb, hartptr->dtlb_cap, \ 117 | linear_address); \ 118 | } \ 119 | if (!entry) { \ 120 | raise_exception_with_tvalue(hartptr, EXCEPTION_STORE_PAGE_FAULT, \ 121 | linear_address); \ 122 | 
__not_reach(); \ 123 | } \ 124 | return entry->pmr->pmr_write(entry->pa_tag | ((linear_address & ~(entry->page_mask))),\ 125 | _size / 8, value, hartptr, entry->pmr); \ 126 | } \ 127 | return direct_write##_size(hartptr, linear_address, value); \ 128 | } 129 | 130 | 131 | _(8) 132 | _(16) 133 | _(32) 134 | _(64) 135 | #undef _ 136 | 137 | int 138 | pa_to_va(struct hart * hartptr, uint32_t pa, struct tlb_entry * tlb, 139 | int tlb_cap, uint32_t * va); 140 | 141 | uint8_t 142 | mmu_read8(struct hart * hartptr, uint32_t location); 143 | 144 | uint16_t 145 | mmu_read16(struct hart * hartptr, uint32_t location); 146 | 147 | uint32_t 148 | mmu_read32(struct hart * hartptr, uint32_t location); 149 | 150 | uint32_t 151 | mmu_read32_aligned(struct hart * hartptr, uint32_t location); 152 | 153 | uint32_t 154 | mmu_instruction_read32(struct hart * hartptr, uint32_t instruction_va); 155 | 156 | 157 | void 158 | mmu_write8(struct hart * hartptr, uint32_t location, uint8_t value); 159 | 160 | void 161 | mmu_write16(struct hart * hartptr, uint32_t location, uint16_t value); 162 | 163 | void 164 | mmu_write32(struct hart * hartptr, uint32_t location, uint32_t value); 165 | 166 | void 167 | mmu_write32_aligned(struct hart * hartptr, uint32_t location, uint32_t value); 168 | 169 | #endif 170 | -------------------------------------------------------------------------------- /vmm/translate_amo.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Jie Zheng 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | static void 10 | amoadd_slowpath(struct hart * hartptr, uint8_t rs1_index, uint8_t rs2_index, 11 | uint8_t rd_index) 12 | { 13 | uint32_t * regs = (uint32_t *)&hartptr->registers; 14 | // MUST make a copy of rs1 and rs2. because if rd equals any of them, things 15 | // getting wrong. 
16 | uint32_t rs1 = regs[rs1_index]; 17 | uint32_t rs2 = regs[rs2_index]; 18 | regs[rd_index]= mmu_read32_aligned(hartptr, rs1); 19 | mmu_write32_aligned(hartptr, rs1, regs[rd_index] + rs2); 20 | } 21 | 22 | 23 | static void 24 | amoswap_slowpath(struct hart * hartptr, uint8_t rs1_index, uint8_t rs2_index, 25 | uint8_t rd_index) 26 | { 27 | uint32_t * regs = (uint32_t *)&hartptr->registers; 28 | uint32_t rs1 = regs[rs1_index]; 29 | uint32_t rs2 = regs[rs2_index]; 30 | regs[rd_index]= mmu_read32_aligned(hartptr, rs1); 31 | mmu_write32_aligned(hartptr, rs1, rs2); 32 | } 33 | 34 | struct address_reservation_item { 35 | uint32_t addr; 36 | uint32_t valid:1; 37 | uint32_t hartid:8; 38 | }; 39 | // FIXME: GUARD ALL THE CRITICAL RESOURCE IF WE ARE GOING TO REALIZE SMP 40 | #define MAX_RESERVATION_ITEMS 16 41 | static struct address_reservation_item reservations[MAX_RESERVATION_ITEMS]; 42 | static int next_pointer = 0; 43 | 44 | static void 45 | lr_slowpath(struct hart * hartptr, uint8_t rs1_index, uint8_t rs2_index, 46 | uint8_t rd_index) 47 | { 48 | uint32_t * regs = (uint32_t *)&hartptr->registers; 49 | uint32_t rs1 = regs[rs1_index]; 50 | regs[rd_index]= mmu_read32_aligned(hartptr, rs1); 51 | // MAKE A RESERVATION FOR ADDRESS RS1 52 | int idx = 0; 53 | for (idx = 0; idx < MAX_RESERVATION_ITEMS; idx++) { 54 | if (!reservations[idx].valid) { 55 | next_pointer = idx; 56 | break; 57 | } 58 | } 59 | reservations[next_pointer].addr = rs1; 60 | reservations[next_pointer].valid = 1; 61 | reservations[next_pointer].hartid = hartptr->hart_id; 62 | next_pointer = (next_pointer + 1) % MAX_RESERVATION_ITEMS; 63 | log_trace("lr.w address reservation:0x%08x\n", rs1); 64 | } 65 | 66 | static void 67 | sc_slowpath(struct hart * hartptr, uint8_t rs1_index, uint8_t rs2_index, 68 | uint8_t rd_index) 69 | { 70 | uint32_t * regs = (uint32_t *)&hartptr->registers; 71 | uint32_t rs1 = regs[rs1_index]; 72 | uint32_t rs2 = regs[rs2_index]; 73 | int idx = 0; 74 | for (; idx < 
MAX_RESERVATION_ITEMS; idx++) { 75 | if (reservations[idx].valid && 76 | reservations[idx].addr == rs1 && 77 | reservations[idx].hartid == hartptr->hart_id) { 78 | break; 79 | } 80 | } 81 | if (idx < MAX_RESERVATION_ITEMS) { 82 | // FOUND A VALID RESERVATION ITEM 83 | regs[rd_index] = 0x0; 84 | mmu_write32_aligned(hartptr, rs1, rs2); 85 | // CLEAR ADDRESS RESERVATION 86 | reservations[idx].valid = 0; 87 | } else { 88 | regs[rd_index] = 0x1; 89 | } 90 | } 91 | 92 | static void 93 | amoxor_slowpath(struct hart * hartptr, uint8_t rs1_index, uint8_t rs2_index, 94 | uint8_t rd_index) 95 | { 96 | uint32_t * regs = (uint32_t *)&hartptr->registers; 97 | uint32_t rs1 = regs[rs1_index]; 98 | uint32_t rs2 = regs[rs2_index]; 99 | regs[rd_index]= mmu_read32_aligned(hartptr, rs1); 100 | mmu_write32_aligned(hartptr, rs1, regs[rd_index] ^ rs2); 101 | } 102 | 103 | static void 104 | amoor_slowpath(struct hart * hartptr, uint8_t rs1_index, uint8_t rs2_index, 105 | uint8_t rd_index) 106 | { 107 | uint32_t * regs = (uint32_t *)&hartptr->registers; 108 | uint32_t rs1 = regs[rs1_index]; 109 | uint32_t rs2 = regs[rs2_index]; 110 | regs[rd_index]= mmu_read32_aligned(hartptr, rs1); 111 | mmu_write32_aligned(hartptr, rs1, regs[rd_index] | rs2); 112 | } 113 | 114 | static void 115 | amoand_slowpath(struct hart * hartptr, uint8_t rs1_index, uint8_t rs2_index, 116 | uint8_t rd_index) 117 | { 118 | uint32_t * regs = (uint32_t *)&hartptr->registers; 119 | uint32_t rs1 = regs[rs1_index]; 120 | uint32_t rs2 = regs[rs2_index]; 121 | regs[rd_index]= mmu_read32_aligned(hartptr, rs1); 122 | mmu_write32_aligned(hartptr, rs1, regs[rd_index] & rs2); 123 | } 124 | 125 | void 126 | amo_instruction_slowpath(struct hart * hartptr, uint8_t rs1_index, 127 | uint8_t rs2_index, uint8_t rd_index, uint32_t funct5) 128 | { 129 | #define _(funct5, func) \ 130 | case funct5: \ 131 | func(hartptr, rs1_index, rs2_index, rd_index); \ 132 | break; 133 | switch(funct5) 134 | { 135 | _(0x0, amoadd_slowpath); 136 | _(0x1, 
amoswap_slowpath); 137 | _(0x2, lr_slowpath); 138 | _(0x3, sc_slowpath); 139 | _(0x4, amoxor_slowpath); 140 | _(0x8, amoor_slowpath); 141 | _(0xc, amoand_slowpath); 142 | default: 143 | __not_reach(); 144 | break; 145 | } 146 | #undef _ 147 | } 148 | static void 149 | riscv_amo_translator(struct decoding * dec, struct prefetch_blob * blob, 150 | uint32_t instruction) 151 | { 152 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 153 | struct hart * hartptr = (struct hart *)blob->opaque; 154 | PRECHECK_TRANSLATION_CACHE(amo_instruction, blob); 155 | BEGIN_TRANSLATION(amo_instruction); 156 | __asm__ volatile( 157 | // RDI: hartptr 158 | // RSI: rs1_index, 159 | // RDX: rs2_index, 160 | // RCX: rd_index 161 | // R8: funct5 162 | "movq %%r12, %%rdi;" 163 | "movl "PIC_PARAM(0)", %%esi;" 164 | "movl "PIC_PARAM(1)", %%edx;" 165 | "movl "PIC_PARAM(2)", %%ecx;" 166 | "movl "PIC_PARAM(3)", %%r8d;" 167 | "movq $amo_instruction_slowpath, %%rax;" 168 | SAVE_GUEST_CONTEXT_SWITCH_REGS() 169 | "call *%%rax;" 170 | RESTORE_GUEST_CONTEXT_SWITCH_REGS() 171 | RESET_ZERO_REGISTER() 172 | PROCEED_TO_NEXT_INSTRUCTION() 173 | END_INSTRUCTION(amo_instruction) 174 | : 175 | : 176 | :"memory"); 177 | BEGIN_PARAM_SCHEMA() 178 | PARAM32() /*rs1 index*/ 179 | PARAM32() /*rs2 index*/ 180 | PARAM32() /*rd index*/ 181 | PARAM32() /*funct5*/ 182 | END_PARAM_SCHEMA() 183 | END_TRANSLATION(amo_instruction); 184 | BEGIN_PARAM(amo_instruction) 185 | dec->rs1_index, 186 | dec->rs2_index, 187 | dec->rd_index, 188 | dec->funct7 >> 2 189 | END_PARAM() 190 | COMMIT_TRANSLATION(amo_instruction, hartptr, instruction_linear_address); 191 | blob->next_instruction_to_fetch += 4; 192 | } 193 | 194 | static instruction_sub_translator per_funct5_handlers[32]; 195 | 196 | void 197 | riscv_amo_instructions_translation_entry(struct prefetch_blob * blob, 198 | uint32_t instruction) 199 | { 200 | struct decoding dec; 201 | instruction_decoding_per_type(&dec, instruction, ENCODING_TYPE_R); 202 | 
uint8_t funct5 = dec.funct7 >> 2; 203 | if (!per_funct5_handlers[funct5]) { 204 | printf("instruction_linear_address:%x\n", blob->next_instruction_to_fetch); 205 | } 206 | ASSERT(per_funct5_handlers[funct5]); 207 | per_funct5_handlers[funct5](&dec, blob, instruction); 208 | } 209 | 210 | 211 | __attribute__((constructor)) static void 212 | amo_constructor(void) 213 | { 214 | memset(reservations, 0x0, sizeof(reservations)); 215 | memset(per_funct5_handlers, 0x0, sizeof(per_funct5_handlers)); 216 | per_funct5_handlers[0] = riscv_amo_translator; //amoadd.w 217 | per_funct5_handlers[0x1] = riscv_amo_translator; // amoswap.w 218 | per_funct5_handlers[0x2] = riscv_amo_translator; // lr.w 219 | per_funct5_handlers[0x3] = riscv_amo_translator; // sc.w 220 | per_funct5_handlers[0x4] = riscv_amo_translator; //amoxor.w 221 | per_funct5_handlers[0x8] = riscv_amo_translator; // amoor.w 222 | per_funct5_handlers[0xc] = riscv_amo_translator; // amoand.w 223 | } 224 | -------------------------------------------------------------------------------- /vmm/hart.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | struct csr_registery_entry * csr_registery_head = NULL; 17 | 18 | uint64_t offset_of_vmm_stack = offsetof(struct hart, vmm_stack_ptr); 19 | 20 | static void 21 | csr_registery_init(struct hart * hartptr) 22 | { 23 | struct csr_registery_entry * ptr = csr_registery_head; 24 | for (; ptr; ptr = ptr->next) { 25 | ASSERT(!(ptr->csr_addr & 0xfffff000)); 26 | struct csr_entry * csr = 27 | &((struct csr_entry *)hartptr->csrs_base)[ptr->csr_addr & 0xfff]; 28 | csr->is_valid = 1; 29 | csr->csr_addr = ptr->csr_addr & 0xfff; 30 | csr->csr_blob = 0; 31 | csr->wpri_mask = ptr->csr_registery.wpri_mask; 32 | csr->write = ptr->csr_registery.write; 33 | 
csr->read = ptr->csr_registery.read; 34 | csr->reset = ptr->csr_registery.reset; 35 | if (csr->reset) { 36 | csr->reset(hartptr, csr); 37 | } 38 | } 39 | 40 | } 41 | void 42 | hart_init(struct hart * hart_instance, int hart_id) 43 | { 44 | memset(hart_instance, 0x0, sizeof(struct hart)); 45 | hart_instance->hart_id = hart_id; 46 | hart_instance->hart_magic = HART_MAGIC_WORD; 47 | // mprotect requires the memory is obtained by mmap, or its behavior is 48 | // undefined. 49 | uint64_t tc_base = (uint64_t)mmap(NULL, TRANSLATION_CACHE_SIZE + 4096, 50 | PROT_READ | PROT_WRITE | PROT_EXEC, 51 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 52 | tc_base &= ~4095; 53 | hart_instance->translation_cache = (void *)tc_base; 54 | ASSERT(hart_instance->translation_cache); 55 | 56 | hart_instance->pc_mappings = 57 | aligned_alloc(4096, MAX_INSTRUCTIONS_TOTRANSLATE * 58 | sizeof(struct program_counter_mapping_item)); 59 | ASSERT(hart_instance->pc_mappings); 60 | flush_translation_cache(hart_instance); 61 | 62 | // grant exec privilege to the translation cache 63 | ASSERT(!mprotect(hart_instance->translation_cache, TRANSLATION_CACHE_SIZE, 64 | PROT_EXEC | PROT_READ | PROT_WRITE)); 65 | 66 | uint64_t vmm_stack = 67 | (uint64_t)mmap(NULL, VMM_STACK_SIZE + 4096, PROT_READ | PROT_WRITE, 68 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 69 | vmm_stack &= ~4095; 70 | hart_instance->vmm_stack_ptr = (void *)(vmm_stack + VMM_STACK_SIZE); 71 | ASSERT(hart_instance->vmm_stack_ptr); 72 | // CSR INIT 73 | hart_instance->csrs_base = 74 | aligned_alloc(4096, 4096 * sizeof(struct csr_entry)); 75 | ASSERT(hart_instance->csrs_base); 76 | memset(hart_instance->csrs_base, 0x0, 4096 * sizeof(struct csr_entry)); 77 | csr_registery_init(hart_instance); 78 | 79 | // HART initialized as M-MODE 80 | hart_instance->privilege_level = PRIVILEGE_LEVEL_MACHINE; 81 | } 82 | 83 | 84 | void 85 | flush_translation_cache(struct hart * hart_instance) 86 | { 87 | hart_instance->nr_translated_instructions = 0; 88 | 
hart_instance->translation_cache_ptr = 0; 89 | #if defined(DEBUG_TRACE) 90 | log_trace("flush translation cache hartid:%d\n", 91 | hart_instance->hart_id); 92 | #endif 93 | } 94 | 95 | #if 0 96 | static int 97 | comparing_mapping_item(const void *a, const void * b) 98 | { 99 | const struct program_counter_mapping_item * item_a = a; 100 | const struct program_counter_mapping_item * item_b = b; 101 | return item_a->guest_pc - item_b->guest_pc; 102 | } 103 | #else 104 | // MACRO is much more quick 105 | #define comparing_mapping_item(pa, pb)({ \ 106 | (int)((pa)->guest_pc - (pb)->guest_pc); \ 107 | }) 108 | #endif 109 | // @return zero upon success, otherwise, non-zero is returned 110 | int 111 | add_translation_item(struct hart * hart_instance, 112 | uint32_t guest_instruction_address, 113 | const void * translation_instruction_block, 114 | int instruction_block_length) 115 | { 116 | if (unoccupied_cache_size(hart_instance) < instruction_block_length) { 117 | // No enough room for newly translated block, give up. 
118 | return -1; 119 | } 120 | if (hart_instance->nr_translated_instructions >= MAX_INSTRUCTIONS_TOTRANSLATE) { 121 | // No enough room in the mapping array 122 | return -2; 123 | } 124 | uint32_t tc_offset = hart_instance->translation_cache_ptr; 125 | memcpy(hart_instance->translation_cache + hart_instance->translation_cache_ptr, 126 | translation_instruction_block, instruction_block_length); 127 | hart_instance->translation_cache_ptr += instruction_block_length; 128 | 129 | // insert one mapping entry into per-hart pc mapping array 130 | struct program_counter_mapping_item * mappings = hart_instance->pc_mappings; 131 | mappings[hart_instance->nr_translated_instructions].guest_pc = 132 | guest_instruction_address; 133 | mappings[hart_instance->nr_translated_instructions].tc_offset = tc_offset; 134 | hart_instance->nr_translated_instructions ++; 135 | 136 | INSERTION_SORT(struct program_counter_mapping_item, 137 | hart_instance->pc_mappings, 138 | hart_instance->nr_translated_instructions, 139 | comparing_mapping_item); 140 | 141 | #if 0 142 | // FIXED: Do not use glibc qsort which heavily depends system allocated stack. 
143 | qsort(hart_instance->pc_mappings, hart_instance->nr_translated_instructions, 144 | sizeof(struct program_counter_mapping_item), comparing_mapping_item); 145 | #endif 146 | return 0; 147 | } 148 | 149 | struct program_counter_mapping_item * 150 | search_translation_item(struct hart * hart_instance, 151 | uint32_t guest_instruction_address) 152 | { 153 | struct program_counter_mapping_item key = { 154 | .guest_pc = guest_instruction_address 155 | }; 156 | return SEARCH(struct program_counter_mapping_item, 157 | hart_instance->pc_mappings, 158 | hart_instance->nr_translated_instructions, 159 | comparing_mapping_item, 160 | &key); 161 | #if 0 162 | return bsearch(&key, hart_instance->pc_mappings, 163 | hart_instance->nr_translated_instructions, 164 | sizeof(struct program_counter_mapping_item), 165 | comparing_mapping_item); 166 | #endif 167 | } 168 | 169 | void 170 | dump_hart(struct hart * hartptr) 171 | { 172 | const char * regs_abi_names[] = { 173 | "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", 174 | "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", 175 | "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", 176 | "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6" 177 | }; 178 | printf("dump hart:%p\n", hartptr); 179 | printf("\thart-id: %d\n", hartptr->hart_id); 180 | printf("\tpc: 0x%x\n", hartptr->pc); 181 | int index = 0; 182 | uint32_t * regs = (uint32_t *)&hartptr->registers; 183 | for (index = 0; index < 32; index++) { 184 | printf("\t"); 185 | printf("X%02d(%-4s): 0x%08x ", index, regs_abi_names[index], 186 | regs[index]); 187 | if (((index + 1) % 4) == 0) { 188 | printf("\n"); 189 | } 190 | } 191 | printf("\thart control and status:\n"); 192 | printf("\tprivilege level:%x\n", hartptr->privilege_level); 193 | printf("\tstatus: uie:%d sie:%d mie:%d upie:%d spie:%d mpie:%d spp:%x, mpp:%x\n", 194 | hartptr->status.uie, hartptr->status.sie, hartptr->status.mie, 195 | hartptr->status.upie, hartptr->status.spie, hartptr->status.mpie, 196 | hartptr->status.spp, 
hartptr->status.mpp); 197 | printf("\tinterrupt pending: 0x%08x\n", hartptr->ipending.dword); 198 | printf("\tinterrupt enable: 0x%08x\n", hartptr->ienable.dword); 199 | printf("\tinterrupt delegation: 0x%08x\n", hartptr->idelegation.dword); 200 | struct csr_entry * csr_medeleg = 201 | &((struct csr_entry *)hartptr->csrs_base)[CSR_ADDRESS_MEDELEG]; 202 | printf("\tmachine exception delegation: 0x%08x\n", csr_medeleg->csr_blob); 203 | } 204 | 205 | 206 | void 207 | dump_translation_cache(struct hart *hartptr) 208 | { 209 | int index = 0; 210 | printf("hart:%d has %d items in translation cache:\n", hartptr->hart_id, 211 | hartptr->nr_translated_instructions); 212 | struct program_counter_mapping_item * items = hartptr->pc_mappings; 213 | for (index = 0; index < hartptr->nr_translated_instructions; index++) { 214 | printf("\t0x%08x: %p ", items[index].guest_pc, 215 | (hartptr->translation_cache + items[index].tc_offset)); 216 | if (((index + 1) % 4) == 0) { 217 | printf("\n"); 218 | } 219 | } 220 | printf("\n"); 221 | } 222 | 223 | 224 | __attribute__((constructor)) static void 225 | misc_init(void) 226 | { 227 | ASSERT(sizeof(union interrupt_control_blob) == 4); 228 | union interrupt_control_blob blob = { 229 | .dword = 0 230 | }; 231 | blob.bits.usi = 1; 232 | blob.bits.uti = 1; 233 | blob.bits.uei = 1; 234 | blob.bits.mei = 1; 235 | ASSERT(blob.dword == 0x911); 236 | 237 | } 238 | 239 | -------------------------------------------------------------------------------- /bootrom/sha1.c: -------------------------------------------------------------------------------- 1 | /* 2 | SHA-1 in C 3 | By Steve Reid 4 | 100% Public Domain 5 | 6 | Test Vectors (from FIPS PUB 180-1) 7 | "abc" 8 | A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D 9 | "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" 10 | 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1 11 | A million repetitions of "a" 12 | 34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F 13 | */ 14 | 15 | /* #define LITTLE_ENDIAN * 
This should be #define'd already, if true. */ 16 | /* #define SHA1HANDSOFF * Copies data before messing with it. */ 17 | 18 | #define SHA1HANDSOFF 19 | #include 20 | #include 21 | //#include 22 | static inline void 23 | memcpy(void * dst, const void *src, int size) 24 | { 25 | uint8_t *pdst = (uint8_t *)dst; 26 | const uint8_t *psrc = (const uint8_t *)src; 27 | int idx = 0; 28 | for (idx = 0; idx < size; idx++) { 29 | pdst[idx] = psrc[idx]; 30 | } 31 | } 32 | 33 | static inline void 34 | memset(void * target, uint8_t val, int size) 35 | { 36 | uint8_t *pdst = (uint8_t *)target; 37 | int idx = 0; 38 | for (idx = 0; idx < size; idx++) { 39 | pdst[idx] = val; 40 | } 41 | } 42 | /* for uint32_t */ 43 | #include 44 | #include "sha1.h" 45 | 46 | 47 | #define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) 48 | 49 | /* blk0() and blk() perform the initial expand. */ 50 | /* I got the idea of expanding during the round function from SSLeay */ 51 | #if BYTE_ORDER == LITTLE_ENDIAN 52 | #define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \ 53 | |(rol(block->l[i],8)&0x00FF00FF)) 54 | #elif BYTE_ORDER == BIG_ENDIAN 55 | #define blk0(i) block->l[i] 56 | #else 57 | #error "Endianness not defined!" 58 | #endif 59 | #define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \ 60 | ^block->l[(i+2)&15]^block->l[i&15],1)) 61 | 62 | /* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */ 63 | #define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30); 64 | #define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30); 65 | #define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30); 66 | #define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30); 67 | #define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30); 68 | 69 | 70 | /* Hash a single 512-bit block. This is the core of the algorithm. 
*/ 71 | 72 | void SHA1Transform( 73 | uint32_t state[5], 74 | const unsigned char buffer[64] 75 | ) 76 | { 77 | uint32_t a, b, c, d, e; 78 | 79 | typedef union 80 | { 81 | unsigned char c[64]; 82 | uint32_t l[16]; 83 | } CHAR64LONG16; 84 | 85 | #ifdef SHA1HANDSOFF 86 | CHAR64LONG16 block[1]; /* use array to appear as a pointer */ 87 | 88 | memcpy(block, buffer, 64); 89 | #else 90 | /* The following had better never be used because it causes the 91 | * pointer-to-const buffer to be cast into a pointer to non-const. 92 | * And the result is written through. I threw a "const" in, hoping 93 | * this will cause a diagnostic. 94 | */ 95 | CHAR64LONG16 *block = (const CHAR64LONG16 *) buffer; 96 | #endif 97 | /* Copy context->state[] to working vars */ 98 | a = state[0]; 99 | b = state[1]; 100 | c = state[2]; 101 | d = state[3]; 102 | e = state[4]; 103 | /* 4 rounds of 20 operations each. Loop unrolled. */ 104 | R0(a, b, c, d, e, 0); 105 | R0(e, a, b, c, d, 1); 106 | R0(d, e, a, b, c, 2); 107 | R0(c, d, e, a, b, 3); 108 | R0(b, c, d, e, a, 4); 109 | R0(a, b, c, d, e, 5); 110 | R0(e, a, b, c, d, 6); 111 | R0(d, e, a, b, c, 7); 112 | R0(c, d, e, a, b, 8); 113 | R0(b, c, d, e, a, 9); 114 | R0(a, b, c, d, e, 10); 115 | R0(e, a, b, c, d, 11); 116 | R0(d, e, a, b, c, 12); 117 | R0(c, d, e, a, b, 13); 118 | R0(b, c, d, e, a, 14); 119 | R0(a, b, c, d, e, 15); 120 | R1(e, a, b, c, d, 16); 121 | R1(d, e, a, b, c, 17); 122 | R1(c, d, e, a, b, 18); 123 | R1(b, c, d, e, a, 19); 124 | R2(a, b, c, d, e, 20); 125 | R2(e, a, b, c, d, 21); 126 | R2(d, e, a, b, c, 22); 127 | R2(c, d, e, a, b, 23); 128 | R2(b, c, d, e, a, 24); 129 | R2(a, b, c, d, e, 25); 130 | R2(e, a, b, c, d, 26); 131 | R2(d, e, a, b, c, 27); 132 | R2(c, d, e, a, b, 28); 133 | R2(b, c, d, e, a, 29); 134 | R2(a, b, c, d, e, 30); 135 | R2(e, a, b, c, d, 31); 136 | R2(d, e, a, b, c, 32); 137 | R2(c, d, e, a, b, 33); 138 | R2(b, c, d, e, a, 34); 139 | R2(a, b, c, d, e, 35); 140 | R2(e, a, b, c, d, 36); 141 | R2(d, e, a, b, c, 
37); 142 | R2(c, d, e, a, b, 38); 143 | R2(b, c, d, e, a, 39); 144 | R3(a, b, c, d, e, 40); 145 | R3(e, a, b, c, d, 41); 146 | R3(d, e, a, b, c, 42); 147 | R3(c, d, e, a, b, 43); 148 | R3(b, c, d, e, a, 44); 149 | R3(a, b, c, d, e, 45); 150 | R3(e, a, b, c, d, 46); 151 | R3(d, e, a, b, c, 47); 152 | R3(c, d, e, a, b, 48); 153 | R3(b, c, d, e, a, 49); 154 | R3(a, b, c, d, e, 50); 155 | R3(e, a, b, c, d, 51); 156 | R3(d, e, a, b, c, 52); 157 | R3(c, d, e, a, b, 53); 158 | R3(b, c, d, e, a, 54); 159 | R3(a, b, c, d, e, 55); 160 | R3(e, a, b, c, d, 56); 161 | R3(d, e, a, b, c, 57); 162 | R3(c, d, e, a, b, 58); 163 | R3(b, c, d, e, a, 59); 164 | R4(a, b, c, d, e, 60); 165 | R4(e, a, b, c, d, 61); 166 | R4(d, e, a, b, c, 62); 167 | R4(c, d, e, a, b, 63); 168 | R4(b, c, d, e, a, 64); 169 | R4(a, b, c, d, e, 65); 170 | R4(e, a, b, c, d, 66); 171 | R4(d, e, a, b, c, 67); 172 | R4(c, d, e, a, b, 68); 173 | R4(b, c, d, e, a, 69); 174 | R4(a, b, c, d, e, 70); 175 | R4(e, a, b, c, d, 71); 176 | R4(d, e, a, b, c, 72); 177 | R4(c, d, e, a, b, 73); 178 | R4(b, c, d, e, a, 74); 179 | R4(a, b, c, d, e, 75); 180 | R4(e, a, b, c, d, 76); 181 | R4(d, e, a, b, c, 77); 182 | R4(c, d, e, a, b, 78); 183 | R4(b, c, d, e, a, 79); 184 | /* Add the working vars back into context.state[] */ 185 | state[0] += a; 186 | state[1] += b; 187 | state[2] += c; 188 | state[3] += d; 189 | state[4] += e; 190 | /* Wipe variables */ 191 | a = b = c = d = e = 0; 192 | #ifdef SHA1HANDSOFF 193 | memset(block, '\0', sizeof(block)); 194 | #endif 195 | } 196 | 197 | 198 | /* SHA1Init - Initialize new context */ 199 | 200 | void SHA1Init( 201 | SHA1_CTX * context 202 | ) 203 | { 204 | /* SHA1 initialization constants */ 205 | context->state[0] = 0x67452301; 206 | context->state[1] = 0xEFCDAB89; 207 | context->state[2] = 0x98BADCFE; 208 | context->state[3] = 0x10325476; 209 | context->state[4] = 0xC3D2E1F0; 210 | context->count[0] = context->count[1] = 0; 211 | } 212 | 213 | 214 | /* Run your data through this. 
*/ 215 | 216 | void SHA1Update( 217 | SHA1_CTX * context, 218 | const unsigned char *data, 219 | uint32_t len 220 | ) 221 | { 222 | uint32_t i; 223 | 224 | uint32_t j; 225 | 226 | j = context->count[0]; 227 | if ((context->count[0] += len << 3) < j) 228 | context->count[1]++; 229 | context->count[1] += (len >> 29); 230 | j = (j >> 3) & 63; 231 | if ((j + len) > 63) 232 | { 233 | memcpy(&context->buffer[j], data, (i = 64 - j)); 234 | SHA1Transform(context->state, context->buffer); 235 | for (; i + 63 < len; i += 64) 236 | { 237 | SHA1Transform(context->state, &data[i]); 238 | } 239 | j = 0; 240 | } 241 | else 242 | i = 0; 243 | memcpy(&context->buffer[j], &data[i], len - i); 244 | } 245 | 246 | 247 | /* Add padding and return the message digest. */ 248 | 249 | void SHA1Final( 250 | unsigned char digest[20], 251 | SHA1_CTX * context 252 | ) 253 | { 254 | unsigned i; 255 | 256 | unsigned char finalcount[8]; 257 | 258 | unsigned char c; 259 | 260 | #if 0 /* untested "improvement" by DHR */ 261 | /* Convert context->count to a sequence of bytes 262 | * in finalcount. Second element first, but 263 | * big-endian order within element. 264 | * But we do it all backwards. 265 | */ 266 | unsigned char *fcp = &finalcount[8]; 267 | 268 | for (i = 0; i < 2; i++) 269 | { 270 | uint32_t t = context->count[i]; 271 | 272 | int j; 273 | 274 | for (j = 0; j < 4; t >>= 8, j++) 275 | *--fcp = (unsigned char) t} 276 | #else 277 | for (i = 0; i < 8; i++) 278 | { 279 | finalcount[i] = (unsigned char) ((context->count[(i >= 4 ? 
0 : 1)] >> ((3 - (i & 3)) * 8)) & 255); /* Endian independent */ 280 | } 281 | #endif 282 | c = 0200; 283 | SHA1Update(context, &c, 1); 284 | while ((context->count[0] & 504) != 448) 285 | { 286 | c = 0000; 287 | SHA1Update(context, &c, 1); 288 | } 289 | SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */ 290 | for (i = 0; i < 20; i++) 291 | { 292 | digest[i] = (unsigned char) 293 | ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255); 294 | } 295 | /* Wipe variables */ 296 | memset(context, '\0', sizeof(*context)); 297 | memset(&finalcount, '\0', sizeof(finalcount)); 298 | } 299 | 300 | void SHA1( 301 | char *hash_out, 302 | const char *str, 303 | int len) 304 | { 305 | SHA1_CTX ctx; 306 | unsigned int ii; 307 | 308 | SHA1Init(&ctx); 309 | for (ii=0; ii 6 | #include 7 | #include 8 | #include 9 | 10 | static void 11 | riscv_lb_translator(struct decoding * dec, struct prefetch_blob * blob, 12 | uint32_t instruction) 13 | 14 | { 15 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 16 | struct hart * hartptr = (struct hart *)blob->opaque; 17 | int32_t signed_offset = sign_extend32(dec->imm, 11); 18 | 19 | PRECHECK_TRANSLATION_CACHE(lb_instruction, blob); 20 | BEGIN_TRANSLATION(lb_instruction); 21 | __asm__ volatile("movl "PIC_PARAM(1)", %%edx;" 22 | "shl $2, %%edx;" 23 | "addq %%r15, %%rdx;" 24 | "movl (%%rdx), %%esi;" 25 | "movl "PIC_PARAM(0)", %%edx;" 26 | "addl %%edx, %%esi;" // ESI: the memory location 27 | "movq %%r12, %%rdi;" 28 | "movq $mmu_read8, %%rax;" 29 | SAVE_GUEST_CONTEXT_SWITCH_REGS() 30 | "call *%%rax;" // EAX: the memory read from the location 31 | RESTORE_GUEST_CONTEXT_SWITCH_REGS() 32 | "movl "PIC_PARAM(2)", %%edx;" 33 | "shl $2, %%edx;" 34 | "addq %%r15, %%rdx;" 35 | "movsbl %%al, %%eax;" 36 | "movl %%eax, (%%rdx);" 37 | RESET_ZERO_REGISTER() 38 | PROCEED_TO_NEXT_INSTRUCTION() 39 | END_INSTRUCTION(lb_instruction) 40 | : 41 | : 42 | :"memory"); 43 | BEGIN_PARAM_SCHEMA() 44 | PARAM32() /*imm: signed 
offset*/ 45 | PARAM32() /*rs1_index*/ 46 | PARAM32() /*rd_index*/ 47 | END_PARAM_SCHEMA() 48 | END_TRANSLATION(lb_instruction); 49 | BEGIN_PARAM(lb_instruction) 50 | signed_offset, 51 | dec->rs1_index, 52 | dec->rd_index 53 | END_PARAM() 54 | COMMIT_TRANSLATION(lb_instruction, hartptr, instruction_linear_address); 55 | blob->next_instruction_to_fetch += 4; 56 | } 57 | 58 | static void 59 | riscv_lbu_translator(struct decoding * dec, struct prefetch_blob * blob, 60 | uint32_t instruction) 61 | 62 | { 63 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 64 | struct hart * hartptr = (struct hart *)blob->opaque; 65 | int32_t signed_offset = sign_extend32(dec->imm, 11); 66 | 67 | PRECHECK_TRANSLATION_CACHE(lbu_instruction, blob); 68 | BEGIN_TRANSLATION(lbu_instruction); 69 | __asm__ volatile("movl "PIC_PARAM(1)", %%edx;" 70 | "shl $2, %%edx;" 71 | "addq %%r15, %%rdx;" 72 | "movl (%%rdx), %%esi;" 73 | "movl "PIC_PARAM(0)", %%edx;" 74 | "addl %%edx, %%esi;" // ESI: the memory location 75 | "movq %%r12, %%rdi;" 76 | "movq $mmu_read8, %%rax;" 77 | SAVE_GUEST_CONTEXT_SWITCH_REGS() 78 | "call *%%rax;" // EAX: the memory read from the location 79 | RESTORE_GUEST_CONTEXT_SWITCH_REGS() 80 | "movl "PIC_PARAM(2)", %%edx;" 81 | "shl $2, %%edx;" 82 | "addq %%r15, %%rdx;" 83 | "movzbl %%al, %%eax;" 84 | "movl %%eax, (%%rdx);" 85 | RESET_ZERO_REGISTER() 86 | PROCEED_TO_NEXT_INSTRUCTION() 87 | END_INSTRUCTION(lbu_instruction) 88 | : 89 | : 90 | :"memory"); 91 | BEGIN_PARAM_SCHEMA() 92 | PARAM32() /*imm: signed offset*/ 93 | PARAM32() /*rs1_index*/ 94 | PARAM32() /*rd_index*/ 95 | END_PARAM_SCHEMA() 96 | END_TRANSLATION(lbu_instruction); 97 | BEGIN_PARAM(lbu_instruction) 98 | signed_offset, 99 | dec->rs1_index, 100 | dec->rd_index 101 | END_PARAM() 102 | COMMIT_TRANSLATION(lbu_instruction, hartptr, instruction_linear_address); 103 | blob->next_instruction_to_fetch += 4; 104 | } 105 | 106 | 107 | static void 108 | riscv_lh_translator(struct decoding * dec, struct 
/*
 * RV32I load translators (LH / LHU / LW).
 *
 * Each translator emits a stencil of x86-64 host code which, when the
 * translated guest instruction executes:
 *   1. loads the guest rs1 register and adds the sign-extended I-type
 *      immediate to form the effective address in %esi
 *      (%r15 appears to hold the base of the guest GPR file, with each
 *       register occupying 4 bytes -- confirm against the translation
 *       macros);
 *   2. calls the MMU read helper via %rax (%r12 appears to carry the
 *      hart pointer, moved into %rdi as the helper's first argument --
 *      confirm);
 *   3. extends the value read (sign-extend for LH, zero-extend for LHU,
 *      none for LW) and stores it into the guest rd slot;
 *   4. resets the x0 slot and proceeds to the next instruction.
 *
 * NOTE(review): the opening line of riscv_lh_translator was cut off by
 * the extraction; the signature below is reconstructed from the
 * identical lhu/lw translators and the constructor registration.
 */
static void
riscv_lh_translator(struct decoding * dec, struct prefetch_blob * blob,
                    uint32_t instruction)

{
    uint32_t instruction_linear_address = blob->next_instruction_to_fetch;
    struct hart * hartptr = (struct hart *)blob->opaque;
    /* I-type immediate: bits [11:0], sign bit at position 11. */
    int32_t signed_offset = sign_extend32(dec->imm, 11);

    PRECHECK_TRANSLATION_CACHE(lh_instruction, blob);
    BEGIN_TRANSLATION(lh_instruction);
    __asm__ volatile("movl "PIC_PARAM(1)", %%edx;"   // EDX: rs1 index
                     "shl $2, %%edx;"                // scale to byte offset
                     "addq %%r15, %%rdx;"            // RDX: &guest_regs[rs1]
                     "movl (%%rdx), %%esi;"          // ESI: rs1 value
                     "movl "PIC_PARAM(0)", %%edx;"   // EDX: signed offset
                     "addl %%edx, %%esi;" // ESI: the memory location
                     "movq %%r12, %%rdi;"
                     "movq $mmu_read16, %%rax;"
                     SAVE_GUEST_CONTEXT_SWITCH_REGS()
                     "call *%%rax;" // EAX: the memory read from the location
                     RESTORE_GUEST_CONTEXT_SWITCH_REGS()
                     "movl "PIC_PARAM(2)", %%edx;"   // EDX: rd index
                     "shl $2, %%edx;"
                     "addq %%r15, %%rdx;"            // RDX: &guest_regs[rd]
                     "movswl %%ax, %%eax;"           // LH: sign-extend 16 -> 32
                     "movl %%eax, (%%rdx);"
                     RESET_ZERO_REGISTER()
                     PROCEED_TO_NEXT_INSTRUCTION()
                     END_INSTRUCTION(lh_instruction)
                     :
                     :
                     :"memory");
    BEGIN_PARAM_SCHEMA()
        PARAM32() /*imm: signed offset*/
        PARAM32() /*rs1_index*/
        PARAM32() /*rd_index*/
    END_PARAM_SCHEMA()
    END_TRANSLATION(lh_instruction);
    BEGIN_PARAM(lh_instruction)
        signed_offset,
        dec->rs1_index,
        dec->rd_index
    END_PARAM()
    COMMIT_TRANSLATION(lh_instruction, hartptr, instruction_linear_address);
    blob->next_instruction_to_fetch += 4;
}

/* LHU: identical stencil to LH except the halfword is zero-extended
 * (movzwl) instead of sign-extended. */
static void
riscv_lhu_translator(struct decoding * dec, struct prefetch_blob * blob,
                     uint32_t instruction)

{
    uint32_t instruction_linear_address = blob->next_instruction_to_fetch;
    struct hart * hartptr = (struct hart *)blob->opaque;
    int32_t signed_offset = sign_extend32(dec->imm, 11);

    PRECHECK_TRANSLATION_CACHE(lhu_instruction, blob);
    BEGIN_TRANSLATION(lhu_instruction);
    __asm__ volatile("movl "PIC_PARAM(1)", %%edx;"
                     "shl $2, %%edx;"
                     "addq %%r15, %%rdx;"
                     "movl (%%rdx), %%esi;"
                     "movl "PIC_PARAM(0)", %%edx;"
                     "addl %%edx, %%esi;" // ESI: the memory location
                     "movq %%r12, %%rdi;"
                     "movq $mmu_read16, %%rax;"
                     SAVE_GUEST_CONTEXT_SWITCH_REGS()
                     "call *%%rax;" // EAX: the memory read from the location
                     RESTORE_GUEST_CONTEXT_SWITCH_REGS()
                     "movl "PIC_PARAM(2)", %%edx;"
                     "shl $2, %%edx;"
                     "addq %%r15, %%rdx;"
                     "movzwl %%ax, %%eax;"           // LHU: zero-extend 16 -> 32
                     "movl %%eax, (%%rdx);"
                     RESET_ZERO_REGISTER()
                     PROCEED_TO_NEXT_INSTRUCTION()
                     END_INSTRUCTION(lhu_instruction)
                     :
                     :
                     :"memory");
    BEGIN_PARAM_SCHEMA()
        PARAM32() /*imm: signed offset*/
        PARAM32() /*rs1_index*/
        PARAM32() /*rd_index*/
    END_PARAM_SCHEMA()
    END_TRANSLATION(lhu_instruction);
    BEGIN_PARAM(lhu_instruction)
        signed_offset,
        dec->rs1_index,
        dec->rd_index
    END_PARAM()
    COMMIT_TRANSLATION(lhu_instruction, hartptr, instruction_linear_address);
    blob->next_instruction_to_fetch += 4;
}

/* LW: full 32-bit load; no extension needed. */
static void
riscv_lw_translator(struct decoding * dec, struct prefetch_blob * blob,
                    uint32_t instruction)

{
    uint32_t instruction_linear_address = blob->next_instruction_to_fetch;
    struct hart * hartptr = (struct hart *)blob->opaque;
    int32_t signed_offset = sign_extend32(dec->imm, 11);

    PRECHECK_TRANSLATION_CACHE(lw_instruction, blob);
    BEGIN_TRANSLATION(lw_instruction);
    __asm__ volatile("movl "PIC_PARAM(1)", %%edx;"
                     "shl $2, %%edx;"
                     "addq %%r15, %%rdx;"
                     "movl (%%rdx), %%esi;"
                     "movl "PIC_PARAM(0)", %%edx;"
                     "addl %%edx, %%esi;" // ESI: the memory location
                     "movq %%r12, %%rdi;"
                     "movq $mmu_read32, %%rax;"
                     SAVE_GUEST_CONTEXT_SWITCH_REGS()
                     "call *%%rax;" // EAX: the memory read from the location
                     RESTORE_GUEST_CONTEXT_SWITCH_REGS()
                     "movl "PIC_PARAM(2)", %%edx;"
                     "shl $2, %%edx;"
                     "addq %%r15, %%rdx;"
                     "movl %%eax, (%%rdx);"
                     RESET_ZERO_REGISTER()
                     PROCEED_TO_NEXT_INSTRUCTION()
                     END_INSTRUCTION(lw_instruction)
                     :
                     :
                     :"memory");
    BEGIN_PARAM_SCHEMA()
        PARAM32() /*imm: signed offset*/
        PARAM32() /*rs1_index*/
        PARAM32() /*rd_index*/
    END_PARAM_SCHEMA()
    END_TRANSLATION(lw_instruction);
    BEGIN_PARAM(lw_instruction)
        signed_offset,
        dec->rs1_index,
        dec->rd_index
    END_PARAM()
    COMMIT_TRANSLATION(lw_instruction, hartptr, instruction_linear_address);
    blob->next_instruction_to_fetch += 4;
}

/* funct3 -> translator dispatch table for the LOAD opcode. */
static instruction_sub_translator per_funct3_handlers[8];

/*
 * Entry point for the LOAD opcode group: decode as I-type and dispatch
 * on funct3.  An unregistered funct3 (e.g. an illegal encoding) trips
 * the ASSERT.
 */
void
riscv_memory_load_instructions_translation_entry(struct prefetch_blob * blob,
                                                 uint32_t instruction)
{
    struct decoding dec;
    instruction_decoding_per_type(&dec, instruction, ENCODING_TYPE_I);
    ASSERT(per_funct3_handlers[dec.funct3]);
    per_funct3_handlers[dec.funct3](&dec, blob, instruction);
}


/* Register the five RV32I loads.  funct3 encodings per the base ISA:
 * 0=LB, 1=LH, 2=LW, 4=LBU, 5=LHU.  riscv_lb_translator and
 * riscv_lbu_translator are defined earlier in this file (outside this
 * extract). */
__attribute__((constructor)) static void
memory_load_constructor(void)
{
    memset(per_funct3_handlers, 0x0, sizeof(per_funct3_handlers));
    per_funct3_handlers[2] = riscv_lw_translator;
    per_funct3_handlers[1] = riscv_lh_translator;
    per_funct3_handlers[5] = riscv_lhu_translator;
    per_funct3_handlers[0] = riscv_lb_translator;
    per_funct3_handlers[4] = riscv_lbu_translator;
}
| hartptr->hart_id, hartptr->pc, csr->csr_blob); 13 | } 14 | 15 | static uint32_t 16 | csr_scounteren_read(struct hart *hartptr, struct csr_entry *csr) 17 | { 18 | log_trace("hart id:%d pc:%08x, csr:scounteren read:0x%x\n", 19 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 20 | return csr->csr_blob; 21 | } 22 | 23 | static void 24 | csr_scounteren_reset(struct hart *hartptr, struct csr_entry * csr) 25 | { 26 | csr->csr_blob = 0x0; 27 | } 28 | 29 | static struct csr_registery_entry scounteren_csr_entry = { 30 | .csr_addr = 0x106, 31 | .csr_registery = { 32 | .wpri_mask = WPRI_MASK_ALL, 33 | .reset = csr_scounteren_reset, 34 | .read = csr_scounteren_read, 35 | .write = csr_scounteren_write 36 | } 37 | }; 38 | 39 | 40 | 41 | #include 42 | #include 43 | 44 | static void 45 | csr_satp_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 46 | { 47 | //uint32_t old_blob = csr->csr_blob; 48 | csr->csr_blob = value; 49 | log_trace("hart id:%d pc:%08x, csr:satp write 0x:%x\n", 50 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 51 | // UPDATE: The PC is not changed, later we have to raise an exception 52 | // if it fails to translate the address. 
53 | // THE PC IS CHANGED TO ITS VIRTUAL ADDRESS 54 | #if 0 55 | if (csr->csr_blob & 0x80000000 && (!(old_blob & 0x80000000))) { 56 | uint32_t va = 0; 57 | int rc = pa_to_va(hartptr, hartptr->pc, hartptr->itlb,hartptr->itlb_cap, 58 | &va); 59 | ASSERT(!rc); 60 | hartptr->pc = va; 61 | } 62 | #endif 63 | invalidate_tlb(hartptr->itlb, hartptr->itlb_cap); 64 | invalidate_tlb(hartptr->dtlb, hartptr->dtlb_cap); 65 | flush_translation_cache(hartptr); 66 | } 67 | 68 | static uint32_t 69 | csr_satp_read(struct hart *hartptr, struct csr_entry *csr) 70 | { 71 | log_trace("hart id:%d pc:%08x, csr:satp read:0x%x\n", 72 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 73 | return csr->csr_blob; 74 | } 75 | 76 | static void 77 | csr_satp_reset(struct hart *hartptr, struct csr_entry * csr) 78 | { 79 | csr->csr_blob = 0x0; 80 | } 81 | 82 | static struct csr_registery_entry satp_csr_entry = { 83 | .csr_addr = CSR_ADDRESS_SATP, 84 | .csr_registery = { 85 | .wpri_mask = WPRI_MASK_ALL, 86 | .reset = csr_satp_reset, 87 | .read = csr_satp_read, 88 | .write = csr_satp_write 89 | } 90 | }; 91 | 92 | 93 | 94 | static void 95 | csr_sie_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 96 | { 97 | hartptr->ienable.bits.ssi = (value >> 1) & 0x1; 98 | hartptr->ienable.bits.sti = (value >> 5) & 0x1; 99 | hartptr->ienable.bits.sei = (value >> 9) & 0x1; 100 | log_trace("hart id:%d pc:%08x, csr:sie write 0x:%x\n", 101 | hartptr->hart_id, hartptr->pc, value); 102 | } 103 | 104 | static uint32_t 105 | csr_sie_read(struct hart *hartptr, struct csr_entry *csr) 106 | { 107 | uint32_t blob = 0; 108 | blob |= (uint32_t)(hartptr->ienable.bits.usi) << 0; 109 | blob |= (uint32_t)(hartptr->ienable.bits.ssi) << 1; 110 | blob |= (uint32_t)(hartptr->ienable.bits.uti) << 4; 111 | blob |= (uint32_t)(hartptr->ienable.bits.sti) << 5; 112 | blob |= (uint32_t)(hartptr->ienable.bits.uei) << 8; 113 | blob |= (uint32_t)(hartptr->ienable.bits.sei) << 9; 114 | log_trace("hart id:%d pc:%08x, csr:sie 
read:0x%x\n", 115 | hartptr->hart_id, hartptr->pc, blob); 116 | return blob; 117 | } 118 | 119 | 120 | static struct csr_registery_entry sie_csr_entry = { 121 | .csr_addr = CSR_ADDRESS_SIE, 122 | .csr_registery = { 123 | .wpri_mask = 0x00000222, 124 | .read = csr_sie_read, 125 | .write = csr_sie_write 126 | } 127 | }; 128 | 129 | 130 | static void 131 | csr_sip_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 132 | { 133 | // ONLY SSIP is writable. 134 | hartptr->ipending.bits.ssi = (value >> 1) & 0x1; 135 | log_trace("hart id:%d pc:%08x, csr:sip write 0x:%x\n", 136 | hartptr->hart_id, hartptr->pc, value); 137 | } 138 | 139 | static uint32_t 140 | csr_sip_read(struct hart *hartptr, struct csr_entry *csr) 141 | { 142 | uint32_t blob = 0; 143 | blob |= (uint32_t)(hartptr->ipending.bits.ssi) << 1; 144 | blob |= (uint32_t)(hartptr->ipending.bits.sti) << 5; 145 | blob |= (uint32_t)(hartptr->ipending.bits.sei) << 9; 146 | log_trace("hart id:%d pc:%08x, csr:sip read:0x%x\n", 147 | hartptr->hart_id, hartptr->pc, blob); 148 | return blob; 149 | } 150 | 151 | 152 | static struct csr_registery_entry sip_csr_entry = { 153 | .csr_addr = CSR_ADDRESS_SIP, 154 | .csr_registery = { 155 | .wpri_mask = 0x00000222, 156 | .read = csr_sip_read, 157 | .write = csr_sip_write 158 | } 159 | }; 160 | 161 | 162 | 163 | static void 164 | csr_sstatus_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 165 | { 166 | hartptr->status.uie = (value >> 0) & 0x1; 167 | hartptr->status.sie = (value >> 1) & 0x1; 168 | hartptr->status.upie = (value >> 4) & 0x1; 169 | hartptr->status.spie = (value >> 5) & 0x1; 170 | hartptr->status.spp = (value >> 8) & 0x1; 171 | log_trace("hart id:%d pc:%08x, csr:sstatus write 0x:%x\n", 172 | hartptr->hart_id, hartptr->pc, value); 173 | } 174 | 175 | static uint32_t 176 | csr_sstatus_read(struct hart *hartptr, struct csr_entry *csr) 177 | { 178 | uint32_t blob = 0; 179 | blob |= (uint32_t)(hartptr->status.uie) << 0; 180 | blob |= 
(uint32_t)(hartptr->status.sie) << 1; 181 | blob |= (uint32_t)(hartptr->status.upie) << 4; 182 | blob |= (uint32_t)(hartptr->status.spie) << 5; 183 | blob |= (uint32_t)(hartptr->status.spp) << 8; 184 | log_trace("hart id:%d pc:%08x, csr:sstatus read:0x%x\n", 185 | hartptr->hart_id, hartptr->pc, blob); 186 | return blob; 187 | } 188 | 189 | 190 | static struct csr_registery_entry sstatus_csr_entry = { 191 | .csr_addr = CSR_ADDRESS_SSTATUS, 192 | .csr_registery = { 193 | .wpri_mask = 0x00000133, 194 | .read = csr_sstatus_read, 195 | .write = csr_sstatus_write 196 | } 197 | }; 198 | 199 | static void 200 | csr_stvec_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 201 | { 202 | csr->csr_blob = value; 203 | log_trace("hart id:%d pc:%08x, csr:stvec write 0x:%x\n", 204 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 205 | } 206 | 207 | static uint32_t 208 | csr_stvec_read(struct hart *hartptr, struct csr_entry *csr) 209 | { 210 | log_trace("hart id:%d pc:%08x, csr:stvec read:0x%x\n", 211 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 212 | return csr->csr_blob; 213 | } 214 | 215 | static void 216 | csr_stvec_reset(struct hart *hartptr, struct csr_entry * csr) 217 | { 218 | csr->csr_blob = 0x0; 219 | } 220 | 221 | static struct csr_registery_entry stvec_csr_entry = { 222 | .csr_addr = CSR_ADDRESS_STVEC, 223 | .csr_registery = { 224 | .wpri_mask = WPRI_MASK_ALL, 225 | .reset = csr_stvec_reset, 226 | .read = csr_stvec_read, 227 | .write = csr_stvec_write 228 | } 229 | }; 230 | 231 | static void 232 | csr_sscratch_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 233 | { 234 | csr->csr_blob = value; 235 | log_trace("hart id:%d pc:%08x, csr:sscratch write 0x:%x\n", 236 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 237 | } 238 | 239 | static uint32_t 240 | csr_sscratch_read(struct hart *hartptr, struct csr_entry *csr) 241 | { 242 | log_trace("hart id:%d pc:%08x, csr:sscratch read:0x%x\n", 243 | hartptr->hart_id, hartptr->pc, 
csr->csr_blob); 244 | return csr->csr_blob; 245 | } 246 | 247 | static void 248 | csr_sscratch_reset(struct hart *hartptr, struct csr_entry * csr) 249 | { 250 | csr->csr_blob = 0x0; 251 | } 252 | 253 | static struct csr_registery_entry sscratch_csr_entry = { 254 | .csr_addr = CSR_ADDRESS_SSCRATCH, 255 | .csr_registery = { 256 | .wpri_mask = WPRI_MASK_ALL, 257 | .reset = csr_sscratch_reset, 258 | .read = csr_sscratch_read, 259 | .write = csr_sscratch_write 260 | } 261 | }; 262 | 263 | static void 264 | csr_sepc_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 265 | { 266 | csr->csr_blob = value; 267 | log_debug("hart id:%d pc:%08x, csr:sepc write 0x:%x\n", 268 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 269 | } 270 | 271 | static uint32_t 272 | csr_sepc_read(struct hart *hartptr, struct csr_entry *csr) 273 | { 274 | log_debug("hart id:%d pc:%08x, csr:sepc read:0x%x\n", 275 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 276 | return csr->csr_blob; 277 | } 278 | 279 | static void 280 | csr_sepc_reset(struct hart *hartptr, struct csr_entry * csr) 281 | { 282 | csr->csr_blob = 0x0; 283 | } 284 | 285 | static struct csr_registery_entry sepc_csr_entry = { 286 | .csr_addr = CSR_ADDRESS_SEPC, 287 | .csr_registery = { 288 | .wpri_mask = WPRI_MASK_ALL, 289 | .reset = csr_sepc_reset, 290 | .read = csr_sepc_read, 291 | .write = csr_sepc_write 292 | } 293 | }; 294 | 295 | static void 296 | csr_stval_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 297 | { 298 | csr->csr_blob = value; 299 | log_trace("hart id:%d pc:%08x, csr:stval write 0x:%x\n", 300 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 301 | } 302 | 303 | static uint32_t 304 | csr_stval_read(struct hart *hartptr, struct csr_entry *csr) 305 | { 306 | log_trace("hart id:%d pc:%08x, csr:stval read:0x%x\n", 307 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 308 | return csr->csr_blob; 309 | } 310 | 311 | static void 312 | csr_stval_reset(struct hart *hartptr, struct 
csr_entry * csr) 313 | { 314 | csr->csr_blob = 0x0; 315 | } 316 | 317 | static struct csr_registery_entry stval_csr_entry = { 318 | .csr_addr = CSR_ADDRESS_STVAL, 319 | .csr_registery = { 320 | .wpri_mask = WPRI_MASK_ALL, 321 | .reset = csr_stval_reset, 322 | .read = csr_stval_read, 323 | .write = csr_stval_write 324 | } 325 | }; 326 | 327 | static void 328 | csr_scause_write(struct hart *hartptr, struct csr_entry * csr, uint32_t value) 329 | { 330 | csr->csr_blob = value; 331 | log_trace("hart id:%d pc:%08x, csr:scause write 0x:%x\n", 332 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 333 | } 334 | 335 | static uint32_t 336 | csr_scause_read(struct hart *hartptr, struct csr_entry *csr) 337 | { 338 | log_trace("hart id:%d pc:%08x, csr:scause read:0x%x\n", 339 | hartptr->hart_id, hartptr->pc, csr->csr_blob); 340 | return csr->csr_blob; 341 | } 342 | 343 | static void 344 | csr_scause_reset(struct hart *hartptr, struct csr_entry * csr) 345 | { 346 | csr->csr_blob = 0x0; 347 | } 348 | 349 | static struct csr_registery_entry scause_csr_entry = { 350 | .csr_addr = CSR_ADDRESS_SCAUSE, 351 | .csr_registery = { 352 | .wpri_mask = WPRI_MASK_ALL, 353 | .reset = csr_scause_reset, 354 | .read = csr_scause_read, 355 | .write = csr_scause_write 356 | } 357 | }; 358 | 359 | __attribute__((constructor)) static void 360 | csr_supervisor_level_init(void) 361 | { 362 | register_csr_entry(&scounteren_csr_entry); 363 | register_csr_entry(&satp_csr_entry); 364 | register_csr_entry(&sie_csr_entry); 365 | register_csr_entry(&sip_csr_entry); 366 | register_csr_entry(&sstatus_csr_entry); 367 | register_csr_entry(&stvec_csr_entry); 368 | register_csr_entry(&sscratch_csr_entry); 369 | register_csr_entry(&sepc_csr_entry); 370 | register_csr_entry(&stval_csr_entry); 371 | register_csr_entry(&scause_csr_entry); 372 | } 373 | 374 | -------------------------------------------------------------------------------- /vmm/debug.c: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019 Jie Zheng 3 | * 4 | * A native debugger: I am tring to honor teh GDB syntax in my debugger 5 | */ 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | static void 13 | print_hint(struct hart * hartptr) 14 | { 15 | printf(ANSI_COLOR_GREEN"(zelda.risc-v.dbg: "ANSI_COLOR_CYAN"0x%x" 16 | ANSI_COLOR_GREEN") "ANSI_COLOR_RESET, 17 | hartptr->pc); 18 | } 19 | 20 | #define CMDLINE_SIZE 256 21 | #define TOKEN_SIZE 64 22 | #define MAX_CMD_TOKEN_PREFIX 8 23 | 24 | struct cmd_registery_item { 25 | char * cmd_prefixs[MAX_CMD_TOKEN_PREFIX]; 26 | int (*func)(struct hart * hartptr, int argc, char *argv[]); 27 | char * desc; 28 | }; 29 | 30 | 31 | static int 32 | dump_registers_info(struct hart * hartptr, int argc, char *argv[]) 33 | { 34 | printf(ANSI_COLOR_MAGENTA"[breakpoint at 0x%x]:\n", hartptr->pc); 35 | dump_hart(hartptr); 36 | printf(ANSI_COLOR_RESET); 37 | return ACTION_CONTINUE; 38 | } 39 | 40 | static int 41 | dump_translation(struct hart * hartptr, int argc, char *argv[]) 42 | { 43 | printf(ANSI_COLOR_MAGENTA); 44 | dump_translation_cache(hartptr); 45 | printf(ANSI_COLOR_RESET); 46 | return ACTION_CONTINUE; 47 | } 48 | 49 | static int 50 | dump_breakpoints_info(struct hart * hartptr, int argc, char *argv[]) 51 | { 52 | dump_breakpoints(); 53 | return ACTION_CONTINUE; 54 | } 55 | 56 | static int 57 | debug_continue(struct hart * hartptr, int argc, char *argv[]) 58 | { 59 | return ACTION_STOP; 60 | } 61 | 62 | #include 63 | static int 64 | inspect_memory(struct hart * hartptr, int argc, char *argv[]) 65 | { 66 | if (argc != 2) { 67 | goto error_usage; 68 | } 69 | uint32_t low_addr = strtol(argv[0], NULL, 16); 70 | uint32_t high_addr = strtol(argv[1], NULL, 16); 71 | if (low_addr > high_addr) { 72 | printf(ANSI_COLOR_RED"memory addresses don't fit\n"ANSI_COLOR_RESET); 73 | goto error_usage; 74 | } 75 | 76 | struct pm_region_operation * pmr1 = 
search_pm_region_callback(low_addr); 77 | struct pm_region_operation * pmr2 = search_pm_region_callback(high_addr); 78 | if (!pmr1 || pmr1 != pmr2) { 79 | printf(ANSI_COLOR_RED"memory region doesnt exist or two addresses " 80 | "do not reside in same pm region\n"ANSI_COLOR_RESET); 81 | goto error_usage; 82 | } 83 | if (!pmr1->pmr_direct) { 84 | printf(ANSI_COLOR_RED"memory region does't support direct fetch\n"ANSI_COLOR_RESET); 85 | goto error_usage; 86 | } 87 | uint32_t * pm_low = pmr1->pmr_direct(low_addr, hartptr, pmr1); 88 | uint32_t * pm_high = pmr1->pmr_direct(high_addr, hartptr, pmr1); 89 | ASSERT(pm_low <= pm_high); 90 | int counter = 0; 91 | int dwords_per_line = 8; 92 | printf("host memory range:[%p - %p]\n", pm_low, pm_low); 93 | for (; pm_low < pm_high; pm_low += 1, counter++) { 94 | if ((counter % dwords_per_line) == 0) { 95 | printf("0x%08x: ",low_addr + counter * 4); 96 | } 97 | printf("%08x ", *pm_low); 98 | if (((counter + 1) % dwords_per_line) == 0) { 99 | printf("\n"); 100 | } 101 | } 102 | printf("\n"); 103 | return ACTION_CONTINUE; 104 | error_usage: 105 | printf(ANSI_COLOR_RED"example: /x 0x0000001 0x2000000\n"ANSI_COLOR_RESET); 106 | return ACTION_CONTINUE; 107 | } 108 | 109 | #include 110 | 111 | static int 112 | inspect_virtual_memory(struct hart * hartptr, int argc, char *argv[]) 113 | { 114 | if (argc != 2) { 115 | goto error_usage; 116 | } 117 | uint32_t low_addr = strtol(argv[0], NULL, 16); 118 | uint32_t high_addr = strtol(argv[1], NULL, 16); 119 | if (low_addr > high_addr) { 120 | printf(ANSI_COLOR_RED"memory addresses don't fit\n"ANSI_COLOR_RESET); 121 | goto error_usage; 122 | } 123 | low_addr = low_addr & ~0x3; 124 | high_addr = high_addr & ~0x3; 125 | 126 | int counter = 0; 127 | int dwords_per_line = 8; 128 | printf("virtual memory range:[%08x - %08x]\n", low_addr, high_addr); 129 | 130 | for (; low_addr < high_addr; low_addr += 1, counter++) { 131 | if ((counter % dwords_per_line) == 0) { 132 | printf("0x%08x: ",low_addr + 
counter * 4); 133 | } 134 | printf("%08x ", mmu_read32(hartptr, low_addr)); 135 | if (((counter + 1) % dwords_per_line) == 0) { 136 | printf("\n"); 137 | } 138 | } 139 | printf("\n"); 140 | return ACTION_CONTINUE; 141 | error_usage: 142 | printf(ANSI_COLOR_RED"example: /v 0x1000000 0x2000000\n"ANSI_COLOR_RESET); 143 | return ACTION_CONTINUE; 144 | } 145 | 146 | static int 147 | backtrace_call(struct hart * hartptr, int argc, char *argv[]) 148 | { 149 | if (argc != 2) { 150 | printf(ANSI_COLOR_RED"usage backtrace [leaf|noleaf] maxframe\n" 151 | ANSI_COLOR_RESET); 152 | return ACTION_CONTINUE; 153 | } 154 | int is_leaf_function = !strcmp(argv[0], "leaf"); 155 | int maxframe = atoi(argv[1]); 156 | printf("dump the calling stack:\n"); 157 | printf("\tis current frame marked as leaf: %s\n", is_leaf_function ? 158 | "yes" : "no"); 159 | printf("\tmaximum frames: %d\n", maxframe); 160 | 161 | uint32_t current_fp = hartptr->registers.s0; 162 | uint32_t current_pc = hartptr->pc; 163 | int idx = 0; 164 | for(idx = 0; idx < maxframe; idx++) { 165 | printf("\t#%d %08x\n", idx, current_pc); 166 | // Go outer frame. 
167 | if (!idx && is_leaf_function) { 168 | current_pc = hartptr->registers.ra; 169 | current_fp = mmu_read32(hartptr, current_fp - 4); 170 | } else { 171 | current_pc = mmu_read32(hartptr, current_fp - 4); 172 | current_fp = mmu_read32(hartptr, current_fp - 8); 173 | } 174 | } 175 | return ACTION_CONTINUE; 176 | } 177 | static int 178 | debug_help(struct hart * hartptr, int argc, char *argv[]); 179 | 180 | static struct cmd_registery_item cmds_items[] = { 181 | { 182 | .cmd_prefixs = {"info", "registers", NULL}, 183 | .func = dump_registers_info, 184 | .desc = "dump the registers of a hart" 185 | }, 186 | { 187 | .cmd_prefixs = {"info", "translation", NULL}, 188 | .func = dump_translation , 189 | .desc = "dump the items in translation cache" 190 | }, 191 | { 192 | .cmd_prefixs = {"info", "breakpoints", NULL}, 193 | .func = dump_breakpoints_info, 194 | .desc = "dump all the breakpoints" 195 | }, 196 | { 197 | .cmd_prefixs = {"continue", NULL}, 198 | .func = debug_continue, 199 | .desc = "continue to execute util it reaches next breakpoint" 200 | }, 201 | { 202 | .cmd_prefixs = {"break", NULL}, 203 | .func = add_breakpoint_command, 204 | .desc = "add a break by following the address of the target address" 205 | }, 206 | { 207 | .cmd_prefixs = {"/x", NULL}, 208 | .func = inspect_memory, 209 | .desc = "dump physical memory segment" 210 | }, 211 | { 212 | .cmd_prefixs = {"/v", NULL}, 213 | .func = inspect_virtual_memory, 214 | .desc = "dump virtual memory segment(BE CAUTIOUS TO USE IT !!!)" 215 | }, 216 | { 217 | .cmd_prefixs = {"backtrace", NULL}, 218 | .func = backtrace_call, 219 | .desc = "dump the calling stack..." 
220 | }, 221 | { 222 | .cmd_prefixs = {"help", NULL}, 223 | .func = debug_help, 224 | .desc = "display all the supported commands" 225 | } 226 | }; 227 | 228 | static int 229 | debug_help(struct hart * hartptr, int argc, char *argv[]) 230 | { 231 | printf("supported commands:\n"); 232 | int idx = 0; 233 | for (; idx < sizeof(cmds_items)/sizeof(cmds_items[0]); idx++) { 234 | int idx_tmp = 0; 235 | printf("\t"ANSI_COLOR_MAGENTA); 236 | for(;cmds_items[idx].cmd_prefixs[idx_tmp]; idx_tmp++) { 237 | printf("%s ", cmds_items[idx].cmd_prefixs[idx_tmp]); 238 | } 239 | if (cmds_items[idx].desc) { 240 | printf(ANSI_COLOR_RESET" %s", cmds_items[idx].desc); 241 | } 242 | printf(ANSI_COLOR_RESET); 243 | printf("\n"); 244 | } 245 | return ACTION_CONTINUE; 246 | } 247 | static int nr_cmds_items = sizeof(cmds_items) / sizeof(cmds_items[0]); 248 | 249 | static int 250 | process_cmds_tokens(struct hart * hartptr, int argc, char *argv[]) 251 | { 252 | int item_index = 0; 253 | struct cmd_registery_item * pitem = NULL; 254 | struct cmd_registery_item * item_found = NULL; 255 | int lpm_counter = 0; 256 | 257 | for (; item_index < nr_cmds_items; item_index++) { 258 | pitem = &cmds_items[item_index]; 259 | 260 | int idx = 0; 261 | char * ptr_token = NULL; 262 | int lpm_counter_tmp = 0; 263 | for (idx = 0; idx < MAX_CMD_TOKEN_PREFIX; idx++) { 264 | ptr_token = pitem->cmd_prefixs[idx]; 265 | if (!ptr_token) { 266 | lpm_counter_tmp = idx; 267 | break; 268 | } 269 | 270 | if (idx >= argc) { 271 | break; 272 | } 273 | 274 | if (strcmp(ptr_token, argv[idx])) { 275 | break; 276 | } 277 | } 278 | if (!ptr_token && lpm_counter_tmp > 0 && 279 | lpm_counter_tmp > lpm_counter) { 280 | item_found = pitem; 281 | lpm_counter = lpm_counter_tmp; 282 | } 283 | } 284 | 285 | if (item_found && lpm_counter > 0) { 286 | return item_found->func(hartptr, argc - lpm_counter, argv + lpm_counter); 287 | } else { 288 | // NO ACTION taken 289 | printf(ANSI_COLOR_RED"No action(enter 'help' for full commands list)\n" 
290 | ANSI_COLOR_RESET); 291 | } 292 | return ACTION_CONTINUE; 293 | } 294 | 295 | void 296 | enter_vmm_dbg_shell(struct hart * hartptr, int check_bps) 297 | { 298 | ASSERT(hartptr->hart_magic == HART_MAGIC_WORD); 299 | if (check_bps && !is_address_breakpoint(hartptr->pc)) { 300 | // The address is not tracked, go on. 301 | return; 302 | } 303 | char cmdline[CMDLINE_SIZE]; 304 | char * tokens[TOKEN_SIZE]; 305 | int nr_token; 306 | 307 | static char token_buffer[TOKEN_SIZE][TOKEN_SIZE]; 308 | static char * last_tokens[TOKEN_SIZE]; 309 | static int last_nr_token = 0; 310 | while (!0) { 311 | nr_token = 0; 312 | memset(cmdline, 0x0, sizeof(cmdline)); 313 | memset(tokens, 0x0, sizeof(tokens)); 314 | print_hint(hartptr); 315 | fgets(cmdline, sizeof(cmdline) - 1, stdin); 316 | 317 | {// strip the trailing carriage return '\n' 318 | char * ptr = cmdline; 319 | for (; *ptr; ptr++) { 320 | if (*ptr == '\n') { 321 | *ptr = '\0'; 322 | break; 323 | } 324 | } 325 | } 326 | 327 | {// split the tokens. 328 | char *ptr = strtok(cmdline, " "); 329 | do { 330 | if (!ptr) { 331 | break; 332 | } 333 | tokens[nr_token] = ptr; 334 | nr_token += 1; 335 | if (nr_token == TOKEN_SIZE) { 336 | break; 337 | } 338 | } while ((ptr = strtok(NULL, " "))); 339 | } 340 | 341 | int action; 342 | if (nr_token) { 343 | action = process_cmds_tokens(hartptr, nr_token, tokens); 344 | // MUST make a copy. 
345 | int idx = 0; 346 | for (idx = 0; idx < nr_token; idx++) { 347 | strcpy(token_buffer[idx], tokens[idx]); 348 | last_tokens[idx] = token_buffer[idx]; 349 | } 350 | last_nr_token = nr_token; 351 | } else { 352 | action = process_cmds_tokens(hartptr, last_nr_token, last_tokens); 353 | } 354 | 355 | if (action == ACTION_STOP) { 356 | break; 357 | } else if(action == ACTION_CONTINUE) { 358 | 359 | } else { 360 | __not_reach(); 361 | } 362 | } 363 | } 364 | 365 | 366 | 367 | -------------------------------------------------------------------------------- /vmm/translate_privileged_instr.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019-2020 Jie Zheng 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | __attribute__((unused)) static void 13 | ebreak_callback(struct hart * hartptr) 14 | { 15 | #if defined(NATIVE_DEBUGER) 16 | enter_vmm_dbg_shell(hartptr, 0); 17 | #endif 18 | } 19 | 20 | static void 21 | riscv_ebreak_translator(struct decoding * dec, struct prefetch_blob * blob, 22 | uint32_t instruction) 23 | { 24 | uint32_t instruction_linear_address = blob->next_instruction_to_fetch; 25 | struct hart * hartptr = (struct hart *)blob->opaque; 26 | PRECHECK_TRANSLATION_CACHE(ebreak_instruction, blob); 27 | BEGIN_TRANSLATION(ebreak_instruction); 28 | __asm__ volatile("movq %%r12, %%rdi;" 29 | "movq $ebreak_callback, %%rax;" 30 | SAVE_GUEST_CONTEXT_SWITCH_REGS() 31 | "call *%%rax;" 32 | RESTORE_GUEST_CONTEXT_SWITCH_REGS() 33 | PROCEED_TO_NEXT_INSTRUCTION() 34 | END_INSTRUCTION(ebreak_instruction) 35 | : 36 | : 37 | :"memory"); 38 | BEGIN_PARAM_SCHEMA() 39 | PARAM32() /*dummy*/ 40 | END_PARAM_SCHEMA() 41 | END_TRANSLATION(ebreak_instruction); 42 | BEGIN_PARAM(ebreak_instruction) 43 | instruction_linear_address 44 | END_PARAM() 45 | COMMIT_TRANSLATION(ebreak_instruction, hartptr, instruction_linear_address); 46 | blob->next_instruction_to_fetch += 4; 47 | } 48 | 
/*
 * Privileged/system instruction translators (MRET, SRET, SFENCE.VMA,
 * ECALL, WFI) plus the SYSTEM-opcode dispatch.  Each translator emits
 * host code that calls a C callback with the hart pointer (%r12) and
 * then traps back to the VMM, since these instructions can change the
 * privilege level or invalidate cached translations.
 */

/* MRET: restore pre-trap status/PC; must run in M-mode. */
__attribute__((unused)) static void
mret_callback(struct hart * hartptr)
{
    assert_hart_running_in_mmode(hartptr);
    adjust_mstatus_upon_mret(hartptr);
    adjust_pc_upon_mret(hartptr);
    // translation cache must be flushed, because adjusted privilege level may
    // diff in addressing space
    flush_translation_cache(hartptr);

}

static void
riscv_mret_translator(struct decoding * dec, struct prefetch_blob * blob,
                      uint32_t instruction)
{
    uint32_t instruction_linear_address = blob->next_instruction_to_fetch;
    struct hart * hartptr = (struct hart *)blob->opaque;
    PRECHECK_TRANSLATION_CACHE(mret_instruction, blob);
    BEGIN_TRANSLATION(mret_instruction);
    __asm__ volatile("movq %%r12, %%rdi;"
                     "movq $mret_callback, %%rax;"
                     SAVE_GUEST_CONTEXT_SWITCH_REGS()
                     "call *%%rax;"
                     RESTORE_GUEST_CONTEXT_SWITCH_REGS()
                     TRAP_TO_VMM(mret_instruction)
                     :
                     :
                     :"memory");
    BEGIN_PARAM_SCHEMA()
        PARAM32()
    END_PARAM_SCHEMA()
    END_TRANSLATION(mret_instruction);
    BEGIN_PARAM(mret_instruction)
        instruction_linear_address
    END_PARAM()
    COMMIT_TRANSLATION(mret_instruction, hartptr, instruction_linear_address);
    /* MRET redirects control flow: stop prefetching past it. */
    blob->is_to_stop = 1;
}

/* SRET: S-mode counterpart of mret_callback. */
__attribute__((unused)) static void
sret_callback(struct hart * hartptr)
{
    assert_hart_running_in_smode(hartptr);
    adjust_mstatus_upon_sret(hartptr);
    adjust_pc_upon_sret(hartptr);
    flush_translation_cache(hartptr);

}

static void
riscv_sret_translator(struct decoding * dec, struct prefetch_blob * blob,
                      uint32_t instruction)
{
    uint32_t instruction_linear_address = blob->next_instruction_to_fetch;
    struct hart * hartptr = (struct hart *)blob->opaque;
    PRECHECK_TRANSLATION_CACHE(sret_instruction, blob);
    BEGIN_TRANSLATION(sret_instruction);
    __asm__ volatile("movq %%r12, %%rdi;"
                     "movq $sret_callback, %%rax;"
                     SAVE_GUEST_CONTEXT_SWITCH_REGS()
                     "call *%%rax;"
                     RESTORE_GUEST_CONTEXT_SWITCH_REGS()
                     TRAP_TO_VMM(sret_instruction)
                     :
                     :
                     :"memory");
    BEGIN_PARAM_SCHEMA()
        PARAM32()
    END_PARAM_SCHEMA()
    END_TRANSLATION(sret_instruction);
    BEGIN_PARAM(sret_instruction)
        instruction_linear_address
    END_PARAM()
    COMMIT_TRANSLATION(sret_instruction, hartptr, instruction_linear_address);
    blob->is_to_stop = 1;
}

/* SFENCE.VMA: drop every cached address translation. */
__attribute__((unused)) static void
sfence_vma_callback(struct hart * hartptr)
{
    // flush tlb cache
    invalidate_tlb(hartptr->itlb, hartptr->itlb_cap);
    invalidate_tlb(hartptr->dtlb, hartptr->dtlb_cap);
    // and finlally, flush translation cache
    flush_translation_cache(hartptr);
}

static void
riscv_sfence_vma_translator(struct decoding * dec, struct prefetch_blob * blob,
                            uint32_t instruction)
{
    uint32_t instruction_linear_address = blob->next_instruction_to_fetch;
    struct hart * hartptr = (struct hart *)blob->opaque;
    PRECHECK_TRANSLATION_CACHE(sfence_vma_instruction, blob);
    BEGIN_TRANSLATION(sfence_vma_instruction);
    __asm__ volatile("movq %%r12, %%rdi;"
                     "movq $sfence_vma_callback, %%rax;"
                     SAVE_GUEST_CONTEXT_SWITCH_REGS()
                     "call *%%rax;"
                     RESTORE_GUEST_CONTEXT_SWITCH_REGS()
                     /* NOTE(review): both PROCEED_TO_NEXT_INSTRUCTION
                      * and TRAP_TO_VMM are emitted here (also in the
                      * ecall/wfi translators below, but not in
                      * mret/sret).  Presumably PC is advanced before
                      * trapping back to the VMM -- confirm against the
                      * macro definitions. */
                     PROCEED_TO_NEXT_INSTRUCTION()
                     TRAP_TO_VMM(sfence_vma_instruction)
                     :
                     :
                     :"memory");
    BEGIN_PARAM_SCHEMA()
        PARAM32()
    END_PARAM_SCHEMA()
    END_TRANSLATION(sfence_vma_instruction);
    BEGIN_PARAM(sfence_vma_instruction)
        instruction_linear_address
    END_PARAM()
    COMMIT_TRANSLATION(sfence_vma_instruction, hartptr, instruction_linear_address);
    blob->is_to_stop = 1;
}

/* NOTE(review): include target lost in extraction; raise_exception()
 * and the EXCEPTION_ECALL_* constants below suggest this header --
 * confirm. */
#include <hart_exception.h>
/* ECALL: raise the environment-call exception matching the hart's
 * current privilege level. */
__attribute__((unused)) static void
ecall_callback(struct hart * hartptr)
{
    uint8_t exception = EXCEPTION_ECALL_FROM_MMODE;
    switch (hartptr->privilege_level)
    {
        case PRIVILEGE_LEVEL_MACHINE:
            exception = EXCEPTION_ECALL_FROM_MMODE;
            break;
        case PRIVILEGE_LEVEL_SUPERVISOR:
            exception = EXCEPTION_ECALL_FROM_SMODE;
            break;
        case PRIVILEGE_LEVEL_USER:
            exception = EXCEPTION_ECALL_FROM_UMODE;
            break;
        default:
            __not_reach();
            break;
    }
    raise_exception(hartptr, exception);

}

static void
riscv_ecall_translator(struct decoding * dec, struct prefetch_blob * blob,
                       uint32_t instruction)
{
    uint32_t instruction_linear_address = blob->next_instruction_to_fetch;
    struct hart * hartptr = (struct hart *)blob->opaque;
    PRECHECK_TRANSLATION_CACHE(ecall_instruction, blob);
    BEGIN_TRANSLATION(ecall_instruction);
    __asm__ volatile("movq %%r12, %%rdi;"
                     "movq $ecall_callback, %%rax;"
                     SAVE_GUEST_CONTEXT_SWITCH_REGS()
                     "call *%%rax;"
                     RESTORE_GUEST_CONTEXT_SWITCH_REGS()
                     PROCEED_TO_NEXT_INSTRUCTION()
                     TRAP_TO_VMM(ecall_instruction)
                     :
                     :
                     :"memory");
    BEGIN_PARAM_SCHEMA()
        PARAM32()
    END_PARAM_SCHEMA()
    END_TRANSLATION(ecall_instruction);
    BEGIN_PARAM(ecall_instruction)
        instruction_linear_address
    END_PARAM()
    COMMIT_TRANSLATION(ecall_instruction, hartptr, instruction_linear_address);
    blob->is_to_stop = 1;
}

/* NOTE(review): include target lost in extraction; the commented-out
 * interrupt helpers below suggest hart_interrupt.h -- confirm. */
#include <hart_interrupt.h>
/* WFI: not implemented -- reaching this callback aborts.  The
 * commented-out code sketches the intended yield-until-interrupt
 * behavior. */
__attribute__((unused)) static void
wfi_callback(struct hart * hartptr)
{
    //hartptr->pc += 4;
    // VMM YIELDS CPU until next interrupt comes
    //ASSERT(is_interrupt_deliverable(hartptr, INTERRUPT_SUPERVISOR_TIMER));
    //deliver_interrupt(hartptr, INTERRUPT_MACHINE_TIMER);
    //dump_hart(hartptr);
    __not_reach();
}
static void
riscv_wfi_translator(struct decoding * dec, struct prefetch_blob * blob,
                     uint32_t instruction)
{
    /* WFI encoding: imm[11:5] must be 0b0001000. */
    ASSERT(0x8 == (dec->imm >> 5));
    uint32_t instruction_linear_address = blob->next_instruction_to_fetch;
    struct hart * hartptr = (struct hart *)blob->opaque;
    PRECHECK_TRANSLATION_CACHE(wfi_instruction, blob);
    BEGIN_TRANSLATION(wfi_instruction);
    __asm__ volatile("movq %%r12, %%rdi;"
                     "movq $wfi_callback, %%rax;"
                     SAVE_GUEST_CONTEXT_SWITCH_REGS()
                     "call *%%rax;"
                     RESTORE_GUEST_CONTEXT_SWITCH_REGS()
                     PROCEED_TO_NEXT_INSTRUCTION()
                     TRAP_TO_VMM(wfi_instruction)
                     :
                     :
                     :"memory");
    BEGIN_PARAM_SCHEMA()
        PARAM32()
    END_PARAM_SCHEMA()
    END_TRANSLATION(wfi_instruction);
    BEGIN_PARAM(wfi_instruction)
        instruction_linear_address
    END_PARAM()
    COMMIT_TRANSLATION(wfi_instruction, hartptr, instruction_linear_address);
    blob->is_to_stop = 1;
}
/*
 * Dispatch for SYSTEM instructions with funct3 == 0b000.  SFENCE.VMA
 * is distinguished by funct7 (imm[11:5] == 0x9) and must be tested
 * before the rs2-based cases, since its rs2 field holds a register
 * index rather than a discriminator.
 */
static void
riscv_funct3_000_translator(struct decoding * dec, struct prefetch_blob * blob,
                            uint32_t instruction)
{
    if (((dec->imm >> 5) & 0x7f) == 0x9) {
        riscv_sfence_vma_translator(dec, blob, instruction);
    } else if (dec->rs2_index == 0x5) {
        riscv_wfi_translator(dec, blob, instruction);
    } else if (dec->rs2_index == 0x1) {
        riscv_ebreak_translator(dec, blob, instruction);
    } else if (dec->rs2_index == 0x2) {
        /* xRET: funct7 selects M (0x18) vs S (0x8). */
        if ((dec->imm >> 5) == 0x18) {
            riscv_mret_translator(dec, blob, instruction);
        } else if ((dec->imm >> 5) == 0x8) {
            riscv_sret_translator(dec, blob, instruction);
        } else {
            __not_reach();
        }
    } else if (dec->rs2_index == 0x0) {
        riscv_ecall_translator(dec, blob, instruction);
    } else {
        log_fatal("can not translate:0x%x at 0x%x\n", instruction, blob->next_instruction_to_fetch);
        __not_reach();
    }
}


/* funct3 -> translator dispatch table for the SYSTEM opcode. */
static instruction_sub_translator per_funct3_handlers[8];


/*
 * Entry point for the SYSTEM opcode group.  funct3 == 0 instructions
 * (ecall/ebreak/xret/wfi/sfence.vma) decode as type-S; the CSR
 * instructions (funct3 1-3, 5-7) decode as type-I.
 */
void
riscv_supervisor_level_instructions_translation_entry(struct prefetch_blob * blob,
                                                      uint32_t instruction)
{
    struct decoding dec;
    // FIXED: only funct3:000 is encoded with type-S, others are not.
    if (!((instruction >> 12) & 0x7)) {
        instruction_decoding_per_type(&dec, instruction, ENCODING_TYPE_S);
    } else {
        instruction_decoding_per_type(&dec, instruction, ENCODING_TYPE_I);
    }
    ASSERT(per_funct3_handlers[dec.funct3]);
    per_funct3_handlers[dec.funct3](&dec, blob, instruction);
}


/* Shared handler for all six CSR instruction forms; defined in the
 * CSR translation unit. */
void
riscv_generic_csr_instructions_translator(struct decoding * dec,
                                          struct prefetch_blob * blob,
                                          uint32_t instruction);

/* Register the SYSTEM-opcode handlers.  funct3 == 4 is intentionally
 * left NULL (no such encoding); hitting it trips the entry ASSERT. */
__attribute__((constructor)) static void
supervisor_level_constructor(void)
{
    memset(per_funct3_handlers, 0x0, sizeof(per_funct3_handlers));
    per_funct3_handlers[0x0] = riscv_funct3_000_translator;
    per_funct3_handlers[0x1] = riscv_generic_csr_instructions_translator;
    per_funct3_handlers[0x2] = riscv_generic_csr_instructions_translator;
    per_funct3_handlers[0x3] = riscv_generic_csr_instructions_translator;
    per_funct3_handlers[0x5] = riscv_generic_csr_instructions_translator;
    per_funct3_handlers[0x6] = riscv_generic_csr_instructions_translator;
    per_funct3_handlers[0x7] = riscv_generic_csr_instructions_translator;
}