├── .gitignore ├── Makefile ├── include ├── assert.h ├── cache.h ├── ctype.h ├── efi.h ├── elf.h ├── kprintf.h ├── paging.h ├── stdlib.h └── string.h ├── run.sh └── src ├── acpi ├── osl.c ├── parse.c └── parse.h ├── apic.c ├── apic.h ├── boot.c ├── cache.c ├── cpu.h ├── entry.S ├── hpet.c ├── hpet.h ├── interrupt.c ├── interrupt.h ├── kernel.c ├── kernel.ld ├── kprintf.c ├── libc ├── ctype.c ├── stdlib.c └── string.c ├── list.h ├── memory.c ├── memory.h ├── page.c ├── page.h ├── paging.c ├── panic.c ├── pci.c ├── pci.h ├── segment.c ├── segment.h ├── serial.c ├── serial.h ├── smp.c ├── smp.h ├── spinlock.h ├── startup.S ├── trampoline.S ├── tsc.c └── tsc.h /.gitignore: -------------------------------------------------------------------------------- 1 | img 2 | obj 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TARGETS := img/EFI/BOOT/BOOTX64.EFI img/kernel 2 | CFLAGS := -std=c11 -ffreestanding -fbuiltin -MMD -MP -ffunction-sections -fdata-sections -O2 3 | 4 | .PHONY: all 5 | all: $(TARGETS) 6 | 7 | .PHONY: clean 8 | clean: 9 | $(RM) -r obj/ img/ 10 | 11 | .SECONDEXPANSION: 12 | .SECONDARY: 13 | .SUFFIXES: 14 | 15 | obj/ img/EFI/BOOT/: ; mkdir -p $@ 16 | obj/%/: ; mkdir -p $@ 17 | 18 | # bootloader 19 | 20 | boot_CC := x86_64-w64-mingw32-gcc 21 | boot_LD := x86_64-w64-mingw32-ld 22 | 23 | boot_SOURCES := src/boot.c 24 | boot_OBJECTS := $(patsubst %.c,%.o,$(boot_SOURCES:src=obj)) 25 | boot_DEPENDS := $(patsubst %.c,%.d,$(boot_SOURCES:src=obj)) 26 | 27 | img/EFI/BOOT/BOOTX64.EFI: $(boot_SOURCES) | obj/ img/EFI/BOOT/ 28 | $(boot_CC) $(CFLAGS) -Wall -Wextra -c -Iinclude -Iedk2/MdePkg/Include -Iedk2/MdePkg/Include/X64 -o obj/boot.o src/boot.c 29 | $(boot_LD) --oformat pei-x86-64 --subsystem 10 -pie -e UefiMain -o img/EFI/BOOT/BOOTX64.EFI obj/boot.o 30 | 31 | # kernel 32 | 33 | kernel_CC := x86_64-elf-gcc 34 | kernel_LD := x86_64-elf-ld 35 | 36 | kernel_OBJECTS := obj/startup.o obj/trampoline.o obj/segment.o obj/kernel.o obj/entry.o obj/interrupt.o obj/memory.o obj/paging.o obj/page.o obj/cache.o obj/hpet.o obj/apic.o obj/tsc.o obj/smp.o obj/pci.o obj/serial.o obj/kprintf.o obj/panic.o obj/acpi/parse.o obj/acpi/osl.o obj/acpi/acpica.o obj/libc/stdlib.o obj/libc/string.o obj/libc/ctype.o 37 | kernel_DEPENDS := $(patsubst %.o,%.d,$(kernel_OBJECTS)) 38 | 39 | img/kernel: $(kernel_OBJECTS) src/kernel.ld | img/EFI/BOOT/ 40 | $(kernel_CC) -ffreestanding -nostdlib -T src/kernel.ld -n -Wl,--gc-sections -o $@ $(kernel_OBJECTS) 41 | 42 | obj/%.o: src/%.c | $$(dir $$@) 43 | $(kernel_CC) $(CFLAGS) -Wall -Wextra -Wpedantic -Wno-unused-parameter -Werror -mno-mmx -mno-sse -mno-sse2 -mno-red-zone -mcmodel=kernel -Iinclude -c -o $@ $< 44 | 45 | obj/%.o: src/%.S | $$(dir $$@) 46 | $(kernel_CC) -Iinclude -D__ASSEMBLY__ -c -o $@ $< 47 | 48 | acpica_SOURCES := $(wildcard src/acpi/acpica/*.c) 49 | acpica_OBJECTS := $(patsubst %.c,%.o,$(acpica_SOURCES:src/%=obj/%)) 50 | acpica_DEPENDS := $(patsubst %.c,%.d,$(acpica_SOURCES:src/%=obj/%)) 51 | 52 | obj/acpi/acpica.o: $(acpica_OBJECTS) 53 | $(kernel_LD) -r -o $@ $(acpica_OBJECTS) 54 | 55 | obj/acpi/acpica/%.o: src/acpi/acpica/%.c | $$(dir $$@) 56 | $(kernel_CC) $(CFLAGS) -mno-red-zone -mcmodel=kernel -c -Iinclude -Iinclude/acpi -o $@ $< 57 | 58 | # dependencies 59 | 60 | ifeq ($(filter clean, $(MAKECMDGOALS)),) 61 | -include $(boot_DEPENDS) $(kernel_DEPENDS) $(acpica_DEPENDS) 62 | endif 63 | -------------------------------------------------------------------------------- /include/assert.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #define assert(x) do { if (!(x)) \ 4 | panic("assert: %s:%d/%s(): %s\n", __FILE__, __LINE__, __func__, #x); \ 5 | } while (0) 6 | 7 | noreturn void panic(const char *fmt, ...) __attribute__((format (printf, 1, 2))); 8 | -------------------------------------------------------------------------------- /include/cache.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | void cache_init(void); 5 | 6 | struct cache *cache_create(uint32_t object_size); 7 | void cache_shrink(struct cache *cache); 8 | void cache_destroy(struct cache *cache); 9 | 10 | void *cache_alloc(struct cache *cache); 11 | void cache_free(struct cache *cache, void *object); 12 | 13 | void *kmalloc(size_t size); 14 | void kfree(void *ptr); 15 | -------------------------------------------------------------------------------- /include/ctype.h: -------------------------------------------------------------------------------- 1 | int isalpha(int c); 2 | int isdigit(int c); 3 | int islower(int c); 4 | int isprint(int c); 5 | int isspace(int c); 6 | int isupper(int c); 7 | int isxdigit(int c); 8 | 9 | int toupper(int c); 10 | int tolower(int c); 11 | -------------------------------------------------------------------------------- /include/efi.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | typedef size_t efi_status; 6 | 7 | struct efi_guid { 8 | uint32_t data1; 9 | uint16_t data2; 10 | uint16_t data3; 11 | uint8_t data4[8]; 12 | }; 13 | 14 | enum efi_memory_type { 15 | efi_reserved, 16 | efi_loader_code, 17 | efi_loader_data, 18 | efi_boot_code, 19 | efi_boot_data, 20 | efi_runtime_code, 21 | efi_runtime_data, 22 | efi_conventional, 23 | efi_unusable, 24 | efi_acpi_reclaim, 25 | efi_acpi_nvs, 26 | efi_memory_mapped_io, 27 | efi_memory_mapped_port, 28 | efi_pal, 29 | }; 30 | 31 | // unfortunately this can't be an enum because enum literals are 'int' and this is 64-bit 32 | static const uint64_t efi_memory_uc = 1 << 0; 33 | static const uint64_t efi_memory_wc = 1 << 1; 34 | static const uint64_t efi_memory_wt = 1 << 2; 35 | static const uint64_t efi_memory_wb = 1 << 3; 36 | static const uint64_t efi_memory_uce = 1 << 4; 37 | static const uint64_t efi_memory_wp = 1 << 12; 38 | static const uint64_t efi_memory_rp = 1 << 13; 39 | static const uint64_t efi_memory_xp = 1 << 14; 40 | static const uint64_t efi_memory_runtime = 1UL << 63; 41 | 42 | struct efi_memory_descriptor { 43 | uint32_t type; 44 | uint64_t physical; 45 | uint64_t virtual; 46 | uint64_t pages; 47 | uint64_t flags; 48 | }; 49 | 50 | struct efi_time { 51 | uint16_t year; 52 | uint8_t month; 53 | uint8_t day; 54 | uint8_t hour; 55 | uint8_t minute; 56 | uint8_t second; 57 | uint8_t pad1; 58 | uint32_t nanosecond; 59 | uint16_t timezone; 60 | uint8_t daylight; 61 | uint8_t pad2; 62 | }; 63 | 64 | struct efi_time_capabilities { 65 | uint32_t resolution; 66 | uint32_t accuracy; 67 | bool sets_to_zero; 68 | }; 69 | 70 | enum efi_reset_type { 71 | efi_reset_cold, 72 | efi_reset_warm, 73 | efi_reset_shutdown, 74 | }; 75 | 76 | struct efi_capsule_header { 77 | struct efi_guid capsule_guid; 78 | uint32_t header_size; 79 | uint32_t flags; 80 | uint32_t capsule_image_size; 81 | }; 82 | 83 | struct efi_table_header { 84 | uint64_t signature; 85 | uint32_t revision; 86 | uint32_t header_size; 87 | uint32_t crc32; 88 | uint32_t reserved; 89 | }; 90 | 91 | typedef efi_status efi_get_time( 92 | struct efi_time*, struct efi_time_capabilities *capabilities 93 | ) __attribute__((ms_abi)); 94 | typedef efi_status efi_set_time(struct efi_time*) __attribute__((ms_abi)); 95 | typedef efi_status efi_get_wakeup_time(bool*, bool*, struct efi_time*) __attribute__((ms_abi)); 96 | typedef efi_status efi_set_wakeup_time(bool*, struct efi_time*) __attribute__((ms_abi)); 97 | typedef efi_status efi_set_wakeup_time(bool*, struct efi_time*) __attribute__((ms_abi)); 98 | typedef efi_status efi_set_virtual_address_map( 99 | size_t, size_t, uint32_t, struct efi_memory_descriptor* 100 | ) __attribute__((ms_abi)); 101 | typedef efi_status efi_convert_pointer(size_t, void**) __attribute__((ms_abi)); 102 | typedef efi_status efi_get_variable( 103 | uint16_t*, struct efi_guid*, uint32_t*, size_t*, void* 104 | ) __attribute__((ms_abi)); 105 | typedef efi_status efi_get_next_variable_name( 106 | size_t*, uint16_t*, struct efi_guid* 107 | ) __attribute__((ms_abi)); 108 | typedef efi_status efi_set_variable( 109 | uint16_t*, struct efi_guid*, uint32_t*, size_t*, void* 110 | ) __attribute__((ms_abi)); 111 | typedef efi_status efi_get_next_high_monotonic_count(uint32_t*) __attribute__((ms_abi)); 112 | typedef efi_status efi_reset_system( 113 | enum efi_reset_type, efi_status, size_t, void* 114 | ) __attribute__((ms_abi)); 115 | typedef efi_status efi_update_capsule( 116 | struct efi_capsule_header**, size_t, uint64_t 117 | ) __attribute__((ms_abi)); 118 | typedef efi_status efi_query_capsule_capabilities( 119 | struct efi_capsule_header**, size_t, uint64_t*, enum efi_reset_type* 120 | ) __attribute__((ms_abi)); 121 | typedef efi_status efi_query_variable_info( 122 | uint32_t attributes, uint64_t*, uint64_t*, uint64_t* 123 | ) __attribute__((ms_abi)); 124 | 125 | struct efi_runtime_services { 126 | struct efi_table_header header; 127 | efi_get_time *get_time; 128 | efi_set_time *set_time; 129 | efi_get_wakeup_time *get_wakeup_time; 130 | efi_set_wakeup_time *set_wakeup_time; 131 | efi_set_virtual_address_map *set_virtual_address_map; 132 | efi_convert_pointer *convert_pointer; 133 | efi_get_variable *get_variable; 134 | efi_get_next_variable_name *get_next_variable_name; 135 | efi_set_variable *set_variable; 136 | efi_get_next_high_monotonic_count *get_next_high_monotonic_count; 137 | efi_reset_system *reset_system; 138 | efi_update_capsule *update_capsule; 139 | efi_query_capsule_capabilities *query_capsule_capabilities; 140 | efi_query_variable_info *query_variable_info; 141 | }; 142 | -------------------------------------------------------------------------------- /include/elf.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | typedef uint64_t Elf64_Addr; 4 | typedef uint16_t Elf64_Half; 5 | typedef int16_t Elf64_SHalf; 6 | typedef uint64_t Elf64_Off; 7 | typedef int32_t Elf64_Sword; 8 | typedef uint32_t Elf64_Word; 9 | typedef uint64_t Elf64_Xword; 10 | typedef int64_t Elf64_Sxword; 11 | 12 | #define EI_NIDENT 16 13 | #define EI_MAG0 0 14 | #define EI_MAG1 1 15 | #define EI_MAG2 2 16 | #define EI_MAG3 3 17 | #define EI_CLASS 4 18 | #define EI_DATA 5 19 | #define EI_VERSION 6 20 | #define EI_OSABI 7 21 | #define EI_PAD 8 22 | 23 | #define ELFMAG0 0x7f 24 | #define ELFMAG1 'E' 25 | #define ELFMAG2 'L' 26 | #define ELFMAG3 'F' 27 | #define ELFMAG "\x7f""ELF" 28 | #define SELFMAG 4 29 | 30 | #define ELFCLASSNONE 0 31 | #define ELFCLASS32 1 32 | #define ELFCLASS64 2 33 | 34 | #define ELFDATANONE 0 35 | #define ELFDATA2LSB 1 36 | #define ELFDATA2MSB 2 37 | 38 | #define ET_NONE 0 39 | #define ET_REL 1 40 | #define ET_EXEC 2 41 | #define ET_DYN 3 42 | #define ET_CORE 4 43 | 44 | #define EM_AMD64 62 45 | 46 | #define EV_NONE 0 47 | #define EV_CURRENT 1 48 | 49 | typedef struct { 50 | unsigned char e_ident[EI_NIDENT]; 51 | Elf64_Half e_type; 52 | Elf64_Half e_machine; 53 | Elf64_Word e_version; 54 | Elf64_Addr e_entry; 55 | Elf64_Off e_phoff; 56 | Elf64_Off e_shoff; 57 | Elf64_Word e_flags; 58 | Elf64_Half e_ehsize; 59 | Elf64_Half e_phentsize; 60 | Elf64_Half e_phnum; 61 | Elf64_Half e_shentsize; 62 | Elf64_Half e_shnum; 63 | Elf64_Half e_shstrndx; 64 | } Elf64_Ehdr; 65 | 66 | #define PT_NULL 0 67 | #define PT_LOAD 1 68 | #define PT_DYNAMIC 2 69 | #define PT_INTERP 3 70 | #define PT_NOTE 4 71 | #define PT_SHLIB 5 72 | #define PT_PHDR 6 73 | 74 | typedef struct { 75 | Elf64_Word p_type; 76 | Elf64_Word p_flags; 77 | Elf64_Off p_offset; 78 | Elf64_Addr p_vaddr; 79 | Elf64_Addr p_paddr; 80 | Elf64_Xword p_filesz; 81 | Elf64_Xword p_memsz; 82 | Elf64_Xword p_align; 83 | } Elf64_Phdr; 84 | -------------------------------------------------------------------------------- /include/kprintf.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void kprintf(const char *fmt, ...) __attribute__((format (printf, 1, 2))); 4 | void kvprintf(const char *fmt, va_list ap); 5 | -------------------------------------------------------------------------------- /include/paging.h: -------------------------------------------------------------------------------- 1 | #define DIRECT_BASE 0xffff800000000000 2 | #define VIRT_DIRECT(phys) ((void*)(DIRECT_BASE + (phys))) 3 | #define PHYS_DIRECT(virt) ((uint64_t)(virt) - DIRECT_BASE) 4 | 5 | #define KERNEL_BASE 0xffffffff80000000 6 | #define VIRT_KERNEL(phys) ((void*)(KERNEL_BASE + (phys))) 7 | #define PHYS_KERNEL(phys) ((uint64_t)(phys) - KERNEL_BASE) 8 | 9 | #define PAGE_ENTRIES 512 10 | 11 | #define PAGE_SHIFT 12 12 | #define PAGE_SIZE (1UL << PAGE_SHIFT) 13 | #define PAGE_MASK (~(PAGE_SIZE - 1)) 14 | #define PAGE_INDEX(virt) (((virt) >> PAGE_SHIFT) & (PAGE_ENTRIES - 1)) 15 | 16 | #define PD_SHIFT 21 17 | #define PD_SIZE (1UL << PD_SHIFT) 18 | #define PD_MASK (~(PD_SIZE - 1)) 19 | #define PD_INDEX(virt) (((virt) >> PD_SHIFT) & (PAGE_ENTRIES - 1)) 20 | 21 | #define PDPT_SHIFT 30 22 | #define PDPT_SIZE (1UL << PDPT_SHIFT) 23 | #define PDPT_MASK (~(PDPT_SIZE - 1)) 24 | #define PDPT_INDEX(virt) (((virt) >> PDPT_SHIFT) & (PAGE_ENTRIES - 1)) 25 | 26 | #define PML4_SHIFT 39 27 | #define PML4_SIZE (1UL << PML4_SHIFT) 28 | #define PML4_MASK (~(PML4_SIZE - 1)) 29 | #define PML4_INDEX(virt) (((virt) >> PML4_SHIFT) & (PAGE_ENTRIES - 1)) 30 | 31 | #define PAGE_PRESENT (1 << 0) 32 | #define PAGE_WRITE (1 << 1) 33 | #define PAGE_USER (1 << 2) 34 | #define PAGE_CACHE_WT (1 << 3) 35 | #define PAGE_CACHE_UC (1 << 4) 36 | #define PAGE_ACCESSED (1 << 5) 37 | #define PAGE_DIRTY (1 << 6) 38 | #define PAGE_LARGE (1 << 7) 39 | #define PAGE_GLOBAL (1 << 8) 40 | #define PAGE_NX (1 << 63) 41 | 42 | #ifndef __ASSEMBLY__ 43 | 44 | #define round_up(x, y) ((((x) - 1) | ((__typeof__(x))((y) - 1))) + 1) 45 | #define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1))) 46 | 47 | #include 48 | #include 49 | 50 | void paging_init(void *map_address, size_t map_size, size_t desc_size); 51 | 52 | static inline void write_cr3(uint64_t cr3) { 53 | __asm__ volatile ("mov %0, %%cr3" :: "r"(cr3)); 54 | } 55 | 56 | #endif 57 | -------------------------------------------------------------------------------- /include/stdlib.h: -------------------------------------------------------------------------------- 1 | unsigned long int strtoul(const char *nptr, const char **endptr, int base); 2 | -------------------------------------------------------------------------------- /include/string.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int memcmp(const void *aptr, const void *bptr, size_t n); 4 | void *memcpy(void *dest, const void *src, size_t n); 5 | void *memmove(void *dest, const void *src, size_t n); 6 | void *memset(void *s, int c, size_t n); 7 | 8 | char *strcat(char *dest, const char *src); 9 | int strcmp(const char *s1, const char *s2); 10 | char *strcpy(char *dest, const char *src); 11 | size_t strlen(const char *str); 12 | 13 | int strncmp(const char *s1, const char *s2, size_t n); 14 | char *strncpy(char *dest, const char *src, size_t n); 15 | size_t strnlen(const char *str, size_t n); 16 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | make -j && qemu-system-x86_64 -enable-kvm -s \ 2 | -machine pc-q35-2.7 -cpu host,migratable=no,+invtsc -smp 4 -m 4G \ 3 | -drive if=pflash,format=raw,readonly,file=edk2/Build/OvmfX64/RELEASE_GCC5/FV/OVMF.fd \ 4 | -drive format=raw,file=fat:img 5 | -------------------------------------------------------------------------------- /src/acpi/osl.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | /// environment and tables 7 | 8 | ACPI_STATUS AcpiOsInitialize() { 9 | return AE_OK; 10 | } 11 | 12 | ACPI_STATUS AcpiOsTerminate() { 13 | return AE_OK; 14 | } 15 | 16 | ACPI_TABLE_RSDP *AcpiOsRsdp; 17 | ACPI_PHYSICAL_ADDRESS AcpiOsGetRootPointer() { 18 | return (ACPI_PHYSICAL_ADDRESS)AcpiOsRsdp; 19 | } 20 | 21 | ACPI_STATUS AcpiOsPredefinedOverride( 22 | const ACPI_PREDEFINED_NAMES *PredefinedObject, ACPI_STRING *NewValue 23 | ) { 24 | *NewValue = NULL; 25 | return AE_OK; 26 | } 27 | 28 | ACPI_STATUS AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable, ACPI_TABLE_HEADER **NewTable) { 29 | *NewTable = NULL; 30 | return AE_OK; 31 | } 32 | 33 | ACPI_STATUS AcpiOsPhysicalTableOverride( 34 | ACPI_TABLE_HEADER *ExistingTable, ACPI_PHYSICAL_ADDRESS *NewAddress, UINT32 *NewTableLength 35 | ) { 36 | *NewAddress = 0; 37 | return AE_OK; 38 | } 39 | 40 | /// memory 41 | 42 | void *AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length) { 43 | return VIRT_DIRECT(PhysicalAddress); 44 | } 45 | 46 | void AcpiOsUnmapMemory(void *where, ACPI_SIZE length) {} 47 | 48 | ACPI_STATUS AcpiOsGetPhysicalAddress(void *LogicalAddress, ACPI_PHYSICAL_ADDRESS *PhysicalAddress) { 49 | uintptr_t virt = (uintptr_t) LogicalAddress; 50 | if (virt >= KERNEL_BASE) 51 | *PhysicalAddress = PHYS_KERNEL(LogicalAddress); 52 | else 53 | *PhysicalAddress = PHYS_DIRECT(LogicalAddress); 54 | 55 | return AE_OK; 56 | } 57 | 58 | void *AcpiOsAllocate(ACPI_SIZE Size) { 59 | return kmalloc(Size); 60 | } 61 | 62 | void AcpiOsFree(void *Memory) { 63 | kfree(Memory); 64 | } 65 | 66 | BOOLEAN AcpiOsReadable(void *Memory, ACPI_SIZE Length) { 67 | return TRUE; 68 | } 69 | 70 | BOOLEAN AcpiOsWritable(void *Memory, ACPI_SIZE Length) { 71 | return TRUE; 72 | } 73 | 74 | /// threads 75 | 76 | ACPI_THREAD_ID AcpiOsGetThreadId() { 77 | return 0; 78 | } 79 | 80 | ACPI_STATUS AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function, void *Context) { 81 | //Function(Context); 82 | return AE_NOT_IMPLEMENTED; 83 | } 84 | 85 | void AcpiOsSleep(UINT64 Milliseconds) {} 86 | 87 | void AcpiOsStall(UINT32 Milliseconds) {} 88 | 89 | void AcpiOsWaitEventsComplete(void) { 90 | kprintf("acpi waiting\n"); 91 | for (;;); 92 | } 93 | 94 | /// synchronization 95 | 96 | /*ACPI_STATUS AcpiOsCreateMutex(ACPI_MUTEX *OutHandle) {} 97 | 98 | void AcpiOsDeleteMutex(ACPI_MUTEX Handle) {} 99 | 100 | ACPI_STATUS AcpiOsAcquireMutex(ACPI_MUTEX Handle, UINT16 Timeout) {} 101 | 102 | void AcpiOsReleaseMutex(ACPI_MUTEX Handle) {}*/ 103 | 104 | ACPI_STATUS AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits, ACPI_SEMAPHORE *OutHandle) { 105 | return AE_NOT_IMPLEMENTED; 106 | } 107 | 108 | ACPI_STATUS AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle) { 109 | return AE_NOT_IMPLEMENTED; 110 | } 111 | 112 | ACPI_STATUS AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout) { 113 | return AE_NOT_IMPLEMENTED; 114 | } 115 | 116 | ACPI_STATUS AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units) { 117 | return AE_NOT_IMPLEMENTED; 118 | } 119 | 120 | ACPI_STATUS AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle) { 121 | return AE_NOT_IMPLEMENTED; 122 | } 123 | 124 | void AcpiOsDeleteLock(ACPI_SPINLOCK Handle) {} 125 | 126 | ACPI_CPU_FLAGS AcpiOsAcquireLock(ACPI_SPINLOCK Handle) { 127 | return 0; 128 | } 129 | 130 | void AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags) {} 131 | 132 | /// interrupt handling 133 | 134 | ACPI_STATUS AcpiOsInstallInterruptHandler( 135 | UINT32 InterruptLevel, ACPI_OSD_HANDLER Handler, void *Context 136 | ) { 137 | return AE_NOT_IMPLEMENTED; 138 | } 139 | 140 | ACPI_STATUS AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber, ACPI_OSD_HANDLER Handler) { 141 | return AE_NOT_IMPLEMENTED; 142 | } 143 | 144 | /// memory 145 | 146 | ACPI_STATUS AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value, UINT32 Width) { 147 | return AE_NOT_IMPLEMENTED; 148 | } 149 | 150 | ACPI_STATUS AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address, UINT64 Value, UINT32 Width) { 151 | return AE_NOT_IMPLEMENTED; 152 | } 153 | 154 | /// port I/O 155 | 156 | ACPI_STATUS AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width) { 157 | return AE_NOT_IMPLEMENTED; 158 | } 159 | 160 | ACPI_STATUS AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width) { 161 | return AE_NOT_IMPLEMENTED; 162 | } 163 | 164 | /// pci 165 | 166 | ACPI_STATUS AcpiOsReadPciConfiguration( 167 | ACPI_PCI_ID *PciId, UINT32 Register, UINT64 *Value, UINT32 Width 168 | ) { 169 | return AE_NOT_IMPLEMENTED; 170 | } 171 | 172 | ACPI_STATUS AcpiOsWritePciConfiguration( 173 | ACPI_PCI_ID *PciId, UINT32 Register, UINT64 Value, UINT32 Width 174 | ) { 175 | return AE_NOT_IMPLEMENTED; 176 | } 177 | 178 | /// formatted output 179 | 180 | void AcpiOsPrintf(const char *Format, ...) { 181 | va_list ap; 182 | va_start(ap, Format); 183 | kvprintf(Format, ap); 184 | va_end(ap); 185 | } 186 | 187 | void AcpiOsVprintf(const char *Format, va_list ap) { 188 | kvprintf(Format, ap); 189 | } 190 | 191 | /// misc 192 | 193 | UINT64 AcpiOsGetTimer(void) { 194 | return 0; 195 | } 196 | 197 | ACPI_STATUS AcpiOsSignal(UINT32 Function, void *Info) { 198 | return AE_NOT_IMPLEMENTED; 199 | } 200 | -------------------------------------------------------------------------------- /src/acpi/parse.c: -------------------------------------------------------------------------------- 1 | #include "parse.h" 2 | #include "../apic.h" 3 | #include "../hpet.h" 4 | #include "../smp.h" 5 | #include "../pci.h" 6 | #include 7 | #include 8 | #include 9 | 10 | static void madt_parse(ACPI_TABLE_MADT *Madt) { 11 | apic_init(Madt->Address, Madt->Flags & ACPI_MADT_PCAT_COMPAT); 12 | 13 | for ( 14 | ACPI_SUBTABLE_HEADER *Apic = (ACPI_SUBTABLE_HEADER*)(Madt + 1); 15 | (char*)Apic < (char*)Madt + Madt->Header.Length; 16 | Apic = (ACPI_SUBTABLE_HEADER*)((char*)Apic + Apic->Length) 17 | ) { 18 | switch (Apic->Type) { 19 | case ACPI_MADT_TYPE_LOCAL_APIC: { 20 | ACPI_MADT_LOCAL_APIC *p = (ACPI_MADT_LOCAL_APIC*)Apic; 21 | if (!(p->LapicFlags & 0x1)) 22 | break; 23 | 24 | lapic_by_cpu[lapic_count] = p->Id; 25 | lapic_count++; 26 | break; 27 | } 28 | 29 | case ACPI_MADT_TYPE_LOCAL_X2APIC: { 30 | ACPI_MADT_LOCAL_X2APIC *p = (ACPI_MADT_LOCAL_X2APIC *)Apic; 31 | if (!(p->LapicFlags & 0x1)) 32 | break; 33 | 34 | lapic_by_cpu[lapic_count] = p->LocalApicId; 35 | lapic_count++; 36 | break; 37 | } 38 | 39 | #if 0 40 | // TODO: initialize ioapic 41 | case ACPI_MADT_TYPE_IO_APIC: { 42 | ACPI_MADT_IO_APIC *p = (ACPI_MADT_IO_APIC*)Apic; 43 | kprintf("IOAPIC %d %#x %d\n", p->Id, p->Address, p->GlobalIrqBase); 44 | break; 45 | } 46 | 47 | case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE: { 48 | static const char *polarity[] = { "bus", "active-high", "", "active-low" }; 49 | static const char *trigger[] = { "bus", "edge-triggered", "", "level-triggered" }; 50 | 51 | ACPI_MADT_INTERRUPT_OVERRIDE *p = (ACPI_MADT_INTERRUPT_OVERRIDE*)Apic; 52 | kprintf( 53 | "INT_SRC_OVR %d -> %d %s %s\n", p->SourceIrq, p->GlobalIrq, 54 | polarity[p->IntiFlags & 0x03], trigger[(p->IntiFlags >> 2) & 0x3] 55 | ); 56 | break; 57 | } 58 | 59 | case ACPI_MADT_TYPE_NMI_SOURCE: { 60 | ACPI_MADT_NMI_SOURCE *p = (ACPI_MADT_NMI_SOURCE*)Apic; 61 | kprintf("NMI_SRC %d %d\n", p->IntiFlags, p->GlobalIrq); 62 | break; 63 | } 64 | 65 | case ACPI_MADT_TYPE_LOCAL_APIC_NMI: { 66 | ACPI_MADT_LOCAL_APIC_NMI *p = (ACPI_MADT_LOCAL_APIC_NMI*)Apic; 67 | kprintf("LAPIC_NMI %d %d %d\n", p->ProcessorId, p->IntiFlags, p->Lint); 68 | break; 69 | } 70 | #endif 71 | } 72 | } 73 | } 74 | 75 | static void mcfg_parse(ACPI_TABLE_MCFG *Mcfg) { 76 | for ( 77 | ACPI_MCFG_ALLOCATION *Allocation = (ACPI_MCFG_ALLOCATION*)(Mcfg + 1); 78 | (char*)Allocation < (char*)Mcfg + Mcfg->Header.Length; 79 | Allocation++ 80 | ) { 81 | pci_add_segment( 82 | Allocation->PciSegment, Allocation->Address, 83 | Allocation->StartBusNumber, Allocation->EndBusNumber 84 | ); 85 | } 86 | } 87 | 88 | #define ACPI_TABLE_COUNT 128 89 | static ACPI_TABLE_DESC acpi_tables[ACPI_TABLE_COUNT]; 90 | 91 | void acpi_parse(ACPI_TABLE_RSDP *Rsdp) { 92 | extern ACPI_TABLE_RSDP *AcpiOsRsdp; 93 | AcpiOsRsdp = Rsdp; 94 | 95 | ACPI_STATUS status = AcpiInitializeTables(acpi_tables, ACPI_TABLE_COUNT, false); 96 | if (ACPI_FAILURE(status)) { 97 | panic("acpi error %d\n", status); 98 | } 99 | 100 | ACPI_TABLE_MADT *Madt = NULL; 101 | AcpiGetTable(ACPI_SIG_MADT, 0, (ACPI_TABLE_HEADER**)&Madt); 102 | if (Madt == NULL) { 103 | panic("no madt available"); 104 | } 105 | madt_parse(Madt); 106 | 107 | ACPI_TABLE_HPET *Hpet = NULL; 108 | AcpiGetTable(ACPI_SIG_HPET, 0, (ACPI_TABLE_HEADER**)&Hpet); 109 | if (Hpet == NULL) { 110 | panic("no hpet available"); 111 | } 112 | hpet_init(Hpet->Address.Address); 113 | 114 | ACPI_TABLE_MCFG *Mcfg = NULL; 115 | AcpiGetTable(ACPI_SIG_MCFG, 0, (ACPI_TABLE_HEADER**)&Mcfg); 116 | if (Mcfg == NULL) { 117 | panic("no mcfg available"); 118 | } 119 | mcfg_parse(Mcfg); 120 | } 121 | -------------------------------------------------------------------------------- /src/acpi/parse.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void acpi_parse(ACPI_TABLE_RSDP *Rsdp); 4 | -------------------------------------------------------------------------------- /src/apic.c: -------------------------------------------------------------------------------- 1 | #include "apic.h" 2 | #include "tsc.h" 3 | #include "cpu.h" 4 | #include "hpet.h" 5 | #include 6 | #include 7 | 8 | enum apic_spurious_flags { 9 | apic_sw_enable = 1 << 8, 10 | apic_focus_check = 1 << 9, 11 | acpi_eoi_supression = 1 << 12, 12 | }; 13 | 14 | enum apic_lvt_flags { 15 | apic_lvt_status = 1 << 12, 16 | apic_lvt_mask = 1 << 16, 17 | 18 | apic_lvt_fixed = 0x0 << 8, 19 | apic_lvt_smi = 0x2 << 8, 20 | apic_lvt_nmi = 0x4 << 8, 21 | apic_lvt_extint = 0x7 << 8, 22 | apic_lvt_init = 0x5 << 8, 23 | 24 | apic_timer_oneshot = 0x0 << 17, 25 | apic_timer_periodic = 0x1 << 17, 26 | apic_timer_tsc = 0x2 << 17, 27 | }; 28 | 29 | static void pic_disable(void) { 30 | kprintf("apic: disabling legacy pic\n"); 31 | 32 | enum pic_port { 33 | pic_master_cmd = 0x20, 34 | pic_master_imr = 0x21, 35 | 36 | pic_slave_cmd = 0xa0, 37 | pic_slave_imr = 0xa1, 38 | }; 39 | 40 | enum pic_command { 41 | pic_icw1_init = 0x10, 42 | pic_icw1_icw4 = 0x01, 43 | pic_icw4_default = 0x01, 44 | }; 45 | 46 | // remap PIC vectors to [0x20,0x30) so they don't overlap CPU traps 47 | // this is used only for spurious interrupts 48 | 49 | outb(pic_master_cmd, pic_icw1_init + pic_icw1_icw4); 50 | outb(pic_master_imr, 0x20); // pic 1 offset 51 | outb(pic_master_imr, 1 << 2); // pic 2 cascade line 52 | outb(pic_master_imr, pic_icw4_default); // no auto-eoi 53 | 54 | outb(pic_slave_cmd, pic_icw1_init + pic_icw1_icw4); 55 | outb(pic_slave_imr, 0x28); // pic 2 offset 56 | outb(pic_slave_imr, 2); // pic 2 cascade line 57 | outb(pic_slave_imr, pic_icw4_default); // no auto-eoi 58 | 59 | // mask all PIC interrupts 60 | outb(pic_slave_imr, 0xff); 61 | outb(pic_master_imr, 0xff); 62 | } 63 | 64 | struct apic *apic; 65 | uint32_t lapic_count; 66 | 67 | static volatile uint32_t *lapic; 68 | static uint32_t lapic_frequency; 69 | 70 | void apic_init(uint32_t lapic_address, bool legacy_pic) { 71 | if (legacy_pic) 72 | pic_disable(); 73 | 74 | uint32_t eax, ebx, ecx, edx; 75 | cpuid(0x01, &eax, &ebx, &ecx, &edx); 76 | bool has_x2apic = ecx & cpuid_01_ecx_x2apic; 77 | 78 | uint32_t msr_flags = apic_global_enable; 79 | if (has_x2apic) { 80 | extern struct apic apic_x2apic; 81 | apic = &apic_x2apic; 82 | 83 | msr_flags |= apic_x2apic_enable; 84 | } else { 85 | extern struct apic apic_flat; 86 | apic = &apic_flat; 87 | 88 | // TODO: factor out temporary mappings 89 | extern uint64_t kernel_pml4[], pt_map[]; 90 | pt_map[0] = lapic_address | PAGE_PRESENT | PAGE_WRITE | PAGE_CACHE_UC | PAGE_GLOBAL; 91 | write_cr3(PHYS_KERNEL(kernel_pml4)); 92 | lapic = (uint32_t*)0xffffffffc0000000; 93 | } 94 | 95 | kprintf("apic: %s routing\n", apic->name); 96 | 97 | rdmsr(ia32_apic_base, &eax, &edx); 98 | wrmsr(ia32_apic_base, eax | msr_flags, 0); 99 | 100 | // TODO: set spurious interrupt vector explicitly 101 | apic_write(apic_spurious, apic_read(apic_spurious) | apic_sw_enable); 102 | } 103 | 104 | void apic_timer_calibrate(void) { 105 | apic_write(apic_lvt_timer, apic_timer_oneshot | 0x30); 106 | 107 | // TODO: move femtoseconds constant 108 | uint64_t wait_time = 100000000000000ULL / hpet_period() / 100; 109 | 110 | uint64_t hpet_start = hpet_now(); 111 | apic_write(apic_timer_init, 0xffffffff); 112 | 113 | while (hpet_now() - hpet_start < wait_time) continue; 114 | 115 | apic_write(apic_lvt_timer, apic_lvt_mask); 116 | uint32_t lapic_end = apic_read(apic_timer_current); 117 | 118 | lapic_frequency = (0xffffffff - lapic_end) * 100; 119 | 120 | kprintf("apic timer: %u.%06uMHz\n", lapic_frequency / 1000000, lapic_frequency % 1000000); 121 | } 122 | 123 | // x2apic 124 | 125 | static uint32_t x2apic_read(uint32_t reg) { 126 | uint32_t eax, edx; 127 | rdmsr(x2apic_base + reg, &eax, &edx); 128 | return eax; 129 | } 130 | 131 | static void x2apic_write(uint32_t reg, uint32_t val) { 132 | wrmsr(x2apic_base + reg, val, 0); 133 | } 134 | 135 | static void x2apic_icr_write(uint32_t high, uint32_t low) { 136 | wrmsr(x2apic_base + apic_icr_low, low, high); 137 | } 138 | 139 | static bool x2apic_icr_wait_idle(uint32_t msecs) { 140 | return true; 141 | } 142 | 143 | struct apic apic_x2apic = { 144 | .name = "x2apic", 145 | 146 | .read = x2apic_read, 147 | .write = x2apic_write, 148 | .icr_write = x2apic_icr_write, 149 | .icr_wait_idle = x2apic_icr_wait_idle, 150 | }; 151 | 152 | // flat apic 153 | 154 | static uint32_t mem_read(uint32_t reg) { 155 | return lapic[reg << 2]; 156 | } 157 | 158 | static void mem_write(uint32_t reg, uint32_t val) { 159 | lapic[reg << 2] = val; 160 | } 161 | 162 | static void mem_icr_write(uint32_t high, uint32_t low) { 163 | lapic[apic_icr_high << 2] = high << apic_icr_dest_shift; 164 | lapic[apic_icr_low << 2] = low; 165 | } 166 | 167 | static bool mem_icr_wait_idle(uint32_t msecs) { 168 | bool pending; 169 | for (uint32_t timeout = 0; timeout < 10 * msecs; timeout++) { 170 | pending = lapic[apic_icr_low << 2] & apic_icr_pending; 171 | if (!pending) 172 | break; 173 | 174 | tsc_udelay(100); 175 | } 176 | 177 | return !pending; 178 | } 179 | 180 | struct apic apic_flat = { 181 | .name = "flat", 182 | 183 | .read = mem_read, 184 | .write = mem_write, 185 | .icr_write = mem_icr_write, 186 | .icr_wait_idle = mem_icr_wait_idle, 187 | }; 188 | -------------------------------------------------------------------------------- /src/apic.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | void apic_init(uint32_t lapic_address, bool legacy_pic); 5 | void apic_timer_calibrate(void); 6 | 7 | enum apic_register { 8 | apic_id = 0x02, 9 | apic_version = 0x03, 10 | 11 | apic_tpr = 0x08, 12 | apic_apr = 0x09, 13 | apic_ppr = 0x0a, 14 | 15 | apic_eoi = 0x0b, 16 | 17 | apic_rrr = 0x0c, 18 | apic_ldr = 0x0d, 19 | apic_dfr = 0x0e, 20 | 21 | apic_spurious = 0x0f, 22 | 23 | apic_isr = 0x10, 24 | apic_tmr = 0x18, 25 | apic_irr = 0x21, 26 | 27 | apic_esr = 0x28, 28 | 29 | apic_icr_low = 0x30, 30 | apic_icr_high = 0x31, 31 | 32 | apic_lvt_timer = 0x32, 33 | apic_lvt_thermal = 0x33, 34 | apic_lvt_perf = 0x34, 35 | apic_lvt_lint0 = 0x35, 36 | apic_lvt_lint1 = 0x36, 37 | apic_lvt_err = 0x37, 38 | 39 | apic_timer_init = 0x38, 40 | apic_timer_current = 0x39, 41 | apic_timer_divide = 0x3e, 42 | }; 43 | static const int apic_reg = 0x10 >> 2; 44 | 45 | enum apic_icr_flags { 46 | apic_icr_dest_shift = 24, 47 | 48 | apic_icr_shorthand_none = 0 << 18, 49 | apic_icr_shorthand_self = 1 << 18, 50 | apic_icr_shorthand_all = 2 << 18, 51 | apic_icr_shorthand_others = 3 << 18, 52 | 53 | apic_icr_edge = 0 << 15, 54 | apic_icr_level = 1 << 15, 55 | 56 | apic_icr_deassert = 0 << 14, 57 | apic_icr_assert = 1 << 14, 58 | 59 | apic_icr_idle = 0 << 12, 60 | apic_icr_pending = 1 << 12, 61 | 62 | apic_icr_physical = 0 << 11, 63 | apic_icr_logical = 1 << 11, 64 | 65 | apic_icr_fixed = 0 << 8, 66 | apic_icr_lowest = 1 << 8, 67 | apic_icr_smi = 2 << 8, 68 | apic_icr_nmi = 4 << 8, 69 | apic_icr_init = 5 << 8, 70 | apic_icr_startup = 6 << 8, 71 | }; 72 | 73 | struct apic { 74 | const char *name; 75 | 76 | uint32_t (*read)(uint32_t reg); 77 | void (*write)(uint32_t reg, uint32_t val); 78 | 79 | void (*icr_write)(uint32_t high, uint32_t low); 80 | bool (*icr_wait_idle)(uint32_t msecs); 81 | }; 82 | 83 | extern struct apic *apic; 84 | 85 | static inline uint32_t apic_read(uint32_t reg) { 86 | return apic->read(reg); 87 | } 88 | 89 | static inline void apic_write(uint32_t reg, uint32_t val) { 90 | apic->write(reg, val); 91 | } 92 | 93 | static inline void apic_icr_write(uint32_t high, uint32_t low) { 94 | apic->icr_write(high, low); 95 | } 96 | 97 | static inline bool apic_icr_wait_idle(uint32_t msecs) { 98 | return apic->icr_wait_idle(msecs); 99 | } 100 | 101 | static inline uint32_t apic_esr_read(void) { 102 | apic->write(apic_esr, 0); 103 | return apic->read(apic_esr) & 0xef; 104 | } 105 | 106 | extern uint32_t lapic_count; 107 | -------------------------------------------------------------------------------- /src/boot.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | EFI_GUID gEfiLoadedImageProtocolGuid = EFI_LOADED_IMAGE_PROTOCOL_GUID; 10 | EFI_GUID gEfiSimpleFileSystemProtocolGuid = EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID; 11 | EFI_GUID gEfiFileInfoGuid = EFI_FILE_INFO_ID; 12 | EFI_GUID gEfiAcpiTableGuid = EFI_ACPI_TABLE_GUID; 13 | 14 | int memcmp(const void *aptr, const void *bptr, size_t n) { 15 | const unsigned char *a = aptr, *b = bptr; 16 | for (size_t i = 0; i < n; i++) { 17 | if (a[i] < b[i]) return -1; 18 | else if (a[i] > b[i]) return 1; 19 | } 20 | return 0; 21 | } 22 | 23 | EFI_STATUS EFIAPI UefiMain(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) { 24 | // open the kernel file from the device this app was loaded from 25 | EFI_FILE *Kernel; 26 | { 27 | EFI_HANDLE_PROTOCOL HandleProtocol = SystemTable->BootServices->HandleProtocol; 28 | 29 | EFI_LOADED_IMAGE_PROTOCOL *LoadedImage; 30 | HandleProtocol(ImageHandle, &gEfiLoadedImageProtocolGuid, (void**)&LoadedImage); 31 | 32 | EFI_SIMPLE_FILE_SYSTEM_PROTOCOL *FileSystem; 33 | HandleProtocol(LoadedImage->DeviceHandle, &gEfiSimpleFileSystemProtocolGuid, (void**)&FileSystem); 34 | 35 | EFI_FILE *Root; 36 | FileSystem->OpenVolume(FileSystem, &Root); 37 | 38 | EFI_STATUS s = Root->Open(Root, &Kernel, L"kernel", EFI_FILE_MODE_READ, EFI_FILE_READ_ONLY); 39 | if (s != EFI_SUCCESS) { 40 | SystemTable->ConOut->OutputString(SystemTable->ConOut, L"kernel is missing\r\n"); 41 | return s; 42 | } 43 | } 44 | 45 | EFI_ALLOCATE_POOL AllocatePool = SystemTable->BootServices->AllocatePool; 46 | 47 | // load the elf header from the kernel 48 | Elf64_Ehdr header; 49 | { 50 | UINTN FileInfoSize; 51 | EFI_FILE_INFO *FileInfo; 52 | Kernel->GetInfo(Kernel, &gEfiFileInfoGuid, &FileInfoSize, NULL); 53 | AllocatePool(EfiLoaderData, FileInfoSize, (void**)&FileInfo); 54 | Kernel->GetInfo(Kernel, &gEfiFileInfoGuid, &FileInfoSize, (void**)&FileInfo); 55 | 56 | UINTN size = sizeof(header); 57 | Kernel->Read(Kernel, &size, &header); 58 | } 59 | 60 | // verify the kernel binary 61 | if ( 62 | memcmp(&header.e_ident[EI_MAG0], ELFMAG, SELFMAG) != 0 || 63 | header.e_ident[EI_CLASS] != ELFCLASS64 || 64 | header.e_ident[EI_DATA] != ELFDATA2LSB || 65 | header.e_type != ET_EXEC || 66 | header.e_machine != EM_AMD64 || 67 | header.e_version != EV_CURRENT 68 | ) { 69 | SystemTable->ConOut->OutputString(SystemTable->ConOut, L"kernel format is bad\r\n"); 70 | return -1; 71 | } 72 | 73 | // load the kernel segment headers 74 | Elf64_Phdr *phdrs; 75 | { 76 | Kernel->SetPosition(Kernel, header.e_phoff); 77 | UINTN size = header.e_phnum * header.e_phentsize; 78 | AllocatePool(EfiLoaderData, size, (void**)&phdrs); 79 | Kernel->Read(Kernel, &size, phdrs); 80 | } 81 | 82 | EFI_ALLOCATE_PAGES AllocatePages = SystemTable->BootServices->AllocatePages; 83 | 84 | // load the actual kernel binary based on its segment headers 85 | for ( 86 | Elf64_Phdr *phdr = phdrs; 87 | (char*)phdr < (char*)phdrs + header.e_phnum * header.e_phentsize; 88 | phdr = (Elf64_Phdr*)((char*)phdr + header.e_phentsize) 89 | ) { 90 | switch (phdr->p_type) { 91 | case PT_LOAD: { 92 | int pages = (phdr->p_memsz + 0x1000 - 1) / 0x1000; // round up 93 | Elf64_Addr segment = phdr->p_paddr; 94 | AllocatePages(AllocateAddress, EfiLoaderData, pages, &segment); 95 | 96 | Kernel->SetPosition(Kernel, phdr->p_offset); 97 | UINTN size = phdr->p_filesz; 98 | Kernel->Read(Kernel, &size, (void*)segment); 99 | break; 100 | } 101 | } 102 | } 103 | 104 | // get the memory map from the firmware 105 | EFI_MEMORY_DESCRIPTOR *Map = NULL; 106 | UINTN MapSize, MapKey; 107 | UINTN DescriptorSize; 108 | UINT32 DescriptorVersion; 109 | { 110 | EFI_GET_MEMORY_MAP GetMemoryMap = SystemTable->BootServices->GetMemoryMap; 111 | 112 | GetMemoryMap(&MapSize, Map, &MapKey, &DescriptorSize, &DescriptorVersion); 113 | AllocatePool(EfiLoaderData, MapSize, (void**)&Map); 114 | GetMemoryMap(&MapSize, Map, &MapKey, &DescriptorSize, &DescriptorVersion); 115 | } 116 | 117 | // get the acpi tables from the firmware 118 | void *rsdp = NULL; 119 | for (UINTN i = 0; i < SystemTable->NumberOfTableEntries; i++) { 120 | EFI_CONFIGURATION_TABLE *Config = &SystemTable->ConfigurationTable[i]; 121 | if (memcmp(&Config->VendorGuid, &gEfiAcpiTableGuid, sizeof(Config->VendorGuid)) == 0) { 122 | rsdp = Config->VendorTable; 123 | break; 124 | } 125 | } 126 | 127 | // finish with firmware and jump to the kernel 128 | SystemTable->BootServices->ExitBootServices(ImageHandle, MapKey); 129 | ((__attribute__((sysv_abi)) void (*)(void*, size_t, size_t, void*))header.e_entry)( 130 | Map, MapSize, DescriptorSize, 131 | rsdp 132 | ); 133 | return EFI_SUCCESS; 134 | } 135 | -------------------------------------------------------------------------------- /src/cache.c: -------------------------------------------------------------------------------- 1 | #include "page.h" 2 | #include "list.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | // TODO: larger slabs 11 | // TODO: off-slab headers 12 | // TODO: cache line alignment and coloring 13 | // TODO: per-cpu object caches 14 | 15 | struct cache { 16 | struct list partial; 17 | struct list empty; 18 | struct list full; 19 | 20 | uint32_t object_size; 21 | uint32_t slab_capacity; 22 | }; 23 | 24 | struct slab { 25 | struct list list; 26 | 27 | void *objects; 28 | 29 | uint32_t ref_count; 30 | uint32_t next_free; 31 | uint32_t free[]; 32 | }; 33 | 34 | static struct cache caches = { 35 | .full = LIST_INIT(caches.full), 36 | .partial = LIST_INIT(caches.partial), 37 | .empty = LIST_INIT(caches.empty), 38 | 39 | .object_size = sizeof(struct cache), 40 | }; 41 | 42 | // TODO: go bigger once we have bigger slab sizes 43 | static struct { 44 | size_t size; 45 | struct cache *cache; 46 | } sized_caches[] = { 47 | { 8, NULL }, 48 | { 16, NULL }, 49 | { 32, NULL }, 50 | { 64, NULL }, 51 | { 96, NULL }, 52 | { 128, NULL }, 53 | { 192, NULL }, 54 | { 256, NULL }, 55 | { 512, NULL }, 56 | { 1024, NULL }, 57 | { 2048, NULL }, 58 | { 0, NULL }, 59 | }; 60 | 61 | static uint32_t calc_slab_capacity(uint32_t object_size) { 62 | uint32_t header = sizeof(struct slab); 63 | uint32_t entry = sizeof(uint32_t); 64 | 65 | uint32_t i = 0; 66 | while (round_up(header + i * entry, object_size) + i * object_size < PAGE_SIZE) 67 | i++; 68 | 69 | return i; 70 | } 71 | 72 | void cache_init(void) { 73 | caches.slab_capacity = calc_slab_capacity(caches.object_size); 74 | 75 | for (int i = 0; sized_caches[i].size != 0; i++) 76 | sized_caches[i].cache = cache_create(sized_caches[i].size); 77 | } 78 | 79 | struct cache *cache_create(uint32_t object_size) { 80 | struct cache *cache = cache_alloc(&caches); 81 | if (cache == NULL) 82 | return NULL; 83 | 84 | list_init(&cache->full); 85 | list_init(&cache->partial); 86 | list_init(&cache->empty); 87 | 88 | cache->object_size = object_size; 89 | cache->slab_capacity = calc_slab_capacity(cache->object_size); 90 | 91 | return cache; 92 | } 93 | 94 | void cache_shrink(struct cache *cache) { 95 | while (cache->empty.next != &cache->empty) { 96 | struct slab *slab = containerof(cache->empty.next, struct slab, list); 97 | list_del(cache->empty.next); 98 | 99 | page_free(page_from_address(slab)); 100 | } 101 | } 102 | 103 | void cache_destroy(struct cache *cache) { 104 | cache_shrink(cache); 105 | assert(list_empty(&cache->partial) && list_empty(&cache->full)); 106 | cache_free(&caches, cache); 107 | } 108 | 109 | static struct slab *slab_create(struct cache *cache) { 110 | struct page *page = page_alloc(); 111 | if (page == NULL) 112 | return NULL; 113 | 114 | struct slab *slab = page_address(page); 115 | list_init(&slab->list); 116 | 117 | page->cache = cache; 118 | page->slab = slab; 119 | 120 | slab->objects = (char*)slab + sizeof(struct slab) + cache->slab_capacity * sizeof(uint32_t); 121 | 122 | slab->ref_count = 0; 123 | slab->next_free = 0; 124 | for (uint32_t i = 0; i < cache->slab_capacity; i++) 125 | slab->free[i] = i + 1; 126 | 127 | return slab; 128 | } 129 | 130 | void *cache_alloc(struct cache *cache) { 131 | struct list *entry = cache->partial.next; 132 | 133 | if (list_empty(&cache->partial)) { 134 | entry = cache->empty.next; 135 | 136 | if (list_empty(&cache->empty)) { 137 | struct slab *slab = slab_create(cache); 138 | if (slab == NULL) 139 | return NULL; 140 | 141 | entry = &slab->list; 142 | list_add_head(entry, &cache->partial); 143 | } 144 | } 145 | 146 | struct slab *slab = containerof(entry, struct slab, list); 147 | void *object = (char*)slab->objects + slab->next_free * cache->object_size; 148 | 149 | slab->ref_count++; 150 | slab->next_free = slab->free[slab->next_free]; 151 | if (slab->next_free == cache->slab_capacity) { 152 | list_del(&slab->list); 153 | list_add_head(&slab->list, &cache->full); 154 | } 155 | 156 | return object; 157 | } 158 | 159 | void cache_free(struct cache *cache, void *object) { 160 | struct slab *slab = (struct slab*)((intptr_t)object & PAGE_MASK); 161 | 162 | uint32_t index = ((char*)object - (char*)slab->objects) / cache->object_size; 163 | slab->free[index] = slab->next_free; 164 | slab->next_free = index; 165 | 166 | uint32_t ref_count = slab->ref_count; 167 | slab->ref_count--; 168 | if (slab->ref_count == 0) { 169 | list_del(&slab->list); 170 | list_add_head(&slab->list, &cache->empty); 171 | } 172 | else if (ref_count == cache->slab_capacity) { 173 | list_del(&slab->list); 174 | list_add_head(&slab->list, &cache->partial); 175 | } 176 | } 177 | 178 | // TODO: fall back on large page allocation once we have large pages 179 | void *kmalloc(size_t size) { 180 | for (int i = 0; sized_caches[i].size != 0; i++) { 181 | if (size > sized_caches[i].size) 182 | continue; 183 | 184 | return cache_alloc(sized_caches[i].cache); 185 | } 186 | 187 | return NULL; 188 | } 189 | 190 | void kfree(void *ptr) { 191 | if (ptr == NULL) 192 | return; 193 | 194 | struct page *page = page_from_address(ptr); 195 | cache_free(page->cache, ptr); 196 | } 197 | -------------------------------------------------------------------------------- /src/cpu.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | // io ports 4 | 5 | static inline void outb(uint16_t port, uint8_t value) { 6 | __asm__ volatile ("outb %0, %1" :: "a"(value), "Nd"(port)); 7 | } 8 | 9 | static inline uint8_t inb(uint16_t port) { 10 | uint8_t result; 11 | __asm__ volatile ("inb %1, %0" : "=a"(result) : "Nd"(port)); 12 | return result; 13 | } 14 | 15 | // cpuid 16 | 17 | static inline void cpuid(uint32_t i, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) { 18 | __asm__ volatile ("cpuid" : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) : "a"(i)); 19 | } 20 | 21 | enum cpuid_flags { 22 | cpuid_01_ecx_x2apic = 1 << 21, 23 | }; 24 | 25 | // msrs 26 | 27 | enum msr { 28 | ia32_apic_base = 0x1b, 29 | x2apic_base = 0x800, 30 | }; 31 | 32 | enum msr_flags { 33 | apic_x2apic_enable = 1 << 10, 34 | apic_global_enable = 1 << 11, 35 | }; 36 | 37 | static inline void rdmsr(uint32_t msr, uint32_t *low, uint32_t *high) { 38 | __asm__ volatile ("rdmsr" : "=a"(*low), "=d"(*high) : "c"(msr)); 39 | } 40 | 41 | static inline void wrmsr(uint32_t msr, uint32_t low, uint32_t high) { 42 | __asm__ volatile ("wrmsr" :: "a"(low), "d"(high), "c"(msr)); 43 | } 44 | -------------------------------------------------------------------------------- /src/entry.S: -------------------------------------------------------------------------------- 1 | .text 2 | 3 | .global load_idt 4 | load_idt: 5 | lidt (%rdi) 6 | ret 7 | 8 | .data 9 | message: .asciz "exception\n" 10 | timer: .asciz "|" 11 | .previous 12 | 13 | .global default_exception 14 | default_exception: 15 | mov $message, %rdi 16 | call kprintf 17 | 1: 18 | cli 19 | hlt 20 | jmp 1b 21 | 22 | .global default_interrupt 23 | .extern apic 24 | default_interrupt: 25 | mov $timer, %rdi 26 | call kprintf 27 | mov apic(%rip), %rax 28 | mov 16(%rax), %rax 29 | mov $0x0b, %rdi 30 | mov $0, %rsi 31 | call %rax 32 | iretq 33 | 34 | .global spurious_interrupt 35 | spurious_interrupt: 36 | iretq 37 | 38 | .macro isr handler error_code 39 | .align 8 40 | .global isr_\handler 41 | isr_\handler: 42 | cld 43 | 44 | .if \error_code == 0 45 | push $0 46 | .endif 47 | 48 | push %r15 49 | push %r14 50 | push %r13 51 | push %r12 52 | push %r11 53 | push %r10 54 | push %r9 55 | push %r8 56 | push %rbp 57 | push %rsi 58 | push %rdi 59 | push %rdx 60 | push %rcx 61 | push %rbx 62 | push %rax 63 | 64 | mov $0x10, %ax 65 | mov %ax, %ds 66 | mov %ax, %ss 67 | 68 | mov %rsp, %rdi 69 | call \handler 70 | 71 | pop %rax 72 | pop %rbx 73 | pop %rcx 74 | pop %rdx 75 | pop %rdi 76 | pop %rsi 77 | pop %rbp 78 | pop %r8 79 | pop %r9 80 | pop %r10 81 | pop %r11 82 | pop %r12 83 | pop %r13 84 | pop %r14 85 | pop %r15 86 | 87 | add $0x8, %rsp 88 | 89 | iretq 90 | .endm 91 | 92 | isr divide_error 0 93 | isr general_protection_fault 1 94 | isr page_fault 1 95 | -------------------------------------------------------------------------------- /src/hpet.c: -------------------------------------------------------------------------------- 1 | #include "hpet.h" 2 | #include "cpu.h" 3 | #include 4 | #include 5 | 6 | struct hpet { 7 | uint64_t capabilities; 8 | uint64_t reserved0; 9 | uint64_t config; 10 | uint64_t reserved1; 11 | uint64_t isr; 12 | uint64_t reserved2[25]; 13 | uint64_t counter; 14 | uint64_t reserved3; 15 | 16 | struct hpet_comparator { 17 | uint64_t conf; 18 | uint64_t value; 19 | uint64_t route; 20 | uint64_t reserved; 21 | } timers[]; 22 | }; 23 | 24 | enum hpet_capabilites_fields { 25 | hpet_timers_shift = 8, 26 | hpet_timers_mask = 0xf, 27 | 28 | hpet_period_shift = 32, 29 | }; 30 | 31 | enum hpet_config_flags { 32 | hpet_config_enable = 1 << 0, 33 | }; 34 | 35 | enum hpet_timer_flags { 36 | hpet_timer_periodic = 1 << 4, 37 | hpet_timer_routes_shift = 32, 38 | }; 39 | 40 | static volatile struct hpet *hpet; 41 | 42 | void hpet_init(uint64_t hpet_address) { 43 | // TODO: factor out temporary mappings 44 | extern uint64_t kernel_pml4[], pt_map[]; 45 | pt_map[1] = hpet_address | PAGE_PRESENT | PAGE_WRITE | PAGE_CACHE_UC | PAGE_GLOBAL; 46 | write_cr3(PHYS_KERNEL(kernel_pml4)); 47 | hpet = (volatile struct hpet*)0xffffffffc0001000; 48 | 49 | uint8_t max_timer = (hpet->capabilities >> hpet_timers_shift) & hpet_timers_mask; 50 | 51 | uint32_t period = hpet->capabilities >> hpet_period_shift; 52 | uint32_t frequency = 1000000000000000ULL / period; 53 | 54 | kprintf( 55 | "hpet: %#015lx - %d comparators @ %u.%06uMHz\n", 56 | hpet_address, max_timer + 1, 57 | frequency / 1000000, frequency % 1000000 58 | ); 59 | 60 | volatile struct hpet_comparator *timers = &hpet->timers[0]; 61 | for (int i = 0; i <= max_timer; i++) { 62 | kprintf( 63 | " comparator %d: %4.4speriodic (irqs %08x)\n", i, 64 | (uint32_t)(timers[i].conf & hpet_timer_periodic) ? "" : "non-", 65 | (uint32_t)(timers[i].conf >> hpet_timer_routes_shift) 66 | ); 67 | } 68 | } 69 | 70 | void hpet_enable(void) { 71 | hpet->config |= hpet_config_enable; 72 | } 73 | 74 | uint64_t hpet_now(void) { 75 | return hpet->counter; 76 | } 77 | 78 | uint64_t hpet_period(void) { 79 | return hpet->capabilities >> hpet_period_shift; 80 | } 81 | -------------------------------------------------------------------------------- /src/hpet.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | struct hpet; 4 | 5 | void hpet_init(uint64_t hpet_address); 6 | void hpet_enable(void); 7 | 8 | uint64_t hpet_now(void); 9 | uint64_t hpet_period(void); 10 | -------------------------------------------------------------------------------- /src/interrupt.c: -------------------------------------------------------------------------------- 1 | #include "interrupt.h" 2 | #include "segment.h" 3 | #include 4 | #include 5 | 6 | enum { 7 | IDT_INTERRUPT = 0xe, 8 | IDT_TRAP = 0xf, 9 | }; 10 | 11 | static struct idt_entry { 12 | uint16_t offset_low; 13 | uint16_t selector; 14 | uint8_t ist: 3, reserved0: 5; 15 | uint8_t type: 5, dpl: 2, p: 1; 16 | uint16_t offset_middle; 17 | uint32_t offset_high; 18 | uint32_t reserved1; 19 | } idt[256]; 20 | 21 | #define IDT_ENTRY(offset, segment, t) (struct idt_entry){ \ 22 | .offset_low = (offset) & 0xffff, \ 23 | .offset_middle = ((offset) >> 16) & 0xffff, \ 24 | .offset_high = (offset) >> 32, \ 25 | .selector = (segment), \ 26 | .type = (t), \ 27 | .p = 1, \ 28 | } 29 | 30 | struct registers { 31 | uint64_t rax; 32 | uint64_t rbx; 33 | uint64_t rcx; 34 | uint64_t rdx; 35 | uint64_t rdi; 36 | uint64_t rsi; 37 | uint64_t rbp; 38 | uint64_t r8; 39 | uint64_t r9; 40 | uint64_t r10; 41 | uint64_t r11; 42 | uint64_t r12; 43 | uint64_t r13; 44 | uint64_t r14; 45 | uint64_t r15; 46 | uint64_t error_code; 47 | uint64_t rip; 48 | uint64_t cs; 49 | uint64_t rflags; 50 | uint64_t rsp; 51 | uint64_t ss; 52 | }; 53 | 54 | extern void default_exception(); 55 | extern void default_interrupt(); 56 | extern void spurious_interrupt(); 57 | 58 | extern void isr_divide_error(); 59 | void divide_error(struct registers *registers) { 60 | kprintf("divide error\n"); 61 | for (;;); 62 | } 63 | 64 | extern void isr_general_protection_fault(); 65 | void general_protection_fault(struct registers *registers) { 66 | kprintf("general protection fault %#lx\n", registers->error_code); 67 | for (;;); 68 | } 69 | 70 | extern void isr_page_fault(); 71 | void page_fault(struct registers *registers) { 72 | uint64_t address; 73 | __asm__ volatile ("mov %%cr2, %0" : "=r"(address)); 74 | 75 | kprintf("page fault %#016lx %#lx\n", address, registers->error_code); 76 | for (;;); 77 | } 78 | 79 | void interrupt_init() { 80 | for (int i = 0; i < 32; i++) { 81 | idt[i] = IDT_ENTRY((uint64_t)default_exception, SEG_KERNEL_CODE, IDT_TRAP); 82 | } 83 | 84 | for (int i = 32; i < 256; i++) { 85 | idt[i] = IDT_ENTRY((uint64_t)default_interrupt, SEG_KERNEL_CODE, IDT_TRAP); 86 | } 87 | 88 | idt[0] = IDT_ENTRY((uint64_t)isr_divide_error, SEG_KERNEL_CODE, IDT_TRAP); 89 | idt[13] = IDT_ENTRY((uint64_t)isr_general_protection_fault, SEG_KERNEL_CODE, IDT_TRAP); 90 | idt[14] = IDT_ENTRY((uint64_t)isr_page_fault, SEG_KERNEL_CODE, IDT_TRAP); 91 | idt[39] = IDT_ENTRY((uint64_t)spurious_interrupt, SEG_KERNEL_CODE, IDT_TRAP); 92 | 93 | struct idt_pointer { 94 | uint16_t limit; 95 | uint64_t base; 96 | } __attribute__((packed)) idt_ptr = { 97 | .limit = sizeof(idt) - sizeof(*idt), 98 | .base = (uint64_t)idt, 99 | }; 100 | 101 | extern void load_idt(struct idt_pointer*); 102 | load_idt(&idt_ptr); 103 | } 104 | -------------------------------------------------------------------------------- /src/interrupt.h: -------------------------------------------------------------------------------- 1 | void interrupt_init(void); 2 | -------------------------------------------------------------------------------- /src/kernel.c: -------------------------------------------------------------------------------- 1 | #include "pci.h" 2 | #include "smp.h" 3 | #include "tsc.h" 4 | #include "hpet.h" 5 | #include "apic.h" 6 | #include "acpi/parse.h" 7 | #include "serial.h" 8 | #include "cpu.h" 9 | #include "interrupt.h" 10 | #include "page.h" 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | void kernel_init(void *memory_map, size_t map_size, size_t desc_size, void *Rsdp) { 19 | interrupt_init(); 20 | serial_init(COM1); 21 | paging_init(memory_map, map_size, desc_size); 22 | 23 | acpi_parse(Rsdp); 24 | 25 | hpet_enable(); 26 | apic_timer_calibrate(); 27 | tsc_calibrate(); 28 | 29 | smp_init(); 30 | 31 | page_alloc_init(); 32 | cache_init(); 33 | 34 | pci_enumerate(); 35 | 36 | #if 0 37 | ACPI_STATUS status = AcpiInitializeSubsystem(); 38 | if (ACPI_FAILURE(status)) 39 | panic("acpi error %d\n", status); 40 | #endif 41 | 42 | // clear identity mapping 43 | extern uint64_t kernel_pml4[]; 44 | kernel_pml4[0] = 0; 45 | write_cr3(PHYS_KERNEL(kernel_pml4)); 46 | 47 | __asm__ volatile ("sti"); 48 | while (true) __asm__ ("hlt"); 49 | } 50 | -------------------------------------------------------------------------------- /src/kernel.ld: -------------------------------------------------------------------------------- 1 | ENTRY(kernel_start_phys) 2 | 3 | kernel_base = 0xffffffff80000000; 4 | kernel_base_phys = 0x100000; 5 | 6 | kernel_start_phys = kernel_start - kernel_base; 7 | 8 | /* TODO: use PHDRS to specify rwx attributes? */ 9 | 10 | SECTIONS { 11 | . = kernel_base + kernel_base_phys; 12 | 13 | kernel_begin = .; 14 | 15 | .text : AT(ADDR(.text) - kernel_base) { *(.text) *(.text.*) } 16 | .data : AT(ADDR(.data) - kernel_base) { *(.data) *(.data.*) *(.rodata) *(.rodata.*) } 17 | .bss : AT(ADDR(.bss) - kernel_base) { 18 | bss_begin = .; 19 | *(.bss) *(.bss.*) *(COMMON) 20 | bss_end = .; 21 | } 22 | 23 | /* startup data can be deallocated after boot */ 24 | . = ALIGN(0x1000); 25 | startup_begin = .; 26 | 27 | /* KEEP() gives the linker's section gc a root to start from */ 28 | .startup.text : AT(ADDR(.startup.text) - kernel_base) { KEEP(*(.startup.text)) } 29 | .startup.data : AT(ADDR(.startup.data) - kernel_base) { *(.startup.data) } 30 | 31 | /* smp trampoline needs to be moved to low 1MB */ 32 | trampoline_begin = .; 33 | .trampoline 0 : AT(trampoline_begin - kernel_base) { KEEP(*(.trampoline)) } 34 | . = trampoline_begin + SIZEOF(.trampoline); 35 | trampoline_end = .; 36 | 37 | /* per-cpu data gets linked at 0 so it can be used as an offset to %gs */ 38 | . = ALIGN(64); 39 | percpu_begin = .; 40 | .percpu 0 : AT(percpu_begin - kernel_base) { *(.percpu) } 41 | . = percpu_begin + SIZEOF(.percpu); 42 | . = ALIGN(64); 43 | percpu_end = .; 44 | 45 | . = ALIGN(0x1000); 46 | startup_end = .; 47 | 48 | kernel_end = .; 49 | 50 | /DISCARD/ : { *(.eh_frame) *(.comment) } 51 | } 52 | -------------------------------------------------------------------------------- /src/kprintf.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "serial.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | void kprintf(const char *fmt, ...) { 11 | va_list ap; 12 | va_start(ap, fmt); 13 | 14 | kvprintf(fmt, ap); 15 | 16 | va_end(ap); 17 | } 18 | 19 | enum format_type { 20 | FORMAT_NONE, 21 | 22 | FORMAT_BYTE, 23 | FORMAT_SHORT, 24 | FORMAT_INT, 25 | FORMAT_LONG, 26 | FORMAT_LLONG, 27 | FORMAT_SIZE, 28 | FORMAT_PTRDIFF, 29 | 30 | FORMAT_CHAR, 31 | FORMAT_PTR, 32 | FORMAT_STR, 33 | FORMAT_PERCENT, 34 | }; 35 | 36 | #define FLAG_LEFT 1 37 | #define FLAG_SIGN 2 38 | #define FLAG_SPACE 4 39 | #define FLAG_ALT 8 40 | #define FLAG_ZERO 16 41 | #define FLAG_SIGNED 32 42 | #define FLAG_UPPER 64 43 | 44 | struct format { 45 | uint8_t type; 46 | uint8_t flags; 47 | uint8_t base; 48 | int16_t width; 49 | int16_t precision; 50 | }; 51 | 52 | static int read_format(const char *fmt, struct format *spec, va_list ap) { 53 | const char *start = fmt; 54 | 55 | // skip leading non-specs 56 | spec->type = FORMAT_NONE; 57 | 58 | while (*fmt != '\0' && *fmt != '%') 59 | fmt++; 60 | 61 | if (fmt != start || !*fmt) 62 | return fmt - start; 63 | 64 | // flags 65 | spec->flags = 0; 66 | while (true) { 67 | fmt++; 68 | 69 | bool done = false; 70 | switch (*fmt) { 71 | case '-': spec->flags |= FLAG_LEFT; break; 72 | case '+': spec->flags |= FLAG_SIGN; break; 73 | case '#': spec->flags |= FLAG_ALT; break; 74 | case ' ': spec->flags |= FLAG_SPACE; break; 75 | case '0': spec->flags |= FLAG_ZERO; break; 76 | default: done = true; break; 77 | } 78 | if (done) break; 79 | } 80 | 81 | // width 82 | spec->width = -1; 83 | if (*fmt == '*') { 84 | spec->width = va_arg(ap, int); 85 | if (spec->width < 0) { 86 | spec->flags |= FLAG_LEFT; 87 | spec->width = -spec->width; 88 | } 89 | fmt++; 90 | } else { 91 | spec->width = strtoul(fmt, &fmt, 10); 92 | } 93 | 94 | // precision 95 | spec->precision = -1; 96 | if (*fmt == '.') { 97 | fmt++; 98 | 99 | if (*fmt == '*') { 100 | spec->precision = va_arg(ap, int); 101 | fmt++; 102 | } else { 103 | spec->precision = strtoul(fmt, &fmt, 10); 104 | } 105 | if (spec->precision < 0) 106 | spec->precision = 0; 107 | 108 | spec->flags &= ~FLAG_ZERO; 109 | } 110 | 111 | // length 112 | spec->type = FORMAT_INT; 113 | switch (*fmt) { 114 | case 'h': 115 | fmt++; 116 | if (*fmt == 'h') { 117 | fmt++; 118 | spec->type = FORMAT_BYTE; 119 | } else { 120 | spec->type = FORMAT_SHORT; 121 | } 122 | break; 123 | case 'l': 124 | fmt++; 125 | if (*fmt == 'l') { 126 | fmt++; 127 | spec->type = FORMAT_LLONG; 128 | } else { 129 | spec->type = FORMAT_LONG; 130 | } 131 | break; 132 | case 'z': 133 | fmt++; 134 | spec->type = FORMAT_SIZE; 135 | break; 136 | case 't': 137 | fmt++; 138 | spec->type = FORMAT_PTRDIFF; 139 | break; 140 | } 141 | 142 | // base 143 | spec->base = 10; 144 | switch (*fmt) { 145 | case 'c': 146 | spec->type = FORMAT_CHAR; 147 | break; 148 | case 's': 149 | spec->type = FORMAT_STR; 150 | break; 151 | case 'p': 152 | spec->type = FORMAT_PTR; 153 | break; 154 | case '%': 155 | spec->type = FORMAT_PERCENT; 156 | break; 157 | 158 | case 'o': 159 | spec->base = 8; 160 | break; 161 | 162 | case 'X': 163 | spec->flags |= FLAG_UPPER; 164 | case 'x': 165 | spec->base = 16; 166 | break; 167 | 168 | case 'd': 169 | case 'i': 170 | spec->flags |= FLAG_SIGNED; 171 | case 'u': 172 | spec->base = 10; 173 | break; 174 | 175 | default: 176 | return fmt - start; 177 | } 178 | 179 | return ++fmt - start; 180 | } 181 | 182 | static void string(const char *s, struct format spec) { 183 | if (s == NULL) 184 | s = "(null)"; 185 | 186 | int len = strnlen(s, spec.precision); 187 | 188 | if (!(spec.flags & FLAG_LEFT)) { 189 | while (len < spec.width--) 190 | serial_write_chars(" ", 1); 191 | } 192 | 193 | serial_write_chars(s, len); 194 | 195 | while (len < spec.width--) 196 | serial_write_chars(" ", 1); 197 | } 198 | 199 | static void number(uintmax_t x, struct format spec) { 200 | int written = 0; 201 | 202 | // calculate sign 203 | char sign = 0; 204 | if (spec.flags & FLAG_SIGNED) { 205 | if ((intmax_t)x < 0) { 206 | sign = '-'; 207 | x = -(intmax_t)x; 208 | written++; 209 | } else if (spec.flags & FLAG_SIGN) { 210 | sign = '+'; 211 | written++; 212 | } else if (spec.flags & FLAG_SPACE) { 213 | sign = ' '; 214 | written++; 215 | } 216 | } 217 | 218 | // calculate prefix 219 | bool prefix = (spec.flags & FLAG_ALT) && spec.base != 10; 220 | if (prefix) { 221 | if (spec.base == 16) { 222 | written += 2; 223 | } else if (x != 0) { 224 | written++; 225 | } 226 | } 227 | 228 | // calculate digits 229 | static const char digits[16] = "0123456789ABCDEF"; 230 | char mask = (spec.flags & FLAG_UPPER) ? 0 : 0x20; 231 | 232 | char buf[CHAR_BIT * sizeof(x) / 3 + 2]; 233 | char *const out = buf + sizeof(buf); 234 | int i = 0; 235 | do { 236 | int digit = x % spec.base; 237 | x /= spec.base; 238 | 239 | out[-++i] = digits[digit] | mask; 240 | } while (x != 0); 241 | 242 | written += i > spec.precision ? i : spec.precision; 243 | 244 | // width - minimum number of chars, space padding 245 | if (!(spec.flags & FLAG_LEFT) && !(spec.flags & FLAG_ZERO)) { 246 | while (++written <= spec.width) 247 | serial_write_chars(" ", 1); 248 | } 249 | 250 | // sign 251 | if (sign != 0) { 252 | serial_write_chars(&sign, 1); 253 | } 254 | 255 | // prefix 256 | if (prefix) { 257 | serial_write_chars("0", 1); 258 | if (spec.base == 16) { 259 | char p = 'X' | mask; 260 | serial_write_chars(&p, 1); 261 | } 262 | } 263 | 264 | // width - minimum number of chars, zero padding 265 | if (!(spec.flags & FLAG_LEFT) && (spec.flags & FLAG_ZERO)) { 266 | while (++written <= spec.width) 267 | serial_write_chars("0", 1); 268 | } 269 | 270 | // precision - minimum number of digits 271 | for (int c = spec.precision - i; c > 0; c--) 272 | serial_write_chars("0", 1); 273 | 274 | // digits 275 | serial_write_chars(out - i, i); 276 | 277 | // width - minimum number of chars, left alignment 278 | if (spec.flags & FLAG_LEFT) { 279 | while (++written <= spec.width) 280 | serial_write_chars(" ", 1); 281 | } 282 | } 283 | 284 | void kvprintf(const char *fmt, va_list ap) { 285 | while (*fmt != '\0') { 286 | struct format spec = { 0 }; 287 | 288 | const char *start = fmt; 289 | int read = read_format(fmt, &spec, ap); 290 | fmt += read; 291 | 292 | switch (spec.type) { 293 | case FORMAT_NONE: 294 | serial_write_chars(start, read); 295 | break; 296 | 297 | case FORMAT_CHAR: { 298 | if (!(spec.flags & FLAG_LEFT)) { 299 | while (--spec.width > 0) 300 | serial_write_chars(" ", 1); 301 | } 302 | 303 | char c = (unsigned char)va_arg(ap, int); 304 | serial_write_chars(&c, 1); 305 | 306 | while (--spec.width > 0) 307 | serial_write_chars(" ", 1); 308 | 309 | break; 310 | } 311 | 312 | case FORMAT_STR: { 313 | const char *s = va_arg(ap, char*); 314 | string(s, spec); 315 | break; 316 | } 317 | 318 | case FORMAT_PTR: { 319 | void *ptr = va_arg(ap, void*); 320 | if (ptr == NULL) { 321 | string("(null)", spec); // use len above 322 | break; 323 | } 324 | 325 | spec.width = 2 * sizeof(void*) + 2; 326 | spec.flags |= FLAG_ALT | FLAG_ZERO; 327 | spec.base = 16; 328 | 329 | number((unsigned long)ptr, spec); 330 | break; 331 | } 332 | 333 | case FORMAT_PERCENT: 334 | serial_write_chars("%", 1); 335 | break; 336 | 337 | default: { 338 | uintmax_t num = 0; 339 | if (spec.flags & FLAG_SIGNED) { 340 | switch (spec.type) { 341 | case FORMAT_BYTE: num = (signed char)va_arg(ap, int); break; 342 | case FORMAT_SHORT: num = (short)va_arg(ap, int); break; 343 | case FORMAT_INT: num = va_arg(ap, int); break; 344 | case FORMAT_LONG: num = va_arg(ap, long); break; 345 | case FORMAT_LLONG: num = va_arg(ap, long long); break; 346 | case FORMAT_SIZE: num = va_arg(ap, size_t); break; 347 | } 348 | } else { 349 | switch (spec.type) { 350 | case FORMAT_BYTE: num = (unsigned char)va_arg(ap, int); break; 351 | case FORMAT_SHORT: num = (unsigned short)va_arg(ap, int); break; 352 | case FORMAT_INT: num = va_arg(ap, unsigned int); break; 353 | case FORMAT_LONG: num = va_arg(ap, unsigned long); break; 354 | case FORMAT_LLONG: num = va_arg(ap, unsigned long long); break; 355 | case FORMAT_SIZE: num = va_arg(ap, size_t); break; 356 | case FORMAT_PTRDIFF: num = va_arg(ap, ptrdiff_t); break; 357 | } 358 | } 359 | 360 | number(num, spec); 361 | break; 362 | } 363 | } 364 | } 365 | } 366 | -------------------------------------------------------------------------------- /src/libc/ctype.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int isalpha(int c) { 4 | return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z'); 5 | } 6 | 7 | int isdigit(int c) { 8 | return '0' <= c && c <= '9'; 9 | } 10 | 11 | int islower(int c) { 12 | return 'a' <= c && c <= 'z'; 13 | } 14 | 15 | int isprint(int c) { 16 | return c > 0x1f && c < 0x7f; 17 | } 18 | 19 | int isspace(int c) { 20 | return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; 21 | } 22 | 23 | int isupper(int c) { 24 | return 'A' <= c && c <= 'Z'; 25 | } 26 | 27 | int isxdigit(int c) { 28 | return isdigit(c) || ('A' <= toupper(c) && toupper(c) <= 'F'); 29 | } 30 | 31 | int toupper(int c) { 32 | if ('a' <= c && c <= 'z') 33 | return c - 32; 34 | else 35 | return c; 36 | } 37 | 38 | int tolower(int c) { 39 | if ('A' <= c && c <= 'Z') 40 | return c + 32; 41 | else 42 | return c; 43 | } 44 | 45 | -------------------------------------------------------------------------------- /src/libc/stdlib.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | unsigned long int strtoul(const char *str, const char **end, int base) { 7 | const char *s = str; 8 | int c; 9 | 10 | do { 11 | c = *s++; 12 | } while (isspace(c)); 13 | 14 | bool negative = false; 15 | if (c == '-') { 16 | negative = true; 17 | c = *s++; 18 | } else if (c == '+') { 19 | c = *s++; 20 | } 21 | 22 | if ((base == 0 || base == 16) && c == '0' && (*s == 'x' || *s == 'X')) { 23 | c = s[1]; 24 | s += 2; 25 | base = 16; 26 | } else if ((base == 0 || base == 2) && c == '0' && (*s == 'b' || *s == 'B')) { 27 | c = s[1]; 28 | s += 2; 29 | base = 2; 30 | } else if (base == 0) 31 | base = c == '0' ? 8 : 10; 32 | 33 | unsigned long int x; 34 | bool any = false; 35 | for (x = 0; ; c = *s++) { 36 | if (isdigit(c)) 37 | c -= '0'; 38 | else if (isalpha(c)) 39 | c -= isupper(c) ? 'A' - 10 : 'a' - 10; 40 | else 41 | break; 42 | 43 | if (c >= base) 44 | break; 45 | 46 | any = true; 47 | x = x * base + c; 48 | } 49 | 50 | if (negative) 51 | x = -x; 52 | 53 | if (end != NULL) 54 | *end = any ? s - 1 : str; 55 | 56 | return x; 57 | } 58 | -------------------------------------------------------------------------------- /src/libc/string.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | /// mem 4 | 5 | int memcmp(const void *aptr, const void *bptr, size_t n) { 6 | const unsigned char *a = aptr, *b = bptr; 7 | for (size_t i = 0; i < n; i++) { 8 | if (a[i] < b[i]) return -1; 9 | else if (a[i] > b[i]) return 1; 10 | } 11 | return 0; 12 | } 13 | 14 | void *memcpy(void *dest, const void *src, size_t n) { 15 | const unsigned char *s = src; 16 | unsigned char *d = dest; 17 | while (n--) 18 | *d++ = *s++; 19 | return dest; 20 | } 21 | 22 | void *memmove(void *dest, const void *src, size_t n) { 23 | const unsigned char *s = src; 24 | unsigned char *d = dest; 25 | 26 | if (dest <= src) { 27 | while (n--) 28 | *d++ = *s++; 29 | } else { 30 | s += n; 31 | d += n; 32 | while (n--) 33 | *--d = *--s; 34 | } 35 | 36 | return dest; 37 | } 38 | 39 | void *memset(void *s, int c, size_t n) { 40 | unsigned char *d = s; 41 | while (n--) 42 | *d++ = c; 43 | return s; 44 | } 45 | 46 | /// str 47 | 48 | char *strcat(char *dest, const char *src) { 49 | size_t i = strlen(dest), j = 0; 50 | for (; src[j] != '\0'; j++) 51 | dest[i + j] = src[j]; 52 | dest[i + j] = '\0'; 53 | return dest; 54 | } 55 | 56 | int strcmp(const char *s1, const char *s2) { 57 | for (size_t i = 0; ; i++) { 58 | if (s1[i] < s2[i]) return -1; 59 | else if (s1[i] > s2[i]) return 1; 60 | else if (s1[i] == '\0') return 0; 61 | } 62 | return 0; 63 | } 64 | 65 | char *strcpy(char *dest, const char *src) { 66 | while ((*dest++ = *src++) != '\0'); 67 | return dest; 68 | } 69 | 70 | size_t strlen(const char *str) { 71 | size_t len = 0; 72 | while (*str++) 73 | len++; 74 | return len; 75 | } 76 | 77 | int strncmp(const char *s1, const char *s2, size_t n) { 78 | for (size_t i = 0; i < n; i++) { 79 | if (s1[i] < s2[i]) return -1; 80 | else if (s1[i] > s2[i]) return 1; 81 | else if (s1[i] == '\0') return 0; 82 | } 83 | return 0; 84 | } 85 | 86 | char *strncpy(char *dest, const char *src, size_t n) { 87 | while (n-- && (*dest++ = *src++) != '\0'); 88 | return dest; 89 | } 90 | 91 | size_t strnlen(const char *str, size_t n) { 92 | size_t len = 0; 93 | while (len < n && *str++) 94 | len++; 95 | return len; 96 | } 97 | -------------------------------------------------------------------------------- /src/list.h: -------------------------------------------------------------------------------- 1 | #ifndef LIST_H 2 | #define LIST_H 3 | 4 | #include 5 | #include 6 | 7 | struct list { 8 | struct list *prev, *next; 9 | }; 10 | 11 | #define LIST_INIT(list) { &(list), &(list) } 12 | 13 | static inline void list_init(struct list *list) { 14 | list->prev = list; 15 | list->next = list; 16 | } 17 | 18 | static inline void list_insert(struct list *new, struct list *prev, struct list *next) { 19 | next->prev = new; 20 | new->next = next; 21 | new->prev = prev; 22 | prev->next = new; 23 | } 24 | 25 | static inline void list_add_head(struct list *new, struct list *head) { 26 | list_insert(new, head, head->next); 27 | } 28 | 29 | static inline void list_add_tail(struct list *new, struct list *head) { 30 | list_insert(new, head->prev, head); 31 | } 32 | 33 | static inline void list_remove(struct list *prev, struct list *next) { 34 | next->prev = prev; 35 | prev->next = next; 36 | } 37 | 38 | static inline void list_del(struct list *entry) { 39 | list_remove(entry->prev, entry->next); 40 | } 41 | 42 | static inline bool list_empty(struct list *list) { 43 | return list->next == list; 44 | } 45 | 46 | #define containerof(ptr, type, member) ((type*)((char*)(ptr) - offsetof(type, member))) 47 | 48 | #endif 49 | -------------------------------------------------------------------------------- /src/memory.c: -------------------------------------------------------------------------------- 1 | #include "memory.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | // TODO: move these somewhere useful 8 | #define min(x, y) ((x) < (y) ? (x) : (y)) 9 | #define max(x, y) ((x) > (y) ? (x) : (y)) 10 | #define clamp(x, start, end) min(max((x), (start)), (end)) 11 | #define align(x, align) (((x) + (align) - 1) & ~((align) - 1)) 12 | 13 | struct memory_map { 14 | size_t count; 15 | size_t capacity; 16 | struct memory_region *regions; 17 | }; 18 | 19 | struct memory_region { 20 | uint64_t base; 21 | uint64_t size; 22 | }; 23 | 24 | static struct memory_region memory_regions[128]; 25 | static struct memory_region reserved_regions[128]; 26 | 27 | static struct memory_map memory = { 28 | .count = 0, 29 | .capacity = 128, 30 | .regions = memory_regions, 31 | }; 32 | 33 | static struct memory_map reserved = { 34 | .count = 0, 35 | .capacity = 128, 36 | .regions = reserved_regions, 37 | }; 38 | 39 | // insert a non-overlapping, ordered region into map 40 | static void memory_map_insert(struct memory_map *map, size_t i, uint64_t base, uint64_t size) { 41 | assert(map->count <= map->capacity); 42 | 43 | struct memory_region *region = &map->regions[i]; 44 | memmove(region + 1, region, (map->count - i) * sizeof(*region)); 45 | region->base = base; 46 | region->size = size; 47 | 48 | map->count++; 49 | } 50 | 51 | // coalesce adjacent regions in map 52 | static void memory_map_merge(struct memory_map *map) { 53 | size_t i = 1; 54 | while (i < map->count) { 55 | struct memory_region *this = &map->regions[i - 1]; 56 | struct memory_region *next = &map->regions[i]; 57 | 58 | if (this->base + this->size != next->base) { 59 | assert(this->base + this->size <= next->base); 60 | i++; 61 | continue; 62 | } 63 | 64 | this->size += next->size; 65 | memmove(next, next + 1, (map->count - i) * sizeof(*next)); 66 | map->count--; 67 | } 68 | } 69 | 70 | // add a new region to map, taking care of overlaps 71 | static void memory_map_add(struct memory_map *map, uint64_t base, uint64_t size) { 72 | uint64_t end = base + size; 73 | 74 | size_t i; 75 | for (i = 0; i < map->count; i++) { 76 | struct memory_region *region = &map->regions[i]; 77 | uint64_t region_end = region->base + region->size; 78 | 79 | // skip regions fully below the new one 80 | if (region_end <= base) 81 | continue; 82 | 83 | // stop for regions fully above the new one 84 | if (region->base >= end) 85 | break; 86 | 87 | // insert the piece of new region below 88 | if (region->base > base) 89 | memory_map_insert(map, i++, base, region->base - base); 90 | 91 | base = min(region_end, end); 92 | } 93 | 94 | // insert any leftover piece of new region 95 | if (base < end) 96 | memory_map_insert(map, i, base, end - base); 97 | 98 | memory_map_merge(map); 99 | } 100 | 101 | // add a region of available memory 102 | void memory_add(uint64_t base, uint64_t size) { 103 | memory_map_add(&memory, base, size); 104 | } 105 | 106 | // reserve a region of memory 107 | void memory_reserve(uint64_t base, uint64_t size) { 108 | memory_map_add(&reserved, base, size); 109 | } 110 | 111 | // iterator for pages of existing memory 112 | void memory_pages_next(uint64_t *iterator, uint64_t *out_start, uint64_t *out_end) { 113 | for (; *iterator < memory.count; ++*iterator) { 114 | struct memory_region *region = &memory.regions[*iterator]; 115 | 116 | uint64_t page_start = (region->base + PAGE_SIZE - 1) >> PAGE_SHIFT; 117 | uint64_t page_end = (region->base + region->size) >> PAGE_SHIFT; 118 | 119 | // skip regions smaller than one page 120 | if (page_start >= page_end) 121 | continue; 122 | 123 | *out_start = page_start; 124 | *out_end = page_end; 125 | 126 | ++*iterator; 127 | return; 128 | } 129 | 130 | *iterator = (uint64_t)-1; 131 | } 132 | 133 | // iterator for free memory regions 134 | void memory_free_next(uint64_t *iterator, uint64_t *out_start, uint64_t *out_end) { 135 | uint32_t ia = *iterator & 0xffffffff; 136 | uint32_t ie = *iterator >> 32; 137 | 138 | // loop through available regions 139 | for (; ia < memory.count; ia++) { 140 | struct memory_region *available = &memory.regions[ia]; 141 | uint64_t available_start = available->base; 142 | uint64_t available_end = available->base + available->size; 143 | 144 | // loop through negative space of exclude 145 | for (; ie < reserved.count + 1; ie++) { 146 | struct memory_region *exclude = &reserved.regions[ie]; 147 | uint64_t include_start = ie == 0 ? 0 : exclude[-1].base + exclude[-1].size; 148 | uint64_t include_end = ie < reserved.count ? exclude->base : (uint64_t)-1; 149 | 150 | // we passed the end of the current available region, go to the next one 151 | if (include_start >= available_end) 152 | break; 153 | 154 | // keep going until the include region overlaps the available region 155 | if (include_end <= available_start) 156 | continue; 157 | 158 | // output intersection of the two regions 159 | *out_start = max(available_start, include_start); 160 | *out_end = min(available_end, include_end); 161 | 162 | // advance the lowest region once more before returning 163 | if (available_end <= include_end) 164 | ia++; 165 | else 166 | ie++; 167 | 168 | *iterator = (uint64_t)ia | (uint64_t)ie << 32; 169 | return; 170 | } 171 | } 172 | 173 | *iterator = (uint64_t)-1; 174 | } 175 | 176 | uint64_t memory_end() { 177 | assert(memory.count > 0); 178 | return memory.regions[memory.count - 1].base + memory.regions[memory.count - 1].size; 179 | } 180 | 181 | // find a free region in [start, end) with a power-of-two alignment 182 | uint64_t memory_find(uint64_t start, uint64_t end, uint64_t size, uint64_t align) { 183 | uint64_t i = 0, found_start, found_end; 184 | while (memory_free_next(&i, &found_start, &found_end), i != (uint64_t)-1) { 185 | found_start = clamp(found_start, start, end); 186 | found_end = clamp(found_end, start, end); 187 | 188 | uint64_t align_start = align(found_start, align); 189 | assert(align_start != 0); // TODO: remove this after unconditionally reserving first page? 190 | if (align_start < found_end && found_end - align_start >= size) 191 | return align_start; 192 | } 193 | 194 | return 0; 195 | } 196 | 197 | void *memory_alloc(uint64_t start, uint64_t end, uint64_t size, uint64_t align) { 198 | uint64_t phys = memory_find(start, end, size, align); 199 | memory_reserve(phys, size); 200 | 201 | void *virt = VIRT_DIRECT(phys); 202 | memset(virt, 0, size); 203 | return virt; 204 | } 205 | -------------------------------------------------------------------------------- /src/memory.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void memory_add(uint64_t base, uint64_t size); 4 | void memory_reserve(uint64_t base, uint64_t size); 5 | 6 | void memory_pages_next(uint64_t *iterator, uint64_t *out_start, uint64_t *out_end); 7 | void memory_free_next(uint64_t *iterator, uint64_t *out_start, uint64_t *out_end); 8 | 9 | uint64_t memory_end(); 10 | uint64_t memory_find(uint64_t start, uint64_t end, uint64_t size, uint64_t align); 11 | void *memory_alloc(uint64_t start, uint64_t end, uint64_t size, uint64_t align); 12 | -------------------------------------------------------------------------------- /src/page.c: -------------------------------------------------------------------------------- 1 | #include "page.h" 2 | #include "memory.h" 3 | #include 4 | #include 5 | #include 6 | 7 | // TODO: larger pages 8 | 9 | static uint64_t num_frames; 10 | static struct page *page_frames; 11 | static struct list free_pages; 12 | 13 | void page_alloc_init(void) { 14 | // TODO: allocate page_frames on a page boundary and map it to a fixed location 15 | num_frames = memory_end() >> PAGE_SHIFT; 16 | page_frames = memory_alloc( 17 | PAGE_SIZE, memory_end(), num_frames * sizeof(*page_frames), alignof(*page_frames) 18 | ); 19 | 20 | list_init(&free_pages); 21 | for (uint64_t i = 0; i < num_frames; i++) { 22 | page_frames[i].ref_count = 1; 23 | list_init(&page_frames[i].free); 24 | } 25 | 26 | uint64_t i = 0, found_start, found_end; 27 | while (memory_free_next(&i, &found_start, &found_end), i != (uint64_t)-1) { 28 | uint64_t page_start = (found_start + PAGE_SIZE - 1) >> PAGE_SHIFT; 29 | uint64_t page_end = found_end >> PAGE_SHIFT; 30 | 31 | if (page_start >= page_end) 32 | continue; 33 | 34 | for (uint64_t i = page_start; i < page_end; i++) { 35 | list_add_tail(&page_frames[i].free, &free_pages); 36 | } 37 | } 38 | } 39 | 40 | struct page *page_alloc() { 41 | // TODO: free up cache space on OOM 42 | if (list_empty(&free_pages)) 43 | return NULL; 44 | 45 | struct page *page = containerof(free_pages.next, struct page, free); 46 | list_del(&page->free); 47 | 48 | page->ref_count++; 49 | return page; 50 | } 51 | 52 | void page_free(struct page *page) { 53 | page->ref_count--; 54 | if (page->ref_count == 0) { 55 | list_add_head(&page->free, &free_pages); 56 | } 57 | } 58 | 59 | void *page_address(struct page *page) { 60 | return VIRT_DIRECT((page - page_frames) * PAGE_SIZE); 61 | } 62 | 63 | struct page *page_from_address(void *address) { 64 | return &page_frames[PHYS_DIRECT(address) >> PAGE_SHIFT]; 65 | } 66 | -------------------------------------------------------------------------------- /src/page.h: -------------------------------------------------------------------------------- 1 | #include "list.h" 2 | #include 3 | 4 | struct page { 5 | union { 6 | struct list free; 7 | 8 | struct { 9 | struct cache *cache; 10 | struct slab *slab; 11 | }; 12 | }; 13 | 14 | uint32_t ref_count; 15 | }; 16 | 17 | void page_alloc_init(void); 18 | 19 | struct page *page_alloc(); 20 | void page_free(struct page *page); 21 | 22 | void *page_address(struct page *page); 23 | struct page *page_from_address(void *address); 24 | -------------------------------------------------------------------------------- /src/paging.c: -------------------------------------------------------------------------------- 1 | #include "memory.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | // defined in startup.S 10 | extern uint64_t kernel_pml4[PAGE_ENTRIES]; 11 | 12 | struct range { 13 | uint64_t start; 14 | uint64_t end; 15 | uint8_t level; 16 | }; 17 | 18 | #define RANGE(start, end, level) ((struct range){ (start), (end), (level) }) 19 | 20 | static int split_range(struct range ranges[5], uint64_t start, uint64_t end) { 21 | int range = 0; 22 | 23 | uint64_t start_frame = start >> PAGE_SHIFT; 24 | uint64_t end_frame = end >> PAGE_SHIFT; 25 | 26 | uint64_t first_frame, last_frame; 27 | 28 | // 4k pages from start to 2M-aligned or end 29 | first_frame = start_frame; 30 | last_frame = round_up(start_frame, PD_SIZE >> PAGE_SHIFT); 31 | if (last_frame > end_frame) 32 | last_frame = end_frame; 33 | if (first_frame < last_frame) { 34 | ranges[range++] = RANGE(first_frame << PAGE_SHIFT, last_frame << PAGE_SHIFT, 0); 35 | start_frame = last_frame; 36 | } 37 | 38 | // 2M pages from 2M-aligned to 1G-aligned or 2M-aligned end 39 | first_frame = round_up(start_frame, PD_SIZE >> PAGE_SHIFT); 40 | last_frame = round_up(start_frame, PDPT_SIZE >> PAGE_SHIFT); 41 | if (last_frame > round_down(end_frame, PD_SIZE >> PAGE_SHIFT)) 42 | last_frame = round_down(end_frame, PD_SIZE >> PAGE_SHIFT); 43 | if (first_frame < last_frame) { 44 | ranges[range++] = RANGE(first_frame << PAGE_SHIFT, last_frame << PAGE_SHIFT, 1); 45 | start_frame = last_frame; 46 | } 47 | 48 | // 1G pages from 1G-aligned to 1G-aligned end 49 | first_frame = round_up(start_frame, PDPT_SIZE >> PAGE_SHIFT); 50 | last_frame = round_down(end_frame, PDPT_SIZE >> PAGE_SHIFT); 51 | if (first_frame < last_frame) { 52 | ranges[range++] = RANGE(first_frame << PAGE_SHIFT, last_frame << PAGE_SHIFT, 2); 53 | start_frame = last_frame; 54 | } 55 | 56 | // 2M pages to 2M-aligned end 57 | first_frame = round_up(start_frame, PD_SIZE >> PAGE_SHIFT); 58 | last_frame = round_down(end_frame, PD_SIZE >> PAGE_SHIFT); 59 | if (first_frame < last_frame) { 60 | ranges[range++] = RANGE(first_frame << PAGE_SHIFT, last_frame << PAGE_SHIFT, 1); 61 | start_frame = last_frame; 62 | } 63 | 64 | // 4k pages to end 65 | first_frame = start_frame; 66 | last_frame = end_frame; 67 | if (first_frame < last_frame) { 68 | ranges[range++] = RANGE(first_frame << PAGE_SHIFT, last_frame << PAGE_SHIFT, 0); 69 | } 70 | 71 | // merge any ranges of the same level that crossed a higher alignment boundary 72 | for (int i = 0; i < range - 1; i++) { 73 | if (ranges[i].level != ranges[i + 1].level) 74 | continue; 75 | 76 | uint64_t start = ranges[i].start; 77 | memcpy(&ranges[i], &ranges[i + 1], (range - 1 - i) * sizeof(*ranges)); 78 | ranges[i--].start = start; 79 | range--; 80 | } 81 | 82 | static const char *const sizes[] = { "4k", "2M", "1G" }; 83 | for (int i = 0; i < range; i++) { 84 | kprintf( 85 | " [%#015lx-%#015lx) %s\n", ranges[i].start, ranges[i].end, sizes[ranges[i].level] 86 | ); 87 | } 88 | 89 | return range; 90 | } 91 | 92 | // reserve 2 page tables to get started 93 | // TODO: how many do we really need, given the boostrap direct mapping? 94 | static uint64_t init_page_tables[2][PAGE_ENTRIES] __attribute__((aligned(PAGE_SIZE))); 95 | static unsigned int init_page_table = 0; 96 | 97 | static uint64_t mapped_top = 0; 98 | 99 | static void *alloc_page_direct() { 100 | if (init_page_table < sizeof(init_page_tables) / sizeof(*init_page_tables)) 101 | return VIRT_DIRECT(PHYS_KERNEL(init_page_tables[init_page_table++])); 102 | 103 | // TODO: what to do about the bottom page? 104 | void *page = memory_alloc(PAGE_SIZE, mapped_top, PAGE_SIZE, PAGE_SIZE); 105 | 106 | kprintf(" page table %#015lx\n", PHYS_DIRECT(page)); 107 | 108 | return page; 109 | } 110 | 111 | static void direct_map_pt(uint64_t *pt, uint64_t start_phys, uint64_t end_phys, uint8_t level) { 112 | for (uint64_t phys = start_phys; phys < end_phys; phys = (phys & PAGE_MASK) + PAGE_SIZE) { 113 | uint64_t pte = PAGE_INDEX(phys); 114 | if (pte >= PAGE_ENTRIES) 115 | break; 116 | 117 | pt[pte] = phys | PAGE_PRESENT | PAGE_WRITE | PAGE_GLOBAL; 118 | } 119 | } 120 | 121 | static void direct_map_pd(uint64_t *pd, uint64_t start_phys, uint64_t end_phys, uint8_t level) { 122 | for (uint64_t phys = start_phys; phys < end_phys; phys = (phys & PD_MASK) + PD_SIZE) { 123 | uint64_t pde = PD_INDEX(phys); 124 | if (pde >= PAGE_ENTRIES) 125 | break; 126 | 127 | if (level == 1) { 128 | pd[pde] = phys | PAGE_PRESENT | PAGE_WRITE | PAGE_LARGE | PAGE_GLOBAL; 129 | continue; 130 | } 131 | 132 | uint64_t *pt; 133 | if (pd[pde] == 0) { 134 | pt = alloc_page_direct(); 135 | pd[pde] = PHYS_DIRECT(pt) | PAGE_PRESENT | PAGE_WRITE | PAGE_GLOBAL; 136 | } 137 | else { 138 | pt = VIRT_DIRECT(pd[pde] & PAGE_MASK); 139 | } 140 | 141 | direct_map_pt(pt, phys, end_phys, level); 142 | } 143 | } 144 | 145 | static void direct_map_pdpt(uint64_t *pdpt, uint64_t start_phys, uint64_t end_phys, uint8_t level) { 146 | for (uint64_t phys = start_phys; phys < end_phys; phys = (phys & PDPT_MASK) + PDPT_SIZE) { 147 | uint64_t pdpte = PDPT_INDEX(phys); 148 | if (pdpte >= PAGE_ENTRIES) 149 | break; 150 | 151 | if (level == 2) { 152 | pdpt[pdpte] = phys | PAGE_PRESENT | PAGE_WRITE | PAGE_LARGE | PAGE_GLOBAL; 153 | continue; 154 | } 155 | 156 | uint64_t *pd; 157 | if (pdpt[pdpte] == 0) { 158 | pd = alloc_page_direct(); 159 | pdpt[pdpte] = PHYS_DIRECT(pd) | PAGE_PRESENT | PAGE_WRITE | PAGE_GLOBAL; 160 | } 161 | else { 162 | pd = VIRT_DIRECT(pdpt[pdpte] & PAGE_MASK); 163 | } 164 | 165 | direct_map_pd(pd, phys, end_phys, level); 166 | } 167 | } 168 | 169 | static void direct_map_pml4(uint64_t start_phys, uint64_t end_phys, uint8_t level) { 170 | uint64_t start_virt = (uint64_t)VIRT_DIRECT(start_phys); 171 | uint64_t end_virt = (uint64_t)VIRT_DIRECT(end_phys); 172 | 173 | for (uint64_t virt = start_virt; virt < end_virt; virt = (virt & PML4_MASK) + PML4_SIZE) { 174 | uint64_t pml4e = PML4_INDEX(virt); 175 | 176 | uint64_t *pdpt; 177 | if (kernel_pml4[pml4e] == 0) { 178 | pdpt = alloc_page_direct(); 179 | kernel_pml4[pml4e] = PHYS_DIRECT(pdpt) | PAGE_PRESENT | PAGE_WRITE | PAGE_GLOBAL; 180 | } 181 | else { 182 | // initial page tables are all already in the direct map (see startup.S) 183 | pdpt = VIRT_DIRECT(kernel_pml4[pml4e] & PAGE_MASK); 184 | } 185 | 186 | direct_map_pdpt(pdpt, PHYS_DIRECT(virt), PHYS_DIRECT(end_virt), level); 187 | } 188 | 189 | write_cr3(PHYS_KERNEL(kernel_pml4)); 190 | mapped_top = end_phys; 191 | } 192 | 193 | // TODO: unmap memory in the first GB that's hard-coded in startup.S 194 | void direct_map(uint64_t start_phys, uint64_t end_phys) { 195 | kprintf("mem: [%#015lx-%#015lx)\n", start_phys, end_phys); 196 | 197 | struct range ranges[5]; 198 | int n = split_range(ranges, start_phys, end_phys); 199 | 200 | for (int i = 0; i < n; i++) 201 | direct_map_pml4(ranges[i].start, ranges[i].end, ranges[i].level); 202 | } 203 | 204 | void paging_init(void *map_address, size_t map_size, size_t desc_size) { 205 | // TODO: factor out temporary mappings 206 | extern uint64_t kernel_pml4[], pt_map[]; 207 | for (unsigned int i = 0; i < (map_size + PAGE_SIZE) / PAGE_SIZE; i++) { 208 | uint64_t address = ((uint64_t)map_address & PAGE_MASK) + i * PAGE_SIZE; 209 | pt_map[2 + i] = address | PAGE_PRESENT | PAGE_WRITE | PAGE_GLOBAL; 210 | } 211 | write_cr3(PHYS_KERNEL(kernel_pml4)); 212 | struct efi_memory_descriptor *memory_map = (struct efi_memory_descriptor*)( 213 | 0xffffffffc0002000 + ((uint64_t)map_address & ~PAGE_MASK) 214 | ); 215 | 216 | // transfer efi memory map to our memory map 217 | void *map = memory_map; 218 | for (char *p = map, *end = (char*)map + map_size; p < end; p += desc_size) { 219 | struct efi_memory_descriptor *mem = (void*)p; 220 | 221 | static const char *efi_memory_name[] = { 222 | [efi_reserved] = "reserved", 223 | [efi_loader_code] = "loader code", 224 | [efi_loader_data] = "loader data", 225 | [efi_boot_code] = "boot code", 226 | [efi_boot_data] = "boot data", 227 | [efi_runtime_code] = "runtime code", 228 | [efi_runtime_data] = "runtime data", 229 | [efi_conventional] = "free ram", 230 | [efi_unusable] = "unusable", 231 | [efi_acpi_reclaim] = "acpi reclaim", 232 | [efi_acpi_nvs] = "acpi nvs", 233 | [efi_memory_mapped_io] = "mmio", 234 | [efi_memory_mapped_port] = "mm port", 235 | [efi_pal] = "pal code", 236 | }; 237 | 238 | uint64_t size = mem->pages * PAGE_SIZE; 239 | 240 | kprintf( 241 | "efi: [%#015lx-%#015lx) [%1s %2s %2s %2s %3s %2s %2s %2s %2s] %s\n", 242 | mem->physical, mem->physical + size, 243 | mem->flags & efi_memory_runtime ? "r" : "", 244 | mem->flags & efi_memory_xp ? "xp" : "", 245 | mem->flags & efi_memory_rp ? "rp" : "", 246 | mem->flags & efi_memory_wp ? "wp" : "", 247 | mem->flags & efi_memory_uce ? "uce" : "", 248 | mem->flags & efi_memory_wb ? "wb" : "", 249 | mem->flags & efi_memory_wt ? "wt" : "", 250 | mem->flags & efi_memory_wc ? "wc" : "", 251 | mem->flags & efi_memory_uc ? "uc" : "", 252 | efi_memory_name[mem->type] 253 | ); 254 | 255 | if (mem->flags | efi_memory_runtime) { 256 | // TODO: remap efi runtime memory and call SetVirtualAddressMap 257 | } 258 | 259 | memory_add(mem->physical, size); 260 | 261 | if ( 262 | mem->type != efi_conventional && 263 | mem->type != efi_loader_code && 264 | mem->type != efi_loader_data && 265 | mem->type != efi_boot_code && 266 | mem->type != efi_boot_data 267 | ) 268 | memory_reserve(mem->physical, size); 269 | } 270 | 271 | // reserve kernel image 272 | extern char kernel_begin[], kernel_end[]; 273 | memory_reserve(PHYS_KERNEL(kernel_begin), kernel_end - kernel_begin); 274 | 275 | // direct mapping of ram 276 | uint64_t i = 0, start_frame, end_frame; 277 | while (memory_pages_next(&i, &start_frame, &end_frame), i != (uint64_t)-1) { 278 | uint64_t start_phys = start_frame << PAGE_SHIFT; 279 | uint64_t end_phys = end_frame << PAGE_SHIFT; 280 | direct_map(start_phys, end_phys); 281 | } 282 | } 283 | -------------------------------------------------------------------------------- /src/panic.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | void panic(const char *fmt, ...) { 6 | va_list ap; 7 | va_start(ap, fmt); 8 | 9 | kvprintf(fmt, ap); 10 | 11 | va_end(ap); 12 | 13 | for (;;); 14 | } 15 | -------------------------------------------------------------------------------- /src/pci.c: -------------------------------------------------------------------------------- 1 | #include "pci.h" 2 | #include "cpu.h" 3 | #include 4 | #include 5 | #include 6 | 7 | struct pci_function { 8 | uint16_t vendor_id; 9 | uint16_t device_id; 10 | uint16_t command; 11 | uint16_t status; 12 | uint8_t revision_id; 13 | uint8_t prog_if; 14 | uint8_t subclass; 15 | uint8_t class; 16 | uint8_t cache_line_size; 17 | uint8_t latency_timer; 18 | uint8_t header_type; 19 | uint8_t bist; 20 | uint32_t bar[6]; 21 | uint32_t cardbus; 22 | uint16_t subsystem_vendor_id; 23 | uint16_t subsystem_id; 24 | uint32_t expansion_rom; 25 | uint8_t capabilities; 26 | uint8_t reserved[7]; 27 | uint8_t interrupt_line; 28 | uint8_t interrupt_pin; 29 | uint8_t min_grant; 30 | uint8_t max_latency; 31 | }; 32 | 33 | // TODO: actually support non-zero groups 34 | static size_t segments = 0; 35 | static uint64_t segment_groups[1]; 36 | 37 | static volatile void *ecam; 38 | 39 | void pci_add_segment(uint16_t segment, uint64_t ecam_address, uint8_t bus_start, uint8_t bus_end) { 40 | segment_groups[segments] = ecam_address; 41 | segments++; 42 | 43 | kprintf("pci: [segment %d] %#018lx buses %d-%d\n", segment, ecam_address, bus_start, bus_end); 44 | } 45 | 46 | static inline struct pci_function *function_address(uint8_t bus, uint8_t device, uint8_t function) { 47 | return (struct pci_function*)((char*)ecam + 48 | ((bus & 0xff) << 20) + 49 | ((device & 0x1f) << 15) + 50 | ((function & 0x7) << 12)); 51 | } 52 | 53 | static void enumerate_function(uint8_t bus_id, uint8_t device_id, uint8_t function_id) { 54 | struct pci_function *function = function_address(bus_id, device_id, function_id); 55 | 56 | kprintf( 57 | " function %d: %x:%x %x\n", 58 | function_id, function->vendor_id, function->device_id, function->header_type 59 | ); 60 | } 61 | 62 | static void enumerate_device(uint8_t bus_id, uint8_t device_id) { 63 | struct pci_function *device = function_address(bus_id, device_id, 0); 64 | if (device->vendor_id == 0xffff) { 65 | return; 66 | } 67 | 68 | kprintf(" device %d\n", device_id); 69 | 70 | if ((device->header_type & 0x80) == 0) { 71 | enumerate_function(bus_id, device_id, 0); 72 | } else { 73 | for (uint8_t function_id = 0; function_id < 8; function_id++) { 74 | struct pci_function *function = function_address(bus_id, device_id, function_id); 75 | if (function->vendor_id == 0xffff) { 76 | continue; 77 | } 78 | 79 | enumerate_function(bus_id, device_id, function_id); 80 | } 81 | } 82 | } 83 | 84 | static void enumerate_bus(uint8_t bus_id) { 85 | kprintf("bus %d\n", bus_id); 86 | 87 | for (uint8_t device_id = 0; device_id < 32; device_id++) { 88 | enumerate_device(bus_id, device_id); 89 | } 90 | } 91 | 92 | void pci_enumerate(void) { 93 | uint64_t ecam_address = segment_groups[0]; 94 | 95 | // TODO: factor out temporary mappings 96 | extern uint64_t kernel_pml4[], pd_map[]; 97 | for (int i = 0; i < 8; i += 2) { 98 | pd_map[1 + i / 2] = 99 | ecam_address | PAGE_PRESENT | PAGE_WRITE | PAGE_CACHE_UC | PAGE_LARGE | PAGE_GLOBAL; 100 | } 101 | write_cr3(PHYS_KERNEL(kernel_pml4)); 102 | ecam = (volatile void*)0xffffffffc0200000; 103 | 104 | struct pci_function *root = function_address(0, 0, 0); 105 | if ((root->header_type & 0x80) == 0) { 106 | enumerate_bus(0); 107 | } else { 108 | for (uint8_t function_id = 0; function_id < 8; function_id++) { 109 | struct pci_function *bus = function_address(0, 0, function_id); 110 | if (bus->vendor_id != 0xffff) { 111 | break; 112 | } 113 | 114 | enumerate_bus(function_id); 115 | } 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/pci.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void pci_add_segment(uint16_t segment, uint64_t ecam_address, uint8_t bus_start, uint8_t bus_end); 4 | void pci_enumerate(void); 5 | -------------------------------------------------------------------------------- /src/segment.c: -------------------------------------------------------------------------------- 1 | #include "segment.h" 2 | #include 3 | 4 | struct gdt_entry { 5 | uint16_t limit_low; 6 | uint16_t base_low; 7 | uint8_t base_middle; 8 | uint8_t type: 4, s: 1, dpl: 2, p: 1; 9 | uint8_t limit_high: 4, avl: 1, l: 1, d: 1, g: 1; 10 | uint8_t base_high; 11 | } gdt[] = { 12 | [GDT_KERNEL_CODE] = { .type = 0xa, .s = 1, .p = 1, .l = 1 }, 13 | [GDT_KERNEL_DATA] = { .type = 0x2, .s = 1, .p = 1 }, 14 | [GDT_USER_CODE] = { .type = 0xa, .s = 1, .dpl = 3, .l = 1 }, 15 | [GDT_USER_DATA] = { .type = 0x2, .s = 1, .dpl = 3 }, 16 | }; 17 | 18 | struct gdt_desc { 19 | uint16_t limit; 20 | uint64_t base; 21 | } __attribute__((packed)) gdt_desc = { 22 | .limit = sizeof(gdt) - sizeof(*gdt), 23 | .base = (uint64_t)gdt, 24 | }; 25 | -------------------------------------------------------------------------------- /src/segment.h: -------------------------------------------------------------------------------- 1 | enum { 2 | GDT_KERNEL_CODE = 1, 3 | GDT_KERNEL_DATA = 2, 4 | GDT_USER_CODE = 3, 5 | GDT_USER_DATA = 4, 6 | }; 7 | 8 | enum { 9 | SEG_KERNEL_CODE = GDT_KERNEL_CODE * 8, 10 | SEG_KERNEL_DATA = GDT_KERNEL_DATA * 8, 11 | SEG_USER_CODE = GDT_USER_CODE * 8, 12 | SEG_USER_DATA = GDT_USER_DATA * 8, 13 | }; 14 | -------------------------------------------------------------------------------- /src/serial.c: -------------------------------------------------------------------------------- 1 | #include "serial.h" 2 | #include "cpu.h" 3 | 4 | void serial_init(uint16_t port) { 5 | outb(port + 1, 0x00); // disable interrupts 6 | 7 | outb(port + 3, 0x80); // enable DLAB 8 | outb(port + 0, 0x01); // set divisor to 1 9 | outb(port + 1, 0x00); 10 | 11 | outb(port + 3, 0x03); // 8 bits, no parity, one stop bit 12 | outb(port + 2, 0xc7); // enable fifo, clear with 14-byte threshold 13 | outb(port + 4, 0x0b); // enable interrupts, rts/dsr set 14 | } 15 | 16 | bool serial_available(uint16_t port) { 17 | return inb(port + 5) & 0x1; 18 | } 19 | 20 | uint8_t serial_read(uint16_t port) { 21 | while (!serial_available(port)); 22 | return inb(port); 23 | } 24 | 25 | bool serial_empty(uint16_t port) { 26 | return inb(port + 5) & 0x20; 27 | } 28 | 29 | void serial_write(uint16_t port, uint8_t value) { 30 | while (!serial_empty(port)); 31 | outb(port, value); 32 | } 33 | 34 | void serial_write_chars(const char *buf, size_t n) { 35 | while (n--) { 36 | if (*buf == '\n') 37 | serial_write(COM1, '\r'); 38 | 39 | serial_write(COM1, *buf++); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/serial.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #define COM1 0x3f8 6 | 7 | void serial_init(uint16_t port); 8 | bool serial_available(uint16_t port); 9 | uint8_t serial_read(uint16_t port); 10 | bool serial_empty(uint16_t port); 11 | void serial_write(uint16_t port, uint8_t value); 12 | 13 | void serial_write_chars(const char *buf, size_t n); 14 | -------------------------------------------------------------------------------- /src/smp.c: -------------------------------------------------------------------------------- 1 | #include "smp.h" 2 | #include "spinlock.h" 3 | #include "apic.h" 4 | #include "tsc.h" 5 | #include "memory.h" 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | extern char trampoline_begin[], trampoline_end[]; 12 | extern char percpu_begin[], percpu_end[]; 13 | 14 | extern volatile uint64_t startup_stack; 15 | extern volatile uint64_t startup_code; 16 | extern volatile uint64_t startup_gs; 17 | 18 | #define TRAMPOLINE_SYM(base, sym) \ 19 | *(__typeof__(sym)*)((char*)(base) + (uintptr_t)&(sym)) 20 | 21 | extern volatile uint32_t smp_ap_started; 22 | 23 | // TODO: put in .startup.data 24 | uint8_t lapic_by_cpu[256]; 25 | 26 | void *percpu_data[256]; 27 | 28 | SMP_PERCPU uint32_t smp_id; 29 | 30 | static struct spinlock print_lock; 31 | static volatile bool ap_initialized = true; 32 | void smp_start(void) { 33 | ap_initialized = true; 34 | 35 | struct spinlock_node node; 36 | spin_lock(&print_lock, &node); 37 | kprintf("cpu %d started\n", SMP_PERCPU_READ(smp_id)); 38 | spin_unlock(&print_lock, &node); 39 | 40 | while (true) __asm__ ("hlt"); 41 | } 42 | 43 | void smp_init(void) { 44 | // TODO: memory_find from 0 with a better error code? 45 | uint64_t trampoline_size = trampoline_end - trampoline_begin; 46 | uint64_t trampoline = memory_find(PAGE_SIZE, 0x100000, trampoline_size, PAGE_SIZE); 47 | memory_reserve(trampoline, trampoline_size); 48 | memcpy((void*)trampoline, trampoline_begin, trampoline_size); 49 | 50 | // allocate percpu data now that we have a number from acpi 51 | uint64_t percpu_size = percpu_end - percpu_begin + PAGE_SIZE; 52 | void *percpu = memory_alloc(0x100000, memory_end(), lapic_count * percpu_size, PAGE_SIZE); 53 | 54 | volatile uint32_t *ap_started = &TRAMPOLINE_SYM(trampoline, smp_ap_started); 55 | startup_code = (uintptr_t)smp_start; 56 | 57 | uint32_t bsp_id = apic_read(apic_id); 58 | for (unsigned i = 0; i < lapic_count; i++) { 59 | uint32_t apic_id = lapic_by_cpu[i]; 60 | if (apic_id == bsp_id) { 61 | continue; 62 | } 63 | 64 | percpu_data[i] = (char*)percpu + i * percpu_size; 65 | memcpy(percpu_data[i], percpu_begin, percpu_size); 66 | 67 | SMP_PERCPU_SYM(i, smp_id) = i; 68 | 69 | // wait for the last AP to finish using the trampoline 70 | while (!ap_initialized) { 71 | continue; 72 | } 73 | ap_initialized = false; 74 | 75 | *ap_started = 0; 76 | startup_gs = (uintptr_t)percpu_data[i]; 77 | startup_stack = (uintptr_t)percpu_data[i] + percpu_size; 78 | 79 | apic_icr_write(apic_id, apic_icr_level | apic_icr_assert | apic_icr_init); 80 | apic_icr_wait_idle(100); 81 | apic_icr_write(apic_id, apic_icr_level | apic_icr_deassert | apic_icr_init); 82 | apic_icr_wait_idle(100); 83 | 84 | // MP spec says wait 10ms here, but newer CPUs don't need it (e.g. intel family 6+) 85 | 86 | bool sent = false; 87 | uint8_t error = 0; 88 | for (int i = 0; i < 2; i++) { 89 | // start execution on the target core at CS:IP = (trampoline >> 4):0 90 | apic_icr_write(apic_id, apic_icr_startup | (trampoline >> PAGE_SHIFT)); 91 | tsc_udelay(10); 92 | sent = apic_icr_wait_idle(100); 93 | tsc_udelay(200); 94 | 95 | error = apic_esr_read(); 96 | if (!sent || error || *ap_started) 97 | break; 98 | } 99 | 100 | if (!sent) 101 | kprintf("apic: [%d] startup ipi was not delivered\n", i); 102 | if (error) 103 | kprintf("apic: [%d] delivery error %x\n", i, error); 104 | if (!*ap_started) 105 | kprintf("apic: [%d] ap didn't set flag\n", i); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/smp.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #define SMP_PERCPU __attribute__((section(".percpu"))) 4 | 5 | #define SMP_PERCPU_READ(sym) __extension__ ({ \ 6 | __typeof__(sym) out; \ 7 | __asm__ volatile ("mov %%gs:%1, %0" : "=r"(out) : "m"(sym)); \ 8 | out; \ 9 | }) 10 | 11 | #define SMP_PERCPU_WRITE(sym, val) \ 12 | __asm__ volatile ("mov %1, %%gs:%0" : "=m"(sym) : "ir"(val)) 13 | 14 | #define SMP_PERCPU_SYM(cpu, sym) \ 15 | *(__typeof__(sym)*)((char*)percpu_data[cpu] + (uintptr_t)&(sym)) 16 | 17 | void smp_init(void); 18 | 19 | extern uint8_t lapic_by_cpu[256]; 20 | extern void *percpu_data[256]; 21 | -------------------------------------------------------------------------------- /src/spinlock.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | struct spinlock { 6 | struct spinlock_node *tail; 7 | }; 8 | 9 | struct spinlock_node { 10 | struct spinlock_node *next; 11 | bool locked; 12 | }; 13 | 14 | static inline void spin_lock(struct spinlock *lock, struct spinlock_node *node) { 15 | node->next = NULL; 16 | node->locked = false; 17 | 18 | struct spinlock_node *prev = atomic_exchange_explicit(&lock->tail, node, memory_order_acquire); 19 | if (prev == NULL) { 20 | return; 21 | } 22 | 23 | atomic_store_explicit(&prev->next, node, memory_order_relaxed); 24 | while (!atomic_load_explicit(&node->locked, memory_order_acquire)) { 25 | __asm__ volatile ("pause"); 26 | } 27 | } 28 | 29 | static inline void spin_unlock(struct spinlock *lock, struct spinlock_node *node) { 30 | struct spinlock_node *next = atomic_load_explicit(&node->next, memory_order_relaxed); 31 | if (next == NULL) { 32 | struct spinlock_node *prev = node; 33 | if (atomic_compare_exchange_strong_explicit( 34 | &lock->tail, &prev, NULL, memory_order_release, memory_order_relaxed 35 | )) { 36 | return; 37 | } 38 | while ((next = atomic_load_explicit(&node->next, memory_order_relaxed)) == NULL) { 39 | __asm__ volatile ("pause"); 40 | } 41 | } 42 | atomic_store_explicit(&next->locked, true, memory_order_release); 43 | } 44 | -------------------------------------------------------------------------------- /src/startup.S: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | // kernel_start(void *map, size_t map_size, size_t descriptor_size, void *rsdp) 4 | // main kernel entry point- takes UEFI memory map and ACPI RSDP in SysV ABI 5 | .section .startup.text, "awx" 6 | .global kernel_start 7 | kernel_start: 8 | // save args 9 | movq %rdi, %r8 10 | movq %rdx, %r10 11 | movq %rcx, %r11 12 | 13 | // page tables 14 | movq $(kernel_pml4 - KERNEL_BASE), %rax 15 | movq %rax, %cr3 16 | 17 | // zero bss 18 | cld 19 | xorq %rax, %rax 20 | leaq bss_begin, %rdi 21 | leaq bss_end, %rcx 22 | subq %rdi, %rcx 23 | shrq $3, %rcx 24 | rep stosq 25 | 26 | .global ap_start 27 | ap_start: 28 | 29 | // stack 30 | movq startup_stack(%rip), %rsp 31 | 32 | // gdt 33 | lgdt gdt_desc 34 | xorl %eax, %eax 35 | movl %eax, %ds 36 | movl %eax, %es 37 | movl %eax, %fs 38 | movl %eax, %gs 39 | movl %eax, %ss 40 | 41 | // per-cpu %gs 42 | movl $0xc0000101, %ecx 43 | movq startup_gs(%rip), %rax 44 | movq %rax, %rdx 45 | shrq $32, %rdx 46 | wrmsr 47 | 48 | // restore args 49 | movq %r8, %rdi 50 | movq %r10, %rdx 51 | movq %r11, %rcx 52 | 53 | // load cs and jump to high addresses 54 | movq startup_code(%rip), %rax 55 | pushq $0x08 56 | pushq %rax 57 | lretq 58 | 59 | .section .startup.data, "a" 60 | 61 | .global startup_gs 62 | startup_gs: 63 | .quad 0 64 | 65 | .global startup_stack 66 | startup_stack: 67 | .quad bsp_stack 68 | 69 | .global startup_code 70 | startup_code: 71 | .quad kernel_init 72 | 73 | // initial page tables for the kernel 74 | // this includes a temporary identity mapping before we jump to the higher half, 75 | // the start of the direct mapping at DIRECT_BASE to bootstrap the VIRT_DIRECT macro, 76 | // and the kernel mapping itself in the last entry of the kernel_pml4 77 | 78 | .global pdpt_direct 79 | .align 4096 80 | pdpt_direct: 81 | .quad pd - KERNEL_BASE + PAGE_PRESENT + PAGE_WRITE 82 | .fill 511, 8, 0 83 | 84 | .data 85 | 86 | .global kernel_pml4 87 | .align 4096 88 | kernel_pml4: 89 | .quad pdpt_direct - KERNEL_BASE + PAGE_PRESENT + PAGE_WRITE 90 | .org kernel_pml4 + 8 * PML4_INDEX(DIRECT_BASE) 91 | .quad pdpt_direct - KERNEL_BASE + PAGE_PRESENT + PAGE_WRITE 92 | .org kernel_pml4 + 8 * PML4_INDEX(KERNEL_BASE) 93 | .quad pdpt - KERNEL_BASE + PAGE_PRESENT + PAGE_WRITE 94 | 95 | .global pdpt 96 | .align 4096 97 | pdpt: 98 | .fill 510, 8, 0 99 | .quad pd - KERNEL_BASE + PAGE_PRESENT + PAGE_WRITE 100 | .quad pd_map - KERNEL_BASE + PAGE_PRESENT + PAGE_WRITE 101 | 102 | .global pd 103 | .align 4096 104 | pd: 105 | i = 0 106 | .rept 512 107 | .quad (i << 21) + PAGE_PRESENT + PAGE_WRITE + PAGE_LARGE + PAGE_GLOBAL 108 | i = i + 1 109 | .endr 110 | 111 | .global pd_map 112 | .align 4096 113 | pd_map: 114 | .quad pt_map - KERNEL_BASE + PAGE_PRESENT + PAGE_WRITE + PAGE_GLOBAL 115 | .fill 511, 8, 0 116 | 117 | .bss 118 | 119 | .global pt_map 120 | .align 4096 121 | pt_map: 122 | .fill 512, 8, 0 123 | 124 | .global bsp_stack 125 | .align 4096 126 | .fill 4096, 1, 0 127 | bsp_stack: 128 | -------------------------------------------------------------------------------- /src/trampoline.S: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | .section .trampoline, "awx" 4 | .code16 5 | 6 | .global trampoline 7 | trampoline: 8 | 9 | // trampoline starts execution at some unknown address but with IP=0 10 | // set all the segments to CS so it can access trampoline data 11 | mov %cs, %ax 12 | mov %ax, %ds 13 | mov %ax, %es 14 | mov %ax, %ss 15 | 16 | // notify the BSP that this core has started up 17 | movl $1, smp_ap_started 18 | 19 | // calculate linear address 20 | mov %cs, %ax 21 | movzx %ax, %esi 22 | shll $4, %esi 23 | 24 | // fix up absolute pointers 25 | leal trampoline_gdt(%esi), %eax 26 | movl %eax, trampoline_gdt_desc + 2 27 | leal trampoline_64(%esi), %eax 28 | movl %eax, trampoline_64_vector 29 | 30 | // load temporary gdt 31 | lgdtl trampoline_gdt_desc 32 | 33 | // enable PAE and PGE 34 | movl %cr4, %eax 35 | orl $0b10100000, %eax 36 | movl %eax, %cr4 37 | 38 | // load the page tables 39 | movl $(kernel_pml4 - KERNEL_BASE), %eax 40 | movl %eax, %cr3 41 | 42 | // enable LME 43 | movl $0xc0000080, %ecx 44 | rdmsr 45 | orl $0x100, %eax 46 | wrmsr 47 | 48 | // enable paging and switch to long mode 49 | movl %cr0, %eax 50 | orl $0x80000001, %eax 51 | movl %eax, %cr0 52 | 53 | ljmpl *trampoline_64_vector 54 | 55 | .code64 56 | trampoline_64: 57 | lea ap_start, %rax 58 | jmp *%rax 59 | 60 | .global smp_ap_started 61 | .align 4 62 | smp_ap_started: 63 | .long 0 64 | 65 | .align 4 66 | trampoline_gdt: 67 | .quad 0 68 | .quad 0x00209a0000000000 69 | .quad 0x0000920000000000 70 | 71 | trampoline_gdt_desc: 72 | .short trampoline_gdt_desc - trampoline_gdt 73 | .long trampoline_gdt 74 | 75 | .align 4 76 | trampoline_64_vector: 77 | .long trampoline_64 78 | .word 0x8 79 | -------------------------------------------------------------------------------- /src/tsc.c: -------------------------------------------------------------------------------- 1 | #include "tsc.h" 2 | #include "hpet.h" 3 | #include "cpu.h" 4 | #include 5 | #include 6 | 7 | uint32_t tsc_frequency; 8 | 9 | // TODO: cpuid flag constants 10 | void tsc_calibrate(void) { 11 | uint32_t eax, ebx, ecx, edx; 12 | 13 | cpuid(0x80000007, &eax, &ebx, &ecx, &edx); 14 | if (!(edx & (1 << 8))) { 15 | panic("invariant tsc unavailable"); 16 | } 17 | 18 | cpuid(0x01, &eax, &ebx, &ecx, &edx); 19 | if (!(ecx & (1 << 24))) { 20 | panic("tsc deadline unavailable"); 21 | } 22 | 23 | // TODO: move femtoseconds constant 24 | uint32_t wait_time = 1000000000000000ULL / hpet_period() / 100; 25 | 26 | uint64_t hpet_start = hpet_now(); 27 | uint64_t tsc_start = rdtsc(); 28 | 29 | while (hpet_now() - hpet_start < wait_time) continue; 30 | 31 | uint32_t tsc_end = rdtsc(); 32 | 33 | tsc_frequency = (tsc_end - tsc_start) * 100; 34 | 35 | kprintf("tsc: %u.%06uMHz\n", tsc_frequency / 1000000, tsc_frequency % 1000000); 36 | } 37 | 38 | void tsc_udelay(uint64_t usecs) { 39 | uint64_t wait_time = tsc_frequency / 1000000 * usecs; 40 | 41 | uint64_t start_time = rdtsc(); 42 | while (rdtsc() - start_time < wait_time) continue; 43 | 44 | // TODO: handle accidental core switches 45 | } 46 | -------------------------------------------------------------------------------- /src/tsc.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void tsc_calibrate(void); 4 | 5 | void tsc_udelay(uint64_t usecs); 6 | 7 | extern uint32_t tsc_frequency; 8 | 9 | static inline uint64_t rdtsc(void) { 10 | uint64_t result[2]; 11 | __asm__ volatile ("rdtsc" : "=a"(result[0]), "=d"(result[1])); 12 | return (result[1] << 32) | result[0]; 13 | } 14 | --------------------------------------------------------------------------------