├── .gitignore ├── LICENSE ├── Makefile ├── aarch64-unknown-custom.json ├── bootstrap ├── Makefile ├── interrupts.S ├── main.cc ├── memory.cc ├── memory.h ├── pl011.cc ├── pl011.h └── start.S ├── c ├── Makefile ├── crt.c ├── relocate.c ├── string.c └── string.h ├── cc ├── Makefile ├── algorithm ├── ccrt.cc ├── cstddef ├── cstdint ├── cstring ├── memory ├── new ├── new.cc ├── optional ├── type_traits └── utility ├── common ├── Makefile ├── allocator.h ├── endian.cc ├── endian.h ├── fixed_vector.h ├── intrusive_list.cc ├── intrusive_list.h ├── logging.cc ├── logging.h ├── math.cc ├── math.h ├── stream.cc ├── stream.h ├── string_view.cc ├── string_view.h └── vector.h ├── fdt ├── Makefile ├── blob.cc ├── blob.h ├── scanner.cc ├── scanner.h └── span.h ├── kernel.lds ├── kernel ├── Cargo.toml ├── bootstrap │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── devicetree │ ├── Cargo.toml │ └── src │ │ ├── fdt.rs │ │ ├── lib.rs │ │ ├── scanner.rs │ │ ├── test.dtb │ │ └── test.dts ├── interrupt │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── intrusive │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ └── list.rs ├── kernel │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ └── setup.rs ├── log │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── memory │ ├── Cargo.toml │ └── src │ │ ├── buddy.rs │ │ ├── lib.rs │ │ ├── list.rs │ │ ├── memory_map.rs │ │ └── page.rs ├── numeric │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── pl011 │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── runtime │ ├── Cargo.toml │ └── src │ │ └── lib.rs └── sync │ ├── Cargo.toml │ └── src │ ├── lib.rs │ └── placeholder.rs └── memory ├── Makefile ├── alloc.cc ├── alloc.h ├── arch.h ├── cache.cc ├── cache.h ├── memory.cc ├── memory.h ├── phys.cc ├── phys.h ├── space.cc └── space.h /.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | *.d 3 | *.a 4 | *.elf 5 | *.lock 6 | *.fd 7 | *.dts 8 | *.dtb 9 | root/ 10 | TODO 11 | qemu.sh 12 | kernel/target 13 | .gdbinit 14 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 
23 | 24 | For more information, please refer to 25 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | export MAKEFLAGS += -r 2 | export CC := clang 3 | export CXX := clang 4 | export AR := llvm-ar 5 | export LD := lld 6 | 7 | LDFLAGS := \ 8 | -flavor ld -m aarch64elf \ 9 | --pie --static --nostdlib --script=kernel.lds 10 | CARGO_TARGET := \ 11 | -Zbuild-std -Zbuild-std-features=compiler-builtins-mem \ 12 | --target=$(shell pwd)/aarch64-unknown-custom.json 13 | 14 | default: all 15 | 16 | libkernel.a: 17 | cd kernel ; cargo build $(CARGO_TARGET) --release ; cd - 18 | cp kernel/target/aarch64-unknown-custom/release/libkernel.a $@ 19 | 20 | libmemory.a: 21 | $(MAKE) -C memory 22 | cp memory/libmemory.a $@ 23 | 24 | libfdt.a: 25 | $(MAKE) -C fdt 26 | cp fdt/libfdt.a $@ 27 | 28 | libbootstrap.a: 29 | $(MAKE) -C bootstrap 30 | cp bootstrap/libbootstrap.a $@ 31 | 32 | libc.a: 33 | $(MAKE) -C c 34 | cp c/libc.a $@ 35 | 36 | libcc.a: 37 | $(MAKE) -C cc 38 | cp cc/libcc.a $@ 39 | 40 | libcc.a: 41 | $(MAKE) -C cc 42 | cp cc/libcc.a $@ 43 | 44 | libcommon.a: 45 | $(MAKE) -C common 46 | cp common/libcommon.a $@ 47 | 48 | kernel.elf: libc.a libcc.a libcommon.a libmemory.a libfdt.a libbootstrap.a #libkernel.a 49 | $(LD) $(LDFLAGS) $^ -o $@ 50 | 51 | .PHONY: clean all default test libcommon.a libc.a libcc.a libkernel.a libbootstrap.a libmemory.a libfdt.a 52 | 53 | all: kernel.elf 54 | 55 | test: 56 | cd kernel/pl011 ; cargo test ; cd - 57 | cd kernel/devicetree ; cargo test ; cd - 58 | cd kernel/memory ; cargo test ; cd - 59 | cd kernel/numeric ; cargo test ; cd - 60 | cd kernel/sync ; cargo test ; cd - 61 | 62 | bench: 63 | cd kernel/pl011 ; cargo bench ; cd - 64 | cd kernel/devicetree ; cargo bench ; cd - 65 | cd kernel/memory ; cargo bench ; cd - 66 | cd kernel/numeric ; cargo bench ; cd - 67 | cd kernel/sync ; cargo bench ; cd - 68 | 69 | clean: 70 | 
cd kernel ; cargo clean ; cd - 71 | $(MAKE) -C bootstrap clean 72 | $(MAKE) -C memory clean 73 | $(MAKE) -C fdt clean 74 | $(MAKE) -C c clean 75 | $(MAKE) -C cc clean 76 | $(MAKE) -C common clean 77 | rm -rf *.elf *.o *.d *.a 78 | 79 | -------------------------------------------------------------------------------- /aarch64-unknown-custom.json: -------------------------------------------------------------------------------- 1 | { 2 | "arch": "aarch64", 3 | "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", 4 | "disable-redzone": true, 5 | "executables": true, 6 | "features": "+strict-align,+neon,+fp-armv8", 7 | "is-builtin": true, 8 | "linker": "rust-lld", 9 | "linker-flavor": "ld.lld", 10 | "linker-is-gnu": true, 11 | "llvm-target": "aarch64-unknown-none", 12 | "max-atomic-width": 128, 13 | "panic-strategy": "abort", 14 | "relocation-model": "pic", 15 | "target-pointer-width": "64", 16 | "unsupported-abis": [ 17 | "stdcall", 18 | "fastcall", 19 | "vectorcall", 20 | "thiscall", 21 | "win64", 22 | "sysv64" 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /bootstrap/Makefile: -------------------------------------------------------------------------------- 1 | CC := clang 2 | CXX := clang 3 | AR := llvm-ar 4 | 5 | AFLAGS := \ 6 | -fPIE -target aarch64-unknown-none -Wall -Werror 7 | CXXFLAGS := \ 8 | -MMD -mno-red-zone -std=c++17 -ffreestanding -fno-threadsafe-statics \ 9 | -fno-exceptions -fno-rtti -Ofast -g -fPIE -target aarch64-unknown-none \ 10 | -Wall -Werror -Wframe-larger-than=1024 -pedantic -I.. 
-I../c -I../cc 11 | 12 | ASRCS := start.S interrupts.S 13 | AOBJS := $(ASRCS:.S=.o) 14 | 15 | CXXSRCS := main.cc pl011.cc memory.cc 16 | CXXOBJS := $(CXXSRCS:.cc=.o) 17 | 18 | OBJS := $(AOBJS) $(CXXOBJS) 19 | 20 | default: all 21 | 22 | %.o: %.S 23 | $(CC) $(AFLAGS) -c $< -o $@ 24 | 25 | %.o: %.cc 26 | $(CXX) $(CXXFLAGS) -c $< -o $@ 27 | 28 | libbootstrap.a: $(OBJS) 29 | $(AR) rc $@ $^ 30 | 31 | -include $(ASRCS:.S=.d) 32 | -include $(CXXSRCS:.cc=.d) 33 | 34 | .PHONY: clean all default 35 | 36 | all: libbootstrap.a 37 | 38 | clean: 39 | rm -rf *.elf *.o *.d *.a 40 | -------------------------------------------------------------------------------- /bootstrap/interrupts.S: -------------------------------------------------------------------------------- 1 | .text 2 | .global vector_table 3 | .balign 2048 4 | // The following four entries are for exceptions and interrupts when we use 5 | // SP_EL0 as a stack pointer for all ELs and the interrupt/exception is taken 6 | // from EL2 to EL2. Currently the expectation is that each EL will use their 7 | // own SP register version, so this should not happen. 8 | vector_table: 9 | b . 10 | .balign 0x80 11 | b . 12 | .balign 0x80 13 | b . 14 | .balign 0x80 15 | b . 16 | 17 | // The following four entries are for exceptions and interrupts generated when 18 | // the code was executing in EL2 and using SP_EL2. In other words we will be 19 | // using the same stack pointer as the code that was interrupted. 20 | .balign 0x80 21 | b exception_entry 22 | .balign 0x80 23 | b interrupt_entry 24 | .balign 0x80 25 | b interrupt_entry 26 | .balign 0x80 27 | b exception_entry 28 | 29 | // The following four entries are for exceptions and interrupts taken from 30 | // lower ELs running in AArch64 and AArch32 mode. At least currently there 31 | // is no code running at the lower ELs, so there is no way we should take 32 | // exceptions or interrupts from the lower ELs. 
Similarly to the first four 33 | // entries in the table I'm using infinite loops here to hang the execution 34 | // if it happens. 35 | .balign 0x80 36 | b . 37 | .balign 0x80 38 | b . 39 | .balign 0x80 40 | b . 41 | .balign 0x80 42 | b . 43 | .balign 0x80 44 | b . 45 | .balign 0x80 46 | b . 47 | .balign 0x80 48 | b . 49 | .balign 0x80 50 | b . 51 | 52 | // The following code is written to preserve all the general purpose registers 53 | // plus some not-quite general purpose registers like SP and LR. All the data 54 | // will be saved on stack, so it should be large enough to handle it. 55 | // 56 | // Note that the implementation doesn't save all the general purpose registers 57 | // on stack because there is no need for that. According to the calling 58 | // convention x19-x28 registers must be preserved through the function calls, 59 | // so we just have to be careful to not modify them inside this code and all 60 | // the functions this code calls must do the same, thus their values will be 61 | // preserved even if we don't store them on stack in this code. 62 | // 63 | // Also, looking at other implementation of the similar logic I found that the 64 | // existing implementation do not really care about the situations when the SP 65 | // value is not aligned on the 16-byte boundary. So it's likely that most of 66 | // the complexity in this function related to the SP alignment isn't needed at 67 | // all, and I just don't understand yet why that's the case. I think that the 68 | // principle at play here is that lower ELs should use their own SP and we just 69 | // need to make sure that our code never viaolates the SP alignment, but I have 70 | // no clue how to check/make sure that the compiler generated code conforms to 71 | // this principle. 72 | interrupt_entry: 73 | // SP may or may not be aligned to 16-byte boundary at this point, so we 74 | // store the minimum amount of data required to fix the SP alignment 75 | // without loosing the state. 
76 | stp x20, x21, [sp, #-16]! 77 | 78 | // At this point we preserved the old values of the x20 and x21 registers on 79 | // stack. We need to keep track of the place where those values are 80 | // stored, so that will take one register and the other register is free 81 | // for us to use as we see fit. I will keep the x21 as a pointer to where 82 | // the original x20 and x21 values are stored and also x21 can be used to 83 | // calculate the original value of the SP register. 84 | mov x21, sp 85 | 86 | // We need space to store the following: 87 | // - GPRs: x0-x15 88 | // - semi-GPRs: x16, x17, x18 89 | // - x29 or frame pointer 90 | // - x30 or link register 91 | // - the original sp or stack pointer 92 | // In total 176 bytes, aligning SP may additionally take some space, but 93 | // that should not be more than additional 15 bytes 94 | sub x20, sp, #192 95 | and sp, x20, #~0b1111 96 | 97 | stp x0, x1, [sp, #0] 98 | 99 | // Now when we saved the values of x0 and x1 we can use them for 100 | // somethething else. I need x0 to pass the pointer to the saved registers 101 | // state to the interrupt as per calling convention, so I'm going to copy 102 | // the current value of SP there. 103 | mov x0, sp 104 | 105 | // I will use the register x1 to store the original value of the SP 106 | // register so that it could be saved on stack later. 107 | add x1, x21, #16 108 | 109 | // Now registers x20 and x21 aren't used for anything else, so we can 110 | // restore their original values from the place where x21 points to. 
111 | ldp x20, x21, [x21] 112 | 113 | stp x2, x3, [sp, #16] 114 | stp x4, x5, [sp, #32] 115 | stp x6, x7, [sp, #48] 116 | stp x8, x9, [sp, #64] 117 | stp x10, x11, [sp, #80] 118 | stp x12, x13, [sp, #96] 119 | stp x14, x15, [sp, #112] 120 | stp x16, x17, [sp, #128] 121 | stp x18, x29, [sp, #144] 122 | stp x30, x1, [sp, #160] 123 | // The code we call should not use these values, but both interrupts and 124 | // exceptions (see the exception_entry below) use the same structure for 125 | // stored registers, so I'm zeroing out here unused parts. 126 | stp xzr, xzr, [sp, #176] 127 | 128 | // We pass a pointer to the saved registers to the interrupt handling 129 | // routine as a parameter in register x0. All the data there is properly 130 | // aligned and can be accessed freely. Additionally, all the modification 131 | // performed by the interrupt will be restored as the original values of 132 | // the registers, so the handler has the freedom to make some alterations. 133 | bl interrupt 134 | 135 | ldp x2, x3, [sp, #16] 136 | ldp x4, x5, [sp, #32] 137 | ldp x6, x7, [sp, #48] 138 | ldp x8, x9, [sp, #64] 139 | ldp x10, x11, [sp, #80] 140 | ldp x12, x13, [sp, #96] 141 | ldp x14, x15, [sp, #112] 142 | ldp x16, x17, [sp, #128] 143 | ldp x18, x29, [sp, #144] 144 | 145 | // We use x0 and x1 registers to restore the original value of the SP 146 | // register, since we cannot directly restore to sp register using ldp. 147 | // So the sequence as follows: 148 | // 1. restore the orignal stack pointer into x0 149 | // 2. save the current SP into x1 for the future ldp instruction 150 | // 3. copy the original stack pointer from x0 to SP 151 | // 4. restore the original values of x0 and x1 from the stack using x1 as 152 | // a base. 153 | ldp x30, x0, [sp, #160] 154 | mov x1, sp 155 | mov sp, x0 156 | ldp x0, x1, [x1, #0] 157 | 158 | eret 159 | 160 | // A lot of what is written above about the interrupt_entry applies to the 161 | // exception_entry as well. 
The main difference is that for exception_entry 162 | // we additionally save a few registers that are needed to understand the 163 | // cause of the exception. 164 | exception_entry: 165 | stp x20, x21, [sp, #-16]! 166 | 167 | mov x21, sp 168 | sub x20, sp, #192 169 | and sp, x20, #~0b1111 170 | 171 | stp x0, x1, [sp, #0] 172 | 173 | add x1, x21, #16 174 | ldp x20, x21, [x21] 175 | 176 | stp x2, x3, [sp, #16] 177 | stp x4, x5, [sp, #32] 178 | stp x6, x7, [sp, #48] 179 | stp x8, x9, [sp, #64] 180 | stp x10, x11, [sp, #80] 181 | stp x12, x13, [sp, #96] 182 | stp x14, x15, [sp, #112] 183 | stp x16, x17, [sp, #128] 184 | stp x18, x29, [sp, #144] 185 | stp x30, x1, [sp, #160] 186 | 187 | mrs x0, ESR_EL2 188 | mrs x1, FAR_EL2 189 | stp x0, x1, [sp, #176] 190 | 191 | mov x0, sp 192 | bl exception 193 | 194 | ldp x2, x3, [sp, #16] 195 | ldp x4, x5, [sp, #32] 196 | ldp x6, x7, [sp, #48] 197 | ldp x8, x9, [sp, #64] 198 | ldp x10, x11, [sp, #80] 199 | ldp x12, x13, [sp, #96] 200 | ldp x14, x15, [sp, #112] 201 | ldp x16, x17, [sp, #128] 202 | ldp x18, x29, [sp, #144] 203 | ldp x30, x0, [sp, #160] 204 | mov x1, sp 205 | mov sp, x0 206 | ldp x0, x1, [x1, #0] 207 | 208 | eret 209 | 210 | interrupt: 211 | ret 212 | 213 | exception: 214 | ret 215 | -------------------------------------------------------------------------------- /bootstrap/memory.cc: -------------------------------------------------------------------------------- 1 | #include "bootstrap/memory.h" 2 | #include "fdt/span.h" 3 | 4 | namespace { 5 | 6 | template 7 | bool RegisterRegions(It begin, It end, memory::MemoryMap* mmap) { 8 | for (It it = begin; it != end; ++it) { 9 | const uintptr_t from = it->begin; 10 | const uintptr_t to = from + it->size; 11 | 12 | if (!mmap->Register(from, to, memory::MemoryStatus::FREE)) { 13 | return false; 14 | } 15 | } 16 | return true; 17 | } 18 | 19 | bool RegisterRegions( 20 | const fdt::Property& property, 21 | size_t address, size_t size, 22 | memory::MemoryMap* mmap) { 23 | if 
(address == 1 && size == 1) { 24 | fdt::Span> span; 25 | if (!property.ValueAsSpan(&span)) { 26 | return false; 27 | } 28 | return RegisterRegions(span.ConstBegin(), span.ConstEnd(), mmap); 29 | } 30 | 31 | if (address == 2 && size == 2) { 32 | fdt::Span> span; 33 | if (!property.ValueAsSpan(&span)) { 34 | return false; 35 | } 36 | return RegisterRegions(span.ConstBegin(), span.ConstEnd(), mmap); 37 | } 38 | 39 | if (address == 1 && size == 2) { 40 | fdt::Span> span; 41 | if (!property.ValueAsSpan(&span)) { 42 | return false; 43 | } 44 | return RegisterRegions(span.ConstBegin(), span.ConstEnd(), mmap); 45 | } 46 | 47 | if (address == 2 && size == 1) { 48 | fdt::Span> span; 49 | if (!property.ValueAsSpan(&span)) { 50 | return false; 51 | } 52 | return RegisterRegions(span.ConstBegin(), span.ConstEnd(), mmap); 53 | } 54 | 55 | return false; 56 | } 57 | 58 | bool ParseMemoryNode( 59 | const fdt::Blob& blob, fdt::Scanner pos, 60 | size_t address, size_t size, memory::MemoryMap* mmap) { 61 | fdt::Node node; 62 | fdt::Property property; 63 | fdt::Token token; 64 | 65 | while (blob.TokenAt(pos, &token)) { 66 | switch (token) { 67 | case fdt::Token::PROP: 68 | if (!blob.ConsumeProperty(&pos, &property)) { 69 | return false; 70 | } 71 | if (property.name == "reg") { 72 | return RegisterRegions(property, address, size, mmap); 73 | } 74 | break; 75 | case fdt::Token::END_NODE: 76 | return true; 77 | default: 78 | return false; 79 | } 80 | } 81 | return false; 82 | } 83 | 84 | bool RegisterMemory(const fdt::Blob& blob, memory::MemoryMap* mmap) { 85 | fdt::Scanner pos = blob.Root().offset; 86 | uint32_t address_cells = 2; 87 | uint32_t size_cells = 2; 88 | fdt::Node node; 89 | fdt::Property property; 90 | fdt::Token token; 91 | 92 | while (blob.TokenAt(pos, &token)) { 93 | switch (token) { 94 | case fdt::Token::END: 95 | return true; 96 | case fdt::Token::PROP: 97 | if (!blob.ConsumeProperty(&pos, &property)) { 98 | return false; 99 | } 100 | if (property.name == "#size-cells") 
{ 101 | if (!property.ValueAsBe32(&size_cells)) { 102 | return false; 103 | } 104 | } 105 | if (property.name == "#address-cells") { 106 | if (!property.ValueAsBe32(&address_cells)) { 107 | return false; 108 | } 109 | } 110 | break; 111 | case fdt::Token::BEGIN_NODE: 112 | if (!blob.ConsumeStartNode(&pos, &node)) { 113 | return false; 114 | } 115 | if (node.name.StartsWith("memory")) { 116 | if (!ParseMemoryNode( 117 | blob, pos, address_cells, size_cells, mmap)) { 118 | return false; 119 | } 120 | } 121 | if (!blob.SkipNode(&pos)) { 122 | return false; 123 | } 124 | break; 125 | case fdt::Token::NOP: 126 | if (!blob.ConsumeNop(&pos)) { 127 | return false; 128 | } 129 | break; 130 | case fdt::Token::END_NODE: 131 | if (!blob.ConsumeEndNode(&pos)) { 132 | return false; 133 | } 134 | break; 135 | } 136 | } 137 | return false; 138 | } 139 | 140 | } // namespace 141 | 142 | bool MMapFromDTB(const fdt::Blob& blob, memory::MemoryMap* mmap) { 143 | if (!RegisterMemory(blob, mmap)) { 144 | return false; 145 | } 146 | 147 | const auto reserved = blob.Reserved(); 148 | for (auto it = reserved.ConstBegin(); it != reserved.ConstEnd(); ++it) { 149 | const uintptr_t begin = it->begin; 150 | const uintptr_t end = begin + it->size; 151 | 152 | if (!mmap->Reserve(begin, end)) { 153 | return false; 154 | } 155 | } 156 | 157 | return true; 158 | } 159 | -------------------------------------------------------------------------------- /bootstrap/memory.h: -------------------------------------------------------------------------------- 1 | #ifndef __BOOTSTRAP_MEMORY_H__ 2 | #define __BOOTSTRAP_MEMORY_H__ 3 | 4 | #include "fdt/blob.h" 5 | #include "memory/phys.h" 6 | 7 | bool MMapFromDTB(const fdt::Blob& blob, memory::MemoryMap* mmap); 8 | 9 | #endif // __BOOTSTRAP_MEMORY_H__ 10 | -------------------------------------------------------------------------------- /bootstrap/pl011.cc: -------------------------------------------------------------------------------- 1 | #include "pl011.h" 2 | 
3 | namespace { 4 | 5 | constexpr uint32_t DR = (0x000 / 4); 6 | constexpr uint32_t FR = (0x018 / 4); 7 | constexpr uint32_t IBRD = (0x024 / 4); 8 | constexpr uint32_t FBRD = (0x028 / 4); 9 | constexpr uint32_t LCR = (0x02c / 4); 10 | constexpr uint32_t CR = (0x030 / 4); 11 | constexpr uint32_t IMSC = (0x038 / 4); 12 | constexpr uint32_t ICR = (0x044 / 4); 13 | constexpr uint32_t DMACR = (0x048 / 4); 14 | 15 | constexpr uint32_t CR_TXEN = (1 << 8); 16 | constexpr uint32_t CR_UARTEN = (1 << 0); 17 | constexpr uint32_t FR_BUSY = (1 << 3); 18 | constexpr uint32_t LCR_FEN = (1 << 4); 19 | 20 | 21 | void WaitTxComplete(volatile uint32_t *fr_reg) { 22 | while ((*fr_reg & FR_BUSY) != 0) {} 23 | } 24 | 25 | // Calculates the baudrate divisior value from the device parameters. The 26 | // divisor values is divided into a 16 bit integer part and 6 bit fractional 27 | // part. The 6-bit fractional part contains the number of 1/2^6 = 1/64 28 | // fractions. 29 | // 30 | // In general the divisor have to be calculated as base clock / (16 * baudrate) 31 | // according to the "PrimeCell UART (PL011) Technical Reference Manual, Chapter 32 | // 3 Programmers Model, Section 3.3.6 Fractional Baud Rate Register, UARTFBRD". 33 | uint32_t Divisor(uint64_t base_clock, uint32_t baudrate) { 34 | // It's somewhat inconvenient to work with fractions, so let's first get 35 | // that out of the way by calculating the result multiplied by 64. 36 | // 37 | // I could multiply by a bigger number if I wanted higher precision, but 38 | // since I don't really need that much precision 64 is just enough. 39 | // 40 | // That changes our equation to 4 * base clock / baudrate and I only need 41 | // the integer part of the result. 42 | // 43 | // Now the fractional part is stored in the lower 6 bits and the integer 44 | // part is everything except the lower 6 bits. 
45 | return 4 * base_clock / baudrate; 46 | } 47 | 48 | } // namespace 49 | 50 | PL011 PL011::Serial(uintptr_t base_address, uint64_t base_clock) { 51 | volatile uint32_t *regs = reinterpret_cast( 52 | base_address); 53 | uint32_t divisor = Divisor(base_clock, PL011::kBaudrate); 54 | 55 | WaitTxComplete(®s[FR]); 56 | return static_cast(PL011(regs, divisor)); 57 | } 58 | 59 | void PL011::Reset() { 60 | // According to "PrimeCell UART (PL011) Technical Reference Manual, Chapter 61 | // 3 Programmers Model, Section 3.3.8 Control Register, UARTCR" the correct 62 | // sequence involing programmin of the control register (CR) is: 63 | // 1. disable UART 64 | // 2. wait for any ongoing transmissions and receives to complete 65 | // 3. flush the FIFO 66 | // 4. program the control register 67 | // 5. enable UART. 68 | // 69 | // The sequence is slightly confusing because both enabling and disabling 70 | // UART involves writing to the control register. So I interpret it as 71 | // writing the UARTEN bit of the control register should be done separately 72 | // from everything else. 73 | // 74 | // The other part that is confusing me is that while I can check for any 75 | // outgoing transmissions by checking the BUSY bit of the FR register, I 76 | // didn't really figure out what does it mean to wait for any ongoing 77 | // receives to complete and how I can do it. So that part I just ignored. 78 | const uint32_t cr = registers_[CR]; 79 | const uint32_t lcr = registers_[LCR]; 80 | 81 | // steps 1 - 3 of the control register programming sequence 82 | registers_[CR] = (cr & ~CR_UARTEN); 83 | WaitTxComplete(®isters_[FR]); 84 | registers_[LCR] = (lcr & ~LCR_FEN); 85 | 86 | registers_[IMSC] = 0x7ff; 87 | 88 | registers_[ICR] = 0x7ff; 89 | 90 | registers_[DMACR] = 0x0; 91 | 92 | // while UART is disabled I will also program all other registers besides 93 | // the control register: 94 | // 1. IBRD and FBRD that control the baudrate 95 | // 2. 
LCR that controls the frame format 96 | // 3. IMSC responsible for setting and clearing interrupt masks 97 | // 4. DMACR responsible for DMA settings 98 | registers_[IBRD] = divisor_ >> 6; 99 | registers_[FBRD] = divisor_ & 0x3F; 100 | registers_[LCR] = ((kDataBits - 1) & 0x3) << 5; 101 | 102 | // steps 4 - 5 of the control register programming sequence 103 | registers_[CR] = CR_TXEN; 104 | registers_[CR] = CR_TXEN | CR_UARTEN; 105 | } 106 | 107 | void PL011::Send(const uint8_t *data, size_t size) { 108 | WaitTxComplete(®isters_[FR]); 109 | 110 | for (size_t i = 0; i < size; ++i) { 111 | // I either have to do it here or each and every place where the 112 | // function is used has to add '\r' to each '\n'. 113 | // 114 | // Given that in the Unix world plain '\n' is normally used instead of 115 | // '\r\n' I decided to go with the first option as more natural. 116 | if (data[i] == '\n') { 117 | registers_[DR] = '\r'; 118 | WaitTxComplete(®isters_[FR]); 119 | } 120 | registers_[DR] = data[i]; 121 | WaitTxComplete(®isters_[FR]); 122 | } 123 | } 124 | 125 | PL011OutputStream::PL011OutputStream(PL011 *dev) : dev_(dev) {} 126 | 127 | PL011OutputStream::~PL011OutputStream() {} 128 | 129 | int PL011OutputStream::PutN(const char *data, int n) { 130 | dev_->Send(reinterpret_cast(data), n); 131 | return n; 132 | } 133 | 134 | int PL011OutputStream::Put(char c) { 135 | return PutN(&c, 1); 136 | } 137 | -------------------------------------------------------------------------------- /bootstrap/pl011.h: -------------------------------------------------------------------------------- 1 | #ifndef __BOOTSTRAP_PL011_H__ 2 | #define __BOOTSTRAP_PL011_H__ 3 | 4 | #include 5 | #include 6 | 7 | #include "common/stream.h" 8 | 9 | /* 10 | * UART communication is defined by its speed (baudrate) and the format of the 11 | * frame (number of data bits, parity check and the number of the stop bits). 
12 | * That's exactly what the fields of this structure describe, but I dropped the 13 | * parity part for now and use no-parity check setting. 14 | * 15 | * Aside from generic UART parameters I also need base address of the PL011 16 | * register block in memory. PL011 specification only defines offsets of the 17 | * registers from the base address, but the base address itself can change (see 18 | * "PrimeCell UART (PL011) Technical Reference Manual, Chapter 3 Programmers 19 | * Model, Section 3.1 About the programmers model"). 20 | * 21 | * Finally, "PrimeCell UART (PL011) Technical Reference Manual" doesn't actually 22 | * define the base clock frequencey (UARTCLK in the documentation), so it has to 23 | * be provided as well. 24 | */ 25 | class PL011 { 26 | public: 27 | static PL011 Serial(uintptr_t base_address, uint64_t base_clock); 28 | 29 | PL011(const PL011 &other) = delete; 30 | PL011& operator=(const PL011 &other) = delete; 31 | 32 | PL011(PL011 &&other) = default; 33 | PL011& operator=(PL011 &&other) = default; 34 | 35 | void Reset(); 36 | void Send(const uint8_t *data, size_t size); 37 | 38 | private: 39 | PL011(volatile uint32_t *regs, uint32_t divisor) 40 | : registers_(regs), divisor_(divisor) {} 41 | 42 | volatile uint32_t *registers_; 43 | uint32_t divisor_; 44 | 45 | static const uint32_t kBaudrate = 115200; 46 | static const uint32_t kDataBits = 8; 47 | static const uint32_t kStopBits = 1; 48 | }; 49 | 50 | class PL011OutputStream final : public common::OutputStream { 51 | public: 52 | PL011OutputStream(PL011 *dev); 53 | ~PL011OutputStream() override; 54 | 55 | PL011OutputStream(const PL011OutputStream&) = delete; 56 | PL011OutputStream& operator=(const PL011OutputStream&) = delete; 57 | 58 | PL011OutputStream(PL011OutputStream&&) = default; 59 | PL011OutputStream& operator=(PL011OutputStream&&) = default; 60 | 61 | int PutN(const char *data, int size) override; 62 | int Put(char c) override; 63 | 64 | private: 65 | PL011 *dev_; 66 | }; 67 | 68 | 
69 | #endif // __BOOTSTRAP_PL011_H__ 70 | -------------------------------------------------------------------------------- /c/Makefile: -------------------------------------------------------------------------------- 1 | CC := clang 2 | AR := llvm-ar 3 | 4 | CFLAGS := \ 5 | -MMD -mno-red-zone -ffreestanding -fno-threadsafe-statics \ 6 | -Ofast -g -fPIE -target aarch64-unknown-none \ 7 | -Wall -Werror -Wframe-larger-than=1024 -pedantic -I.. 8 | 9 | CSRCS := crt.c relocate.c string.c 10 | COBJS := $(CSRCS:.c=.o) 11 | 12 | OBJS := $(COBJS) 13 | 14 | default: all 15 | 16 | %.o: %.c 17 | $(CC) $(CFLAGS) -c $< -o $@ 18 | 19 | libc.a: $(OBJS) 20 | $(AR) rc $@ $^ 21 | 22 | -include $(CSRCS:.c=.d) 23 | 24 | .PHONY: clean all default 25 | 26 | all: libc.a 27 | 28 | clean: 29 | rm -rf *.elf *.o *.d *.a 30 | -------------------------------------------------------------------------------- /c/crt.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | /* 4 | * Compilers sometimes refer to standard C library functions even when they are 5 | * not used directly in the code. For example, a compiler may generate a call 6 | * to memset function instead of directly generating code that fills a 7 | * structure or an array with 0s. 8 | * 9 | * Given that we are not using the default C library, such references will 10 | * result in a linker error. To avoid that we provide our own implementations 11 | * of the C library functions that compiler expects. 
12 | */ 13 | void* memset(void* dst, int value, size_t num) { 14 | char *p = dst; 15 | for (size_t i = 0; i != num; ++i) { 16 | p[i] = value; 17 | } 18 | return dst; 19 | } 20 | -------------------------------------------------------------------------------- /c/relocate.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | struct elf64_rela { 4 | uint64_t r_offset; 5 | uint64_t r_info; 6 | int64_t r_addend; 7 | }; 8 | 9 | 10 | const uint32_t R_AARCH64_RELATIVE = 1027; 11 | 12 | static uint32_t rela_type(const struct elf64_rela *rela) 13 | { 14 | return rela->r_info & 0xffffffff; 15 | } 16 | 17 | /* 18 | * Even PIE binary on AARCH64 might contain relocations that require runtime 19 | * adjustment. Apparently, it's normally done inside the CRT, but since the 20 | * kernel is not linked with the standard library it cannot use the CRT also, 21 | * so we have to handle relocations ourselves. This function is called from the 22 | * startup code with three parameters: 23 | * 24 | * * difference between runtime and linktime addresses - that tells us by how 25 | * much we need to correct addresses compared to what linker thought 26 | * * pointers to the first and past last entries of the relocation array - 27 | * gives us all the places that need to be corrected. 28 | * 29 | * In my case all the relocations I encountered are of R_AARCH64_RELATIVE type. 30 | * So that's the only relocation type this function handles, but it hangs if it 31 | * discovers an unknown/unexpected relocation type. 
32 | */ 33 | void __relocate(int64_t diff, struct elf64_rela *begin, struct elf64_rela *end) 34 | { 35 | for (struct elf64_rela *ptr = begin; ptr != end; ++ptr) { 36 | while (rela_type(ptr) != R_AARCH64_RELATIVE); 37 | 38 | *(uint64_t *)(ptr->r_offset + diff) = ptr->r_addend + diff; 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /c/string.c: -------------------------------------------------------------------------------- 1 | #include "string.h" 2 | 3 | size_t strlen(const char* str) { 4 | const char* b = str; 5 | while (*str) str++; 6 | return str - b; 7 | } 8 | 9 | void* memset(void* dst, int c, size_t n) { 10 | char* s = (char *)dst; 11 | for (size_t i = 0; i < n; ++i) s[i] = c; 12 | return dst; 13 | } 14 | 15 | void* memcpy(void* dst, const void* src, size_t n) { 16 | unsigned char* d = dst; 17 | const unsigned char* s = src; 18 | 19 | for (size_t i = 0; i < n; ++i) 20 | d[i] = s[i]; 21 | return dst; 22 | } 23 | 24 | int strncmp(const char *l, const char *r, size_t size) { 25 | if (size == 0) 26 | return 0; 27 | 28 | while (size > 1 && *l == *r && *l != '\0') { 29 | ++l; 30 | ++r; 31 | --size; 32 | } 33 | 34 | if (*l < *r) 35 | return -1; 36 | if (*l > *r) 37 | return 1; 38 | return 0; 39 | } 40 | -------------------------------------------------------------------------------- /c/string.h: -------------------------------------------------------------------------------- 1 | #ifndef __C_STRING_H__ 2 | #define __C_STRING_H__ 3 | 4 | #include 5 | 6 | size_t strlen(const char* str); 7 | void* memset(void* dst, int c, size_t n); 8 | void* memcpy(void* dst, const void* src, size_t n); 9 | 10 | int strncmp(const char *l, const char *r, size_t size); 11 | 12 | #endif // __C_STRING_H__ 13 | -------------------------------------------------------------------------------- /cc/Makefile: -------------------------------------------------------------------------------- 1 | CXX := clang 2 | AR := llvm-ar 3 | 4 | CXXFLAGS := \ 
#ifndef __CC_ALGORITHM__
#define __CC_ALGORITHM__

namespace std {

// Smaller of two values; on a tie the first argument is returned.
template <typename T>
constexpr const T& min(const T& a, const T& b) {
    return (b < a) ? b : a;
}

// Larger of two values; on a tie the first argument is returned.
template <typename T>
constexpr const T& max(const T& a, const T& b) {
    return (a < b) ? b : a;
}

// Exchange two values. Written with plain rvalue casts so this header
// needs no other includes.
template <typename T>
void swap(T& a, T& b) {
    T tmp = static_cast<T&&>(a);
    a = static_cast<T&&>(b);
    b = static_cast<T&&>(tmp);
}

// Exchange the values the two iterators point at (ADL-aware).
template <typename It>
void iter_swap(It a, It b) {
    using std::swap;
    swap(*a, *b);
}

// In-place reversal of [first, last) for bidirectional iterators.
template <typename BiDiIt>
void reverse(BiDiIt first, BiDiIt last) {
    while (first != last && first != --last) {
        std::iter_swap(first, last);
        ++first;
    }
}

// First position in [first, last) whose element is NOT less than val.
// Linear scan; the range only needs to be partitioned wrt `less`.
template <typename It, typename T, typename Less>
It lower_bound(It first, It last, const T& val, Less less) {
    It it = first;
    while (it != last && less(*it, val)) {
        ++it;
    }
    return it;
}

// First position in [first, last) whose element is greater than val.
template <typename It, typename T, typename Less>
It upper_bound(It first, It last, const T& val, Less less) {
    It it = first;
    while (it != last && !less(val, *it)) {
        ++it;
    }
    return it;
}

// Left-rotate [first, last) so that middle becomes the first element;
// returns the new position of the old first element. Implemented with
// the classic triple-reverse trick.
template <typename It>
It rotate(It first, It middle, It last) {
    reverse(first, middle);
    reverse(middle, last);
    reverse(first, last);
    return last - (middle - first);
}

} // namespace std

#endif // __CC_ALGORITHM__
#include <cstddef>

/*
 * Freestanding C++ runtime shims. Compilers emit calls to Itanium C++
 * ABI functions (e.g. __cxa_atexit) even in a kernel; since we do not
 * link a hosted runtime we provide them ourselves.
 */
typedef void (*ctor_t)(void);


/*
 * Itanium ABI: registers a destructor to be run at program exit (used
 * for static objects with non-trivial destructors). The kernel never
 * exits, so registration is accepted and ignored.
 */
extern "C" int __cxa_atexit(void (*destroy)(void*), void* arg, void* dso) {
    (void)destroy;
    (void)arg;
    (void)dso;
    return 0;
}

/*
 * Itanium ABI: placeholder the compiler puts in vtable slots of pure
 * virtual functions. Reaching it means a pure virtual was called through
 * a partially-constructed/destructed object; spin so the bug is
 * noticeable instead of running into undefined behavior.
 */
extern "C" void __cxa_pure_virtual() {
    for (;;) {
    }
}

/*
 * Deleting destructors emitted for virtual destructors reference the
 * global operator delete, so a definition is needed for vtables to link
 * even though the kernel never heap-deletes through it.
 *
 * Bug fix: the implicit declaration of the global operator delete is
 * noexcept; declaring the replacement without noexcept mismatches it.
 * Also provide the C++14 sized-deallocation overload, which compilers
 * may emit calls to under -std=c++17.
 */
void operator delete(void*) noexcept {}
void operator delete(void*, size_t) noexcept {}

/*
 * Not part of any ABI: helper the bootstrap code calls to run the
 * constructors of static objects. [from, to) is an array of constructor
 * pointers — presumably the linker-collected init array; confirm against
 * kernel.lds.
 */
extern "C" void __constructors(ctor_t* from, ctor_t* to)
{
    while (from != to) {
        (*from++)();
    }
}
#ifndef __CC_MEMORY__
#define __CC_MEMORY__

#include <cstddef>
#include <type_traits>
#include <utility>


namespace std {

/*
 * Minimal unique_ptr for the freestanding kernel: a move-only owner of a
 * single object that invokes Deleter on the owned pointer when the
 * unique_ptr is destroyed or reset.
 */
template <typename T, typename Deleter>
class unique_ptr {

    // pointer is Deleter::pointer when the deleter provides that nested
    // type and plain T* otherwise (mirrors the standard unique_ptr rule).
    template <typename U, typename = void>
    struct Ptr {
        using type = T*;
    };

    template <typename U>
    struct Ptr<U, void_t<typename remove_reference_t<U>::pointer>> {
        using type = typename remove_reference_t<U>::pointer;
    };

public:
    using element_type = T;
    using deleter_type = Deleter;
    using pointer = typename Ptr<Deleter>::type;

    constexpr unique_ptr() noexcept {}
    explicit unique_ptr(pointer ptr) : ptr_(ptr) {}
    constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() {}

    template <typename D>
    unique_ptr(pointer ptr, const D& d) noexcept : ptr_(ptr), del_(d) {}

    template <typename D>
    unique_ptr(pointer ptr, D&& del) : ptr_(ptr), del_(move(del)) {}

    template <typename U, typename D>
    unique_ptr(unique_ptr<U, D>&& u) noexcept
        : ptr_(u.release()), del_(move(u.get_deleter())) {}

    ~unique_ptr() noexcept {
        reset();
    }

    unique_ptr& operator=(unique_ptr&& u) {
        reset(u.release());
        del_ = move(u.del_);
        return *this;
    }

    unique_ptr& operator=(nullptr_t) noexcept {
        reset();
        return *this;
    }

    unique_ptr(const unique_ptr&) = delete;

    template <typename U, typename D>
    unique_ptr& operator=(unique_ptr<U, D>&& u) noexcept {
        reset(u.release());
        del_ = move(u.get_deleter());
        return *this;
    }

    unique_ptr& operator=(const unique_ptr&) = delete;

    // Replace the owned pointer, destroying the previous one (if any).
    // The member is updated before the deleter runs, so a reentrant
    // reset() sees consistent state.
    void reset(pointer p = pointer()) noexcept {
        const pointer old = ptr_;
        ptr_ = p;
        if (old) {
            del_(old);
        }
    }

    // Give up ownership without destroying the object.
    pointer release() noexcept {
        pointer p = ptr_;
        ptr_ = pointer();
        return p;
    }

    void swap(unique_ptr& u) noexcept {
        using std::swap;
        swap(ptr_, u.ptr_);
        swap(del_, u.del_);
    }

    add_lvalue_reference_t<T> operator*() const {
        return *get();
    }

    pointer operator->() const noexcept {
        return get();
    }

    pointer get() const noexcept {
        return ptr_;
    }

    deleter_type& get_deleter() noexcept {
        return del_;
    }

    const deleter_type& get_deleter() const noexcept {
        return del_;
    }

    explicit operator bool() const noexcept {
        return get() != pointer();
    }

private:
    // Bug fix: both members carry default member initializers now. The
    // constexpr default constructor is ill-formed without them, and a
    // default-constructed unique_ptr would hold an indeterminate pointer
    // (so operator bool / the destructor were undefined behavior).
    pointer ptr_ = pointer();
    deleter_type del_{};
};

/* Comparisons delegate to the owned pointers. */
template <typename T1, typename D1, typename T2, typename D2>
bool operator==(const unique_ptr<T1, D1>& u1, const unique_ptr<T2, D2>& u2) {
    return u1.get() == u2.get();
}

template <typename T1, typename D1, typename T2, typename D2>
bool operator!=(const unique_ptr<T1, D1>& u1, const unique_ptr<T2, D2>& u2) {
    return u1.get() != u2.get();
}

template <typename T1, typename D1, typename T2, typename D2>
bool operator<(const unique_ptr<T1, D1>& u1, const unique_ptr<T2, D2>& u2) {
    return u1.get() < u2.get();
}

template <typename T1, typename D1, typename T2, typename D2>
bool operator>(const unique_ptr<T1, D1>& u1, const unique_ptr<T2, D2>& u2) {
    return u1.get() > u2.get();
}

template <typename T1, typename D1, typename T2, typename D2>
bool operator<=(const unique_ptr<T1, D1>& u1, const unique_ptr<T2, D2>& u2) {
    return u1.get() <= u2.get();
}

template <typename T1, typename D1, typename T2, typename D2>
bool operator>=(const unique_ptr<T1, D1>& u1, const unique_ptr<T2, D2>& u2) {
    return u1.get() >= u2.get();
}

/* nullptr comparisons: the nullptr-first forms delegate to the mirrored
 * pointer-first forms. */
template <typename T, typename D>
bool operator==(const unique_ptr<T, D>& u1, nullptr_t) {
    return u1.get() == nullptr;
}

template <typename T, typename D>
bool operator==(nullptr_t, const unique_ptr<T, D>& u2) {
    return u2 == nullptr;
}

template <typename T, typename D>
bool operator!=(const unique_ptr<T, D>& u1, nullptr_t) {
    return u1.get() != nullptr;
}

template <typename T, typename D>
bool operator!=(nullptr_t, const unique_ptr<T, D>& u2) {
    return u2 != nullptr;
}

template <typename T, typename D>
bool operator<(const unique_ptr<T, D>& u1, nullptr_t) {
    return u1.get() < nullptr;
}

template <typename T, typename D>
bool operator<(nullptr_t, const unique_ptr<T, D>& u2) {
    return u2 > nullptr;
}

template <typename T, typename D>
bool operator>(const unique_ptr<T, D>& u1, nullptr_t) {
    return u1.get() > nullptr;
}

template <typename T, typename D>
bool operator>(nullptr_t, const unique_ptr<T, D>& u2) {
    return u2 < nullptr;
}

template <typename T, typename D>
bool operator>=(const unique_ptr<T, D>& u1, nullptr_t) {
    return u1.get() >= nullptr;
}

template <typename T, typename D>
bool operator>=(nullptr_t, const unique_ptr<T, D>& u2) {
    return u2 <= nullptr;
}

template <typename T, typename D>
bool operator<=(const unique_ptr<T, D>& u1, nullptr_t) {
    return u1.get() <= nullptr;
}

template <typename T, typename D>
bool operator<=(nullptr_t, const unique_ptr<T, D>& u2) {
    return u2 >= nullptr;
}

} // namespace std

#endif // __CC_MEMORY__
#ifndef __CC_OPTIONAL__
#define __CC_OPTIONAL__

#include <new>
#include <type_traits>
#include <utility>

namespace std {

namespace impl {

struct empty {};


/*
 * Raw storage for an optional's payload: a union of an empty tag and the
 * value, so T's constructor/destructor only run on demand. The owner
 * tracks whether val is alive and must pair construct()/destruct()
 * calls; the union itself never does either.
 */
template <typename T>
union storage {
    constexpr storage() : nothing() {}
    ~storage() {}


    template <typename... Args>
    void construct(Args&&... args) {
        ::new (static_cast<void*>(ptr())) T(forward<Args>(args)...);
    }

    void destruct() {
        ptr()->~T();
    }

    constexpr T* ptr() { return &val; }
    constexpr const T* ptr() const { return &val; }

    constexpr T& value() { return val; }
    constexpr const T& value() const { return val; }

    empty nothing;
    T val;
};

} // namespace impl


struct nullopt_t {};
inline constexpr nullopt_t nullopt = nullopt_t{};


/*
 * Minimal std::optional replacement for the freestanding kernel.
 *
 * NOTE(review): the converting constructor/assignment below are
 * unconstrained (cc/type_traits provides no enable_if/decay), so
 * constructing or assigning from a non-const optional lvalue selects
 * them instead of the copy operations — avoid those forms; confirm
 * against callers.
 */
template <typename T>
class optional {
public:
    using value_type = T;

    constexpr optional() = default;
    constexpr optional(nullopt_t) {}

    template <typename U>
    constexpr optional(U&& u)
    {
        emplace(forward<U>(u));
    }

    constexpr optional(const optional& other) {
        if (other.has_value()) {
            emplace(other.value());
        }
    }

    constexpr optional(optional&& other) {
        if (other.has_value()) {
            emplace(move(other.value()));
        }
    }

    ~optional() {
        reset();
    }

    constexpr optional& operator=(nullopt_t) {
        reset();
        return *this;
    }

    /*
     * Bug fix: the parameter used to be named `value`, shadowing the
     * value() accessor the body needs to call.
     */
    template <typename U>
    constexpr optional& operator=(U&& u) {
        if (has_value()) {
            value() = forward<U>(u);
        } else {
            emplace(forward<U>(u));
        }
        return *this;
    }

    constexpr optional& operator=(const optional& other) {
        if (other) {
            if (has_value()) {
                value() = *other;
            } else {
                emplace(*other);
            }
        } else {
            reset();
        }
        return *this;
    }

    constexpr optional& operator=(optional&& other) {
        if (other) {
            if (has_value()) {
                value() = move(*other);
            } else {
                emplace(move(*other));
            }
        } else {
            reset();
        }
        return *this;
    }

    constexpr const T* operator->() const {
        return storage_.ptr();
    }

    constexpr T* operator->() {
        return storage_.ptr();
    }

    constexpr const T& operator*() const & {
        return value();
    }

    constexpr T& operator*() & {
        return value();
    }

    constexpr const T&& operator*() const && {
        return move(value());
    }

    constexpr T&& operator*() && {
        return move(value());
    }

    constexpr explicit operator bool() const {
        return has_value();
    }

    constexpr bool has_value() const {
        return initialized_;
    }

    constexpr T& value() & {
        return storage_.value();
    }

    constexpr const T& value() const & {
        return storage_.value();
    }

    constexpr T&& value() && {
        return move(storage_.value());
    }

    constexpr const T&& value() const && {
        return move(storage_.value());
    }

    template <typename U>
    constexpr T value_or(U&& default_value) const & {
        if (has_value()) {
            return value();
        } else {
            return T(forward<U>(default_value));
        }
    }

    template <typename U>
    constexpr T value_or(U&& default_value) && {
        if (has_value()) {
            return move(value());
        } else {
            return T(forward<U>(default_value));
        }
    }

    /*
     * Exchanges the states of the two optionals.
     *
     * Bug fix: when only *this held a value, the old code emplaced the
     * value back into *itself* and then reset, so the value was
     * destroyed instead of handed over; it now moves the value into
     * `other`.
     */
    constexpr void swap(optional& other) {
        using std::swap;

        if (has_value() && other.has_value()) {
            swap(value(), other.value());
        } else if (has_value()) {
            other.emplace(move(value()));
            reset();
        } else if (other.has_value()) {
            emplace(move(other.value()));
            other.reset();
        }
    }

    constexpr void reset() {
        if (has_value()) {
            storage_.destruct();
            initialized_ = false;
        }
    }

    // Destroys any current value, then constructs a new one in place.
    template <typename... Args>
    constexpr T& emplace(Args&&... args) {
        reset();
        storage_.construct(forward<Args>(args)...);
        initialized_ = true;
        return value();
    }

private:
    bool initialized_ = false;
    impl::storage<T> storage_;
};

} // namespace std

#endif // __CC_OPTIONAL__
#ifndef __CC_UTILITY__
#define __CC_UTILITY__

#include <type_traits>


namespace std {

/*
 * move: unconditionally cast the argument to an rvalue reference so it
 * may be moved from.
 */
template <typename T>
remove_reference_t<T>&& move(T&& t) noexcept {
    return static_cast<remove_reference_t<T>&&>(t);
}

/*
 * forward: cast a deduced argument back to the value category it was
 * originally passed with.
 *
 * Bug fix: both overloads used to return remove_reference_t<T>&&, which
 * is always an rvalue reference — forwarding an lvalue (T deduced as U&)
 * silently turned it into an rvalue and allowed moving from it. The
 * standard signature returns T&&, which reference-collapses back to U&
 * for lvalues.
 */
template <typename T>
T&& forward(remove_reference_t<T>&& t) noexcept {
    // Forwarding an rvalue as an lvalue would create a dangling
    // reference; reject it at compile time.
    static_assert(!is_lvalue_reference_v<T>);
    return static_cast<T&&>(t);
}

template <typename T>
T&& forward(remove_reference_t<T>& t) noexcept {
    return static_cast<T&&>(t);
}

} // namespace std

#endif // __CC_UTILITY__
-I../c -I../cc 8 | 9 | CXXSRCS := stream.cc logging.cc string_view.cc intrusive_list.cc math.cc endian.cc 10 | CXXOBJS := $(CXXSRCS:.cc=.o) 11 | 12 | OBJS := $(CXXOBJS) 13 | 14 | default: all 15 | 16 | %.o: %.cc 17 | $(CXX) $(CXXFLAGS) -c $< -o $@ 18 | 19 | libcommon.a: $(OBJS) 20 | $(AR) rc $@ $^ 21 | 22 | -include $(CXXSRCS:.cc=.d) 23 | 24 | .PHONY: clean all default 25 | 26 | all: libcommon.a 27 | 28 | clean: 29 | rm -rf *.elf *.o *.d *.a 30 | -------------------------------------------------------------------------------- /common/allocator.h: -------------------------------------------------------------------------------- 1 | #ifndef __COMMON_ALLOCATOR_H__ 2 | #define __COMMON_ALLOCATOR_H__ 3 | 4 | #include "memory/memory.h" 5 | 6 | namespace common { 7 | 8 | template 9 | struct PhysicalAllocator { 10 | struct Header { 11 | memory::Contigous mem; 12 | union { 13 | T items[1]; 14 | }; 15 | 16 | Header() {} 17 | }; 18 | 19 | static Header* FromPointer(T* ptr) { 20 | if (ptr == nullptr) { 21 | return nullptr; 22 | } 23 | 24 | uintptr_t addr = reinterpret_cast(ptr) - Offset(); 25 | Header* head = reinterpret_cast(addr); 26 | if (head->mem.FromAddress() != addr) { 27 | return nullptr; 28 | } 29 | return head; 30 | } 31 | 32 | static size_t Offset() { 33 | Header head; 34 | return reinterpret_cast(&head.items[0]) - 35 | reinterpret_cast(&head.mem); 36 | } 37 | 38 | static size_t AllocationSize(size_t size) { 39 | if (size == 0) { 40 | return sizeof(Header); 41 | } 42 | return sizeof(Header) + (size - 1) * sizeof(T); 43 | } 44 | 45 | T* Allocate(size_t size); 46 | bool Grow(T* ptr, size_t size); 47 | bool Deallocate(T* ptr); 48 | }; 49 | 50 | template 51 | T* PhysicalAllocator::Allocate(size_t size) { 52 | auto mem = memory::AllocatePhysical(AllocationSize(size)); 53 | if (!mem) { 54 | return nullptr; 55 | } 56 | 57 | Header *header = reinterpret_cast(mem->FromAddress()); 58 | ::new (static_cast(header)) Header(); 59 | header->mem = *mem; 60 | return header->items; 61 
#include <cstdint>

/*
 * Byte-order conversion helpers (implementation for common/endian.h).
 * The typedefs are repeated here verbatim from endian.h so this file is
 * self-contained; redeclaring an identical typedef is legal C++.
 */
namespace common {

typedef uint64_t be64_t;
typedef uint32_t be32_t;
typedef uint16_t be16_t;
typedef uint8_t be8_t;

typedef uint64_t le64_t;
typedef uint32_t le32_t;
typedef uint16_t le16_t;
typedef uint8_t le8_t;

namespace {

/*
 * Byte-swap primitives.
 *
 * Bug fix: bswap16 used to shift the low byte left by 16 bits; in a
 * 16-bit result that truncates to zero, so the input's low byte was
 * simply dropped (and bswap32/bswap64, which are built from bswap16,
 * were corrupted too). The low byte must move up by 8 bits.
 */
uint16_t bswap16(uint16_t x)
{
    return (uint16_t)((x >> 8) | ((x & 0xff) << 8));
}

uint32_t bswap32(uint32_t x)
{
    return bswap16((uint16_t)(x >> 16)) |
           ((uint32_t)bswap16((uint16_t)(x & 0xffff)) << 16);
}

uint64_t bswap64(uint64_t x)
{
    return bswap32((uint32_t)(x >> 32)) |
           ((uint64_t)bswap32((uint32_t)(x & 0xffffffff)) << 32);
}

} // namespace


/* Single bytes have no byte order. */
uint8_t FromBigEndian(be8_t v) { return v; }
be8_t ToBigEndian(uint8_t v) { return v; }

uint8_t FromLittleEndian(le8_t v) { return v; }
le8_t ToLittleEndian(uint8_t v) { return v; }

#if defined(__LITTLE_ENDIAN__)

uint64_t FromBigEndian(be64_t v) { return bswap64(v); }
uint32_t FromBigEndian(be32_t v) { return bswap32(v); }
uint16_t FromBigEndian(be16_t v) { return bswap16(v); }

be64_t ToBigEndian(uint64_t v) { return bswap64(v); }
be32_t ToBigEndian(uint32_t v) { return bswap32(v); }
be16_t ToBigEndian(uint16_t v) { return bswap16(v); }

uint64_t FromLittleEndian(le64_t v) { return v; }
uint32_t FromLittleEndian(le32_t v) { return v; }
uint16_t FromLittleEndian(le16_t v) { return v; }

le64_t ToLittleEndian(uint64_t v) { return v; }
le32_t ToLittleEndian(uint32_t v) { return v; }
le16_t ToLittleEndian(uint16_t v) { return v; }

#elif defined(__BIG_ENDIAN__)

uint64_t FromBigEndian(be64_t v) { return v; }
uint32_t FromBigEndian(be32_t v) { return v; }
uint16_t FromBigEndian(be16_t v) { return v; }

be64_t ToBigEndian(uint64_t v) { return v; }
be32_t ToBigEndian(uint32_t v) { return v; }
be16_t ToBigEndian(uint16_t v) { return v; }

uint64_t FromLittleEndian(le64_t v) { return bswap64(v); }
uint32_t FromLittleEndian(le32_t v) { return bswap32(v); }
uint16_t FromLittleEndian(le16_t v) { return bswap16(v); }

le64_t ToLittleEndian(uint64_t v) { return bswap64(v); }
le32_t ToLittleEndian(uint32_t v) { return bswap32(v); }
le16_t ToLittleEndian(uint16_t v) { return bswap16(v); }

#else

/*
 * Neither endianness macro is predefined by this compiler, so decide at
 * run time by inspecting the first byte of a known 32-bit value.
 *
 * Bug fix: this branch used to spell the type "class enum" (a syntax
 * error — it is "enum class") and referred to the enumerators
 * unqualified, which scoped enums do not permit.
 */
enum class endianness {
    BIG,
    LITTLE,
};

static endianness detect(void)
{
    uint32_t x = 1;

    if (*reinterpret_cast<uint8_t*>(&x) == 1)
        return endianness::LITTLE;
    return endianness::BIG;
}

uint64_t FromBigEndian(be64_t v)
{
    return detect() == endianness::LITTLE ? bswap64(v) : v;
}

uint32_t FromBigEndian(be32_t v)
{
    return detect() == endianness::LITTLE ? bswap32(v) : v;
}

uint16_t FromBigEndian(be16_t v)
{
    return detect() == endianness::LITTLE ? bswap16(v) : v;
}

be64_t ToBigEndian(uint64_t v)
{
    return detect() == endianness::LITTLE ? bswap64(v) : v;
}

be32_t ToBigEndian(uint32_t v)
{
    return detect() == endianness::LITTLE ? bswap32(v) : v;
}

be16_t ToBigEndian(uint16_t v)
{
    return detect() == endianness::LITTLE ? bswap16(v) : v;
}

uint64_t FromLittleEndian(le64_t v)
{
    return detect() == endianness::LITTLE ? v : bswap64(v);
}

uint32_t FromLittleEndian(le32_t v)
{
    return detect() == endianness::LITTLE ? v : bswap32(v);
}

uint16_t FromLittleEndian(le16_t v)
{
    return detect() == endianness::LITTLE ? v : bswap16(v);
}

le64_t ToLittleEndian(uint64_t v)
{
    return detect() == endianness::LITTLE ? v : bswap64(v);
}

le32_t ToLittleEndian(uint32_t v)
{
    return detect() == endianness::LITTLE ? v : bswap32(v);
}

le16_t ToLittleEndian(uint16_t v)
{
    return detect() == endianness::LITTLE ? v : bswap16(v);
}

#endif

} // namespace common
40 | #endif // __COMMON_ENDIAN_H__ 41 | -------------------------------------------------------------------------------- /common/fixed_vector.h: -------------------------------------------------------------------------------- 1 | #ifndef __COMMON_FIXED_VECTOR_H__ 2 | #define __COMMON_FIXED_VECTOR_H__ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | 9 | namespace common { 10 | 11 | template 12 | class FixedVector { 13 | public: 14 | FixedVector() {} 15 | ~FixedVector() { Clear(); } 16 | 17 | FixedVector(const FixedVector& other); 18 | FixedVector(FixedVector&& other); 19 | FixedVector& operator=(const FixedVector& other); 20 | FixedVector& operator=(FixedVector&& other); 21 | 22 | bool Assign(size_t count, const T& item); 23 | 24 | bool PushBack(const T& item); 25 | template 26 | bool EmplaceBack(Args&&... args); 27 | 28 | bool Insert(const T* pos, const T& item); 29 | 30 | template 31 | bool Emplace(const T* pos, Args&&... args); 32 | 33 | T* Erase(T* pos); 34 | const T* Erase(const T* pos); 35 | T* Erase(T* begin, T* end); 36 | const T* Erase(const T* begin, const T* end); 37 | 38 | bool PopBack(); 39 | bool Resize(size_t size) { return Resize(size, T()); } 40 | bool Resize(size_t size, const T& value); 41 | 42 | void Clear() { 43 | for (size_t i = 0; i < size_; ++i) { 44 | DestroyAt(&items_[i]); 45 | } 46 | size_ = 0; 47 | } 48 | 49 | T& Front() { return items_[0]; } 50 | const T& Front() const { return items_[0]; } 51 | T& Back() { return items_[size_ - 1]; } 52 | const T& Back() const { return items_[size_ - 1]; } 53 | 54 | T& At(size_t pos) { return items_[pos]; } 55 | const T& At(size_t pos) const { return items_[pos]; } 56 | T& operator[](size_t pos) { return At(pos); } 57 | const T& operator[](size_t pos) const { return At(pos); } 58 | 59 | T* Data() { return items_; } 60 | const T* Data() const { return items_; } 61 | 62 | bool Empty() const { return size_ == 0; } 63 | size_t Size() const { return size_; } 64 | size_t MaxSize() const { return N; } 65 | 
size_t Capacity() const { return N; } 66 | 67 | T* Begin() { return items_; } 68 | const T* ConstBegin() const { return items_; } 69 | 70 | T* End() { return items_ + size_; } 71 | const T* ConstEnd() const { return items_ + size_; } 72 | 73 | private: 74 | bool Copy(const T* begin, const T* end) { 75 | const size_t count = end - begin; 76 | if (count > Capacity()) { 77 | return false; 78 | } 79 | Clear(); 80 | for (const T* it = begin; it != end; ++it) { 81 | PlaceAt(&items_[it - begin], *it); 82 | } 83 | size_ = count; 84 | return true; 85 | } 86 | 87 | bool Move(T* begin, T* end) { 88 | const size_t count = end - begin; 89 | if (count > Capacity()) { 90 | return false; 91 | } 92 | Clear(); 93 | for (T* it = begin; it != end; ++it) { 94 | PlaceAt(&items_[it - begin], std::move(*it)); 95 | } 96 | size_ = count; 97 | return true; 98 | } 99 | 100 | void PlaceAt(T* pos, const T& t) { 101 | ::new(static_cast(pos)) T(t); 102 | } 103 | 104 | void PlaceAt(T* pos, T&& t) { 105 | ::new(static_cast(pos)) T(std::move(t)); 106 | } 107 | 108 | template 109 | void PlaceAt(T* pos, Args&&... 
args) { 110 | ::new(static_cast(pos)) T(std::forward(args)...); 111 | } 112 | 113 | void DestroyAt(T* pos) { 114 | pos->~T(); 115 | } 116 | 117 | union { 118 | T items_[N]; 119 | }; 120 | size_t size_ = 0; 121 | }; 122 | 123 | 124 | template 125 | FixedVector::FixedVector(const FixedVector& other) { 126 | Copy(other.ConstBegin(), other.ConstEnd()); 127 | } 128 | 129 | template 130 | FixedVector::FixedVector(FixedVector&& other) { 131 | Move(other.Begin(), other.End()); 132 | } 133 | 134 | template 135 | FixedVector& FixedVector::operator=(const FixedVector& other) { 136 | if (this != &other) { 137 | Assign(other.ConstBegin(), other.ConstEnd()); 138 | } 139 | return *this; 140 | } 141 | 142 | template 143 | FixedVector& FixedVector::operator=(FixedVector&& other) { 144 | if (this != &other) { 145 | Move(other.Begin(), other.End()); 146 | } 147 | return *this; 148 | } 149 | 150 | template 151 | bool FixedVector::Assign(size_t count, const T& item) { 152 | if (count > Capacity()) { 153 | return false; 154 | } 155 | Clear(); 156 | for (size_t i = 0; i != count; ++i) { 157 | PlaceAt(&items_[i], item); 158 | } 159 | size_ = count; 160 | return true; 161 | } 162 | 163 | template 164 | bool FixedVector::PushBack(const T& item) { 165 | if (size_ == Capacity()) { 166 | return false; 167 | } 168 | PlaceAt(&items_[size_++], item); 169 | return true; 170 | } 171 | 172 | template 173 | template 174 | bool FixedVector::EmplaceBack(Args&&... args) { 175 | if (size_ == Capacity()) { 176 | return false; 177 | } 178 | PlaceAt(&items_[size_++], std::forward(args)...); 179 | return true; 180 | } 181 | 182 | template 183 | bool FixedVector::Insert(const T* pos, const T& item) { 184 | if (size_ == Capacity()) { 185 | return false; 186 | } 187 | 188 | PlaceAt(&items_[size_++], item); 189 | std::rotate( 190 | const_cast(pos), items_ + size_ - 1, items_ + size_); 191 | return true; 192 | } 193 | 194 | template 195 | template 196 | bool FixedVector::Emplace(const T* pos, Args&&... 
args) { 197 | if (size_ == Capacity()) { 198 | return false; 199 | } 200 | PlaceAt(&items_[size_++], std::forward(args)...); 201 | std::rotate( 202 | const_cast(pos), items_ + size_ - 1, items_ + size_); 203 | return true; 204 | } 205 | 206 | template 207 | T* FixedVector::Erase(T* pos) { 208 | std::rotate(pos, pos + 1, End()); 209 | DestroyAt(&items_[--size_]); /* fix: was DestoryAt (typo) — the helper defined above is DestroyAt, as used by Erase(begin, end) and PopBack */ 210 | return pos; 211 | } 212 | 213 | template 214 | const T* FixedVector::Erase(const T* pos) { 215 | return Erase(const_cast(pos)); 216 | } 217 | 218 | template 219 | T* FixedVector::Erase(T* begin, T* end) { 220 | const size_t count = end - begin; 221 | std::rotate(begin, begin + count, End()); 222 | for (T* it = End() - count; it != End(); ++it) { 223 | DestroyAt(it); 224 | } 225 | size_ -= count; 226 | return begin; 227 | } 228 | 229 | template 230 | const T* FixedVector::Erase(const T* begin, const T* end) { 231 | return Erase(const_cast(begin), const_cast(end)); 232 | } 233 | 234 | template 235 | bool FixedVector::PopBack() { 236 | if (Empty()) { 237 | return false; 238 | } 239 | 240 | DestroyAt(&items_[--size_]); 241 | return true; 242 | } 243 | 244 | template 245 | bool FixedVector::Resize(size_t size, const T& value) { 246 | if (size > Capacity()) { 247 | return false; 248 | } 249 | 250 | if (size > Size()) { 251 | for (size_t i = Size(); i < size; ++i) { 252 | PlaceAt(&items_[i], value); 253 | } 254 | } else { 255 | for (size_t i = size; i < Size(); ++i) { 256 | DestroyAt(&items_[i]); 257 | } 258 | } 259 | size_ = size; 260 | return true; 261 | } 262 | 263 | } // namespace common 264 | 265 | #endif // __COMMON_FIXED_VECTOR_H__ 266 | -------------------------------------------------------------------------------- /common/intrusive_list.cc: -------------------------------------------------------------------------------- 1 | #include "common/intrusive_list.h" 2 | 3 | 4 | namespace common { 5 | 6 | namespace impl { 7 | 8 | Iterator::Iterator() : pos_(nullptr) {} 9 | 10 | Iterator::Iterator(Link* pos)
: pos_(pos) {} 11 | 12 | void Iterator::Next() { if (pos_) pos_ = pos_->next; } 13 | 14 | void Iterator::Prev() { if (pos_) pos_ = pos_->prev; } 15 | 16 | Link* Iterator::Item() { return pos_; } 17 | 18 | const Link* Iterator::ConstItem() const { return pos_; } 19 | 20 | 21 | bool operator==(const Iterator& l, const Iterator& r) { 22 | return l.ConstItem() == r.ConstItem(); 23 | } 24 | 25 | bool operator!=(const Iterator& l, const Iterator& r) { 26 | return !(l == r); 27 | } 28 | 29 | 30 | List::List() { Clear(); } 31 | 32 | List::List(List&& other) { 33 | Clear(); 34 | Splice(Begin(), other); 35 | } 36 | 37 | List& List::operator=(List&& other) { 38 | if (this == &other) { 39 | return *this; 40 | } 41 | 42 | Clear(); 43 | Splice(Begin(), other); 44 | return *this; 45 | } 46 | 47 | Link* List::Begin() { return head_.next; } 48 | 49 | const Link* List::ConstBegin() const { return head_.next; } 50 | 51 | Link* List::End() { return &head_; } 52 | 53 | const Link* List::ConstEnd() const { return &head_; } 54 | 55 | bool List::Empty() const { return ConstBegin() == ConstEnd(); } 56 | 57 | void List::Clear() { 58 | head_.next = &head_; 59 | head_.prev = &head_; 60 | } 61 | 62 | void List::Swap(List& other) { 63 | List tmp = std::move(other); 64 | other = std::move(*this); 65 | *this = std::move(tmp); 66 | } 67 | 68 | void List::Splice(Link *pos, List& other) { 69 | if (other.Empty()) { 70 | return; 71 | } 72 | 73 | Link* first = other.Begin(); 74 | Link* last = other.End()->prev; 75 | other.Clear(); 76 | 77 | Link* prev = pos->prev; 78 | Link* next = pos; 79 | 80 | first->prev = prev; 81 | last->next = next; 82 | prev->next = first; 83 | next->prev = last; 84 | } 85 | 86 | void List::Splice(Link *pos, List&& other) { 87 | Splice(pos, other); 88 | } 89 | 90 | Link* List::LinkAt(Link *pos, Link *node) { 91 | Link* prev = pos->prev; 92 | Link* next = pos; 93 | 94 | node->next = next; 95 | node->prev = prev; 96 | prev->next = node; 97 | next->prev = node; 98 | 99 | return 
node; 100 | } 101 | 102 | Link* List::Unlink(Link *pos) { 103 | Link* prev = pos->prev; 104 | Link* next = pos->next; 105 | 106 | prev->next = next; 107 | next->prev = prev; 108 | pos->prev = nullptr; 109 | pos->next = nullptr; 110 | 111 | return next; 112 | } 113 | 114 | Link* List::Front() { 115 | if (Empty()) { 116 | return nullptr; 117 | } 118 | return Begin(); 119 | } 120 | 121 | Link* List::Back() { 122 | if (Empty()) { 123 | return nullptr; 124 | } 125 | return End()->prev; /* fix: was Back()->prev — unconditional self-recursion; the last element is the one before the sentinel, exactly as PopBack() computes it */ 126 | } 127 | 128 | Link* List::PopFront() { 129 | if (Empty()) { 130 | return nullptr; 131 | } 132 | 133 | Link* ptr = Begin(); 134 | Unlink(ptr); 135 | return ptr; 136 | } 137 | 138 | Link* List::PopBack() { 139 | if (Empty()) { 140 | return nullptr; 141 | } 142 | 143 | Link* ptr = End()->prev; 144 | Unlink(ptr); 145 | return ptr; 146 | } 147 | 148 | void List::PushBack(Link* link) { 149 | LinkAt(End(), link); 150 | } 151 | 152 | void List::PushFront(Link* link) { 153 | LinkAt(Begin(), link); 154 | } 155 | 156 | } // namespace impl 157 | 158 | } // namespace common 159 | -------------------------------------------------------------------------------- /common/logging.cc: -------------------------------------------------------------------------------- 1 | #include "common/logging.h" 2 | 3 | namespace common { 4 | 5 | namespace { 6 | 7 | struct NoopSink final : public OutputStream { 8 | ~NoopSink() override {} 9 | int Put(char) override { return 0; } 10 | int PutN(const char*, int n) override { return n; } 11 | }; 12 | 13 | OutputStream* logging_sink = nullptr; 14 | 15 | } // namespace 16 | 17 | void RegisterLog(OutputStream* out) { 18 | logging_sink = out; 19 | } 20 | 21 | OutputStream& Log() { 22 | static NoopSink noop_sink; 23 | 24 | if (logging_sink == nullptr) { 25 | return noop_sink; 26 | } 27 | return *logging_sink; 28 | } 29 | 30 | } // namespace common 31 | 32 | -------------------------------------------------------------------------------- /common/logging.h:
-------------------------------------------------------------------------------- 1 | #ifndef __COMMON_LOGGING_H__ 2 | #define __COMMON_LOGGING_H__ 3 | 4 | #include "common/stream.h" 5 | 6 | namespace common { 7 | 8 | void RegisterLog(OutputStream* out); 9 | OutputStream& Log(); 10 | 11 | } // namespace common 12 | 13 | #endif // __COMMON_LOGGING_H__ 14 | -------------------------------------------------------------------------------- /common/math.cc: -------------------------------------------------------------------------------- 1 | #include "common/math.h" 2 | 3 | 4 | namespace common { 5 | 6 | int LeastSignificantBit(uint64_t x) { 7 | if (x == 0) { 8 | return 64; 9 | } 10 | 11 | int lsb = 0; 12 | 13 | if ((x & 0xffffffff) == 0) { 14 | lsb += 32; 15 | x >>= 32; 16 | } 17 | if ((x & 0xffff) == 0) { 18 | lsb += 16; 19 | x >>= 16; 20 | } 21 | if ((x & 0xff) == 0) { 22 | lsb += 8; 23 | x >>= 8; 24 | } 25 | if ((x & 0xf) == 0) { 26 | lsb += 4; 27 | x >>= 4; 28 | } 29 | if ((x & 0x3) == 0) { 30 | lsb += 2; 31 | x >>= 2; 32 | } 33 | if ((x & 0x1) == 0) { 34 | lsb += 1; 35 | x >>= 1; 36 | } 37 | return lsb; 38 | } 39 | 40 | int MostSignificantBit(uint64_t x) { 41 | if (x == 0) { 42 | return -1; 43 | } 44 | 45 | int msb = 0; 46 | 47 | if ((x & 0xffffffff00000000) != 0) { 48 | msb += 32; 49 | x >>= 32; 50 | } 51 | if ((x & 0xffff0000) != 0) { 52 | msb += 16; 53 | x >>= 16; 54 | } 55 | if ((x & 0xff00) != 0) { 56 | msb += 8; 57 | x >>= 8; 58 | } 59 | if ((x & 0xf0) != 0) { 60 | msb += 4; 61 | x >>= 4; 62 | } 63 | if ((x & 0xc) != 0) { 64 | msb += 2; 65 | x >>= 2; 66 | } 67 | if ((x & 0x2) != 0) { 68 | msb += 1; 69 | x >>= 1; 70 | } 71 | return msb; 72 | } 73 | 74 | } // namespace common 75 | 76 | -------------------------------------------------------------------------------- /common/math.h: -------------------------------------------------------------------------------- 1 | #ifndef __COMMON_MATH_H__ 2 | #define __COMMON_MATH_H__ 3 | 4 | #include 5 | 6 | namespace common { 
7 | 8 | int LeastSignificantBit(uint64_t x); 9 | int MostSignificantBit(uint64_t x); 10 | 11 | template 12 | constexpr T AlignDown(T x, T alignment) { 13 | return x & ~(alignment - 1); 14 | } 15 | 16 | template 17 | constexpr T AlignUp(T x, T alignment) { 18 | return AlignDown(x + alignment - 1, alignment); 19 | } 20 | 21 | template 22 | constexpr T Clamp(T x, T from, T to) { 23 | if (x < from) { 24 | x = from; 25 | } 26 | if (x > to) { 27 | x = to; 28 | } 29 | return x; 30 | } 31 | 32 | template 33 | constexpr T Bits(T x, unsigned from, unsigned to) { 34 | const T m1 = ~((static_cast(1) << from) - 1); 35 | const T m2 = ~((static_cast(1) << to) - 1); 36 | return x & m1 & m2; 37 | } 38 | 39 | } // namespace common 40 | 41 | #endif // __COMMON_MATH_H__ 42 | -------------------------------------------------------------------------------- /common/stream.cc: -------------------------------------------------------------------------------- 1 | #include "common/stream.h" 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | 8 | namespace common { 9 | 10 | namespace { 11 | 12 | char* FormatNumber(char* buffer, int base, unsigned long long x) { 13 | static const char digit[] = "0123456789abcdef"; 14 | if (x == 0) { 15 | *buffer++ = '0'; 16 | return buffer; 17 | } 18 | 19 | char* e = buffer; 20 | while (x) { 21 | *e++ = digit[x % base]; 22 | x /= base; 23 | } 24 | 25 | for (char *l = buffer, *r = e; l < r;) { 26 | const char c = *l; 27 | --r; 28 | *l = *r; 29 | *r = c; 30 | l++; 31 | } 32 | 33 | return e; 34 | } 35 | 36 | void Write(OutputStream& out, const char* data, size_t n) { 37 | for (size_t written = 0; written < n;) { 38 | const int ret = out.PutN(&data[written], n - written); 39 | if (ret < 0) { 40 | return; 41 | } 42 | written += ret; 43 | } 44 | } 45 | 46 | } // namespace 47 | 48 | OutputStream::~OutputStream() {} 49 | 50 | int OutputStream::PutN(const char* data, int n) { 51 | for (int i = 0; i < n; ++i) { 52 | const int ret = this->Put(data[i]); 53 | if (ret < 0) { 
54 | if (i > 0) { 55 | return i; 56 | } 57 | return ret; 58 | } 59 | } 60 | return n; 61 | } 62 | 63 | OutputStream& operator<<(OutputStream& out, char c) { 64 | out.Put(c); 65 | return out; 66 | } 67 | 68 | OutputStream& operator<<(OutputStream& out, int x) { 69 | return out << static_cast(x); 70 | } 71 | 72 | OutputStream& operator<<(OutputStream& out, unsigned x) { 73 | return out << static_cast(x); 74 | } 75 | 76 | OutputStream& operator<<(OutputStream& out, long x) { 77 | return out << static_cast(x); 78 | } 79 | 80 | OutputStream& operator<<(OutputStream& out, unsigned long x) { 81 | return out << static_cast(x); 82 | } 83 | 84 | OutputStream& operator<<(OutputStream& out, long long x) { 85 | if (x < 0) { 86 | return out << "-" << -static_cast(x); /* fix: negate AFTER the unsigned cast — the old static_cast(-x) is signed-overflow UB when x == LLONG_MIN; unsigned negation yields the correct magnitude modulo 2^64 */ 87 | } 88 | return out << static_cast(x); 89 | } 90 | 91 | OutputStream& operator<<(OutputStream& out, unsigned long long x) { 92 | char buffer[128]; 93 | char* e = FormatNumber(buffer, 10, x); 94 | Write(out, buffer, e - buffer); 95 | return out; 96 | } 97 | 98 | OutputStream& operator<<(OutputStream& out, const char* str) { 99 | Write(out, str, strlen(str)); 100 | return out; 101 | } 102 | 103 | OutputStream& operator<<(OutputStream& out, const void* ptr) { 104 | const uintptr_t x = reinterpret_cast(ptr); 105 | char buffer[128] = "0x"; 106 | char* e = FormatNumber(buffer + 2, 16, x); 107 | Write(out, buffer, e - buffer); 108 | return out; 109 | } 110 | 111 | } // namespace common 112 | 113 | -------------------------------------------------------------------------------- /common/stream.h: -------------------------------------------------------------------------------- 1 | #ifndef __COMMON_STREAM_H__ 2 | #define __COMMON_STREAM_H__ 3 | 4 | namespace common { 5 | 6 | /* 7 | * OutputStream is a simple interface that represents something we can write 8 | * into. It's not intended to be used directly, instead specific 9 | * implementations are expected to inherit it and implement the virtual methods.
10 | * 11 | * TODO: for IO interfaces like that we should also define how errors are 12 | * reported, so far we expect errors to be reported as negative return 13 | * values of the functions. 14 | */ 15 | class OutputStream { 16 | public: 17 | virtual ~OutputStream(); 18 | 19 | /* 20 | * Put writes character @c to the OutputStream. 21 | * When successful returns non-negative value, otherwise returns a negative 22 | * value. 23 | */ 24 | virtual int Put(char c) = 0; 25 | 26 | /* 27 | * PutN writes @n characters from @data to the OutputStream. 28 | * If the function successfully writes anything to the OutputStream it 29 | * should return the number of characters it wrote. If the function failed 30 | * to write anything, it should return a negative value. In all other cases 31 | * (when @n is zero), it should return zero. 32 | */ 33 | virtual int PutN(const char* data, int n); 34 | }; 35 | 36 | OutputStream& operator<<(OutputStream& out, char c); 37 | OutputStream& operator<<(OutputStream& out, int x); 38 | OutputStream& operator<<(OutputStream& out, unsigned x); 39 | OutputStream& operator<<(OutputStream& out, long x); 40 | OutputStream& operator<<(OutputStream& out, unsigned long x); 41 | OutputStream& operator<<(OutputStream& out, long long x); 42 | OutputStream& operator<<(OutputStream& out, unsigned long long x); 43 | OutputStream& operator<<(OutputStream& out, const char* str); 44 | OutputStream& operator<<(OutputStream& out, const void* ptr); 45 | 46 | } // namespace common 47 | 48 | #endif // __COMMON_STREAM_H__ 49 | -------------------------------------------------------------------------------- /common/string_view.cc: -------------------------------------------------------------------------------- 1 | #include "string_view.h" 2 | 3 | #include 4 | #include 5 | 6 | 7 | namespace common { 8 | 9 | StringView::StringView() : data_(nullptr), size_(0) {} 10 | 11 | StringView::StringView(const char* str) : data_(str), size_(strlen(str)) {} 12 | 13 | 
StringView::StringView(const char* data, size_t size) 14 | : data_(data), size_(size) {} 15 | 16 | size_t StringView::Size() const { return size_; } 17 | 18 | size_t StringView::Length() const { return size_; } 19 | 20 | bool StringView::Empty() const { return size_ == 0; } 21 | 22 | char StringView::operator[](size_t pos) const { return data_[pos]; } 23 | 24 | const char* StringView::Data() const { return data_; } 25 | 26 | int StringView::Compare(const StringView& other) const { 27 | const size_t len = std::min(Size(), other.Size()); 28 | const int rc = strncmp(data_, other.data_, len); 29 | 30 | if (rc != 0) { 31 | return rc; 32 | } 33 | 34 | if (Size() != other.Size()) { 35 | return Size() < other.Size() ? -1 : 1; 36 | } 37 | 38 | return 0; 39 | } 40 | 41 | StringView StringView::Substr(size_t from, size_t count) const { 42 | size_t to = from + count; 43 | if (from > size_) { 44 | from = size_; 45 | } 46 | if (to > size_) { 47 | to = size_; 48 | } 49 | return StringView(&data_[from], to - from); 50 | } 51 | 52 | bool StringView::StartsWith(const StringView& prefix) const { 53 | return Substr(0, prefix.Size()) == prefix; 54 | } 55 | 56 | bool StringView::EndsWith(const StringView& suffix) const { 57 | if (suffix.Size() > Size()) { 58 | return false; 59 | } 60 | return Substr(Size() - suffix.Size(), suffix.Size()) == suffix; 61 | } 62 | 63 | bool operator<(const StringView& l, const StringView& r) { 64 | return l.Compare(r) < 0; 65 | } 66 | 67 | bool operator>(const StringView& l, const StringView& r) { 68 | return r < l; 69 | } 70 | 71 | bool operator<=(const StringView& l, const StringView& r) { 72 | return !(r < l); 73 | } 74 | 75 | bool operator>=(const StringView& l, const StringView& r) { 76 | return r <= l; 77 | } 78 | 79 | bool operator==(const StringView& l, const StringView& r) { 80 | return l.Compare(r) == 0; 81 | } 82 | 83 | bool operator!=(const StringView& l, const StringView& r) { 84 | return !(l == r); 85 | } 86 | 87 | } // namespace common 88 | 
-------------------------------------------------------------------------------- /common/string_view.h: -------------------------------------------------------------------------------- 1 | #ifndef __COMMON_STRING_VIEW_H__ 2 | #define __COMMON_STRING_VIEW_H__ 3 | 4 | #include 5 | 6 | 7 | namespace common { 8 | 9 | class StringView { 10 | public: 11 | StringView(); 12 | StringView(const char* cstr); 13 | StringView(const char* data, size_t size); 14 | 15 | StringView(const StringView& other) = default; 16 | StringView& operator=(const StringView& other) = default; 17 | 18 | 19 | size_t Size() const; 20 | size_t Length() const; 21 | bool Empty() const; 22 | 23 | char operator[](size_t pos) const; 24 | const char *Data() const; 25 | 26 | int Compare(const StringView& other) const; 27 | 28 | StringView Substr(size_t from, size_t count) const; 29 | 30 | bool StartsWith(const StringView& prefix) const; 31 | bool EndsWith(const StringView& suffix) const; 32 | bool Contains(const StringView& substr) const; 33 | 34 | private: 35 | const char* data_; 36 | size_t size_; 37 | }; 38 | 39 | bool operator==(const StringView& l, const StringView& r); 40 | bool operator!=(const StringView& l, const StringView& r); 41 | bool operator<(const StringView& l, const StringView& r); 42 | bool operator<=(const StringView& l, const StringView& r); 43 | bool operator>(const StringView& l, const StringView& r); 44 | bool operator>=(const StringView& l, const StringView& r); 45 | 46 | } // namespace common 47 | 48 | #endif // __COMMON_STRING_VIEW_H__ 49 | -------------------------------------------------------------------------------- /common/vector.h: -------------------------------------------------------------------------------- 1 | #ifndef __COMMON_VECTOR_H__ 2 | #define __COMMON_VECTOR_H__ 3 | 4 | #include 5 | 6 | namespace common { 7 | 8 | template 9 | class Vector : private A { 10 | public: 11 | Vector() {} 12 | Vector(A a) : A(a) {} 13 | 14 | ~Vector() { 15 | Clear(); 16 | 
A::Deallocate(items_); 17 | } 18 | 19 | Vector(const Vector& other) = delete; 20 | Vector& operator=(const Vector& other) = delete; 21 | 22 | Vector(Vector&& other); 23 | Vector& operator=(Vector&& other); 24 | 25 | bool Assign(size_t count, const T& item); 26 | void Swap(Vector& other); 27 | 28 | bool PushBack(const T& item); 29 | template 30 | bool EmplateBack(Args&&... args); 31 | 32 | bool Insert(const T* pos, const T& item); 33 | 34 | template 35 | bool Emplace(const T* pos, Args&&... args); 36 | 37 | T* Erase(T* pos); 38 | const T* Erase(const T* pos); 39 | T* Erase(T* begin, T* end); 40 | const T* Erase(const T* begin, const T* end); 41 | 42 | bool PopBack(); 43 | bool Resize(size_t size) { return Resize(size, T()); } 44 | bool Resize(size_t size, const T& value); 45 | 46 | void Clear() { 47 | for (size_t i = 0; i < size_; ++i) { 48 | items_[i].~T(); 49 | } 50 | size_ = 0; 51 | } 52 | 53 | T& Front() { return items_[0]; } 54 | const T& Front() const { return items_[0]; } 55 | T& Back() { return items_[size_ - 1]; } 56 | const T& Back() const { return items_[size_ - 1]; } 57 | 58 | T& At(size_t pos) { return items_[pos]; } 59 | const T& At(size_t pos) const { return items_[pos]; } 60 | T& operator[](size_t pos) { return At(pos); } 61 | const T& operator[](size_t pos) const { return At(pos); } 62 | 63 | T* Data() { return items_; } 64 | const T* Data() const { return items_; } 65 | 66 | bool Empty() const { return size_ == 0; } 67 | size_t Size() const { return size_; } 68 | size_t Capacity() const { return capacity_; } 69 | 70 | T* Begin() { return items_; } 71 | const T* ConstBegin() const { return items_; } 72 | T* End() { return items_ + size_; } 73 | const T* ConstEnd() const { return items_ + size_; } 74 | 75 | private: 76 | bool Reallocate(size_t capacity) { 77 | T* items = A::Allocate(capacity); 78 | if (items == nullptr) { 79 | return false; 80 | } 81 | 82 | for (size_t i = 0; i < size_; ++i) { 83 | ::new(static_cast(&items[i])) 
T(std::move(items_[i])); 84 | items_[i].~T(); 85 | } 86 | 87 | A::Deallocate(items_); 88 | items_ = items; 89 | capacity_ = capacity; 90 | return true; 91 | } 92 | 93 | bool Grow(size_t capacity) { 94 | if (Capacity() >= capacity) { 95 | return true; 96 | } 97 | 98 | capacity = std::max(Capacity() * 3 / 2, capacity); /* fix: was * 2 / 3 — a factor below 1 makes std::max always pick the exact request, so every PushBack reallocates (O(n^2)); 1.5x restores amortized O(1) growth */ 99 | if (A::Grow(items_, capacity)) { 100 | capacity_ = capacity; 101 | return true; 102 | } 103 | 104 | return Reallocate(capacity); 105 | } 106 | 107 | void PlaceAt(T* pos, const T& t) { 108 | ::new(static_cast(pos)) T(t); 109 | } 110 | 111 | void PlaceAt(T* pos, T&& t) { /* fix: was (T& pos) — every call site passes T*, so the move overload was never viable; now matches the copy/emplace overloads */ 112 | ::new(static_cast(pos)) T(std::move(t)); 113 | } 114 | 115 | template 116 | void PlaceAt(T* pos, Args&&... args) { 117 | ::new(static_cast(pos)) T(std::forward(args)...); 118 | } 119 | 120 | T *items_ = nullptr; 121 | size_t size_ = 0; 122 | size_t capacity_ = 0; 123 | }; 124 | 125 | template 126 | Vector::Vector(Vector&& other) { 127 | Vector empty; 128 | empty.Swap(*this); 129 | Swap(other); 130 | } 131 | 132 | template 133 | Vector& Vector::operator=(Vector&& other) { 134 | if (this != &other) { 135 | Vector empty; 136 | empty.Swap(*this); 137 | Swap(other); 138 | } 139 | return *this; 140 | } 141 | 142 | template 143 | bool Vector::Assign(size_t count, const T& item) { 144 | if (!Grow(count)) { 145 | return false; 146 | } 147 | 148 | Clear(); 149 | for (size_t i = 0; i != count; ++i) { 150 | PlaceAt(&items_[i], item); 151 | } 152 | size_ = count; 153 | return true; 154 | } 155 | 156 | template 157 | void Vector::Swap(Vector& other) { 158 | std::swap(items_, other.items_); /* fix: unqualified Swap resolved to the member Swap(Vector&), which cannot take T* / size_t; std::swap exchanges the raw members, consistent with the std::max/std::move usage above */ 159 | std::swap(size_, other.size_); 160 | std::swap(capacity_, other.capacity_); 161 | } 162 | 163 | template 164 | bool Vector::PushBack(const T& item) { 165 | if (!Grow(size_ + 1)) { 166 | return false; 167 | } 168 | PlaceAt(&items_[size_++], item); 169 | return true; 170 | } 171 | 172 | template 173 | template 174 | bool Vector::EmplateBack(Args&&...
args) { 175 | if (!Grow(size_ + 1)) { 176 | return false; 177 | } 178 | 179 | PlaceAt(&items_[size_++], std::forward(args)...); 180 | return true; 181 | } 182 | 183 | template 184 | bool Vector::Insert(const T* pos, const T& item) { 185 | size_t off = pos - ConstBegin(); 186 | if (!Grow(size_ + 1)) { 187 | return false; 188 | } 189 | 190 | PlaceAt(&items_[size_++], item); 191 | RotateRight(Begin() + off, items_ + size_, 1); 192 | return true; 193 | } 194 | 195 | template 196 | template 197 | bool Vector::Emplace(const T* pos, Args&&... args) { 198 | size_t off = pos - ConstBegin(); 199 | if (!Grow(size_ + 1)) { 200 | return false; 201 | } 202 | 203 | PlaceAt(&items_[size_++], std::forward(args)...); 204 | RotateRight(Begin() + off, items_ + size_, 1); 205 | return true; 206 | } 207 | 208 | template 209 | T* Vector::Erase(T* pos) { 210 | RotateLeft(pos, End(), 1); 211 | items_[--size_].~T(); 212 | return pos; 213 | } 214 | 215 | template 216 | const T* Vector::Erase(const T* pos) { 217 | return Erase(const_cast(pos)); 218 | } 219 | 220 | template 221 | T* Vector::Erase(T* begin, T* end) { 222 | const size_t count = end - begin; 223 | RotateLeft(begin, End(), count); 224 | for (T* it = End() - count; it != End(); ++it) { 225 | it->~T(); 226 | } 227 | size_ -= count; 228 | return begin; 229 | } 230 | 231 | template 232 | const T* Vector::Erase(const T* begin, const T* end) { 233 | return Erase(const_cast(begin), const_cast(end)); 234 | } 235 | 236 | template 237 | bool Vector::PopBack() { 238 | if (Empty()) { 239 | return false; 240 | } 241 | 242 | items_[--size_].~T(); 243 | return true; 244 | } 245 | 246 | template 247 | bool Vector::Resize(size_t size, const T& item) { 248 | if (!Grow(size)) { 249 | return false; 250 | } 251 | 252 | if (size > Size()) { 253 | for (size_t i = Size(); i < size; ++i) { 254 | PlaceAt(&items_[i], item); 255 | } 256 | } else { 257 | for (size_t i = size; i < Size(); ++i) { 258 | items_[i].~T(); 259 | } 260 | } 261 | size_ = size; 262 | 
return true; 263 | } 264 | 265 | } // common 266 | 267 | #endif // __COMMON_VECTOR_H__ 268 | -------------------------------------------------------------------------------- /fdt/Makefile: -------------------------------------------------------------------------------- 1 | CXX := clang++ 2 | AR := llvm-ar 3 | CXXFLAGS := \ 4 | -MMD -mno-red-zone -std=c++17 -ffreestanding -fno-threadsafe-statics \ 5 | -fno-exceptions -fno-rtti -Ofast -fPIE -target aarch64-unknown-none \ 6 | -Wall -Werror -Wframe-larger-than=1024 -pedantic -I.. -I../c -I../cc 7 | 8 | CXXSRCS := scanner.cc blob.cc 9 | CXXOBJS := $(CXXSRCS:.cc=.o) 10 | 11 | OBJS := $(CXXOBJS) 12 | 13 | default: all 14 | 15 | %.o: %.cc 16 | $(CXX) $(CXXFLAGS) -c $< -o $@ 17 | 18 | libfdt.a: $(OBJS) 19 | $(AR) rc $@ $^ 20 | 21 | -include $(CXXSRCS:.cc=.d) 22 | 23 | .PHONY: clean all default 24 | 25 | all: libfdt.a 26 | 27 | clean: 28 | rm -rf *.o *.d *.a 29 | -------------------------------------------------------------------------------- /fdt/blob.cc: -------------------------------------------------------------------------------- 1 | #include "fdt/blob.h" 2 | 3 | namespace fdt { 4 | 5 | namespace { 6 | 7 | const uint32_t kAlignment = 4; 8 | const uint32_t kVersion = 17; 9 | const uint32_t kMagic = 0xd00dfeed; 10 | 11 | bool ParseReserved( 12 | Scanner scanner, Span>* reserved) { 13 | Range range; 14 | for (size_t count = 0; scanner.ConsumeRange(&range); ++count) { 15 | if (range.begin == 0 && range.size == 0) { 16 | *reserved = Span>( 17 | scanner.Data(), count); 18 | return true; 19 | } 20 | } 21 | return false; 22 | } 23 | 24 | bool ParseHeader(Scanner* scanner, Blob::Header *header) { 25 | if (!scanner->ConsumeBe32(&header->magic)) { 26 | return false; 27 | } 28 | if (!scanner->ConsumeBe32(&header->totalsize)) { 29 | return false; 30 | } 31 | if (!scanner->ConsumeBe32(&header->off_dt_struct)) { 32 | return false; 33 | } 34 | if (!scanner->ConsumeBe32(&header->off_dt_strings)) { 35 | return false; 36 | } 37 | if 
(!scanner->ConsumeBe32(&header->off_mem_rsvmap)) { 38 | return false; 39 | } 40 | if (!scanner->ConsumeBe32(&header->version)) { 41 | return false; 42 | } 43 | if (!scanner->ConsumeBe32(&header->last_comp_version)) { 44 | return false; 45 | } 46 | if (!scanner->ConsumeBe32(&header->boot_cpuid_phys)) { 47 | return false; 48 | } 49 | if (!scanner->ConsumeBe32(&header->size_dt_strings)) { 50 | return false; 51 | } 52 | if (!scanner->ConsumeBe32(&header->size_dt_struct)) { 53 | return false; 54 | } 55 | if (header->magic != kMagic) { 56 | return false; 57 | } 58 | if (header->totalsize > scanner->Size()) { 59 | return false; 60 | } 61 | if (header->last_comp_version > kVersion) { 62 | return false; 63 | } 64 | if (header->off_mem_rsvmap + 2 * sizeof(uint64_t) > header->totalsize) { 65 | return false; 66 | } 67 | if (header->off_dt_strings + header->size_dt_strings > header->totalsize) { 68 | return false; 69 | } 70 | if (header->off_dt_struct + header->size_dt_struct > header->totalsize) { 71 | return false; 72 | } 73 | return true; 74 | } 75 | 76 | bool EnsureToken(Scanner* pos, Token token) { 77 | Token actual; 78 | 79 | if (!pos->ConsumeToken(&actual)) { 80 | return false; 81 | } 82 | 83 | return actual == token; 84 | } 85 | 86 | bool ParseStartNode(Scanner *pos, Node *node) { 87 | if (!EnsureToken(pos, Token::BEGIN_NODE)) { 88 | return false; 89 | } 90 | 91 | common::StringView name; 92 | 93 | if (!pos->ConsumeCstr(&name)) { 94 | return false; 95 | } 96 | 97 | if (!pos->AlignForward(kAlignment)) { 98 | return false; 99 | } 100 | 101 | *node = Node(name, *pos); 102 | return true; 103 | } 104 | 105 | } // namespace 106 | 107 | bool Property::ValueAsBe32(uint32_t* value) const { 108 | Scanner scan(data, size); 109 | 110 | if (!scan.ConsumeBe32(value)) { 111 | return false; 112 | } 113 | return true; 114 | } 115 | 116 | bool Property::ValueAsBe64(uint64_t* value) const { 117 | Scanner scan(data, size); 118 | 119 | if (!scan.ConsumeBe64(value)) { 120 | return false; 121 
| } 122 | return true; 123 | } 124 | 125 | bool Blob::SkipNode(Scanner* pos) const { 126 | Scanner copy = *pos; 127 | Node node; 128 | Property property; 129 | int depth = 1; 130 | 131 | while (true) { 132 | Token token; 133 | if (!TokenAt(copy, &token)) { 134 | return false; 135 | } 136 | switch (token) { 137 | case Token::BEGIN_NODE: 138 | if (!ConsumeStartNode(©, &node)) { 139 | return false; 140 | } 141 | ++depth; 142 | break; 143 | case Token::END_NODE: 144 | if (!ConsumeEndNode(©)) { 145 | return false; 146 | } 147 | if (--depth == 0) { 148 | *pos = copy; 149 | return true; 150 | } 151 | break; 152 | case Token::PROP: 153 | if (!ConsumeProperty(©, &property)) { 154 | return false; 155 | } 156 | break; 157 | case Token::NOP: 158 | if (!ConsumeNop(©)) { 159 | return false; 160 | } 161 | break; 162 | case Token::END: 163 | return false; 164 | } 165 | } 166 | return false; 167 | } 168 | 169 | bool Blob::TokenAt(const Scanner& pos, Token* token) const { 170 | Scanner copy = pos; 171 | return copy.ConsumeToken(token); 172 | } 173 | 174 | bool Blob::ConsumeStartNode(Scanner* pos, Node* node) const { 175 | Scanner copy = *pos; 176 | 177 | if (!ParseStartNode(©, node)) { 178 | return false; 179 | } 180 | *pos = copy; 181 | return true; 182 | } 183 | 184 | bool Blob::ConsumeProperty(Scanner* pos, Property *property) const { 185 | Scanner copy = *pos; 186 | 187 | if (!EnsureToken(©, Token::PROP)) { 188 | return false; 189 | } 190 | 191 | uint32_t size = 0; 192 | 193 | if (!copy.ConsumeBe32(&size)) { 194 | return false; 195 | } 196 | 197 | uint32_t off = 0; 198 | 199 | if (!copy.ConsumeBe32(&off)) { 200 | return false; 201 | } 202 | 203 | if (off >= strsz_) { 204 | return false; 205 | } 206 | 207 | const uint8_t *data = nullptr; 208 | 209 | if (!copy.ConsumeBytes(size, &data)) { 210 | return false; 211 | } 212 | 213 | if (!copy.AlignForward(kAlignment)) { 214 | return false; 215 | } 216 | 217 | common::StringView name; 218 | Scanner str(&str_[off], strsz_ - off); 219 | 
220 | if (!str.ConsumeCstr(&name)) { 221 | return false; 222 | } 223 | 224 | *property = Property(name, data, size); 225 | *pos = copy; 226 | return true; 227 | } 228 | 229 | bool Blob::ConsumeEndNode(Scanner* pos) const { 230 | return EnsureToken(pos, Token::END_NODE); 231 | } 232 | 233 | bool Blob::ConsumeNop(Scanner* pos) const { 234 | return EnsureToken(pos, Token::NOP); 235 | } 236 | 237 | bool Blob::Parse(const uint8_t *data, size_t size, Blob *blob) { 238 | Scanner scanner(data, size); 239 | Header header; 240 | 241 | if (!ParseHeader(&scanner, &header)) { 242 | return false; 243 | } 244 | 245 | Scanner nodes(data + header.off_dt_struct, header.size_dt_struct); 246 | Node root; 247 | 248 | if (!ParseStartNode(&nodes, &root)) { 249 | return false; 250 | } 251 | 252 | Scanner rsv( 253 | data + header.off_mem_rsvmap, 254 | header.totalsize - header.off_mem_rsvmap); 255 | Span> reserved; 256 | if (!ParseReserved(rsv, &reserved)) { 257 | return false; 258 | } 259 | 260 | const uint8_t *str = data + header.off_dt_strings; 261 | const size_t strsz = header.size_dt_strings; 262 | 263 | *blob = Blob(header, root, reserved, str, strsz); 264 | return true; 265 | } 266 | 267 | } // namespace fdt 268 | -------------------------------------------------------------------------------- /fdt/blob.h: -------------------------------------------------------------------------------- 1 | #ifndef __FDT_BLOB_H__ 2 | #define __FDT_BLOB_H__ 3 | 4 | #include "fdt/scanner.h" 5 | #include "fdt/span.h" 6 | #include "common/string_view.h" 7 | 8 | 9 | namespace fdt { 10 | 11 | struct Property { 12 | common::StringView name; 13 | const uint8_t *data = nullptr; 14 | size_t size = 0; 15 | 16 | Property() = default; 17 | Property(const common::StringView& name, const uint8_t *data, size_t size) 18 | : name(name), data(data), size(size) {} 19 | 20 | bool ValueAsBe32(uint32_t* data) const; 21 | bool ValueAsBe64(uint64_t* data) const; 22 | 23 | template 24 | bool ValueAsSpan(Span* span) const { 25 
| if (size % detail::Parser::kWireSize != 0) { 26 | return false; 27 | } 28 | *span = Span(data, size / detail::Parser::kWireSize); 29 | return true; 30 | } 31 | }; 32 | 33 | 34 | struct Node { 35 | common::StringView name; 36 | Scanner offset; 37 | 38 | Node() = default; 39 | Node(const common::StringView& name, Scanner off) 40 | : name(name), offset(off) {} 41 | }; 42 | 43 | 44 | class Blob { 45 | public: 46 | struct Header { 47 | uint32_t magic = 0; 48 | uint32_t totalsize = 0; 49 | uint32_t off_dt_struct = 0; 50 | uint32_t off_dt_strings = 0; 51 | uint32_t off_mem_rsvmap = 0; 52 | uint32_t version = 0; 53 | uint32_t last_comp_version = 0; 54 | uint32_t boot_cpuid_phys = 0; 55 | uint32_t size_dt_strings = 0; 56 | uint32_t size_dt_struct = 0; 57 | }; 58 | 59 | static bool Parse(const uint8_t *data, size_t size, Blob *blob); 60 | 61 | Blob() : str_(nullptr), strsz_(0) {} 62 | 63 | Blob(const Blob& other) = default; 64 | Blob(Blob&& other) = default; 65 | Blob& operator=(const Blob& other) = default; 66 | Blob& operator=(Blob&& other) = default; 67 | 68 | uint32_t Version() const { return header_.version; } 69 | uint32_t BootCPU() const { return header_.boot_cpuid_phys; } 70 | 71 | Node Root() const { return root_; } 72 | Span> Reserved() const { return reserved_; } 73 | 74 | bool SkipNode(Scanner* pos) const; 75 | bool TokenAt(const Scanner& pos, Token* token) const; 76 | bool ConsumeStartNode(Scanner* pos, Node* node) const; 77 | bool ConsumeEndNode(Scanner* pos) const; 78 | bool ConsumeProperty(Scanner* pos, Property* property) const; 79 | bool ConsumeNop(Scanner* pos) const; 80 | 81 | private: 82 | Blob( 83 | Header header, 84 | Node root, 85 | Span> reserved, 86 | const uint8_t *str, size_t strsz) 87 | : header_(header) 88 | , root_(root) 89 | , reserved_(reserved) 90 | , str_(str) 91 | , strsz_(strsz) 92 | {} 93 | 94 | Header header_; 95 | Node root_; 96 | Span> reserved_; 97 | const uint8_t *str_; 98 | size_t strsz_; 99 | }; 100 | 101 | } // namespace fdt 
102 | 103 | #endif // __FDT_BLOB_H__ 104 | -------------------------------------------------------------------------------- /fdt/scanner.cc: -------------------------------------------------------------------------------- 1 | #include "fdt/scanner.h" 2 | 3 | namespace fdt { 4 | 5 | bool Range::Parse(Scanner* pos) { 6 | return pos->ConsumeBe32(&begin) && pos->ConsumeBe32(&size); 7 | } 8 | 9 | bool Range::Parse(Scanner* pos) { 10 | return pos->ConsumeBe64(&begin) && pos->ConsumeBe64(&size); 11 | } 12 | 13 | bool Range::Parse(Scanner* pos) { 14 | return pos->ConsumeBe32(&begin) && pos->ConsumeBe64(&size); 15 | } 16 | 17 | bool Range::Parse(Scanner* pos) { 18 | return pos->ConsumeBe64(&begin) && pos->ConsumeBe32(&size); 19 | } 20 | 21 | Scanner::Scanner() : data_(nullptr), size_(0), off_(0) {} 22 | 23 | Scanner::Scanner(const uint8_t* data, size_t size) 24 | : data_(data), size_(size), off_(0) {} 25 | 26 | bool Scanner::ConsumeBe32(uint32_t* val) { 27 | if (off_ + sizeof(uint32_t) > size_) { 28 | return false; 29 | } 30 | 31 | uint32_t x = 0; 32 | 33 | for (size_t i = 0; i < sizeof(uint32_t); i++) { 34 | x = (x << 8) | data_[off_ + i]; 35 | } 36 | 37 | *val = x; 38 | off_ += sizeof(uint32_t); 39 | return true; 40 | } 41 | 42 | bool Scanner::ConsumeBe64(uint64_t* val) { 43 | if (off_ + sizeof(uint64_t) > size_) { 44 | return false; 45 | } 46 | 47 | uint64_t x = 0; 48 | 49 | for (size_t i = 0; i < sizeof(uint64_t); i++) { 50 | x = (x << 8) | data_[off_ + i]; 51 | } 52 | 53 | *val = x; 54 | off_ += sizeof(uint64_t); 55 | return true; 56 | } 57 | 58 | bool Scanner::ConsumeCstr(const char** str) { 59 | common::StringView s; 60 | 61 | if (!ConsumeCstr(&s)) { 62 | return false; 63 | } 64 | 65 | *str = s.Data(); 66 | return true; 67 | } 68 | 69 | bool Scanner::ConsumeCstr(common::StringView* str) { 70 | const char *start = reinterpret_cast(&data_[off_]); 71 | 72 | for (size_t size = 0; off_ + size < size_; size++) { 73 | if (start[size] == '\0') { 74 | *str = 
common::StringView(start, size); 75 | off_ += size + 1; 76 | return true; 77 | } 78 | } 79 | 80 | return false; 81 | } 82 | 83 | bool Scanner::ConsumeBytes(size_t size, const uint8_t** data) { 84 | if (off_ + size > size_) { 85 | return false; 86 | } 87 | 88 | *data = &data_[off_]; 89 | off_ += size; 90 | return true; 91 | } 92 | 93 | bool Scanner::ConsumeToken(Token* token) { 94 | Scanner copy = *this; 95 | uint32_t val = 0; 96 | 97 | if (!copy.ConsumeBe32(&val)) { 98 | return false; 99 | } 100 | 101 | switch (val) { 102 | case static_cast(Token::BEGIN_NODE): 103 | case static_cast(Token::END_NODE): 104 | case static_cast(Token::PROP): 105 | case static_cast(Token::NOP): 106 | case static_cast(Token::END): 107 | *token = static_cast(val); 108 | break; 109 | default: 110 | return false; 111 | } 112 | 113 | *this = copy; 114 | return true; 115 | } 116 | 117 | bool Scanner::AlignForward(size_t alignment) { 118 | if (alignment == 0 || off_ % alignment == 0) { 119 | return true; 120 | } 121 | 122 | const size_t shift = alignment - off_ % alignment; 123 | 124 | if (off_ + shift > size_) { 125 | return false; 126 | } 127 | 128 | off_ += shift; 129 | return true; 130 | } 131 | 132 | bool operator==(const Scanner& l, const Scanner& r) { 133 | uintptr_t laddr = reinterpret_cast(l.data_) + l.off_; 134 | uintptr_t raddr = reinterpret_cast(r.data_) + r.off_; 135 | return laddr == raddr; 136 | } 137 | 138 | bool operator!=(const Scanner& l, const Scanner& r) { 139 | return !(l == r); 140 | } 141 | 142 | } // namespace fdt 143 | -------------------------------------------------------------------------------- /fdt/scanner.h: -------------------------------------------------------------------------------- 1 | #ifndef __FDT_SCANNER_H__ 2 | #define __FDT_SCANNER_H__ 3 | 4 | #include 5 | #include 6 | #include "common/string_view.h" 7 | 8 | namespace fdt { 9 | 10 | enum class Token { 11 | BEGIN_NODE = 1, 12 | END_NODE = 2, 13 | PROP = 3, 14 | NOP = 4, 15 | END = 9, 16 | }; 17 | 18 | 
19 | class Scanner; 20 | 21 | template 22 | struct Range; 23 | 24 | template <> 25 | struct Range { 26 | uint32_t begin; 27 | uint32_t size; 28 | 29 | bool Parse(Scanner* pos); 30 | }; 31 | 32 | template <> 33 | struct Range { 34 | uint64_t begin; 35 | uint64_t size; 36 | 37 | bool Parse(Scanner* pos); 38 | }; 39 | 40 | template <> 41 | struct Range { 42 | uint32_t begin; 43 | uint64_t size; 44 | 45 | bool Parse(Scanner* pos); 46 | }; 47 | 48 | template <> 49 | struct Range { 50 | uint64_t begin; 51 | uint32_t size; 52 | 53 | bool Parse(Scanner* pos); 54 | }; 55 | 56 | 57 | class Scanner { 58 | public: 59 | Scanner(); 60 | Scanner(const uint8_t* data, size_t size); 61 | 62 | Scanner(const Scanner& other) = default; 63 | Scanner(Scanner&& other) = default; 64 | Scanner& operator=(const Scanner& other) = default; 65 | Scanner& operator=(Scanner&& other) = default; 66 | 67 | const uint8_t* Data() const { return data_; } 68 | size_t Size() const { return size_; } 69 | size_t Offset() const { return off_; } 70 | 71 | bool ConsumeBe32(uint32_t* val); 72 | bool ConsumeBe64(uint64_t* val); 73 | bool ConsumeCstr(const char** str); 74 | bool ConsumeCstr(common::StringView* str); 75 | bool ConsumeBytes(size_t size, const uint8_t** data); 76 | bool ConsumeToken(Token* token); 77 | bool AlignForward(size_t alignment); 78 | 79 | template 80 | bool ConsumeRange(Range* range) { 81 | Scanner copy = *this; 82 | if (!range->Parse(©)) { 83 | return false; 84 | } 85 | *this = copy; 86 | return true; 87 | } 88 | 89 | friend bool operator==(const Scanner& l, const Scanner& r); 90 | 91 | private: 92 | const uint8_t* data_; 93 | size_t size_; 94 | size_t off_; 95 | }; 96 | 97 | bool operator!=(const Scanner& l, const Scanner& r); 98 | 99 | } // namespace fdt 100 | 101 | #endif // __FDT_SCANNER_H__ 102 | -------------------------------------------------------------------------------- /fdt/span.h: -------------------------------------------------------------------------------- 1 | #ifndef 
__FDT_SPAN_H__ 2 | #define __FDT_SPAN_H__ 3 | 4 | #include 5 | 6 | #include "fdt/scanner.h" 7 | 8 | 9 | namespace fdt { 10 | 11 | namespace detail { 12 | 13 | template 14 | struct Parser; 15 | 16 | template <> 17 | struct Parser { 18 | static constexpr size_t kWireSize = 4; 19 | 20 | static bool Parse(Scanner* pos, uint32_t* item) { 21 | return pos->ConsumeBe32(item); 22 | } 23 | }; 24 | 25 | template <> 26 | struct Parser { 27 | static constexpr size_t kWireSize = 8; 28 | 29 | static bool Parse(Scanner* pos, uint64_t* item) { 30 | return pos->ConsumeBe64(item); 31 | } 32 | }; 33 | 34 | template <> 35 | struct Parser> { 36 | static constexpr size_t kWireSize = 8; 37 | 38 | static bool Parse(Scanner* pos, Range* item) { 39 | return pos->ConsumeRange(item); 40 | } 41 | }; 42 | 43 | template <> 44 | struct Parser> { 45 | static constexpr size_t kWireSize = 16; 46 | 47 | static bool Parse(Scanner* pos, Range* item) { 48 | return pos->ConsumeRange(item); 49 | } 50 | }; 51 | 52 | template <> 53 | struct Parser> { 54 | static constexpr size_t kWireSize = 12; 55 | 56 | static bool Parse(Scanner* pos, Range* item) { 57 | return pos->ConsumeRange(item); 58 | } 59 | }; 60 | 61 | template <> 62 | struct Parser> { 63 | static constexpr size_t kWireSize = 12; 64 | 65 | static bool Parse(Scanner* pos, Range* item) { 66 | return pos->ConsumeRange(item); 67 | } 68 | }; 69 | 70 | } // namespace detail 71 | 72 | 73 | template class Span; 74 | 75 | template 76 | class SpanIterator { 77 | public: 78 | SpanIterator() {} 79 | SpanIterator(const Span* span, size_t pos) : span_(span), pos_(pos) {} 80 | 81 | SpanIterator(const SpanIterator& other) = default; 82 | SpanIterator& operator=(const SpanIterator& other) = default; 83 | 84 | SpanIterator& operator++() { 85 | ++pos_; 86 | return *this; 87 | } 88 | 89 | SpanIterator operator++(int) { 90 | SpanIterator copy = *this; 91 | ++pos_; 92 | return copy; 93 | } 94 | 95 | SpanIterator& operator--() { 96 | --pos_; 97 | return *this; 98 | } 99 | 
100 | SpanIterator operator--(int) { 101 | SpanIterator copy = *this; 102 | --pos_; 103 | return *this; 104 | } 105 | 106 | const T* operator->() const; 107 | 108 | T operator*() const; 109 | 110 | template 111 | friend bool operator==(const SpanIterator& l, const SpanIterator& r); 112 | 113 | private: 114 | const Span* span_ = nullptr; 115 | size_t pos_ = 0; 116 | mutable T buf_; 117 | }; 118 | 119 | 120 | template 121 | class Span { 122 | public: 123 | Span(const uint8_t* data, size_t size) 124 | : data_(data), size_(detail::Parser::kWireSize * size) {} 125 | Span() {} 126 | 127 | Span(const Span& other) = default; 128 | Span& operator=(const Span& other) = default; 129 | 130 | size_t Size() const { return size_ / detail::Parser::kWireSize; } 131 | bool Empty() const { return size_ == 0; } 132 | T At(size_t pos) const; 133 | T operator[](size_t pos) const { return At(pos); } 134 | 135 | SpanIterator ConstBegin() const { return SpanIterator(this, 0); } 136 | SpanIterator ConstEnd() const { return SpanIterator(this, Size()); } 137 | 138 | private: 139 | const uint8_t* data_ = nullptr; 140 | size_t size_ = 0; 141 | }; 142 | 143 | template 144 | T Span::At(size_t pos) const { 145 | const size_t offset = pos * detail::Parser::kWireSize; 146 | 147 | Scanner scanner(data_ + offset, size_ - offset); 148 | T item; 149 | detail::Parser::Parse(&scanner, &item); 150 | return item; 151 | } 152 | 153 | template 154 | const T* SpanIterator::operator->() const { 155 | buf_ = span_->At(pos_); 156 | return &buf_; 157 | } 158 | 159 | template 160 | T SpanIterator::operator*() const { 161 | return span_->At(pos_); 162 | } 163 | 164 | template 165 | bool operator==(const SpanIterator& l, const SpanIterator& r) { 166 | return l.span_ == r.span_ && l.pos_ == r.pos_; 167 | } 168 | 169 | template 170 | bool operator!=(const SpanIterator& l, const SpanIterator& r) { 171 | return !(l == r); 172 | } 173 | 174 | } // namespace fdt 175 | 176 | #endif // __FDT_SPAN_H__ 177 | 
-------------------------------------------------------------------------------- /kernel.lds: -------------------------------------------------------------------------------- 1 | OUTPUT_FORMAT(elf64-aarch64) 2 | ENTRY(start) 3 | 4 | PHDRS 5 | { 6 | headers PT_PHDR PHDRS; 7 | text PT_LOAD FILEHDR PHDRS; 8 | rodata PT_LOAD; 9 | data PT_LOAD; 10 | dynamic PT_DYNAMIC; 11 | } 12 | 13 | SECTIONS 14 | { 15 | . = SIZEOF_HEADERS; 16 | . = 0x1000; 17 | _IMAGE_START = .; 18 | 19 | .text : { 20 | _TEXT_BEGIN = .; 21 | *(.text) 22 | _TEXT_END = .; 23 | } :text 24 | 25 | .init_array : ALIGN(0x1000) { 26 | _INIT_BEGIN = .; 27 | KEEP(*(SORT(.init_array.*))) 28 | KEEP(*(.init_array)) 29 | _INIT_END = .; 30 | } :rodata 31 | 32 | .rodata : { 33 | _RODATA_BEGIN = .; 34 | *(.rodata) 35 | _RODATA_END = .; 36 | } :rodata 37 | 38 | .data.rel.ro : { 39 | _RELRO_BEGIN = .; 40 | *(.data.rel.ro) 41 | _RELRO_END = .; 42 | } :rodata 43 | 44 | .rela.dyn : { 45 | _RELA_BEGIN = .; 46 | *(.rela.dyn) 47 | _RELA_END = .; 48 | } :rodata 49 | 50 | .dynamic : { 51 | _DYNAMIC = .; 52 | *(.dynamic) 53 | } :rodata :dynamic 54 | 55 | .data : { 56 | _DATA_BEGIN = .; 57 | *(.data) 58 | _DATA_END = .; 59 | } :data 60 | 61 | .bss : { 62 | _BSS_BEGIN = .; 63 | *(.bss) 64 | _BSS_END = .; 65 | } :data 66 | } 67 | -------------------------------------------------------------------------------- /kernel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "bootstrap", 4 | "devicetree", 5 | "interrupt", 6 | "intrusive", 7 | "kernel", 8 | "log", 9 | "memory", 10 | "numeric", 11 | "pl011", 12 | "runtime", 13 | "sync", 14 | ] 15 | 16 | [profile.dev] 17 | panic = "abort" 18 | 19 | [profile.release] 20 | panic = "abort" 21 | 22 | -------------------------------------------------------------------------------- /kernel/bootstrap/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = 
"bootstrap" 3 | version = "0.1.0" 4 | authors = ["Mike Krinkin "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "bootstrap" 9 | crate-type = ["rlib"] 10 | 11 | -------------------------------------------------------------------------------- /kernel/bootstrap/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | use core::iter::Iterator; 3 | use core::ops::Range; 4 | use core::ptr; 5 | use core::slice; 6 | use core::slice::Iter; 7 | 8 | #[repr(C)] 9 | struct PackedReservedRange { 10 | kind: u64, 11 | begin: u64, 12 | end: u64, 13 | } 14 | 15 | extern { 16 | fn reserved_ranges( 17 | ranges: *mut *const PackedReservedRange, size: *mut u32); 18 | fn devicetree(begin: *mut u64, end: *mut u64); 19 | fn bootstrap_heap(begin: *mut u64, end: *mut u64); 20 | 21 | fn bootstrap_allocator_shutdown() -> isize; 22 | fn bootstrap_allocate_aligned(size: usize, align: usize) -> *mut u8; 23 | fn bootstrap_free_aligned(ptr: *mut u8, size: usize, align: usize); 24 | } 25 | 26 | fn reserved_memory_ranges() -> &'static [PackedReservedRange] { 27 | unsafe { 28 | let mut ranges: *const PackedReservedRange = ptr::null(); 29 | let mut size: u32 = 0; 30 | 31 | reserved_ranges( 32 | &mut ranges as *mut *const PackedReservedRange, 33 | &mut size as *mut u32); 34 | slice::from_raw_parts(ranges, size as usize) 35 | } 36 | } 37 | 38 | pub struct ReservedRangeIter { 39 | inner: Iter<'static, PackedReservedRange>, 40 | } 41 | 42 | impl Iterator for ReservedRangeIter { 43 | type Item = Range; 44 | 45 | fn next(&mut self) -> Option> { 46 | match self.inner.next() { 47 | Some(range) => Some(range.begin..range.end), 48 | None => None, 49 | } 50 | } 51 | } 52 | 53 | pub fn reserved_range_iter() -> ReservedRangeIter { 54 | ReservedRangeIter { inner: reserved_memory_ranges().iter() } 55 | } 56 | 57 | pub fn fdt() -> &'static [u8] { 58 | unsafe { 59 | let mut begin: u64 = 0; 60 | let mut end: u64 = 0; 61 | 62 | devicetree(&mut begin as *mut u64, &mut 
end as *mut u64); 63 | if begin == end { 64 | return &[]; 65 | } 66 | slice::from_raw_parts(begin as *const u8, (end - begin) as usize) 67 | } 68 | } 69 | 70 | pub fn heap_range() -> Range { 71 | unsafe { 72 | let mut begin: u64 = 0; 73 | let mut end: u64 = 0; 74 | 75 | bootstrap_heap(&mut begin as *mut u64, &mut end as *mut u64); 76 | begin..end 77 | } 78 | } 79 | 80 | pub fn allocator_shutdown() -> bool { 81 | unsafe { 82 | bootstrap_allocator_shutdown() == 0 83 | } 84 | } 85 | 86 | pub unsafe fn allocate_aligned(size: usize, align: usize) -> *mut u8 { 87 | bootstrap_allocate_aligned(size, align) 88 | } 89 | 90 | pub unsafe fn free_aligned(ptr: *mut u8, size: usize, align: usize) { 91 | bootstrap_free_aligned(ptr, size, align); 92 | } 93 | -------------------------------------------------------------------------------- /kernel/devicetree/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "devicetree" 3 | version = "0.1.0" 4 | authors = ["Mike Krinkin "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "devicetree" 9 | crate-type = ["rlib"] 10 | 11 | [dependencies] 12 | log = { path = "../log" } 13 | -------------------------------------------------------------------------------- /kernel/devicetree/src/fdt.rs: -------------------------------------------------------------------------------- 1 | use alloc::string::String; 2 | use alloc::vec::Vec; 3 | use core::mem; 4 | use core::ops::Range; 5 | use core::result::Result; 6 | 7 | use crate::{DEVICE_TREE_SPEC_VERSION, DeviceTree, DeviceTreeNode}; 8 | use crate::scanner::Scanner; 9 | 10 | #[derive(Clone, Copy, Debug)] 11 | struct FDTHeader { 12 | magic: u32, 13 | totalsize: u32, 14 | off_dt_struct: u32, 15 | off_dt_strings: u32, 16 | off_mem_rsvmap: u32, 17 | version: u32, 18 | last_comp_version: u32, 19 | boot_cpuid_phys: u32, 20 | size_dt_strings: u32, 21 | size_dt_struct: u32, 22 | } 23 | 24 | pub fn parse(fdt: &[u8]) -> Result { 25 | let header = 
parse_header(fdt)?; 26 | 27 | if header.magic != 0xd00dfeed { 28 | return Err("Incorrect FDT magic value."); 29 | } 30 | if header.last_comp_version > DEVICE_TREE_SPEC_VERSION { 31 | return Err("FDT version is too new and not supported."); 32 | } 33 | if header.totalsize as usize > fdt.len() { 34 | return Err("The FDT size is too small to fit the Device Tree."); 35 | } 36 | 37 | let reserved = parse_reserved(&fdt[header.off_mem_rsvmap as usize..])?; 38 | 39 | let begin = header.off_dt_struct as usize; 40 | let end = begin + header.size_dt_struct as usize; 41 | let structs = &fdt[begin..end]; 42 | 43 | let begin = header.off_dt_strings as usize; 44 | let end = begin + header.size_dt_strings as usize; 45 | let strings = &fdt[begin..end]; 46 | 47 | let root = parse_nodes(structs, strings)?; 48 | 49 | Ok(DeviceTree::new( 50 | reserved, root, header.last_comp_version, header.boot_cpuid_phys)) 51 | } 52 | 53 | fn parse_header(fdt: &[u8]) -> Result { 54 | let mut scanner = Scanner::new(fdt); 55 | let magic = scanner.consume_be32()?; 56 | let totalsize = scanner.consume_be32()?; 57 | let off_dt_struct = scanner.consume_be32()?; 58 | let off_dt_strings = scanner.consume_be32()?; 59 | let off_mem_rsvmap = scanner.consume_be32()?; 60 | let version = scanner.consume_be32()?; 61 | let last_comp_version = scanner.consume_be32()?; 62 | let boot_cpuid_phys = scanner.consume_be32()?; 63 | let size_dt_strings = scanner.consume_be32()?; 64 | let size_dt_struct = scanner.consume_be32()?; 65 | 66 | Ok(FDTHeader{ 67 | magic, 68 | totalsize, 69 | off_dt_struct, 70 | off_dt_strings, 71 | off_mem_rsvmap, 72 | version, 73 | last_comp_version, 74 | boot_cpuid_phys, 75 | size_dt_strings, 76 | size_dt_struct, 77 | }) 78 | } 79 | 80 | fn parse_reserved(data: &[u8]) -> Result>, &'static str> { 81 | let mut scanner = Scanner::new(data); 82 | let mut reserved = Vec::new(); 83 | 84 | loop { 85 | let addr = scanner.consume_be64()?; 86 | let size = scanner.consume_be64()?; 87 | 88 | if addr == 0 && 
size == 0 { 89 | break; 90 | } 91 | reserved.push(addr..addr + size); 92 | } 93 | 94 | Ok(reserved) 95 | } 96 | 97 | fn parse_nodes(structs: &[u8], strings: &[u8]) 98 | -> Result { 99 | const FDT_BEGIN_NODE: u32 = 0x01; 100 | const FDT_END_NODE: u32 = 0x02; 101 | const FDT_PROP: u32 = 0x03; 102 | const FDT_NOP: u32 = 0x04; 103 | const FDT_END: u32 = 0x09; 104 | 105 | let mut scanner = Scanner::new(structs); 106 | let mut state = State::new(); 107 | 108 | loop { 109 | match scanner.consume_be32() { 110 | Ok(token) if token == FDT_BEGIN_NODE => { 111 | state.begin_node(scanner.consume_cstr()?); 112 | scanner.align_forward(4)?; 113 | }, 114 | Ok(token) if token == FDT_END_NODE => { 115 | state.end_node()?; 116 | }, 117 | Ok(token) if token == FDT_PROP => { 118 | let len = scanner.consume_be32()? as usize; 119 | let off = scanner.consume_be32()? as usize; 120 | let value = scanner.consume_data(len)?; 121 | let name = Scanner::new(&strings[off..]).consume_cstr()?; 122 | state.new_property(name, value); 123 | scanner.align_forward(4)?; 124 | }, 125 | Ok(token) if token == FDT_NOP => {}, 126 | Ok(token) if token == FDT_END => return state.finish(), 127 | Err(msg) => return Err(msg), 128 | _ => return Err("Unknown FDT token."), 129 | } 130 | } 131 | } 132 | 133 | struct State<'a> { 134 | parents: Vec<(&'a str, DeviceTreeNode)>, 135 | current: DeviceTreeNode, 136 | } 137 | 138 | impl<'a> State<'a> { 139 | fn new() -> State<'a> { 140 | State{ 141 | parents: Vec::new(), 142 | current: DeviceTreeNode::new(), 143 | } 144 | } 145 | 146 | fn begin_node(&mut self, name: &'a str) { 147 | let child = DeviceTreeNode::new_child(&self.current); 148 | let parent = mem::replace(&mut self.current, child); 149 | self.parents.push((name, parent)); 150 | } 151 | 152 | fn end_node(&mut self) -> Result<(), &'static str> { 153 | if let Some((name, parent)) = self.parents.pop() { 154 | let node = mem::replace(&mut self.current, parent); 155 | self.current.add_child(String::from(name), node); 156 
| return Ok(()); 157 | } 158 | Err("Unmatched end of node token found in FDT.") 159 | } 160 | 161 | fn new_property(&mut self, name: &str, value: &[u8]) { 162 | self.current.add_property(String::from(name), Vec::from(value)); 163 | } 164 | 165 | fn finish(&mut self) -> Result { 166 | if !self.parents.is_empty() { 167 | return Err("Parsed FDT contains unfinished nodes."); 168 | } 169 | if let Some(root) = self.current.remove_child("") { 170 | return Ok(root); 171 | } 172 | Err("FDT doesn't have a root node with an empty name.") 173 | } 174 | } 175 | 176 | #[cfg(test)] 177 | mod tests { 178 | use super::*; 179 | 180 | #[test] 181 | fn test_parse() { 182 | let dtb = include_bytes!("test.dtb"); 183 | let dt = parse(dtb).unwrap(); 184 | 185 | assert_eq!( 186 | dt.reserved_memory(), 187 | vec![ 188 | 0x40000000..0x40001000, 189 | 0x40002000..0x40003000, 190 | 0x40004000..0x40005000 191 | ]); 192 | 193 | assert_eq!( 194 | dt.follow("/").unwrap() 195 | .property("#size-cells").unwrap() 196 | .consume_be32().unwrap(), 197 | 2); 198 | assert_eq!( 199 | dt.follow("/").unwrap() 200 | .property("#address-cells").unwrap() 201 | .consume_be32().unwrap(), 202 | 2); 203 | 204 | assert_eq!( 205 | dt.follow("/memory@40000000").unwrap() 206 | .property("device_type").unwrap() 207 | .consume_cstr().unwrap(), 208 | "memory"); 209 | 210 | let mut p = dt.follow("/memory@40000000").unwrap() 211 | .property("reg").unwrap(); 212 | assert_eq!(p.consume_be64().unwrap(), 0x40000000); 213 | assert_eq!(p.consume_be64().unwrap(), 0x8000000); 214 | 215 | let mut p = dt.follow("/cpus/cpu@0").unwrap() 216 | .property("reg").unwrap(); 217 | assert_eq!(p.consume_be32().unwrap(), 0); 218 | 219 | let mut p = dt.follow("/cpus/cpu@0").unwrap() 220 | .property("device_type").unwrap(); 221 | assert_eq!(p.consume_cstr().unwrap(), "cpu"); 222 | 223 | let mut p = dt.follow("/cpus/cpu@0").unwrap() 224 | .property("compatible").unwrap(); 225 | assert_eq!(p.consume_cstr().unwrap(), "arm,cortex-a57"); 226 | } 227 | 
} 228 | -------------------------------------------------------------------------------- /kernel/devicetree/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(test), no_std)] 2 | extern crate alloc; 3 | extern crate log; 4 | mod scanner; 5 | pub mod fdt; 6 | 7 | use alloc::collections::btree_map::{BTreeMap, Iter}; 8 | use alloc::string::String; 9 | use alloc::vec::Vec; 10 | use core::iter::Iterator; 11 | use core::ops::Range; 12 | use core::option::Option; 13 | pub use scanner::Scanner; 14 | 15 | 16 | const DEVICE_TREE_SPEC_VERSION: u32 = 17; 17 | 18 | #[derive(Clone, Debug, PartialEq)] 19 | pub struct DeviceTreeNode { 20 | children: BTreeMap, 21 | properties: BTreeMap>, 22 | address_space: AddressSpace, 23 | } 24 | 25 | impl DeviceTreeNode { 26 | pub fn address_space(&self) -> AddressSpace { 27 | self.address_space 28 | } 29 | 30 | pub fn child(&self, name: &str) -> Option<&DeviceTreeNode> { 31 | self.children.get(name) 32 | } 33 | 34 | pub fn children(&self) -> Children { 35 | Children { inner: self.children.iter() } 36 | } 37 | 38 | pub fn property(&self, name: &str) -> Option { 39 | self.properties.get(name).map(|v| Scanner::new(v.as_slice())) 40 | } 41 | 42 | pub fn properties(&self) -> Properties { 43 | Properties { inner: self.properties.iter() } 44 | } 45 | 46 | fn new() -> DeviceTreeNode { 47 | DeviceTreeNode { 48 | children: BTreeMap::new(), 49 | properties: BTreeMap::new(), 50 | address_space: AddressSpace { size_cells: 1, address_cells: 1 }, 51 | } 52 | } 53 | 54 | fn new_child(parent: &DeviceTreeNode) -> DeviceTreeNode { 55 | DeviceTreeNode { 56 | children: BTreeMap::new(), 57 | properties: BTreeMap::new(), 58 | address_space: parent.address_space(), 59 | } 60 | } 61 | 62 | fn add_child(&mut self, name: String, child: DeviceTreeNode) { 63 | self.children.insert(name, child); 64 | } 65 | 66 | fn remove_child(&mut self, name: &str) -> Option { 67 | self.children.remove(name) 68 | } 69 | 70 | 
fn add_property(&mut self, name: String, value: Vec) { 71 | if name == "#size-cells" { 72 | let mut s = Scanner::new(&value[..]); 73 | self.address_space.size_cells = s.consume_be32().unwrap(); 74 | } 75 | if name == "#address-cells" { 76 | let mut s = Scanner::new(&value[..]); 77 | self.address_space.address_cells = s.consume_be32().unwrap(); 78 | } 79 | self.properties.insert(name, value); 80 | } 81 | } 82 | 83 | #[derive(Clone, Debug)] 84 | pub struct Children<'a> { 85 | inner: Iter<'a, String, DeviceTreeNode>, 86 | } 87 | 88 | impl<'a> Iterator for Children<'a> { 89 | type Item = (&'a str, &'a DeviceTreeNode); 90 | 91 | fn next(&mut self) -> Option<(&'a str, &'a DeviceTreeNode)> { 92 | if let Some((name, node)) = self.inner.next() { 93 | Some((name.as_str(), node)) 94 | } else { 95 | None 96 | } 97 | } 98 | } 99 | 100 | #[derive(Clone, Debug)] 101 | pub struct Properties<'a> { 102 | inner: Iter<'a, String, Vec>, 103 | } 104 | 105 | impl<'a> Iterator for Properties<'a> { 106 | type Item = (&'a str, Scanner<'a>); 107 | 108 | fn next(&mut self) -> Option<(&'a str, Scanner<'a>)> { 109 | if let Some((name, value)) = self.inner.next() { 110 | Some((name.as_str(), Scanner::new(value.as_slice()))) 111 | } else { 112 | None 113 | } 114 | } 115 | } 116 | 117 | #[derive(Copy, Clone, Debug, PartialEq)] 118 | pub struct AddressSpace { 119 | pub size_cells: u32, 120 | pub address_cells: u32, 121 | } 122 | 123 | #[derive(Clone, Debug)] 124 | pub struct DeviceTree { 125 | reserved: Vec>, 126 | root: DeviceTreeNode, 127 | last_comp_version: u32, 128 | boot_cpuid_phys: u32, 129 | } 130 | 131 | impl DeviceTree { 132 | pub fn new( 133 | reserved: Vec>, 134 | root: DeviceTreeNode, 135 | last_comp_version: u32, 136 | boot_cpuid_phys: u32) -> DeviceTree { 137 | DeviceTree { reserved, root, last_comp_version, boot_cpuid_phys } 138 | } 139 | 140 | pub fn reserved_memory(&self) -> &[Range] { 141 | self.reserved.as_slice() 142 | } 143 | 144 | pub fn follow(&self, path: &str) -> 
Option<&DeviceTreeNode> { 145 | let mut current = &self.root; 146 | 147 | if path == "/" { 148 | return Some(current); 149 | } 150 | 151 | for name in path[1..].split("/") { 152 | if let Some(node) = current.child(name) { 153 | current = node; 154 | } else { 155 | return None; 156 | } 157 | } 158 | 159 | Some(current) 160 | } 161 | 162 | pub fn compatible(&self, version: u32) -> bool { 163 | version <= DEVICE_TREE_SPEC_VERSION 164 | && version >= self.last_comp_version 165 | } 166 | 167 | pub fn boot_cpuid(&self) -> u32 { 168 | self.boot_cpuid_phys 169 | } 170 | } 171 | 172 | 173 | #[cfg(test)] 174 | mod tests { 175 | use super::*; 176 | 177 | #[test] 178 | fn test_compatible() { 179 | let dt = DeviceTree::new(Vec::new(), DeviceTreeNode::new(), 10, 0); 180 | 181 | assert!(dt.compatible(10)); 182 | assert!(dt.compatible(DEVICE_TREE_SPEC_VERSION)); 183 | assert!(!dt.compatible(9)); 184 | assert!(!dt.compatible(DEVICE_TREE_SPEC_VERSION + 1)); 185 | } 186 | 187 | #[test] 188 | fn test_follow() { 189 | let mut root = DeviceTreeNode::new(); 190 | let mut root_node1 = DeviceTreeNode::new(); 191 | let root_node2 = DeviceTreeNode::new(); 192 | let root_node1_node3 = DeviceTreeNode::new(); 193 | 194 | root_node1.add_child( 195 | String::from("node3"), root_node1_node3.clone()); 196 | root.add_child(String::from("node1"), root_node1.clone()); 197 | root.add_child(String::from("node2"), root_node2.clone()); 198 | 199 | let dt = DeviceTree::new(Vec::new(), root.clone(), 0, 0); 200 | 201 | assert_eq!(dt.follow("/"), Some(&root)); 202 | assert_eq!(dt.follow("/node1"), Some(&root_node1)); 203 | assert_eq!(dt.follow("/node2"), Some(&root_node2)); 204 | assert_eq!(dt.follow("/node1/node3"), Some(&root_node1_node3)); 205 | assert_eq!(dt.follow("/node4"), None); 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /kernel/devicetree/src/scanner.rs: -------------------------------------------------------------------------------- 1 | use 
core::convert::TryFrom; 2 | use core::result::Result; 3 | use core::str; 4 | use crate::AddressSpace; 5 | 6 | pub struct Scanner<'a> { 7 | data: &'a [u8], 8 | offset: usize, 9 | } 10 | 11 | impl<'a> Scanner<'a> { 12 | pub fn new(data: &'a [u8]) -> Scanner<'a> { 13 | Scanner { data, offset: 0 } 14 | } 15 | 16 | pub fn remains(&self) -> usize { 17 | self.data.len() - self.offset 18 | } 19 | 20 | pub fn consume_be32(&mut self) -> Result { 21 | if self.offset + 4 > self.data.len() { 22 | return Err("Not enough data"); 23 | } 24 | 25 | let value = &self.data[self.offset..self.offset + 4]; 26 | match <[u8; 4]>::try_from(value) { 27 | Ok(v) => { 28 | self.offset += value.len(); 29 | Ok(u32::from_be_bytes(v)) 30 | }, 31 | Err(_) => Err("Error while parsing 4-byte big-endian"), 32 | } 33 | } 34 | 35 | pub fn consume_be64(&mut self) -> Result { 36 | if self.offset + 8 > self.data.len() { 37 | return Err("Not enough data"); 38 | } 39 | 40 | let value = &self.data[self.offset..self.offset + 8]; 41 | match <[u8; 8]>::try_from(value) { 42 | Ok(v) => { 43 | self.offset += value.len(); 44 | Ok(u64::from_be_bytes(v)) 45 | }, 46 | Err(_) => Err("Error while parsing 8-byte big-endian"), 47 | } 48 | } 49 | 50 | pub fn consume_address(&mut self, address_space: &AddressSpace) 51 | -> Result 52 | { 53 | match address_space.address_cells { 54 | 0 => Ok(0), 55 | 1 => self.consume_be32().map(|x| x as u64), 56 | 2 => self.consume_be64(), 57 | _ => Err("Unsupported #address-cells value."), 58 | } 59 | } 60 | 61 | pub fn consume_size(&mut self, address_space: &AddressSpace) 62 | -> Result 63 | { 64 | match address_space.size_cells { 65 | 0 => Ok(0), 66 | 1 => self.consume_be32().map(|x| x as u64), 67 | 2 => self.consume_be64(), 68 | _ => Err("Unsupported #address-cells value."), 69 | } 70 | } 71 | 72 | pub fn consume_cstr(&mut self) -> Result<&'a str, &'static str> { 73 | for i in self.offset.. 
{
            // Ran off the end of the buffer without seeing a NUL terminator.
            if i >= self.data.len() {
                return Err("Failed to find terminating '\0' in the data");
            }

            if self.data[i] != 0 {
                continue;
            }

            // Found the terminator: everything between the cursor and `i`
            // must be valid UTF-8 to be handed out as &str.
            match str::from_utf8(&self.data[self.offset..i]) {
                Ok(s) => {
                    self.offset = i + 1;
                    return Ok(s);
                },
                Err(_) => return Err("Not a valid UTF8 string"),
            }
        }
        Err("Unreachable")
    }

    /// Consumes exactly `size` bytes and returns them as a borrowed subslice
    /// of the underlying data, advancing the cursor past them.
    pub fn consume_data(&mut self, size: usize)
        -> Result<&'a [u8], &'static str>
    {
        if self.offset + size > self.data.len() {
            return Err("Not enough data");
        }

        let start = self.offset;
        let stop = start + size;
        self.offset = stop;

        Ok(&self.data[start..stop])
    }

    /// Advances the cursor to the next multiple of `alignment`.
    /// An `alignment` of zero means "no alignment required".
    pub fn align_forward(&mut self, alignment: usize)
        -> Result<(), &'static str>
    {
        if alignment == 0 || self.offset % alignment == 0 {
            return Ok(());
        }

        let shift = alignment - self.offset % alignment;

        // NOTE(review): `>=` also rejects aligning exactly to the end of the
        // buffer (offset + shift == data.len()); confirm that is intended.
        if self.offset + shift >= self.data.len() {
            return Err("Not enough data");
        }

        self.offset += shift;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_consume_be32() {
        assert!(Scanner::new(&[]).consume_be32().is_err());
        assert!(Scanner::new(&[0]).consume_be32().is_err());
        assert!(Scanner::new(&[0, 0]).consume_be32().is_err());
        assert!(Scanner::new(&[0, 0, 0]).consume_be32().is_err());
        assert_eq!(
            Scanner::new(&[0, 0, 0, 0]).consume_be32(),
            Ok(0u32));
        assert_eq!(
            Scanner::new(&[0xff, 0, 0, 0]).consume_be32(),
            Ok(0xff000000u32));
        assert_eq!(
            Scanner::new(&[0, 0xff, 0, 0]).consume_be32(),
            Ok(0x00ff0000u32));
        assert_eq!(
            Scanner::new(&[0, 0, 0xff, 0]).consume_be32(),
            Ok(0x0000ff00u32));
        assert_eq!(
            Scanner::new(&[0, 0, 0, 0xff]).consume_be32(),
            Ok(0x000000ffu32));
    }

    #[test]
    fn test_consume_be64() {
        assert!(Scanner::new(&[]).consume_be64().is_err());
        assert!(Scanner::new(&[0]).consume_be64().is_err());
        assert!(Scanner::new(&[0, 0]).consume_be64().is_err());
        assert!(Scanner::new(&[0, 0, 0]).consume_be64().is_err());
        assert!(Scanner::new(&[0, 0, 0, 0]).consume_be64().is_err());
        assert!(Scanner::new(&[0, 0, 0, 0, 0]).consume_be64().is_err());
        assert!(Scanner::new(&[0, 0, 0, 0, 0, 0]).consume_be64().is_err());
        assert!(Scanner::new(&[0, 0, 0, 0, 0, 0, 0]).consume_be64().is_err());
        assert_eq!(
            Scanner::new(&[0, 0, 0, 0, 0, 0, 0, 0]).consume_be64(),
            Ok(0u64));
        assert_eq!(
            Scanner::new(&[0xff, 0, 0, 0, 0, 0, 0, 0]).consume_be64(),
            Ok(0xff00000000000000u64));
        assert_eq!(
            Scanner::new(&[0, 0xff, 0, 0, 0, 0, 0, 0]).consume_be64(),
            Ok(0x00ff000000000000u64));
        assert_eq!(
            Scanner::new(&[0, 0, 0xff, 0, 0, 0, 0, 0]).consume_be64(),
            Ok(0x0000ff0000000000u64));
        assert_eq!(
            Scanner::new(&[0, 0, 0, 0xff, 0, 0, 0, 0]).consume_be64(),
            Ok(0x000000ff00000000u64));
        assert_eq!(
            Scanner::new(&[0, 0, 0, 0, 0xff, 0, 0, 0]).consume_be64(),
            Ok(0x00000000ff000000u64));
        assert_eq!(
            Scanner::new(&[0, 0, 0, 0, 0, 0xff, 0, 0]).consume_be64(),
            Ok(0x0000000000ff0000u64));
        assert_eq!(
            Scanner::new(&[0, 0, 0, 0, 0, 0, 0xff, 0]).consume_be64(),
            Ok(0x000000000000ff00u64));
        assert_eq!(
            Scanner::new(&[0, 0, 0, 0, 0, 0, 0, 0xff]).consume_be64(),
            Ok(0x00000000000000ffu64));
    }

    #[test]
    fn test_consume_cstr() {
        assert!(Scanner::new(&[]).consume_cstr().is_err());
        assert_eq!(Scanner::new(&[0]).consume_cstr(), Ok(""));
        assert_eq!(Scanner::new(&[b'H', b'i', 0]).consume_cstr(), Ok("Hi"));
    }

    #[test]
    fn test_align_forward() {
        let mut scanner = Scanner::new(&[1, 2, 3, 4, 5, 6, 7, 8]);
assert_eq!(scanner.align_forward(0), Ok(())); 203 | assert_eq!(scanner.offset, 0); 204 | assert_eq!(scanner.align_forward(1), Ok(())); 205 | assert_eq!(scanner.offset, 0); 206 | assert_eq!(scanner.align_forward(2), Ok(())); 207 | assert_eq!(scanner.offset, 0); 208 | assert_eq!(scanner.align_forward(3), Ok(())); 209 | assert_eq!(scanner.offset, 0); 210 | assert_eq!(scanner.align_forward(4), Ok(())); 211 | assert_eq!(scanner.offset, 0); 212 | 213 | assert_eq!(scanner.consume_data(1), Ok(&[1u8][..])); 214 | assert_eq!(scanner.align_forward(0), Ok(())); 215 | assert_eq!(scanner.offset, 1); 216 | assert_eq!(scanner.align_forward(1), Ok(())); 217 | assert_eq!(scanner.offset, 1); 218 | assert_eq!(scanner.align_forward(2), Ok(())); 219 | assert_eq!(scanner.offset, 2); 220 | assert_eq!(scanner.align_forward(2), Ok(())); 221 | assert_eq!(scanner.offset, 2); 222 | assert_eq!(scanner.align_forward(3), Ok(())); 223 | assert_eq!(scanner.offset, 3); 224 | assert_eq!(scanner.align_forward(3), Ok(())); 225 | assert_eq!(scanner.offset, 3); 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /kernel/devicetree/src/test.dtb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/krinkinmu/aarch64/7e81f7d964ae7b96116f4011c5aa43e41318eb33/kernel/devicetree/src/test.dtb -------------------------------------------------------------------------------- /kernel/devicetree/src/test.dts: -------------------------------------------------------------------------------- 1 | /dts-v1/; 2 | /memreserve/ 0x40000000 0x1000; 3 | /memreserve/ 0x40002000 0x1000; 4 | /memreserve/ 0x40004000 0x1000; 5 | / { 6 | #size-cells = <0x02>; 7 | #address-cells = <0x02>; 8 | 9 | memory@40000000 { 10 | reg = <0x00 0x40000000 0x00 0x8000000>; 11 | device_type = "memory"; 12 | }; 13 | 14 | cpus { 15 | #size-cells = <0x00>; 16 | #address-cells = <0x01>; 17 | 18 | cpu@0 { 19 | reg = <0x00>; 20 | compatible = 
"arm,cortex-a57";
			device_type = "cpu";
		};
	};
};
-------------------------------------------------------------------------------- /kernel/interrupt/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "interrupt"
version = "0.1.0"
authors = ["Mike Krinkin "]
edition = "2018"

[lib]
name = "interrupt"
crate-type = ["rlib"]
-------------------------------------------------------------------------------- /kernel/interrupt/src/lib.rs: --------------------------------------------------------------------------------
#![no_std]

/// Saved register state handed to the Rust handlers by the low-level entry
/// code (presumably bootstrap/interrupts.S — confirm against that file).
/// #[repr(C)] keeps the field order exactly as the assembly stores it.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct InterruptFrame {
    x0: u64,
    x1: u64,
    x2: u64,
    x3: u64,
    x4: u64,
    x5: u64,
    x6: u64,
    x7: u64,
    x8: u64,
    x9: u64,
    x10: u64,
    x11: u64,
    x12: u64,
    x13: u64,
    x14: u64,
    x15: u64,
    x16: u64,
    x17: u64,
    x18: u64,
    fp: u64,
    lr: u64,
    sp: u64,
    esr: u64,
    far: u64,
}

/// C ABI entry point for interrupts.
///
/// `frame` must point to a valid, writable `InterruptFrame`; the caller is
/// the assembly stub, so this is trusted in release builds.
#[no_mangle]
pub extern "C" fn interrupt(frame: *mut InterruptFrame) {
    // Catch a bad pointer from the assembly side early in debug builds
    // instead of dereferencing it.
    debug_assert!(!frame.is_null());
    unsafe { safe_interrupt(&mut *frame) }
}

// Safe-Rust body of the interrupt handler; currently a no-op.
fn safe_interrupt(_frame: &mut InterruptFrame) {
}

/// C ABI entry point for synchronous exceptions.
#[no_mangle]
pub extern "C" fn exception(frame: *mut InterruptFrame) {
    debug_assert!(!frame.is_null());
    unsafe { safe_exception(&mut *frame) }
}

// Exceptions are treated as fatal for now: park the CPU forever.
fn safe_exception(_frame: &mut InterruptFrame) {
    loop {}
}
-------------------------------------------------------------------------------- /kernel/intrusive/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "intrusive"
version = "0.1.0"
authors = ["Mike Krinkin "]
edition = "2018"

[lib]
name = "intrusive"
crate-type = ["rlib"]
-------------------------------------------------------------------------------- /kernel/intrusive/src/lib.rs:
--------------------------------------------------------------------------------
#![no_std]
mod list;

use core::marker::PhantomData;
pub use list::{IntrusiveList, IntrusiveListLink};

/// Byte offset of a field of type `F` inside a struct of type `S`.
///
/// The raw-pointer PhantomData markers tie the offset to the two types
/// without implying ownership. (The extraction this file was recovered from
/// stripped the `<S, F>` type parameters; they are restored here.)
#[derive(Copy, Clone, Debug)]
pub struct StructFieldOffset<S, F> {
    offset: usize,
    _sm: PhantomData<*const S>,
    _fm: PhantomData<*const F>,
}

impl<S, F> StructFieldOffset<S, F> {
    /// Computes the offset from a concrete (struct, field) pointer pair;
    /// `f` must point at a field inside `*s`.
    pub fn new(s: *const S, f: *const F) -> StructFieldOffset<S, F> {
        StructFieldOffset {
            offset: f as usize - s as usize,
            _sm: PhantomData,
            _fm: PhantomData,
        }
    }

    /// Returns a pointer to the field given a pointer to the struct.
    pub fn field(&self, s: *const S) -> *const F {
        (s as usize + self.offset) as *const F
    }

    pub fn field_mut(&self, s: *mut S) -> *mut F {
        (s as usize + self.offset) as *mut F
    }

    /// Inverse of `field`: recovers the struct pointer from a field pointer.
    pub fn container_of(&self, f: *const F) -> *const S {
        (f as usize - self.offset) as *const S
    }

    pub fn container_of_mut(&self, f: *mut F) -> *mut S {
        (f as usize - self.offset) as *mut S
    }
}
-------------------------------------------------------------------------------- /kernel/intrusive/src/list.rs: --------------------------------------------------------------------------------
use core::cell::Cell;
use core::option::Option;
use core::ptr;
use crate::StructFieldOffset;

// Raw prev/next pointers of one list node.
#[derive(Copy, Clone, Debug)]
struct IntrusiveListNode {
    next: *const IntrusiveListLink,
    prev: *const IntrusiveListLink,
}

/// Link embedded into a containing struct to make it list-able.
/// Cell allows link updates through shared references.
#[derive(Debug)]
pub struct IntrusiveListLink {
    link: Cell<IntrusiveListNode>,
}

impl IntrusiveListLink {
    pub fn new() -> IntrusiveListLink {
        IntrusiveListLink {
            link: Cell::new(IntrusiveListNode {
                next: ptr::null(),
                prev: ptr::null(),
            }),
        }
    }

    fn next(&self) -> *const IntrusiveListLink {
        self.link.get().next
    }

    fn set_next(&self, next: *const IntrusiveListLink) {
        let mut link = self.link.get();
link.next = next;
        self.link.set(link);
    }

    fn prev(&self) -> *const IntrusiveListLink {
        self.link.get().prev
    }

    fn set_prev(&self, prev: *const IntrusiveListLink) {
        let mut link = self.link.get();
        link.prev = prev;
        self.link.set(link);
    }

    fn set(&self, node: IntrusiveListNode) {
        self.link.set(node);
    }
}

/// Doubly-linked intrusive list over structs of type `S` that embed an
/// `IntrusiveListLink` at the offset recorded in `offset`.
/// (The `<S>` type parameter was stripped by extraction; restored here.)
#[derive(Debug)]
pub struct IntrusiveList<S> {
    head: *const IntrusiveListLink,
    tail: *const IntrusiveListLink,
    offset: StructFieldOffset<S, IntrusiveListLink>,
}

impl<S> IntrusiveList<S> {
    pub fn new(offset: StructFieldOffset<S, IntrusiveListLink>)
        -> IntrusiveList<S>
    {
        IntrusiveList {
            head: ptr::null(),
            tail: ptr::null(),
            offset: offset,
        }
    }

    pub fn is_empty(&self) -> bool {
        self.head == ptr::null()
    }

    /// Pushes `item` at the head. Caller guarantees `item` is valid and not
    /// already on a list.
    pub unsafe fn push(&mut self, item: *const S) {
        self.push_link(self.offset.field(item));
    }

    /// Pops the head element, if any.
    pub unsafe fn pop(&mut self) -> Option<*const S> {
        self.pop_link().map(|link| self.offset.container_of(link))
    }

    /// Unlinks `item` from whatever position it occupies.
    pub unsafe fn remove(&mut self, item: *const S) {
        self.remove_link(self.offset.field(item));
    }

    unsafe fn push_link(&mut self, ptr: *const IntrusiveListLink) {
        (*ptr).set(IntrusiveListNode {
            next: self.head,
            prev: ptr::null(),
        });

        if self.head == ptr::null() {
            self.tail = ptr;
        } else {
            (*self.head).set_prev(ptr);
        }
        self.head = ptr;
    }

    unsafe fn pop_link(&mut self) -> Option<*const IntrusiveListLink> {
        if self.head == ptr::null() {
            return None;
        }

        let ptr = self.head;
        let node = &*ptr;

        if self.head == self.tail {
            self.head = ptr::null();
            self.tail = ptr::null();
        } else {
            self.head = node.next();
            (*node.next()).set_prev(ptr::null());
        }

        // Clear the links of the removed node so it is safe to re-push.
        node.set(IntrusiveListNode {
            next: ptr::null(),
            prev: ptr::null(),
        });
        Some(ptr)
    }

    unsafe fn remove_link(&mut self, ptr: *const IntrusiveListLink) {
        let node = &*ptr;
        let prev = node.prev();
        let next = node.next();

        node.set(IntrusiveListNode {
            next: ptr::null(),
            prev: ptr::null(),
        });

        if prev != ptr::null() { (*prev).set_next(next); }
        if next != ptr::null() { (*next).set_prev(prev); }

        if self.head == ptr { self.head = next; }
        if self.tail == ptr { self.tail = prev; }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    struct ListNode {
        _placeholder: i32,
        link: IntrusiveListLink,
    }

    impl ListNode {
        fn new() -> ListNode {
            ListNode {
                _placeholder: 0,
                link: IntrusiveListLink::new(),
            }
        }

        fn offset(&self) -> StructFieldOffset<ListNode, IntrusiveListLink> {
            StructFieldOffset::new(self as *const ListNode, &self.link as *const IntrusiveListLink)
        }
    }

    #[test]
    fn test_push_pop() {
        unsafe {
            let nodes = [ListNode::new(), ListNode::new(), ListNode::new()];
            let mut list = IntrusiveList::new(nodes[0].offset());

            assert_eq!(list.pop(), None);

            list.push(&nodes[0] as *const ListNode);
            assert_eq!(list.pop(), Some(&nodes[0] as *const ListNode));
            assert_eq!(list.pop(), None);

            list.push(&nodes[0] as *const ListNode);
            list.push(&nodes[1] as *const ListNode);
            assert_eq!(list.pop(), Some(&nodes[1] as *const ListNode));
            assert_eq!(list.pop(), Some(&nodes[0] as *const ListNode));
            assert_eq!(list.pop(), None);

            list.push(&nodes[0] as *const ListNode);
            list.push(&nodes[1] as *const ListNode);
            list.push(&nodes[2] as *const ListNode);
            assert_eq!(list.pop(), Some(&nodes[2] as *const ListNode));
            assert_eq!(list.pop(), Some(&nodes[1] as *const ListNode));
            assert_eq!(list.pop(), Some(&nodes[0] as *const ListNode));
            assert_eq!(list.pop(), None);
        }
    }

    #[test]
    fn test_remove() {
        unsafe {
            let nodes = [ListNode::new(), ListNode::new(), ListNode::new()];
            let mut list = IntrusiveList::new(nodes[0].offset());

            {
                assert_eq!(list.pop(), None);
                list.push(&nodes[0] as *const ListNode);
                list.push(&nodes[1] as *const ListNode);
                list.push(&nodes[2] as *const ListNode);
                list.remove(&nodes[0] as *const ListNode);
                assert_eq!(list.pop(), Some(&nodes[2] as *const ListNode));
                assert_eq!(list.pop(), Some(&nodes[1] as *const ListNode));
                assert_eq!(list.pop(), None);
            }
            {
                assert_eq!(list.pop(), None);
                list.push(&nodes[0] as *const ListNode);
                list.push(&nodes[1] as *const ListNode);
                list.push(&nodes[2] as *const ListNode);
                list.remove(&nodes[1] as *const ListNode);
                assert_eq!(list.pop(), Some(&nodes[2] as *const ListNode));
                assert_eq!(list.pop(), Some(&nodes[0] as *const ListNode));
                assert_eq!(list.pop(), None);
            }
            {
                assert_eq!(list.pop(), None);
                list.push(&nodes[0] as *const ListNode);
                list.push(&nodes[1] as *const ListNode);
                list.push(&nodes[2] as *const ListNode);
                list.remove(&nodes[2] as *const ListNode);
                assert_eq!(list.pop(), Some(&nodes[1] as *const ListNode));
                assert_eq!(list.pop(), Some(&nodes[0] as *const ListNode));
                assert_eq!(list.pop(), None);
            }
        }
    }
}
-------------------------------------------------------------------------------- /kernel/kernel/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "kernel"
version = "0.1.0"
authors = ["Mike Krinkin "]
edition = "2018"

[lib]
name = "kernel"
crate-type = ["staticlib", "rlib"]

[dependencies]
bootstrap = { path = "../bootstrap" }
devicetree = { path = 
"../devicetree" }
interrupt = { path = "../interrupt" }
log = { path = "../log" }
memory = { path = "../memory" }
numeric = { path = "../numeric" }
pl011 = { path = "../pl011" }
runtime = { path = "../runtime" }
sync = { path = "../sync" }
-------------------------------------------------------------------------------- /kernel/kernel/src/lib.rs: --------------------------------------------------------------------------------
#![no_std]
extern crate alloc;
extern crate bootstrap;
extern crate devicetree;
extern crate interrupt;
extern crate log;
extern crate memory;
extern crate numeric;
extern crate pl011;
extern crate runtime;
extern crate sync;

mod setup;

use log::Logger;
use memory::Memory;

/// Top-level kernel state assembled during early boot.
pub struct Kernel {
    memory: Memory<'static>,
    logger: Logger<'static>,
}

/// Entry point called from the bootstrap code after the C/asm stage.
#[no_mangle]
pub extern "C" fn start_kernel() {
    let kernel = setup::from_devicetree();
    kernel.logger.log("Hello from Rust\n");
    loop {}
}
-------------------------------------------------------------------------------- /kernel/kernel/src/setup.rs: --------------------------------------------------------------------------------
use bootstrap;
use core::option::Option;
use crate::Kernel;
use devicetree::{self, DeviceTree};
use log::{self, Logger, LoggerSink};
use memory::{Memory, MemoryMap, MemoryType};
use numeric;
use pl011::PL011;


/// Builds the physical memory map from the devicetree "memory" nodes,
/// then carves out the ranges the devicetree and bootstrap reserve.
fn mmap_from_devicetree(dt: &DeviceTree) -> MemoryMap {
    let mut mmap = MemoryMap::new();
    let root = dt.follow("/").unwrap();

    for (unitname, node) in root.children() {
        // Only nodes named "memory" or "memory@<addr>" describe RAM.
        if unitname != "memory" && !unitname.starts_with("memory@") {
            continue;
        }
        let mut reg = node.property("reg").unwrap();
        while reg.remains() > 0 {
            let addr = reg.consume_address(&root.address_space()).unwrap();
            let size = reg.consume_size(&root.address_space()).unwrap();

            mmap.add_memory(addr..addr + size, MemoryType::Regular).unwrap();
        }
    }

    for reserved in dt.reserved_memory().iter().cloned() {
        mmap.reserve_memory(reserved, MemoryType::Regular).unwrap();
    }
    for reserved in bootstrap::reserved_range_iter() {
        mmap.reserve_memory(reserved, MemoryType::Regular).unwrap();
    }

    mmap
}

fn memory_from_devicetree(dt: &DeviceTree) -> Memory<'static> {
    let mmap = mmap_from_devicetree(dt);
    unsafe { Memory::new(&mmap) }
}

// Adapts the PL011 UART driver to the LoggerSink trait.
struct SerialSink {
    serial: PL011,
}

impl LoggerSink for SerialSink {
    fn log(&self, msg: &str) {
        self.serial.send(msg);
        self.serial.send("\n");
    }
}

/// Creates the one global serial-backed logger. Must be called at most once
/// (asserted below); the sink lives in a private static.
fn serial_logger() -> Logger<'static> {
    // NOTE: the extraction stripped the type parameter here; restored as
    // Option<SerialSink>.
    static mut SINK: Option<SerialSink> = None;

    // For HiKey960 board that I have the following parameters were found to
    // work fine:
    //
    // let serial = PL011::new(
    //     /* base_address = */0xfff32000,
    //     /* base_clock = */19200000);
    let serial = PL011::new(
        /* base_address = */0x9000000,
        /* base_clock = */24000000);

    unsafe {
        assert!(SINK.is_none());
        SINK = Some(SerialSink { serial });
        Logger::new(SINK.as_ref().unwrap() as &dyn LoggerSink)
    }
}

/// Full early-boot setup: parse the FDT, build the page allocator, shut the
/// bootstrap bump allocator down and donate its heap pages to `memory`.
pub fn from_devicetree() -> Kernel {
    let mut memory = {
        let dt = devicetree::fdt::parse(bootstrap::fdt()).unwrap();
        memory_from_devicetree(&dt)
    };
    let logger = serial_logger();

    assert!(bootstrap::allocator_shutdown());
    let heap = bootstrap::heap_range();

    // Hand every whole page of the former bootstrap heap to the allocator.
    let mut addr = numeric::align_up(heap.start, memory.page_size());
    let to = numeric::align_down(heap.end, memory.page_size());
    while addr < to {
        memory.free_pages(addr);
        addr += memory.page_size();
    }

    Kernel { memory, logger }
}
-------------------------------------------------------------------------------- /kernel/log/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "log"
version = "0.1.0"
authors = ["Mike Krinkin "]
edition = "2018"

[lib]
name = "log"
crate-type = ["rlib"]

-------------------------------------------------------------------------------- /kernel/log/src/lib.rs: --------------------------------------------------------------------------------
#![no_std]
/// Destination for log messages (e.g. a serial port).
pub trait LoggerSink {
    fn log(&self, msg: &str);
}

/// Thin façade that forwards messages to a borrowed sink.
pub struct Logger<'a> {
    sink: &'a dyn LoggerSink,
}

impl<'a> Logger<'a> {
    pub fn new(sink: &dyn LoggerSink) -> Logger {
        Logger { sink }
    }

    pub fn log(&self, msg: &str) {
        self.sink.log(msg)
    }
}

-------------------------------------------------------------------------------- /kernel/memory/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "memory"
version = "0.1.0"
authors = ["Mike Krinkin "]
edition = "2018"

[lib]
name = "memory"
crate-type = ["rlib"]

[dependencies]
intrusive = { path = "../intrusive" }
numeric = { path = "../numeric" }
sync = { path = "../sync" }
-------------------------------------------------------------------------------- /kernel/memory/src/buddy.rs: --------------------------------------------------------------------------------
use core::cmp;
use core::default::Default;
use core::ops::Range;
use crate::list::PageList;
use crate::page::Page;

pub const LEVELS: usize = 20;

/// Buddy allocator over a contiguous slice of `Page` descriptors.
/// `offset` is the global index of `pages[0]`.
/// (Stripped `<u64>`/`Range<u64>` type arguments restored from extraction.)
#[derive(Debug)]
pub struct BuddySystem<'a> {
    free: [PageList<'a>; LEVELS],
    pages: &'a [Page],
    offset: u64,
}

impl<'a> BuddySystem<'a> {
    pub fn new(pages: &'a [Page], offset: u64) -> BuddySystem<'a> {
        BuddySystem {
            free: Default::default(),
            pages: pages,
            offset: offset,
        }
    }

    /// Global page-index range this buddy system manages.
    pub fn page_range(&self) -> Range<u64> {
        self.offset..self.offset + self.pages.len() as u64
    }

    // Global index of a descriptor that belongs to `self.pages`.
    unsafe fn page_index(&self, page: &Page) -> u64 {
        self.offset + (page as *const Page).offset_from(
            self.pages.as_ptr()) as u64
    }

    // Slice offset for a global page index.
    unsafe fn page_offset(&self, index: u64) -> usize {
        (index - self.offset) as usize
    }

    /// Allocates a 2^order-page block and returns its global page index,
    /// or None when no block of sufficient order is free.
    pub fn allocate_pages(&mut self, order: u64) -> Option<u64> {
        unsafe {
            let order = order as usize;
            for level in order..LEVELS {
                if let Some(page) = self.free[level].pop() {
                    let index = self.page_index(page);
                    self.split_and_return(page, index, order);
                    (*page).set_busy();
                    (*page).set_level(order as u64);
                    return Some(index);
                }
            }

            None
        }
    }

    // Splits a block of `page.level()` down to `order`, putting every peeled
    // buddy back on its free list.
    unsafe fn split_and_return(
        &mut self, page: &Page, index: u64, order: usize)
    {
        let from = page.level() as usize;
        let to = order;

        for level in (to..from).rev() {
            let buddy = &self.pages[
                self.page_offset(BuddySystem::buddy_index(index, level))];

            buddy.set_level(level as u64);
            buddy.set_free();
            self.free[level].push(buddy);
        }
    }

    /// Frees the block that starts at global page index `index`, at the
    /// order recorded in its descriptor.
    pub fn free_pages(&mut self, index: u64) {
        unsafe {
            let page = &self.pages[self.page_offset(index)];
            self.free_pages_at_level(index, page.level());
        }
    }

    /// Frees a block at an explicit order, coalescing with free buddies as
    /// far up as possible.
    pub unsafe fn free_pages_at_level(&mut self, index: u64, level: u64) {
        let mut index = index;
        let mut level = level as usize;

        while level < LEVELS - 1 {
            let buddy = BuddySystem::buddy_index(index, level);

            if !self.page_range().contains(&buddy) {
                break;
            }

            let page = &self.pages[self.page_offset(buddy)];
            if !page.is_free() { break; }
            if page.level() as usize != level { break; }

            // Merge: take the buddy off its free list and go one level up.
            self.free[level].remove(page);
            index = cmp::min(index, buddy);
            level += 1;
        }

        let page = &self.pages[self.page_offset(index)];
        page.set_free();
        page.set_level(level as u64);
        self.free[level].push(page);
    }

    // Index of the buddy block at the given order (flip the order-th bit).
    fn buddy_index(index: u64, order: usize) -> u64 {
        index ^ (1 << order)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_buddy_index() {
        assert_eq!(BuddySystem::buddy_index(0, 0), 1);
        assert_eq!(BuddySystem::buddy_index(1, 0), 0);
        assert_eq!(BuddySystem::buddy_index(2, 0), 3);
        assert_eq!(BuddySystem::buddy_index(3, 0), 2);
        assert_eq!(BuddySystem::buddy_index(4, 0), 5);
        assert_eq!(BuddySystem::buddy_index(5, 0), 4);
        assert_eq!(BuddySystem::buddy_index(6, 0), 7);
        assert_eq!(BuddySystem::buddy_index(7, 0), 6);
        assert_eq!(BuddySystem::buddy_index(0, 1), 2);
        assert_eq!(BuddySystem::buddy_index(2, 1), 0);
        assert_eq!(BuddySystem::buddy_index(4, 1), 6);
        assert_eq!(BuddySystem::buddy_index(6, 1), 4);
        assert_eq!(BuddySystem::buddy_index(0, 2), 4);
        assert_eq!(BuddySystem::buddy_index(4, 2), 0);
    }

    #[test]
    fn test_alloc_free() {
        let pages = [
            Page::new(), Page::new(), Page::new(), Page::new(),
            Page::new(), Page::new(), Page::new(), Page::new()
        ];
        let mut buddy = BuddySystem::new(&pages[..], 8);

        buddy.free_pages(9);
        buddy.free_pages(10);
        buddy.free_pages(11);
        assert_eq!(buddy.allocate_pages(2), None);
        buddy.free_pages(8);
        assert_eq!(buddy.allocate_pages(2), Some(8));
    }

    #[test]
    fn test_alignment() {
        let pages = [
            Page::new(), Page::new(), Page::new(), Page::new(),
            Page::new(), Page::new(), Page::new(), Page::new()
        ];
        let mut buddy = BuddySystem::new(&pages[..], 8);

        pages[0].set_level(3);
        buddy.free_pages(8);

        let index1 = 
buddy.allocate_pages(1).unwrap();
        assert_eq!(index1 & ((1 << 1) - 1), 0);

        let index2 = buddy.allocate_pages(2).unwrap();
        assert_eq!(index2 & ((1 << 2) - 1), 0);

        buddy.free_pages(index1);
        buddy.free_pages(index2);
        let index = buddy.allocate_pages(3).unwrap();
        assert_eq!(index & ((1 << 3) - 1), 0);
    }
}
-------------------------------------------------------------------------------- /kernel/memory/src/lib.rs: --------------------------------------------------------------------------------
#![cfg_attr(not(test), no_std)]
#![feature(core_intrinsics)]
extern crate alloc;
extern crate intrusive;

mod buddy;
mod list;
mod memory_map;
mod page;

use buddy::BuddySystem;
use core::cmp;
use core::intrinsics;
use core::mem;
use core::ops::Range;
use core::ptr;
use core::slice;
use numeric;
use page::Page;
use sync::placeholder::Mutex;

pub use memory_map::MemoryMap;
pub use memory_map::MemoryType;

/// One contiguous range of physical pages with its own buddy allocator.
/// (Stripped `Mutex<BuddySystem<'a>>`/`Range<u64>` type arguments restored.)
struct Zone<'a> {
    freelist: Mutex<BuddySystem<'a>>,
    space: Range<u64>,
}

impl<'a> Zone<'a> {
    fn new(pages: &'a [Page], offset: u64) -> Zone<'a> {
        Zone {
            freelist: Mutex::new(BuddySystem::new(pages, offset)),
            space: offset..offset + pages.len() as u64,
        }
    }

    fn allocate_pages(&self, order: u64) -> Option<u64> {
        self.freelist.lock().allocate_pages(order)
    }

    fn free_pages(&self, page: u64) {
        self.freelist.lock().free_pages(page);
    }

    unsafe fn free_pages_at_level(&self, page: u64, level: u64) {
        self.freelist.lock().free_pages_at_level(page, level);
    }

    /// Frees an arbitrary page range by chopping it into the largest
    /// naturally-aligned power-of-two blocks that fit.
    unsafe fn free_page_range(&self, range: &Range<u64>) {
        let end = range.end;
        let mut index = range.start;

        while index < range.end {
            // Block order is limited by the remaining length and by the
            // alignment of `index` itself.
            let order = cmp::min(
                buddy::LEVELS as u32 - 1,
                cmp::min(
                    numeric::ilog2(end - index),
                    numeric::lsb(index) - 1)) as u64;
            self.free_pages_at_level(index, order);
            index += 1 << order;
        }
    }

    fn page_range(&self) -> Range<u64> {
        self.space.clone()
    }

    fn contains(&self, page: u64) -> bool {
        self.space.contains(&page)
    }
}

/// Page allocator over all zones; addresses are physical byte addresses.
pub struct Memory<'a> {
    zones: &'a [Zone<'a>],
    page_size: u64,
}

impl<'a> Memory<'a> {
    /// Allocates 2^order contiguous pages; returns the block's byte address.
    pub fn allocate_pages(&mut self, order: u64) -> Option<u64> {
        for zone in self.zones {
            if let Some(index) = zone.allocate_pages(order) {
                return Some(self.page_address(index));
            }
        }
        None
    }

    /// Frees the block starting at page-aligned byte address `addr`.
    /// Panics when `addr` belongs to no zone.
    pub fn free_pages(&mut self, addr: u64) {
        assert_eq!(addr & (self.page_size - 1), 0);
        let page = self.address_page(addr);
        for zone in self.zones {
            if zone.contains(page) {
                zone.free_pages(page);
                return;
            }
        }

        panic!()
    }

    pub fn page_size(&self) -> u64 {
        self.page_size
    }

    pub fn page_address(&self, page: u64) -> u64 {
        self.page_size * page
    }

    pub fn address_page(&self, addr: u64) -> u64 {
        addr / self.page_size
    }
}

/// Allocates and zero-initializes the Page descriptor array for `zone`.
unsafe fn create_pages(
    zone: Range<u64>,
    page_size: u64,
    malloc: &mut MemoryMap) -> Option<&'static [Page]>
{
    let pages = ((zone.end - zone.start) / page_size) as usize;
    let bytes = pages * mem::size_of::<Page>();

    assert!((page_size & (page_size - 1)) == 0);
    assert!(mem::align_of::<Page>() <= page_size as usize);

    malloc.allocate_with_hint(zone, bytes as u64, page_size)
        .map(|addr| {
            let ptr = addr as *mut Page;

            // Page must be valid when all-zero for this bulk init to be
            // sound (checked at compile time).
            intrinsics::assert_zero_valid::<Page>();
            intrinsics::volatile_set_memory(ptr, 0, pages);
            slice::from_raw_parts(ptr as *const Page, pages)
        })
}

/// Carves per-zone Zone structures (and their Page arrays) out of `malloc`.
unsafe fn create_zones(
    page_size: u64,
    mmap: &MemoryMap,
    malloc: &mut MemoryMap) -> Option<&'static mut [Zone<'static>]>
{
    let zones = mmap.zones().len();
    let bytes = zones * mem::size_of::<Zone>();

    assert!((page_size & (page_size - 1)) == 0);
    assert!(mem::align_of::<Zone>() <= page_size as usize);

    malloc.allocate(bytes as u64, page_size)
        .map(|addr| {
            let ptr = addr as *mut Zone;
            for (index, zone) in mmap.zones().enumerate() {
                let start = numeric::align_up(zone.range.start, page_size);
                let end = numeric::align_down(zone.range.end, page_size);
                let pages = create_pages(
                    start..end, page_size, malloc).unwrap();

                ptr::write(
                    ptr.offset(index as isize),
                    Zone::new(pages, start / page_size));
            }
            slice::from_raw_parts_mut(ptr, zones)
        })
}

impl Memory<'static> {
    /// Builds the allocator from a memory map, then releases every page the
    /// map still reports as free into the per-zone buddy systems.
    pub unsafe fn new(mmap: &MemoryMap) -> Memory<'static> {
        const PAGE_SIZE: u64 = 4096;

        let mut malloc = mmap.clone();
        let zones = create_zones(PAGE_SIZE, mmap, &mut malloc).unwrap();

        for zone in zones.iter() {
            let pages = zone.page_range();
            let start = pages.start * PAGE_SIZE;
            let end = pages.end * PAGE_SIZE;

            for mem in malloc.free_memory_in_range(start..end) {
                let start = numeric::align_up(
                    mem.range.start, PAGE_SIZE) / PAGE_SIZE;
                let end = numeric::align_down(
                    mem.range.end, PAGE_SIZE) / PAGE_SIZE;
                zone.free_page_range(&(start..end))
            }
        }

        Memory { zones, page_size: PAGE_SIZE }
    }
}
-------------------------------------------------------------------------------- /kernel/memory/src/list.rs: --------------------------------------------------------------------------------
use core::default::Default;
use core::marker::PhantomData;
use core::option::Option;
use crate::page::Page;
use intrusive::IntrusiveList;

/// Typed, lifetime-checked wrapper around IntrusiveList for Page.
#[derive(Debug)]
pub struct PageList<'a> {
    list: IntrusiveList<Page>,
    _marker: PhantomData<&'a Page>,
}

impl<'a> PageList<'a> {
    pub fn new() -> PageList<'a> {
        // A throwaway Page is used only to compute the link field offset,
        // which is the same for every Page.
        let page = Page::new();

        PageList {
            list: IntrusiveList::new(page.link_offset()),
            _marker: PhantomData,
        }
    }

    pub unsafe fn push(&mut self, page: &'a Page) {
        self.list.push(page as *const Page);
    }

    pub unsafe fn pop(&mut self) -> Option<&'a Page> {
        self.list.pop().map(|ptr| { &*ptr })
    }

    pub unsafe fn remove(&mut self, page: &'a Page) {
        self.list.remove(page as *const Page);
    }
}

impl<'a> Default for PageList<'a> {
    fn default() -> PageList<'a> {
        PageList::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_push_pop() {
        let pages = [Page::new(), Page::new(), Page::new()];
        let mut l = PageList::new();

        unsafe {
            assert!(l.pop().is_none());
            l.push(&pages[0]);
            l.push(&pages[1]);
            l.push(&pages[2]);
            assert_eq!(
                l.pop().map(|x| x as *const Page),
                Some(&pages[2] as *const Page));
            assert_eq!(
                l.pop().map(|x| x as *const Page),
                Some(&pages[1] as *const Page));
            assert_eq!(
                l.pop().map(|x| x as *const Page),
                Some(&pages[0] as *const Page));
            assert!(l.pop().is_none());
        }
    }

    #[test]
    fn test_remove() {
        let pages = [Page::new(), Page::new(), Page::new()];
        let mut l = PageList::new();

        unsafe {
            l.push(&pages[0]);
            l.push(&pages[1]);
            l.push(&pages[2]);
            l.remove(&pages[0]);
            assert_eq!(
                l.pop().map(|x| x as *const Page),
                Some(&pages[2] as *const Page));
            assert_eq!(
                l.pop().map(|x| x as *const Page),
                Some(&pages[1] as *const Page));
            assert!(l.pop().is_none());
        }
        unsafe {
            l.push(&pages[0]);
            l.push(&pages[1]);
            l.push(&pages[2]);
            l.remove(&pages[1]);
            assert_eq!(
                l.pop().map(|x| x as *const Page),
                Some(&pages[2] as *const Page));
            assert_eq!(
                l.pop().map(|x| x as *const Page),
                Some(&pages[0] as *const Page));
            assert!(l.pop().is_none());
        }
        unsafe {
            l.push(&pages[0]);
            l.push(&pages[1]);
            l.push(&pages[2]);
            l.remove(&pages[2]);
            assert_eq!(
                l.pop().map(|x| x as *const Page),
                Some(&pages[1] as *const Page));
            assert_eq!(
                l.pop().map(|x| x as *const Page),
                Some(&pages[0] as *const Page));
            assert!(l.pop().is_none());
        }
        unsafe {
            l.push(&pages[0]);
            l.remove(&pages[0]);
            assert!(l.pop().is_none());
        }
    }
}
-------------------------------------------------------------------------------- /kernel/memory/src/page.rs: --------------------------------------------------------------------------------
use intrusive::{IntrusiveListLink, StructFieldOffset};
use core::cell::Cell;

/// Per-physical-page descriptor: a free-list link plus packed state.
/// (Stripped `Cell<u64>`/offset type arguments restored from extraction.)
#[derive(Debug)]
pub struct Page {
    pub link: IntrusiveListLink,
    state: Cell<u64>,
}

// state layout: low 8 bits hold the buddy level, bit 8 the free flag.
const LEVEL_MASK: u64 = 0x0ff;
const FREE_MASK: u64 = 0x100;

impl Page {
    pub fn new() -> Page {
        Page {
            link: IntrusiveListLink::new(),
            state: Cell::new(0),
        }
    }

    pub fn level(&self) -> u64 {
        self.state.get() & LEVEL_MASK
    }

    pub fn set_level(&self, level: u64) {
        assert_eq!(level & !LEVEL_MASK, 0);
        self.state.set(level | (self.state.get() & !LEVEL_MASK));
    }

    pub fn is_free(&self) -> bool {
        (self.state.get() & FREE_MASK) != 0
    }

    pub fn set_free(&self) {
        self.state.set(self.state.get() | FREE_MASK);
    }

    pub fn set_busy(&self) {
        self.state.set(self.state.get() & !FREE_MASK);
    }

    /// Offset of the embedded link, as needed by IntrusiveList.
    pub fn link_offset(&self) -> StructFieldOffset<Page, IntrusiveListLink> {
        StructFieldOffset::new(
            self as *const Page, &self.link as *const IntrusiveListLink)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use core::intrinsics;
| #[test] 54 | fn test_page_new() { 55 | let page = Page::new(); 56 | assert_eq!(page.level(), 0); 57 | assert!(!page.is_free()); 58 | } 59 | 60 | #[test] 61 | fn test_state() { 62 | let page = Page::new(); 63 | 64 | page.set_level(0xff); 65 | page.set_free(); 66 | assert_eq!(page.level(), 0xff); 67 | assert!(page.is_free()); 68 | page.set_level(0); 69 | assert_eq!(page.level(), 0); 70 | assert!(page.is_free()); 71 | page.set_busy(); 72 | assert_eq!(page.level(), 0); 73 | assert!(!page.is_free()); 74 | } 75 | 76 | #[test] 77 | fn test_zero_initialization() { 78 | unsafe { intrinsics::assert_zero_valid::(); } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /kernel/numeric/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "numeric" 3 | version = "0.1.0" 4 | authors = ["Mike Krinkin "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "numeric" 9 | crate-type = ["rlib"] 10 | -------------------------------------------------------------------------------- /kernel/numeric/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | 3 | pub fn lsb(mut x: u64) -> u32 { 4 | if x == 0 { 5 | return u32::MAX; 6 | } 7 | 8 | let mut r = 1; 9 | if (x & 0xffffffff) == 0 { 10 | x >>= 32; 11 | r += 32; 12 | } 13 | if (x & 0xffff) == 0 { 14 | x >>= 16; 15 | r += 16; 16 | } 17 | if (x & 0xff) == 0 { 18 | x >>= 8; 19 | r += 8; 20 | } 21 | if (x & 0xf) == 0 { 22 | x >>= 4; 23 | r += 4; 24 | } 25 | if (x & 0x3) == 0 { 26 | x >>= 2; 27 | r += 2; 28 | } 29 | if (x & 0x1) == 0 { 30 | r += 1; 31 | } 32 | r 33 | } 34 | 35 | pub fn msb(mut x: u64) -> u32 { 36 | if x == 0 { 37 | return 0; 38 | } 39 | 40 | let mut r = 64; 41 | if (x & 0xffffffff00000000) == 0 { 42 | x <<= 32; 43 | r -= 32; 44 | } 45 | if (x & 0xffff000000000000) == 0 { 46 | x <<= 16; 47 | r -= 16; 48 | } 49 | if (x & 0xff00000000000000) == 0 { 50 | x <<= 8; 51 | r 
-= 8; 52 | } 53 | if (x & 0xf000000000000000) == 0 { 54 | x <<= 4; 55 | r -= 4; 56 | } 57 | if (x & 0xc000000000000000) == 0 { 58 | x <<= 2; 59 | r -= 2; 60 | } 61 | if (x & 0x8000000000000000) == 0 { 62 | r -= 1; 63 | } 64 | r 65 | } 66 | 67 | pub fn ilog2(x: u64) -> u32 { 68 | assert_ne!(x, 0); 69 | msb(x) - 1 70 | } 71 | 72 | pub fn align_down(x: u64, align: u64) -> u64 { 73 | assert_eq!(align & (align - 1), 0); 74 | x & !(align - 1) 75 | } 76 | 77 | pub fn align_up(x: u64, align: u64) -> u64 { 78 | align_down(x + align - 1, align) 79 | } 80 | 81 | #[cfg(test)] 82 | mod tests { 83 | use super::*; 84 | 85 | #[test] 86 | fn test_align_down() { 87 | assert_eq!(align_down(0, 2), 0); 88 | assert_eq!(align_down(1, 2), 0); 89 | assert_eq!(align_down(2, 2), 2); 90 | assert_eq!(align_down(3, 2), 2); 91 | assert_eq!(align_down(4, 2), 4); 92 | } 93 | 94 | #[test] 95 | fn test_align_up() { 96 | assert_eq!(align_up(0, 2), 0); 97 | assert_eq!(align_up(1, 2), 2); 98 | assert_eq!(align_up(2, 2), 2); 99 | assert_eq!(align_up(3, 2), 4); 100 | assert_eq!(align_up(4, 2), 4); 101 | } 102 | 103 | #[test] 104 | fn test_msb() { 105 | assert_eq!(msb(0), 0); 106 | assert_eq!(msb(1), 1); 107 | assert_eq!(msb(2), 2); 108 | assert_eq!(msb(3), 2); 109 | assert_eq!(msb(4), 3); 110 | assert_eq!(msb(5), 3); 111 | assert_eq!(msb(6), 3); 112 | assert_eq!(msb(7), 3); 113 | assert_eq!(msb(8), 4); 114 | } 115 | 116 | #[test] 117 | fn test_lsb() { 118 | assert_eq!(lsb(0), u32::MAX); 119 | assert_eq!(lsb(1), 1); 120 | assert_eq!(lsb(2), 2); 121 | assert_eq!(lsb(3), 1); 122 | assert_eq!(lsb(4), 3); 123 | assert_eq!(lsb(5), 1); 124 | assert_eq!(lsb(6), 2); 125 | assert_eq!(lsb(7), 1); 126 | assert_eq!(lsb(8), 4); 127 | } 128 | 129 | #[test] 130 | fn test_ilog2() { 131 | assert_eq!(ilog2(1), 0); 132 | assert_eq!(ilog2(2), 1); 133 | assert_eq!(ilog2(3), 1); 134 | assert_eq!(ilog2(4), 2); 135 | assert_eq!(ilog2(5), 2); 136 | assert_eq!(ilog2(6), 2); 137 | assert_eq!(ilog2(7), 2); 138 | assert_eq!(ilog2(8), 
3); 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /kernel/pl011/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pl011" 3 | version = "0.1.0" 4 | authors = ["Mike Krinkin "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "pl011" 9 | crate-type = ["rlib"] 10 | -------------------------------------------------------------------------------- /kernel/pl011/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(test), no_std)] 2 | use core::ptr; 3 | 4 | #[derive(Copy, Clone)] 5 | pub struct PL011 { 6 | base_address: u64, 7 | base_clock: u32, 8 | baudrate: u32, 9 | data_bits: u32, 10 | stop_bits: u32, 11 | } 12 | 13 | #[derive(Copy, Clone)] 14 | enum Register { 15 | UARTDR = 0x000, 16 | UARTFR = 0x018, 17 | UARTIBRD = 0x024, 18 | UARTFBRD = 0x028, 19 | UARTLCR = 0x02c, 20 | UARTCR = 0x030, 21 | UARTIMSC = 0x038, 22 | UARTICR = 0x044, 23 | UARTDMACR = 0x048, 24 | } 25 | 26 | const UARTFR_BUSY: u32 = 1 << 3; 27 | const UARTLCR_STP2: u32 = 1 << 3; 28 | const UARTLCR_FEN: u32 = 1 << 4; 29 | const UARTCR_EN: u32 = 1 << 0; 30 | const UARTCR_TXEN: u32 = 1 << 8; 31 | 32 | 33 | impl PL011 { 34 | pub fn new(base_address: u64, base_clock: u32) -> Self { 35 | let dev = PL011 { 36 | base_address, 37 | base_clock, 38 | baudrate: 115200, 39 | data_bits: 8, 40 | stop_bits: 1, 41 | }; 42 | 43 | dev.wait_tx_complete(); 44 | dev.reset(); 45 | 46 | dev 47 | } 48 | 49 | pub fn reset(&self) { 50 | let cr = self.load(Register::UARTCR); 51 | let lcr = self.load(Register::UARTLCR); 52 | 53 | self.store(Register::UARTCR, cr & !UARTCR_EN); 54 | self.wait_tx_complete(); 55 | self.store(Register::UARTLCR, lcr & !UARTLCR_FEN); 56 | self.store(Register::UARTIMSC, 0x7ff); 57 | self.store(Register::UARTICR, 0x7ff); 58 | self.store(Register::UARTDMACR, 0x0); 59 | 60 | let (ibrd, fbrd) = self.calculate_divisors(); 61 | 62 | 
self.store(Register::UARTIBRD, ibrd); 63 | self.store(Register::UARTFBRD, fbrd); 64 | 65 | let mut lcr = ((self.data_bits - 1) & 0x3) << 5; 66 | if self.stop_bits == 2 { 67 | lcr |= UARTLCR_STP2; 68 | } 69 | self.store(Register::UARTLCR, lcr); 70 | 71 | self.store(Register::UARTCR, UARTCR_TXEN); 72 | self.store(Register::UARTCR, UARTCR_TXEN | UARTCR_EN); 73 | } 74 | 75 | pub fn send(&self, data: &str) { 76 | self.wait_tx_complete(); 77 | 78 | for b in data.bytes() { 79 | if b == b'\n' { 80 | self.store(Register::UARTDR, b'\r' as u32); 81 | self.wait_tx_complete(); 82 | } 83 | self.store(Register::UARTDR, b as u32); 84 | self.wait_tx_complete(); 85 | } 86 | } 87 | 88 | fn wait_tx_complete(&self) { 89 | loop { 90 | if (self.load(Register::UARTFR) & UARTFR_BUSY) == 0 { 91 | return; 92 | } 93 | } 94 | } 95 | 96 | fn load(&self, r: Register) -> u32 { 97 | let addr = self.base_address + (r as u64); 98 | 99 | unsafe { ptr::read_volatile(addr as *const u32) } 100 | } 101 | 102 | fn store(&self, r: Register, value: u32) { 103 | let addr = self.base_address + (r as u64); 104 | 105 | unsafe { ptr::write_volatile(addr as *mut u32, value); } 106 | } 107 | 108 | fn calculate_divisors(&self) -> (u32, u32) { 109 | let div = 110 | (8 * self.base_clock + self.baudrate) / (2 * self.baudrate); 111 | 112 | ((div >> 6) & 0xffff, div & 0x3f) 113 | } 114 | } 115 | 116 | #[cfg(test)] 117 | mod tests { 118 | use super::*; 119 | 120 | #[test] 121 | fn test_calculate_divisors() { 122 | let mut serial = PL011{ 123 | base_address: 0, 124 | base_clock: 4000000, 125 | baudrate: 0, 126 | data_bits: 0, 127 | stop_bits: 0 128 | }; 129 | 130 | serial.baudrate = 230400; 131 | assert_eq!(serial.calculate_divisors(), (0x1, 0x5)); 132 | serial.baudrate = 115200; 133 | assert_eq!(serial.calculate_divisors(), (0x2, 0xb)); 134 | serial.baudrate = 76800; 135 | assert_eq!(serial.calculate_divisors(), (0x3, 0x10)); 136 | serial.baudrate = 38400; 137 | assert_eq!(serial.calculate_divisors(), (0x6, 0x21)); 138 | 
serial.baudrate = 14400; 139 | assert_eq!(serial.calculate_divisors(), (0x11, 0x17)); 140 | serial.baudrate = 2400; 141 | assert_eq!(serial.calculate_divisors(), (0x68, 0xb)); 142 | serial.baudrate = 110; 143 | assert_eq!(serial.calculate_divisors(), (0x8e0, 0x2f)); 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /kernel/runtime/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "runtime" 3 | version = "0.1.0" 4 | authors = ["Mike Krinkin "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "runtime" 9 | crate-type = ["rlib"] 10 | 11 | [dependencies] 12 | bootstrap = { path = "../bootstrap" } 13 | log = { path = "../log" } 14 | -------------------------------------------------------------------------------- /kernel/runtime/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature(alloc_error_handler)] 3 | extern crate alloc; 4 | 5 | use bootstrap; 6 | use core::alloc::{GlobalAlloc, Layout}; 7 | use core::panic::PanicInfo; 8 | 9 | 10 | #[panic_handler] 11 | fn panic(_info: &PanicInfo) -> ! { 12 | loop {} 13 | } 14 | 15 | struct CustomAllocator; 16 | 17 | unsafe impl GlobalAlloc for CustomAllocator { 18 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 19 | bootstrap::allocate_aligned(layout.size(), layout.align()) 20 | } 21 | 22 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 23 | bootstrap::free_aligned(ptr, layout.size(), layout.align()); 24 | } 25 | } 26 | 27 | #[alloc_error_handler] 28 | fn alloc_error_handler(_layout: Layout) -> ! 
{ 29 | panic!("allocation failed!") 30 | } 31 | 32 | #[global_allocator] 33 | static A: CustomAllocator = CustomAllocator; 34 | -------------------------------------------------------------------------------- /kernel/sync/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sync" 3 | version = "0.1.0" 4 | authors = ["Mike Krinkin "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "sync" 9 | crate-type = ["rlib"] 10 | -------------------------------------------------------------------------------- /kernel/sync/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | pub mod placeholder; 3 | -------------------------------------------------------------------------------- /kernel/sync/src/placeholder.rs: -------------------------------------------------------------------------------- 1 | use core::cell::UnsafeCell; 2 | use core::marker::Sync; 3 | use core::ops::{Deref, DerefMut}; 4 | 5 | #[derive(Debug)] 6 | pub struct Mutex { 7 | data: UnsafeCell, 8 | } 9 | 10 | impl Mutex { 11 | pub const fn new(data: T) -> Mutex { 12 | Mutex { data: UnsafeCell::new(data) } 13 | } 14 | 15 | pub fn lock(&self) -> Lock { 16 | unsafe { Lock::new(self) } 17 | } 18 | } 19 | 20 | unsafe impl Sync for Mutex {} 21 | 22 | pub struct Lock<'a, T> { 23 | mutex: &'a Mutex, 24 | } 25 | 26 | impl<'a, T> Lock<'a, T> { 27 | unsafe fn new(mutex: &'a Mutex) -> Lock<'a, T> { 28 | Lock { mutex } 29 | } 30 | } 31 | 32 | impl<'a, T> Deref for Lock<'a, T> { 33 | type Target = T; 34 | 35 | fn deref(&self) -> &T { 36 | unsafe { &*self.mutex.data.get() } 37 | } 38 | } 39 | 40 | impl<'a, T> DerefMut for Lock<'a, T> { 41 | fn deref_mut(&mut self) -> &mut T { 42 | unsafe { &mut *self.mutex.data.get() } 43 | } 44 | } 45 | 46 | // There is nothing to do in Drop in a placeholder Lock because it's not a 47 | // real lock, but in a real one we'd have to actually drop the lock. 
48 | impl<'a, T> Drop for Lock<'a, T> { 49 | fn drop(&mut self) {} 50 | } 51 | 52 | #[cfg(test)] 53 | mod tests { 54 | use super::*; 55 | 56 | struct Data { 57 | data: isize, 58 | } 59 | 60 | #[test] 61 | fn test_mutex() { 62 | let mutex = Mutex::new(Data { data: 0 }); 63 | { 64 | let lock = mutex.lock(); 65 | assert_eq!(lock.data, 0); 66 | } 67 | { 68 | let mut lock = mutex.lock(); 69 | lock.data = 42; 70 | } 71 | { 72 | let lock = mutex.lock(); 73 | assert_eq!(lock.data, 42); 74 | } 75 | { 76 | let mut lock = mutex.lock(); 77 | *lock = Data { data: 0 }; 78 | } 79 | { 80 | let lock = mutex.lock(); 81 | assert_eq!(lock.data, 0); 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /memory/Makefile: -------------------------------------------------------------------------------- 1 | CXX := clang++ 2 | AR := llvm-ar 3 | CXXFLAGS := \ 4 | -MMD -mno-red-zone -std=c++17 -ffreestanding -fno-threadsafe-statics \ 5 | -fno-exceptions -fno-rtti -Ofast -g -fPIE -target aarch64-unknown-none \ 6 | -Wall -Werror -Wframe-larger-than=1024 -pedantic -I.. 
-I../c -I../cc 7 | 8 | CXXSRCS := phys.cc memory.cc cache.cc alloc.cc 9 | CXXOBJS := $(CXXSRCS:.cc=.o) 10 | 11 | OBJS := $(CXXOBJS) 12 | 13 | default: all 14 | 15 | %.o: %.cc 16 | $(CXX) $(CXXFLAGS) -c $< -o $@ 17 | 18 | libmemory.a: $(OBJS) 19 | $(AR) rc $@ $^ 20 | 21 | -include $(CXXSRCS:.cc=.d) 22 | 23 | .PHONY: clean all default 24 | 25 | all: libmemory.a 26 | 27 | clean: 28 | rm -rf *.o *.d *.a 29 | -------------------------------------------------------------------------------- /memory/alloc.cc: -------------------------------------------------------------------------------- 1 | #include "alloc.h" 2 | 3 | #include 4 | 5 | #include "cache.h" 6 | #include "memory.h" 7 | #include "common/math.h" 8 | 9 | namespace memory { 10 | 11 | namespace { 12 | 13 | constexpr size_t kMaxAlignment = 32; 14 | 15 | struct Metadata { 16 | Cache* cache; 17 | Contigous mem; 18 | }; 19 | 20 | Cache caches[] = { 21 | Cache(128, 128), 22 | Cache(256, 256), 23 | Cache(384, 384), 24 | Cache(512, 512), 25 | Cache(640, 640), 26 | Cache(768, 768), 27 | Cache(896, 896), 28 | Cache(1024, 1024), 29 | Cache(1152, 1152), 30 | Cache(1280, 1280), 31 | Cache(1408, 1408), 32 | Cache(1536, 1536), 33 | Cache(1664, 1664), 34 | Cache(1792, 1792), 35 | Cache(1920, 1920), 36 | Cache(2048, 2048), 37 | Cache(2176, 2176), 38 | Cache(2304, 2304), 39 | Cache(2432, 2432), 40 | Cache(2560, 2560), 41 | Cache(2688, 2688), 42 | Cache(2816, 2816), 43 | Cache(2944, 2944), 44 | Cache(3072, 3072), 45 | Cache(3200, 3200), 46 | Cache(3328, 3328), 47 | Cache(3456, 3456), 48 | Cache(3584, 3584), 49 | Cache(3712, 3712), 50 | Cache(3840, 3840), 51 | Cache(3968, 3968), 52 | Cache(4096, 4096), 53 | }; 54 | 55 | Cache* CacheFor(size_t size) { 56 | const size_t index = size / 128; 57 | if (index >= sizeof(caches)/sizeof(caches[0])) { 58 | return nullptr; 59 | } 60 | return &caches[index]; 61 | } 62 | 63 | constexpr size_t MetadataSize() { 64 | return common::AlignUp(sizeof(Metadata), kMaxAlignment); 65 | } 66 | 67 | Metadata* 
MetadataFor(const void* ptr) { 68 | return reinterpret_cast( 69 | reinterpret_cast(ptr) - MetadataSize()); 70 | } 71 | 72 | } // namespace 73 | 74 | void* Allocate(size_t size) { 75 | const size_t allocation_size = size + MetadataSize(); 76 | 77 | Cache* cache = CacheFor(allocation_size); 78 | if (cache != nullptr) { 79 | void* ptr = cache->Allocate(); 80 | if (ptr == nullptr) { 81 | return nullptr; 82 | } 83 | 84 | Metadata* m = reinterpret_cast(ptr); 85 | m->cache = cache; 86 | m->mem = Contigous(nullptr); 87 | return reinterpret_cast( 88 | reinterpret_cast(ptr) + MetadataSize()); 89 | } 90 | 91 | auto mem = AllocatePhysical(allocation_size); 92 | if (mem) { 93 | Metadata* m = reinterpret_cast(mem->FromAddress()); 94 | m->cache = nullptr; 95 | m->mem = *mem; 96 | return reinterpret_cast(mem->FromAddress() + MetadataSize()); 97 | } 98 | 99 | return nullptr; 100 | } 101 | 102 | /* Grows (or reuses) the allocation at ptr to hold at least new_size bytes; nullptr behaves like Allocate(new_size), matching realloc() semantics. */ void* Reallocate(void* ptr, size_t new_size) { 103 | if (ptr == nullptr) { return Allocate(new_size); } Metadata* m = MetadataFor(ptr); 104 | size_t old_size = 0; 105 | 106 | if (m->cache != nullptr) { 107 | Cache* cache = m->cache; 108 | if (cache->ObjectSize() - MetadataSize() >= new_size) { /* fix: usable bytes exclude the metadata header */ 109 | return ptr; 110 | } 111 | old_size = cache->ObjectSize() - MetadataSize(); 112 | } 113 | 114 | if (m->mem != Contigous(nullptr)) { 115 | if (m->mem.Size() - MetadataSize() >= new_size) { /* fix: usable bytes exclude the metadata header */ 116 | return ptr; 117 | } 118 | old_size = m->mem.Size() - MetadataSize(); 119 | } 120 | 121 | void* new_ptr = Allocate(new_size); 122 | if (new_ptr != nullptr) { 123 | memcpy(new_ptr, ptr, old_size); 124 | Free(ptr); 125 | return new_ptr; 126 | } 127 | return nullptr; 128 | } 129 | 130 | /* Returns the allocation at ptr to its owning cache or to the physical allocator; freeing nullptr is a no-op, matching free() semantics. */ void Free(void* ptr) { 131 | if (ptr == nullptr) { return; } Metadata* m = MetadataFor(ptr); 132 | 133 | if (m->cache != nullptr) { 134 | Cache* cache = m->cache; 135 | cache->Free(reinterpret_cast(m)); 136 | return; 137 | } 138 | 139 | FreePhysical(m->mem); 140 | } 141 | 142 | } // namespace memory 143 | -------------------------------------------------------------------------------- /memory/alloc.h:
-------------------------------------------------------------------------------- 1 | #ifndef __MEMORY_ALLOC_H__ 2 | #define __MEMORY_ALLOC_H__ 3 | 4 | #include 5 | 6 | namespace memory { 7 | 8 | void* Allocate(size_t size); 9 | void* Reallocate(void* ptr, size_t new_size); 10 | void Free(void* ptr); 11 | 12 | } // namespace memory 13 | 14 | #endif // __MEMORY_ALLOC_H__ 15 | -------------------------------------------------------------------------------- /memory/arch.h: -------------------------------------------------------------------------------- 1 | #ifndef __MEMORY_ARCH_H__ 2 | #define __MEMORY_ARCH_H__ 3 | 4 | #include 5 | 6 | namespace memory { 7 | 8 | inline void SetMairEl2(uint64_t mair) { 9 | asm volatile("msr MAIR_EL2, %0" : : "r"(mair)); 10 | } 11 | 12 | inline uint64_t GetMairEl2() { 13 | uint64_t mair; 14 | asm volatile("mrs %0, MAIR_EL2" : "=r"(mair)); 15 | return mair; 16 | } 17 | 18 | inline void SetTtbar0El2(uintptr_t ttbr) { 19 | asm volatile("msr TTBR0_EL2, %0" : : "r"(ttbr)); 20 | } 21 | 22 | inline uintptr_t GetTtbar0El2() { 23 | uintptr_t ttbr; 24 | asm volatile("mrs %0, TTBR0_EL2" : "=r"(ttbr)); 25 | return ttbr; 26 | } 27 | 28 | } // namespace memory 29 | 30 | #endif // __MEMORY_ARCH_H__ 31 | -------------------------------------------------------------------------------- /memory/cache.cc: -------------------------------------------------------------------------------- 1 | #include "cache.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "common/math.h" 9 | 10 | namespace memory { 11 | 12 | namespace impl { 13 | 14 | namespace { 15 | 16 | size_t ObjectSize(size_t size, size_t alignment) { 17 | return common::AlignUp(std::max(size, sizeof(Storage)), alignment); 18 | } 19 | 20 | size_t SlabSize(size_t size, size_t control) { 21 | constexpr size_t kMinObjects = 32; 22 | constexpr size_t kMinSize = 4096; 23 | 24 | const size_t min_bytes = size * kMinObjects + control; 25 | 26 | if (kMinSize >= min_bytes) { 27 | return 
kMinSize; 28 | } 29 | 30 | const size_t order = common::MostSignificantBit(min_bytes - 1) + 1; 31 | return static_cast(1) << order; 32 | } 33 | 34 | Layout MakeLayout(size_t size, size_t alignment) { 35 | const size_t control_size = sizeof(Slab); 36 | const size_t object_size = ObjectSize(size, alignment); 37 | const size_t slab_size = SlabSize(object_size, control_size); 38 | 39 | Layout layout; 40 | layout.object_size = object_size; 41 | layout.object_offset = 0; 42 | layout.objects = (slab_size - control_size) / object_size; 43 | layout.control_offset = slab_size - control_size; 44 | layout.slab_size = slab_size; 45 | return layout; 46 | } 47 | 48 | [[ noreturn ]] void Panic() { 49 | while (1) { 50 | asm volatile("":::"memory"); 51 | } 52 | } 53 | 54 | } // namespace 55 | 56 | 57 | Storage::Storage(void* ptr) : pointer(ptr) {} 58 | 59 | Slab::Slab(const Cache* cache, Contigous mem, Layout layout) 60 | : cache_(cache), memory_(mem) { 61 | const uintptr_t from = memory_.FromAddress() + layout.object_offset; 62 | const uintptr_t to = from + layout.object_size * layout.objects; 63 | 64 | for (uintptr_t addr = from; addr < to; addr += layout.object_size) { 65 | void* ptr = reinterpret_cast(addr); 66 | Storage* storage = reinterpret_cast(ptr); 67 | ::new (storage) Storage(ptr); 68 | freelist_.PushBack(storage); 69 | } 70 | } 71 | 72 | Slab::~Slab() { 73 | if (Allocated() != 0) { 74 | Panic(); 75 | } 76 | cache_ = nullptr; 77 | } 78 | 79 | const Cache* Slab::Owner() const { return cache_; } 80 | 81 | Contigous Slab::Memory() const { return memory_; } 82 | 83 | size_t Slab::Allocated() const { return allocated_; } 84 | 85 | bool Slab::Empty() const { return freelist_.Empty(); } 86 | 87 | void* Slab::Allocate() { 88 | Storage* storage = freelist_.PopFront(); 89 | if (storage == nullptr) { 90 | return nullptr; 91 | } 92 | 93 | void* ptr = storage->pointer; 94 | storage->~Storage(); 95 | ++allocated_; 96 | return ptr; 97 | } 98 | 99 | bool Slab::Free(void* ptr) { 100 | 
const uintptr_t addr = reinterpret_cast(ptr); 101 | if (addr < memory_.FromAddress() || addr >= memory_.ToAddress()) { 102 | return false; 103 | } 104 | Storage* storage = reinterpret_cast(ptr); 105 | ::new (storage) Storage(ptr); 106 | freelist_.PushFront(storage); 107 | --allocated_; 108 | return true; 109 | } 110 | 111 | 112 | Allocator::Allocator(struct Layout layout) : allocated_(0), layout_(layout) {} 113 | 114 | Slab* Allocator::Allocate(const Cache* cache) { 115 | auto mem = AllocatePhysical(layout_.slab_size); 116 | if (!mem) { 117 | return nullptr; 118 | } 119 | Slab* slab = reinterpret_cast( 120 | mem->FromAddress() + layout_.control_offset); 121 | ::new (slab) Slab(cache, *mem, layout_); 122 | allocated_ += layout_.slab_size; 123 | return slab; 124 | } 125 | 126 | void Allocator::Free(Slab* slab) { 127 | Contigous mem = slab->Memory(); 128 | slab->~Slab(); 129 | allocated_ -= layout_.slab_size; 130 | FreePhysical(mem); 131 | } 132 | 133 | Slab* Allocator::Find(void* ptr) { 134 | const uintptr_t addr = reinterpret_cast(ptr); 135 | const uintptr_t head = common::AlignDown( 136 | addr, static_cast(layout_.slab_size)); 137 | Slab* slab = reinterpret_cast(head + layout_.control_offset); 138 | Contigous memory = slab->Memory(); 139 | if (addr < memory.FromAddress() || addr >= memory.ToAddress()) { 140 | return nullptr; 141 | } 142 | return slab; 143 | } 144 | 145 | uintptr_t Allocator::Allocated() const { return allocated_; } 146 | 147 | Layout Allocator::Layout() const { return layout_; } 148 | 149 | } // namespace impl 150 | 151 | 152 | Cache::Cache(size_t size, size_t alignment) 153 | : layout_(impl::MakeLayout(size, alignment)) 154 | , allocator_(layout_) 155 | {} 156 | 157 | Cache::~Cache() { 158 | if (!partial_.Empty() || !full_.Empty()) { 159 | impl::Panic(); 160 | } 161 | Reclaim(); 162 | } 163 | 164 | size_t Cache::Allocated() const { return allocated_; } 165 | 166 | size_t Cache::Occupied() const { return allocator_.Allocated(); } 167 | 168 | size_t 
Cache::Reclaimable() const { return reclaimable_; } 169 | 170 | size_t Cache::ObjectSize() const { return layout_.object_size; } 171 | 172 | bool Cache::Reclaim() { 173 | bool ret = Reclaimable() != 0; 174 | for (impl::Slab* slab = free_.PopFront(); 175 | slab != nullptr; 176 | slab = free_.PopFront()) { 177 | allocator_.Free(slab); 178 | } 179 | reclaimable_ = 0; 180 | return ret; 181 | } 182 | 183 | void* Cache::Allocate() { 184 | if (!partial_.Empty()) { 185 | impl::Slab* slab = partial_.Front(); 186 | if (slab->Allocated() + 1 == layout_.objects) { 187 | partial_.Unlink(slab); 188 | full_.PushFront(slab); 189 | } 190 | allocated_ += layout_.object_size; 191 | return slab->Allocate(); 192 | } 193 | 194 | if (!free_.Empty()) { 195 | impl::Slab* slab = free_.PopFront(); 196 | partial_.PushFront(slab); 197 | allocated_ += layout_.object_size; 198 | reclaimable_ -= layout_.slab_size; 199 | return slab->Allocate(); 200 | } 201 | 202 | impl::Slab* slab = allocator_.Allocate(this); 203 | if (slab == nullptr) { 204 | return nullptr; 205 | } 206 | partial_.PushFront(slab); 207 | allocated_ += layout_.object_size; 208 | return slab->Allocate(); 209 | } 210 | 211 | bool Cache::Free(void* ptr) { 212 | if (ptr == nullptr) { 213 | return false; 214 | } 215 | 216 | impl::Slab* slab = allocator_.Find(ptr); 217 | if (slab == nullptr) { 218 | return false; 219 | } 220 | 221 | if (slab->Owner() != this) { 222 | impl::Panic(); 223 | } 224 | 225 | if (slab->Allocated() == 0) { 226 | return false; 227 | } 228 | 229 | if (!slab->Free(ptr)) { 230 | return false; 231 | } 232 | 233 | if (slab->Allocated() == 0) { 234 | partial_.Unlink(slab); 235 | free_.PushFront(slab); 236 | reclaimable_ += layout_.slab_size; 237 | } 238 | 239 | if (slab->Allocated() + 1 == layout_.objects) { 240 | full_.Unlink(slab); 241 | partial_.PushFront(slab); 242 | } 243 | 244 | allocated_ -= layout_.object_size; 245 | return true; 246 | } 247 | 248 | } // namespace memory 249 | 
-------------------------------------------------------------------------------- /memory/cache.h: -------------------------------------------------------------------------------- 1 | #ifndef __MEMORY_CACHE_H__ 2 | #define __MEMORY_CACHE_H__ 3 | 4 | #include 5 | 6 | #include "common/intrusive_list.h" 7 | #include "memory.h" 8 | 9 | 10 | namespace memory { 11 | 12 | class Cache; 13 | 14 | namespace impl { 15 | 16 | struct Layout { 17 | size_t object_size; 18 | size_t object_offset; 19 | size_t objects; 20 | size_t control_offset; 21 | size_t slab_size; 22 | }; 23 | 24 | 25 | struct Storage : public common::ListNode { 26 | void* pointer; 27 | 28 | Storage(void* ptr); 29 | }; 30 | 31 | 32 | class Slab : public common::ListNode { 33 | public: 34 | Slab(const Cache* cache, Contigous mem, Layout layout); 35 | ~Slab(); 36 | 37 | Slab(const Slab& other) = delete; 38 | Slab& operator=(const Slab& other) = delete; 39 | Slab(Slab&& other) = delete; 40 | Slab& operator=(Slab&& other) = delete; 41 | 42 | const Cache* Owner() const; 43 | Contigous Memory() const; 44 | size_t Allocated() const; 45 | bool Empty() const; 46 | void* Allocate(); 47 | bool Free(void* ptr); 48 | 49 | private: 50 | common::IntrusiveList freelist_; 51 | size_t allocated_ = 0; 52 | const Cache* cache_; 53 | Contigous memory_; 54 | }; 55 | 56 | class Allocator { 57 | public: 58 | Allocator(Layout layout); 59 | 60 | Allocator(const Allocator&) = delete; 61 | Allocator& operator=(const Allocator&) = delete; 62 | Allocator(Allocator&&) = default; 63 | Allocator& operator=(Allocator&&) = default; 64 | 65 | Slab* Allocate(const Cache* cache); 66 | void Free(Slab* slab); 67 | Slab* Find(void* ptr); 68 | 69 | uintptr_t Allocated() const; 70 | struct Layout Layout() const; 71 | 72 | private: 73 | uintptr_t allocated_; 74 | struct Layout layout_; 75 | }; 76 | 77 | } // namespace impl 78 | 79 | 80 | class Cache { 81 | public: 82 | Cache(size_t size, size_t alignment); 83 | ~Cache(); 84 | 85 | Cache(const Cache& 
other) = delete; 86 | Cache& operator=(const Cache& other) = delete; 87 | Cache(Cache&& other) = delete; 88 | Cache& operator=(Cache&& other) = delete; 89 | 90 | size_t Allocated() const; 91 | size_t Occupied() const; 92 | size_t Reclaimable() const; 93 | size_t ObjectSize() const; 94 | 95 | bool Reclaim(); 96 | void* Allocate(); 97 | bool Free(void* ptr); 98 | 99 | private: 100 | impl::Layout layout_; 101 | impl::Allocator allocator_; 102 | common::IntrusiveList free_; 103 | common::IntrusiveList partial_; 104 | common::IntrusiveList full_; 105 | uintptr_t allocated_ = 0; 106 | uintptr_t reclaimable_ = 0; 107 | }; 108 | 109 | } // namespace memory 110 | 111 | #endif // __MEMORY_CACHE_H__ 112 | -------------------------------------------------------------------------------- /memory/memory.h: -------------------------------------------------------------------------------- 1 | #ifndef __MEMORY_H__ 2 | #define __MEMORY_H__ 3 | 4 | #include 5 | #include 6 | 7 | #include "common/intrusive_list.h" 8 | #include "phys.h" 9 | 10 | 11 | namespace memory { 12 | 13 | constexpr size_t kMaxOrder = 20; 14 | constexpr size_t kPageBits = 12; 15 | constexpr size_t kPageSize = (1 << kPageBits); 16 | 17 | 18 | struct Page : public common::ListNode { 19 | uint64_t flags; 20 | size_t order; 21 | }; 22 | 23 | 24 | class Zone { 25 | public: 26 | Zone(Page* page, size_t pages, uintptr_t from, uintptr_t to); 27 | 28 | Zone(const Zone&) = delete; 29 | Zone& operator=(const Zone&) = delete; 30 | Zone(Zone&&) = delete; 31 | Zone& operator=(Zone&&) = delete; 32 | 33 | Page* AllocatePages(size_t order); 34 | void FreePages(Page* pages); 35 | void FreePages(uintptr_t addr); 36 | void FreePages(uintptr_t addr, size_t order); 37 | 38 | size_t Offset() const; 39 | size_t PageOffset(const Page* page) const; 40 | uintptr_t PageAddress(const Page* page) const; 41 | size_t Pages() const; 42 | size_t Available() const; 43 | uintptr_t FromAddress() const; 44 | uintptr_t ToAddress() const; 45 | 46 | 47 |
private: 48 | Page* Split(Page* page, size_t from, size_t to); 49 | void Unite(Page* page, size_t from); 50 | 51 | Page* page_; 52 | size_t pages_; 53 | size_t available_; 54 | uintptr_t from_; 55 | uintptr_t to_; 56 | common::IntrusiveList free_[kMaxOrder + 1]; 57 | }; 58 | 59 | 60 | class Contigous { 61 | public: 62 | Contigous(); 63 | Contigous(nullptr_t); 64 | Contigous(Zone* zone, Page* pages, size_t order); 65 | 66 | Contigous(const Contigous& other) = default; 67 | Contigous& operator=(const Contigous& other) = default; 68 | Contigous(Contigous&& other) = default; 69 | Contigous& operator=(Contigous&& other) = default; 70 | 71 | class Zone* Zone(); 72 | const class Zone* Zone() const; 73 | Page* Pages(); 74 | const Page* Pages() const; 75 | size_t Order() const; 76 | 77 | uintptr_t FromAddress() const; 78 | uintptr_t ToAddress() const; 79 | size_t Size() const; 80 | 81 | private: 82 | class Zone* zone_; 83 | struct Page* pages_; 84 | size_t order_; 85 | }; 86 | 87 | bool operator==(const Contigous& l, const Contigous& r); 88 | bool operator!=(const Contigous& l, const Contigous& r); 89 | 90 | 91 | bool SetupAllocator(MemoryMap* map); 92 | 93 | std::optional AllocatePhysical(size_t size); 94 | void FreePhysical(Contigous mem); 95 | void FreePhysical(uintptr_t addr); 96 | 97 | size_t TotalPhysical(); 98 | size_t AvailablePhysical(); 99 | 100 | } // namespace memory 101 | 102 | #endif // __MEMORY_H__ 103 | -------------------------------------------------------------------------------- /memory/phys.cc: -------------------------------------------------------------------------------- 1 | #include "phys.h" 2 | 3 | #include 4 | 5 | #include "common/math.h" 6 | 7 | 8 | namespace memory { 9 | 10 | namespace { 11 | 12 | bool CanMerge(const MemoryRange& prev, const MemoryRange& next) { 13 | return next.begin == prev.end && next.status == prev.status; 14 | } 15 | 16 | bool IsEmpty(const MemoryRange& range) { 17 | return range.begin == range.end; 18 | } 19 | 20 | 
MemoryRange* Compact(MemoryRange* begin, MemoryRange* end) { 21 | if (begin == end) { 22 | return end; 23 | } 24 | 25 | MemoryRange* pos = begin; 26 | for (MemoryRange* it = pos + 1; it != end; ++it) { 27 | if (CanMerge(*pos, *it)) { 28 | pos->end = it->end; 29 | continue; 30 | } 31 | 32 | if (IsEmpty(*pos)) { 33 | *pos = *it; 34 | } else { 35 | ++pos; 36 | *pos = *it; 37 | } 38 | } 39 | 40 | if (IsEmpty(*pos)) { 41 | return pos; 42 | } 43 | return pos + 1; 44 | } 45 | 46 | } // namespace 47 | 48 | 49 | /* Marks [begin, end) with the given status, splitting the first and last overlapping ranges at the boundaries when they extend past the requested span, then re-compacts adjacent ranges of equal status. */ bool MemoryMap::SetStatus(uintptr_t begin, uintptr_t end, MemoryStatus status) { 50 | MemoryRange range; 51 | range.begin = begin; 52 | range.end = end; 53 | range.status = status; 54 | 55 | auto less = [](const MemoryRange& l, const MemoryRange& r) -> bool { 56 | return l.end <= r.begin; 57 | }; 58 | 59 | MemoryRange *from = std::lower_bound( 60 | ranges_.Begin(), ranges_.End(), range, less); 61 | MemoryRange *to = std::upper_bound( 62 | ranges_.Begin(), ranges_.End(), range, less); 63 | 64 | if (from == to) { 65 | return true; 66 | } 67 | 68 | if (from->begin < range.begin) { 69 | MemoryRange head; 70 | head.begin = from->begin; 71 | head.end = range.begin; 72 | head.status = from->status; 73 | 74 | from->begin = head.end; 75 | if (!ranges_.Insert(from, head)) { 76 | return false; 77 | } 78 | 79 | ++from; 80 | ++to; 81 | } 82 | 83 | if ((to - 1)->end > range.end) { 84 | MemoryRange tail; 85 | tail.begin = range.end; 86 | tail.end = (to - 1)->end; 87 | tail.status = (to - 1)->status; 88 | 89 | (to - 1)->end = tail.begin; 90 | if (!ranges_.Insert(to, tail)) { 91 | return false; 92 | } 93 | } 94 | 95 | for (MemoryRange *it = from; it != to; ++it) { /* fix: was "++to", which advanced the loop bound instead of the iterator and never terminated */ 96 | it->status = status; 97 | } 98 | 99 | ranges_.Erase( 100 | Compact(ranges_.Begin(), ranges_.End()), 101 | ranges_.End()); 102 | return true; 103 | } 104 | 105 | bool MemoryMap::Register(uintptr_t begin, uintptr_t end, MemoryStatus status) { 106 | MemoryRange range; 107 | range.begin = begin; 108 | range.end = end; 109
| range.status = status; 110 | 111 | auto less = [](const MemoryRange& l, const MemoryRange& r) -> bool { 112 | return l.end <= r.begin; 113 | }; 114 | 115 | auto from = std::lower_bound( 116 | ranges_.Begin(), ranges_.End(), range, less); 117 | auto to = std::upper_bound( 118 | ranges_.Begin(), ranges_.End(), range, less); 119 | 120 | if (from == to) { 121 | return ranges_.Insert(from, range); 122 | } 123 | 124 | for (auto it = from; it != to; ++it) { 125 | if (it->status != range.status) { 126 | return false; 127 | } 128 | } 129 | 130 | range.begin = std::min(range.begin, from->begin); 131 | range.end = std::max(range.end, (to - 1)->end); 132 | 133 | ranges_.Insert(ranges_.Erase(from, to), range); 134 | ranges_.Erase( 135 | Compact(ranges_.Begin(), ranges_.End()), 136 | ranges_.End()); 137 | return true; 138 | } 139 | 140 | bool MemoryMap::Reserve(uintptr_t begin, uintptr_t end) { 141 | return SetStatus(begin, end, MemoryStatus::RESERVED); 142 | } 143 | 144 | bool MemoryMap::Release(uintptr_t begin, uintptr_t end) { 145 | return SetStatus(begin, end, MemoryStatus::FREE); 146 | } 147 | 148 | bool MemoryMap::FindIn( 149 | uintptr_t begin, uintptr_t end, 150 | size_t size, size_t alignment, MemoryStatus status, 151 | uintptr_t* ret) { 152 | MemoryRange range; 153 | range.begin = begin; 154 | range.end = end; 155 | range.status = status; 156 | 157 | auto less = [](const MemoryRange& l, const MemoryRange& r) -> bool { 158 | return l.end <= r.begin; 159 | }; 160 | 161 | MemoryRange *from = std::lower_bound( 162 | ranges_.Begin(), ranges_.End(), range, less); 163 | MemoryRange *to = std::upper_bound( 164 | ranges_.Begin(), ranges_.End(), range, less); 165 | 166 | for (MemoryRange *it = from; it != to; ++it) { 167 | MemoryRange r = *it; 168 | 169 | if (r.status != status) { 170 | continue; 171 | } 172 | 173 | r.begin = common::Clamp(r.begin, begin, end); 174 | r.end = common::Clamp(r.end, begin, end); 175 | 176 | const uintptr_t addr = common::AlignUp( 177 | r.begin, 
static_cast(alignment)); 178 | if (addr + size <= r.end) { 179 | *ret = addr; 180 | return true; 181 | } 182 | } 183 | return false; 184 | } 185 | 186 | bool MemoryMap::AllocateIn( 187 | uintptr_t begin, uintptr_t end, 188 | size_t size, size_t alignment, 189 | uintptr_t* ret) 190 | { 191 | uintptr_t addr; 192 | 193 | if (!FindIn(begin, end, size, alignment, MemoryStatus::FREE, &addr)) { 194 | return false; 195 | } 196 | 197 | if (!Reserve(addr, addr + size)) { 198 | return false; 199 | } 200 | 201 | *ret = addr; 202 | return true; 203 | } 204 | 205 | bool MemoryMap::Allocate(size_t size, size_t alignment, uintptr_t* ret) { 206 | return AllocateIn(0, ~static_cast(0), size, alignment, ret); 207 | } 208 | 209 | } // namespace memory 210 | -------------------------------------------------------------------------------- /memory/phys.h: -------------------------------------------------------------------------------- 1 | #ifndef __MEMORY_PHYS_H__ 2 | #define __MEMORY_PHYS_H__ 3 | 4 | #include 5 | #include 6 | 7 | #include "common/fixed_vector.h" 8 | 9 | 10 | namespace memory { 11 | 12 | enum class MemoryStatus { 13 | RESERVED, 14 | FREE, 15 | }; 16 | 17 | struct MemoryRange { 18 | uintptr_t begin; 19 | uintptr_t end; 20 | MemoryStatus status; 21 | }; 22 | 23 | class MemoryMap { 24 | public: 25 | MemoryMap() {} 26 | 27 | MemoryMap(const MemoryMap& other) = default; 28 | MemoryMap(MemoryMap&& other) = default; 29 | MemoryMap& operator=(const MemoryMap& other) = default; 30 | MemoryMap& operator=(MemoryMap&& other) = default; 31 | 32 | bool Register(uintptr_t begin, uintptr_t end, MemoryStatus status); 33 | bool Reserve(uintptr_t begin, uintptr_t end); 34 | bool Release(uintptr_t begin, uintptr_t end); 35 | bool AllocateIn( 36 | uintptr_t begin, uintptr_t end, 37 | size_t size, size_t alignment, 38 | uintptr_t *ret); 39 | bool Allocate(size_t size, size_t alignment, uintptr_t* ret); 40 | 41 | const MemoryRange* ConstBegin() const { return ranges_.ConstBegin(); } 42 | const 
MemoryRange* ConstEnd() const { return ranges_.ConstEnd(); } 43 | 44 | private: 45 | bool SetStatus(uintptr_t begin, uintptr_t end, MemoryStatus status); 46 | bool FindIn( 47 | uintptr_t begin, uintptr_t end, 48 | size_t size, size_t alignment, MemoryStatus status, 49 | uintptr_t* ret); 50 | 51 | common::FixedVector ranges_; 52 | }; 53 | 54 | } // namespace memory 55 | 56 | #endif // __MEMORY_PHYS_H__ 57 | -------------------------------------------------------------------------------- /memory/space.cc: -------------------------------------------------------------------------------- 1 | #include "space.h" 2 | 3 | namespace memory { 4 | 5 | namespace impl { 6 | 7 | PageTable::PageTable(uintptr_t address) 8 | : descriptors_(reinterpret_cast(address)) 9 | {} 10 | 11 | uintptr_t PageTable::Address() const { 12 | return reinterpret_cast(descriptors_); 13 | } 14 | 15 | bool PageTable::IsClear(size_t entry) const { 16 | return (descriptors_[entry] & kPresent) == 0; 17 | } 18 | 19 | bool PageTable::IsTable(size_t entry) const { 20 | const uint64_t mask = kPresent | kTable; 21 | return (descriptors_[entry] & mask) == mask; 22 | } 23 | 24 | bool PageTable::IsMemory(size_t entry) const { 25 | const uint64_t mask = kPresent | kTable; 26 | return (descriptors_[entry] & mask) == kPresent; 27 | } 28 | 29 | void PageTable::Clear(size_t entry) { 30 | descriptors_[entry] = 0; 31 | } 32 | 33 | void PageTable::SetTable(size_t entry, const PageTable& table) { 34 | descriptors_[entry] = table.Address() | kPresent | kTable; 35 | } 36 | 37 | void PageTable::SetMemory(size_t entry, const Memory& memory) { 38 | descriptors_[entry] = memory.addr | memory.attr | kPresent; 39 | } 40 | 41 | Memory PageTable::GetMemory(size_t entry) const { 42 | addr = descriptors_[entry] & kAddressMask; 43 | attr = descriptors_[entry] & kAttributesMask; 44 | return Memory{ 45 | .addr = addr, 46 | .attr = attr, 47 | }; 48 | } 49 | 50 | PageTable PageTable::GetTable(size_t entry) const { 51 | return 
PageTable(descriptors_[entry] & kAddressMask); 52 | } 53 | 54 | } // namespace impl 55 | 56 | bool SetupMapping() { 57 | // AArch64 System Register Descriptions, D13.2 General system control 58 | // registers, D13.2.85 MAIR_EL2, Memory Attribute Indirection Register (EL2) 59 | // for the encoding of the MAIR_EL2. 60 | constexpr uint64_t kNormalOuterWriteBackNonTransient = 0xc0; 61 | constexpr uint64_t kOuterReadAllocate = 0x20; 62 | constexpr uint64_t kOuterWriteAllocate = 0x10; 63 | 64 | constexpr uint64_t kNormalInnerWriteBackNonTransient = 0xc; 65 | constexpr uint64_t kInnerReadAllocate = 0x2; 66 | constexpr uint64_t kInnerWriteAllocate = 0x1; 67 | 68 | constexpr uint64_t mair = 69 | kNormalOuterWriteBackNonTransient | 70 | kOuterReadAllocate | kOuterWriteAllocate | 71 | kNormalInnerWriteBackNonTransient | 72 | kInnerReadAllocate | kInnerWriteAllocate; 73 | 74 | SetMairEl2(mair); 75 | return true; 76 | } 77 | 78 | } // namespace memory 79 | -------------------------------------------------------------------------------- /memory/space.h: -------------------------------------------------------------------------------- 1 | #ifndef __MEMORY_SPACE_H__ 2 | #define __MEMORY_SPACE_H__ 3 | 4 | #include 5 | #include 6 | 7 | 8 | namespace memory { 9 | 10 | namespace impl { 11 | 12 | struct Memory { 13 | uintptr_t addr; 14 | uint64_t attr; 15 | }; 16 | 17 | class PageTable { 18 | public: 19 | static constexpr size_t kPageTableSize = 512; 20 | static constexpr uint64_t kPresent = 1ull << 0; 21 | static constexpr uint64_t kTable = 1ull << 1; 22 | 23 | // These flags need to match MAIR_EL2 register configuration. 24 | // I only have normal memory configured in MAIR_EL2 for now (write-back 25 | // caching policy, non-transient allocation on reads and writes for both 26 | // outer and inner domains). 
27 | static constexpr uint64_t kNormalMemory = 1ull << 2; 28 | static constexpr uint64_t kMemoryAttributeMask = kNormalMemory; 29 | 30 | static constexpr uint64_t kPrivileged = 1ull << 6; 31 | static constexpr uint64_t kWritable = 1ull << 7; 32 | static constexpr uint64_t kExecuteNever = 1ull << 54; 33 | static constexpr uint64_t kAccessMask = kPrivileged | kWritable | kExecuteNever; 34 | 35 | static constexpr uint64_t kAttributesMask = kAccessMask | kMemoryAttributeMask; 36 | static constexpr uint64_t ((1ull << 48) - 1) & ~((1ull << 12) - 1); 37 | 38 | PageTable(uintptr_t address); 39 | 40 | PageTable(const PageTable& other) = delete; 41 | PageTable& operator=(const PageTable& other) = delete; 42 | 43 | PageTable(PageTable&& other) = default; 44 | PageTable& operator=(PageTable&& other) = default; 45 | 46 | uintptr_t Address() const; 47 | 48 | bool IsClear(size_t entry) const; 49 | bool IsTable(size_t entry) const; 50 | bool IsMemory(size_t entry) const; 51 | 52 | void Clear(size_t entry); 53 | void SetTable(size_t entry, const PageTable& table); 54 | void SetMemory(size_t entry, const Memory& memory); 55 | 56 | Memory GetMemory(size_t entry) const; 57 | PageTable GetTable(size_t entry) const; 58 | 59 | private: 60 | uint64_t *descriptors_; 61 | }; 62 | 63 | } // namespace impl 64 | 65 | 66 | struct MemoryRange { 67 | uintptr_t from; 68 | uintptr_t to; 69 | 70 | bool Overlap(const MemoryRange& other) const; 71 | bool Touch(const MemoryRange& other) const; 72 | bool Before(const MemoryRange& other) const; 73 | bool After(const MemoryRange& other) const; 74 | }; 75 | 76 | class Mapping { 77 | public: 78 | Mapping(const MemoryRange& range, uint64_t attrs); 79 | virtual ~Mapping() {} 80 | 81 | Mapping(const Mapping&) = delete; 82 | Mapping& operator=(const Mapping&) = delete; 83 | Mapping(Mapping&&) = delete; 84 | Mapping& operator=(Mapping&&) = delete; 85 | 86 | MemoryRange AddressRange() const; 87 | uint64_t Attributes() const; 88 | 89 | virtual bool Map(uintptr_t 
from, uintptr_t to, PageTable* root) = 0; 90 | virtual void Unmap(uintptr_t from, uintptr_t to, PageTable* root) = 0; 91 | virtual bool Fault(uintptr_t addr, PageTable* root) = 0; 92 | 93 | private: 94 | MemoryRange range_; 95 | uint64_t attrs_; 96 | }; 97 | 98 | class StaticMapping : public Mapping { 99 | public: 100 | MemoryMapping(const MemoryRange& virt, const MemoryRange& phys, uint64_t attrs); 101 | 102 | bool Map(uintptr_t from, uintptr_t to, PageTable* root) override; 103 | void Unmap(uintptr_t from, uintptr_t to, PageTable* root) override; 104 | bool Fault(uintptr_t addr, PageTable* root) override; 105 | 106 | private: 107 | MemoryRange phys_; 108 | }; 109 | 110 | class AddressSpace { 111 | public: 112 | uintptr_t PageTable() const; 113 | 114 | bool RegisterMapping(std::unique_ptr mapping); 115 | const Mapping* FindMapping(uintptr_t addr); 116 | std::unique_ptr UnregisterMapping(const Mapping* mapping); 117 | 118 | bool Translate(uintptr_t virt, uintptr_t* phys); 119 | bool Populate(uintptr_t from, uintptr_t to); 120 | bool Fault(uintptr_t addr); 121 | 122 | private: 123 | }; 124 | 125 | bool SetupMapping(); 126 | 127 | } // namespace memory 128 | 129 | #endif // __MEMORY_SPACE_H__ 130 | --------------------------------------------------------------------------------