├── .gitignore
├── .gitmodules
├── LICENSE.txt
├── Makefile
├── images
│   ├── 2023-01-07 203840.png
│   └── 2023-01-27 220350.png
├── include
│   ├── Arch.hpp
│   ├── Atomic.hpp
│   ├── EternalHeap.hpp
│   ├── KArray.hpp
│   ├── KList.hpp
│   ├── KPriorityQueue.hpp
│   ├── MemoryManager.hpp
│   ├── NanoShell.hpp
│   ├── Scheduler.hpp
│   ├── Spinlock.hpp
│   ├── Terminal.hpp
│   ├── Thread.hpp
│   └── _limine.h
├── limine.cfg
├── linker.ld
├── readme.md
├── run-unix.sh
├── run.bat
├── run.sh
├── source
│   ├── EternalHeap.cpp
│   ├── Init.cpp
│   ├── LimineTerm
│   │   ├── LICENSE.md
│   │   ├── font_data.h
│   │   ├── framebuffer.c
│   │   ├── framebuffer.h
│   │   ├── term.c
│   │   └── term.h
│   ├── MemMgr
│   │   ├── KernelHeap.cpp
│   │   ├── PMM.cpp
│   │   ├── PageFault.cpp
│   │   ├── PageMapClone.cpp
│   │   └── VMM.cpp
│   ├── Panic.cpp
│   ├── Scheduler.cpp
│   ├── Spinlock.cpp
│   ├── Standard
│   │   ├── CStandard.cpp
│   │   ├── CxxAbi.cpp
│   │   └── Printf.cpp
│   ├── System.cpp
│   ├── Terminal.cpp
│   ├── Thread.cpp
│   └── ax86_64
│       ├── APIC.cpp
│       ├── Arch.cpp
│       ├── CPU.cpp
│       ├── GDT.cpp
│       ├── HPET.cpp
│       ├── IDT.cpp
│       ├── Misc.asm
│       ├── PIT.cpp
│       ├── RSD.cpp
│       └── TSC.cpp
└── test.bat

/.gitignore:
--------------------------------------------------------------------------------
1 | build/*
2 | .keep/*
3 | limine/*
4 | keep/*

--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "limine"]
2 | 	path = limine
3 | 	url = https://github.com/limine-bootloader/limine.git
4 | 	branch = v4.x-branch-binary

--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # NanoShell 64 kernel makefile
2 | 
3 | # The build directory
4 | BUILD_DIR = build
5 | 
6 | # The source directory
7 | SRC_DIR = source
8 | 
9 | # The include directory
10 | INC_DIR = include
11 | 
12 | # The ISO root directory
13 | ISO_DIR=$(BUILD_DIR)/iso_root
14 | 
15 | # The ISO target.
16 | IMAGE_TARGET=$(BUILD_DIR)/image.iso
17 | 
18 | # The platform we are targeting
19 | TARGET=X86_64
20 | 
21 | # This is the name that our final kernel executable will have.
22 | # Change as needed.
23 | override KERNEL := $(BUILD_DIR)/kernel.elf
24 | 
25 | # Convenience macro to reliably declare overridable command variables.
26 | define DEFAULT_VAR =
27 |     ifeq ($(origin $1),default)
28 |     override $(1) := $(2)
29 |     endif
30 |     ifeq ($(origin $1),undefined)
31 |     override $(1) := $(2)
32 |     endif
33 | endef
34 | 
35 | # It is highly recommended to use a custom built cross toolchain to build a kernel.
36 | # We are only using "cc" as a placeholder here. It may work by using
37 | # the host system's toolchain, but this is not guaranteed.
38 | $(eval $(call DEFAULT_VAR,CC,cc))
39 | $(eval $(call DEFAULT_VAR,CXX,c++))
40 | 
41 | # Same thing for "ld" (the linker).
42 | $(eval $(call DEFAULT_VAR,LD,ld))
43 | 
44 | # User controllable CFLAGS.
45 | CFLAGS ?= -g -O2 -pipe -Wall -Wextra -I $(INC_DIR) -DTARGET_$(TARGET)
46 | 
47 | # User controllable CXXFLAGS.
48 | CXXFLAGS ?= -g -O2 -pipe -Wall -Wextra -I $(INC_DIR) -DTARGET_$(TARGET)
49 | 
50 | # User controllable preprocessor flags. We set none by default.
51 | CPPFLAGS ?=
52 | 
53 | # User controllable nasm flags.
54 | NASMFLAGS ?= -F dwarf -g
55 | 
56 | # User controllable linker flags. We set none by default.
57 | LDFLAGS ?=
58 | 
59 | # Internal C flags that should not be changed by the user.
60 | override CFLAGS += \ 61 | -std=c11 \ 62 | -ffreestanding \ 63 | -fno-stack-protector \ 64 | -fno-stack-check \ 65 | -fno-lto \ 66 | -fno-pie \ 67 | -fno-pic \ 68 | -m64 \ 69 | -march=x86-64 \ 70 | -mabi=sysv \ 71 | -mno-80387 \ 72 | -mno-mmx \ 73 | -mno-sse \ 74 | -mno-sse2 \ 75 | -mno-red-zone \ 76 | -mcmodel=kernel \ 77 | -MMD \ 78 | -I. 79 | 80 | # Internal C++ flags that should not be changed by the user. 81 | override CXXFLAGS += \ 82 | -std=c++17 \ 83 | -ffreestanding \ 84 | -fno-stack-protector \ 85 | -fno-stack-check \ 86 | -fno-lto \ 87 | -fno-pie \ 88 | -fno-pic \ 89 | -m64 \ 90 | -march=x86-64 \ 91 | -mabi=sysv \ 92 | -mno-80387 \ 93 | -mno-mmx \ 94 | -mno-sse \ 95 | -mno-sse2 \ 96 | -mno-red-zone \ 97 | -mcmodel=kernel \ 98 | -MMD \ 99 | -fno-exceptions \ 100 | -fno-rtti \ 101 | -I. 102 | 103 | # Internal linker flags that should not be changed by the user. 104 | override LDFLAGS += \ 105 | -nostdlib \ 106 | -static \ 107 | -m elf_x86_64 \ 108 | -z max-page-size=0x1000 \ 109 | -T linker.ld 110 | 111 | # Check if the linker supports -no-pie and enable it if it does 112 | ifeq ($(shell $(LD) --help 2>&1 | grep 'no-pie' >/dev/null 2>&1; echo $$?),0) 113 | override LDFLAGS += -no-pie 114 | endif 115 | 116 | # Internal nasm flags that should not be changed by the user. 117 | override NASMFLAGS += \ 118 | -f elf64 119 | 120 | # Use find to glob all *.c, *.S, and *.asm files in the directory and extract the object names. 121 | override CFILES := $(shell find $(SRC_DIR) -not -path '*/.*' -type f -name '*.c') 122 | override CXXFILES := $(shell find $(SRC_DIR) -not -path '*/.*' -type f -name '*.cpp') 123 | override ASFILES := $(shell find $(SRC_DIR) -not -path '*/.*' -type f -name '*.S') 124 | override NASMFILES := $(shell find $(SRC_DIR) -not -path '*/.*' -type f -name '*.asm') 125 | override OBJ := $(patsubst $(SRC_DIR)/%,$(BUILD_DIR)/%,$(CFILES:.c=.o) $(CXXFILES:.cpp=.o) $(ASFILES:.S=.o) $(NASMFILES:.asm=.o)) 126 | override HEADER_DEPS := $(patsubst $(SRC_DIR)/%,$(BUILD_DIR)/%,$(CFILES:.c=.d) $(CXXFILES:.cpp=.d) $(ASFILES:.S=.d)) 127 | 128 | # Default target. 129 | .PHONY: all 130 | all: image 131 | 132 | # Link rules for the final kernel executable. 133 | $(KERNEL): $(OBJ) 134 | $(LD) $(OBJ) $(LDFLAGS) -o $@ 135 | 136 | # Include header dependencies. 137 | -include $(HEADER_DEPS) 138 | 139 | # Compilation rules for *.c files. 140 | $(BUILD_DIR)/%.o: $(SRC_DIR)/%.c 141 | @mkdir -p $(dir $@) 142 | $(CC) $(CPPFLAGS) $(CFLAGS) -c $< -o $@ 143 | 144 | # Compilation rules for *.cpp files. 145 | $(BUILD_DIR)/%.o: $(SRC_DIR)/%.cpp 146 | @mkdir -p $(dir $@) 147 | $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@ 148 | 149 | # Compilation rules for *.S files. 150 | $(BUILD_DIR)/%.o: $(SRC_DIR)/%.S 151 | @mkdir -p $(dir $@) 152 | $(CC) $(CPPFLAGS) $(CFLAGS) -c $< -o $@ 153 | 154 | # Compilation rules for *.asm (nasm) files. 155 | $(BUILD_DIR)/%.o: $(SRC_DIR)/%.asm 156 | @mkdir -p $(dir $@) 157 | nasm $(NASMFLAGS) $< -o $@ 158 | 159 | # Remove object files and the final executable. 160 | .PHONY: clean 161 | clean: 162 | @echo "Cleaning..." 163 | rm -rf $(KERNEL) $(OBJ) $(HEADER_DEPS) 164 | 165 | image: limine $(IMAGE_TARGET) 166 | 167 | $(IMAGE_TARGET): $(KERNEL) 168 | @echo "Building iso..." 
169 | 	@rm -rf $(ISO_DIR)
170 | 	@mkdir -p $(ISO_DIR)
171 | 	@cp $^ limine.cfg limine/limine.sys limine/limine-cd.bin $(ISO_DIR)
172 | 	@xorriso -as mkisofs -b limine-cd.bin -no-emul-boot -boot-load-size 4 -boot-info-table --protective-msdos-label $(ISO_DIR) -o $@ 2>/dev/null
173 | 	@limine/limine-deploy $@ 2>/dev/null
174 | 	@rm -rf $(ISO_DIR)
175 | 
176 | run: image
177 | 	@echo "Running..."
178 | 	@./run-unix.sh
179 | 
180 | runw: image
181 | 	@echo "Invoking WSL to run the OS..."
182 | 	@./run.sh
183 | 

--------------------------------------------------------------------------------
/images/2023-01-07 203840.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iProgramMC/NanoShell64/7eb93ebccda81f80fd46ebe203ae7f48c4ded614/images/2023-01-07 203840.png

--------------------------------------------------------------------------------
/images/2023-01-27 220350.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iProgramMC/NanoShell64/7eb93ebccda81f80fd46ebe203ae7f48c4ded614/images/2023-01-27 220350.png

--------------------------------------------------------------------------------
/include/Arch.hpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | //  Arch.hpp - Creation date: 05/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | //  Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | #ifndef _ARCH_HPP
10 | #define _ARCH_HPP
11 | 
12 | #include <NanoShell.hpp>
13 | #include <Spinlock.hpp>
14 | #include <Scheduler.hpp>
15 | #include <MemoryManager.hpp>
16 | #include <_limine.h>
17 | 
18 | namespace Arch
19 | {
20 | #ifdef TARGET_X86_64
21 | 	constexpr uint64_t C_RFLAGS_INTERRUPT_FLAG = 0x200;
22 | 
23 | 	struct TSS
24 | 	{
25 | 		uint32_t m_reserved;
26 | 		uint64_t m_rsp[3]; // RSP 0-2
27 | 		uint64_t m_reserved1;
28 | 		uint64_t m_ist[7]; // IST 1-7
29 | 		uint64_t m_reserved2;
30 | 		uint16_t m_reserved3;
31 | 		uint16_t m_iopb;
32 | 	}
33 | 	PACKED;
34 | 
35 | 	// The GDT structure. It contains an array of uint64s, which represent
36 | 	// each of the GDT entries, and potentially a TSS entry. (todo)
37 | 	struct GDT
38 | 	{
39 | 		// The segment numbers. (Each value is the descriptor's byte offset into the table, i.e. the selector value to load.)
40 | enum 41 | { 42 | DESC_NULL = 0x00, 43 | DESC_16BIT_CODE = 0x08, 44 | DESC_16BIT_DATA = 0x10, 45 | DESC_32BIT_CODE = 0x18, 46 | DESC_32BIT_DATA = 0x20, 47 | DESC_64BIT_RING0_CODE = 0x28, 48 | DESC_64BIT_RING0_DATA = 0x30, 49 | DESC_64BIT_RING3_CODE = 0x38, 50 | DESC_64BIT_RING3_DATA = 0x40, 51 | }; 52 | 53 | uint64_t m_BasicEntries[9]; 54 | 55 | TSS m_tss; 56 | }; 57 | 58 | struct IDT 59 | { 60 | enum 61 | { 62 | INT_PAGE_FAULT = 0x0E, 63 | INT_IPI = 0xF0, 64 | INT_APIC_TIMER = 0xF1, 65 | INT_SPURIOUS = 0xFF, 66 | }; 67 | 68 | struct Entry 69 | { 70 | // Byte 0, 1 71 | uint64_t m_offsetLow : 16; 72 | // Byte 2, 3 73 | uint64_t m_segmentSel : 16; 74 | // Byte 4 75 | uint64_t m_ist : 3; 76 | uint64_t m_reserved0 : 5; 77 | // Byte 5 78 | uint64_t m_gateType : 4; 79 | uint64_t m_reserved1 : 1; 80 | uint64_t m_dpl : 2; 81 | uint64_t m_present : 1; 82 | // Byte 6, 7, 8, 9, 10, 11 83 | uint64_t m_offsetHigh : 48; 84 | // Byte 12, 13, 14, 15 85 | uint64_t m_reserved2 : 32; 86 | 87 | Entry() = default; 88 | 89 | Entry(uintptr_t handler, uint8_t ist = 0, uint8_t dpl = 0) 90 | { 91 | memset(this, 0, sizeof *this); 92 | 93 | m_offsetLow = handler & 0xFFFF; 94 | m_offsetHigh = handler >> 16; 95 | m_gateType = 0xE; 96 | m_dpl = dpl; 97 | m_ist = ist; 98 | m_present = handler != 0; 99 | m_segmentSel = GDT::DESC_64BIT_RING0_CODE; 100 | } 101 | } 102 | PACKED; 103 | 104 | Entry m_entries[256]; 105 | 106 | void SetEntry(uint8_t iv, Entry entry) 107 | { 108 | m_entries[iv] = entry; 109 | } 110 | }; 111 | 112 | namespace RSD 113 | { 114 | struct Table 115 | { 116 | // Header. 117 | char m_Signature[4]; 118 | uint32_t m_Length; 119 | uint8_t m_Revision; 120 | uint8_t m_Checksum; 121 | char m_OemId[6]; 122 | char m_OemTableId[6]; 123 | uint32_t m_OemRevision; 124 | uint32_t m_CreatorId; 125 | uint32_t m_CreatorRevision; 126 | 127 | uint32_t m_SubSDTs[0]; 128 | 129 | int GetSubSDTCount() const 130 | { 131 | size_t sizeOfHeader = (size_t)&(((Table*)nullptr)->m_SubSDTs); 132 | 133 | return (m_Length - sizeOfHeader) / sizeof(uint32_t); 134 | } 135 | }; 136 | 137 | struct Descriptor 138 | { 139 | char m_Signature[8]; 140 | uint8_t m_Checksum; 141 | char m_OemId[6]; 142 | uint8_t m_Revision; 143 | uint32_t m_RsdtAddress; 144 | 145 | uintptr_t GetRSDTAddress() const 146 | { 147 | return m_RsdtAddress; 148 | } 149 | } 150 | PACKED; 151 | 152 | // Load an RSDT table. 153 | void LoadTable(RSD::Table* pTable); 154 | 155 | // Load the RSDP root table. 156 | void Load(); 157 | } 158 | 159 | typedef void(*PolledSleepFunc)(uint64_t); 160 | 161 | namespace APIC 162 | { 163 | // Ensure the APIC is supported by checking CPUID 164 | void EnsureOn(); 165 | 166 | // Initialize the APIC for this CPU. 167 | void Init(); 168 | 169 | // Write a register. 170 | void WriteReg(uint32_t reg, uint32_t value); 171 | 172 | // Read a register. 173 | uint32_t ReadReg(uint32_t reg); 174 | 175 | // Get the LAPIC's base address. This is not offset by the HHDM. 176 | uintptr_t GetLapicBasePhys(); 177 | 178 | // Get the LAPIC's base address. This is offset by the HHDM. 179 | uintptr_t GetLapicBase(); 180 | 181 | // Schedule a one-shot interrupt in X nanoseconds. 182 | // To avoid race conditions, only the scheduler may use this. 183 | void ScheduleInterruptIn(uint64_t nanoseconds); 184 | 185 | // Tell the APIC that we are done processing its interrupt. 186 | void EndOfInterrupt(); 187 | 188 | // Calibrate the APIC and TSC timers using the PIT or HPET. Used by the CPU. 189 | // Returns the frequency of ticks per millisecond. 
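// (The result comes back through apicOut and tscOut; judging by the
// m_LapicTicksPerMS / m_TscTicksPerMS fields further below, both are
// tick counts per millisecond.)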
190 | // This is not thread safe, so be sure to add locking before going in. 191 | void CalibrateTimer(uint64_t &apicOut, uint64_t &tscOut); 192 | 193 | // Set the polled sleep function for calibration. By default, it's 194 | // the PIT sleep function. 195 | void SetPolledSleepFunc(PolledSleepFunc func); 196 | 197 | // Don't use directly. 198 | void CalibrateHPET(uint64_t &apicOut, uint64_t &tscOut); 199 | }; 200 | 201 | namespace HPET 202 | { 203 | // Get the raw tick count of the HPET. 204 | uint64_t GetRawTickCount(); 205 | 206 | // Get the number of nanoseconds passed since the HPET was initialized. 207 | uint64_t GetTickCount(); 208 | 209 | // The function that loads the HPET's data from the RSDT entry and initializes it. 210 | void Found(RSD::Table*); 211 | 212 | // Performs a polling sleep. 213 | void PolledSleep(uint64_t nanoseconds); 214 | 215 | // Get the counter clock period (amount of femtoseconds per tick) 216 | uint64_t GetCounterClockPeriod(); 217 | } 218 | 219 | namespace TSC 220 | { 221 | // Get the raw tick count of the TSC. 222 | uint64_t Read(); 223 | } 224 | 225 | // A small driver to allow calibration of the APIC. 226 | // Note: Using this on more than 1 CPU WILL lead to problems, 227 | // so only use this on one at a time. 228 | namespace PIT 229 | { 230 | // Reads a short from the latch counter. 231 | uint16_t Read(); 232 | 233 | // Performs a polling sleep. Supports about 50 ms max. 234 | void PolledSleep(uint64_t nanoseconds); 235 | }; 236 | 237 | #endif 238 | 239 | class CPU 240 | { 241 | public: 242 | enum class eIpiType 243 | { 244 | NONE, 245 | HELLO, 246 | PANIC, 247 | }; 248 | 249 | static constexpr size_t C_INTERRUPT_STACK_SIZE = 8192; 250 | 251 | /**** CPU specific variables ****/ 252 | private: 253 | // The index of this processor. 254 | uint32_t m_processorID = 0; 255 | 256 | // The SMP info we have been given. 257 | limine_smp_info* m_pSMPInfo = nullptr; 258 | 259 | // Whether we are the bootstrap processor or not. 260 | bool m_bIsBSP = false; 261 | 262 | // The GDT of this CPU. 263 | GDT m_gdt; 264 | 265 | // The IDT of this CPU. 266 | IDT m_idt; 267 | 268 | // The main page mapping. 269 | VMM::PageMapping* m_pPageMap = nullptr; 270 | 271 | // The interrupt handler stack. 272 | void* m_pIsrStack = nullptr; 273 | 274 | // The current IPI type. 275 | eIpiType m_ipiType = eIpiType::NONE; 276 | 277 | // The sender of this IPI. 278 | uint32_t m_ipiSenderID = 0; 279 | 280 | // The IPI spinlock. 281 | Spinlock m_ipiSpinlock; 282 | 283 | // The scheduler object. 284 | Scheduler m_Scheduler; 285 | 286 | // If the interrupts are currently enabled. 287 | bool m_InterruptsEnabled = false; 288 | 289 | // The number of LAPIC timer ticks per millisecond. 290 | uint64_t m_LapicTicksPerMS = 0; 291 | 292 | // The number of TSC timer ticks per millisecond. 293 | uint64_t m_TscTicksPerMS = 0; 294 | 295 | // The starting TSC. This is set after Arch::CPU::Go() detects that all CPUs have reached that point. 296 | uint64_t m_StartingTSC = 0; 297 | 298 | // Store other fields here such as current task, etc. 299 | 300 | /**** Private CPU object functions. ****/ 301 | private: 302 | 303 | #ifdef TARGET_X86_64 304 | // Load the GDT. 305 | void LoadGDT(); 306 | 307 | // Clear the IDT. 308 | void ClearIDT(); 309 | 310 | // Load the IDT. 311 | // Note: Any ulterior changes should be done through an IPI to the CPU. 312 | void LoadIDT(); 313 | 314 | // Sets up the GDT and IDT. 315 | void SetupGDTAndIDT(); 316 | 317 | // Waits for the BSP to initialize. 
318 | void WaitForBSP(); 319 | 320 | // Marks the BSP as initialized. 321 | void OnBSPInitialized(); 322 | 323 | // Calibrates the LAPIC and TSC timers using the PIT or the HPET, if available. 324 | void CalibrateTimer(); 325 | #endif 326 | /**** Operations that should be run within this CPU's context, but are otherwise public ****/ 327 | public: 328 | uint32_t ID() const 329 | { 330 | return m_processorID; 331 | } 332 | 333 | uint64_t GetStartingTSC() const 334 | { 335 | return m_StartingTSC; 336 | } 337 | 338 | uint64_t GetTSCTicksPerMS() const 339 | { 340 | return m_TscTicksPerMS; 341 | } 342 | 343 | uint64_t GetLapicTicksPerMS() const 344 | { 345 | return m_LapicTicksPerMS; 346 | } 347 | 348 | void UnlockIpiSpinlock() 349 | { 350 | m_ipiSpinlock.Unlock(); 351 | } 352 | 353 | bool& InterruptsEnabledRaw() 354 | { 355 | return m_InterruptsEnabled; 356 | } 357 | 358 | // The function called when an IPI was received. 359 | void OnIPI(); 360 | 361 | // The function called when a timer interrupt was received. 362 | void OnTimerIRQ(Registers* pRegs); 363 | 364 | // The function called when we're inside of a page fault. 365 | void OnPageFault(Registers* pRegs); 366 | 367 | // Setup the CPU. 368 | void Init(); 369 | 370 | // Start the CPU's idle loop. 371 | void Go(); 372 | 373 | // Set an interrupt gate. 374 | void SetInterruptGate(uint8_t intNum, uintptr_t fnHandler, uint8_t ist = 0, uint8_t dpl = 0); 375 | 376 | // Get the scheduler. 377 | Scheduler* GetScheduler() { return &m_Scheduler; } 378 | 379 | // Check if interrupts are enabled. 380 | bool InterruptsEnabled() { return m_InterruptsEnabled; } 381 | 382 | /**** Operations that can be performed on a CPU object from anywhere. ****/ 383 | public: 384 | CPU(uint32_t processorID, limine_smp_info* pSMPInfo, bool bIsBSP) : m_processorID(processorID), m_pSMPInfo(pSMPInfo), m_bIsBSP(bIsBSP) 385 | { 386 | #ifdef TARGET_X86_64 387 | ClearIDT(); 388 | #endif 389 | } 390 | 391 | // Get whether this CPU is the bootstrap CPU. 392 | bool IsBootstrap() const 393 | { 394 | return m_bIsBSP; 395 | } 396 | 397 | // Set whether the interrupts are enabled or not. 398 | bool SetInterruptsEnabled(bool); 399 | 400 | // Send this CPU an IPI. 401 | void SendIPI(eIpiType type); 402 | 403 | /**** CPU agnostic operations ****/ 404 | public: 405 | // Get the number of CPUs available to the system. 406 | static uint64_t GetCount(); 407 | 408 | // Initialize the CPUs based on the Limine SMP response, from the bootstrap processor's perspective. 409 | static void InitAsBSP(); 410 | 411 | // Get the SMP request's response. 412 | static limine_smp_response* GetSMPResponse(); 413 | 414 | // Get the HHDM request's response. 415 | static limine_hhdm_response* GetHHDMResponse(); 416 | 417 | // Static function to initialize a certain CPU. 418 | static void Start(limine_smp_info* pInfo); 419 | 420 | // Get the current CPU. 421 | static CPU* GetCurrent(); 422 | 423 | // Get the CPU with the specified processor ID. 424 | static CPU* GetCPU(uint64_t pid); 425 | 426 | // Relating to this, check if the current processor is the BSP. 427 | static bool AreWeBootstrap() 428 | { 429 | return GetCurrent()->IsBootstrap(); 430 | } 431 | }; 432 | 433 | typedef uint64_t(*GetTickCountMethod)(); 434 | 435 | // Set the method used for GetTickCount. 436 | void SetGetTickCountMethod(GetTickCountMethod ptr); 437 | 438 | // Waits until the next interrupt. 439 | void Halt(); 440 | 441 | // This loop constantly idles. 
This is done so that, when the CPU is not 442 | // running any task, it can idle and not continue running on full throttle. 443 | NO_RETURN void IdleLoop(); 444 | 445 | // x86_64 architecture specific functions. 446 | #ifdef TARGET_X86_64 447 | // Gets the contents of CR3. 448 | uintptr_t ReadCR3(); 449 | 450 | // Sets the contents of CR3. 451 | void WriteCR3(uintptr_t cr3); 452 | 453 | // Reads a single byte from an I/O port. 454 | uint8_t ReadByte(uint16_t port); 455 | 456 | // Writes a single byte to an I/O port. 457 | void WriteByte(uint16_t port, uint8_t data); 458 | 459 | // MSRs: 460 | enum eMSR 461 | { 462 | FS_BASE = 0xC0000100, 463 | GS_BASE = 0xC0000101, 464 | KERNEL_GS_BASE = 0xC0000102, 465 | }; 466 | 467 | // Get the HHDM offset (higher half direct map). 468 | uintptr_t GetHHDMOffset(); 469 | 470 | // Invalidate a part of the TLB cache. 471 | void Invalidate(uintptr_t addr); 472 | 473 | // Writes to a model specific register. 474 | void WriteMSR(uint32_t msr, uint64_t value); 475 | 476 | // Writes to a model specific register. 477 | uint64_t ReadMSR(uint32_t msr); 478 | 479 | // Write a 32-bit integer to any address within physical memory. 480 | // This assumes an HHDM is present and the entire physical address space is mapped. 481 | void WritePhys(uintptr_t ptr, uint32_t thing); 482 | 483 | // Read a 32-bit integer from any address within physical memory. 484 | // This assumes an HHDM is present and the entire physical address space is mapped. 485 | uint32_t ReadPhys(uintptr_t ptr); 486 | 487 | // Get the number of nanoseconds since system boot. 488 | // Specifically, since all the CPUs are about to call "Thread::Yield()". 489 | uint64_t GetTickCount(); 490 | 491 | #endif 492 | } 493 | 494 | #endif//_ARCH_HPP 495 | -------------------------------------------------------------------------------- /include/Atomic.hpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // Atomic.hpp - Creation date: 04/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | 10 | #ifndef _ATOMIC_HPP 11 | #define _ATOMIC_HPP 12 | 13 | // This is a wrapper for C++ atomic builtins. 14 | 15 | // If our compiler claims we're hosted, we're really not.. 16 | 17 | #if __STDC_HOSTED__ || defined(__STDC_NO_ATOMICS__) 18 | #error "Hey" 19 | #endif 20 | 21 | #define ATOMIC_MEMORD_SEQ_CST __ATOMIC_SEQ_CST 22 | #define ATOMIC_MEMORD_ACQ_REL __ATOMIC_ACQ_REL 23 | #define ATOMIC_MEMORD_ACQUIRE __ATOMIC_ACQUIRE 24 | #define ATOMIC_MEMORD_RELEASE __ATOMIC_RELEASE 25 | #define ATOMIC_MEMORD_CONSUME __ATOMIC_CONSUME 26 | #define ATOMIC_MEMORD_RELAXED __ATOMIC_RELAXED 27 | 28 | // TODO: Allow default atomic memory order to be changed? 
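// A usage sketch (illustrative only, not part of the original header):
//   - a pure statistics counter can afford a relaxed update,
//       counter.FetchAdd(1, ATOMIC_MEMORD_RELAXED);
//   - publishing data to another CPU needs a release/acquire pair,
//       ready.Store(true, ATOMIC_MEMORD_RELEASE);     // producer side
//       if (ready.Load(ATOMIC_MEMORD_ACQUIRE)) { }    // consumer side
// The sequentially consistent default below is the safest (if slowest) choice.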
29 | #define ATOMIC_DEFAULT_MEMORDER ATOMIC_MEMORD_SEQ_CST
30 | 
31 | template <typename T>
32 | class Atomic
33 | {
34 | private:
35 | 	T m_content;
36 | 
37 | public:
38 | 	Atomic()
39 | 	{
40 | 
41 | 	}
42 | 
43 | 	Atomic(T init, int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
44 | 	{
45 | 		__atomic_store_n(&m_content, init, memoryOrder);
46 | 	}
47 | 
48 | 	void Load(T* ret, int memoryOrder = ATOMIC_DEFAULT_MEMORDER) const
49 | 	{
50 | 		__atomic_load(&m_content, ret, memoryOrder);
51 | 	}
52 | 
53 | 	T Load(int memoryOrder = ATOMIC_DEFAULT_MEMORDER) const
54 | 	{
55 | 		return __atomic_load_n(&m_content, memoryOrder);
56 | 	}
57 | 
58 | 	void Store(T store, int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
59 | 	{
60 | 		__atomic_store_n(&m_content, store, memoryOrder);
61 | 	}
62 | 
63 | 	bool TestAndSet(int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
64 | 	{
65 | 		return __atomic_test_and_set(&m_content, memoryOrder);
66 | 	}
67 | 
68 | 	void Clear(int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
69 | 	{
70 | 		__atomic_clear(&m_content, memoryOrder);
71 | 	}
72 | 
73 | 	// This class of operations fetches the result AFTER the operation is performed.
74 | 
75 | 	T AddFetch(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
76 | 	{
77 | 		return __atomic_add_fetch(&m_content, value, memoryOrder);
78 | 	}
79 | 
80 | 	T SubFetch(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
81 | 	{
82 | 		return __atomic_sub_fetch(&m_content, value, memoryOrder);
83 | 	}
84 | 
85 | 	T AndFetch(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
86 | 	{
87 | 		return __atomic_and_fetch(&m_content, value, memoryOrder);
88 | 	}
89 | 
90 | 	T NandFetch(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
91 | 	{
92 | 		return __atomic_nand_fetch(&m_content, value, memoryOrder);
93 | 	}
94 | 
95 | 	T OrFetch(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
96 | 	{
97 | 		return __atomic_or_fetch(&m_content, value, memoryOrder);
98 | 	}
99 | 
100 | 	T XorFetch(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER)
101 | 	{
102 | 		return __atomic_xor_fetch(&m_content, value, memoryOrder);
103 | 	}
104 | 
105 | 	// This class of operations fetches the result and THEN performs the operation. Otherwise, basically the same.
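// (Worked example: with m_content == 5, AddFetch(1) above stores 6 and returns
// the new value 6, while FetchAdd(1) below also stores 6 but returns the old 5.)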
106 | 107 | T FetchAdd(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER) 108 | { 109 | return __atomic_fetch_add(&m_content, value, memoryOrder); 110 | } 111 | 112 | T FetchSub(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER) 113 | { 114 | return __atomic_fetch_sub(&m_content, value, memoryOrder); 115 | } 116 | 117 | T FetchAnd(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER) 118 | { 119 | return __atomic_fetch_and(&m_content, value, memoryOrder); 120 | } 121 | 122 | T FetchNand(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER) 123 | { 124 | return __atomic_fetch_nand(&m_content, value, memoryOrder); 125 | } 126 | 127 | T FetchOr(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER) 128 | { 129 | return __atomic_fetch_or(&m_content, value, memoryOrder); 130 | } 131 | 132 | T FetchXor(T value, int memoryOrder = ATOMIC_DEFAULT_MEMORDER) 133 | { 134 | return __atomic_fetch_xor(&m_content, value, memoryOrder); 135 | } 136 | 137 | void Exchange(T* val, T* ret, int memoryOrder = ATOMIC_DEFAULT_MEMORDER) 138 | { 139 | __atomic_exchange(&m_content, val, ret, memoryOrder); 140 | } 141 | 142 | T Exchange(T val, int memoryOrder = ATOMIC_DEFAULT_MEMORDER) 143 | { 144 | return __atomic_exchange_n(&m_content, val, memoryOrder); 145 | } 146 | 147 | bool CompareExchange(T* expected, T desired, bool weak, int successMemoryOrder = ATOMIC_DEFAULT_MEMORDER, int failureMemoryOrder = ATOMIC_DEFAULT_MEMORDER) 148 | { 149 | return __atomic_compare_exchange_n(&m_content, expected, desired, weak, successMemoryOrder, failureMemoryOrder); 150 | } 151 | }; 152 | 153 | 154 | #endif//_ATOMIC_HPP 155 | -------------------------------------------------------------------------------- /include/EternalHeap.hpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // EternalHeap.hpp - Creation date: 05/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | 10 | // The eternal heap is a small (1Mib) block of memory which allows very small and 11 | // permanent blocks of memory to be given out during the initialization process. 12 | 13 | #ifndef _ETERNAL_HEAP_HPP 14 | #define _ETERNAL_HEAP_HPP 15 | 16 | #include 17 | #include 18 | 19 | namespace EternalHeap 20 | { 21 | // Permanently allocates a block of memory. 22 | void * Allocate(size_t sz); 23 | }; 24 | 25 | #endif//_ETERNAL_HEAP_HPP 26 | -------------------------------------------------------------------------------- /include/KArray.hpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // KArray.hpp - Creation date: 02/05/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | 10 | #ifndef _KARRAY_HPP 11 | #define _KARRAY_HPP 12 | 13 | // NOTE: This structure is NOT thread safe. 
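// A minimal usage sketch (illustrative only, not part of the original header):
//   KArray<int> arr;
//   arr.PushBack(3);
//   arr.PushBack(7);
//   int* p = arr.At(1);     // &arr[1], or nullptr if the index is out of range
//   arr.EraseUnordered(0);  // O(1), but moves the last element into slot 0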
14 | 
15 | // NOTE: TODO: Use the kernel heap directly instead of initializing extra elements that will go unused.
16 | 
17 | // This will mostly be used with pointer types, so this is probably fine.
18 | 
19 | template <typename T>
20 | class KArray
21 | {
22 | public:
23 | 	KArray()
24 | 	{
25 | 		m_container = nullptr;
26 | 		m_container_capacity = 0;
27 | 		m_container_size = 0;
28 | 	}
29 | 	virtual ~KArray()
30 | 	{
31 | 		if (m_container)
32 | 		{
33 | 			delete[] m_container;
34 | 			m_container = nullptr;
35 | 			m_container_size = 0;
36 | 			m_container_capacity = 0;
37 | 		}
38 | 	}
39 | 
40 | 	void Reserve(size_t sz)
41 | 	{
42 | 		if (m_container_capacity == 0)
43 | 		{
44 | 			SetupDefaultContainer();
45 | 			// call reserve() again;
46 | 
47 | 			Reserve(sz);
48 | 
49 | 			return;
50 | 		}
51 | 
52 | 		if (sz == 0) return; // considered an error
53 | 		if (sz == m_container_capacity) return;
54 | 
55 | 		if (sz < m_container_size)
56 | 			sz = m_container_size;
57 | 
58 | 		T* newData = new T[sz];
59 | 		for (size_t i = 0; i < m_container_size; i++)
60 | 		{
61 | 			newData[i] = m_container[i];
62 | 		}
63 | 
64 | 		T* oldContainer = m_container;
65 | 		m_container = newData;
66 | 		m_container_capacity = sz; // keep the capacity in sync with the new allocation
67 | 		delete[] oldContainer;
68 | 	}
69 | 
70 | 	virtual void PushBack(const T &t)
71 | 	{
72 | 		if (m_container_size >= m_container_capacity)
73 | 			Reserve(m_container_capacity * 2);
74 | 
75 | 		m_container[m_container_size++] = t;
76 | 	}
77 | 
78 | 	T& Front()
79 | 	{
80 | 		return m_container[0];
81 | 	}
82 | 
83 | 	T& Back()
84 | 	{
85 | 		return m_container[m_container_size - 1];
86 | 	}
87 | 
88 | 	T* At(size_t x)
89 | 	{
90 | 		if (x >= m_container_size) return nullptr;
91 | 
92 | 		return &m_container[x];
93 | 	}
94 | 
95 | 	// unsafe access
96 | 	T& operator[](size_t index)
97 | 	{
98 | 		return m_container[index];
99 | 	}
100 | 
101 | 	virtual void Erase(size_t index)
102 | 	{
103 | 		if (index >= m_container_size) return;
104 | 
105 | 		for (size_t i = index + 1; i < m_container_size; i++)
106 | 			m_container[i - 1] = m_container[i]; // shift everything after the erased slot one to the left
107 | 
108 | 		m_container_size--;
109 | 	}
110 | 
111 | 	// this is faster, but doesn't keep the order
112 | 	virtual void EraseUnordered(size_t index)
113 | 	{
114 | 		if (index >= m_container_size) return;
115 | 
116 | 		m_container[index] = m_container[--m_container_size];
117 | 	}
118 | 
119 | 	void Clear()
120 | 	{
121 | 		delete[] m_container;
122 | 		SetupDefaultContainer();
123 | 	}
124 | 
125 | 	size_t Size()
126 | 	{
127 | 		return m_container_size;
128 | 	}
129 | 
130 | 	size_t Capacity()
131 | 	{
132 | 		return m_container_capacity;
133 | 	}
134 | 
135 | private:
136 | 	void SetupDefaultContainer()
137 | 	{
138 | 		m_container_capacity = 16; // the default.
139 | 		m_container = new T[m_container_capacity];
140 | 		m_container_size = 0;
141 | 	}
142 | 
143 | private:
144 | 	T* m_container;
145 | 	size_t m_container_size;
146 | 	size_t m_container_capacity;
147 | };
148 | 
149 | #endif
150 | 

--------------------------------------------------------------------------------
/include/KList.hpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | //  KList.hpp - Creation date: 12/04/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | //  Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | 
10 | #ifndef _KLIST_HPP
11 | #define _KLIST_HPP
12 | 
13 | // NOTE: This structure is NOT thread safe.
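// A minimal usage sketch (illustrative only), using it as the FIFO queue
// described below:
//   KList<int> queue;
//   queue.AddBack(1);           // enqueue
//   queue.AddBack(2);
//   int head = queue.Front();   // 1
//   queue.PopFront();           // dequeue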
14 | 15 | // This linked list based structure can be used as either a linked list, 16 | // or a queue, or a deque (double-ended queue). The scheduler uses this 17 | // data structure in all three ways. 18 | 19 | template 20 | class KList 21 | { 22 | class ListNode 23 | { 24 | friend class KList; 25 | 26 | ListNode *m_pPrev = nullptr, *m_pNext = nullptr; 27 | 28 | T m_data; 29 | 30 | public: 31 | ListNode(T data) 32 | { 33 | m_data = data; 34 | } 35 | }; 36 | 37 | class ListNodeIterator 38 | { 39 | friend ListNode; 40 | friend KList; 41 | 42 | ListNode* m_pNode; 43 | 44 | public: 45 | ListNodeIterator(ListNode* pNode) 46 | { 47 | m_pNode = pNode; 48 | } 49 | 50 | T& operator*() const 51 | { 52 | return m_pNode->m_data; 53 | } 54 | 55 | bool Valid() const 56 | { 57 | return m_pNode != nullptr; 58 | } 59 | 60 | ListNodeIterator& operator++() 61 | { 62 | m_pNode = m_pNode->m_pNext; 63 | 64 | return (*this); 65 | } 66 | 67 | ListNodeIterator operator++(UNUSED int unused) 68 | { 69 | ListNodeIterator iter = (*this); 70 | ++(*this); 71 | return iter; 72 | } 73 | 74 | ListNodeIterator& operator--() 75 | { 76 | m_pNode = m_pNode->m_pPrev; 77 | 78 | return (*this); 79 | } 80 | 81 | ListNodeIterator operator--(UNUSED int unused) 82 | { 83 | ListNodeIterator iter = (*this); 84 | --(*this); 85 | return iter; 86 | } 87 | }; 88 | 89 | ListNode *m_pFirst = nullptr, *m_pLast = nullptr; 90 | 91 | public: 92 | bool Empty() 93 | { 94 | return m_pFirst == nullptr && m_pLast == nullptr; 95 | } 96 | 97 | void AddBack(const T& element) 98 | { 99 | ListNode* pNode = new ListNode(element); 100 | 101 | if (m_pLast) 102 | m_pLast->m_pNext = pNode; 103 | 104 | pNode->m_pPrev = m_pLast; 105 | 106 | m_pLast = pNode; 107 | 108 | if (!m_pFirst) 109 | m_pFirst = pNode; 110 | } 111 | 112 | void AddFront(const T& element) 113 | { 114 | ListNode* pNode = new ListNode(element); 115 | 116 | if (m_pFirst) 117 | m_pFirst->m_pPrev = pNode; 118 | 119 | pNode->m_pNext = m_pFirst; 120 | 121 | m_pFirst = pNode; 122 | 123 | if (!m_pLast) 124 | m_pLast = pNode; 125 | } 126 | 127 | ~KList() 128 | { 129 | while (!Empty()) 130 | PopBack(); 131 | } 132 | 133 | T Front() 134 | { 135 | // WARNING: Don't call this while the list is empty! 136 | return m_pFirst->m_data; 137 | } 138 | 139 | T Back() 140 | { 141 | // WARNING: Don't call this while the list is empty! 142 | return m_pLast->m_data; 143 | } 144 | 145 | ListNodeIterator Begin() 146 | { 147 | return ListNodeIterator(m_pFirst); 148 | } 149 | 150 | ListNodeIterator End() 151 | { 152 | return ListNodeIterator(m_pLast); 153 | } 154 | 155 | void PopFront() 156 | { 157 | if (Empty()) return; 158 | 159 | ListNode* pNode = m_pFirst->m_pNext; 160 | 161 | if (!pNode) 162 | { 163 | // this is the only node. 
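// (m_pFirst == m_pLast here; the code below frees it and clears both
// links, so that Empty() holds and m_pLast cannot dangle.)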
164 | 			delete m_pFirst;
165 | 
166 | 			m_pFirst = m_pLast = nullptr;
167 | 			return;
168 | 		}
169 | 
170 | 		pNode->m_pPrev = nullptr;
171 | 
172 | 		delete m_pFirst;
173 | 		m_pFirst = pNode;
174 | 	}
175 | 
176 | 	void Remove(ListNode* pNode)
177 | 	{
178 | 		if (m_pFirst == pNode)
179 | 			m_pFirst = m_pFirst->m_pNext;
180 | 		if (m_pLast == pNode)
181 | 			m_pLast = m_pLast->m_pPrev;
182 | 		if (pNode->m_pPrev)
183 | 			pNode->m_pPrev->m_pNext = pNode->m_pNext;
184 | 		if (pNode->m_pNext)
185 | 			pNode->m_pNext->m_pPrev = pNode->m_pPrev;
186 | 		delete pNode;
187 | 	}
188 | 
189 | 	void Erase(const ListNodeIterator& iter)
190 | 	{
191 | 		if (!iter.Valid())
192 | 			return;
193 | 
194 | 		Remove(iter.m_pNode);
195 | 	}
196 | 
197 | 	void PopBack()
198 | 	{
199 | 		if (Empty()) return;
200 | 
201 | 		ListNode* pNode = m_pLast->m_pPrev;
202 | 
203 | 		if (!pNode)
204 | 		{
205 | 			// this is the only node.
206 | 			delete m_pLast;
207 | 
208 | 			m_pLast = m_pFirst = nullptr;
209 | 			return;
210 | 		}
211 | 
212 | 		pNode->m_pNext = nullptr;
213 | 		delete m_pLast;
214 | 		m_pLast = pNode;
215 | 	}
216 | };
217 | 
218 | #endif
219 | 

--------------------------------------------------------------------------------
/include/KPriorityQueue.hpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | //  KPriorityQueue.hpp - Creation date: 02/05/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | //  Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | 
10 | #ifndef _KPRIORITYQUEUE_HPP
11 | #define _KPRIORITYQUEUE_HPP
12 | 
13 | #include <KArray.hpp>
14 | 
15 | // Maybe we'll move these to a different header file.
16 | template <typename T>
17 | struct KGreater
18 | {
19 | 	bool operator() (const T& a, const T& b) const
20 | 	{
21 | 		return a > b;
22 | 	}
23 | };
24 | 
25 | // This is implemented with KArray and a binary max heap on top.
26 | 
27 | // Note: Why won't the compiler simply accept "Size()" instead of "this->Size()"? (It's because KArray<T> is a dependent base class, so unqualified names are not looked up inside it.)
28 | 
29 | // Comp(a, b) essentially calls a > b by default.
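// Consequently the default comparator builds a max-heap (illustrative only):
//   KPriorityQueue<int> pq;
//   pq.PushBack(3); pq.PushBack(1); pq.PushBack(2);
//   int top = pq.Front();  // 3: the largest element sits at index 0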
30 | template <typename T, typename Comp = KGreater<T>>
31 | class KPriorityQueue : public KArray<T>
32 | {
33 | public:
34 | 	void PushBack(const T& t) override
35 | 	{
36 | 		KArray<T>::PushBack(t);
37 | 		SortUp(this->Size() - 1);
38 | 	}
39 | 
40 | 	void Erase(size_t index) override
41 | 	{
42 | 		if (index >= this->Size()) return;
43 | 
44 | 		KArray<T>::EraseUnordered(index);
45 | 		SortDown(index);
46 | 	}
47 | 
48 | 	void EraseUnordered(size_t index) override
49 | 	{
50 | 		this->Erase(index);
51 | 	}
52 | 
53 | private:
54 | 	void SortUp(size_t index)
55 | 	{
56 | 		if (index >= this->Size()) return;
57 | 
58 | 		if (index == 0) // already at the top
59 | 			return;
60 | 
61 | 		auto& me = *this;
62 | 
63 | 		Comp comp;
64 | 
65 | 		size_t parentIndex = (index - 1) / 2;
66 | 		while (true)
67 | 		{
68 | 			if (parentIndex == index) break;
69 | 
70 | 			T& parent = me[parentIndex];
71 | 
72 | 			if (comp(me[index], parent))
73 | 			{
74 | 				T temp = parent;
75 | 				parent = me[index];
76 | 				me[index] = temp;
77 | 			}
78 | 			else break;
79 | 
80 | 			index = parentIndex;
81 | 
82 | 			if (index == 0) break;
83 | 
84 | 			parentIndex = (index - 1) / 2;
85 | 		}
86 | 	}
87 | 
88 | 	void SortDown(size_t index)
89 | 	{
90 | 		if (index >= this->Size()) return;
91 | 
92 | 		auto& me = *this;
93 | 
94 | 		Comp comp;
95 | 
96 | 		while (true)
97 | 		{
98 | 			size_t ciLeft = index * 2 + 1;
99 | 			size_t ciRight = index * 2 + 2;
100 | 			size_t swapIdx = 0;
101 | 
102 | 			if (ciLeft < this->Size())
103 | 			{
104 | 				swapIdx = ciLeft;
105 | 
106 | 				if (ciRight < this->Size() && comp(me[ciRight], me[ciLeft]))
107 | 					swapIdx = ciRight;
108 | 			}
109 | 			else break;
110 | 
111 | 			if (comp(me[swapIdx], me[index])) // should change this to !comp I guess?
112 | 			{
113 | 				T temp = me[swapIdx];
114 | 				me[swapIdx] = me[index];
115 | 				me[index] = temp;
116 | 
117 | 				index = swapIdx;
118 | 			}
119 | 			else break;
120 | 		}
121 | 	}
122 | };
123 | 
124 | #endif
125 | 

--------------------------------------------------------------------------------
/include/MemoryManager.hpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | //  MemoryManager.hpp - Creation date: 07/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | //  Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // File description:
11 | //   This file contains header definitions for the memory
12 | //   manager.
13 | //
14 | // ***************************************************************
15 | #ifndef _MEMORY_MANAGER_HPP
16 | #define _MEMORY_MANAGER_HPP
17 | 
18 | // Address Layout:
19 | // 0x0000'0000'0000'0000 - 0x0000'7FFF'FFFF'FFFF: User mappable memory region.
20 | // 0x0000'8000'0000'0000 - 0xFFFF'7FFF'FFFF'FFFF: Non-canonical address gap.
21 | // 0xFFFF'8000'0000'0000 - 0xFFFF'EFFF'FFFF'FFFF: The HHDM mapping. This region and the ones below have their PML4 entries copied verbatim (see PageMapping::Clone).
22 | // 0xFFFF'F000'0000'0000 - 0xFFFF'FFFD'FFFF'FFFF: The kernel heap.
23 | // 0xFFFF'FFFE'0000'0000 - 0xFFFF'FFFE'FFFF'FFFF: Single page hardware mappings.
24 | // 0xFFFF'FFFF'0000'0000 - 0xFFFF'FFFF'FFFF'FFFF: The kernel itself.
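// (For reference: one PML4 slot covers 512 GiB, i.e. 2^39 bytes, so the slot of
// a canonical address is (addr >> 39) & 0x1FF. For example, the HHDM base
// 0xFFFF'8000'0000'0000 yields slot 0x100, matching P_HHDM_START below.)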
25 | 26 | #include 27 | 28 | constexpr uint64_t PAGE_SIZE = 4096; 29 | 30 | namespace PMM 31 | { 32 | constexpr uintptr_t INVALID_PAGE = 0; 33 | 34 | struct PageFreeListNode 35 | { 36 | PageFreeListNode *pPrev, *pNext; 37 | }; 38 | 39 | struct MemoryArea 40 | { 41 | // The link to the next MemoryArea entry. 42 | MemoryArea* m_pLink; 43 | // The start of the physical memory this bitmap represents. 44 | uintptr_t m_startAddr; 45 | // The length (in pages) of this PMM bitmap. 46 | size_t m_length; 47 | // The amount of pages free. This should always be kept in sync. 48 | size_t m_freePages; 49 | 50 | PageFreeListNode *m_pFirst, *m_pLast; 51 | 52 | MemoryArea(uintptr_t start, size_t len) : m_pLink(nullptr), m_startAddr(start), m_length(len), m_freePages(len) 53 | { 54 | } 55 | 56 | uintptr_t RemoveFirst(); 57 | void PushBack(uintptr_t paddr); 58 | }; 59 | 60 | // Get the total amount of pages available to the system. Never changes after init. 61 | uint64_t GetTotalPages(); 62 | 63 | // Initializes the PMM using the Limine memory map request. 64 | // This function must be run on the bootstrap CPU. 65 | void Init(); 66 | 67 | // Allocate a new page within the PMM. 68 | // Note: The hint reference should be treated as an opaque value and is designed for quick successive allocations. 69 | //uintptr_t AllocatePage(uint64_t& hint); 70 | uintptr_t AllocatePage(); 71 | 72 | // Free a page within the PMM. 73 | void FreePage(uintptr_t page); 74 | 75 | // Test out the PMM. 76 | void Test(); 77 | } 78 | 79 | namespace VMM 80 | { 81 | constexpr uintptr_t C_KERNEL_HEAP_START = 0xFFFFA00000000000; 82 | constexpr uintptr_t C_KERNEL_HEAP_SIZE = 0x1600000; // 16 MB 83 | 84 | constexpr uintptr_t C_HPET_MAP_ADDRESS = 0xFFFFFFFE00000000; 85 | 86 | // The PML4 indices of the memory regions. 87 | enum ePml4Limit 88 | { 89 | P_USER_START = 0x000, 90 | P_USER_END = 0x100, 91 | P_KERN_START = 0x100, 92 | P_HHDM_START = 0x100, 93 | P_HHDM_END = 0x1D0, 94 | P_KHEAP = 0x1D0, // one page is enough I would think. 95 | P_KERNEL_PML4 = 0x1FF, 96 | P_KERN_END = 0x200, 97 | }; 98 | 99 | // Flags for a page entry. 100 | constexpr uint64_t 101 | PE_PRESENT = BIT(0), 102 | PE_READWRITE = BIT(1), 103 | PE_SUPERVISOR = BIT(2), 104 | PE_WRITETHROUGH = BIT(3), 105 | PE_CACHEDISABLE = BIT(4), 106 | PE_ACCESSED = BIT(5), 107 | PE_DIRTY = BIT(6), 108 | PE_PAT = BIT(7), 109 | PE_GLOBAL = BIT(8), 110 | PE_PARTOFPMM = BIT(9), // NanoShell64 specific 111 | PE_NEEDALLOCPAGE = BIT(10), // NanoShell64 specific 112 | PE_BIT11 = BIT(11), 113 | PE_EXECUTEDISABLE = BIT(63); 114 | 115 | // Represents a single page entry. 116 | union PageEntry 117 | { 118 | struct 119 | { 120 | bool m_present : 1; // bit 0 121 | bool m_readWrite : 1; // bit 1 122 | bool m_supervisor : 1; // bit 2 123 | bool m_writeThrough : 1; // bit 3 124 | bool m_cacheDisable : 1; // bit 4 125 | bool m_accessed : 1; // bit 5 126 | bool m_dirty : 1; // bit 6 127 | bool m_pat : 1; // bit 7 128 | bool m_global : 1; // bit 8 129 | // 3 available bits. 130 | bool m_partOfPmm : 1; // bit 9: If this bit is set, this is a part of the PMM. 131 | bool m_needAllocPage : 1; // bit 10: If this bit is set, we will want to place a new address into the address field on page fault. 132 | // When we do that, we should fill it with a byte like (protKey << 4 | protKey). No particular 133 | // reason we are using protKey specifically. 
134 | bool m_available0 : 1; // bit 11 135 | uint64_t m_address : 40;// bits 12-51 (MAXPHYADDR) 136 | int m_available1 : 7; // bits 52-58 (ignored) 137 | int m_protKey : 4; // bits 59-62 (protection key, ignores unless CR4.PKE or CR4.PKS is set and this is a page tree leaf) 138 | bool m_execDisable : 1; // bit 63: Disable execution from this page. 139 | }; 140 | 141 | uint64_t m_data; 142 | 143 | PageEntry() = default; 144 | 145 | PageEntry(uint64_t addr, uint64_t flags, uint64_t default_flags = PE_PRESENT, int pkey = 0) 146 | { 147 | m_data = 0; 148 | 149 | m_address = addr >> 12; 150 | m_protKey = pkey; 151 | 152 | m_data |= flags; 153 | m_data |= default_flags; 154 | } 155 | 156 | /* 157 | PageEntry(uint64_t addr, bool rw, bool us, bool xd, bool pop, bool nap, bool pr = true, 158 | int pk = 0, bool wt = false, bool cd = false, bool pat = false, bool glb = false) // useless stuff 159 | { 160 | m_address = addr >> 12; 161 | m_readWrite = rw; 162 | m_present = pr; 163 | m_supervisor = us; 164 | m_execDisable = xd; 165 | m_partOfPmm = pop; 166 | m_needAllocPage = nap; 167 | m_writeThrough = wt; 168 | m_cacheDisable = cd; 169 | m_pat = pat; 170 | m_global = glb; 171 | m_protKey = pk; 172 | } 173 | */ 174 | }; 175 | 176 | struct PageTable 177 | { 178 | PageEntry m_entries[512]; 179 | 180 | // Gets the page entry pointer as a virtual address. 181 | PageEntry* GetPageEntry(int index) 182 | { 183 | return &m_entries[index]; 184 | } 185 | 186 | // Clone the page table. 187 | PageTable* Clone(); 188 | }; 189 | 190 | struct PageDirectory 191 | { 192 | PageEntry m_entries[512]; 193 | 194 | // Gets the page table pointer as a virtual address. 195 | PageTable* GetPageTable(int index); 196 | 197 | // Clones the page directory. 198 | PageDirectory* Clone(); 199 | }; 200 | 201 | struct PML3 // PDPT 202 | { 203 | PageEntry m_entries[512]; 204 | 205 | // Gets the page table pointer as a virtual address. 206 | PageDirectory* GetPageDirectory(int index); 207 | 208 | // Clones the PML3. 209 | PML3* Clone(); 210 | }; 211 | 212 | struct PageMapping 213 | { 214 | PageEntry m_entries[512]; 215 | 216 | // Gets the current page mapping from the CR3. 217 | static PageMapping* GetFromCR3(); 218 | 219 | // Gets the PML3 pointer as a virtual address. 220 | PML3* GetPML3(int index); 221 | 222 | // Clones a page mapping. 223 | PageMapping* Clone(bool keepLowerHalf = true); 224 | 225 | // Gets a page entry from the table. Returns NULL if it's not available. 226 | PageEntry* GetPageEntry(uintptr_t addr); 227 | 228 | // Switches the executing CPU to use this page mapping. 229 | void SwitchTo(); 230 | 231 | // Set a page mapping's page entry at a particular address. 232 | bool MapPage(uintptr_t addr, const PageEntry & pe); 233 | 234 | // Map a new page in. For now, not demand paged -- we need to wait until we add interrupts. 235 | bool MapPage(uintptr_t addr, bool rw = true, bool super = false, bool xd = true); 236 | 237 | // Removes a page mapping, and any now empty levels that it resided in. 
238 | 		void UnmapPage(uintptr_t addr, bool removeUpperLevels = true);
239 | 	};
240 | 
241 | 	class KernelHeap
242 | 	{
243 | 	public:
244 | 		struct FreeListNode
245 | 		{
246 | 			static constexpr uint64_t FLN_MAGIC = 0x67249a80d35b1cef;
247 | 			static constexpr uint64_t FLA_MAGIC = 0x4fa850d3672e91cb;
248 | 
249 | 			uint64_t m_magic;
250 | 			FreeListNode* m_next;
251 | 			FreeListNode* m_prev;
252 | 			size_t m_size;
253 | 
254 | 			void* GetArea()
255 | 			{
256 | 				return (void*)((uint8_t*)this + sizeof(FreeListNode));
257 | 			}
258 | 
259 | 			FreeListNode* GetPtrDirectlyAfter()
260 | 			{
261 | 				return (FreeListNode*)((uint8_t*)this + sizeof(FreeListNode) + m_size);
262 | 			}
263 | 		};
264 | 
265 | 	public:
266 | 		// Initializes the kernel heap.
267 | 		static void Init();
268 | 
269 | 		static void* Allocate(size_t);
270 | 
271 | 		static void Free(void*);
272 | 	};
273 | }
274 | 
275 | #endif//_MEMORY_MANAGER_HPP
276 | 

--------------------------------------------------------------------------------
/include/NanoShell.hpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | //  NanoShell.hpp - Creation date: 05/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | //  Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | #ifndef _NANOSHELL_HPP
10 | #define _NANOSHELL_HPP
11 | 
12 | // This file includes global definitions that everyone should have.
13 | #include <stdint.h>
14 | #include <stddef.h>
15 | #include <stdarg.h>
16 | 
17 | #define PACKED        __attribute__((packed))
18 | #define NO_RETURN     __attribute__((noreturn))
19 | #define RETURNS_TWICE __attribute__((returns_twice))
20 | #define UNUSED        __attribute__((unused))
21 | 
22 | #define BIT(x) (1ULL << (x))
23 | 
24 | inline void *operator new(size_t, void *p)   throw() { return p; }
25 | inline void *operator new[](size_t, void *p) throw() { return p; }
26 | inline void operator delete (void *, void *) throw() { };
27 | inline void operator delete[](void *, void *) throw() { };
28 | 
29 | class nopanic_t {};
30 | 
31 | constexpr nopanic_t nopanic;
32 | 
33 | void* operator new(size_t, const nopanic_t&);
34 | void* operator new[](size_t, const nopanic_t&);
35 | 
36 | #define ASM __asm__ __volatile__
37 | 
38 | #ifdef TARGET_X86_64
39 | struct Registers
40 | {
41 | 	uint16_t ds, es, fs, gs;
42 | 	uint64_t cr2;
43 | 	uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
44 | 	uint64_t rbp, rdi, rsi;
45 | 	uint64_t rdx, rcx, rbx, rax;
46 | 	uint64_t error_code;
47 | 	uint64_t rip, cs, rflags, rsp, ss; // pushed by the ISR and popped by iretq.
48 | }
49 | PACKED;
50 | #endif
51 | 
52 | extern "C"
53 | {
54 | 	void* memcpy(void* dst, const void* src, size_t n);
55 | 	void* memquadcpy(uint64_t* dst, const uint64_t* src, size_t n);
56 | 	void* memset(void* dst, int c, size_t n);
57 | 	char* strcpy(char* dst, const char* src);
58 | 	char* strcat(char* dst, const char* src);
59 | 	int strcmp(const char* s1, const char* s2);
60 | 	int memcmp(const void* s1, const void* s2, size_t n);
61 | 	size_t strlen(const char * s);
62 | 	int vsnprintf(char* buf, size_t sz, const char* fmt, va_list arg);
63 | 	int snprintf(char* buf, size_t sz, const char* fmt, ...);
64 | 	int sprintf(char* buf, const char* fmt, ...);
65 | 
66 | 	void LogMsg(const char* fmt, ...);
67 | 	void LogMsgNoCR(const char* fmt, ...);
68 | 
69 | 	void SLogMsg(const char* fmt, ...);
70 | 	void SLogMsgNoCR(const char* fmt, ...);
71 | 
72 | 	NO_RETURN void KernelPanic(const char* fmt, ...);
73 | 
74 | 	NO_RETURN void AssertUnreachable(const char* src_file, int src_line);
75 | 
76 | 	#define ASSERT_UNREACHABLE AssertUnreachable(__FILE__, __LINE__)
77 | };
78 | 
79 | #endif//_NANOSHELL_HPP
80 | 

--------------------------------------------------------------------------------
/include/Scheduler.hpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | //  Scheduler.hpp - Creation date: 11/04/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | //  Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | #ifndef _SCHEDULER_HPP
10 | #define _SCHEDULER_HPP
11 | 
12 | #include <Thread.hpp>
13 | #include <KList.hpp>
14 | #include <KPriorityQueue.hpp>
15 | 
16 | // Forward declare the CPU class since we need it as a friend of Scheduler.
17 | namespace Arch
18 | {
19 | 	class CPU;
20 | }
21 | 
22 | struct Thread_ExecQueueComparator
23 | {
24 | 	bool operator() (Thread* threadA, Thread* threadB) const
25 | 	{
26 | 		return threadA->m_Priority.Load() > threadB->m_Priority.Load();
27 | 	}
28 | };
29 | 
30 | struct Thread_SleepTimeComparator
31 | {
32 | 	bool operator() (Thread* threadA, Thread* threadB) const
33 | 	{
34 | 		return threadA->m_SleepingUntil.Load() < threadB->m_SleepingUntil.Load();
35 | 	}
36 | };
37 | 
38 | // The way this works is simple. When a thread is to be scheduled, the pointer:
39 | // - is popped off the relevant queue
40 | // - is placed as "the current thread"
41 | // - the old "current thread" is placed on the relevant queue, or the suspended threads list.
42 | 
43 | class Scheduler
44 | {
45 | 	// Maximum time slice for a thread.
46 | 	constexpr static uint64_t C_THREAD_MAX_TIME_SLICE = 1'000'000;
47 | 
48 | public:
49 | 	// Creates a new thread object.
50 | 	Thread* CreateThread();
51 | 
52 | protected:
53 | 	friend class Arch::CPU;
54 | 	friend class Thread;
55 | 
56 | 	// Initializes the scheduler.
57 | 	void Init();
58 | 
59 | 	// Pops the next thread from the relevant execution queue.
60 | 	Thread* PopNextThread();
61 | 
62 | 	// Gets the current thread.
63 | 	Thread* GetCurrentThread();
64 | 
65 | 	// Let the scheduler know that this thread's quantum is over.
66 | 	void Done(Thread* pThrd);
67 | 
68 | 	// Schedules in a new thread. This is used within Thread::Yield(), so use that instead.
69 | 	void Schedule(bool bRunFromTimerIRQ);
70 | 
71 | 	// The function run when an interrupt comes in.
72 | 	void OnTimerIRQ(Registers* pRegs);
73 | 
74 | private:
75 | 	// A list of ALL threads ever.
76 | 	KList<Thread*> m_AllThreads;
77 | 	KList<Thread*> m_ThreadFreeList;
78 | 	KPriorityQueue<Thread*, Thread_ExecQueueComparator> m_ExecutionQueue;
79 | 	KPriorityQueue<Thread*, Thread_SleepTimeComparator> m_SleepingThreads;
80 | 	KList<Thread*> m_SuspendedThreads;
81 | 	KList<Thread*> m_ZombieThreads; // Threads to clean up and dispose.
82 | 
83 | 	Thread *m_pCurrentThread = nullptr;
84 | 
85 | 	static void IdleThread();
86 | 	static void NormalThread();
87 | 	static void RealTimeThread();
88 | 
89 | 	void DeleteThread(Thread* pThread);
90 | 
91 | 	// Looks for the next event that will happen, such as a thread wake up.
92 | 	uint64_t NextEvent();
93 | 
94 | 	// For each suspended thread, check whether it should still be suspended.
95 | 	void CheckUnsuspensionConditions();
96 | 
97 | 	// Kill every zombie thread that isn't owned by anybody.
98 | 	void CheckZombieThreads();
99 | 
100 | 	// Check for sleeping threads that need to wake up.
101 | 	// Returns the time the next thread needs to wake up, or zero if no thread needs to wake up.
102 | 	uint64_t CheckSleepingThreads();
103 | 
104 | 	// Unsuspends sleeping threads if needed.
105 | 	void UnsuspendSleepingThreads();
106 | 
107 | 	// Check for events for the scheduler.
108 | 	void CheckEvents();
109 | };
110 | 
111 | #endif//_SCHEDULER_HPP
112 | 
113 | 

--------------------------------------------------------------------------------
/include/Spinlock.hpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | //  Spinlock.hpp - Creation date: 04/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | //  Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | #ifndef _SPINLOCK_HPP
10 | #define _SPINLOCK_HPP
11 | 
12 | #include "Atomic.hpp"
13 | 
14 | class Spinlock
15 | {
16 | private:
17 | 	Atomic<bool> m_lockBool;
18 | 
19 | public:
20 | 	Spinlock() : m_lockBool(false) {}
21 | 
22 | 	// Checks if a spin lock is locked. Not sure why you would need this.
23 | 	bool IsLocked() const
24 | 	{
25 | 		return m_lockBool.Load();
26 | 	}
27 | 
28 | 	// Tries to lock the current Spinlock object. If the lock is already
29 | 	// taken, this will return 'false', but if the lock has been acquired
30 | 	// through this function, this will return 'true'.
31 | 	bool TryLock()
32 | 	{
33 | 		return !m_lockBool.TestAndSet(ATOMIC_MEMORD_ACQUIRE);
34 | 	}
35 | 
36 | 	// Lock the current Spinlock object. If the lock is already taken at
37 | 	// the time of this call, this function will spin and wait until it's
38 | 	// no longer locked.
39 | 	inline void Lock()
40 | 	{
41 | 		while (true)
42 | 		{
43 | 			if (!m_lockBool.TestAndSet(ATOMIC_MEMORD_ACQUIRE)) break;
44 | 			while (m_lockBool.Load(ATOMIC_MEMORD_ACQUIRE))
45 | 			{
46 | 				Spinlock::SpinHint();
47 | 			}
48 | 		}
49 | 	}
50 | 
51 | 	// Unlock the current Spinlock object.
52 | 	void Unlock()
53 | 	{
54 | 		m_lockBool.Clear(ATOMIC_MEMORD_RELEASE);
55 | 	}
56 | 
57 | 	// Hints to the current processor that it is currently spinning.
58 | 	// This is done on x86_64 with a "pause" instruction.
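// (The pause hint keeps a spinning logical CPU from starving its SMT sibling
// and avoids the memory-order mis-speculation penalty when the lock is freed.)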
59 | 	static void SpinHint()
60 | 	{
61 | 	#ifdef TARGET_X86_64
62 | 		__builtin_ia32_pause();
63 | 	#else
64 | 	#warning "Spinlock may benefit from adding a pause instruction or similar"
65 | 	#endif
66 | 	}
67 | };
68 | 
69 | // Note: Currently the only viable locking strategy to implement is
70 | // AdoptLock. TryToLock implicitly depends on OwnsLock, and since we don't have
71 | // threading yet we can't really implement it. And DeferLock... I don't see its
72 | // purpose just yet.
73 | 
74 | // Empty tag class used in LockGuard to let it know that we already own
75 | // the lock. Behavior is undefined if this is used on an unlocked lock,
76 | // or a lock that's already been locked by another context.
77 | class AdoptLock
78 | {
79 | public: explicit AdoptLock() = default;
80 | };
81 | 
82 | // The lock guard locks a spin lock throughout its lifetime. Best used as
83 | // a stack-allocated object, this guarantees that the lock will never be
84 | // left locked.
85 | class LockGuard
86 | {
87 | private:
88 | 	Spinlock &m_lock;
89 | 
90 | public:
91 | 	// Constructs a LockGuard object which adopts the passed in lock and locks it.
92 | 	LockGuard(Spinlock& lock);
93 | 
94 | 	// Constructs a LockGuard object which adopts the passed in lock but doesn't lock it by itself.
95 | 	LockGuard(Spinlock& lock, AdoptLock);
96 | 
97 | 
98 | 
99 | 	// This will delete any LockGuard copiers. This is an object which may not be copied.
100 | 	LockGuard(const LockGuard &) = delete;
101 | 	LockGuard& operator=(const LockGuard &) = delete;
102 | 
103 | 	// The destructor unlocks the lock associated with this object.
104 | 	~LockGuard();
105 | };
106 | 
107 | #endif//_SPINLOCK_HPP

--------------------------------------------------------------------------------
/include/Terminal.hpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | //  Terminal.hpp - Creation date: 05/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | //  Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | #ifndef _TERMINAL_HPP
10 | #define _TERMINAL_HPP
11 | 
12 | #include <NanoShell.hpp>
13 | 
14 | namespace Terminal
15 | {
16 | 	// Called when the kernel first starts up.
17 | 	void Setup();
18 | 
19 | 	// Called when the kernel first starts up. Checks Limine's response field.
20 | 	bool CheckResponse();
21 | 
22 | 	// Writes a string to the terminal.
23 | 	void Write(const char * str);
24 | 
25 | 	// Writes a string to the terminal, and adds a new line.
26 | 	void WriteLn(const char * str);
27 | 
28 | 	// Writes a string to the E9 port.
29 | 	void E9Write(const char * str);
30 | 
31 | 	// Writes a string to the E9 port, and adds a new line.
30 |
31 | // Writes a string to the E9 port, and adds a new line.
32 | void E9WriteLn(const char * str);
33 | };
34 |
35 | #endif//_TERMINAL_HPP
36 |
--------------------------------------------------------------------------------
/include/Thread.hpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // Thread.hpp - Creation date: 11/04/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | #ifndef _THREAD_HPP
10 | #define _THREAD_HPP
11 |
12 | #include
13 | #include
14 |
15 | /**
16 | An explanation of how thread creation and deletion are meant to be done:
17 |
18 | Step 1: Creating the thread object:
19 |
20 | ```
21 | Thread* thread = Scheduler::CreateThread();
22 | ```
23 |
24 | This creates a thread on the current CPU (this could change later).
25 |
26 | Step 2: Set up the thread's properties, such as its priority.
27 |
28 | ```
29 | thread->SetPriority(Thread::NORMAL);
30 | // ...
31 | ```
32 |
33 | Step 3: Starting the thread:
34 | ```
35 | thread->Start();
36 | ```
37 |
38 | Note: There are now two ways of operating on a thread object:
39 |
40 | 1. Detach the thread. This allows the kernel to destroy the thread
41 | automatically upon its exit; however, the ownership of this thread
42 | object is forfeited. After a Thread::Detach() call, assume that the
43 | thread is no longer valid. (While it may still be valid, assuming the
44 | worst is a good strategy for avoiding bugs.)
45 |
46 | Note: As soon as the thread is detached, it can also be relocated to
47 | any other running CPU. The scheduler may pass threads around to balance
48 | the load on the system. This is another reason to consider the thread
49 | invalid after a detach call.
50 | ```
51 | thread->Detach();
52 | ```
53 |
54 | 2. Join the thread. This waits for the thread's death. If the thread
55 | has already died, this does nothing.
56 | ```
57 | thread->Join();
58 | ```
59 | **/
60 |
61 | // Forward declaration of the scheduler class. We would like to later give this class
62 | // access to our protected members.
63 | class Scheduler;
64 |
65 | typedef void(*ThreadEntry)();
66 |
67 | class Thread
68 | {
69 | public:
70 | // Note: this only saves the important registers that will get us back
71 | // to the context of a call to the Yield() function.
72 | struct ExecutionContext
73 | {
74 | uint64_t rbp, rbx, r12, r13, r14, r15;
75 | uint64_t rip, cs, rflags, rsp, ss; // popped by iretq; allows for an easy return to a normal context
76 | };
77 |
78 | // Additional registers that must be restored as well.
79 | struct AdditionalRegisters
80 | {
81 | uint64_t rax, rcx, rdx, rsi, rdi, r8, r9, r10, r11;
82 | uint64_t ds, es, fs, gs;
83 | };
84 |
85 | enum ePriority
86 | {
87 | IDLE, // Idle priority. This thread will only be run when no other threads can be scheduled.
88 | NORMAL, // Normal priority. This thread will execute normally along with other threads.
89 | // No normal thread will be scheduled while real-time threads are still in the execution queue.
90 | REALTIME, // Real-time priority. This thread will execute as much as it can.
91 | };
92 |
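/*
Note: the friend declarations further down name a Thread_ExecQueueComparator
whose definition is not shown in this dump. A minimal sketch of what a
comparator ordering the scheduler's execution queue by ePriority could look
like, assuming the KPriorityQueue treats its comparator the way
std::priority_queue does (illustrative only; this is an assumption, not the
project's actual definition):

```
struct Thread_ExecQueueComparator
{
	bool operator()(Thread* a, Thread* b) const
	{
		// REALTIME before NORMAL before IDLE.
		return a->m_Priority.Load() > b->m_Priority.Load();
	}
};
```
*/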
93 | enum eStatus
94 | {
95 | SETUP, // The thread is in the process of being set up.
96 | SUSPENDED, // The thread has been manually suspended.
97 | RUNNING, // The thread is active.
98 | ZOMBIE, // The thread has completely died. The owner of this thread object now has to clean it up.
99 | SLEEPING, // The thread is sleeping until a point in time in the future.
100 | };
101 |
102 | public: // Static operations performed on the current thread
103 |
104 | // Yields execution of the current thread.
105 | static void Yield();
106 |
107 | // Puts the current thread to sleep for the given number of nanoseconds.
108 | static void Sleep(uint64_t nanoseconds);
109 |
110 | // Gets the currently running thread.
111 | static Thread* GetCurrent();
112 |
113 | public:
114 | // This sets the entry point of the thread.
115 | // This is only possible before the Start() function is called.
116 | void SetEntryPoint(ThreadEntry pEntry);
117 |
118 | // This function sets the stack size of the thread.
119 | // This is only possible before Start() is called!
120 | void SetStackSize(size_t stack_size);
121 |
122 | // This starts the thread object. This is meant to be called
123 | // after the thread's properties have been set up.
124 | void Start();
125 |
126 | // Suspends the thread's execution.
127 | void Suspend();
128 |
129 | // Suspends the thread's execution until a time point.
130 | void SleepUntil(uint64_t time);
131 |
132 | // Resumes the thread's execution, if it was suspended.
133 | void Resume();
134 |
135 | // Marks the thread as a zombie.
136 | void Kill();
137 |
138 | // Set the priority of this thread.
139 | void SetPriority(ePriority prio);
140 |
141 | // Detaches a thread from the current thread of execution.
142 | // This forfeits control of this thread object to the scheduler.
143 | void Detach();
144 |
145 | // Waits until the thread exits. This is not possible if the thread
146 | // has been detached.
147 | void Join();
148 |
149 | private:
150 | static void Beginning();
151 |
152 | /**** Protected variables. ****/
153 | protected:
154 | // The scheduler manages the thread linked queue. We will give it permission
155 | // to access the members below:
156 | friend class Scheduler;
157 |
158 | // Also allow comparison structs to access our protected fields.
159 | friend struct Thread_ExecQueueComparator;
160 | friend struct Thread_SleepTimeComparator;
161 |
162 | // The ID of the thread.
163 | int m_ID;
164 |
165 | // The next and previous items in the thread queue.
166 | Thread *m_Prev = nullptr, *m_Next = nullptr;
167 |
168 | // The priority of the thread.
169 | Atomic m_Priority { NORMAL };
170 |
171 | // The status of the thread.
172 | Atomic m_Status { SETUP };
173 |
174 | // Whether the thread is still owned by the thread that created it.
175 | // Detach() sets this to false.
176 | Atomic m_bOwned { true };
177 |
178 | // The owner scheduler.
179 | Scheduler* m_pScheduler;
180 |
181 | // Entry point of the thread.
182 | ThreadEntry m_EntryPoint;
183 |
184 | // The stack of this thread.
185 | uint64_t* m_pStack;
186 | size_t m_StackSize = 32768;
187 |
188 | // The time the thread will wake up:
189 | Atomic m_SleepingUntil = 0;
190 |
191 | // The time at which the current time slice will end:
192 | uint64_t m_TimeSliceUntil = 0;
193 |
194 | // The user-space GS base.
195 | void* m_UserGSBase = nullptr;
196 |
197 | // When calling JumpExecContext, also restore these if needed:
198 | bool m_bNeedRestoreAdditionalRegisters = false;
199 | AdditionalRegisters m_AdditionalRegisters;
200 |
201 | // The saved execution context of the thread.
202 | ExecutionContext m_ExecContext;
203 |
204 | // Jumps to this thread's execution context.
205 | void JumpExecContext();
206 |
207 | // Unsuspends the thread.
208 | void Unsuspend(); 209 | }; 210 | 211 | #endif//_THREAD_HPP 212 | -------------------------------------------------------------------------------- /include/_limine.h: -------------------------------------------------------------------------------- 1 | #ifndef _LIMINE_H 2 | #define _LIMINE_H 1 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | #include 9 | 10 | /* Misc */ 11 | 12 | #ifdef LIMINE_NO_POINTERS 13 | # define LIMINE_PTR(TYPE) uint64_t 14 | #else 15 | # define LIMINE_PTR(TYPE) TYPE 16 | #endif 17 | 18 | #define LIMINE_COMMON_MAGIC 0xc7b1dd30df4c8b88, 0x0a82e883a194f07b 19 | 20 | struct limine_uuid { 21 | uint32_t a; 22 | uint16_t b; 23 | uint16_t c; 24 | uint8_t d[8]; 25 | }; 26 | 27 | #define LIMINE_MEDIA_TYPE_GENERIC 0 28 | #define LIMINE_MEDIA_TYPE_OPTICAL 1 29 | #define LIMINE_MEDIA_TYPE_TFTP 2 30 | 31 | struct limine_file { 32 | uint64_t revision; 33 | LIMINE_PTR(void *) address; 34 | uint64_t size; 35 | LIMINE_PTR(char *) path; 36 | LIMINE_PTR(char *) cmdline; 37 | uint32_t media_type; 38 | uint32_t unused; 39 | uint32_t tftp_ip; 40 | uint32_t tftp_port; 41 | uint32_t partition_index; 42 | uint32_t mbr_disk_id; 43 | struct limine_uuid gpt_disk_uuid; 44 | struct limine_uuid gpt_part_uuid; 45 | struct limine_uuid part_uuid; 46 | }; 47 | 48 | /* Boot info */ 49 | 50 | #define LIMINE_BOOTLOADER_INFO_REQUEST { LIMINE_COMMON_MAGIC, 0xf55038d8e2a1202f, 0x279426fcf5f59740 } 51 | 52 | struct limine_bootloader_info_response { 53 | uint64_t revision; 54 | LIMINE_PTR(char *) name; 55 | LIMINE_PTR(char *) version; 56 | }; 57 | 58 | struct limine_bootloader_info_request { 59 | uint64_t id[4]; 60 | uint64_t revision; 61 | LIMINE_PTR(struct limine_bootloader_info_response *) response; 62 | }; 63 | 64 | /* Stack size */ 65 | 66 | #define LIMINE_STACK_SIZE_REQUEST { LIMINE_COMMON_MAGIC, 0x224ef0460a8e8926, 0xe1cb0fc25f46ea3d } 67 | 68 | struct limine_stack_size_response { 69 | uint64_t revision; 70 | }; 71 | 72 | struct limine_stack_size_request { 73 | uint64_t id[4]; 74 | uint64_t revision; 75 | LIMINE_PTR(struct limine_stack_size_response *) response; 76 | uint64_t stack_size; 77 | }; 78 | 79 | /* HHDM */ 80 | 81 | #define LIMINE_HHDM_REQUEST { LIMINE_COMMON_MAGIC, 0x48dcf1cb8ad2b852, 0x63984e959a98244b } 82 | 83 | struct limine_hhdm_response { 84 | uint64_t revision; 85 | uint64_t offset; 86 | }; 87 | 88 | struct limine_hhdm_request { 89 | uint64_t id[4]; 90 | uint64_t revision; 91 | LIMINE_PTR(struct limine_hhdm_response *) response; 92 | }; 93 | 94 | /* Framebuffer */ 95 | 96 | #define LIMINE_FRAMEBUFFER_REQUEST { LIMINE_COMMON_MAGIC, 0x9d5827dcd881dd75, 0xa3148604f6fab11b } 97 | 98 | #define LIMINE_FRAMEBUFFER_RGB 1 99 | 100 | struct limine_framebuffer { 101 | LIMINE_PTR(void *) address; 102 | uint64_t width; 103 | uint64_t height; 104 | uint64_t pitch; 105 | uint16_t bpp; 106 | uint8_t memory_model; 107 | uint8_t red_mask_size; 108 | uint8_t red_mask_shift; 109 | uint8_t green_mask_size; 110 | uint8_t green_mask_shift; 111 | uint8_t blue_mask_size; 112 | uint8_t blue_mask_shift; 113 | uint8_t unused[7]; 114 | uint64_t edid_size; 115 | LIMINE_PTR(void *) edid; 116 | }; 117 | 118 | struct limine_framebuffer_response { 119 | uint64_t revision; 120 | uint64_t framebuffer_count; 121 | LIMINE_PTR(struct limine_framebuffer **) framebuffers; 122 | }; 123 | 124 | struct limine_framebuffer_request { 125 | uint64_t id[4]; 126 | uint64_t revision; 127 | LIMINE_PTR(struct limine_framebuffer_response *) response; 128 | }; 129 | 130 | /* Terminal */ 131 | 132 | #define 
LIMINE_TERMINAL_REQUEST { LIMINE_COMMON_MAGIC, 0xc8ac59310c2b0844, 0xa68d0c7265d38878 } 133 | 134 | #define LIMINE_TERMINAL_CB_DEC 10 135 | #define LIMINE_TERMINAL_CB_BELL 20 136 | #define LIMINE_TERMINAL_CB_PRIVATE_ID 30 137 | #define LIMINE_TERMINAL_CB_STATUS_REPORT 40 138 | #define LIMINE_TERMINAL_CB_POS_REPORT 50 139 | #define LIMINE_TERMINAL_CB_KBD_LEDS 60 140 | #define LIMINE_TERMINAL_CB_MODE 70 141 | #define LIMINE_TERMINAL_CB_LINUX 80 142 | 143 | #define LIMINE_TERMINAL_CTX_SIZE ((uint64_t)(-1)) 144 | #define LIMINE_TERMINAL_CTX_SAVE ((uint64_t)(-2)) 145 | #define LIMINE_TERMINAL_CTX_RESTORE ((uint64_t)(-3)) 146 | #define LIMINE_TERMINAL_FULL_REFRESH ((uint64_t)(-4)) 147 | 148 | struct limine_terminal; 149 | 150 | typedef void (*limine_terminal_write)(struct limine_terminal *, const char *, uint64_t); 151 | typedef void (*limine_terminal_callback)(struct limine_terminal *, uint64_t, uint64_t, uint64_t, uint64_t); 152 | 153 | struct limine_terminal { 154 | uint64_t columns; 155 | uint64_t rows; 156 | LIMINE_PTR(struct limine_framebuffer *) framebuffer; 157 | }; 158 | 159 | struct limine_terminal_response { 160 | uint64_t revision; 161 | uint64_t terminal_count; 162 | LIMINE_PTR(struct limine_terminal **) terminals; 163 | LIMINE_PTR(limine_terminal_write) write; 164 | }; 165 | 166 | struct limine_terminal_request { 167 | uint64_t id[4]; 168 | uint64_t revision; 169 | LIMINE_PTR(struct limine_terminal_response *) response; 170 | LIMINE_PTR(limine_terminal_callback) callback; 171 | }; 172 | 173 | /* 5-level paging */ 174 | 175 | #define LIMINE_5_LEVEL_PAGING_REQUEST { LIMINE_COMMON_MAGIC, 0x94469551da9b3192, 0xebe5e86db7382888 } 176 | 177 | struct limine_5_level_paging_response { 178 | uint64_t revision; 179 | }; 180 | 181 | struct limine_5_level_paging_request { 182 | uint64_t id[4]; 183 | uint64_t revision; 184 | LIMINE_PTR(struct limine_5_level_paging_response *) response; 185 | }; 186 | 187 | /* SMP */ 188 | 189 | #define LIMINE_SMP_REQUEST { LIMINE_COMMON_MAGIC, 0x95a67b819a1b857e, 0xa0b61b723b6a73e0 } 190 | 191 | struct limine_smp_info; 192 | 193 | typedef void (*limine_goto_address)(struct limine_smp_info *); 194 | 195 | #if defined (__x86_64__) || defined (__i386__) 196 | 197 | #define LIMINE_SMP_X2APIC (1 << 0) 198 | 199 | struct limine_smp_info { 200 | uint32_t processor_id; 201 | uint32_t lapic_id; 202 | uint64_t reserved; 203 | LIMINE_PTR(limine_goto_address) goto_address; 204 | uint64_t extra_argument; 205 | }; 206 | 207 | struct limine_smp_response { 208 | uint64_t revision; 209 | uint32_t flags; 210 | uint32_t bsp_lapic_id; 211 | uint64_t cpu_count; 212 | LIMINE_PTR(struct limine_smp_info **) cpus; 213 | }; 214 | 215 | #elif defined (__aarch64__) 216 | 217 | struct limine_smp_info { 218 | uint32_t processor_id; 219 | uint32_t gic_iface_no; 220 | uint64_t mpidr; 221 | uint64_t reserved; 222 | LIMINE_PTR(limine_goto_address) goto_address; 223 | uint64_t extra_argument; 224 | }; 225 | 226 | struct limine_smp_response { 227 | uint64_t revision; 228 | uint32_t flags; 229 | uint64_t bsp_mpidr; 230 | uint64_t cpu_count; 231 | LIMINE_PTR(struct limine_smp_info **) cpus; 232 | }; 233 | 234 | #else 235 | #error Unknown architecture 236 | #endif 237 | 238 | struct limine_smp_request { 239 | uint64_t id[4]; 240 | uint64_t revision; 241 | LIMINE_PTR(struct limine_smp_response *) response; 242 | uint64_t flags; 243 | }; 244 | 245 | /* Memory map */ 246 | 247 | #define LIMINE_MEMMAP_REQUEST { LIMINE_COMMON_MAGIC, 0x67cf3d9d378a806f, 0xe304acdfc50c3c62 } 248 | 249 | #define 
LIMINE_MEMMAP_USABLE 0 250 | #define LIMINE_MEMMAP_RESERVED 1 251 | #define LIMINE_MEMMAP_ACPI_RECLAIMABLE 2 252 | #define LIMINE_MEMMAP_ACPI_NVS 3 253 | #define LIMINE_MEMMAP_BAD_MEMORY 4 254 | #define LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE 5 255 | #define LIMINE_MEMMAP_KERNEL_AND_MODULES 6 256 | #define LIMINE_MEMMAP_FRAMEBUFFER 7 257 | 258 | struct limine_memmap_entry { 259 | uint64_t base; 260 | uint64_t length; 261 | uint64_t type; 262 | }; 263 | 264 | struct limine_memmap_response { 265 | uint64_t revision; 266 | uint64_t entry_count; 267 | LIMINE_PTR(struct limine_memmap_entry **) entries; 268 | }; 269 | 270 | struct limine_memmap_request { 271 | uint64_t id[4]; 272 | uint64_t revision; 273 | LIMINE_PTR(struct limine_memmap_response *) response; 274 | }; 275 | 276 | /* Entry point */ 277 | 278 | #define LIMINE_ENTRY_POINT_REQUEST { LIMINE_COMMON_MAGIC, 0x13d86c035a1cd3e1, 0x2b0caa89d8f3026a } 279 | 280 | typedef void (*limine_entry_point)(void); 281 | 282 | struct limine_entry_point_response { 283 | uint64_t revision; 284 | }; 285 | 286 | struct limine_entry_point_request { 287 | uint64_t id[4]; 288 | uint64_t revision; 289 | LIMINE_PTR(struct limine_entry_point_response *) response; 290 | LIMINE_PTR(limine_entry_point) entry; 291 | }; 292 | 293 | /* Kernel File */ 294 | 295 | #define LIMINE_KERNEL_FILE_REQUEST { LIMINE_COMMON_MAGIC, 0xad97e90e83f1ed67, 0x31eb5d1c5ff23b69 } 296 | 297 | struct limine_kernel_file_response { 298 | uint64_t revision; 299 | LIMINE_PTR(struct limine_file *) kernel_file; 300 | }; 301 | 302 | struct limine_kernel_file_request { 303 | uint64_t id[4]; 304 | uint64_t revision; 305 | LIMINE_PTR(struct limine_kernel_file_response *) response; 306 | }; 307 | 308 | /* Module */ 309 | 310 | #define LIMINE_MODULE_REQUEST { LIMINE_COMMON_MAGIC, 0x3e7e279702be32af, 0xca1c4f3bd1280cee } 311 | 312 | struct limine_module_response { 313 | uint64_t revision; 314 | uint64_t module_count; 315 | LIMINE_PTR(struct limine_file **) modules; 316 | }; 317 | 318 | struct limine_module_request { 319 | uint64_t id[4]; 320 | uint64_t revision; 321 | LIMINE_PTR(struct limine_module_response *) response; 322 | }; 323 | 324 | /* RSDP */ 325 | 326 | #define LIMINE_RSDP_REQUEST { LIMINE_COMMON_MAGIC, 0xc5e77b6b397e7b43, 0x27637845accdcf3c } 327 | 328 | struct limine_rsdp_response { 329 | uint64_t revision; 330 | LIMINE_PTR(void *) address; 331 | }; 332 | 333 | struct limine_rsdp_request { 334 | uint64_t id[4]; 335 | uint64_t revision; 336 | LIMINE_PTR(struct limine_rsdp_response *) response; 337 | }; 338 | 339 | /* SMBIOS */ 340 | 341 | #define LIMINE_SMBIOS_REQUEST { LIMINE_COMMON_MAGIC, 0x9e9046f11e095391, 0xaa4a520fefbde5ee } 342 | 343 | struct limine_smbios_response { 344 | uint64_t revision; 345 | LIMINE_PTR(void *) entry_32; 346 | LIMINE_PTR(void *) entry_64; 347 | }; 348 | 349 | struct limine_smbios_request { 350 | uint64_t id[4]; 351 | uint64_t revision; 352 | LIMINE_PTR(struct limine_smbios_response *) response; 353 | }; 354 | 355 | /* EFI system table */ 356 | 357 | #define LIMINE_EFI_SYSTEM_TABLE_REQUEST { LIMINE_COMMON_MAGIC, 0x5ceba5163eaaf6d6, 0x0a6981610cf65fcc } 358 | 359 | struct limine_efi_system_table_response { 360 | uint64_t revision; 361 | LIMINE_PTR(void *) address; 362 | }; 363 | 364 | struct limine_efi_system_table_request { 365 | uint64_t id[4]; 366 | uint64_t revision; 367 | LIMINE_PTR(struct limine_efi_system_table_response *) response; 368 | }; 369 | 370 | /* Boot time */ 371 | 372 | #define LIMINE_BOOT_TIME_REQUEST { LIMINE_COMMON_MAGIC, 0x502746e184c088aa, 
0xfbc5ec83e6327893 } 373 | 374 | struct limine_boot_time_response { 375 | uint64_t revision; 376 | int64_t boot_time; 377 | }; 378 | 379 | struct limine_boot_time_request { 380 | uint64_t id[4]; 381 | uint64_t revision; 382 | LIMINE_PTR(struct limine_boot_time_response *) response; 383 | }; 384 | 385 | /* Kernel address */ 386 | 387 | #define LIMINE_KERNEL_ADDRESS_REQUEST { LIMINE_COMMON_MAGIC, 0x71ba76863cc55f63, 0xb2644a48c516a487 } 388 | 389 | struct limine_kernel_address_response { 390 | uint64_t revision; 391 | uint64_t physical_base; 392 | uint64_t virtual_base; 393 | }; 394 | 395 | struct limine_kernel_address_request { 396 | uint64_t id[4]; 397 | uint64_t revision; 398 | LIMINE_PTR(struct limine_kernel_address_response *) response; 399 | }; 400 | 401 | /* Device Tree Blob */ 402 | 403 | #define LIMINE_DTB_REQUEST { LIMINE_COMMON_MAGIC, 0xb40ddb48fb54bac7, 0x545081493f81ffb7 } 404 | 405 | struct limine_dtb_response { 406 | uint64_t revision; 407 | LIMINE_PTR(void *) dtb_ptr; 408 | }; 409 | 410 | struct limine_dtb_request { 411 | uint64_t id[4]; 412 | uint64_t revision; 413 | LIMINE_PTR(struct limine_dtb_response *) response; 414 | }; 415 | 416 | #ifdef __cplusplus 417 | } 418 | #endif 419 | 420 | #endif 421 | -------------------------------------------------------------------------------- /limine.cfg: -------------------------------------------------------------------------------- 1 | TIMEOUT=3 2 | VERBOSE=yes 3 | 4 | :NanoShell64 5 | PROTOCOL=limine 6 | KERNEL_PATH=boot:///kernel.elf -------------------------------------------------------------------------------- /linker.ld: -------------------------------------------------------------------------------- 1 | /* Tell the linker that we want an x86_64 ELF64 output file */ 2 | OUTPUT_FORMAT(elf64-x86-64) 3 | OUTPUT_ARCH(i386:x86-64) 4 | 5 | /* We want the symbol _start to be our entry point */ 6 | ENTRY(_start) 7 | 8 | /* Define the program headers we want so the bootloader gives us the right */ 9 | /* MMU permissions */ 10 | PHDRS 11 | { 12 | text PT_LOAD FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */ 13 | rodata PT_LOAD FLAGS((1 << 2)) ; /* Read only */ 14 | data PT_LOAD FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */ 15 | } 16 | 17 | SECTIONS 18 | { 19 | /* We want to be placed in the topmost 2GiB of the address space, for optimizations, and because that is what the Limine spec mandates. */ 20 | /* Any address in this region will do, but often 0xffffffff80000000 is chosen as that is the beginning of the region. */ 21 | . = 0xffffffff80000000; 22 | 23 | .text : { 24 | *(.text .text.*) 25 | } :text 26 | 27 | /* Move to the next memory page for .rodata */ 28 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 29 | 30 | .rodata : { 31 | *(.rodata .rodata.*) 32 | } :rodata 33 | 34 | /* Move to the next memory page for .data */ 35 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 36 | 37 | /* Global constructor array. */ 38 | .init_array : { 39 | g_init_array_start = .; 40 | *(.init_array) 41 | g_init_array_end = .; 42 | } 43 | 44 | /* Global destructor array. */ 45 | .fini_array : { 46 | g_fini_array_start = .; 47 | *(.fini_array) 48 | g_fini_array_end = .; 49 | } 50 | 51 | .data : { 52 | *(.data .data.*) 53 | } :data 54 | 55 | .bss : { 56 | *(COMMON) 57 | *(.bss .bss.*) 58 | } :data 59 | 60 | /* Discard .note.* and .eh_frame since they may cause issues on some hosts. 
*/ 61 | /DISCARD/ : { 62 | *(.eh_frame) 63 | *(.note .note.*) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | ### NanoShell64 has been officially deprecated as of August 20th, 2023. Instead, check out its successor, [The Boron Operating System](https://github.com/iProgramMC/Boron) 2 | 3 |
4 |
5 | # The NanoShell64 Operating System
6 |
7 | NanoShell64 is a 64-bit operating system designed with SMP in mind.
8 |
9 | The project is wholly licensed under the GNU General Public License V3, **except the following**:
10 | - [Limine Terminal](source/LimineTerm): https://github.com/limine-bootloader/terminal
11 |
12 | #### Be advised that this is alpha-level software and you should not expect any stability from it.
13 |
14 | NOTE: You need Limine V3.18.3; for some reason, 3.0 or older doesn't work.
15 |
16 | ## Building
17 | In a terminal, run the following commands:
18 | ```
19 | git submodule update
20 | make -C limine
21 | make
22 | ```
23 | (Note: these are to be done on Linux or WSL 1. Cygwin/MinGW32 were not tested and probably don't work.)
24 |
25 | To run, invoke `./run-unix.sh` or `make run`. If you are using WSL 1, you can run `./run.sh`
26 | to boot the built ISO using your native QEMU installation on Windows.
27 |
28 | ## Goals/plans
29 |
30 | #### Architecture design
31 | There's hardly a settled architecture design yet, as the project is still at the experimental stage.
32 | I'd like to experiment with the following:
33 | * Worker-thread-centered design. This can also be interpreted as a client-server architecture.
34 | * Each CPU has its own kernel heap. This reduces TLB shootdowns. If there's a need to transfer
35 | data between CPUs, one may use an IPI with a list of physical pages.
36 | * Until swapping to disk is added, use a form of poor man's compression: if a page is filled
37 | to the brim with a single byte that meets certain criteria, it'll be "compressed" down
38 | into a single page entry. (See the sketch after the lists below.)
39 |
40 | #### Primordial tasks
41 | * [x] Hello World
42 | * [ ] SMP Bootstrap
43 | * [ ] Inter-processor communication (through IPIs)
44 | * [ ] Task switching and concurrency
45 | * [ ] Physical memory manager
46 | * [ ] Virtual memory manager
47 | * [ ] Inter-process communication
48 |
49 | #### Other features
50 | * [ ] Init ram disk file system
51 | * [ ] Ext2 file system support
52 | * [ ] More... (still not decided)
53 |
54 | #### Drivers
55 | * [ ] Limine terminal
56 | * [ ] PS/2 Keyboard
57 | * [ ] Own terminal with framebuffer
58 | * [ ] Serial port
59 | * [ ] PCI
60 | * [ ] PS/2 mouse
61 |
62 | #### User
63 | * [ ] A basic shell
64 | Still to be decided.
65 |
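The "poor man's compression" idea from the architecture list above can be made concrete with a small sketch. This is illustrative only and not code from this repository; `IsCompressiblePage` is a hypothetical helper, and the exact "criteria" a page must meet are left open above:

```
// A page qualifies for "compression" if every byte in it is identical.
// The kernel could then free the physical page and record only the fill
// byte in the page entry, re-materializing the page on the next access.
static bool IsCompressiblePage(const uint8_t* pPage)
{
	for (size_t i = 1; i < 4096; i++)
	{
		if (pPage[i] != pPage[0])
			return false;
	}
	return true;
}
```

The page-fault handler in source/MemMgr/PageFault.cpp already contains the other half of this idea: a page entry marked as needing allocation is materialized on first access and filled with a byte reconstructed from the entry's protection key field.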
66 | #### Far in the future
67 | * [ ] NanoShell32 compatibility
68 | * [ ] Networking?
69 | * [ ] USB (could also backport to NanoShell32 itself)
70 |
71 |
--------------------------------------------------------------------------------
/run-unix.sh:
--------------------------------------------------------------------------------
1 | qemu-system-x86_64 \
2 | -no-reboot \
3 | -no-shutdown \
4 | -M q35 \
5 | -m 256M \
6 | -smp 4 \
7 | -boot d \
8 | -cdrom build/image.iso \
9 | -debugcon stdio
10 |
--------------------------------------------------------------------------------
/run.bat:
--------------------------------------------------------------------------------
1 | @rem Run script
2 |
3 | @echo off
4 |
5 | set backupPath=%path%
6 | set NSPath=%CD%
7 | cd /d c:\Program Files\qemu
8 | set path=%path%;%NSPath%
9 |
10 | qemu-system-x86_64.exe -no-reboot -no-shutdown ^
11 | -M q35 ^
12 | -m 256M ^
13 | -smp 1 ^
14 | -boot d ^
15 | -cdrom %nspath%\build\image.iso ^
16 | -debugcon stdio ^
17 | -display sdl ^
18 | -accel tcg -trace apic_* ^
19 | -monitor telnet:127.0.0.1:56789,server,nowait
20 |
21 | :-d cpu_reset ^
22 | : -s -S -- for debugging with GDB
23 | : -serial COM7 -- to output the serial port to somewhere real
24 | : -kernel %nspath%/kernel.bin
25 | : -debugcon stdio
26 | : -monitor telnet:127.0.0.1:55555,server,nowait -- to use the QEMU console
27 | :
28 | :qemu-system-i386 -m 16M -drive file=\\.\PHYSICALDRIVE1,format=raw
29 | rem -s -S
30 |
31 | :-drive id=disk,file=%nspath%\vdisk.vdi,if=none ^
32 | :-device ahci,id=ahci ^
33 | :-device ide-hd,drive=disk,bus=ahci.0 ^
34 |
35 | rem go back
36 | cd /d %NSPath%
37 |
38 | set path=%backupPath%
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
1 | # works only in wsl :)
2 |
3 | cmd.exe /k "run.bat && exit"
--------------------------------------------------------------------------------
/source/EternalHeap.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // EternalHeap.cpp - Creation date: 05/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 |
10 | #include
11 | #include
12 | #include
13 |
14 | // The eternal heap is a small (4 MiB) block of memory from which tiny,
15 | // permanent allocations are handed out during the initialization process.
16 |
17 | #define C_ETERNAL_HEAP_SIZE (4 * 1024 * 1024)
18 |
19 | static Spinlock gEternalHeapSpinlock;
20 | static uint8_t gEternalHeap[C_ETERNAL_HEAP_SIZE];
21 | static uint8_t *gEternalHeapPtr = gEternalHeap;
22 | static uint8_t * const gEternalHeapEnd = &gEternalHeap[C_ETERNAL_HEAP_SIZE];
23 |
24 | void *EternalHeap::Allocate(size_t sz)
25 | {
26 | sz = (sz + 15) & ~15; // round the size up to a multiple of 16 bytes
27 |
28 | LockGuard grd(gEternalHeapSpinlock);
29 |
30 | if (gEternalHeapPtr + sz > gEternalHeapEnd)
31 | {
32 | // OOPS! We failed to allocate this block. Return NULL.
33 | SLogMsg("EternalHeap could not fulfill an allocation of %z bytes (RA: %p)", sz, __builtin_return_address(0)); 34 | return NULL; 35 | } 36 | 37 | void *pMem = gEternalHeapPtr; 38 | 39 | gEternalHeapPtr += sz; 40 | 41 | return pMem; 42 | } 43 | 44 | -------------------------------------------------------------------------------- /source/Init.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // Init.cpp - Creation date: 04/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This file contains the _start function, which launches and 12 | // brings the kernel to life. 13 | // *************************************************************** 14 | 15 | #include 16 | #include <_limine.h> 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | // The Limine requests can be placed anywhere, but it is important that 25 | // the compiler does not optimise them away, so, usually, they should 26 | // be made volatile or equivalent. 27 | 28 | volatile limine_bootloader_info_request g_BootloaderInfoRequest = 29 | { 30 | .id = LIMINE_BOOTLOADER_INFO_REQUEST, 31 | .revision = 0, 32 | .response = NULL, 33 | }; 34 | 35 | void RunAllConstructors(); 36 | void RunAllDestructors(); 37 | 38 | // The following will be our kernel's entry point. 39 | extern "C" void _start(void) 40 | { 41 | // Ensure Limine has set up these features. 42 | if (!Terminal::CheckResponse() || !Arch::CPU::GetSMPResponse() || !Arch::CPU::GetHHDMResponse()) 43 | Arch::IdleLoop(); 44 | 45 | RunAllConstructors(); 46 | 47 | Terminal::Setup(); 48 | 49 | LogMsg("NanoShell64 (TM), April 2023 - V0.004"); 50 | SLogMsg("NanoShell64 says good morning"); 51 | 52 | PMM::Init(); 53 | 54 | #ifdef TARGET_X86_64 55 | Arch::APIC::EnsureOn(); 56 | #endif 57 | 58 | uint32_t processorCount = Arch::CPU::GetCount(); 59 | LogMsg("%d System Processor%s [%llu Kb Memory] MultiProcessor Kernel", processorCount, processorCount == 1 ? "" : "s", PMM::GetTotalPages() * 4); 60 | 61 | // Initialize the other CPUs. This should not return. 62 | Arch::CPU::InitAsBSP(); 63 | } 64 | -------------------------------------------------------------------------------- /source/LimineTerm/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright 2022 mintsuki and contributors. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 10 | -------------------------------------------------------------------------------- /source/LimineTerm/framebuffer.h: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // framebuffer.h - Creation date: 04/10/2022 3 | // ------------------------------------------------------------- 4 | // Limine Terminal (C) 2022-2023 - Licensed under BSD 2 Clause 5 | // 6 | // *************************************************************** 7 | // Programmer(s): mintsuki and contributors 8 | // *************************************************************** 9 | #ifndef _TERM_FRAMEBUFFER_H 10 | #define _TERM_FRAMEBUFFER_H 11 | 12 | #ifdef __cplusplus 13 | extern "C" { 14 | #endif 15 | 16 | #include 17 | #include 18 | #include 19 | 20 | #include "term.h" 21 | 22 | #define FBTERM_FONT_GLYPHS 256 23 | 24 | struct fbterm_char { 25 | uint32_t c; 26 | uint32_t fg; 27 | uint32_t bg; 28 | }; 29 | 30 | struct fbterm_queue_item { 31 | size_t x, y; 32 | struct fbterm_char c; 33 | }; 34 | 35 | struct fbterm_context { 36 | struct term_context term; 37 | 38 | size_t font_width; 39 | size_t font_height; 40 | size_t glyph_width; 41 | size_t glyph_height; 42 | 43 | size_t font_scale_x; 44 | size_t font_scale_y; 45 | 46 | size_t offset_x, offset_y; 47 | 48 | volatile uint32_t *framebuffer; 49 | size_t pitch; 50 | size_t width; 51 | size_t height; 52 | size_t bpp; 53 | 54 | size_t font_bits_size; 55 | uint8_t *font_bits; 56 | size_t font_bool_size; 57 | bool *font_bool; 58 | 59 | uint32_t ansi_colours[8]; 60 | uint32_t ansi_bright_colours[8]; 61 | uint32_t default_fg, default_bg; 62 | uint32_t default_fg_bright, default_bg_bright; 63 | 64 | size_t canvas_size; 65 | uint32_t *canvas; 66 | 67 | size_t grid_size; 68 | size_t queue_size; 69 | size_t map_size; 70 | 71 | struct fbterm_char *grid; 72 | 73 | struct fbterm_queue_item *queue; 74 | size_t queue_i; 75 | 76 | struct fbterm_queue_item **map; 77 | 78 | uint32_t text_fg; 79 | uint32_t text_bg; 80 | size_t cursor_x; 81 | size_t cursor_y; 82 | 83 | uint32_t saved_state_text_fg; 84 | uint32_t saved_state_text_bg; 85 | size_t saved_state_cursor_x; 86 | size_t saved_state_cursor_y; 87 | 88 | size_t old_cursor_x; 89 | size_t old_cursor_y; 90 | }; 91 | 92 | struct term_context *fbterm_init( 93 | void *(*_malloc)(size_t), 94 | uint32_t *framebuffer, size_t width, size_t height, size_t pitch, 95 | uint32_t *canvas, 96 | uint32_t *ansi_colours, uint32_t *ansi_bright_colours, 97 | uint32_t *default_bg, uint32_t *default_fg, 98 | uint32_t *default_bg_bright, uint32_t *default_fg_bright, 99 | void *font, size_t font_width, size_t font_height, size_t font_spacing, 100 | size_t font_scale_x, size_t font_scale_y, 101 | size_t margin 102 | ); 103 | 104 | #ifdef __cplusplus 105 | } 106 | #endif 107 | 108 | #endif 109 | -------------------------------------------------------------------------------- /source/LimineTerm/term.h: 
-------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // term.h - Creation date: 04/10/2022 3 | // ------------------------------------------------------------- 4 | // Limine Terminal (C) 2022-2023 - Licensed under BSD 2 Clause 5 | // 6 | // *************************************************************** 7 | // Programmer(s): mintsuki and contributors 8 | // *************************************************************** 9 | #ifndef _TERM_H 10 | #define _TERM_H 11 | 12 | #ifdef __cplusplus 13 | extern "C" { 14 | #endif 15 | 16 | #include 17 | #include 18 | #include 19 | 20 | #define TERM_MAX_ESC_VALUES 16 21 | 22 | #define TERM_CB_DEC 10 23 | #define TERM_CB_BELL 20 24 | #define TERM_CB_PRIVATE_ID 30 25 | #define TERM_CB_STATUS_REPORT 40 26 | #define TERM_CB_POS_REPORT 50 27 | #define TERM_CB_KBD_LEDS 60 28 | #define TERM_CB_MODE 70 29 | #define TERM_CB_LINUX 80 30 | 31 | struct term_context { 32 | /* internal use */ 33 | 34 | size_t tab_size; 35 | bool autoflush; 36 | bool cursor_enabled; 37 | bool scroll_enabled; 38 | bool control_sequence; 39 | bool csi; 40 | bool escape; 41 | bool osc; 42 | bool osc_escape; 43 | bool rrr; 44 | bool discard_next; 45 | bool bold; 46 | bool bg_bold; 47 | bool reverse_video; 48 | bool dec_private; 49 | bool insert_mode; 50 | uint64_t code_point; 51 | size_t unicode_remaining; 52 | uint8_t g_select; 53 | uint8_t charsets[2]; 54 | size_t current_charset; 55 | size_t escape_offset; 56 | size_t esc_values_i; 57 | size_t saved_cursor_x; 58 | size_t saved_cursor_y; 59 | size_t current_primary; 60 | size_t current_bg; 61 | size_t scroll_top_margin; 62 | size_t scroll_bottom_margin; 63 | uint32_t esc_values[TERM_MAX_ESC_VALUES]; 64 | bool saved_state_bold; 65 | bool saved_state_bg_bold; 66 | bool saved_state_reverse_video; 67 | size_t saved_state_current_charset; 68 | size_t saved_state_current_primary; 69 | size_t saved_state_current_bg; 70 | 71 | /* to be set by backend */ 72 | 73 | size_t rows, cols; 74 | bool in_bootloader; 75 | 76 | void (*raw_putchar)(struct term_context *, uint8_t c); 77 | void (*clear)(struct term_context *, bool move); 78 | void (*set_cursor_pos)(struct term_context *, size_t x, size_t y); 79 | void (*get_cursor_pos)(struct term_context *, size_t *x, size_t *y); 80 | void (*set_text_fg)(struct term_context *, size_t fg); 81 | void (*set_text_bg)(struct term_context *, size_t bg); 82 | void (*set_text_fg_bright)(struct term_context *, size_t fg); 83 | void (*set_text_bg_bright)(struct term_context *, size_t bg); 84 | void (*set_text_fg_rgb)(struct term_context *, uint32_t fg); 85 | void (*set_text_bg_rgb)(struct term_context *, uint32_t bg); 86 | void (*set_text_fg_default)(struct term_context *); 87 | void (*set_text_bg_default)(struct term_context *); 88 | void (*set_text_fg_default_bright)(struct term_context *); 89 | void (*set_text_bg_default_bright)(struct term_context *); 90 | void (*move_character)(struct term_context *, size_t new_x, size_t new_y, size_t old_x, size_t old_y); 91 | void (*scroll)(struct term_context *); 92 | void (*revscroll)(struct term_context *); 93 | void (*swap_palette)(struct term_context *); 94 | void (*save_state)(struct term_context *); 95 | void (*restore_state)(struct term_context *); 96 | void (*double_buffer_flush)(struct term_context *); 97 | void (*full_refresh)(struct term_context *); 98 | void (*deinit)(struct term_context *, void (*)(void *, size_t)); 99 | 100 | /* to be set by client */ 101 | 102 
| void (*callback)(struct term_context *, uint64_t, uint64_t, uint64_t, uint64_t);
103 | };
104 |
105 | void term_context_reinit(struct term_context *ctx);
106 | void term_write(struct term_context *ctx, const char *buf, size_t count);
107 |
108 | #ifdef __cplusplus
109 | }
110 | #endif
111 |
112 | #endif
113 |
--------------------------------------------------------------------------------
/source/MemMgr/KernelHeap.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // KernelHeap.cpp - Creation date: 27/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // Module description:
11 | //
12 | // This module implements the NanoShell64 kernel heap.
13 | //
14 | // The kernel heap is a region of memory for allocations that
15 | // don't need to satisfy any particular property, such as
16 | // page boundary alignment.
17 | // If page alignment is required, we usually only need to
18 | // allocate one page at a time, so we use the PMM directly plus
19 | // the HHDM mapping instead.
20 | //
21 | // ***************************************************************
22 | #include
23 | #include
24 |
25 | #define DEBUG_KERNEL_HEAP
26 |
27 | using namespace VMM;
28 |
29 | static Spinlock s_KernelHeapLock;
30 |
31 | static KernelHeap::FreeListNode *s_FLStart, *s_FLEnd;
32 |
33 | void KernelHeap::Init()
34 | {
35 | LockGuard lg(s_KernelHeapLock);
36 |
37 | PageMapping* pPM = PageMapping::GetFromCR3();
38 |
39 | // map the kernel heap in.
40 | for (uint64_t i = 0; i < C_KERNEL_HEAP_SIZE; i += PAGE_SIZE)
41 | {
42 | pPM->MapPage(C_KERNEL_HEAP_START + i, true, true);
43 | }
44 |
45 | // set up the first free list block
46 | s_FLStart = (FreeListNode*)C_KERNEL_HEAP_START;
47 | s_FLStart->m_magic = FreeListNode::FLN_MAGIC;
48 | s_FLStart->m_size = C_KERNEL_HEAP_SIZE - sizeof(FreeListNode);
49 | s_FLStart->m_next = nullptr;
50 | s_FLStart->m_prev = nullptr;
51 | s_FLEnd = s_FLStart;
52 | }
53 |
54 | void* KernelHeap::Allocate(size_t sz)
55 | {
56 | // align our size to 16 bytes.
57 | sz = (sz + 15) & ~15;
58 |
59 | LockGuard lg(s_KernelHeapLock);
60 |
61 | FreeListNode* pNode = s_FLStart;
62 | FreeListNode* pBFN = nullptr; // the best-fitting node found so far
63 | while (pNode)
64 | {
65 | // skip any candidate that won't fit our size...
66 | if (pNode->m_size < sz)
67 | {
68 | pNode = pNode->m_next;
69 | continue;
70 | }
71 |
72 | // or isn't free
73 | if (pNode->m_magic != FreeListNode::FLN_MAGIC)
74 | {
75 | #ifdef DEBUG_KERNEL_HEAP
76 | // make sure that our kernel heap ain't corrupted or anything
77 | if (pNode->m_magic != FreeListNode::FLA_MAGIC)
78 | {
79 | SLogMsg("ERROR: kernel heap corruption detected at %p. Magic: %p. RA: %p", pNode, pNode->m_magic, __builtin_return_address(0));
80 | }
81 | #endif
82 |
83 | pNode = pNode->m_next;
84 | continue;
85 | }
86 |
87 | if (!pBFN || pBFN->m_size > pNode->m_size)
88 | pBFN = pNode;
89 |
90 | // if the best candidate's size is already 16 times our requested size,
91 | // stop searching; otherwise, keep looking for a tighter fit.
92 | if (pBFN->m_size >= 16 * sz)
93 | break;
94 |
95 | pNode = pNode->m_next;
96 | }
97 |
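// Note: at this point, pBFN (the "best-fitting node"), if non-null, is the
// smallest free node seen so far that can hold sz, unless the search above
// stopped early because the candidate was already at least 16 * sz.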
98 | if (!pBFN)
99 | {
100 | // oops, we ran out of kernel heap space
101 | return nullptr;
102 | }
103 |
104 | void *pArea = pBFN->GetArea();
105 |
106 | size_t old_size = pBFN->m_size;
107 | pBFN->m_magic = FreeListNode::FLA_MAGIC;
108 |
109 | // do we need to create an auxiliary node?
110 | if (pBFN->m_size >= sz + sizeof(FreeListNode) + 16) // the leftover must fit a node header plus a minimal block
111 | {
112 | pBFN->m_size = sz;
113 | FreeListNode* pAfter = pBFN->GetPtrDirectlyAfter();
114 | pAfter->m_magic = FreeListNode::FLN_MAGIC;
115 | pAfter->m_next = pBFN->m_next;
116 | pAfter->m_prev = pBFN;
117 | pAfter->m_size = old_size - sizeof(FreeListNode) - sz;
118 | if (pBFN->m_next) pBFN->m_next->m_prev = pAfter;
119 | pBFN->m_next = pAfter;
120 | }
121 | else
122 | {
123 | // simply mark it as allocated, it's fine.
124 | }
125 |
126 | return pArea;
127 | }
128 |
129 | void KernelHeap::Free(void* pArea)
130 | {
131 | LockGuard lg(s_KernelHeapLock);
132 |
133 | FreeListNode* pNode = (FreeListNode*)pArea - 1;
134 | if (pNode->m_magic != FreeListNode::FLA_MAGIC)
135 | {
136 | // uh oh! Well, at least we were able to catch this, so just return.
137 | SLogMsg("ERROR: attempt to free region %p from kernel heap that wasn't actually allocated (its magic number is %p, a free node's is %p, RA: %p)", pArea, pNode->m_magic, FreeListNode::FLN_MAGIC, __builtin_return_address(0));
138 | return;
139 | }
140 |
141 | // mark this as free
142 | pNode->m_magic = FreeListNode::FLN_MAGIC;
143 |
144 | // you may ask, "iProgram, why are you doing a for loop for 2 iterations here?"
145 | // the answer is very simple. The first iteration merges the actual node with its
146 | // 'next' neighbour. Afterwards, it changes the pNode variable to pNode's previous
147 | // neighbour. If that node's not free, we simply return; otherwise, we try to merge
148 | // it with its next neighbour (which should be the node we are trying to merge with
149 | // its neighbours).
150 | for (int i = 0; i < 2; i++)
151 | {
152 | if (pNode->m_magic != FreeListNode::FLN_MAGIC) return;
153 |
154 | // Attempt to connect this with other nodes.
155 | if (pNode->m_next && pNode->m_next->m_magic == FreeListNode::FLN_MAGIC)
156 | {
157 | // merge this and the next together.
158 | pNode->m_size = ((uint8_t*)pNode->m_next - (uint8_t*)pNode) + pNode->m_next->m_size;
159 |
160 | pNode->m_next->m_magic = 0;
161 |
162 | pNode->m_next = pNode->m_next->m_next;
163 | if (pNode->m_next)
164 | pNode->m_next->m_prev = pNode;
165 | }
166 |
167 | pNode = pNode->m_prev;
168 | if (!pNode) return;
169 | }
170 | }
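A quick usage sketch for the allocator above (illustrative only; the sizes follow from the 16-byte rounding in Allocate):

```
// Allocate(100) rounds the request up to 112 bytes ((100 + 15) & ~15)
// and carves it out of the best-fitting free node.
void* p = KernelHeap::Allocate(100);
if (p)
{
	// ... use the 112-byte block ...
	KernelHeap::Free(p); // returns the block and merges it with free neighbours
}
```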
--------------------------------------------------------------------------------
/source/MemMgr/PMM.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // PMM.cpp - Creation date: 07/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // Module description:
11 | // This module implements a thread-safe physical memory
12 | // manager.
13 | //
14 | // ***************************************************************
15 | #include <_limine.h>
16 | #include
17 | #include
18 | #include
19 |
20 | volatile limine_memmap_request g_MemMapRequest =
21 | {
22 | .id = LIMINE_MEMMAP_REQUEST,
23 | .revision = 0,
24 | .response = NULL,
25 | };
26 |
27 | static PMM::MemoryArea* s_pFirstBMPart, *s_pLastBMPart;
28 | static Spinlock s_PMMSpinlock;
29 | static uint64_t s_totalAvailablePages; // The total number of pages available to the system.
30 |
31 | // The reason I put these in a "namespace PMM" block is that I don't want to publicize these functions.
32 | namespace PMM
33 | {
34 |
35 | uintptr_t MemoryArea::RemoveFirst()
36 | {
37 | PageFreeListNode * pFirst = m_pFirst;
38 |
39 | // remove it
40 | if (pFirst->pNext)
41 | pFirst->pNext->pPrev = NULL;
42 | m_pFirst = pFirst->pNext;
43 | if (m_pLast == pFirst)
44 | m_pLast = NULL;
45 |
46 | m_freePages--;
47 |
48 | return (uintptr_t)pFirst - Arch::GetHHDMOffset();
49 | }
50 |
51 | void MemoryArea::PushBack(uintptr_t paddr)
52 | {
53 | PageFreeListNode* pLast = m_pLast, *pThis = (PageFreeListNode*)(Arch::GetHHDMOffset() + paddr);
54 |
55 | pThis->pNext = NULL;
56 | pThis->pPrev = pLast;
57 | if (pLast) pLast->pNext = pThis;
58 | else m_pFirst = pThis; // the list was empty, so this node is also the first
59 | m_pLast = pThis;
60 | m_freePages++;
61 | }
62 |
63 | // Initialize a MemoryArea free list.
64 | void InitializeMemoryArea(MemoryArea* pPart)
65 | {
66 | uintptr_t ho = Arch::GetHHDMOffset();
67 | uintptr_t virtAddr = pPart->m_startAddr + ho; // the HHDM-virtual address of the area's first page
68 |
69 | PageFreeListNode* first = (PageFreeListNode*)virtAddr;
70 |
71 | for (size_t sz = 0; sz != pPart->m_length; sz++, virtAddr += PAGE_SIZE)
72 | {
73 | ((PageFreeListNode*)virtAddr)->pPrev = (PageFreeListNode*)(virtAddr - PAGE_SIZE);
74 | ((PageFreeListNode*)virtAddr)->pNext = (PageFreeListNode*)(virtAddr + PAGE_SIZE);
75 | }
76 |
77 | PageFreeListNode* last = (PageFreeListNode*)(virtAddr - PAGE_SIZE);
78 | last ->pNext = NULL;
79 | first->pPrev = NULL;
80 |
81 | pPart->m_pFirst = first;
82 | pPart->m_pLast = last;
83 |
84 | // ensure continuity
85 | /*
86 | PageFreeListNode* pNode = (PageFreeListNode*)(ho + pPart->m_startAddr);
87 |
88 | while (pNode)
89 | {
90 | SLogMsg("Node: %p", pNode);
91 | pNode = pNode->pNext;
92 | }
93 | */
94 | }
95 |
96 | // Add a new MemoryArea entry. This may only be called during initialization.
97 | void AddMemoryArea(MemoryArea* pPart)
98 | {
99 | if (s_pFirstBMPart == NULL)
100 | {
101 | s_pFirstBMPart = s_pLastBMPart = pPart;
102 | pPart->m_pLink = NULL;
103 | }
104 | else
105 | {
106 | s_pLastBMPart->m_pLink = pPart; pPart->m_pLink = NULL; s_pLastBMPart = pPart;
107 | }
108 | InitializeMemoryArea(pPart);
109 | }
110 |
111 | };
112 |
113 | uint64_t PMM::GetTotalPages()
114 | {
115 | return s_totalAvailablePages;
116 | }
117 |
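/*
Note: the free list above is intrusive: each free physical page stores its own
PageFreeListNode in the first bytes of the page itself, accessed through the
higher-half direct map (HHDM). A sketch of the address arithmetic involved
(illustrative only; the physical address is made up):

```
uintptr_t paddr = 0x5000;  // some free physical page
auto* pNode = (PageFreeListNode*)(Arch::GetHHDMOffset() + paddr);
// pNode->pNext and pNode->pPrev live inside the free page itself; once the
// page is handed out by RemoveFirst(), its first bytes are simply
// overwritten by the new owner, and the node ceases to exist.
```
*/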
118 | void PMM::Init()
119 | {
120 | // Check if we have the memmap response.
121 | if (!g_MemMapRequest.response)
122 | {
123 | // Just hang..
124 | LogMsg("No physical memory map response was given by limine. Halting");
125 | Arch::IdleLoop();
126 | }
127 |
128 | // For each memory map entry.
129 | auto resp = g_MemMapRequest.response;
130 | for (uint64_t i = 0; i != resp->entry_count; i++)
131 | {
132 | auto entry = resp->entries[i];
133 |
134 | // Skip this entry if it is not marked as usable.
135 | if (entry->type != LIMINE_MEMMAP_USABLE)
136 | {
137 | // TODO: maybe we should handle what's in here?
138 | continue;
139 | }
140 |
141 | // Check how many pages we have.
142 | size_t nPages = (entry->length + PAGE_SIZE - 1) / PAGE_SIZE;
143 |
144 | // TODO: Assert that entry->base is page aligned
145 | void* pMem = EternalHeap::Allocate(sizeof(MemoryArea));
146 | memset(pMem, 0, sizeof(MemoryArea));
147 |
148 | MemoryArea* pPart = new(pMem) MemoryArea(entry->base, nPages);
149 |
150 | s_totalAvailablePages += pPart->m_length;
151 |
152 | AddMemoryArea(pPart);
153 | }
154 | }
155 |
156 | uintptr_t PMM::AllocatePage()
157 | {
158 | LockGuard lg (s_PMMSpinlock);
159 |
160 | // browse through the whole MemoryArea chain
161 | MemoryArea* bmp = s_pFirstBMPart;
162 | while (bmp)
163 | {
164 | // if there are no free list nodes
165 | if (bmp->m_pFirst == NULL || bmp->m_pLast == NULL || bmp->m_freePages == 0)
166 | {
167 | bmp = bmp->m_pLink;
168 | continue;
169 | }
170 |
171 | // there is one. Remove it and return.
172 | return bmp->RemoveFirst();
173 | }
174 |
175 | return INVALID_PAGE;
176 | }
177 |
178 | void PMM::FreePage(uintptr_t page)
179 | {
180 | LockGuard lg (s_PMMSpinlock);
181 |
182 | // Find the memory area that this page resides in.
183 | MemoryArea* bmp = s_pFirstBMPart;
184 | while (bmp)
185 | {
186 | if (bmp->m_startAddr <= page && page < bmp->m_startAddr + bmp->m_length * PAGE_SIZE) break;
187 |
188 | bmp = bmp->m_pLink;
189 | }
190 |
191 | if (!bmp)
192 | {
193 | SLogMsg("Error, trying to free page %p, not within any of our memory areas", page);
194 | return;
195 | }
196 |
197 | // Append it to the end of that area's free list.
198 | bmp->PushBack(page);
199 | }
200 |
201 | void PMM::Test()
202 | {
203 | // The two printed addresses should ideally be the same.
204 | uintptr_t addr = PMM::AllocatePage();
205 | LogMsg("Addr: %p", addr);
206 | PMM::FreePage(addr);
207 |
208 | addr = PMM::AllocatePage();
209 | LogMsg("Addr: %p", addr);
210 | PMM::FreePage(addr);
211 | }
212 |
--------------------------------------------------------------------------------
/source/MemMgr/PageFault.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // PageFault.cpp - Creation date: 01/02/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // Module description:
11 | // This module implements the page fault handler.
12 | //
13 | // ***************************************************************
14 | #include
15 | #include
16 |
17 | void Arch::CPU::OnPageFault(Registers* pRegs)
18 | {
19 | //SLogMsg("Page fault! CR2: %p RIP: %p ErrorCode: %p", pRegs->cr2, pRegs->rip, pRegs->error_code);
20 |
21 | union
22 | {
23 | struct
24 | {
25 | bool bPresent : 1;
26 | bool bWrite : 1;
27 | bool bUser : 1;
28 | //......
29 | };
30 | uint64_t value;
31 | }
32 | errorCode;
33 |
34 | errorCode.value = pRegs->error_code;
35 |
36 | using namespace VMM;
37 |
38 | // Check if the accessed page is valid or not
39 | PageMapping* pPM = PageMapping::GetFromCR3();
40 |
41 | PageEntry* pPageEntry = pPM->GetPageEntry(pRegs->cr2);
42 |
43 | if (!pPageEntry)
44 | {
45 | // Since the page entry isn't even there, I'm going to have to make you bail
46 | goto _INVALID_PAGE_FAULT;
47 | }
48 |
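// Note: on x86_64 the page fault error code is a bitfield: bit 0 = the page
// was present (a protection violation rather than a miss), bit 1 = the access
// was a write, bit 2 = the access came from user mode. For example,
// error_code 0x2 means "write to a non-present page", which is exactly the
// demand-allocation path handled below, while 0x7 would be a user-mode write
// to a present page, i.e. a protection violation.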
49 | if (!errorCode.bPresent)
50 | {
51 | // The page isn't present. Maybe it is marked present in the page entry?
52 | // Not sure why it would be, though. Page mappings should only
53 | // be messed with by their owner CPU.
54 | if (pPageEntry->m_present)
55 | return;
56 |
57 | // still not present. Look into why that is.
58 | if (pPageEntry->m_needAllocPage)
59 | {
60 | // ah hah! I see why that is now - we need to allocate a PMM page.
61 | uintptr_t page = PMM::AllocatePage();
62 | if (page == PMM::INVALID_PAGE)
63 | {
64 | // Uh oh. We need to get rid of some pages, TODO
65 | KernelPanic("TODO: out of memory, need to free some caches and stuff. CR2: %p RIP: %p ErrorCode: %p", pRegs->cr2, pRegs->rip, pRegs->error_code);
66 | }
67 |
68 | // fill it with the fill byte reconstructed from the 4-bit protection key (duplicated into both nibbles)
69 | uint8_t someByte = (pPageEntry->m_protKey << 4 | pPageEntry->m_protKey);
70 | memset((void*)(Arch::GetHHDMOffset() + page), someByte, PAGE_SIZE);
71 |
72 | // make it present now
73 | pPageEntry->m_address = page >> 12;
74 | pPageEntry->m_needAllocPage = false;
75 | pPageEntry->m_partOfPmm = true;
76 | pPageEntry->m_present = true;
77 |
78 | Arch::Invalidate(pRegs->cr2);
79 | return;
80 | }
81 |
82 | // not sure of any other cases, so...
83 | goto _INVALID_PAGE_FAULT;
84 | }
85 |
86 | // If the page was present but we have an access error...
87 | if (errorCode.bWrite)
88 | {
89 | // TODO: copy-on-write (COW)
90 | goto _INVALID_PAGE_FAULT;
91 | }
92 |
93 | // don't know of any other cases.
94 |
95 | _INVALID_PAGE_FAULT:
96 | KernelPanic("Invalid page fault at CR2: %p RIP: %p ErrorCode: %p", pRegs->cr2, pRegs->rip, pRegs->error_code);
97 | }
--------------------------------------------------------------------------------
/source/MemMgr/PageMapClone.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // PageMapClone.cpp - Creation date: 07/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // Module description:
11 | // This module implements the page mapping's clone functions.
12 | //
13 | // ***************************************************************
14 | #include
15 |
16 | namespace VMM
17 | {
18 |
19 | /**** Cloning ****/
20 |
21 | PageTable* PageTable::Clone()
22 | {
23 | uintptr_t pmPage = PMM::AllocatePage();
24 |
25 | if (pmPage == PMM::INVALID_PAGE)
26 | KernelPanic("Could not clone page table! (source/MemMgr/PageMapClone.cpp:%d)", __LINE__);
27 |
28 | // Allocate the PageTable itself.
29 | PageTable* pNewPT = (PageTable*)(Arch::GetHHDMOffset() + pmPage);
30 | memset(pNewPT, 0, sizeof *pNewPT);
31 |
32 | for (int i = 0; i < 512; i++)
33 | {
34 | // Clone the page.
35 | PageEntry& oldEnt = m_entries[i];
36 | if (!oldEnt.m_present) continue;
37 |
38 | PageEntry newEnt = oldEnt;
39 |
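// Note: the branch below is where a copy-on-write scheme would eventually go:
// instead of eagerly duplicating the frame, both page entries could be made
// read-only and share the original frame, copying only when one side writes.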
40 | // if this is part of the PMM...
41 | if (oldEnt.m_partOfPmm)
42 | {
43 | // clone the page. TODO: Copy on write
44 | uintptr_t newPage = PMM::AllocatePage();
45 | if (newPage == PMM::INVALID_PAGE)
46 | KernelPanic("Could not clone page! (source/MemMgr/PageMapClone.cpp:%d)", __LINE__);
47 | // copy the old frame's contents into the newly allocated frame
48 | { uint8_t* pDst = (uint8_t*)(Arch::GetHHDMOffset() + newPage); const uint8_t* pSrc = (const uint8_t*)(Arch::GetHHDMOffset() + (oldEnt.m_address << 12)); for (size_t j = 0; j < PAGE_SIZE; j++) pDst[j] = pSrc[j]; } newEnt.m_address = newPage >> 12;
49 | }
50 |
51 | pNewPT->m_entries[i].m_data = newEnt.m_data;
52 | }
53 |
54 | return pNewPT;
55 | }
56 |
57 | PageDirectory* PageDirectory::Clone()
58 | {
59 | uintptr_t pmPage = PMM::AllocatePage();
60 |
61 | if (pmPage == PMM::INVALID_PAGE)
62 | KernelPanic("Could not clone page directory! (source/MemMgr/PageMapClone.cpp:%d)", __LINE__);
63 |
64 | // Allocate the PageDirectory itself.
65 | PageDirectory* pNewPD = (PageDirectory*)(Arch::GetHHDMOffset() + pmPage);
66 | memset(pNewPD, 0, sizeof *pNewPD);
67 |
68 | for (int i = 0; i < 512; i++)
69 | {
70 | PageTable* pOldPT = GetPageTable(i);
71 | if (!pOldPT) continue;
72 |
73 | PageEntry& oldEnt = m_entries[i];
74 |
75 | // Clone the page table.
76 | PageTable* pPT = pOldPT->Clone();
77 | if (!pPT) continue;
78 | uintptr_t ptPhys = (uintptr_t)pPT - Arch::GetHHDMOffset();
79 |
80 | // Build a page entry.
81 | PageEntry entry = oldEnt;
82 | entry.m_address = ptPhys >> 12;
83 |
84 | pNewPD->m_entries[i].m_data = entry.m_data;
85 | }
86 |
87 | return pNewPD;
88 | }
89 |
90 | PML3* PML3::Clone()
91 | {
92 | uintptr_t pmPage = PMM::AllocatePage();
93 |
94 | if (pmPage == PMM::INVALID_PAGE)
95 | KernelPanic("Could not clone PML3! (source/MemMgr/PageMapClone.cpp:%d)", __LINE__);
96 |
97 | // Allocate the PML3 itself.
98 | PML3* pNewPM = (PML3*)(Arch::GetHHDMOffset() + pmPage);
99 | memset(pNewPM, 0, sizeof *pNewPM);
100 |
101 | for (int i = 0; i < 512; i++)
102 | {
103 | PageDirectory* pOldPD = GetPageDirectory(i);
104 | if (!pOldPD) continue;
105 |
106 | PageEntry& oldEnt = m_entries[i];
107 |
108 | // Clone the page directory.
109 | PageDirectory* pPD = pOldPD->Clone();
110 | if (!pPD) continue;
111 | uintptr_t pdPhys = (uintptr_t)pPD - Arch::GetHHDMOffset();
112 |
113 | // Build a page entry.
114 | PageEntry entry = oldEnt;
115 | entry.m_address = pdPhys >> 12;
116 |
117 | pNewPM->m_entries[i].m_data = entry.m_data;
118 | }
119 |
120 | return pNewPM;
121 | }
122 |
123 | PageMapping* PageMapping::Clone(bool keepLowerHalf)
124 | {
125 | uintptr_t pmPage = PMM::AllocatePage();
126 |
127 | if (pmPage == PMM::INVALID_PAGE)
128 | KernelPanic("Could not clone page mapping! (source/MemMgr/PageMapClone.cpp:%d)", __LINE__);
129 |
130 | // Allocate the page mapping itself.
131 | PageMapping* pNewPM = (PageMapping*)(Arch::GetHHDMOffset() + pmPage);
132 | memset(pNewPM, 0, sizeof *pNewPM);
133 |
134 | // Look through the lower canonical half's PML3 entries and clone them recursively.
135 | for (int i = P_USER_START; keepLowerHalf && i < P_USER_END; i++)
136 | {
137 | // If there is no PML3, continue.
138 | PML3* pOldPML3 = GetPML3(i);
139 | if (!pOldPML3) continue;
140 |
141 | PageEntry& oldEnt = m_entries[i];
142 |
143 | PML3* pPML3 = pOldPML3->Clone();
144 | if (!pPML3) continue;
145 | uintptr_t pml3Phys = (uintptr_t)pPML3 - Arch::GetHHDMOffset();
146 |
147 | // Build a page entry.
148 | PageEntry entry = oldEnt;
149 | entry.m_address = pml3Phys >> 12;
150 |
151 | pNewPM->m_entries[i].m_data = entry.m_data;
152 | }
153 |
155 | for (int i = P_KERN_START; i < P_KERN_END; i++)
156 | {
157 | PageEntry& oldEnt = m_entries[i];
158 | pNewPM->m_entries[i].m_data = oldEnt.m_data;
159 | }
160 | 
161 | // The kernel half is shared by reference rather than cloned, so no extra
162 | // work is needed to keep it consistent. The caller is responsible for
163 | // activating the new mapping via SwitchTo().
164 | 
165 | 
166 | 
167 | return pNewPM;
168 | }
169 | 
170 | } // namespace VMM
--------------------------------------------------------------------------------
/source/MemMgr/VMM.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // VMM.cpp - Creation date: 07/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // Module description:
11 | // This module implements the NanoShell virtual memory
12 | // manager.
13 | //
14 | // ***************************************************************
15 | #include
16 | 
17 | // Get the next level for each of the levels.
18 | namespace VMM
19 | {
20 | 
21 | /**** Get the next level ****/
22 | 
23 | PageTable* PageDirectory::GetPageTable(int index)
24 | {
25 | if (!m_entries[index].m_present) return NULL;
26 | return (PageTable*)(Arch::GetHHDMOffset() + PAGE_SIZE * (m_entries[index].m_address));
27 | }
28 | 
29 | PageDirectory* PML3::GetPageDirectory(int index)
30 | {
31 | if (!m_entries[index].m_present) return NULL;
32 | return (PageDirectory*)(Arch::GetHHDMOffset() + PAGE_SIZE * (m_entries[index].m_address));
33 | }
34 | 
35 | PML3* PageMapping::GetPML3(int index)
36 | {
37 | if (!m_entries[index].m_present) return NULL;
38 | return (PML3*)(Arch::GetHHDMOffset() + PAGE_SIZE * (m_entries[index].m_address));
39 | }
40 | 
41 | PageMapping* PageMapping::GetFromCR3()
42 | {
43 | return (PageMapping*)(Arch::GetHHDMOffset() + (Arch::ReadCR3() & ~(uintptr_t)0xFFF)); // mask off the PWT/PCD/PCID bits; only bits 12+ hold the address
44 | }
45 | 
46 | /**** Unmap pages ****/
47 | void PageMapping::UnmapPage(uintptr_t addr, bool removeUpperLevels)
48 | {
49 | // Remove a page mapping.
50 | constexpr uintptr_t mask = 0x1FF;
51 | uintptr_t index_PML4 = (addr >> 39) & mask;
52 | uintptr_t index_PML3 = (addr >> 30) & mask;
53 | uintptr_t index_PML2 = (addr >> 21) & mask;
54 | uintptr_t index_PML1 = (addr >> 12) & mask;
55 | 
56 | PML3 *pml3 = GetPML3 (index_PML4); if (!pml3) return;
57 | PageDirectory *pml2 = pml3->GetPageDirectory(index_PML3); if (!pml2) return;
58 | PageTable *pml1 = pml2->GetPageTable (index_PML2); if (!pml1) return;
59 | 
60 | PageEntry& ent = pml1->m_entries[index_PML1];
61 | 
62 | // If this was a part of the PMM, free the page.
63 | if (ent.m_partOfPmm)
64 | {
65 | PMM::FreePage(ent.m_address << 12);
66 | }
67 | 
68 | memset(&ent, 0, sizeof ent);
69 | 
70 | if (removeUpperLevels)
71 | {
72 | // TODO: Remove upper levels too if they're empty.
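// A sketch of what that could look like (hypothetical helper, not part of
// the build): scan the 512 entries of the PML1; if none are present, clear
// the PML2 entry pointing at it and free its physical page, then repeat
// one level up.
//
//   bool IsEmpty(PageTable* pt) {
//       for (int i = 0; i < 512; i++)
//           if (pt->m_entries[i].m_present) return false;
//       return true;
//   }
//   if (IsEmpty(pml1)) {
//       PMM::FreePage(pml2->m_entries[index_PML2].m_address << 12);
//       memset(&pml2->m_entries[index_PML2], 0, sizeof(PageEntry));
//   }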
73 | }
74 | }
75 | 
76 | /**** Map pages ****/
77 | bool PageMapping::MapPage(uintptr_t addr, const PageEntry & pe)
78 | {
79 | constexpr uintptr_t mask = 0x1FF;
80 | uintptr_t index_PML4 = (addr >> 39) & mask;
81 | uintptr_t index_PML3 = (addr >> 30) & mask;
82 | uintptr_t index_PML2 = (addr >> 21) & mask;
83 | uintptr_t index_PML1 = (addr >> 12) & mask;
84 | 
85 | // if we don't have a pml3 here:
86 | PML3* pml3 = GetPML3(index_PML4);
87 | if (!pml3)
88 | {
89 | // allocate one
90 | uintptr_t page = PMM::AllocatePage();
91 | if (page == PMM::INVALID_PAGE) return false;
92 | pml3 = (PML3*)(Arch::GetHHDMOffset() + page);
93 | memset(pml3, 0, sizeof *pml3);
94 | m_entries[index_PML4] = PageEntry(page, PE_PRESENT | PE_READWRITE | PE_PARTOFPMM);
95 | }
96 | 
97 | // if we don't have a pml2 here:
98 | PageDirectory* pd = pml3->GetPageDirectory(index_PML3);
99 | if (!pd)
100 | {
101 | // allocate one
102 | uintptr_t page = PMM::AllocatePage();
103 | if (page == PMM::INVALID_PAGE) return false;
104 | pd = (PageDirectory*)(Arch::GetHHDMOffset() + page);
105 | memset(pd, 0, sizeof *pd);
106 | pml3->m_entries[index_PML3] = PageEntry(page, PE_PRESENT | PE_READWRITE | PE_PARTOFPMM);
107 | }
108 | 
109 | // if we don't have a pml1 here:
110 | PageTable* pt = pd->GetPageTable(index_PML2);
111 | if (!pt)
112 | {
113 | // allocate one
114 | uintptr_t page = PMM::AllocatePage();
115 | if (page == PMM::INVALID_PAGE) return false;
116 | pt = (PageTable*)(Arch::GetHHDMOffset() + page);
117 | memset(pt, 0, sizeof *pt);
118 | pd->m_entries[index_PML2] = PageEntry(page, PE_PRESENT | PE_READWRITE | PE_PARTOFPMM);
119 | }
120 | 
121 | // unmap whatever was here previously.
122 | UnmapPage(addr, false);
123 | 
124 | // map the page in.
125 | PageEntry& entry = pt->m_entries[index_PML1];
126 | 
127 | entry = pe;
128 | 
129 | return true;
130 | }
131 | 
132 | bool PageMapping::MapPage(uintptr_t addr, bool rw, bool super, bool xd)
133 | {
134 | /*
135 | uintptr_t pm = PMM::AllocatePage();
136 | if (pm == PMM::INVALID_PAGE) return false;
137 | 
138 | // Create a page entry. TODO don't actually allocate from the PMM but instead fault the page in.
139 | PageEntry pe(pm, rw, super, xd, true, false);
140 | */
141 | 
142 | uint64_t flags = PE_PARTOFPMM | PE_NEEDALLOCPAGE;
143 | if (rw)
144 | flags |= PE_READWRITE;
145 | if (super)
146 | flags |= PE_SUPERVISOR;
147 | if (xd)
148 | flags |= PE_EXECUTEDISABLE;
149 | 
150 | // Create a page entry. Note that the default_flags are overridden here.
151 | PageEntry pe(0, flags, 0);
152 | 
153 | return MapPage(addr, pe);
154 | }
155 | 
156 | PageEntry* PageMapping::GetPageEntry(uintptr_t addr)
157 | {
158 | // Look up the page entry for this address.
159 | constexpr uintptr_t mask = 0x1FF;
160 | uintptr_t index_PML4 = (addr >> 39) & mask;
161 | uintptr_t index_PML3 = (addr >> 30) & mask;
162 | uintptr_t index_PML2 = (addr >> 21) & mask;
163 | uintptr_t index_PML1 = (addr >> 12) & mask;
164 | 
165 | PML3 *pPml3;
166 | PageDirectory *pPageDir;
167 | PageTable *pPageTable;
168 | 
169 | if (!(pPml3 = GetPML3 (index_PML4))) return NULL;
170 | if (!(pPageDir = pPml3 ->GetPageDirectory(index_PML3))) return NULL;
171 | if (!(pPageTable = pPageDir->GetPageTable (index_PML2))) return NULL;
172 | return pPageTable->GetPageEntry(index_PML1);
173 | }
174 | 
175 | /**** Switch To ****/
176 | 
177 | void PageMapping::SwitchTo()
178 | {
179 | if (this < (PageMapping*)Arch::GetHHDMOffset())
180 | {
181 | SLogMsg("This page mapping isn't part of the HHDM");
182 | return;
183 | }
184 | 
185 | // Go!!
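// Writing CR3 reloads the paging hierarchy and flushes all non-global TLB
// entries, so no explicit Invalidate() calls are needed here.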
186 | Arch::WriteCR3((uintptr_t)this - Arch::GetHHDMOffset());
187 | }
188 | 
189 | }; // namespace VMM
190 | 
--------------------------------------------------------------------------------
/source/Panic.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // Panic.cpp - Creation date: 27/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // Module description:
11 | // This module implements the kernel panic functionality.
12 | //
13 | // ***************************************************************
14 | #include
15 | #include <_limine.h>
16 | #include
17 | #include
18 | 
19 | static Spinlock g_PanicLock;
20 | 
21 | extern Atomic<int> g_panickedCpus;
22 | extern Spinlock g_E9Spinlock;
23 | extern Spinlock g_TermSpinlock;
24 | 
25 | extern "C" void KernelPanic(const char* fmt, ...)
26 | {
27 | using namespace Arch;
28 | CPU* pThisCpu = CPU::GetCurrent();
29 | 
30 | char panic_formatted[8192];
31 | 
32 | va_list lst;
33 | va_start(lst, fmt);
34 | vsnprintf(panic_formatted, sizeof panic_formatted, fmt, lst);
35 | 
36 | // log the panic to the console, in case the code to force panic the other CPUs fails to send the IPIs for some reason
37 | LogMsg("CPU %d - PANIC: %s (RA:%p).....", pThisCpu->ID(), panic_formatted, __builtin_return_address(0));
38 | SLogMsg("CPU %d - PANIC: %s (RA:%p).....", pThisCpu->ID(), panic_formatted, __builtin_return_address(0));
39 | 
40 | // if the panic lock is already locked
41 | if (!g_PanicLock.TryLock())
42 | {
43 | // well, what a coincidence! We panicked here too. Simply wait forever at this point.
44 | // Eventually an IPI will come, putting us out of our misery.
45 | while (true)
46 | Halt();
47 | }
48 | 
49 | // really, we should ::Store(1), however we can only panic once during the lifetime of the kernel, so this and Store(1) are equivalent
50 | g_panickedCpus.FetchAdd(1);
51 | 
52 | // send all other processors an IPI
53 | limine_smp_response* pResp = CPU::GetSMPResponse();
54 | 
55 | for (uint32_t pid = 0; pid < pResp->cpu_count; pid++)
56 | {
57 | CPU* pCpu = CPU::GetCPU(pid);
58 | if (pCpu == pThisCpu) continue;
59 | 
60 | pCpu->SendIPI(CPU::eIpiType::PANIC);
61 | }
62 | 
63 | while (g_panickedCpus.Load() < (int)pResp->cpu_count)
64 | {
65 | Spinlock::SpinHint();
66 | }
67 | 
68 | // now that all CPUs are halted, forcefully unlock the LogMsg spinlocks, just in case we crashed within LogMsg functions
69 | g_E9Spinlock.Unlock();
70 | g_TermSpinlock.Unlock();
71 | 
72 | LogMsg("KERNEL PANIC! (CPU %u)", pThisCpu->ID());
73 | LogMsg("\nMessage: %s\n", panic_formatted);
74 | LogMsg("Note: Last return address: %p", __builtin_return_address(0));
75 | // TODO: A stack frame unwinder. NanoShell32 can do this, why not 64?
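// A minimal frame-pointer walk would do (a sketch; 'Frame' is a
// hypothetical struct, and this assumes the kernel is built with
// -fno-omit-frame-pointer so the RBP chain is intact):
//
//   struct Frame { Frame* rbp; uint64_t rip; };
//   Frame* f = (Frame*)__builtin_frame_address(0);
//   while (f && f->rip) {
//       LogMsg("  at %p", (void*)f->rip);
//       f = f->rbp;
//   }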
76 | 
77 | va_end(lst);
78 | 
79 | Arch::IdleLoop();
80 | }
81 | 
82 | 
83 | 
--------------------------------------------------------------------------------
/source/Scheduler.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // Scheduler.cpp - Creation date: 11/04/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | #include
10 | #include
11 | #include
12 | 
13 | static Atomic<int> g_NextThreadID(1);
14 | 
15 | void Scheduler::IdleThread()
16 | {
17 | while (true)
18 | {
19 | Arch::Halt();
20 | }
21 | }
22 | 
23 | void Scheduler::NormalThread()
24 | {
25 | 
26 | 
27 | 
28 | while (true)
29 | {
30 | LogMsg("Normal thread on CPU %u", Arch::CPU::GetCurrent()->ID());
31 | 
32 | Thread::Sleep(100'000'000); // sleep for 100 ms
33 | }
34 | }
35 | 
36 | void Scheduler::RealTimeThread()
37 | {
38 | 
39 | 
40 | 
41 | while (true)
42 | {
43 | LogMsg("Real Time thread on CPU %u", Arch::CPU::GetCurrent()->ID());
44 | 
45 | Thread::Sleep(1'000'000'000); // sleep for 1 second (1000 ms)
46 | }
47 | }
48 | 
49 | Thread* Scheduler::CreateThread()
50 | {
51 | Thread* pThrd = new(nopanic) Thread;
52 | if (!pThrd) return nullptr; // new(nopanic) may return NULL
53 | pThrd->m_pScheduler = this;
54 | pThrd->m_ID = g_NextThreadID.FetchAdd(1);
55 | 
56 | m_AllThreads.AddBack(pThrd);
57 | 
58 | return pThrd;
59 | }
60 | 
61 | Thread* Scheduler::GetCurrentThread()
62 | {
63 | return m_pCurrentThread;
64 | }
65 | 
66 | void Scheduler::Init()
67 | {
68 | // create the idle thread and two demo threads now
69 | Thread* pThrd1 = CreateThread();
70 | Thread* pThrd2 = CreateThread();
71 | Thread* pThrd3 = CreateThread();
72 | 
73 | if (!pThrd1 || !pThrd2 || !pThrd3)
74 | KernelPanic("could not create one of the three initial threads");
75 | 
76 | pThrd1->SetEntryPoint(Scheduler::IdleThread);
77 | pThrd1->SetPriority(Thread::IDLE);
78 | pThrd1->Start();
79 | pThrd1->Detach();
80 | 
81 | pThrd2->SetEntryPoint(Scheduler::NormalThread);
82 | pThrd2->SetPriority(Thread::NORMAL);
83 | pThrd2->Start();
84 | pThrd2->Detach();
85 | 
86 | pThrd3->SetEntryPoint(Scheduler::RealTimeThread);
87 | pThrd3->SetPriority(Thread::REALTIME);
88 | pThrd3->Start();
89 | pThrd3->Detach();
90 | }
91 | 
92 | // note that when a thread is scheduled for execution, it is removed from any queue
93 | void Scheduler::Done(Thread* pThread)
94 | {
95 | switch (pThread->m_Status.Load())
96 | {
97 | case Thread::RUNNING:
98 | m_ExecutionQueue.PushBack(pThread);
99 | break;
100 | case Thread::SETUP:
101 | // not sure how we got here. the scheduler really shouldn't
102 | // schedule threads during their setup phase
103 | ASSERT_UNREACHABLE;
104 | break;
105 | case Thread::ZOMBIE:
106 | // if this thread is owned, it's the owner's job to clean it up...
107 | if (pThread->m_bOwned.Load())
108 | break;
109 | // add it to the list of threads to dispose of
110 | m_ZombieThreads.AddBack(pThread);
111 | break;
112 | case Thread::SUSPENDED:
113 | // If this thread is now suspended, but has been running before, this means that
114 | // it does not appear in the suspended threads array and should be added there.
115 | m_SuspendedThreads.AddBack(pThread);
116 | break;
117 | case Thread::SLEEPING:
118 | // If this thread is now sleeping, but has been running before, this means that
119 | // it does not appear in the sleeping threads list and should be added there.
120 | m_SleepingThreads.PushBack(pThread);
121 | break;
122 | default:
123 | ASSERT_UNREACHABLE;
124 | break;
125 | }
126 | }
127 | 
128 | Thread* Scheduler::PopNextThread()
129 | {
130 | Thread* pThrd = nullptr;
131 | 
132 | if (m_ExecutionQueue.Size())
133 | {
134 | pThrd = m_ExecutionQueue.Front();
135 | m_ExecutionQueue.Erase(0);
136 | }
137 | 
138 | return pThrd;
139 | }
140 | 
141 | // looks through the list of suspended threads and checks if any are supposed to be unsuspended.
142 | // Note: This could be a performance concern.
143 | void Scheduler::CheckUnsuspensionConditions()
144 | {
145 | // TODO
146 | }
147 | 
148 | // looks through the list of zombie threads and disposes of them.
149 | void Scheduler::CheckZombieThreads()
150 | {
151 | // TODO
152 | }
153 | 
154 | // looks through the list of sleeping threads and unsuspends them if needed
155 | // Note: This could be a performance concern.
156 | void Scheduler::UnsuspendSleepingThreads()
157 | {
158 | // we can get away with simply checking the top
159 | if (m_SleepingThreads.Size() == 0) return;
160 | 
161 | uint64_t currTime = Arch::GetTickCount();
162 | 
163 | Thread* pThrd = m_SleepingThreads[0];
164 | 
165 | while (pThrd->m_SleepingUntil.Load() - 100 < currTime)
166 | {
167 | // unsuspend it
168 | pThrd->Unsuspend();
169 | // place it back on the regular queues:
170 | Done(pThrd);
171 | 
172 | // pop it off the top of the list:
173 | m_SleepingThreads.Erase(0);
174 | 
175 | if (m_SleepingThreads.Size() == 0)
176 | {
177 | pThrd = NULL;
178 | break;
179 | }
180 | 
181 | pThrd = m_SleepingThreads[0];
182 | }
183 | }
184 | 
185 | uint64_t Scheduler::CheckSleepingThreads()
186 | {
187 | if (m_SleepingThreads.Size() == 0) return 0;
188 | 
189 | // we can get away with simply checking the top
190 | uint64_t currTime = Arch::GetTickCount();
191 | Thread* pThrd = m_SleepingThreads[0];
192 | 
193 | if (pThrd->m_SleepingUntil.Load() - 100 < currTime)
194 | {
195 | // if there are still things to unsuspend, we probably failed in UnsuspendSleepingThreads.
196 | // Just let it retry in 1 microsecond
197 | return currTime + 1000;
198 | }
199 | 
200 | return pThrd->m_SleepingUntil.Load();
201 | }
202 | 
203 | // this is only to be called from Thread::Yield or the timer IRQ handler!
204 | void Scheduler::Schedule(bool bRunFromTimerIRQ)
205 | {
206 | using namespace Arch;
207 | m_pCurrentThread = nullptr;
208 | 
209 | Thread* pThread = PopNextThread();
210 | 
211 | // if no thread is ready to execute, we have nothing to run, so panic.
212 | if (!pThread)
213 | {
214 | KernelPanic("nothing to execute on CPU %u", Arch::CPU::GetCurrent()->ID());
215 | }
216 | 
217 | // set this thread as the current thread.
218 | m_pCurrentThread = pThread;
219 | 
220 | // set when the thread's time slice will expire:
221 | pThread->m_TimeSliceUntil = Arch::GetTickCount() + C_THREAD_MAX_TIME_SLICE;
222 | 
223 | // schedule an interrupt for the next event:
224 | uint64_t currTime = Arch::GetTickCount();
225 | uint64_t nextEvent = NextEvent();
226 | uint64_t timeWait = (nextEvent > currTime + 10) ? (nextEvent - 10 - currTime) : 1; // guard against underflow if the event is already (nearly) due
227 | 
228 | APIC::ScheduleInterruptIn(timeWait);
229 | 
230 | // if run from the timer IRQ, don't forget to send the APIC an EOI:
231 | if (bRunFromTimerIRQ)
232 | APIC::EndOfInterrupt();
233 | 
234 | // go!
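// JumpExecContext() does not return: it either restores the full register
// set the timer IRQ saved, or resumes, longjmp-style, from the point
// captured earlier by SetThreadEC().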
235 | m_pCurrentThread->JumpExecContext();
236 | }
237 | 
238 | void Scheduler::DeleteThread(Thread* pThread)
239 | {
240 | for (auto it = m_AllThreads.Begin(); it.Valid(); ++it)
241 | {
242 | if (*it != pThread) continue;
243 | 
244 | m_AllThreads.Erase(it);
245 | 
246 | return;
247 | }
248 | }
249 | 
250 | void Scheduler::CheckEvents()
251 | {
252 | CheckUnsuspensionConditions();
253 | CheckZombieThreads();
254 | UnsuspendSleepingThreads();
255 | }
256 | 
257 | void Scheduler::OnTimerIRQ(Registers* pRegs)
258 | {
259 | SLogMsg("X");
260 | using namespace Arch;
261 | 
262 | uint64_t currTime = Arch::GetTickCount();
263 | 
264 | CheckEvents();
265 | 
266 | // if we're running a thread right now... (note the reference. This is important)
267 | Thread* &t = m_pCurrentThread;
268 | if (t)
269 | {
270 | // If the thread's time slice has not expired yet, simply check for events, reprogram the APIC, and return.
271 | if (t->m_TimeSliceUntil - 100 > currTime)
272 | {
273 | uint64_t nextEvent = NextEvent();
274 | uint64_t timeWait = (nextEvent > currTime + 10) ? (nextEvent - 10 - currTime) : 1; // guard against underflow
275 | Arch::APIC::ScheduleInterruptIn(timeWait);
276 | return;
277 | }
278 | 
279 | // Save its context.
280 | t->m_bNeedRestoreAdditionalRegisters = true;
281 | 
282 | Thread::AdditionalRegisters & ar = t->m_AdditionalRegisters;
283 | Thread::ExecutionContext & ec = t->m_ExecContext;
284 | 
285 | // this is long...
286 | ar.rax = pRegs->rax;
287 | ar.rcx = pRegs->rcx;
288 | ar.rdx = pRegs->rdx;
289 | ar.rsi = pRegs->rsi;
290 | ar.rdi = pRegs->rdi;
291 | ar.r8 = pRegs->r8;
292 | ar.r9 = pRegs->r9;
293 | ar.r10 = pRegs->r10;
294 | ar.r11 = pRegs->r11;
295 | ar.ds = pRegs->ds;
296 | ar.es = pRegs->es;
297 | ar.fs = pRegs->fs;
298 | ar.gs = pRegs->gs;
299 | ec.rbp = pRegs->rbp;
300 | ec.rbx = pRegs->rbx;
301 | ec.r12 = pRegs->r12;
302 | ec.r13 = pRegs->r13;
303 | ec.r14 = pRegs->r14;
304 | ec.r15 = pRegs->r15;
305 | ec.rip = pRegs->rip;
306 | ec.rsp = pRegs->rsp;
307 | ec.cs = pRegs->cs;
308 | ec.ss = pRegs->ss;
309 | ec.rflags = pRegs->rflags;
310 | 
311 | // Mark the thread as 'done' and set the current thread to nullptr, then schedule
312 | Done(t);
313 | t = nullptr;
314 | }
315 | 
316 | Schedule(true);
317 | }
318 | 
319 | uint64_t Scheduler::NextEvent()
320 | {
321 | uint64_t currTime = Arch::GetTickCount();
322 | 
323 | // Figure out when the next event will be.
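// The next event is the earlier of: the running thread's time slice expiry
// (or a full slice from now if no thread is running), and the earliest
// sleeping thread's wake-up time, clamped so it never lands in the past.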
324 | uint64_t time = currTime + C_THREAD_MAX_TIME_SLICE;
325 | 
326 | if (m_pCurrentThread)
327 | time = m_pCurrentThread->m_TimeSliceUntil;
328 | 
329 | // TODO: for each suspended thread, check if it's ready to be woken up
330 | uint64_t stTime = CheckSleepingThreads();
331 | if (time > stTime && stTime != 0)
332 | time = stTime;
333 | 
334 | if (time < currTime)
335 | {
336 | // if we somehow managed to mess it up, try again in 1 microsecond, should fix it:
337 | time = currTime + 1000;
338 | }
339 | 
340 | return time;
341 | }
--------------------------------------------------------------------------------
/source/Spinlock.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // Spinlock.cpp - Creation date: 04/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | 
10 | #include
11 | 
12 | LockGuard::LockGuard(Spinlock& lock) : m_lock(lock)
13 | {
14 | m_lock.Lock();
15 | }
16 | 
17 | LockGuard::LockGuard(Spinlock& lock, AdoptLock) : m_lock(lock)
18 | {
19 | }
20 | 
21 | LockGuard::~LockGuard()
22 | {
23 | m_lock.Unlock();
24 | }
--------------------------------------------------------------------------------
/source/Standard/CStandard.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // CStandard.cpp - Creation date: 05/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // Module description:
11 | // This module implements the standard C functions that may
12 | // be needed, such as memcpy, memset, strlen, etc.
13 | // 14 | // *************************************************************** 15 | #include 16 | 17 | extern "C" { 18 | 19 | void* memcpy(void* dst, const void* src, size_t n) 20 | { 21 | void* dst2 = dst; 22 | ASM("rep movsb":"+c"(n),"+D"(dst),"+S"(src)::"memory"); 23 | return dst2; 24 | } 25 | 26 | void* memquadcpy(uint64_t* dst, const uint64_t* src, size_t n) 27 | { 28 | void* dst2 = dst; 29 | ASM("rep movsq":"+c"(n),"+D"(dst),"+S"(src)::"memory"); 30 | return dst2; 31 | } 32 | 33 | void* memset(void* dst, int c, size_t n) 34 | { 35 | void* dst2 = dst; 36 | ASM("rep stosb":"+c"(n),"+D"(dst):"a"(c):"memory"); 37 | return dst2; 38 | } 39 | 40 | size_t strlen(const char * s) 41 | { 42 | //optimization hint : https://github.com/bminor/glibc/blob/master/string/strlen.c 43 | size_t sz = 0; 44 | while (*s++) sz++; 45 | return sz; 46 | } 47 | 48 | char* strcpy(char* s, const char * d) 49 | { 50 | return (char*)memcpy(s, d, strlen(d) + 1); 51 | } 52 | 53 | char* strcat(char* s, const char * src) 54 | { 55 | return strcpy(s + strlen(s), src); 56 | } 57 | 58 | int memcmp(const void* s1, const void* s2, size_t n) 59 | { 60 | uint8_t* b1 = (uint8_t*)s1; 61 | uint8_t* b2 = (uint8_t*)s2; 62 | 63 | while (n--) 64 | { 65 | if (*b1 < *b2) 66 | return -1; 67 | if (*b1 > *b2) 68 | return 1; 69 | 70 | b1++, b2++; 71 | } 72 | 73 | // the memory is equal 74 | return 0; 75 | } 76 | 77 | int strcmp(const char* s1, const char* s2) 78 | { 79 | while (true) 80 | { 81 | if (!*s1 && !*s2) 82 | return 0; 83 | 84 | // if either one of the pointers points to a 0 character, 85 | // either case applies, so it's fine. 86 | if (*s1 < *s2) 87 | return -1; 88 | if (*s1 > *s2) 89 | return 1; 90 | 91 | s1++; 92 | s2++; 93 | } 94 | return 0; 95 | } 96 | 97 | }; 98 | -------------------------------------------------------------------------------- /source/Standard/CxxAbi.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // CxxAbi.cpp - Creation date: 07/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This file contains the global constructor executor. 
12 | // *************************************************************** 13 | #include 14 | #include 15 | 16 | typedef void(*Constructor)(); 17 | typedef void(*Destructor)(); 18 | 19 | extern Constructor g_init_array_start[], g_init_array_end[]; 20 | extern Destructor g_fini_array_start[], g_fini_array_end[]; 21 | 22 | void RunAllConstructors() 23 | { 24 | for (auto func = g_init_array_start; func != g_init_array_end; func++) 25 | (*func)(); 26 | } 27 | 28 | void RunAllDestructors() 29 | { 30 | for (auto func = g_fini_array_start; func != g_fini_array_end; func++) 31 | (*func)(); 32 | } 33 | 34 | static void* OperatorNew(size_t size) 35 | { 36 | void* pMem = VMM::KernelHeap::Allocate(size); 37 | 38 | if (!pMem) 39 | KernelPanic("ERROR: cannot new[](%z), kernel heap gave us NULL.", size); 40 | 41 | return pMem; 42 | } 43 | 44 | static void OperatorFree(void* ptr) 45 | { 46 | VMM::KernelHeap::Free(ptr); 47 | } 48 | 49 | void* operator new(size_t size) 50 | { 51 | return OperatorNew(size); 52 | } 53 | 54 | void* operator new[](size_t size) 55 | { 56 | return OperatorNew(size); 57 | } 58 | 59 | void* operator new(size_t size, const nopanic_t&) 60 | { 61 | return VMM::KernelHeap::Allocate(size); 62 | } 63 | 64 | void* operator new[](size_t size, const nopanic_t&) 65 | { 66 | return VMM::KernelHeap::Allocate(size); 67 | } 68 | 69 | void operator delete(void* ptr) 70 | { 71 | OperatorFree(ptr); 72 | } 73 | 74 | void operator delete(void* ptr, UNUSED size_t size) 75 | { 76 | OperatorFree(ptr); 77 | } 78 | 79 | void operator delete[](void* ptr) 80 | { 81 | OperatorFree(ptr); 82 | } 83 | 84 | void operator delete[](void* ptr, UNUSED size_t size) 85 | { 86 | OperatorFree(ptr); 87 | } 88 | 89 | // cxa calls 90 | extern "C" 91 | { 92 | 93 | void __cxa_pure_virtual() 94 | { 95 | // Do nothing or print an error message. 96 | LogMsg("Pure virtual function!"); 97 | } 98 | 99 | // what the hell 100 | void __dso_handle() 101 | { 102 | } 103 | void __cxa_atexit() 104 | { 105 | } 106 | 107 | }; 108 | -------------------------------------------------------------------------------- /source/Standard/Printf.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // Printf.cpp - Creation date: 05/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This module implements printf-style functions. 
12 | // 13 | // *************************************************************** 14 | 15 | #include 16 | 17 | #define IS_64_BIT 1 18 | 19 | static void UnsignedToString(uint64_t num, char* str, int paddingInfo, char paddingChar) 20 | { 21 | // print the actual digits themselves 22 | int i = 0; 23 | while (num || i == 0) 24 | { 25 | str[i++] = '0' + (num % 10); 26 | str[i] = '\0'; 27 | num /= 10; 28 | } 29 | 30 | // append padding too 31 | for (; i < paddingInfo; ) 32 | { 33 | str[i++] = paddingChar; 34 | str[i] = '\0'; 35 | } 36 | 37 | // reverse the string 38 | int start = 0, end = i - 1; 39 | while (start < end) 40 | { 41 | char 42 | temp = str[start]; 43 | str[start] = str[end]; 44 | str[end] = temp; 45 | start++; 46 | end--; 47 | } 48 | } 49 | 50 | void SignedToString(int64_t num, char* str, int paddingInfo, char paddingChar) 51 | { 52 | if (num < 0) 53 | { 54 | str[0] = '-'; 55 | UnsignedToString((uint64_t)(-num), str + 1, paddingInfo, paddingChar); 56 | } 57 | else 58 | UnsignedToString((uint64_t) num, str, paddingInfo, paddingChar); 59 | } 60 | 61 | extern "C" { 62 | 63 | int vsnprintf(char* buf, size_t sz, const char* fmt, va_list args) 64 | { 65 | int paddingInfo = -1; 66 | char paddingChar = ' '; 67 | size_t currentIndex = 0; 68 | while (*fmt) 69 | { 70 | char m = *fmt; 71 | if (!m) goto finished; 72 | fmt++; 73 | 74 | if (m == '%') 75 | { 76 | paddingInfo = -1; 77 | paddingChar = ' '; 78 | 79 | m = *(fmt++); 80 | 81 | // if hit end, return 82 | if (!m) goto finished; 83 | 84 | // handle %0 or %. 85 | if (m == '0' || m == '.') 86 | { 87 | // this by default handles %0 too, though it does nothing 88 | paddingInfo = 0; 89 | m = *(fmt++); 90 | 91 | // if hit end, return 92 | if (!m) goto finished; 93 | 94 | // handle %0D cases (D = digit) 95 | if (m >= '0' && m <= '9') 96 | { 97 | paddingInfo = m - '0'; 98 | paddingChar = '0'; 99 | m = *(fmt++); 100 | } 101 | } 102 | else if (m >= '1' && m <= '9') 103 | { 104 | paddingInfo = m - '0'; 105 | paddingChar = ' '; 106 | m = *(fmt++); 107 | 108 | // if hit end, return 109 | if (!m) goto finished; 110 | } 111 | 112 | switch (m) 113 | { 114 | // Format a string 115 | case 's': case 'S': 116 | { 117 | const char* pString = va_arg(args, const char*); 118 | 119 | //allow user to print null 120 | if (pString == NULL) 121 | pString = "(null)"; 122 | 123 | while (*pString) 124 | { 125 | if (currentIndex >= sz - 1) 126 | goto finished; 127 | 128 | // place this character here 129 | buf[currentIndex++] = *pString; 130 | 131 | pString++; 132 | } 133 | 134 | break; 135 | } 136 | // Escape a percentage symbol 137 | case '%': 138 | { 139 | if (currentIndex >= sz - 1) 140 | goto finished; 141 | 142 | buf[currentIndex++] = '%'; 143 | break; 144 | } 145 | // Format a char 146 | case 'c': case 'C': 147 | { 148 | // using va_arg(args, char) has undefined behavior, because 149 | // char arguments will be promoted to int. 
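// (Default argument promotions: in a variadic call, char and short are
// widened to int, and float to double, before being passed, so the value
// must be read back at the promoted type and then narrowed.)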
150 | char character = (char)va_arg(args, int); 151 | 152 | if (currentIndex >= sz - 1) 153 | goto finished; 154 | 155 | buf[currentIndex++] = character; 156 | break; 157 | } 158 | // Format an int 159 | case 'd': case 'i': case 'D': case 'I': 160 | { 161 | int num = va_arg(args, int); 162 | char buffer[20]; 163 | 164 | SignedToString(num, buffer, paddingInfo, paddingChar); 165 | 166 | const char* pString = buffer; 167 | while (*pString) 168 | { 169 | if (currentIndex >= sz - 1) 170 | goto finished; 171 | 172 | // place this character here 173 | buf[currentIndex++] = *pString; 174 | 175 | pString++; 176 | } 177 | 178 | break; 179 | } 180 | // Format an unsigned int 181 | #if !IS_64_BIT 182 | case 'z': 183 | { 184 | m = *(fmt++); 185 | if (m == 0) goto finished; 186 | //fallthrough intended 187 | [[fallthrough]]; 188 | } 189 | #endif 190 | case 'u': case 'U': 191 | { 192 | uint32_t num = va_arg(args, uint32_t); 193 | char buffer[20]; 194 | 195 | UnsignedToString(num, buffer, paddingInfo, paddingChar); 196 | 197 | const char* pString = buffer; 198 | while (*pString) 199 | { 200 | if (currentIndex >= sz - 1) 201 | goto finished; 202 | 203 | // place this character here 204 | buf[currentIndex++] = *pString; 205 | 206 | pString++; 207 | } 208 | 209 | break; 210 | } 211 | // Format a longer integer. 212 | #if IS_64_BIT 213 | case 'z': 214 | #endif 215 | case 'l': 216 | { 217 | bool longlong = false; 218 | #if IS_64_BIT 219 | if (m == 'z') 220 | { 221 | longlong = true; 222 | m = 'u'; 223 | goto __parse_the_thing; 224 | } 225 | #endif 226 | m = *(fmt++); 227 | if (m == 'l') 228 | { 229 | longlong = true; 230 | m = *(fmt++); 231 | } 232 | if (m == 0) goto finished; 233 | 234 | #if IS_64_BIT 235 | __parse_the_thing: 236 | #endif 237 | 238 | const char* pString = NULL; 239 | char buffer[30]; 240 | buffer[0] = 0; 241 | pString = buffer; 242 | 243 | if (m == 'u') 244 | { 245 | unsigned long long num = longlong ? va_arg(args, unsigned long long) : va_arg(args, unsigned long); 246 | UnsignedToString(num, buffer, paddingInfo, paddingChar); 247 | } 248 | if (m == 'd') 249 | { 250 | long long num = longlong ? va_arg(args, long long) : va_arg(args, long); 251 | SignedToString(num, buffer, paddingInfo, paddingChar); 252 | } 253 | 254 | if (!pString) break; 255 | 256 | while (*pString) 257 | { 258 | if (currentIndex >= sz - 1) 259 | goto finished; 260 | 261 | // place this character here 262 | buf[currentIndex++] = *pString; 263 | 264 | pString++; 265 | } 266 | break; 267 | } 268 | // Format a uint8_t as lowercase/uppercase hexadecimal 269 | case 'b': 270 | case 'B': 271 | { 272 | const char* charset = "0123456789abcdef"; 273 | if (m == 'B') 274 | charset = "0123456789ABCDEF"; 275 | 276 | // using va_arg(args, uint8_t) has undefined behavior, because 277 | // uint8_t arguments will be promoted to int. 278 | uint8_t p = (uint8_t) va_arg(args, uint32_t); 279 | 280 | if (currentIndex >= sz - 1) goto finished; 281 | buf[currentIndex++] = charset[(p & 0xF0) >> 4]; 282 | if (currentIndex >= sz - 1) goto finished; 283 | buf[currentIndex++] = charset[p & 0x0F]; 284 | 285 | break; 286 | } 287 | // Format a uint16_t as lowercase/uppercase hexadecimal 288 | case 'w': 289 | case 'W': 290 | { 291 | const char* charset = "0123456789abcdef"; 292 | if (m == 'W') 293 | charset = "0123456789ABCDEF"; 294 | 295 | // using va_arg(args, uint16_t) has undefined behavior, because 296 | // uint16_t arguments will be promoted to int. 
297 | uint16_t p = (uint16_t) va_arg(args, uint32_t); 298 | 299 | for (uint32_t mask = 0xF000, bitnum = 12; mask; mask >>= 4, bitnum -= 4) 300 | { 301 | if (currentIndex >= sz - 1) 302 | goto finished; 303 | 304 | // place this character here 305 | buf[currentIndex++] = charset[(p & mask) >> bitnum]; 306 | } 307 | 308 | break; 309 | } 310 | // Format a uint32_t as lowercase/uppercase hexadecimal 311 | case 'x': 312 | case 'X': 313 | #if !IS_64_BIT 314 | case 'p': case 'P': 315 | #endif 316 | { 317 | const char* charset = "0123456789abcdef"; 318 | if (m == 'X' || m == 'P') 319 | charset = "0123456789ABCDEF"; 320 | 321 | uint32_t p = va_arg(args, uint32_t); 322 | 323 | for (uint32_t mask = 0xF0000000, bitnum = 28; mask; mask >>= 4, bitnum -= 4) 324 | { 325 | if (currentIndex >= sz - 1) 326 | goto finished; 327 | 328 | // place this character here 329 | buf[currentIndex++] = charset[(p & mask) >> bitnum]; 330 | } 331 | 332 | break; 333 | } 334 | // Format a uint64_t as lowercase/uppercase hexadecimal 335 | case 'q': 336 | case 'Q': 337 | #if IS_64_BIT 338 | case 'p': case 'P': 339 | #endif 340 | { 341 | const char* charset = "0123456789abcdef"; 342 | if (m == 'Q' || m == 'P') 343 | charset = "0123456789ABCDEF"; 344 | 345 | uint64_t p = va_arg(args, uint64_t); 346 | 347 | for (uint64_t mask = 0xF000000000000000ULL, bitnum = 60; mask; mask >>= 4, bitnum -= 4) 348 | { 349 | if (currentIndex >= sz - 1) 350 | goto finished; 351 | 352 | // place this character here 353 | buf[currentIndex++] = charset[(p & mask) >> bitnum]; 354 | } 355 | 356 | break; 357 | } 358 | } 359 | } 360 | else 361 | { 362 | if (currentIndex >= sz - 1) 363 | goto finished; 364 | buf[currentIndex++] = m; 365 | } 366 | } 367 | finished: 368 | buf[currentIndex] = '\0'; 369 | return (int)currentIndex; 370 | } 371 | 372 | int vsprintf(char* buf, const char* fmt, va_list args) 373 | { 374 | return vsnprintf(buf, SIZE_MAX, fmt, args); 375 | } 376 | 377 | int snprintf(char* buf, size_t sz, const char* fmt, ...) 378 | { 379 | va_list lst; 380 | va_start(lst, fmt); 381 | 382 | int val = vsnprintf(buf, sz, fmt, lst); 383 | 384 | va_end(lst); 385 | 386 | return val; 387 | } 388 | 389 | int sprintf(char* buf, const char* fmt, ...) 
390 | { 391 | va_list lst; 392 | va_start(lst, fmt); 393 | 394 | int val = vsprintf(buf, fmt, lst); 395 | 396 | va_end(lst); 397 | 398 | return val; 399 | } 400 | 401 | }; // extern "C" 402 | -------------------------------------------------------------------------------- /source/System.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // System.cpp - Creation date: 05/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | #include 10 | 11 | extern "C" void AssertUnreachable(const char* src_file, int src_line) 12 | { 13 | KernelPanic("ASSERT_UNREACHABLE reached at %s:%d", src_file, src_line); 14 | } 15 | -------------------------------------------------------------------------------- /source/Terminal.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // Terminal.cpp - Creation date: 05/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include <_limine.h> 14 | #include "LimineTerm/framebuffer.h" 15 | 16 | // TODO: Restructure this so that it's based on a worker thread. For now, this works. 
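// One possible shape for that (a sketch, not the project's design): callers
// push formatted strings into a spinlock-guarded queue, and a dedicated
// worker thread drains the queue into term_write(), so slow framebuffer
// scrolling never happens while a caller holds the terminal lock.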
17 | 18 | volatile limine_framebuffer_request g_FramebufferRequest = 19 | { 20 | .id = LIMINE_FRAMEBUFFER_REQUEST, 21 | .revision = 0, 22 | .response = NULL, 23 | }; 24 | 25 | term_context* g_pTermContext = NULL; 26 | 27 | bool Terminal::CheckResponse() 28 | { 29 | return g_FramebufferRequest.response != NULL; 30 | } 31 | 32 | void Terminal::Setup() 33 | { 34 | if (!CheckResponse()) Arch::IdleLoop(); 35 | if (g_FramebufferRequest.response->framebuffer_count == 0) Arch::IdleLoop(); 36 | 37 | limine_framebuffer* pFB = g_FramebufferRequest.response->framebuffers[0]; 38 | 39 | uint32_t defaultBG = 0x0000007f; 40 | uint32_t defaultFG = 0x00ffffff; 41 | 42 | g_pTermContext = fbterm_init( 43 | EternalHeap::Allocate, 44 | (uint32_t*)pFB->address, 45 | pFB->width, pFB->height, pFB->pitch, 46 | NULL, // canvas 47 | NULL, // ANSI colors 48 | NULL, // ANSI bright colors 49 | &defaultBG, // Default BG 50 | &defaultFG, // Default FG 51 | NULL, // Default BG Bright 52 | NULL, // Default FG Bright 53 | NULL, // Font 54 | 0, 0, // font width, height 55 | 0, // font spacing 56 | 1, 1, // font scale X, Y 57 | 0 58 | ); 59 | } 60 | 61 | Spinlock g_E9Spinlock; 62 | Spinlock g_TermSpinlock; 63 | 64 | void Terminal::E9Write(const char* str) 65 | { 66 | //LockGuard lg(g_E9Spinlock); 67 | while (*str) 68 | { 69 | Arch::WriteByte(0xE9, *str); 70 | str++; 71 | } 72 | } 73 | 74 | void Terminal::E9WriteLn(const char* str) 75 | { 76 | //LockGuard lg(g_E9Spinlock); 77 | while (*str) 78 | { 79 | Arch::WriteByte(0xE9, *str); 80 | str++; 81 | } 82 | Arch::WriteByte(0xE9, '\n'); 83 | } 84 | 85 | void Terminal::Write(const char* str) 86 | { 87 | LockGuard lg(g_TermSpinlock); 88 | term_write(g_pTermContext, str, strlen(str)); 89 | } 90 | 91 | void Terminal::WriteLn(const char* str) 92 | { 93 | LockGuard lg(g_TermSpinlock); 94 | term_write(g_pTermContext, str, strlen(str)); 95 | term_write(g_pTermContext, "\n", 1); 96 | } 97 | 98 | // LogMsg and SLogMsg. Useful for debugging, these can be called from anywhere, even C contexts. 99 | // Note: This is limited to 1024 bytes. Careful! 100 | // TODO: Don't repeat myself 101 | // TODO: Expand this maybe? 102 | extern "C" void LogMsg(const char* fmt, ...) 103 | { 104 | va_list lst; 105 | va_start(lst, fmt); 106 | char buffer[1024]; 107 | vsnprintf(buffer, sizeof buffer, fmt, lst); 108 | va_end(lst); 109 | 110 | Terminal::WriteLn(buffer); 111 | } 112 | 113 | extern "C" void SLogMsg(const char* fmt, ...) 114 | { 115 | va_list lst; 116 | va_start(lst, fmt); 117 | char buffer[1024]; 118 | vsnprintf(buffer, sizeof buffer, fmt, lst); 119 | va_end(lst); 120 | 121 | Terminal::E9WriteLn(buffer); 122 | } 123 | 124 | extern "C" void LogMsgNoCR(const char* fmt, ...) 125 | { 126 | va_list lst; 127 | va_start(lst, fmt); 128 | char buffer[1024]; 129 | vsnprintf(buffer, sizeof buffer, fmt, lst); 130 | va_end(lst); 131 | 132 | Terminal::Write(buffer); 133 | } 134 | 135 | extern "C" void SLogMsgNoCR(const char* fmt, ...) 
136 | { 137 | va_list lst; 138 | va_start(lst, fmt); 139 | char buffer[1024]; 140 | vsnprintf(buffer, sizeof buffer, fmt, lst); 141 | va_end(lst); 142 | 143 | Terminal::E9Write(buffer); 144 | } 145 | -------------------------------------------------------------------------------- /source/Thread.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // Thread.cpp - Creation date: 11/04/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | #include 10 | 11 | extern "C" RETURNS_TWICE uint64_t SetThreadEC(Thread::ExecutionContext* pEC); 12 | extern "C" NO_RETURN void JumpThreadEC(Thread::ExecutionContext* pEC, uint64_t value); 13 | extern "C" NO_RETURN void JumpThreadEC2(Thread::ExecutionContext* pEC, Thread::AdditionalRegisters* pAR); 14 | 15 | void Thread::Beginning() 16 | { 17 | Thread* pThread = Arch::CPU::GetCurrent()->GetScheduler()->GetCurrentThread(); 18 | 19 | // call the entry point 20 | if (!pThread->m_EntryPoint) 21 | { 22 | SLogMsg("Warning: Starting a thread without an entry point is considered an error (Thread::Beginning)"); 23 | pThread->Kill(); 24 | return; 25 | } 26 | 27 | pThread->m_EntryPoint(); 28 | pThread->Kill(); 29 | } 30 | 31 | void Thread::SetEntryPoint(ThreadEntry pEntry) 32 | { 33 | if (m_Status.Load() != SETUP) 34 | { 35 | SLogMsg("Calling Thread::SetEntryPoint(%p) on a thread (ID %d) which is in state %d is an error", pEntry, m_ID, m_Status.Load()); 36 | return; 37 | } 38 | 39 | m_EntryPoint = pEntry; 40 | } 41 | 42 | void Thread::SetPriority(ePriority prio) 43 | { 44 | m_Priority.Store(prio); 45 | } 46 | 47 | void Thread::Detach() 48 | { 49 | m_bOwned.Store(false); 50 | } 51 | 52 | void Thread::Join() 53 | { 54 | // you can't join an unjoinable thread 55 | if (!m_bOwned.Load()) 56 | return; 57 | 58 | while (m_Status.Load() != ZOMBIE) 59 | { 60 | // TODO: yield our current thread? 61 | Spinlock::SpinHint(); 62 | } 63 | } 64 | 65 | void Thread::SetStackSize(size_t sz) 66 | { 67 | if (m_Status.Load() != SETUP) 68 | { 69 | SLogMsg("Calling Thread::SetStackSize(%z) on a thread (ID %d) which is in state %d is an error", sz, m_ID, m_Status.Load()); 70 | return; 71 | } 72 | 73 | m_StackSize = sz; 74 | } 75 | 76 | void Thread::Kill() 77 | { 78 | m_Status.Store(ZOMBIE); 79 | 80 | // note: I mean, yielding is harmless, but this is better to do 81 | if (this == m_pScheduler->GetCurrentThread()) 82 | Yield(); 83 | } 84 | 85 | void Thread::Unsuspend() 86 | { 87 | m_Status.Store(RUNNING); 88 | m_SleepingUntil = 0; 89 | } 90 | 91 | void Thread::Resume() 92 | { 93 | auto pCpu = Arch::CPU::GetCurrent(); 94 | 95 | // avoid a TOCTOU bug: 96 | bool bOldState = pCpu->SetInterruptsEnabled(false); 97 | 98 | if (m_Status.Load() == SUSPENDED) 99 | { 100 | m_Status.Store(RUNNING); 101 | } 102 | 103 | pCpu->SetInterruptsEnabled(bOldState); 104 | } 105 | 106 | void Thread::Start() 107 | { 108 | using namespace Arch; 109 | auto pCpu = CPU::GetCurrent(); 110 | 111 | // This function starts the thread. 112 | 113 | // Clear interrupts. This prevents the scheduler from running during its manipulation: 114 | bool bOldState = pCpu->SetInterruptsEnabled(false); 115 | 116 | // Set up the stack. 
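// The stack grows downward, so RSP is set near the top of the fresh
// allocation (two uint64_t slots below the very end), and Thread::Beginning
// starts executing on it when the thread is first scheduled.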
117 | size_t nStackSizeLongs = m_StackSize / 8; 118 | 119 | m_pStack = new uint64_t[nStackSizeLongs]; 120 | 121 | // Ensure the stack won't fault on access by faulting it in ourselves 122 | for (size_t i = 0; i < nStackSizeLongs; i++) 123 | m_pStack[i] = 0; 124 | 125 | // Preparing the execution context: 126 | 127 | // Set the execution context as 'here', to copy the rflags over. 128 | SetThreadEC(&m_ExecContext); 129 | 130 | // force the rflags to have interrupts enabled: 131 | m_ExecContext.rflags |= C_RFLAGS_INTERRUPT_FLAG; 132 | 133 | m_ExecContext.rip = (uint64_t)Thread::Beginning; 134 | m_ExecContext.rsp = (uint64_t)&m_pStack[nStackSizeLongs - 2]; 135 | m_ExecContext.cs = GDT::DESC_64BIT_RING0_CODE; 136 | m_ExecContext.ss = GDT::DESC_64BIT_RING0_DATA; 137 | 138 | m_Status.Store(RUNNING); 139 | 140 | m_pScheduler->Done(this); 141 | 142 | // Restore the old interrupt state after we're done. 143 | pCpu->SetInterruptsEnabled(bOldState); 144 | } 145 | 146 | // static 147 | void Thread::Yield() 148 | { 149 | // clear interrupts. 150 | auto pCpu = Arch::CPU::GetCurrent(); 151 | auto pSched = pCpu->GetScheduler(); 152 | 153 | bool bOldState = pCpu->SetInterruptsEnabled(false); 154 | 155 | Thread* pThrd = pSched->GetCurrentThread(); 156 | 157 | if (pThrd == nullptr) 158 | { 159 | pSched->Schedule(false); 160 | ASSERT_UNREACHABLE; 161 | } 162 | 163 | // save our execution point. If needed, we will return. 164 | if (SetThreadEC(&pThrd->m_ExecContext)) 165 | { 166 | pCpu->SetInterruptsEnabled(bOldState); 167 | return; 168 | } 169 | 170 | pSched->Done(pThrd); 171 | pSched->Schedule(false); 172 | ASSERT_UNREACHABLE; 173 | } 174 | 175 | void Thread::JumpExecContext() 176 | { 177 | using namespace Arch; 178 | CPU* pCpu = CPU::GetCurrent(); 179 | 180 | pCpu->InterruptsEnabledRaw() = (m_ExecContext.rflags & C_RFLAGS_INTERRUPT_FLAG); 181 | 182 | if (m_bNeedRestoreAdditionalRegisters) 183 | { 184 | m_bNeedRestoreAdditionalRegisters = false; 185 | JumpThreadEC2(&m_ExecContext, &m_AdditionalRegisters); 186 | } 187 | else 188 | { 189 | JumpThreadEC(&m_ExecContext, 1); 190 | } 191 | } 192 | 193 | Thread* Thread::GetCurrent() 194 | { 195 | return Arch::CPU::GetCurrent()->GetScheduler()->GetCurrentThread(); 196 | } 197 | 198 | void Thread::Suspend() 199 | { 200 | m_Status.Store(SUSPENDED); 201 | 202 | if (this == m_pScheduler->GetCurrentThread()) 203 | Yield(); 204 | } 205 | 206 | void Thread::SleepUntil(uint64_t time) 207 | { 208 | m_SleepingUntil.Store(time); 209 | m_Status.Store(SLEEPING); 210 | 211 | if (this == m_pScheduler->GetCurrentThread()) 212 | Yield(); 213 | } 214 | 215 | void Thread::Sleep(uint64_t nanoseconds) 216 | { 217 | GetCurrent()->SleepUntil(Arch::GetTickCount() + nanoseconds - 20); 218 | } 219 | -------------------------------------------------------------------------------- /source/ax86_64/APIC.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // APIC.cpp - Creation date: 05/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This module implements a manager for the APIC for each CPU. 
12 | // 13 | // *************************************************************** 14 | #include 15 | #include 16 | #include 17 | 18 | #define C_SPURIOUS_INTERRUPT_VECTOR (0xFF) 19 | #define C_APIC_TIMER_DIVIDE_BY_128 (0b1010) // Intel SDM Vol.3A Ch.11 "11.5.4 APIC Timer". Bit 2 is reserved. 20 | #define C_APIC_TIMER_DIVIDE_BY_16 (0b0011) // Intel SDM Vol.3A Ch.11 "11.5.4 APIC Timer". Bit 2 is reserved. 21 | #define C_APIC_TIMER_MODE_ONESHOT (0b00 << 17) 22 | #define C_APIC_TIMER_MODE_PERIODIC (0b01 << 17) // not used right now, but may be needed. 23 | #define C_APIC_TIMER_MODE_TSCDEADLN (0b10 << 17) 24 | 25 | #define APIC_LVT_INT_MASKED (0x10000) 26 | 27 | #define IA32_APIC_BASE_MSR (0x1B) 28 | 29 | using namespace Arch; 30 | 31 | enum 32 | { 33 | APIC_REG_ID = 0x20, 34 | APIC_REG_VER = 0x30, 35 | APIC_REG_TASK_PRIORITY = 0x80, 36 | APIC_REG_ARB_PRIORITY = 0x90, 37 | APIC_REG_PROC_PRIORITY = 0xA0, 38 | APIC_REG_EOI = 0xB0, 39 | APIC_REG_REMOTE_READ = 0xC0, 40 | APIC_REG_LOGICAL_DEST = 0xD0, 41 | APIC_REG_DEST_FORMAT = 0xE0, 42 | APIC_REG_SPURIOUS = 0xF0, 43 | APIC_REG_ISR_START = 0x100, // 0x100 - 0x170 44 | APIC_REG_TRIG_MODE = 0x180, // 0x180 - 0x1F0 45 | APIC_REG_IRQ = 0x200, // 0x200 - 0x270 46 | APIC_REG_ERROR_STAT = 0x280, 47 | APIC_REG_LVT_CMCI = 0x2F0, 48 | APIC_REG_ICR0 = 0x300, 49 | APIC_REG_ICR1 = 0x310, 50 | APIC_REG_LVT_TIMER = 0x320, 51 | APIC_REG_LVT_THERMAL = 0x330, 52 | APIC_REG_LVT_PERFMON = 0x340, 53 | APIC_REG_LVT_LINT0 = 0x350, 54 | APIC_REG_LVT_LINT1 = 0x360, 55 | APIC_REG_LVT_ERROR = 0x370, 56 | APIC_REG_TMR_INIT_CNT = 0x380, 57 | APIC_REG_TMR_CURR_CNT = 0x390, 58 | APIC_REG_TMR_DIV_CFG = 0x3E0, 59 | }; 60 | 61 | enum 62 | { 63 | APIC_ICR0_DELIVERY_STATUS = (1 << 12), 64 | }; 65 | 66 | enum 67 | { 68 | APIC_ICR1_SINGLE = (0 << 18), 69 | APIC_ICR1_SELF = (1 << 18), 70 | APIC_ICR1_BROADCAST = (2 << 18), 71 | APIC_ICR1_BROADCAST_OTHERS = (3 << 18), 72 | }; 73 | 74 | // Write a register. 75 | void APIC::WriteReg(uint32_t reg, uint32_t value) 76 | { 77 | WritePhys(GetLapicBasePhys() + reg, value); 78 | } 79 | 80 | // Read a register. 81 | uint32_t APIC::ReadReg(uint32_t reg) 82 | { 83 | return ReadPhys(GetLapicBasePhys() + reg); 84 | } 85 | 86 | uintptr_t APIC::GetLapicBasePhys() 87 | { 88 | // Read the specific MSR. 89 | /*uintptr_t msr = ReadMSR(IA32_APIC_BASE_MSR); 90 | 91 | return msr & 0x0000'000F'FFFF'F000; 92 | */ 93 | return 0xFEE0'0000; // really I think it's fine if we hardcode it 94 | } 95 | 96 | uintptr_t APIC::GetLapicBase() 97 | { 98 | return GetHHDMOffset() + GetLapicBasePhys(); 99 | } 100 | 101 | void APIC::EndOfInterrupt() 102 | { 103 | APIC::WriteReg(APIC_REG_EOI, 0); 104 | } 105 | 106 | void APIC::EnsureOn() 107 | { 108 | // Use the CPUID instruction. 109 | uint32_t eax, edx; 110 | ASM("cpuid":"=d"(edx),"=a"(eax):"a"(1)); 111 | 112 | if (~edx & (1 << 9)) 113 | { 114 | LogMsg("APIC is off. An APIC must be present before running NanoShell64."); 115 | IdleLoop(); 116 | } 117 | } 118 | 119 | extern "C" void Arch_APIC_OnSpInterrupt_Asm(); 120 | extern "C" void Arch_APIC_OnSpInterrupt() 121 | { 122 | APIC::EndOfInterrupt(); 123 | } 124 | 125 | extern "C" void Arch_APIC_OnIPInterrupt_Asm(); 126 | extern "C" void Arch_APIC_OnIPInterrupt() 127 | { 128 | using namespace Arch; 129 | 130 | // Get the current CPU. 131 | CPU* pCpu = CPU::GetCurrent(); 132 | 133 | // Tell it that we've IPI'd. 134 | pCpu->OnIPI(); 135 | 136 | // Send an EOI. 137 | APIC::EndOfInterrupt(); 138 | 139 | // since a CPU::SendIPI() call was needed to reach this point, unlock the IPI spinlock. 
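// (The lock/unlock pair straddles two CPUs: the sender takes the target's
// IPI spinlock in CPU::SendIPI() before writing the ICR, and the target
// releases it here, so a second IPI cannot overwrite m_ipiType before the
// first one has been handled.)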
140 | pCpu->UnlockIpiSpinlock(); 141 | } 142 | 143 | extern "C" void Arch_APIC_OnTimerInterrupt_Asm(); 144 | extern "C" void Arch_APIC_OnTimerInterrupt(Registers* pRegs) 145 | { 146 | using namespace Arch; 147 | 148 | // Get the current CPU. 149 | CPU* pCpu = CPU::GetCurrent(); 150 | 151 | // make sure to let ourselves know that right now, interrupts are disabled. 152 | pCpu->InterruptsEnabledRaw() = false; 153 | 154 | // Tell it that we've IPI'd. 155 | pCpu->OnTimerIRQ(pRegs); 156 | 157 | // Send an EOI. 158 | APIC::EndOfInterrupt(); 159 | 160 | // go back to the old state 161 | pCpu->InterruptsEnabledRaw() = (pRegs->rflags & C_RFLAGS_INTERRUPT_FLAG); 162 | } 163 | 164 | void APIC::Init() 165 | { 166 | if (CPU::AreWeBootstrap()) 167 | { 168 | LogMsg("Disabling legacy PIC(s)."); 169 | 170 | constexpr uint16_t picComdPrim = 0x20, picComdSecd = 0xA0, picDataPrim = 0x21, picDataSecd = 0xA1; 171 | 172 | // Start legacy PIC init sequence. 173 | WriteByte(picComdPrim, 0x11); 174 | WriteByte(picComdSecd, 0x11); 175 | // Re-map IRQs. 176 | WriteByte(picDataPrim, 0x20); 177 | WriteByte(picDataSecd, 0x28); 178 | // Do other fancy stuff 179 | WriteByte(picDataPrim, 4); 180 | WriteByte(picDataSecd, 2); 181 | WriteByte(picDataPrim, 1); 182 | WriteByte(picDataSecd, 1); 183 | // Mask all interrupts. 184 | WriteByte(picDataPrim, 0xFF); 185 | WriteByte(picDataSecd, 0xFF); 186 | } 187 | 188 | // Set this CPU's IDT entry. 189 | CPU::GetCurrent()->SetInterruptGate(IDT::INT_SPURIOUS, uintptr_t(Arch_APIC_OnSpInterrupt_Asm)); 190 | 191 | // Enable the spurious vector interrupt. 192 | WriteReg(APIC_REG_SPURIOUS, IDT::INT_SPURIOUS | 0x100); 193 | } 194 | 195 | void CPU::SendIPI(eIpiType type) 196 | { 197 | // The destination is 'this'. The sender (us) is 'pSenderCPU'. 198 | CPU * pSenderCPU = GetCurrent(); 199 | 200 | // Wait for any pending IPIs to finish on this CPU. 201 | while (APIC::ReadReg(APIC_REG_ICR0) & APIC_ICR0_DELIVERY_STATUS) Spinlock::SpinHint(); 202 | 203 | m_ipiSpinlock.Lock(); 204 | 205 | m_ipiType = type; 206 | m_ipiSenderID = pSenderCPU->m_processorID; 207 | 208 | // Write the destination CPU's LAPIC ID. 209 | APIC::WriteReg(APIC_REG_ICR1, m_pSMPInfo->lapic_id << 24); 210 | 211 | // Write the interrupt vector. 212 | APIC::WriteReg(APIC_REG_ICR0, IDT::INT_IPI | APIC_ICR1_SINGLE); 213 | 214 | // The CPU in question will unlock the IPI spinlock. 215 | } 216 | 217 | PolledSleepFunc g_PolledSleepFunc = PIT::PolledSleep; 218 | 219 | void APIC::SetPolledSleepFunc(PolledSleepFunc func) 220 | { 221 | g_PolledSleepFunc = func; 222 | } 223 | 224 | constexpr uint64_t C_FEMTOS_TO_NANOS = 1'000'000; 225 | constexpr uint64_t C_MILLIS_TO_NANOS = 1'000'000; 226 | 227 | void APIC::CalibrateHPET(uint64_t &apicOut, uint64_t &tscOut) 228 | { 229 | uint64_t avg_apic = 0; 230 | uint64_t avg_tsc = 0; 231 | constexpr int nRuns = 16; 232 | constexpr int nMs = 20; 233 | 234 | for (int i = 0; i < nRuns; i++) 235 | { 236 | // Set APIC init counter to -1. 237 | APIC::WriteReg(APIC_REG_TMR_INIT_CNT, 0xFFFFFFFF); 238 | 239 | // Sleep for X ms. 
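// The HPET reports its counter period in femtoseconds, so the tick target
// below is: nMs * 1'000'000 ns/ms * 1'000'000 fs/ns / period_fs.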
240 | uint64_t time = nMs * C_MILLIS_TO_NANOS * C_FEMTOS_TO_NANOS / HPET::GetCounterClockPeriod(); 241 | 242 | uint64_t tscThen = TSC::Read(); 243 | uint64_t hpetThen = HPET::GetRawTickCount(); 244 | uint64_t target = hpetThen + time; 245 | 246 | while (HPET::GetRawTickCount() < target) 247 | Spinlock::SpinHint(); 248 | 249 | APIC::WriteReg(APIC_REG_LVT_TIMER, APIC_LVT_INT_MASKED); 250 | uint64_t tscNow = TSC::Read(); 251 | uint64_t hpetNow = HPET::GetRawTickCount(); 252 | uint64_t apicDiff = 0xFFFFFFFF - APIC::ReadReg(APIC_REG_TMR_CURR_CNT); 253 | 254 | uint64_t tscDiff = tscNow - tscThen; 255 | uint64_t hpetDiff = hpetNow - hpetThen; 256 | 257 | // rescale the TSC and APIC diffs by the HPET diff 258 | tscDiff = tscDiff * time / hpetDiff; 259 | apicDiff = apicDiff * time / hpetDiff; 260 | 261 | avg_apic += apicDiff; 262 | avg_tsc += tscDiff; 263 | } 264 | 265 | avg_apic /= nRuns * nMs; 266 | avg_tsc /= nRuns * nMs; 267 | 268 | apicOut = avg_apic; 269 | tscOut = avg_tsc; 270 | } 271 | 272 | // Despite being in the APIC namespace this also calibrates the TSC. Wow! 273 | void APIC::CalibrateTimer(uint64_t &apicOut, uint64_t &tscOut) 274 | { 275 | // Tell the APIC timer to use divider 16. 276 | APIC::WriteReg(APIC_REG_TMR_DIV_CFG, C_APIC_TIMER_DIVIDE_BY_16); 277 | 278 | if (g_PolledSleepFunc == HPET::PolledSleep) 279 | { 280 | return APIC::CalibrateHPET(apicOut, tscOut); 281 | } 282 | 283 | uint64_t avg_apic = 0; 284 | uint64_t avg_tsc = 0; 285 | 286 | constexpr int nRuns = 16; 287 | constexpr int nMs = 20; 288 | 289 | for (int i = 0; i < nRuns; i++) 290 | { 291 | uint64_t tscStart = TSC::Read(); 292 | 293 | // Set APIC init counter to -1. 294 | APIC::WriteReg(APIC_REG_TMR_INIT_CNT, 0xFFFFFFFF); 295 | 296 | // Sleep for X ms. 297 | // subtract a small amount of time to compensate for the speed difference that also calibrating the TSC adds. 298 | g_PolledSleepFunc(nMs*1000*1000); 299 | 300 | uint64_t ticksPerMsTsc = TSC::Read() - tscStart; 301 | 302 | // Stop the APIC timer. 303 | APIC::WriteReg(APIC_REG_LVT_TIMER, APIC_LVT_INT_MASKED); 304 | 305 | // Read the current count. 306 | uint64_t ticksPerMsApic = 0xFFFFFFFF - APIC::ReadReg(APIC_REG_TMR_CURR_CNT); 307 | //SLogMsg("CPU %d Run %d: %lld APIC ticks/ms, %lld TSC ticks/ms", CPU::GetCurrent()->ID(), i, ticksPerMsApic, ticksPerMsTsc); 308 | 309 | avg_apic += ticksPerMsApic; 310 | avg_tsc += ticksPerMsTsc; 311 | } 312 | 313 | avg_apic /= nRuns * nMs; 314 | avg_tsc /= nRuns * nMs; 315 | 316 | apicOut = avg_apic; 317 | tscOut = avg_tsc; 318 | } 319 | 320 | void APIC::ScheduleInterruptIn(uint64_t nanoseconds) 321 | { 322 | if (nanoseconds >= 1'000'000'000'000ULL) 323 | { 324 | SLogMsg("APIC::ScheduleInterruptIn: nanoseconds value too big (%lld, %p)", nanoseconds, __builtin_return_address(0)); 325 | } 326 | 327 | uint64_t lvtTimerReg = 0; 328 | 329 | // bit 16: masked. That'll be 0 330 | 331 | // first 8 bits are the interrupt vector: 332 | lvtTimerReg |= IDT::INT_APIC_TIMER; 333 | 334 | // This is redundant since the oneshot mode's value is 0, but hey, this is for clarity 335 | lvtTimerReg |= C_APIC_TIMER_MODE_ONESHOT; 336 | 337 | CPU* pCpu = CPU::GetCurrent(); 338 | 339 | bool bState = pCpu->SetInterruptsEnabled(false); 340 | 341 | // get the new timer value: 342 | uint64_t timerVal = pCpu->GetLapicTicksPerMS() * nanoseconds / C_MILLIS_TO_NANOS; 343 | 344 | // set the count: 345 | APIC::WriteReg(APIC_REG_TMR_INIT_CNT, timerVal); 346 | APIC::WriteReg(APIC_REG_LVT_TIMER, lvtTimerReg); 347 | 348 | // and off it goes ! 
349 | 350 | pCpu->SetInterruptsEnabled(bState); 351 | } 352 | -------------------------------------------------------------------------------- /source/ax86_64/Arch.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // ax86_64/Arch.cpp - Creation date: 05/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This module implements the x86_64 architecture specific 12 | // functions that don't work on a particular CPU object. 13 | // (those reside in CPU.cpp) 14 | // 15 | // *************************************************************** 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | Atomic g_CPUsInitialized; 22 | 23 | namespace Arch 24 | { 25 | 26 | volatile limine_hhdm_request g_HHDMRequest = 27 | { 28 | .id = LIMINE_HHDM_REQUEST, 29 | .revision = 0, 30 | .response = NULL, 31 | }; 32 | 33 | limine_hhdm_response* CPU::GetHHDMResponse() 34 | { 35 | return g_HHDMRequest.response; 36 | } 37 | 38 | uintptr_t GetHHDMOffset() 39 | { 40 | return g_HHDMRequest.response->offset; 41 | } 42 | 43 | void Invalidate(uintptr_t ptr) 44 | { 45 | ASM("invlpg (%0)"::"r"(ptr):"memory"); 46 | } 47 | 48 | void WritePhys(uintptr_t ptr, uint32_t thing) 49 | { 50 | *((uint32_t*)(GetHHDMOffset() + ptr)) = thing; 51 | } 52 | 53 | uint32_t ReadPhys(uintptr_t ptr) 54 | { 55 | return *((uint32_t*)(GetHHDMOffset() + ptr)); 56 | } 57 | 58 | void Halt() 59 | { 60 | ASM("hlt":::"memory"); 61 | } 62 | 63 | void IdleLoop() 64 | { 65 | for (;;) 66 | Halt(); 67 | } 68 | 69 | uint8_t ReadByte(uint16_t port) 70 | { 71 | uint8_t rv; 72 | ASM("inb %1, %0" : "=a" (rv) : "dN" (port)); 73 | return rv; 74 | } 75 | 76 | void WriteByte(uint16_t port, uint8_t data) 77 | { 78 | ASM("outb %0, %1"::"a"((uint8_t)data),"Nd"((uint16_t)port)); 79 | } 80 | 81 | uintptr_t ReadCR3() 82 | { 83 | uintptr_t cr3 = 0; 84 | ASM("movq %%cr3, %0":"=r"(cr3)); 85 | return cr3; 86 | } 87 | 88 | void WriteCR3(uintptr_t cr3) 89 | { 90 | ASM("movq %0, %%cr3"::"r"(cr3)); 91 | } 92 | 93 | void WriteMSR(uint32_t msr, uint64_t value) 94 | { 95 | uint32_t edx = uint32_t(value >> 32); 96 | uint32_t eax = uint32_t(value); 97 | 98 | ASM("wrmsr"::"d"(edx),"a"(eax),"c"(msr)); 99 | } 100 | 101 | uint64_t ReadMSR(uint32_t msr) 102 | { 103 | uint32_t edx, eax; 104 | 105 | ASM("rdmsr":"=d"(edx),"=a"(eax):"c"(msr)); 106 | 107 | return uint64_t(edx) << 32 | eax; 108 | } 109 | 110 | // Since the pointer to the structure is passed into RDI, assuming 111 | // the x86_64 System V ABI, the first argument corresponds to RDI. 112 | void CPU::Start(limine_smp_info* pInfo) 113 | { 114 | CPU* pCpu = (CPU*)pInfo->extra_argument; 115 | 116 | pCpu->Init(); 117 | 118 | if (!pCpu->m_bIsBSP) 119 | pCpu->Go(); 120 | } 121 | 122 | volatile limine_smp_request g_SMPRequest = 123 | { 124 | .id = LIMINE_SMP_REQUEST, 125 | .revision = 0, 126 | .response = NULL, 127 | .flags = 0, 128 | }; 129 | 130 | // Initialize the CPUs from the bootloader's perspective. 
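// A minimal sketch of the hand-off, assuming Limine's SMP protocol: each AP is parked
// in a loop polling its limine_smp_info::goto_address field, so releasing a core is just
//
//   info->extra_argument = (uint64_t)pContext;                                  // context for the AP
//   __atomic_store_n(&info->goto_address, &CPU::Start, ATOMIC_DEFAULT_MEMORDER); // AP jumps to Start(info)
//
// InitAsBSP() below performs exactly this for every non-bootstrap core.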
131 | 
132 | limine_smp_response* CPU::GetSMPResponse()
133 | {
134 | return g_SMPRequest.response;
135 | }
136 | 
137 | uint64_t CPU::GetCount()
138 | {
139 | return GetSMPResponse()->cpu_count;
140 | }
141 | 
142 | void CPU::InitAsBSP()
143 | {
144 | limine_smp_response* pSMP = g_SMPRequest.response;
145 | 
146 | // Initialize all the CPUs in series.
147 | for (uint64_t i = 0; i < pSMP->cpu_count; i++)
148 | {
149 | bool bIsBSP = pSMP->bsp_lapic_id == pSMP->cpus[i]->lapic_id;
150 | 
151 | // Note: The reason I don't just overload `operator new' with the eternal heap is because
152 | // I really do not want to do that.
153 | void* pCpuInfo = EternalHeap::Allocate(sizeof(CPU));
154 | 
155 | pSMP->cpus[i]->extra_argument = (uint64_t)pCpuInfo;
156 | 
157 | // Fill in the pCpuInfo structure by running a placement new on it.
158 | new (pCpuInfo) CPU (i, pSMP->cpus[i], bIsBSP);
159 | 
160 | // Run the CPU. The BSP starts synchronously; APs are released by writing their goto_address.
161 | 
162 | if (bIsBSP)
163 | {
164 | Start(pSMP->cpus[i]);
165 | }
166 | else
167 | {
168 | __atomic_store_n(&pSMP->cpus[i]->goto_address, &Start, ATOMIC_DEFAULT_MEMORDER);
169 | }
170 | }
171 | 
172 | while (g_CPUsInitialized.Load(ATOMIC_MEMORD_RELAXED) != (int)pSMP->cpu_count)
173 | {
174 | Spinlock::SpinHint();
175 | }
176 | 
177 | LogMsg("All %llu processors have been initialized.", pSMP->cpu_count);
178 | 
179 | CPU::GetCurrent()->Go();
180 | }
181 | 
182 | CPU* CPU::GetCPU(uint64_t pid)
183 | {
184 | limine_smp_response* resp = g_SMPRequest.response;
185 | 
186 | if (pid >= resp->cpu_count) return NULL;
187 | 
188 | return (CPU*)(resp->cpus[pid]->extra_argument);
189 | }
190 | 
191 | uint64_t GetTickCount_TSC()
192 | {
193 | auto pCpu = CPU::GetCurrent();
194 | 
195 | uint64_t ticksPerMS = pCpu->GetTSCTicksPerMS();
196 | uint64_t startingTSC = pCpu->GetStartingTSC();
197 | 
198 | // Calibration hasn't run yet, so we can't convert ticks to time.
199 | if (ticksPerMS == 0) return 0;
200 | 
201 | uint64_t timeDiff = TSC::Read() - startingTSC;
202 | return timeDiff * 1'000'000 / ticksPerMS; // ticks / (ticks per ms) gives ms; the 10^6 factor converts to nanoseconds
203 | }
204 | 
205 | uint64_t GetTickCount_HPET()
206 | {
207 | return Arch::HPET::GetTickCount();
208 | }
209 | 
210 | static GetTickCountMethod g_GetTickCountMethod = GetTickCount_TSC;
211 | 
212 | void SetGetTickCountMethod(GetTickCountMethod ptr)
213 | {
214 | g_GetTickCountMethod = ptr;
215 | }
216 | 
217 | uint64_t GetTickCount()
218 | {
219 | return g_GetTickCountMethod();
220 | }
221 | 
222 | }
223 | 
--------------------------------------------------------------------------------
/source/ax86_64/CPU.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // CPU.cpp - Creation date: 05/01/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // Module description:
11 | // This module implements the CPU object specific functions.
12 | // 13 | // *************************************************************** 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | extern Atomic g_CPUsInitialized; // Arch.cpp 20 | 21 | Spinlock g_CalibrateSpinlock; 22 | 23 | extern "C" void CPU_OnPageFault_Asm(); 24 | extern "C" void Arch_APIC_OnIPInterrupt_Asm(); 25 | extern "C" void Arch_APIC_OnTimerInterrupt_Asm(); 26 | extern "C" void CPU_OnPageFault(Registers* pRegs) 27 | { 28 | Arch::CPU::GetCurrent()->OnPageFault(pRegs); 29 | } 30 | 31 | void Arch::CPU::SetupGDTAndIDT() 32 | { 33 | // Re-load the GDT. 34 | LoadGDT(); 35 | 36 | // Setup the IDT.... 37 | SetInterruptGate(IDT::INT_PAGE_FAULT, uintptr_t(CPU_OnPageFault_Asm)); 38 | SetInterruptGate(IDT::INT_IPI, uintptr_t(Arch_APIC_OnIPInterrupt_Asm)); 39 | SetInterruptGate(IDT::INT_APIC_TIMER, uintptr_t(Arch_APIC_OnTimerInterrupt_Asm)); 40 | //SetInterruptGate(0, uintptr_t(Arch_APIC_OnTimerInterrupt_Asm)); 41 | 42 | // Load the IDT. 43 | LoadIDT(); 44 | } 45 | 46 | static Atomic g_bIsBSPInitted(false); 47 | 48 | void Arch::CPU::OnBSPInitialized() 49 | { 50 | g_bIsBSPInitted.Store(true); 51 | } 52 | 53 | void Arch::CPU::WaitForBSP() 54 | { 55 | while (!g_bIsBSPInitted.Load()) 56 | Spinlock::SpinHint(); 57 | } 58 | 59 | bool Arch::CPU::SetInterruptsEnabled(bool b) 60 | { 61 | if (GetCurrent() != this) 62 | { 63 | SLogMsg("Error: Arch::CPU::SetInterruptsEnabled can only be called on the same CPU it's modifying"); 64 | return false; 65 | } 66 | 67 | bool x = m_InterruptsEnabled; 68 | 69 | m_InterruptsEnabled = b; 70 | 71 | if (b) 72 | ASM("sti":::"memory"); 73 | else 74 | ASM("cli":::"memory"); 75 | 76 | return x; 77 | } 78 | 79 | void Arch::CPU::CalibrateTimer() 80 | { 81 | // Since the calibration operation needs to be performed carefully, we can't have 82 | // interrupts enabled, or anyone else messing with the PIT. Take ownership of the 83 | // PIT now. 84 | LockGuard lg(g_CalibrateSpinlock); 85 | 86 | APIC::CalibrateTimer(m_LapicTicksPerMS, m_TscTicksPerMS); 87 | } 88 | 89 | void Arch::CPU::Init() 90 | { 91 | bool bIsBSP = GetSMPResponse()->bsp_lapic_id == m_pSMPInfo->lapic_id; 92 | 93 | using namespace VMM; 94 | 95 | if (!bIsBSP) 96 | { 97 | WaitForBSP(); 98 | } 99 | 100 | // Allocate a small stack 101 | m_pIsrStack = EternalHeap::Allocate(C_INTERRUPT_STACK_SIZE); 102 | 103 | // Set it in the TSS 104 | m_gdt.m_tss.m_rsp[0] = m_gdt.m_tss.m_rsp[1] = m_gdt.m_tss.m_rsp[2] = uint64_t(m_pIsrStack); 105 | 106 | // Write the GS base MSR. 107 | WriteMSR(Arch::eMSR::KERNEL_GS_BASE, uint64_t(this)); 108 | 109 | SetupGDTAndIDT(); 110 | 111 | if (bIsBSP) 112 | { 113 | KernelHeap::Init(); 114 | } 115 | 116 | // Clone the page mapping and assign it to this CPU. This will 117 | // ditch the lower half mapping that the bootloader has provided us. 118 | m_pPageMap = PageMapping::GetFromCR3()->Clone(false); 119 | m_pPageMap->SwitchTo(); 120 | 121 | if (bIsBSP) 122 | { 123 | RSD::Load(); 124 | } 125 | 126 | // Initialize the APIC on this CPU. 127 | APIC::Init(); 128 | 129 | // Calibrate its timer. 130 | CalibrateTimer(); 131 | 132 | // The X will be replaced. 133 | LogMsg("Processor #%d has come online.", m_processorID); 134 | 135 | g_CPUsInitialized.FetchAdd(1); 136 | 137 | // Enable interrupts. 138 | SetInterruptsEnabled(true); 139 | 140 | // Initialize our scheduler. 
141 | m_Scheduler.Init();
142 | 
143 | if (bIsBSP)
144 | {
145 | OnBSPInitialized();
146 | }
147 | }
148 | 
149 | Atomic g_CPUsReady;
150 | 
151 | void Arch::CPU::Go()
152 | {
153 | auto* pResponse = GetSMPResponse();
154 | 
155 | int cpuCount = int(pResponse->cpu_count);
156 | 
157 | // TODO: If the bootstrap processor, do some other stuff, like spawn an initial thread
158 | if (m_bIsBSP)
159 | {
160 | // for each CPU, check whether the LAPIC ticks per ms are very close between CPUs
161 | int64_t LapicTicksPerMS_Avg = 0;
162 | int64_t TscTicksPerMS_Avg = 0;
163 | 
164 | for (int i = 0; i < cpuCount; i++)
165 | {
166 | CPU *pCpu = (CPU*)(pResponse->cpus[i]->extra_argument);
167 | 
168 | LapicTicksPerMS_Avg += pCpu->m_LapicTicksPerMS;
169 | TscTicksPerMS_Avg += pCpu->m_TscTicksPerMS;
170 | }
171 | 
172 | LapicTicksPerMS_Avg /= cpuCount;
173 | TscTicksPerMS_Avg /= cpuCount;
174 | 
175 | int64_t diffApic = LapicTicksPerMS_Avg - m_LapicTicksPerMS;
176 | int64_t diffTsc = TscTicksPerMS_Avg - m_TscTicksPerMS;
177 | 
178 | // if it's within 1000 ticks of tolerance...
179 | if (diffApic <= 1000 && diffApic >= -1000)
180 | {
181 | // just set the rest of the CPUs' LAPIC timer rate to the average;
182 | for (int i = 0; i < cpuCount; i++)
183 | {
184 | CPU *pCpu = (CPU*)(pResponse->cpus[i]->extra_argument);
185 | 
186 | pCpu->m_LapicTicksPerMS = LapicTicksPerMS_Avg;
187 | }
188 | }
189 | // if it's within 1000 ticks of tolerance...
190 | if (diffTsc <= 1000 && diffTsc >= -1000)
191 | {
192 | // just set the rest of the CPUs' TSC rate to the average;
193 | for (int i = 0; i < cpuCount; i++)
194 | {
195 | CPU *pCpu = (CPU*)(pResponse->cpus[i]->extra_argument);
196 | 
197 | pCpu->m_TscTicksPerMS = TscTicksPerMS_Avg;
198 | }
199 | }
200 | 
201 | for (int i = 0; i < cpuCount; i++)
202 | {
203 | CPU *pCpu = (CPU*)(pResponse->cpus[i]->extra_argument);
204 | 
205 | LogMsg("CPU %d has APIC tick rate %lld, TSC tick rate %lld", i, pCpu->m_LapicTicksPerMS, pCpu->m_TscTicksPerMS);
206 | }
207 | 
208 | LogMsg("I am the bootstrap processor, and I will soon spawn an initial task instead of printing this!");
209 | 
210 | // Since all other processors are running, try sending an IPI to processor 1.
211 | CPU* p1 = GetCPU(1);
212 | 
213 | if (p1)
214 | p1->SendIPI(eIpiType::HELLO);
215 | 
216 | //KernelPanic("Hello there %d", 1337);
217 | }
218 | 
219 | g_CPUsReady.FetchAdd(1);
220 | 
221 | while (g_CPUsReady.Load(ATOMIC_MEMORD_RELAXED) < cpuCount)
222 | Spinlock::SpinHint();
223 | 
224 | m_StartingTSC = TSC::Read();
225 | 
226 | Thread::Yield();
227 | }
228 | 
229 | Arch::CPU* Arch::CPU::GetCurrent()
230 | {
231 | return (CPU*)ReadMSR(Arch::eMSR::KERNEL_GS_BASE);
232 | }
233 | 
234 | // Set the CPU's interrupt gate to the following handler.
235 | void Arch::CPU::SetInterruptGate(uint8_t intNum, uintptr_t fnHandler, uint8_t ist, uint8_t dpl)
236 | {
237 | m_idt.SetEntry(intNum, IDT::Entry(fnHandler, ist, dpl));
238 | }
239 | 
240 | void Arch::CPU::OnTimerIRQ(Registers* pRegs)
241 | {
242 | m_Scheduler.OnTimerIRQ(pRegs);
243 | }
244 | 
245 | Atomic g_panickedCpus { 0 };
246 | 
247 | void Arch::CPU::OnIPI()
248 | {
249 | eIpiType type = m_ipiType;
250 | 
251 | switch (type)
252 | {
253 | case eIpiType::NONE: break;
254 | case eIpiType::HELLO:
255 | {
256 | LogMsg("Got IPI! 
(I am processor %u)", m_processorID); 257 | break; 258 | } 259 | case eIpiType::PANIC: 260 | { 261 | SLogMsg("Processor %u got panic IPI from someone.", m_processorID); 262 | 263 | g_panickedCpus.FetchAdd(1); 264 | 265 | SetInterruptsEnabled(false); 266 | Arch::IdleLoop(); 267 | } 268 | } 269 | } 270 | -------------------------------------------------------------------------------- /source/ax86_64/GDT.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // GDT.cpp - Creation date: 05/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This module implements the GDT loader. 12 | // 13 | // *************************************************************** 14 | #include 15 | 16 | // This GDT will be cloned by every CPU, so that each CPU can have its own TSS. 17 | static UNUSED Arch::GDT g_InitialGDT = 18 | { 19 | { 20 | 0x0000000000000000, // Null descriptor 21 | 0x00009a000000ffff, // 16-bit code 22 | 0x000093000000ffff, // 16-bit data 23 | 0x00cf9a000000ffff, // 32-bit ring-0 code 24 | 0x00cf93000000ffff, // 32-bit ring-0 data 25 | 0x00af9b000000ffff, // 64-bit ring-0 code 26 | 0x00af93000000ffff, // 64-bit ring-0 data 27 | 0x00affb000000ffff, // 64-bit ring-3 code 28 | 0x00aff3000000ffff, // 64-bit ring-3 data 29 | }, 30 | { 31 | 0, // reserved0 32 | { 0, 0, 0 }, // RSP0-2 33 | 0, // reserved1 34 | { 0, 0, 0, 0, 0, 0, 0 }, // IST1-7 35 | 0, // reserved2 36 | 0, // reserved 3 37 | 0xFFFF, // I/O bitmap offset. Setting it to a value like this means that we do not have an IOBP. 38 | }, 39 | }; 40 | 41 | // This function is responsible for loading the GDT for this CPU. 42 | void Arch::CPU::LoadGDT() 43 | { 44 | // Setup the GDT. 45 | m_gdt = g_InitialGDT; 46 | 47 | // Setup a descriptor. 48 | struct 49 | { 50 | uint16_t m_gdtLimit; 51 | uint64_t m_gdtBase; 52 | } PACKED gdtr; 53 | 54 | gdtr.m_gdtLimit = sizeof(Arch::GDT) - 1; 55 | gdtr.m_gdtBase = uint64_t(&m_gdt); 56 | 57 | // Note: For now we do not need to reload segments such as CS and DS. 58 | // We will need to however, once we remove the 16- and 32-bit segments. 59 | ASM("lgdt %0"::"m"(gdtr)); 60 | } 61 | -------------------------------------------------------------------------------- /source/ax86_64/HPET.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // ax86_64/HPET.cpp - Creation date: 09/05/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2023 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This module implements the HPET timer. 
12 | // 13 | // *************************************************************** 14 | 15 | // Note: Huge thanks to https://github.com/DeanoBurrito/northport 16 | 17 | #include 18 | 19 | using namespace Arch; 20 | 21 | struct HPETAddress 22 | { 23 | uint8_t m_AddressSpaceID; 24 | uint8_t m_RegisterBitWidth; 25 | uint8_t m_RegisterBitOffset; 26 | uint8_t m_Reserved; 27 | uint64_t m_Address; 28 | } 29 | PACKED; 30 | 31 | struct HPETTable 32 | { 33 | RSD::Table m_header; 34 | 35 | // HPET specific data. 36 | uint8_t m_HardwareRevID; 37 | 38 | uint8_t m_ComparatorCount : 5; 39 | uint8_t m_CounterSize : 1; 40 | uint8_t m_Reserved0 : 1; 41 | uint8_t m_LegacyReplacement : 1; 42 | 43 | uint16_t m_PCIVendorID; 44 | 45 | HPETAddress m_Address; 46 | 47 | uint8_t m_HPETNumber; 48 | uint16_t m_MinimumTick; 49 | uint8_t m_PageProtection; 50 | } 51 | PACKED; 52 | 53 | union HPETGeneralCaps 54 | { 55 | struct 56 | { 57 | unsigned m_RevID : 8; 58 | unsigned m_NumTimCap : 5; 59 | unsigned m_CountSizeCap : 1; 60 | unsigned m_Reserved : 1; 61 | unsigned m_LegRouteCap : 1; 62 | unsigned m_VendorID : 16; 63 | unsigned m_CounterClockPeriod : 32; 64 | } 65 | PACKED; 66 | 67 | uint64_t m_Contents; 68 | }; 69 | 70 | struct HPETTimerInfo 71 | { 72 | uint64_t m_ConfigAndCaps; 73 | uint64_t m_ComparatorValue; 74 | uint64_t m_FSBInterruptRoute; 75 | uint64_t m_reserved; 76 | }; 77 | 78 | struct HPETRegisters 79 | { 80 | uint64_t m_GeneralCapsRegister; 81 | uint64_t m_reserved8; 82 | uint64_t m_GeneralConfig; 83 | uint64_t m_reserved18; 84 | uint64_t m_GeneralIrqStatus; 85 | uint64_t m_reserved28; 86 | uint64_t m_reservedArr[24]; 87 | uint64_t m_CounterValue; 88 | uint64_t m_reservedF8; 89 | HPETTimerInfo m_timers[32]; 90 | }; 91 | 92 | constexpr uint64_t C_HPET_GEN_CFG_ENABLE_CNF = BIT(0); // Overall Enable. This bit MUST be set to allow any of the timers to generate interrupts and increment the main counter. 93 | constexpr uint64_t C_HPET_GEN_CFG_LEG_RT_CNF = BIT(1); // supports legacy replacement route 94 | 95 | HPETTable g_HpetTable; 96 | volatile HPETRegisters* g_pHpetRegisters; 97 | HPETGeneralCaps g_HpetGeneralCaps; 98 | 99 | // A nanosecond is 10^6, or 1'000'000, femtoseconds 100 | constexpr uint64_t C_FEMTOS_TO_NANOS = 1'000'000; 101 | constexpr uint64_t C_HPET_MAX_PERIOD = 100'000'000; // Note: The spec mandates 05F5E100h (which looks like a random magic value but it's just this) 102 | constexpr uint64_t C_HPET_MIN_PERIOD = 100'000; // 1/10 of a nanosecond. Really doubt any kind of timer runs that fast 103 | 104 | uint64_t HPET::GetRawTickCount() 105 | { 106 | // note: this contacts the system bus, so it's not as fast as the TSC. 
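// (Also worth noting: if the HPET lacks the 64-bit counter capability, the upper half of
// this register is not implemented and the count wraps every 2^32 ticks; see the
// CountSizeCap warning below.)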
107 | return g_pHpetRegisters->m_CounterValue; 108 | } 109 | 110 | uint64_t HPET::GetCounterClockPeriod() 111 | { 112 | return g_HpetGeneralCaps.m_CounterClockPeriod; 113 | } 114 | 115 | uint64_t HPET::GetTickCount() 116 | { 117 | // note: constant divide should be optimized to a fixed point multiply combo 118 | return uint64_t(g_pHpetRegisters->m_CounterValue) * g_HpetGeneralCaps.m_CounterClockPeriod / C_FEMTOS_TO_NANOS; 119 | } 120 | 121 | void HPET::PolledSleep(uint64_t nanoseconds) 122 | { 123 | uint64_t time = nanoseconds * C_FEMTOS_TO_NANOS / g_HpetGeneralCaps.m_CounterClockPeriod; 124 | 125 | uint64_t current_time = GetRawTickCount(); 126 | uint64_t target = current_time + time; 127 | 128 | while (GetRawTickCount() < target) 129 | Spinlock::SpinHint(); 130 | } 131 | 132 | void HPET::Found(RSD::Table* pTable) 133 | { 134 | memcpy(&g_HpetTable, pTable, sizeof g_HpetTable); 135 | 136 | using namespace VMM; 137 | 138 | // map it in. 139 | PageMapping* pPM = PageMapping::GetFromCR3(); 140 | 141 | pPM->MapPage(C_HPET_MAP_ADDRESS, PageEntry(g_HpetTable.m_Address.m_Address, PE_PRESENT | PE_READWRITE | PE_SUPERVISOR | PE_EXECUTEDISABLE | PE_CACHEDISABLE)); 142 | 143 | g_pHpetRegisters = (HPETRegisters*)C_HPET_MAP_ADDRESS; 144 | 145 | g_HpetGeneralCaps.m_Contents = g_pHpetRegisters->m_GeneralCapsRegister; 146 | 147 | // Read and dump the general caps and ID registers. 148 | LogMsg("HPET Capabilities: %Q vendor ID: %W", g_HpetGeneralCaps.m_Contents, g_HpetGeneralCaps.m_VendorID); 149 | LogMsg("Counter clock period: %d femtoseconds per tick (%d nanoseconds)", g_HpetGeneralCaps.m_CounterClockPeriod, g_HpetGeneralCaps.m_CounterClockPeriod / C_FEMTOS_TO_NANOS); 150 | 151 | if (g_HpetGeneralCaps.m_CounterClockPeriod > C_HPET_MAX_PERIOD) 152 | { 153 | LogMsg("WARNING: HPET counter clock period is %d, bigger than 100 nanoseconds. The spec doesn't allow that!", g_HpetGeneralCaps.m_CounterClockPeriod); 154 | } 155 | if (g_HpetGeneralCaps.m_CounterClockPeriod < C_HPET_MIN_PERIOD) 156 | { 157 | LogMsg("WARNING: HPET counter clock period is %d, smaller than 1/10 nanoseconds. You may be seeing issues caused by timer overflow.", g_HpetGeneralCaps.m_CounterClockPeriod); 158 | } 159 | 160 | // eh, but we will use the HPET to calibrate the LAPIC timer anyways 161 | if (g_HpetGeneralCaps.m_CountSizeCap != 1) 162 | { 163 | LogMsg("WARNING: HPET cannot operate in 64-bit mode. We cannot handle overflow well..."); 164 | } 165 | 166 | // Enable and reset the main counter. 167 | g_pHpetRegisters->m_GeneralConfig = 0; 168 | g_pHpetRegisters->m_CounterValue = 0; 169 | g_pHpetRegisters->m_GeneralConfig = C_HPET_GEN_CFG_ENABLE_CNF; 170 | 171 | LogMsg("Counter Config: %lld", g_pHpetRegisters->m_GeneralConfig); 172 | 173 | uint64_t lastValue = 0; 174 | 175 | for (int i = 0; i < 20; i++) 176 | { 177 | uint64_t value = g_pHpetRegisters->m_CounterValue; 178 | 179 | if (value <= lastValue) 180 | { 181 | LogMsg("FATAL ERROR: value %llu smaller than lastValue %llu. 
Cannot continue.", value, lastValue); 182 | return; 183 | } 184 | 185 | PIT::PolledSleep(1*1000*1000); 186 | } 187 | 188 | APIC::SetPolledSleepFunc(HPET::PolledSleep); 189 | } 190 | 191 | -------------------------------------------------------------------------------- /source/ax86_64/IDT.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // IDT.cpp - Creation date: 05/01/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This module implements the IDT and its loader. 12 | // 13 | // *************************************************************** 14 | #include 15 | 16 | void Arch::CPU::ClearIDT() 17 | { 18 | memset(&m_idt, 0, sizeof m_idt); 19 | } 20 | 21 | void Arch::CPU::LoadIDT() 22 | { 23 | struct 24 | { 25 | uint16_t m_idtLimit; 26 | uint64_t m_idtBase; 27 | } 28 | PACKED idtr; 29 | 30 | idtr.m_idtLimit = sizeof m_idt - 1; 31 | idtr.m_idtBase = uint64_t(&m_idt); 32 | 33 | ASM("lidt %0"::"m"(idtr)); 34 | } 35 | 36 | 37 | -------------------------------------------------------------------------------- /source/ax86_64/Misc.asm: -------------------------------------------------------------------------------- 1 | ; *************************************************************** 2 | ; Misc.asm - Creation date: 06/01/2023 3 | ; ------------------------------------------------------------- 4 | ; NanoShell64 Copyright (C) 2022 - Licensed under GPL V3 5 | ; 6 | ; *************************************************************** 7 | ; Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | ; *************************************************************** 9 | ; 10 | ; Module description: 11 | ; This module implements interrupt routines and other stuff 12 | ; that's hard to do within the C++ environment. 13 | ; 14 | ; *************************************************************** 15 | 16 | bits 64 17 | 18 | global Arch_APIC_OnTimerInterrupt_Asm 19 | extern Arch_APIC_OnTimerInterrupt 20 | global Arch_APIC_OnIPInterrupt_Asm 21 | extern Arch_APIC_OnIPInterrupt 22 | global Arch_APIC_OnSpInterrupt_Asm 23 | extern Arch_APIC_OnSpInterrupt 24 | 25 | global CPU_OnPageFault_Asm 26 | extern CPU_OnPageFault 27 | 28 | %macro PUSH_ALL 0 29 | push rax 30 | push rbx 31 | push rcx 32 | push rdx 33 | push rsi 34 | push rdi 35 | push rbp 36 | push r8 37 | push r9 38 | push r10 39 | push r11 40 | push r12 41 | push r13 42 | push r14 43 | push r15 44 | mov rax, cr2 45 | push rax 46 | mov ax, gs 47 | push ax 48 | mov ax, fs 49 | push ax 50 | mov ax, es 51 | push ax 52 | %endmacro 53 | 54 | %macro POP_ALL 0 55 | pop ax 56 | mov ax, es 57 | pop ax 58 | mov ax, fs 59 | pop ax 60 | mov ax, gs 61 | pop rax ; we don't need to uselessly pop cr2. 62 | pop r15 63 | pop r14 64 | pop r13 65 | pop r12 66 | pop r11 67 | pop r10 68 | pop r9 69 | pop r8 70 | pop rbp 71 | pop rdi 72 | pop rsi 73 | pop rdx 74 | pop rcx 75 | pop rbx 76 | pop rax 77 | add rsp, 8 ; we're supposed to add 8 to rsp anyways. 78 | %endmacro 79 | 80 | %macro PUSH_ALL_NO_ERC 0 81 | push 0 ; push a fake error code 82 | PUSH_ALL 83 | %endmacro 84 | 85 | ; Swaps GS if needed, pushes DS. 
86 | %macro SWAP_GS_IF_NEEDED 0 87 | ; swap gs if needed 88 | mov ax, ds 89 | push ax 90 | cmp ax, 0x40 91 | jne .noneedtoswap 92 | swapgs 93 | .noneedtoswap: 94 | %endmacro 95 | 96 | ; Swaps GS back if needed, pops DS. 97 | %macro SWAP_GS_BACK_IF_NEEDED 0 98 | pop ax 99 | mov ds, ax 100 | cmp ax, 0x40 101 | jne .noneedtoswap2 102 | swapgs 103 | .noneedtoswap2: 104 | %endmacro 105 | 106 | CPU_OnPageFault_Asm: 107 | PUSH_ALL 108 | SWAP_GS_IF_NEEDED 109 | 110 | mov rdi, rsp 111 | call CPU_OnPageFault 112 | 113 | SWAP_GS_BACK_IF_NEEDED 114 | POP_ALL 115 | iretq 116 | 117 | ; Implements the assembly stub which calls into the C function, which then calls into the C++ function. 118 | Arch_APIC_OnIPInterrupt_Asm: 119 | PUSH_ALL_NO_ERC 120 | SWAP_GS_IF_NEEDED 121 | 122 | mov rdi, rsp 123 | call Arch_APIC_OnIPInterrupt 124 | 125 | SWAP_GS_BACK_IF_NEEDED 126 | POP_ALL 127 | iretq 128 | 129 | ; Implements the assembly stub which calls into the C function, which then calls into the C++ function. 130 | Arch_APIC_OnTimerInterrupt_Asm: 131 | PUSH_ALL_NO_ERC 132 | SWAP_GS_IF_NEEDED 133 | 134 | mov rdi, rsp 135 | call Arch_APIC_OnTimerInterrupt 136 | 137 | SWAP_GS_BACK_IF_NEEDED 138 | POP_ALL 139 | iretq 140 | 141 | ; it's probably fine if we get a spurious interrupt. We don't really need to handle it 142 | Arch_APIC_OnSpInterrupt_Asm: 143 | PUSH_ALL_NO_ERC 144 | SWAP_GS_IF_NEEDED 145 | 146 | mov rdi, rsp 147 | call Arch_APIC_OnSpInterrupt 148 | 149 | SWAP_GS_BACK_IF_NEEDED 150 | POP_ALL 151 | iretq 152 | 153 | ; definition: NO_RETURN void JumpThreadEC( &ThreadExecutionContext ); 154 | global JumpThreadEC 155 | JumpThreadEC: 156 | mov rsp, rdi ; argument #1 157 | mov rax, rsi ; argument #2. This will make SetThreadEC return this value a second time, like setjmp() would. 
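; (rax must come out non-zero so the setjmp-style caller can tell this second "return"
; of SetThreadEC apart from the first one, which returns 0 -- hence the failsafe below.)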
158 | or rax, rax
159 | ; failsafe to make sure we aren't just passing 0
160 | jnz .not_zero
161 | inc rax
162 | .not_zero:
163 | pop rbp
164 | pop rbx
165 | pop r12
166 | pop r13
167 | pop r14
168 | pop r15
169 | iretq
170 | 
171 | ; definition: NO_RETURN void JumpThreadEC2( &ThreadExecutionContext, &ThreadAdditionalRegisters );
172 | global JumpThreadEC2
173 | JumpThreadEC2:
174 | ; mov the execution context into RBX, we'll need it later, and RBX is a free reg which we don't restore from the AR
175 | mov rbx, rdi
176 | ; pop the additional registers
177 | mov rsp, rsi
178 | pop rax
179 | pop rcx
180 | pop rdx
181 | pop rsi
182 | pop rdi
183 | pop r8
184 | pop r9
185 | pop r10
186 | pop r11
187 | pop r12
188 | mov ds, r12
189 | pop r12
190 | mov es, r12
191 | pop r12
192 | mov fs, r12
193 | pop r12
194 | mov gs, r12
195 | ; now pop the execution context
196 | mov rsp, rbx
197 | pop rbp
198 | pop rbx
199 | pop r12
200 | pop r13
201 | pop r14
202 | pop r15
203 | iretq
204 | 
205 | ; definition: RETURNS_TWICE uint64_t SetThreadEC( &ThreadExecutionContext );
206 | global SetThreadEC
207 | SetThreadEC:
208 | mov [rdi], rbp ; preserve the registers that the SysV ABI treats as callee-saved (rbp, rbx, r12-r15)
209 | mov [rdi + 8], rbx
210 | mov [rdi + 16], r12
211 | mov [rdi + 24], r13
212 | mov [rdi + 32], r14
213 | mov [rdi + 40], r15
214 | mov rax, [rsp] ; get RIP after our call insn (return address)
215 | mov [rdi + 48], rax ; rip field of TEC
216 | mov rax, cs ; get the code segment
217 | mov [rdi + 56], rax ; cs field of TEC
218 | pushfq ; get the flags register
219 | mov rax, [rsp] ; get it into rax
220 | add rsp, 8 ; undo pushfq's effects
221 | mov [rdi + 64], rax ; rflags field of TEC
222 | mov rax, rsp ; get the rsp into rax
223 | add rax, 8 ; we need to do this, since we don't want to go to our
224 | ; return address and have the context of this function call...
225 | mov [rdi + 72], rax ; rsp field of the TEC
226 | mov rax, ss ; get the stack segment
227 | mov [rdi + 80], rax ; ss field of TEC
228 | xor rax, rax ; clear RAX for now. When a second return happens,
229 | ; JumpThreadEC will set rax to something different.
230 | ret ; our job here is done.
231 | 
--------------------------------------------------------------------------------
/source/ax86_64/PIT.cpp:
--------------------------------------------------------------------------------
1 | // ***************************************************************
2 | // PIT.cpp - Creation date: 04/05/2023
3 | // -------------------------------------------------------------
4 | // NanoShell64 Copyright (C) 2022 - Licensed under GPL V3
5 | //
6 | // ***************************************************************
7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com)
8 | // ***************************************************************
9 | //
10 | // Module description:
11 | // This module implements a small driver for the PIT, to allow
12 | // the calibration of the LAPIC timer.
13 | //
14 | // ***************************************************************
15 | #include 
16 | #include 
17 | 
18 | constexpr size_t C_PIT_PERIOD = 838; // 1 second / 1193182 Hz = ~838.09 nanoseconds.
19 | constexpr uint16_t C_PIT_DATA_PORT = 0x40; // Channel 0.
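// (0x43, below, is the PIT's mode/command register; PolledSleep() programs it with
// command byte 0x34 = channel 0, lobyte/hibyte access, mode 2, the rate generator.)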
20 | constexpr uint16_t C_PIT_CMD_PORT = 0x43; 21 | 22 | using namespace Arch; 23 | 24 | uint16_t PIT::Read() 25 | { 26 | uint16_t data = ReadByte(C_PIT_DATA_PORT); 27 | 28 | data |= (ReadByte(C_PIT_DATA_PORT) << 8); 29 | 30 | return data; 31 | } 32 | 33 | void PIT::PolledSleep(uint64_t ns) 34 | { 35 | uint64_t periods = ns / C_PIT_PERIOD; 36 | 37 | if (periods >= 0xFFF0) 38 | { 39 | // can't do! 40 | SLogMsg("Error: PIT::Sleep(%llu) is not possible", ns); 41 | return; 42 | } 43 | 44 | // calculate our target 45 | uint16_t target = 0xFFFF - uint16_t(periods); 46 | 47 | // reset the PIT counter to max: 48 | WriteByte(C_PIT_CMD_PORT, 0x34); 49 | WriteByte(C_PIT_DATA_PORT, 0xFF); 50 | WriteByte(C_PIT_DATA_PORT, 0xFF); 51 | 52 | // wait until the PIT hits our target 53 | while (Read() > target) 54 | Spinlock::SpinHint(); 55 | } 56 | -------------------------------------------------------------------------------- /source/ax86_64/RSD.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // ax86_64/RSD.cpp - Creation date: 08/05/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2023 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This module implements the code that allows for the usage 12 | // of the RSDP table. 13 | // 14 | // *************************************************************** 15 | #include 16 | 17 | using namespace Arch; 18 | 19 | KList g_RSDTables; 20 | 21 | volatile limine_rsdp_request g_RSDPRequest = 22 | { 23 | .id = LIMINE_RSDP_REQUEST, 24 | .revision = 0, 25 | .response = NULL, 26 | }; 27 | 28 | void RSD::LoadTable(RSD::Table* pTable) 29 | { 30 | int count = pTable->GetSubSDTCount(); 31 | 32 | // add it to the list of RSD tables: 33 | 34 | for (int i = 0; i < count; i++) 35 | { 36 | uintptr_t addr = pTable->m_SubSDTs[i]; 37 | 38 | Table* pItem = (Table*)(Arch::GetHHDMOffset() + addr); 39 | g_RSDTables.AddBack(pItem); 40 | } 41 | } 42 | 43 | void RSD::Load() 44 | { 45 | Descriptor* pDesc = (Descriptor*)g_RSDPRequest.response->address; 46 | 47 | uintptr_t rsdtAddr = pDesc->GetRSDTAddress(); 48 | 49 | Table* pTable = (Table*)(Arch::GetHHDMOffset() + rsdtAddr); 50 | 51 | LoadTable(pTable); 52 | 53 | for (auto it = g_RSDTables.Begin(); it.Valid(); it++) 54 | { 55 | char thing[5]; 56 | thing[4] = 0; 57 | 58 | Table* pTable = *it; 59 | memcpy(thing, pTable->m_Signature, 4); 60 | 61 | LogMsg("RSD entry: %s", thing); 62 | 63 | if (strcmp(thing, "HPET") == 0) 64 | HPET::Found(pTable); 65 | } 66 | } 67 | 68 | 69 | -------------------------------------------------------------------------------- /source/ax86_64/TSC.cpp: -------------------------------------------------------------------------------- 1 | // *************************************************************** 2 | // TSC.cpp - Creation date: 13/05/2023 3 | // ------------------------------------------------------------- 4 | // NanoShell64 Copyright (C) 2023 - Licensed under GPL V3 5 | // 6 | // *************************************************************** 7 | // Programmer(s): iProgramInCpp (iprogramincpp@gmail.com) 8 | // *************************************************************** 9 | // 10 | // Module description: 11 | // This module 
implements a small driver for the TSC. 12 | // 13 | // *************************************************************** 14 | #include 15 | 16 | using namespace Arch; 17 | 18 | uint64_t TSC::Read() 19 | { 20 | uint64_t low, high; 21 | 22 | // note: The rdtsc instruction is specified to zero out the top 32 bits of rax and rdx. 23 | asm("rdtsc":"=a"(low), "=d"(high)); 24 | 25 | // So something like this is fine. 26 | return high << 32 | low; 27 | } 28 | -------------------------------------------------------------------------------- /test.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | :a 3 | "C:\Program Files\Oracle\VirtualBox\VBoxManage.exe" debugvm "64-bit Misc" getregisters --cpu=2 rip 4 | goto a 5 | --------------------------------------------------------------------------------