├── .clang-format ├── .gitignore ├── .trunk ├── .gitignore ├── configs │ ├── .clang-tidy │ ├── .markdownlint.yaml │ ├── .shellcheckrc │ └── .yamllint.yaml └── trunk.yaml ├── .vscode ├── c_cpp_properties.json ├── launch.json ├── settings.json └── tasks.json ├── CMakeLists.txt ├── CONTRIBUTING.md ├── SETUP.md ├── arch └── x86-64 │ ├── asm.h │ ├── build.cmake │ ├── entry.asm │ ├── init.cc │ ├── interface.h │ ├── interrupts.cc │ ├── linker.ld │ ├── mmu.cc │ ├── private.h │ ├── thread.cc │ ├── thread.h │ └── types.h ├── core ├── assert.cc ├── build.cmake ├── include │ ├── arch │ │ ├── interface.h │ │ └── types.h │ ├── core │ │ ├── mem.h │ │ ├── mp.h │ │ ├── mutex.h │ │ ├── slab.h │ │ ├── spinlock.h │ │ ├── thread.h │ │ ├── vm.h │ │ └── vmregion.h │ ├── loaderabi.h │ └── platform │ │ └── interface.h ├── main.cc ├── mp.cc ├── mutex.cc ├── pmm.cc ├── sched.cc ├── slab.cc ├── spinlock.cc ├── thread.cc └── vmregion.cc ├── lib ├── ebl │ ├── assert.h │ ├── atomic.h │ ├── bit.h │ ├── cxxabi.cc │ ├── guard.h │ ├── linked_list.h │ ├── memory.h │ ├── new.cc │ ├── new.h │ ├── source_location.h │ ├── status.h │ ├── stdio.cc │ ├── stdio.h │ ├── string.cc │ ├── string.h │ ├── thread_safety.h │ ├── type_traits.h │ └── util.h └── elf │ ├── elf.h │ ├── elf32_parser.cc │ ├── elf64_parser.cc │ ├── parser.cc │ └── sysv_elf_abi.h ├── loader └── x86-64-multiboot2 │ ├── build.cmake │ ├── linker.ld │ ├── loader.h │ ├── main.cc │ ├── mem.cc │ ├── multiboot2.cc │ ├── multiboot2.h │ └── start.asm ├── platform └── pc │ ├── build.cmake │ ├── console.cc │ ├── init.cc │ └── interface.h ├── scripts ├── bochsrc.bxrc ├── bx_enh_dbg.ini ├── create_gdbinit.sh ├── grub.cfg ├── grub.cmake ├── launch_qemu.sh ├── make_grub_image.sh └── toolchain.cmake └── tests ├── assert.cc ├── build.cmake ├── ebl_bitfield.cc ├── ebl_linkedlist.cc ├── ebl_main.cc └── kernel_vmm.cc /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | BasedOnStyle: Google 3 | 
AccessModifierOffset: -3 4 | AlignAfterOpenBracket: Align 5 | AlignArrayOfStructures: None 6 | AlignConsecutiveAssignments: 7 | Enabled: false 8 | AcrossEmptyLines: false 9 | AcrossComments: false 10 | AlignCompound: false 11 | PadOperators: true 12 | AlignConsecutiveBitFields: 13 | Enabled: false 14 | AcrossEmptyLines: false 15 | AcrossComments: false 16 | AlignCompound: false 17 | PadOperators: false 18 | AlignConsecutiveDeclarations: 19 | Enabled: false 20 | AcrossEmptyLines: false 21 | AcrossComments: false 22 | AlignCompound: false 23 | PadOperators: false 24 | AlignConsecutiveMacros: 25 | Enabled: false 26 | AcrossEmptyLines: false 27 | AcrossComments: false 28 | AlignCompound: false 29 | PadOperators: false 30 | AlignEscapedNewlines: Left 31 | AlignOperands: Align 32 | AlignTrailingComments: 33 | Kind: Always 34 | OverEmptyLines: 0 35 | AllowAllArgumentsOnNextLine: true 36 | AllowAllParametersOfDeclarationOnNextLine: true 37 | AllowShortBlocksOnASingleLine: Never 38 | AllowShortCaseLabelsOnASingleLine: false 39 | AllowShortEnumsOnASingleLine: true 40 | AllowShortFunctionsOnASingleLine: All 41 | AllowShortIfStatementsOnASingleLine: WithoutElse 42 | AllowShortLambdasOnASingleLine: All 43 | AllowShortLoopsOnASingleLine: true 44 | AlwaysBreakAfterDefinitionReturnType: None 45 | AlwaysBreakAfterReturnType: None 46 | AlwaysBreakBeforeMultilineStrings: true 47 | AlwaysBreakTemplateDeclarations: Yes 48 | AttributeMacros: 49 | - __capability 50 | BinPackArguments: false 51 | BinPackParameters: false 52 | BitFieldColonSpacing: Both 53 | BraceWrapping: 54 | AfterCaseLabel: false 55 | AfterClass: false 56 | AfterControlStatement: Never 57 | AfterEnum: false 58 | AfterFunction: false 59 | AfterNamespace: false 60 | AfterObjCDeclaration: false 61 | AfterStruct: false 62 | AfterUnion: false 63 | AfterExternBlock: false 64 | BeforeCatch: false 65 | BeforeElse: false 66 | BeforeLambdaBody: false 67 | BeforeWhile: false 68 | IndentBraces: false 69 | SplitEmptyFunction: true 
70 | SplitEmptyRecord: true 71 | SplitEmptyNamespace: true 72 | BreakAfterAttributes: Never 73 | BreakAfterJavaFieldAnnotations: false 74 | BreakArrays: true 75 | BreakBeforeBinaryOperators: None 76 | BreakBeforeBraces: Attach 77 | BreakBeforeConceptDeclarations: Always 78 | BreakBeforeInlineASMColon: OnlyMultiline 79 | BreakBeforeTernaryOperators: true 80 | BreakConstructorInitializers: BeforeColon 81 | BreakInheritanceList: BeforeColon 82 | BreakStringLiterals: true 83 | ColumnLimit: 100 84 | CommentPragmas: "^ IWYU pragma:" 85 | CompactNamespaces: false 86 | ConstructorInitializerIndentWidth: 6 87 | ContinuationIndentWidth: 6 88 | Cpp11BracedListStyle: true 89 | DerivePointerAlignment: true 90 | DisableFormat: false 91 | EmptyLineAfterAccessModifier: Never 92 | EmptyLineBeforeAccessModifier: LogicalBlock 93 | ExperimentalAutoDetectBinPacking: false 94 | FixNamespaceComments: true 95 | ForEachMacros: 96 | - foreach 97 | - Q_FOREACH 98 | - BOOST_FOREACH 99 | IfMacros: 100 | - KJ_IF_MAYBE 101 | IncludeBlocks: Regroup 102 | IncludeCategories: 103 | - Regex: ^ 104 | Priority: 2 105 | SortPriority: 0 106 | CaseSensitive: false 107 | - Regex: ^<.*\.h> 108 | Priority: 1 109 | SortPriority: 0 110 | CaseSensitive: false 111 | - Regex: ^<.* 112 | Priority: 2 113 | SortPriority: 0 114 | CaseSensitive: false 115 | - Regex: .* 116 | Priority: 3 117 | SortPriority: 0 118 | CaseSensitive: false 119 | IncludeIsMainRegex: ([-_](test|unittest))?$ 120 | IncludeIsMainSourceRegex: "" 121 | IndentAccessModifiers: false 122 | IndentCaseBlocks: false 123 | IndentCaseLabels: true 124 | IndentExternBlock: AfterExternBlock 125 | IndentGotoLabels: true 126 | IndentPPDirectives: None 127 | IndentRequiresClause: true 128 | IndentWidth: 3 129 | IndentWrappedFunctionNames: false 130 | InsertBraces: false 131 | InsertNewlineAtEOF: false 132 | InsertTrailingCommas: None 133 | IntegerLiteralSeparator: 134 | Binary: 0 135 | BinaryMinDigits: 0 136 | Decimal: 0 137 | DecimalMinDigits: 0 138 | Hex: 0 
139 | HexMinDigits: 0 140 | JavaScriptQuotes: Leave 141 | JavaScriptWrapImports: true 142 | KeepEmptyLinesAtTheStartOfBlocks: false 143 | LambdaBodyIndentation: Signature 144 | Language: Cpp 145 | LineEnding: DeriveLF 146 | MacroBlockBegin: "" 147 | MacroBlockEnd: "" 148 | MaxEmptyLinesToKeep: 1 149 | NamespaceIndentation: All 150 | ObjCBinPackProtocolList: Never 151 | ObjCBlockIndentWidth: 2 152 | ObjCBreakBeforeNestedBlockParam: true 153 | ObjCSpaceAfterProperty: false 154 | ObjCSpaceBeforeProtocolList: true 155 | PPIndentWidth: -1 156 | PackConstructorInitializers: NextLine 157 | PenaltyBreakAssignment: 2 158 | PenaltyBreakBeforeFirstCallParameter: 1 159 | PenaltyBreakComment: 300 160 | PenaltyBreakFirstLessLess: 120 161 | PenaltyBreakOpenParenthesis: 0 162 | PenaltyBreakString: 1000 163 | PenaltyBreakTemplateDeclaration: 10 164 | PenaltyExcessCharacter: 1000000 165 | PenaltyIndentedWhitespace: 0 166 | PenaltyReturnTypeOnItsOwnLine: 200 167 | PointerAlignment: Left 168 | QualifierAlignment: Leave 169 | RawStringFormats: 170 | - Language: Cpp 171 | Delimiters: 172 | - cc 173 | - CC 174 | - cpp 175 | - Cpp 176 | - CPP 177 | - c++ 178 | - C++ 179 | CanonicalDelimiter: "" 180 | BasedOnStyle: google 181 | - Language: TextProto 182 | Delimiters: 183 | - pb 184 | - PB 185 | - proto 186 | - PROTO 187 | EnclosingFunctions: 188 | - EqualsProto 189 | - EquivToProto 190 | - PARSE_PARTIAL_TEXT_PROTO 191 | - PARSE_TEST_PROTO 192 | - PARSE_TEXT_PROTO 193 | - ParseTextOrDie 194 | - ParseTextProtoOrDie 195 | - ParseTestProto 196 | - ParsePartialTestProto 197 | CanonicalDelimiter: pb 198 | BasedOnStyle: google 199 | ReferenceAlignment: Pointer 200 | ReflowComments: true 201 | RemoveBracesLLVM: false 202 | RemoveSemicolon: false 203 | RequiresClausePosition: OwnLine 204 | RequiresExpressionIndentation: OuterScope 205 | SeparateDefinitionBlocks: Leave 206 | ShortNamespaceLines: 1 207 | SortIncludes: CaseSensitive 208 | SortJavaStaticImport: Before 209 | SortUsingDeclarations: 
LexicographicNumeric 210 | SpaceAfterCStyleCast: false 211 | SpaceAfterLogicalNot: false 212 | SpaceAfterTemplateKeyword: true 213 | SpaceAroundPointerQualifiers: Default 214 | SpaceBeforeAssignmentOperators: true 215 | SpaceBeforeCaseColon: false 216 | SpaceBeforeCpp11BracedList: false 217 | SpaceBeforeCtorInitializerColon: true 218 | SpaceBeforeInheritanceColon: true 219 | SpaceBeforeParens: Never 220 | SpaceBeforeParensOptions: 221 | AfterControlStatements: true 222 | AfterForeachMacros: true 223 | AfterFunctionDeclarationName: false 224 | AfterFunctionDefinitionName: false 225 | AfterIfMacros: true 226 | AfterOverloadedOperator: false 227 | AfterRequiresInClause: false 228 | AfterRequiresInExpression: false 229 | BeforeNonEmptyParentheses: false 230 | SpaceBeforeRangeBasedForLoopColon: true 231 | SpaceBeforeSquareBrackets: false 232 | SpaceInEmptyBlock: false 233 | SpaceInEmptyParentheses: false 234 | SpacesBeforeTrailingComments: 1 235 | SpacesInAngles: Never 236 | SpacesInCStyleCastParentheses: false 237 | SpacesInConditionalStatement: false 238 | SpacesInContainerLiterals: true 239 | SpacesInLineCommentPrefix: 240 | Minimum: 1 241 | Maximum: -1 242 | SpacesInParentheses: false 243 | SpacesInSquareBrackets: false 244 | Standard: c++20 245 | StatementAttributeLikeMacros: 246 | - Q_EMIT 247 | StatementMacros: 248 | - Q_UNUSED 249 | - QT_REQUIRE_VERSION 250 | TabWidth: 8 251 | UseTab: Never 252 | WhitespaceSensitiveMacros: 253 | - BOOST_PP_STRINGIZE 254 | - CF_SWIFT_NAME 255 | - NS_SWIFT_NAME 256 | - PP_STRINGIZE 257 | - STRINGIZE 258 | UseCRLF: false 259 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | .cache/ 3 | -------------------------------------------------------------------------------- /.trunk/.gitignore: -------------------------------------------------------------------------------- 1 | *out 2 | *logs 3 | 
*actions 4 | *notifications 5 | *tools 6 | plugins 7 | user_trunk.yaml 8 | user.yaml 9 | -------------------------------------------------------------------------------- /.trunk/configs/.clang-tidy: -------------------------------------------------------------------------------- 1 | Checks: >- 2 | bugprone-*, 3 | cppcoreguidelines-*, 4 | google-*, 5 | misc-*, 6 | modernize-*, 7 | performance-*, 8 | readability-*, 9 | -bugprone-lambda-function-name, 10 | -bugprone-reserved-identifier, 11 | -cppcoreguidelines-avoid-goto, 12 | -cppcoreguidelines-avoid-magic-numbers, 13 | -cppcoreguidelines-avoid-non-const-global-variables, 14 | -cppcoreguidelines-pro-bounds-array-to-pointer-decay, 15 | -cppcoreguidelines-pro-type-vararg, 16 | -google-readability-braces-around-statements, 17 | -google-readability-function-size, 18 | -misc-no-recursion, 19 | -modernize-return-braced-init-list, 20 | -modernize-use-nodiscard, 21 | -modernize-use-trailing-return-type, 22 | -performance-unnecessary-value-param, 23 | -readability-magic-numbers, 24 | 25 | CheckOptions: 26 | - key: readability-function-cognitive-complexity.Threshold 27 | value: 100 28 | - key: readability-function-cognitive-complexity.IgnoreMacros 29 | value: true 30 | # Set naming conventions for your style below (there are dozens of naming settings possible): 31 | # See https://clang.llvm.org/extra/clang-tidy/checks/readability/identifier-naming.html 32 | # - key: readability-identifier-naming.ClassCase 33 | # value: CamelCase 34 | # - key: readability-identifier-naming.NamespaceCase 35 | # value: lower_case 36 | # - key: readability-identifier-naming.PrivateMemberSuffix 37 | # value: _ 38 | # - key: readability-identifier-naming.StructCase 39 | # value: CamelCase 40 | -------------------------------------------------------------------------------- /.trunk/configs/.markdownlint.yaml: -------------------------------------------------------------------------------- 1 | # Autoformatter friendly markdownlint config (all 
formatting rules disabled) 2 | default: true 3 | blank_lines: false 4 | bullet: false 5 | html: false 6 | indentation: false 7 | line_length: false 8 | spaces: false 9 | url: false 10 | whitespace: false 11 | -------------------------------------------------------------------------------- /.trunk/configs/.shellcheckrc: -------------------------------------------------------------------------------- 1 | enable=all 2 | source-path=SCRIPTDIR 3 | disable=SC2154 4 | 5 | # If you're having issues with shellcheck following source, disable the errors via: 6 | # disable=SC1090 7 | # disable=SC1091 8 | -------------------------------------------------------------------------------- /.trunk/configs/.yamllint.yaml: -------------------------------------------------------------------------------- 1 | rules: 2 | quoted-strings: 3 | required: only-when-needed 4 | extra-allowed: ["{|}"] 5 | empty-values: 6 | forbid-in-block-mappings: true 7 | forbid-in-flow-mappings: true 8 | key-duplicates: {} 9 | octal-values: 10 | forbid-implicit-octal: true 11 | -------------------------------------------------------------------------------- /.trunk/trunk.yaml: -------------------------------------------------------------------------------- 1 | # This file controls the behavior of Trunk: https://docs.trunk.io/cli 2 | # To learn more about the format of this file, see https://docs.trunk.io/reference/trunk-yaml 3 | version: 0.1 4 | cli: 5 | version: 1.18.1 6 | # Trunk provides extensibility via plugins. (https://docs.trunk.io/plugins) 7 | plugins: 8 | sources: 9 | - id: trunk 10 | ref: v1.4.0 11 | uri: https://github.com/trunk-io/plugins 12 | # Many linters and tools depend on runtimes - configure them here. (https://docs.trunk.io/runtimes) 13 | runtimes: 14 | enabled: 15 | - go@1.21.0 16 | - node@18.12.1 17 | - python@3.10.8 18 | # This is the section where you manage your linters. 
(https://docs.trunk.io/check/configuration) 19 | lint: 20 | disabled: 21 | - yamllint 22 | - checkov 23 | - git-diff-check 24 | - trivy 25 | - trufflehog 26 | enabled: 27 | - clang-format@16.0.3 28 | - clang-tidy@16.0.3 29 | - markdownlint@0.37.0 30 | - prettier@3.1.0 31 | - shellcheck@0.9.0 32 | - shfmt@3.6.0 33 | actions: 34 | disabled: 35 | - trunk-announce 36 | - trunk-check-pre-push 37 | - trunk-fmt-pre-commit 38 | enabled: 39 | - trunk-upgrade-available 40 | -------------------------------------------------------------------------------- /.vscode/c_cpp_properties.json: -------------------------------------------------------------------------------- 1 | { 2 | "configurations": [ 3 | { 4 | "name": "Linux WSL", 5 | "compilerPath": "/usr/bin/clang++", 6 | "cStandard": "c17", 7 | "cppStandard": "c++20", 8 | "intelliSenseMode": "clang-x64", 9 | "compileCommands": "${workspaceFolder}/build/compile_commands.json" 10 | } 11 | ], 12 | "version": 4 13 | } 14 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Launch kernel with GDB (WSL)", 9 | "type": "cppdbg", 10 | "request": "launch", 11 | "program": "${workspaceRoot}/build/cxkernel.sys", 12 | "cwd": "${workspaceRoot}", 13 | "targetArchitecture": "amd64", 14 | "MIMode": "gdb", 15 | "miDebuggerArgs": "-nh", 16 | "externalConsole": false, 17 | "customLaunchSetupCommands": [ 18 | { 19 | "text": "source ${workspaceRoot}/build/.gdbinit", 20 | "description": "Connect to QEMU remote debugger" 21 | } 22 | ], 23 | "setupCommands": [ 24 | { 25 | "description": "Enable pretty-printing for gdb", 26 | "text": "-enable-pretty-printing", 27 | "ignoreFailures": true 28 | }, 29 | { 30 | "text": "file ${workspaceFolder}/build/cxkernel.sys" 31 | } 32 | ], 33 | "preLaunchTask": "Create gdbinit", 34 | "postDebugTask": "Kill TMUX" 35 | } 36 | ] 37 | } 38 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "*.h": "cpp", 4 | "*.cc": "cpp" 5 | }, 6 | 7 | // Have some fun with the icons :) 8 | "material-icon-theme.folders.associations": { 9 | "core": "home", 10 | "arch": "target", 11 | "platform": "core", 12 | "loader": "upload" 13 | }, 14 | "nasm.outputFormat": "elf64", 15 | "nasm.extraFlags": ["-Fdwarf"], 16 | "editor.rulers": [80, 100], 17 | "editor.indentSize": "tabSize", 18 | "editor.tabSize": 3, 19 | "[cmake]": { 20 | "editor.tabSize": 8, 21 | "editor.detectIndentation": false, 22 | "editor.insertSpaces": true 23 | }, 24 | "taskExplorer.enabledTasks": { 25 | "ant": false, 26 | "appPublisher": false, 27 | "bash": false, 28 | "batch": false, 29 | "composer": false, 30 | "gradle": false, 31 | "grunt": false, 32 | "gulp": false, 33 | "make": true, 34 | "maven": false, 35 | "npm": false, 36 | "nsis": false, 37 | "perl": 
false, 38 | "pipenv": false, 39 | "powershell": false, 40 | "python": false, 41 | "ruby": false, 42 | "tsc": false, 43 | "workspace": true 44 | }, 45 | "C_Cpp.codeAnalysis.runAutomatically": false, 46 | "C_Cpp.codeAnalysis.clangTidy.enabled": true, 47 | "C_Cpp.codeAnalysis.clangTidy.useBuildPath": true, 48 | "C_Cpp.codeAnalysis.exclude": { 49 | "**/build/**": true 50 | }, 51 | "C_Cpp.autoAddFileAssociations": false, 52 | "C_Cpp.intelliSenseEngine": "disabled", 53 | "C_Cpp.errorSquiggles": "disabled", 54 | "C_Cpp.autocomplete": "disabled", 55 | "trunk.inlineDecorators": false 56 | } 57 | -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0.0", 3 | "tasks": [ 4 | { 5 | "type": "shell", 6 | "label": "Build", 7 | "command": "make -j$(nproc)", 8 | "group": { 9 | "kind": "build", 10 | "isDefault": true 11 | }, 12 | "problemMatcher": [ 13 | "$gcc", 14 | { 15 | "owner": "linker", 16 | "pattern": { 17 | "regexp": "^(ld): (error): (.*)$", 18 | "file": 1, 19 | "severity": 2, 20 | "message": 3 21 | }, 22 | "fileLocation": ["relative", "${workspaceFolder}/build"] 23 | } 24 | ], 25 | "options": { 26 | "cwd": "${workspaceFolder}/build" 27 | }, 28 | "presentation": { 29 | "echo": true, 30 | "reveal": "never", 31 | "revealProblems": "onProblem", 32 | "focus": false, 33 | "panel": "shared", 34 | "showReuseMessage": true, 35 | "clear": true 36 | } 37 | }, 38 | { 39 | "type": "shell", 40 | "label": "Clean", 41 | "command": "make clean", 42 | "options": { 43 | "cwd": "${workspaceFolder}/build" 44 | }, 45 | "presentation": { 46 | "reveal": "never", 47 | "panel": "shared" 48 | } 49 | }, 50 | { 51 | "type": "shell", 52 | "label": "Launch QEMU", 53 | "linux": { 54 | "command": "./launch_qemu.sh" 55 | }, 56 | "options": { 57 | "cwd": "${workspaceFolder}/scripts" 58 | }, 59 | "isBackground": true, 60 | "problemMatcher": [], 61 | 
"dependsOn": ["Build"] 62 | }, 63 | { 64 | "label": "Launch QEMU Debug", 65 | "type": "shell", 66 | "linux": { 67 | "command": [ 68 | "tmux new-session -d -s qemu ./launch_qemu.sh -debug;", 69 | "sleep 1;" 70 | ] 71 | }, 72 | "options": { 73 | "cwd": "${workspaceFolder}/scripts" 74 | }, 75 | "problemMatcher": [], 76 | "hide": true, 77 | "dependsOn": ["Build"] 78 | }, 79 | { 80 | "label": "Create gdbinit", 81 | "type": "shell", 82 | "linux": { 83 | "command": "./create_gdbinit.sh ${workspaceFolder}/build/" 84 | }, 85 | "options": { 86 | "cwd": "${workspaceFolder}/scripts" 87 | }, 88 | "problemMatcher": [], 89 | "hide": true, 90 | "dependsOn": ["Launch QEMU Debug"] 91 | }, 92 | { 93 | "label": "Kill TMUX", 94 | "type": "shell", 95 | "linux": { 96 | "command": "tmux kill-session -t qemu; echo ''" 97 | }, 98 | "hide": true 99 | } 100 | ] 101 | } 102 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project(cxkernel) 2 | cmake_minimum_required(VERSION 3.22) 3 | set(CMAKE_EXPORT_COMPILE_COMMANDS ON) 4 | 5 | # Set ... ??? 
6 | set(TARGET_LOADER multiboot2) 7 | set(TARGET_ARCH x86-64) 8 | set(TARGET_PLATFORM pc) 9 | # Set this depending on architecture 10 | set(MCMODEL large) 11 | 12 | # Setup toolchain 13 | include(${CMAKE_CURRENT_SOURCE_DIR}/scripts/toolchain.cmake) 14 | set(CMAKE_CXX_STANDARD 20) 15 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 16 | 17 | # Define macros 18 | add_compile_definitions(ARCH_TARGET=x86-64) 19 | add_compile_definitions(PLAT_TARGET=pc) 20 | add_compile_definitions(ABI_ALIGN=8) 21 | 22 | # Loader target 23 | add_executable(cxloader "") 24 | include(${CMAKE_CURRENT_SOURCE_DIR}/loader/${TARGET_ARCH}-${TARGET_LOADER}/build.cmake) 25 | 26 | # Kernel target 27 | file(GLOB LIB_SOURCES 28 | "${CMAKE_CURRENT_SOURCE_DIR}/lib/ebl/*.cc" 29 | "${CMAKE_CURRENT_SOURCE_DIR}/lib/elf/*.cc") 30 | add_executable(cxkernel ${LIB_SOURCES}) 31 | set_target_properties(cxkernel PROPERTIES SUFFIX ".sys") 32 | target_include_directories( 33 | cxkernel PRIVATE 34 | "${CMAKE_CURRENT_SOURCE_DIR}/lib" 35 | "${CMAKE_CURRENT_SOURCE_DIR}/core/include" 36 | "${CMAKE_CURRENT_SOURCE_DIR}/arch" 37 | "${CMAKE_CURRENT_SOURCE_DIR}/" 38 | ) 39 | include(${CMAKE_CURRENT_SOURCE_DIR}/core/build.cmake) 40 | include(${CMAKE_CURRENT_SOURCE_DIR}/arch/${TARGET_ARCH}/build.cmake) 41 | include(${CMAKE_CURRENT_SOURCE_DIR}/platform/${TARGET_PLATFORM}/build.cmake) 42 | 43 | # Final boot image target 44 | include(${CMAKE_CURRENT_SOURCE_DIR}/scripts/grub.cmake) 45 | 46 | # Library tests 47 | include(${CMAKE_CURRENT_SOURCE_DIR}/tests/build.cmake) 48 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Naming rules: 2 | 3 | - Struct and class names must be in PascalCase 4 | - _Rule exceptions:_ Private/implementation-specific arch- or platform-specific structures 5 | - All Struct/class members must be in lower_snake_case 6 | - Private members must additionally end in an underscore 
like_this\_ 7 | - Typedefs and usings must be in lower_snake_case 8 | 9 | Code organization: 10 | 11 | - Struct/class should have public members listed first and private members listed last 12 | - Struct/class should be marked final whenever possible 13 | - Using `noexcept` is optional as exceptions are disallowed anyways. `noexcept` may be required by the compiler or other compile-time checking tools, so they are not banned entirely. 14 | 15 | 16 | Test naming convention: 17 | - Unit tests are named like this: `test_domain unit_being_tested description...` 18 | - For instance, "ebl intrusive_linked_list with refptr" tells us the domain is the "ebl" library and we're testing "intrusive_linked_list" specifically. 19 | -------------------------------------------------------------------------------- /SETUP.md: -------------------------------------------------------------------------------- 1 | Install the latest LLVM toolchain 2 | 3 | ``` 4 | sudo su 5 | bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" 6 | LLVM_VERSION=16 # Change this! 
7 | update-alternatives --install /usr/bin/clang clang /usr/bin/clang-$LLVM_VERSION 200 8 | update-alternatives --install /usr/bin/cc cc /usr/bin/clang-$LLVM_VERSION 200 9 | update-alternatives --install /usr/bin/c++ c++ /usr/bin/clang++-$LLVM_VERSION 200 10 | update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-$LLVM_VERSION 200 11 | update-alternatives --install /usr/bin/lld lld /usr/bin/lld-$LLVM_VERSION 200 12 | update-alternatives --install /usr/bin/ld.lld ld.lld /usr/bin/ld.lld-$LLVM_VERSION 200 13 | update-alternatives --install /usr/bin/ld ld /usr/bin/ld.lld 200 14 | update-alternatives --install /usr/bin/clangd clangd /usr/bin/clangd-$LLVM_VERSION 200 15 | update-alternatives --install /usr/bin/llvm-profdata llvm-profdata /usr/bin/llvm-profdata-$LLVM_VERSION 200 16 | update-alternatives --install /usr/bin/llvm-cov llvm-cov /usr/bin/llvm-cov-$LLVM_VERSION 200 17 | ``` 18 | -------------------------------------------------------------------------------- /arch/x86-64/asm.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | namespace x86_64 { 7 | 8 | static inline void sti() { 9 | FENCE; 10 | asm volatile("sti"); 11 | } 12 | 13 | static inline void cli() { 14 | asm volatile("cli"); 15 | FENCE; 16 | } 17 | 18 | static inline uint64_t read_cr2() { 19 | uint64_t ret; 20 | asm volatile("mov %%cr2, %0" : "=r"(ret)); 21 | return ret; 22 | } 23 | 24 | static inline uint64_t read_cr3() { 25 | uint64_t ret; 26 | asm volatile("mov %%cr3, %0" : "=r"(ret)); 27 | return ret; 28 | } 29 | 30 | static inline uint32_t save_flags() { 31 | uint32_t state; 32 | asm volatile( 33 | "pushf;" 34 | "pop %0" 35 | : "=rm"(state)::"memory"); 36 | return state; 37 | } 38 | 39 | static inline uint64_t read_gs_offset64(uintptr_t offset) { 40 | uint64_t ret; 41 | asm volatile("movq %%gs:%1, %0" : "=r"(ret) : "m"(*(uint64_t*)(offset))); 42 | return ret; 43 | } 44 | 45 | static inline void 
restore_flags(uint32_t flags) { 46 | asm volatile( 47 | "push %0;" 48 | "popf" ::"g"(flags) 49 | : "memory", "cc"); 50 | } 51 | 52 | static inline void outb(uint16_t port, uint8_t value) { 53 | asm volatile("outb %1, %0" : : "dN"(port), "a"(value)); 54 | } 55 | 56 | static inline uint8_t inb(uint16_t port) { 57 | uint8_t ret; 58 | asm volatile("inb %1, %0" : "=a"(ret) : "dN"(port)); 59 | return ret; 60 | } 61 | 62 | enum class MSR : uint32_t { IA32_GS_BASE = 0xC0000101, IA32_KERNEL_GS_BASE = 0xC0000102 }; 63 | 64 | static inline void wrmsr(MSR sel, uint64_t value) { 65 | uint32_t low = value & 0xFFFFFFFF; 66 | uint32_t high = value >> 32; 67 | asm volatile("wrmsr" : : "c"(sel), "a"(low), "d"(high)); 68 | } 69 | 70 | } // namespace x86_64 71 | -------------------------------------------------------------------------------- /arch/x86-64/build.cmake: -------------------------------------------------------------------------------- 1 | set(SOURCES "") 2 | 3 | # Add all sources 4 | file(GLOB SOURCES 5 | "${CMAKE_CURRENT_LIST_DIR}/*.cc" 6 | "${CMAKE_CURRENT_LIST_DIR}/*.asm" 7 | ) 8 | 9 | # Manually set the language + flags for the assembly file 10 | set_source_files_properties( 11 | "${CMAKE_CURRENT_LIST_DIR}/entry.asm" 12 | PROPERTIES 13 | LANGUAGE ASM_NASM 14 | COMPILE_FLAGS "-felf64 -Fdwarf" 15 | ) 16 | 17 | # Update target sources 18 | target_sources(cxkernel PRIVATE ${SOURCES}) 19 | 20 | # Set compile and linker flags 21 | target_link_options( 22 | cxkernel PRIVATE 23 | ${STANDARD_LINKER_FLAGS} 24 | LINKER:-T${CMAKE_CURRENT_LIST_DIR}/linker.ld 25 | LINKER:--apply-dynamic-relocs 26 | LINKER:-z,max-page-size=0x1000 27 | -no-pie -mabi=sysv -m64 -mcmodel=${MCMODEL} 28 | ) 29 | target_compile_options( 30 | cxkernel PRIVATE 31 | $<$:${STANDARD_CXX_FLAGS} 32 | -mcmodel=${MCMODEL} -fno-pie -m64> 33 | ) 34 | -------------------------------------------------------------------------------- /arch/x86-64/entry.asm: 
-------------------------------------------------------------------------------- 1 | [BITS 64] 2 | 3 | SECTION .text 4 | 5 | %assign i 0 6 | %rep 256 7 | isr%+i: 8 | cli 9 | %if (i = 8) || (i >= 10 && i <= 14) || (i = 17) || (i = 20) || (i = 30) 10 | push i 11 | %else 12 | push 0 13 | push i 14 | %endif 15 | jmp irq_stub 16 | %assign i i+1 17 | %endrep 18 | 19 | [EXTERN isr_handler] 20 | 21 | irq_stub: 22 | cld 23 | push r15 24 | push r14 25 | push r13 26 | push r12 27 | push r11 28 | push r10 29 | push r9 30 | push r8 31 | push rax 32 | push rcx 33 | push rdx 34 | push rbx 35 | push rbp 36 | push rsi 37 | push rdi 38 | 39 | mov rdi, rsp ; Pass the stack pointer as the first argument 40 | 41 | call isr_handler 42 | 43 | pop rdi 44 | pop rsi 45 | pop rbp 46 | pop rbx 47 | pop rdx 48 | pop rcx 49 | pop rax 50 | pop r8 51 | pop r9 52 | pop r10 53 | pop r11 54 | pop r12 55 | pop r13 56 | pop r14 57 | pop r15 58 | add rsp, 16 ; Cleans up the pushed error code and interrupt number 59 | iret 60 | 61 | SECTION .data 62 | 63 | [GLOBAL isr_stub_table] 64 | isr_stub_table: 65 | %assign i 0 66 | %rep 256 67 | dq isr%+i 68 | %assign i i+1 69 | %endrep 70 | -------------------------------------------------------------------------------- /arch/x86-64/init.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "arch/interface.h" 5 | #include "core/mp.h" 6 | #include "core/thread.h" 7 | #include "loaderabi.h" 8 | #include "x86-64/private.h" 9 | 10 | static arch::PerCPU percpu_array[MAX_SMP_CPUS]{}; 11 | static core::Thread boot_thread{"Kernel boot thread"}; 12 | static core::AddressSpace boot_aspace{ 13 | // Nothing to be done here 14 | }; 15 | 16 | void arch::init() { 17 | // Initialize percpu array 18 | for(int i = 0; i < MAX_SMP_CPUS; i++) { 19 | percpu_array[i].self = &percpu_array[i]; 20 | percpu_array[i].cpu_num = i; 21 | percpu_array[i].curthread = nullptr; 22 | } 23 | 24 | // Load gs with percpu 
pointer 25 | x86_64::wrmsr(x86_64::MSR::IA32_KERNEL_GS_BASE, (uint64_t)&percpu_array[0]); 26 | asm volatile("swapgs"); 27 | assert(arch::cpu_num() == 0, "Boot CPU number is not 0"); 28 | assert(arch::get_percpu()->self == &percpu_array[0], "Boot percpu pointer is not correct"); 29 | 30 | // Install placeholder thread struct 31 | arch::init_aspace(boot_aspace); 32 | boot_thread.address_space = ebl::AdoptRef(&boot_aspace); 33 | percpu_array[0].curthread = &boot_thread; 34 | assert(arch::get_current_thread() == &boot_thread, "Boot thread is not current thread"); 35 | 36 | // Install interrupt handlers 37 | x86_64::init_idt(); 38 | } 39 | -------------------------------------------------------------------------------- /arch/x86-64/interface.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include "asm.h" 7 | #include "thread.h" 8 | #include "types.h" 9 | 10 | namespace core { 11 | struct Thread; 12 | } 13 | 14 | namespace arch { 15 | 16 | struct SpinlockBackend { 17 | volatile uint32_t x = 0; 18 | }; 19 | 20 | struct SpinlockState { 21 | uint32_t flags; 22 | }; 23 | 24 | struct IrqVector { 25 | uint8_t vector; 26 | }; 27 | 28 | struct ABICOMPAT LoaderState { 29 | vaddr_t kernel_base; 30 | vaddr_t kernel_limit; 31 | vaddr_t kernel_stack_base; 32 | vaddr_t kernel_stack_limit; 33 | vaddr_t kernel_pfndb_base; 34 | vaddr_t kernel_slab_base; 35 | }; 36 | 37 | struct PerCPU { 38 | struct PerCPU* self; 39 | int cpu_num; 40 | core::Thread* curthread; 41 | }; 42 | 43 | struct AddressSpace { 44 | uint64_t pml4; 45 | }; 46 | 47 | inline void spin_lock(SpinlockBackend* lock) { 48 | #define NL "\n" 49 | asm volatile("lock bts $0, (%0)" NL "jnc 2f" NL "1:" NL "pause" NL "testl $1, (%0)" NL 50 | "jne 1b" NL "lock bts $0, (%0)" NL "jc 1b" NL "2:" 51 | : 52 | : "r"(lock->x) 53 | : "memory"); 54 | #undef NL 55 | } 56 | 57 | inline void spin_unlock(SpinlockBackend* lock) { lock->x = 0; } 58 | 59 | 
inline void enable_interrupts() { x86_64::sti(); } 60 | 61 | inline void disable_interrupts() { x86_64::cli(); } 62 | 63 | inline void spin_save_state(SpinlockState* state) { 64 | state->flags = x86_64::save_flags(); 65 | disable_interrupts(); 66 | } 67 | 68 | inline void spin_restore_state(SpinlockState const* state) { 69 | // Restore will re-enable interrupts. 70 | x86_64::restore_flags(state->flags); 71 | } 72 | 73 | [[noreturn]] inline void halt() { 74 | asm volatile("hlt"); 75 | for(;;) 76 | ; 77 | __builtin_unreachable(); 78 | } 79 | 80 | inline int cpu_num() { return (int)x86_64::read_gs_offset64(offsetof(PerCPU, cpu_num)); } 81 | 82 | inline PerCPU* get_percpu() { return (PerCPU*)x86_64::read_gs_offset64(offsetof(PerCPU, self)); } 83 | 84 | inline core::Thread* get_current_thread() { return get_percpu()->curthread; } 85 | 86 | inline bool is_heap_address(vaddr_t addr) { 87 | (void)addr; 88 | return false; 89 | } 90 | 91 | inline void* grow_heap(unsigned int num_pages) { 92 | (void)num_pages; 93 | return nullptr; 94 | } 95 | 96 | } // namespace arch 97 | -------------------------------------------------------------------------------- /arch/x86-64/interrupts.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "arch/interface.h" 4 | #include "arch/types.h" 5 | #include "x86-64/asm.h" 6 | #include "x86-64/private.h" 7 | 8 | static x86_64::idt_entry idt[256]{}; 9 | static x86_64::idt_ptr idt_reg_ptr{.limit = sizeof(idt) - 1, .base = (uint64_t)&idt}; 10 | static arch::irq_handler_t irq_handlers[256]{}; 11 | static void exception_handler(x86_64::int_frame& r, uint64_t rsp); 12 | extern "C" uint64_t isr_stub_table; 13 | 14 | extern "C" void isr_handler(void* arg) { 15 | auto* regs = (x86_64::int_frame*)arg; 16 | auto handler = irq_handlers[regs->vector]; 17 | if(handler != nullptr) { 18 | handler(regs); 19 | } else if(regs->vector < 32) { 20 | exception_handler(*regs, (uint64_t)arg); 21 | } else { 22 | 
ebl::kerr("Unhandled interrupt: %d\n", regs->vector); 23 | arch::halt(); 24 | } 25 | } 26 | 27 | void arch::irq_install_handler(IrqVector vector, irq_handler_t fn) { 28 | irq_handlers[vector.vector] = fn; 29 | } 30 | 31 | void arch::irq_remove_handler(IrqVector vector) { irq_handlers[vector.vector] = nullptr; } 32 | 33 | arch::irq_handler_t arch::irq_get_handler(IrqVector vector) { return irq_handlers[vector.vector]; } 34 | 35 | static const char* exception_messages[] = {"#DE Division By Zero", 36 | "#DB Debug", 37 | "Non Maskable Interrupt", 38 | "#BP Breakpoint", 39 | "#OF Into Detected Overflow", 40 | "#BR Out of Bounds", 41 | "#UD Invalid Opcode", 42 | "#NM No Coprocessor", 43 | "#DF Double Fault", 44 | "Coprocessor Segment Overrun", 45 | "#TS Bad TSS", 46 | "#NP Segment Not Present", 47 | "#SS Stack-Segment Fault", 48 | "#GP General Protection Fault", 49 | "#PF Page Fault", 50 | "Unknown Interrupt", 51 | "#MF Coprocessor Fault", 52 | "#AC Alignment Check", 53 | "#MC Machine Check", 54 | "#XF SIMD Floating-Point Exception", 55 | "#VE Virtualization Exception", 56 | "Reserved", 57 | "#SX Security Exception", 58 | "Reserved", 59 | "Triple Fault", 60 | "#FERR Legacy", 61 | "Reserved", 62 | "Reserved", 63 | "Reserved", 64 | "Reserved", 65 | "Reserved", 66 | "Reserved"}; 67 | 68 | static const char* page_fault_messages[] = { 69 | "Supervisory process tried to read a non-present page entry", 70 | "Supervisory process tried to read a page and caused a protection fault", 71 | "Supervisory process tried to write to a non-present page entry", 72 | "Supervisory process tried to write a page and caused a protection fault", 73 | "User process tried to read a non-present page entry", 74 | "User process tried to read a page and caused a protection fault", 75 | "User process tried to write to a non-present page entry", 76 | "User process tried to write a page and caused a protection fault"}; 77 | 78 | void exception_handler(x86_64::int_frame& r, uint64_t rsp) { 79 | ebl::kerr( 
80 | "\n" 81 | "=== KERNEL UNHANDLED EXCEPTION 0x%02lX ===\n" 82 | "=== %s\n" 83 | "=== Dumping register contents below\n" 84 | "> rax: 0x%016lX rbx: 0x%016lX rcx: 0x%016lX rdx: 0x%016lX\n" 85 | "> rsi: 0x%016lX rdi: 0x%016lX rsp: 0x%016lX rbp: 0x%016lX\n" 86 | "> r8: 0x%016lX r9: 0x%016lX r10: 0x%016lX r11: 0x%016lX\n" 87 | "> r12: 0x%016lX r13: 0x%016lX r14: 0x%016lX r15: 0x%016lX\n" 88 | "> rip: 0x%016lX eflags: 0x%lX\n" 89 | "=== Error code: 0x%016lX\n", 90 | r.vector, 91 | exception_messages[r.vector], 92 | r.rax, 93 | r.rbx, 94 | r.rcx, 95 | r.rdx, 96 | r.rsi, 97 | r.rdi, 98 | rsp, 99 | r.rbp, 100 | r.r8, 101 | r.r9, 102 | r.r10, 103 | r.r11, 104 | r.r12, 105 | r.r13, 106 | r.r14, 107 | r.r15, 108 | r.rip, 109 | r.flags, 110 | r.err_code); 111 | // Additional page fault data 112 | if(r.vector == 14) { 113 | ebl::kerr( 114 | "=== Page fault data:\n" 115 | "=== %s\n" 116 | "=== Faulting address: 0x%016lX\n", 117 | page_fault_messages[r.err_code], 118 | x86_64::read_cr2()); 119 | } 120 | ebl::kerr("\n"); 121 | 122 | ebl::kerr( 123 | // Mini dead computer ASCII art 124 | // ref: https://www.asciiart.eu/computers/computers 125 | "+===============================================+\n" 126 | "| |\n" 127 | "| Kernel panic: The system has halted. |\n" 128 | "| Please reboot the system. |\n" 129 | "| If you see this message again, please report |\n" 130 | "| this to the developers. |\n" 131 | "| _ |\n" 132 | "| __ |-| THE SYSTEM |\n" 133 | "| jgs [Ll] |=| IS DEAD! 
x.x |\n" 134 | "| ====`o'^' |\n" 135 | "+===============================================+\n\n"); 136 | 137 | arch::halt(); 138 | } 139 | 140 | void x86_64::init_idt() { 141 | for(int i = 0; i < 256; i++) { 142 | idt[i] = x86_64::idt_entry{(&isr_stub_table)[i], x86_64::descriptor_type::INTERRUPT_GATE}; 143 | irq_handlers[i] = nullptr; 144 | } 145 | asm volatile("lidt %0" : : "m"(idt_reg_ptr)); 146 | } 147 | -------------------------------------------------------------------------------- /arch/x86-64/linker.ld: -------------------------------------------------------------------------------- 1 | ENTRY(kernel_entry) 2 | SECTIONS 3 | { 4 | . = 0xffff800000000000; 5 | _kernel_start = .; 6 | 7 | .text ALIGN (4K) : 8 | { 9 | *(.multiboot) 10 | *(.text*) 11 | *(.gnu.linkonce.t.*) 12 | } 13 | 14 | .rodata ALIGN (4K) : 15 | { 16 | *(.rodata*) 17 | *(.gnu.linkonce.r.*) 18 | } 19 | 20 | .data ALIGN (4K) : 21 | { 22 | *(.data*) 23 | *(.gnu.linkonce.d.*) 24 | } 25 | 26 | .kstrtab ALIGN (4K) : 27 | { 28 | *(kstrtab) 29 | } 30 | 31 | .init_array ALIGN (4K) : 32 | { 33 | init_array_start_ = .; 34 | KEEP (*(SORT(.init_array.*))) 35 | KEEP (*(.init_array )) 36 | init_array_end_ = .; 37 | } 38 | 39 | .bss ALIGN (4K) : 40 | { 41 | *(COMMON) 42 | *(.bss*) 43 | *(.gnu.linkonce.b.*) 44 | *(.bootloader_stack) 45 | } 46 | 47 | _kernel_end = .; 48 | } 49 | -------------------------------------------------------------------------------- /arch/x86-64/mmu.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "arch/types.h" 4 | #include "core/mem.h" 5 | #include "core/vm.h" 6 | #include "loaderabi.h" 7 | #include "x86-64/private.h" 8 | 9 | using core::Page; 10 | using x86_64::page_flags; 11 | using page_list_head = ebl::IntrusiveList; 12 | 13 | static Result map_single_page(paddr_t phys, 14 | vaddr_t virt, 15 | page_flags flags, 16 | page_list_head& pt_pages) { 17 | namespace ns = x86_64; 18 | assert(phys % arch::page_size == 0, 
"Physical address is not page-aligned!"); 19 | assert(virt % arch::page_size == 0, "Virtual address is not page-aligned!"); 20 | 21 | const uint16_t pml4e_idx = ns::pml4e_index(virt); 22 | const uint16_t pdpte_idx = ns::pdpte_index(virt); 23 | const uint16_t pde_idx = ns::pde_index(virt); 24 | const uint16_t pte_idx = ns::pte_index(virt); 25 | 26 | auto* const pml4 = (ns::pml4e*)ns::virt_from_index(511, 511, 511, 511); 27 | auto* const pdpt = (ns::pdpte*)ns::virt_from_index(511, 511, 511, pml4e_idx); 28 | auto* const pd = (ns::pde*)ns::virt_from_index(511, 511, pdpte_idx, pml4e_idx); 29 | auto* const pt = (ns::pte*)ns::virt_from_index(511, pde_idx, pdpte_idx, pml4e_idx); 30 | 31 | auto* pml4e = &pml4[pml4e_idx]; 32 | auto* pdpte = &pdpt[pdpte_idx]; 33 | auto* pde = &pd[pde_idx]; 34 | auto* pte = &pt[pte_idx]; 35 | 36 | if(!pml4e->f.present) { 37 | auto ec = core::alloc_phys_page_single(); 38 | if(!ec) return ec.status(); 39 | auto page = ec.unwrap(); 40 | pt_pages.push_back(page); 41 | pml4e->data = page->paddr() | 1 | flags.data; 42 | } 43 | 44 | if(!pdpte->f.present) { 45 | auto ec = core::alloc_phys_page_single(); 46 | if(!ec) return ec.status(); 47 | auto page = ec.unwrap(); 48 | pt_pages.push_back(page); 49 | pdpte->data = page->paddr() | 1 | flags.data; 50 | } 51 | 52 | if(!pde->f.present) { 53 | auto ec = core::alloc_phys_page_single(); 54 | if(!ec) return ec.status(); 55 | auto page = ec.unwrap(); 56 | pt_pages.push_back(page); 57 | pde->data = page->paddr() | 1 | flags.data; 58 | } 59 | 60 | // Force overwrite PTE if present 61 | pte->data = phys | 1 | flags.data; 62 | 63 | return E::OK; 64 | } 65 | 66 | Result x86_64::map_pages(page_list_head& pages, vaddr_t virt, page_flags flags) { 67 | page_list_head pt_pages; 68 | Result ec = E::OK; 69 | for(auto page : pages) { 70 | ec = map_single_page(page->paddr(), virt, flags, pt_pages); 71 | if(!ec) { 72 | // FIXME: This function does not clean up mappings properly! 
73 | core::free_phys_pages(pt_pages); 74 | return ec; 75 | } 76 | virt += arch::page_size; 77 | } 78 | return E::OK; 79 | } 80 | 81 | /** 82 | * To understand recursive virtual mappings, let's manually compute the translations. 83 | * Given a virtual address (x, y, z, w) where each number represents an index into 84 | * the respective level's table, let's compute the physical address. 85 | * 86 | * Let's assign PML4[511] -> PML4. 87 | * 88 | * (x, y, z, w) maps to what paddr? (x < 511) This is just regular page table mappings. 89 | * Level | Lookup 90 | * ------|------------------ 91 | * PML4 | PML4[511] -> PDPT 92 | * PDPT | PDPT[x ] -> PD 93 | * PD | PD [y ] -> PT 94 | * PT | PT [z ] -> PTE 95 | * (511, 511, 511, 511) maps to what physical address? 96 | * Level | Lookup 97 | * ------|------------------ 98 | * PML4 | PML4[511] -> PML4 99 | * PDPT | PML4[511] -> PML4 100 | * PD | PML4[511] -> PML4 101 | * PT | PML4[511] -> PML4 102 | * (511, 511, 511, x) maps to what paddr? (x < 511) 103 | * Level | Lookup 104 | * ------|------------------ 105 | * PML4 | PML4[511] -> PML4 106 | * PDPT | PML4[511] -> PML4 107 | * PD | PML4[511] -> PML4 108 | * PT | PML4[x ] -> PDPT 109 | * (511, 511, x, y) maps to what paddr? (x < 511) 110 | * Level | Lookup 111 | * ------|------------------ 112 | * PML4 | PML4[511] -> PML4 113 | * PDPT | PML4[511] -> PML4 114 | * PD | PML4[x ] -> PDPT 115 | * PT | PDPT[y ] -> PD 116 | * (511, x, y, z) maps to what paddr? 
(x < 511) 117 | * Level | Lookup 118 | * ------|------------------ 119 | * PML4 | PML4[511] -> PML4 120 | * PDPT | PML4[x ] -> PDPT 121 | * PD | PDPT[y ] -> PD 122 | * PT | PD [z ] -> PT 123 | */ 124 | -------------------------------------------------------------------------------- /arch/x86-64/private.h: -------------------------------------------------------------------------------- 1 | #include "arch/interface.h" 2 | #include "core/vm.h" 3 | #include "ebl/linked_list.h" 4 | #include "ebl/status.h" 5 | 6 | namespace x86_64 { 7 | 8 | //===------------------------------------------------------------------===// 9 | // Kernel virtual memory map 10 | 11 | // FIXME: Not implemented yet 12 | 13 | //===------------------------------------------------------------------===// 14 | // Internal (non-interface) arch-specific functions 15 | 16 | void init_idt(); 17 | Result map_pages(ebl::IntrusiveList& pages, vaddr_t virt, page_flags flags); 18 | 19 | } // namespace x86_64 20 | -------------------------------------------------------------------------------- /arch/x86-64/thread.cc: -------------------------------------------------------------------------------- 1 | #include "core/thread.h" 2 | 3 | #include "arch/interface.h" 4 | #include "arch/x86-64/thread.h" 5 | 6 | #define NL "\n" 7 | 8 | namespace arch { 9 | 10 | void switch_thread(core::Thread* oldthread, core::Thread* newthread) { 11 | get_percpu()->curthread = newthread; 12 | 13 | // Assumes SystemV ABI 14 | // ref: https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf 15 | // FIXME: Should I move this to a separate assembly file?
16 | asm volatile("pushf" NL // Push flags 17 | "push %%rbx" NL // Push callee-save regs 18 | "push %%rbp" NL "push %%r12" NL "push %%r13" NL "push %%r14" NL "push %%r15" NL 19 | "mov %%rsp, (%0)" NL // Save old rsp 20 | "mov (%1), %%rsp" NL // Load new rsp (dereference: the saved value, not the field's address) 21 | "pop %%r15" NL // Restore callee-save regs 22 | "pop %%r14" NL "pop %%r13" NL "pop %%r12" NL "pop %%rbp" NL "pop %%rbx" NL 23 | "popf" NL // Restore flags 24 | ::"r"(&oldthread->backend.sp), 25 | "r"(&newthread->backend.sp) : "memory"); // "memory": asm stores rsp through a pointer operand 26 | } 27 | 28 | void init_aspace(core::AddressSpace& aspace) { 29 | aspace.arch().pml4 = x86_64::read_cr3(); 30 | aspace.user_root_.base_ = 0; 31 | aspace.user_root_.size_ = 0x00007FFFFFFFFFFF; 32 | aspace.kernel_root_.base_ = 0xFFFF800000000000; 33 | aspace.kernel_root_.size_ = 0x00007FFFFFFFFFFF; 34 | } 35 | 36 | } // namespace arch 37 | -------------------------------------------------------------------------------- /arch/x86-64/thread.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "types.h" 4 | 5 | namespace arch { 6 | 7 | struct ThreadBackend { 8 | vaddr_t sp; 9 | }; 10 | 11 | } // namespace arch 12 | -------------------------------------------------------------------------------- /arch/x86-64/types.h: -------------------------------------------------------------------------------- 1 | // NOTE: System V ABI guarantees bitfield order in packed structs, 2 | // even though the C++ standard does not.
3 | 4 | #pragma once 5 | 6 | #include 7 | #include 8 | 9 | typedef uint64_t vaddr_t; 10 | typedef uint64_t paddr_t; 11 | typedef uint64_t timestamp_t; 12 | 13 | namespace arch { 14 | constexpr unsigned int page_size = 4096; 15 | } 16 | 17 | namespace x86_64 { 18 | 19 | //===------------------------------------------------------------------===// 20 | // CPU structures 21 | 22 | // ref: Figure 4-11 from Intel SDM Vol 3 4.5.4 23 | struct PACKED generic_page_entry_raw { 24 | uint8_t present : 1; 25 | uint8_t writable : 1; 26 | uint8_t user : 1; 27 | uint8_t write_through : 1; 28 | uint8_t cache_disable : 1; 29 | uint8_t accessed : 1; 30 | uint8_t dirty : 1; 31 | uint8_t pat : 1; 32 | uint8_t global : 1; 33 | uint8_t ignored0 : 3; // Bits 9-11 34 | uint64_t addr : 40; 35 | uint8_t ignored1 : 7; 36 | uint8_t protection_key : 4; 37 | uint8_t execute_disable : 1; 38 | }; 39 | 40 | union generic_page_entry { 41 | uint64_t data; 42 | generic_page_entry_raw f; 43 | }; 44 | 45 | using page_flags = generic_page_entry; 46 | using pml4e = union generic_page_entry; 47 | using pdpte = union generic_page_entry; 48 | using pde = union generic_page_entry; 49 | using pte = union generic_page_entry; 50 | static_assert(sizeof(pml4e) == 8, "Size of pml4e is incorrect!"); 51 | static_assert(sizeof(pdpte) == 8, "Size of pdpte is incorrect!"); 52 | static_assert(sizeof(pde) == 8, "Size of pde is incorrect!"); 53 | static_assert(sizeof(pte) == 8, "Size of pte is incorrect!"); 54 | 55 | // ref: Table 3-1 from Intel SDM Vol 3 3.4.5.1 56 | enum class segment_type : uint8_t { 57 | DATA_READ = 0b0000, 58 | DATA_READ_WRITE = 0b0010, 59 | CODE_EXECUTE_ACCESSED = 0b1001, 60 | CODE_EXECUTE_READ = 0b1010 61 | }; 62 | 63 | // ref: Table 3-2 from Intel SDM Vol 3 3.5 64 | enum class descriptor_type : uint8_t { 65 | TASK_GATE = 0b0101, 66 | INTERRUPT_GATE = 0b1110, 67 | TRAP_GATE = 0b1111, 68 | }; 69 | 70 | // ref: Figure 3-8 from Intel SDM Vol 3 3.4.5 71 | struct PACKED gdt_entry { 72 | uint16_t limit_low 
: 16; 73 | uint16_t base_low : 16; 74 | uint8_t base_middle : 8; 75 | uint8_t seg_type : 4; 76 | uint8_t desc_type : 1; 77 | uint8_t desc_priv : 2; 78 | uint8_t present : 1; 79 | uint8_t limit_high : 4; 80 | uint8_t available : 1; 81 | uint8_t long_mode : 1; 82 | uint8_t op_size : 1; 83 | uint8_t granularity : 1; 84 | uint8_t base_high : 8; 85 | 86 | gdt_entry() = default; 87 | 88 | gdt_entry(uint32_t base, uint32_t limit, segment_type type) { 89 | limit_low = limit & 0xFFFF; 90 | base_low = base & 0xFFFF; 91 | base_middle = (base >> 16) & 0xFF; 92 | seg_type = (uint8_t)type & 0xF; 93 | desc_type = 1; 94 | desc_priv = 00; 95 | present = 1; 96 | limit_high = (limit >> 16) & 0xF; 97 | available = 0; 98 | long_mode = 1; 99 | op_size = 0; 100 | granularity = 0; 101 | base_high = (base >> 24) & 0xFF; 102 | } 103 | }; 104 | static_assert(sizeof(struct gdt_entry) == 8, "gdt_entry is not 8 bytes long!"); 105 | 106 | // ref: Figure 2-6 from Intel SDM Vol 3 2.4.1 107 | struct PACKED gdt_ptr { 108 | uint16_t limit : 16; 109 | uint64_t base : 64; 110 | }; 111 | static_assert(sizeof(struct gdt_ptr) == 10, "gdt_ptr is not 10 bytes long!"); 112 | 113 | // ref: Figure 6-8 from Intel SDM Vol 3 6.14.1 114 | struct PACKED idt_entry { 115 | uint16_t offset_low : 16; 116 | uint16_t selector : 16; 117 | uint8_t ist : 3; 118 | uint8_t reserved0 : 5; 119 | uint8_t type : 4; 120 | uint8_t zero : 1; 121 | uint8_t dpl : 2; 122 | uint8_t present : 1; 123 | uint16_t offset_mid : 16; 124 | uint32_t offset_high : 32; 125 | uint32_t reserved1 : 32; 126 | idt_entry() = default; 127 | idt_entry(uint64_t isr_fn, descriptor_type type) { 128 | offset_low = isr_fn & 0xFFFF; 129 | selector = 0x08; 130 | ist = 0; 131 | reserved0 = 0; 132 | this->type = (uint8_t)type & 0xF; 133 | zero = 0; 134 | dpl = 0; 135 | present = 1; 136 | offset_mid = (isr_fn >> 16) & 0xFFFF; 137 | offset_high = (isr_fn >> 32) & 0xFFFFFFFF; 138 | reserved1 = 0; 139 | } 140 | }; 141 | static_assert(sizeof(struct idt_entry) == 16, 
"idt_entry is not 16 bytes long!"); 142 | 143 | // ref: Figure 6-1 from Intel SDM Vol 3 6.10 144 | struct PACKED idt_ptr { 145 | uint16_t limit : 16; 146 | uint64_t base : 64; 147 | }; 148 | static_assert(sizeof(struct idt_ptr) == 10, "idt_ptr is not 6 bytes long!"); 149 | 150 | struct PACKED int_frame { 151 | uint64_t rdi, rsi, rbp, rbx, rdx, rcx, rax; // pushed by isr_handler 152 | uint64_t r8, r9, r10, r11, r12, r13, r14, r15; // pushed by isr_handler 153 | uint64_t vector; // pushed by isrN 154 | uint64_t err_code; // pushed by isrN 155 | uint64_t rip, cs, flags; // pushed by cpu 156 | uint64_t user_sp, user_ss; // pushed by cpu if CPL changes 157 | }; 158 | 159 | //===------------------------------------------------------------------===// 160 | // Paging helper functions 161 | 162 | // Canonicalizes a virtual address 163 | constexpr vaddr_t canonicalize(vaddr_t addr) { 164 | static_assert(sizeof(vaddr_t) == sizeof(int64_t)); 165 | return ((int64_t)addr << 16) >> 16; 166 | } 167 | 168 | // Gets the virtual address from a PML4E, PDPTE, PDE, and PTE index 169 | constexpr uint64_t virt_from_index(uint16_t pml4e_index, 170 | uint16_t pdpte_index, 171 | uint16_t pde_index, 172 | uint16_t pte_index) { 173 | return canonicalize( 174 | ((uint64_t)(pml4e_index & 0x1FF) << 39) | ((uint64_t)(pdpte_index & 0x1FF) << 30) | 175 | ((uint64_t)(pde_index & 0x1FF) << 21) | ((uint64_t)(pte_index & 0x1FF) << 12)); 176 | } 177 | 178 | // Gets the index of the PML4 entry for a virtual address 179 | // ref: Figure 4-8 from Intel SDM Vol 3 4.5.4 180 | constexpr uint16_t pml4e_index(vaddr_t addr) { return (addr >> 39) & 0x1FF; } 181 | // ref: pml4e_index 182 | constexpr uint16_t pdpte_index(vaddr_t addr) { return (addr >> 30) & 0x1FF; } 183 | // ref: pml4e_index 184 | constexpr uint16_t pde_index(vaddr_t addr) { return (addr >> 21) & 0x1FF; } 185 | // ref: pml4e_index 186 | constexpr uint16_t pte_index(vaddr_t addr) { return (addr >> 12) & 0x1FF; } 187 | } // namespace x86_64 188 | 189 
| namespace arch { 190 | typedef x86_64::page_flags mmu_flags; 191 | } // namespace arch 192 | -------------------------------------------------------------------------------- /core/assert.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "arch/interface.h" 5 | 6 | void assert(bool condition, std::source_location const& loc) { 7 | if(!condition) [[unlikely]] { 8 | ebl::kerr("Assertion failed at %s:%d in \"%s\"\n", 9 | loc.file_name(), 10 | loc.line(), 11 | loc.function_name()); 12 | arch::disable_interrupts(); 13 | arch::halt(); 14 | } 15 | } 16 | 17 | void assert(bool condition, const char* message, std::source_location const& loc) { 18 | if(!condition) [[unlikely]] { 19 | ebl::kerr("Assertion failed at %s:%d in \"%s\"\nReason: %s\n", 20 | loc.file_name(), 21 | loc.line(), 22 | loc.function_name(), 23 | message); 24 | arch::disable_interrupts(); 25 | arch::halt(); 26 | } 27 | } 28 | 29 | void panic(const char* message, std::source_location const& loc) { 30 | ebl::kerr("Kernel panicked at %s:%d in \"%s\"\nReason: %s\n", 31 | loc.file_name(), 32 | loc.line(), 33 | loc.function_name(), 34 | message); 35 | arch::disable_interrupts(); 36 | arch::halt(); 37 | } 38 | -------------------------------------------------------------------------------- /core/build.cmake: -------------------------------------------------------------------------------- 1 | set(SOURCES "") 2 | 3 | # Add all sources 4 | file(GLOB SOURCES 5 | "${CMAKE_CURRENT_LIST_DIR}/*.cc" 6 | ) 7 | 8 | # Update target sources 9 | target_sources(cxkernel PRIVATE ${SOURCES}) 10 | -------------------------------------------------------------------------------- /core/include/arch/interface.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "arch/types.h" 9 | 10 | struct LoaderState; 11 | namespace core { 12 | struct 
Thread; 13 | struct AddressSpace; 14 | } // namespace core 15 | 16 | namespace arch { 17 | 18 | constexpr vaddr_t page_align_down(vaddr_t addr) { 19 | // NOTE: We expect page_size to be defined by types.h 20 | return ebl::align_down(addr, page_size); 21 | } 22 | constexpr vaddr_t page_align_up(vaddr_t addr) { return ebl::align_up(addr, page_size); } 23 | 24 | struct SpinlockBackend; 25 | struct SpinlockState; 26 | struct ThreadBackend; 27 | struct IrqVector; 28 | struct ABICOMPAT LoaderState; 29 | struct PerCPU; 30 | struct AddressSpace; 31 | typedef void (*irq_handler_t)(void*); 32 | 33 | void irq_install_handler(IrqVector vector, irq_handler_t fn); 34 | void irq_remove_handler(IrqVector vector); 35 | irq_handler_t irq_get_handler(IrqVector vector); 36 | 37 | void spin_lock(SpinlockBackend* lock); 38 | void spin_unlock(SpinlockBackend* lock); 39 | void spin_save_state(SpinlockState* state); 40 | void spin_restore_state(SpinlockState const* state); 41 | 42 | void enable_interrupts(); 43 | void disable_interrupts(); 44 | [[noreturn]] void halt(); 45 | 46 | void switch_thread(core::Thread* oldthread, core::Thread* newthread); 47 | 48 | void init(); 49 | 50 | int cpu_num(); 51 | PerCPU* get_percpu(); 52 | core::Thread* get_current_thread(); 53 | 54 | void init_aspace(core::AddressSpace& aspace); 55 | 56 | } // namespace arch 57 | 58 | // Include the arch-specific interface header last. 59 | // clang-format off 60 | #include STRINGIFY(arch/ARCH_TARGET/interface.h) 61 | // clang-format on 62 | -------------------------------------------------------------------------------- /core/include/arch/types.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | // clang-format off 7 | #include STRINGIFY(arch/ARCH_TARGET/types.h) 8 | // clang-format on 9 | 10 | // Check that the arch-specific header has defined all the required types. 
11 | static_assert(ebl::is_type_complete()); 12 | static_assert(ebl::is_type_complete()); 13 | static_assert(ebl::is_type_complete()); 14 | static_assert(ebl::is_type_complete()); 15 | -------------------------------------------------------------------------------- /core/include/core/mem.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "core/vm.h" 4 | #include "ebl/status.h" 5 | #include "loaderabi.h" 6 | 7 | namespace core { 8 | Result alloc_phys_pages(unsigned count, ebl::IntrusiveList& pages); 9 | Result alloc_phys_page_single(); 10 | void free_phys_pages(ebl::IntrusiveList& pages); 11 | void free_phys_page_single(Page* page); 12 | } // namespace core 13 | 14 | namespace g { 15 | extern decltype(loader_state_.pfndb_freelist)& pfndb_freelist; 16 | extern decltype(loader_state_.pfndb_rsrvlist)& pfndb_rsrvlist; 17 | extern decltype(loader_state_.total_phys_pgs)& total_phys_pgs; 18 | } // namespace g 19 | -------------------------------------------------------------------------------- /core/include/core/mp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "arch/interface.h" 4 | #include "core/thread.h" 5 | #include "ebl/linked_list.h" 6 | 7 | constexpr int MAX_SMP_CPUS = 32; 8 | 9 | namespace core { 10 | struct PerCPU { 11 | ebl::IntrusiveList thread_queue{}; 12 | }; 13 | } // namespace core 14 | 15 | // Global variables 16 | namespace g { 17 | extern struct core::PerCPU percpu[]; // FIXME: Fix this! 
18 | } 19 | 20 | namespace core { 21 | inline struct PerCPU& get_percpu() { return g::percpu[arch::cpu_num()]; } 22 | } // namespace core 23 | -------------------------------------------------------------------------------- /core/include/core/mutex.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include "arch/interface.h" 7 | #include "ebl/linked_list.h" 8 | 9 | namespace core { 10 | 11 | struct Thread; 12 | 13 | /** 14 | * @brief Mutex 15 | */ 16 | class CAPABILITY("mutex") Mutex final { 17 | friend class ebl::Guard; 18 | 19 | private: 20 | struct Policy { 21 | private: 22 | Mutex* lock_; 23 | 24 | public: 25 | Policy(Mutex* lock) : lock_{lock} {} 26 | void lock() ACQUIRE(lock_) NO_THREAD_SAFETY_ANALYSIS { lock_->lock(); } 27 | void unlock() RELEASE(lock_) NO_THREAD_SAFETY_ANALYSIS { 28 | if(lock_ != nullptr) [[likely]] { 29 | lock_->unlock(); 30 | lock_ = nullptr; 31 | } 32 | } 33 | }; 34 | 35 | private: 36 | enum class State { 37 | FREE, 38 | LOCKED, 39 | }; 40 | struct arch::SpinlockBackend lock_; 41 | ebl::IntrusiveList wait_queue_; 42 | Thread* owner_; 43 | State state_; 44 | 45 | private: 46 | void lock(); 47 | void unlock(); 48 | bool do_i_hold() const; 49 | 50 | public: 51 | Mutex() : lock_{}, wait_queue_{}, owner_{nullptr}, state_{State::FREE} {} 52 | }; 53 | 54 | } // namespace core 55 | -------------------------------------------------------------------------------- /core/include/core/slab.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "arch/types.h" 6 | #include "core/mutex.h" 7 | 8 | namespace kmem { 9 | void bootstrap(); 10 | void* alloc(unsigned int size); 11 | void free(void* obj); 12 | } // namespace kmem 13 | 14 | namespace kmem_private { 15 | struct slabcache; 16 | struct slab; 17 | struct objctl; 18 | 19 | using slab_list_head = ebl::IntrusiveList; 20 | using 
slabcache_list_head = ebl::IntrusiveList; 21 | 22 | static inline unsigned int num_objects_per_slab(unsigned int size, uint16_t order); 23 | constexpr unsigned int align_up(unsigned int value, uint16_t order) { 24 | // FIXME: Use ebl::align_up instead 25 | return (value + ((1 << (unsigned int)order) - 1)) & ~((1 << (unsigned int)order) - 1); 26 | } 27 | 28 | constexpr int cache_name_max = 32; 29 | constexpr uint16_t default_order = 3; 30 | static_assert((1 << default_order) <= arch::page_size, 31 | "2^default_order must be <= size of page"); 32 | 33 | struct objctl { 34 | objctl* next_free_obj; //!< Pointer to the next free object within the slab 35 | }; 36 | 37 | #define G GUARDED_BY(lock) 38 | 39 | struct slab : ebl::IntrusiveListNode { 40 | slab(slabcache* parent, objctl* objctl, void* objects) 41 | : lock{}, 42 | parent{parent}, 43 | inuse{0}, 44 | last_alloc_time{0}, 45 | objctl{objctl}, 46 | objects{objects} {} 47 | mutable core::Mutex lock; //!< Lock for this slab 48 | slabcache* const parent; //!< Parent slabcache 49 | unsigned int inuse G; //!< Number of objects in use (not free) 50 | timestamp_t last_alloc_time G; //!< Time of last allocation, used for reaping 51 | objctl* objctl G; //!< Pointer to the first free object in this slab 52 | void* const objects; //!< Pointer to the first object in this slab 53 | }; 54 | 55 | struct slabcache : ebl::IntrusiveListNode { 56 | slabcache(const char* name, unsigned int size, uint16_t order) 57 | : lock{}, 58 | freelist{}, 59 | partiallist{}, 60 | fulllist{}, 61 | name{}, 62 | order{order}, 63 | slab_objsize{size}, 64 | slab_nobjects{num_objects_per_slab(size, order)}, 65 | slab_last_color{0} { 66 | for(int i = 0; i < cache_name_max; ++i) this->name[i] = name[i]; 67 | } 68 | slabcache(unsigned int size, uint16_t order) 69 | : lock{}, 70 | freelist{}, 71 | partiallist{}, 72 | fulllist{}, 73 | name{}, 74 | order{order}, 75 | slab_objsize{size}, 76 | slab_nobjects{num_objects_per_slab(size, order)}, 77 | 
slab_last_color{0} {} 78 | mutable core::Mutex lock; //!< Lock for this slabcache 79 | slab_list_head freelist G; //!< List of free slabs 80 | slab_list_head partiallist G; //!< List of partially full slabs 81 | slab_list_head fulllist G; //!< List of full slabs 82 | /*const*/ char name[cache_name_max]; //!< Name of this slabcache 83 | const uint16_t order; //!< 2^order byte alignment of objects 84 | const unsigned int slab_objsize; //!< Size of an object (in bytes) aligned to 2^order bytes 85 | unsigned int slab_nobjects G; //!< Number of objects per slab 86 | unsigned int slab_last_color G; //!< Last color used for slab allocation 87 | }; 88 | 89 | unsigned int num_objects_per_slab(unsigned int size, uint16_t order) { 90 | return (arch::page_size - align_up(sizeof(slab), order)) / align_up(size, order); 91 | } 92 | 93 | #undef G 94 | } // namespace kmem_private 95 | -------------------------------------------------------------------------------- /core/include/core/spinlock.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include "arch/interface.h" 7 | 8 | namespace core { 9 | 10 | /** 11 | * @brief Spinlock WITH disabling interrupts. 12 | */ 13 | class CAPABILITY("mutex") Spinlock final { 14 | friend class ebl::Guard; 15 | 16 | private: 17 | // Private policy class for ebl::Guard. 
18 | struct Policy { 19 | private: 20 | arch::SpinlockState irq_flags_; 21 | Spinlock* lock_; 22 | 23 | public: 24 | Policy(Spinlock* lock) : irq_flags_{}, lock_{lock} {} 25 | void lock() ACQUIRE(lock_) NO_THREAD_SAFETY_ANALYSIS { lock_->lock(irq_flags_); } 26 | void unlock() RELEASE(lock_) NO_THREAD_SAFETY_ANALYSIS { 27 | if(lock_ != nullptr) [[likely]] { 28 | lock_->unlock(irq_flags_); 29 | lock_ = nullptr; 30 | } 31 | } 32 | }; 33 | 34 | private: 35 | struct arch::SpinlockBackend backend_; 36 | 37 | private: 38 | void lock(arch::SpinlockState& x); 39 | void unlock(arch::SpinlockState const& x); 40 | 41 | public: 42 | Spinlock() : backend_{} {} 43 | }; 44 | 45 | /** 46 | * @brief Spinlock WITHOUT disabling interrupts. 47 | */ 48 | class SpinlockNoIrq final { 49 | friend class ebl::Guard; 50 | 51 | private: 52 | // Private policy class for ebl::Guard. 53 | struct Policy { 54 | private: 55 | SpinlockNoIrq* lock_; 56 | 57 | public: 58 | Policy(SpinlockNoIrq* lock) : lock_{lock} {} 59 | void lock() { lock_->lock(); } 60 | void unlock() { 61 | if(lock_ != nullptr) [[likely]] { 62 | lock_->unlock(); 63 | lock_ = nullptr; 64 | } 65 | } 66 | }; 67 | 68 | private: 69 | struct arch::SpinlockBackend backend_; 70 | 71 | private: 72 | void lock(); 73 | void unlock(); 74 | 75 | public: 76 | SpinlockNoIrq() : backend_{} {} 77 | }; 78 | 79 | } // namespace core 80 | -------------------------------------------------------------------------------- /core/include/core/thread.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #include "arch/interface.h" 8 | #include "core/spinlock.h" 9 | #include "core/vm.h" 10 | 11 | namespace core { 12 | 13 | struct Thread; 14 | 15 | enum class ThreadState { RUN, READY, SLEEP, BLOCKED, ZOMBIE }; 16 | 17 | struct KernStack final { 18 | vaddr_t base; 19 | size_t size; 20 | }; 21 | 22 | constexpr int thread_name_max = 30; 23 | 24 | struct Thread final 
: ebl::RefCountable, ebl::IntrusiveListNode { 25 | Thread(const char* name) noexcept { 26 | for(int i = 0; i < thread_name_max; ++i) this->name[i] = name[i]; 27 | } 28 | char name[thread_name_max]; 29 | enum ThreadState state; 30 | struct KernStack stack; 31 | ebl::RefPtr address_space; 32 | struct arch::ThreadBackend backend; 33 | }; 34 | 35 | void thread_preempt(); 36 | void thread_yield(); 37 | void schedule_next_thread(Thread* oldthread); 38 | 39 | } // namespace core 40 | -------------------------------------------------------------------------------- /core/include/core/vm.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "arch/types.h" 10 | #include "core/mutex.h" 11 | #include "core/spinlock.h" 12 | #include "core/vmregion.h" 13 | #include "loaderabi.h" 14 | 15 | namespace core { 16 | struct ABICOMPAT Page; 17 | struct VmObject; 18 | struct VmRegion; 19 | struct AddressSpace; 20 | 21 | struct VmObject final : ebl::RefCountable { 22 | private: 23 | core::Spinlock lock_; 24 | ebl::IntrusiveList pages_; 25 | }; 26 | 27 | struct AddressSpace final : ebl::RefCountable { 28 | friend void arch::init_aspace(AddressSpace&); 29 | 30 | public: 31 | arch::AddressSpace& arch() { return backend_; } 32 | 33 | public: 34 | AddressSpace() noexcept : user_root_{0, 0, {}, this}, kernel_root_{0, 0, {}, this} { 35 | user_root_.flags_.capability = 0b1111; 36 | kernel_root_.flags_.capability = 0b1111; 37 | user_root_.flags_.type = VmRegionType::REGION; 38 | kernel_root_.flags_.type = VmRegionType::REGION; 39 | user_root_.flags_.is_root = 1; 40 | kernel_root_.flags_.is_root = 1; 41 | } 42 | 43 | VmRegion& get_user_root() { return user_root_; } 44 | 45 | private: 46 | VmRegion user_root_; 47 | VmRegion kernel_root_; 48 | arch::AddressSpace backend_; 49 | mutable core::Mutex lock_; 50 | }; 51 | 52 | struct ABICOMPAT PACKED Page final : 
ebl::IntrusiveListNode { 53 | uint32_t flags; 54 | core::Spinlock lock; 55 | union { 56 | vaddr_t u3_; 57 | struct AddressSpace* address_space; 58 | }; 59 | paddr_t paddr() const { return (this - g::loader_state_.pfndb_arr) * arch::page_size; } 60 | }; 61 | } // namespace core 62 | -------------------------------------------------------------------------------- /core/include/core/vmregion.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace core { 11 | struct VmObject; 12 | struct AddressSpace; 13 | struct VmRegion; 14 | 15 | enum struct VmRegionType : uint8_t { REGION = 0, MAPPING = 1 }; 16 | 17 | union VmRegionFlags { 18 | using T = uint8_t; 19 | T value; 20 | ebl::BitField cap_read; 21 | ebl::BitField cap_write; 22 | ebl::BitField cap_execute; 23 | ebl::BitField cap_specific; 24 | ebl::BitField capability; 25 | ebl::BitField type; 26 | ebl::BitField is_root; 27 | }; 28 | 29 | struct VmRegion final : ebl::RefCountable, ebl::IntrusiveListNode { 30 | friend struct AddressSpace; 31 | friend struct ebl::MakeRefPtrHelper; 32 | friend void arch::init_aspace(AddressSpace&); 33 | 34 | private: 35 | VmRegion(vaddr_t base, vaddr_t size, VmRegionFlags flags, AddressSpace* aspace) noexcept 36 | : base_{base}, size_{size}, flags_{flags}, parent_{nullptr}, aspace_{aspace} {} 37 | 38 | public: 39 | /** 40 | * @brief 41 | * 42 | * @param size 43 | * @param align_pow2 44 | * @param flags 45 | * @return Result> 46 | */ 47 | Result> allocate_vmr_compact(size_t size, 48 | uint8_t align_pow2, 49 | VmRegionFlags flags); 50 | 51 | /** 52 | * @brief 53 | * 54 | * @param size 55 | * @param align_pow2 56 | * @param flags 57 | * @return Result> 58 | */ 59 | Result> allocate_vmr_sparse(size_t size, 60 | uint8_t align_pow2, 61 | VmRegionFlags flags); 62 | 63 | /** 64 | * @brief 65 | * 66 | * @param offset 67 | * @param size 68 | * @param flags 
69 | * @param object 70 | * @param vmo_offset 71 | * @param mmu_flags 72 | * @return Result> 73 | */ 74 | Result> map_pages(vaddr_t offset, 75 | size_t size, 76 | VmRegionFlags flags, 77 | ebl::RefPtr object, 78 | vaddr_t vmo_offset, 79 | arch::mmu_flags mmu_flags); 80 | 81 | /** 82 | * @brief 83 | * 84 | * @param addr 85 | * @param size 86 | * @param flags 87 | * @return Result 88 | */ 89 | Result protect(vaddr_t addr, vaddr_t size, arch::mmu_flags flags); 90 | 91 | /** 92 | * @brief 93 | * 94 | * @return Result 95 | */ 96 | Result destroy(); 97 | 98 | auto begin() { return children_.begin(); } 99 | auto end() { return children_.end(); } 100 | 101 | auto get_base() const { return base_; } 102 | auto get_size() const { return size_; } 103 | 104 | private: 105 | /** 106 | * @brief Find the child region that is immediately above the given offset. 107 | * 108 | * @param offset Offset within this region. 109 | * @return ebl::RefPtr A reference to the child region, or nullptr if none. 110 | */ 111 | ebl::RefPtr find_child_above(vaddr_t offset); 112 | 113 | /** 114 | * @brief Check if the given offset and size overlaps with any child region. 115 | * 116 | * @param offset Offset within this region. 117 | * @param size Size of the region to check. 118 | */ 119 | bool does_overlap_child(vaddr_t offset, size_t size); 120 | 121 | /** 122 | * @brief Iterate over all holes in this region. 123 | * 124 | * @tparam T Type bool(vaddr_t, size_t) that returns true to continue, false to stop. 125 | * @param func The function to call for each hole, with type T. 
126 | */ 127 | template 128 | void foreach_hole(T func) { 129 | // TODO: Type check T using this (need to implement these templates first) 130 | // static_assert(is_convertible_v>, "Error..."); 131 | vaddr_t offset = 0; 132 | for(auto child : children_) { 133 | if(child->base_ > this->base_ + offset) { 134 | if(!func(child, offset, child->base_ - this->base_ - offset)) return; 135 | } 136 | offset = child->base_ + child->size_; 137 | } 138 | if(this->base_ + this->size_ > this->base_ + offset) { 139 | func(nullptr, offset, this->base_ + this->size_ - this->base_ - offset); 140 | } 141 | } 142 | 143 | private: 144 | /// The (absolute) base address of this region. 145 | vaddr_t base_; 146 | /// The size of this region. 147 | vaddr_t size_; 148 | /// Region capabilities and type. 149 | VmRegionFlags flags_; 150 | /// The parent region of this region, or nullptr if this is the root. 151 | VmRegion* parent_; 152 | /// The address space this region belongs to. 153 | AddressSpace* aspace_; 154 | /// The object backing this region, or nullptr if this is a hole. 155 | ebl::RefPtr object_; 156 | /// Ordered list of child regions, sorted by base address. 
// I love boba :)
constexpr uint32_t LOADER_ABI_MAGIC_START = 0xCAFEB0BA;
constexpr uint32_t LOADER_ABI_MAGIC_END = 0xBADBEEEF;

// Handoff block passed from the loader to the kernel. kernel_entry
// (core/main.cc) copies it by value and validates both magic fields.
// PACKED + ABICOMPAT: the layout must match exactly on both sides.
struct PACKED ABICOMPAT LoaderState final {
   // Must equal LOADER_ABI_MAGIC_START or the kernel panics.
   uint32_t magic_start;
   // NOTE(review): presumably the mapped address of the kernel ELF image --
   // confirm against the loader's main.cc.
   vaddr_t kernel_elf;
   // PFN database lists of reserved and free physical pages. kernel_entry
   // asserts that their sizes sum to total_phys_pgs.
   ebl::IntrusiveList<core::Page> pfndb_rsrvlist;
   ebl::IntrusiveList<core::Page> pfndb_freelist;
   paddr_t total_phys_pgs;
   union {
      // Base of the Page array; Page::paddr() derives a physical address
      // from a Page's index into this array.
      core::Page* pfndb_arr;
      // NOTE(review): presumably keeps the slot pointer-width-stable across
      // the ABI boundary -- confirm.
      vaddr_t unused0_;
   };
   // Architecture-specific handoff state.
   arch::LoaderState arch_state;
   // Must equal LOADER_ABI_MAGIC_END or the kernel panics.
   uint32_t magic_end;
};
namespace g {
// Kernel-side copy of the loader-provided state (see loaderabi.h).
LoaderState loader_state_{};
}  // namespace g

// Bounds of the .init_array section, provided by the linker script.
extern "C" void (*init_array_start_)();
extern "C" void (*init_array_end_)();

// C++ entry point of the kernel proper. `state_ptr` is the address of the
// LoaderState struct prepared by the loader.
extern "C" void kernel_entry(uint64_t state_ptr) {
   // Run .init_array
   for(auto* fn = &init_array_start_; fn != &init_array_end_; fn++) (*fn)();

   // Kick off console
   platform::console_init();
   platform::console_puts("Hello, world (from the kernel)!\n");

   // Save state and check integrity
   // NOTE(review): this copies the struct -- including the intrusive list
   // heads -- by value; confirm IntrusiveList's copy/assignment semantics
   // keep the node links valid after relocation.
   g::loader_state_ = *(LoaderState*)state_ptr;
   assert(g::loader_state_.magic_start == LOADER_ABI_MAGIC_START &&
             g::loader_state_.magic_end == LOADER_ABI_MAGIC_END,
          "Invalid loader struct magics (possible ABI incompatibility)");

   // Traverse pfndb to check PFN DB integrity: every physical page must be
   // on exactly one of the free/reserved lists.
   {
      const auto x = (uint64_t)g::pfndb_freelist.size();
      const auto y = (uint64_t)g::pfndb_rsrvlist.size();
      assert(x + y == g::total_phys_pgs, "PFN DB size mismatch, possible corruption");
   }

   // Architecture-specific init
   arch::init();

   // Platform-specific init
   platform::init();

   // Enable interrupts and hand-off to the scheduler (FIXME: implement)
   arch::enable_interrupts();

   // Trip a breakpoint interrupt to exercise the IDT, then halt.
   asm volatile("int $0x3");
   arch::halt();
}
// Acquire the mutex, sleeping if it is already held.
//
// The internal spinlock lock_ protects state_, owner_ and wait_queue_.
void Mutex::lock() {
   arch::spin_lock(&lock_);
   if(state_ == State::FREE) {
      // If lock is free, lock it
      state_ = State::LOCKED;
      owner_ = arch::get_current_thread();
      arch::spin_unlock(&lock_);
   } else /* if(state_ == State::LOCKED) */ {
      // Atomically put the thread to sleep -- be careful of wakeup races.
      // We mark ourselves BLOCKED and enqueue on the wait queue while still
      // holding lock_, then drop it and switch away.
      auto* cur_th_ptr = arch::get_current_thread();
      cur_th_ptr->state = ThreadState::BLOCKED;
      wait_queue_.push_back(cur_th_ptr);
      arch::spin_unlock(&lock_);
      // NOTE(review): if unlock() on another CPU pops this thread and makes
      // it READY between the spin_unlock above and the context switch below,
      // the thread could be picked up while still running here -- confirm
      // schedule_next_thread()/arch::switch_thread tolerate that window.
      schedule_next_thread(cur_th_ptr);
      arch::spin_lock(&lock_);
      // We must check this condition here, otherwise a race occurs
      // (unlock() hands the mutex over WITHOUT clearing state_/owner_, so
      // at this point owner_ still names the previous holder).
      assert(!do_i_hold());
      // Set the lock owner now
      owner_ = arch::get_current_thread();
      arch::spin_unlock(&lock_);
   }
}

// Release the mutex. If waiters exist, ownership is handed directly to the
// next waiter (state_ stays LOCKED); otherwise the mutex becomes FREE.
void Mutex::unlock() {
   // Only the lock holder can release the lock
   assert(do_i_hold(), "");
   // Otherwise, start unlock procedure
   arch::spin_lock(&lock_);
   if(wait_queue_.empty()) {
      // If we're the last holder, unlock the lock (so owner becomes NULL)
      state_ = State::FREE;
      owner_ = nullptr;
   } else {
      // Otherwise, wake up the next thread in the queue
      // and pass on the lock without unlocking it
      // (the woken thread installs itself as owner_ inside lock()).
      auto next_th = wait_queue_.pop_front();
      next_th->state = ThreadState::READY;
      core::get_percpu().thread_queue.push_back(next_th);
   }
   arch::spin_unlock(&lock_);
}

// True iff the calling thread currently owns this mutex.
bool Mutex::do_i_hold() const { return owner_ == arch::get_current_thread(); }
namespace g {
// Convenience aliases into the loader-provided state (see loaderabi.h).
auto& pfndb_freelist = loader_state_.pfndb_freelist;
auto& pfndb_rsrvlist = loader_state_.pfndb_rsrvlist;
auto& total_phys_pgs = loader_state_.total_phys_pgs;
}  // namespace g

// Allocate a single physical page from the PFN database free list.
// Returns E::OUT_OF_MEMORY when the free list is exhausted.
Result<core::Page*> core::alloc_phys_page_single() {
   auto* node = g::pfndb_freelist.pop_front();
   if(node == nullptr) return E::OUT_OF_MEMORY;
   return node;
}

// Allocate `count` physical pages into `pages`, all-or-nothing.
// `pages` is reset to empty on entry (previous contents are discarded).
// On failure, every page grabbed so far is returned to the free list and
// E::OUT_OF_MEMORY is reported.
Result<void> core::alloc_phys_pages(unsigned count, ebl::IntrusiveList<Page>& pages) {
   pages = {};
   for(unsigned i = 0; i < count; i++) {
      auto* node = g::pfndb_freelist.pop_front();
      if(node == nullptr) {
         // Roll back: drain everything allocated so far to the free list.
         while(!pages.empty()) {
            g::pfndb_freelist.push_front(pages.pop_front());
         }
         return E::OUT_OF_MEMORY;
      }
      pages.push_front(node);
   }
   return E::OK;
}

// Return every page in `pages` to the free list, draining the list.
void core::free_phys_pages(ebl::IntrusiveList<Page>& pages) {
   while(!pages.empty()) {
      auto* page = pages.pop_front();
      g::pfndb_freelist.push_back(page);
   }
}

// Return a single page to the free list; nullptr is tolerated as a no-op.
void core::free_phys_page_single(Page* page) {
   // FIXME: Add more sophisticated checks
   if(page != nullptr) {
      g::pfndb_freelist.push_back(page);
   }
}
13 | auto* newthread = get_percpu().thread_queue.pop_front(); 14 | assert(newthread != nullptr); 15 | assert(oldthread != nullptr); 16 | arch::switch_thread(oldthread, newthread); 17 | } 18 | -------------------------------------------------------------------------------- /core/slab.cc: -------------------------------------------------------------------------------- 1 | #include "core/slab.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "core/mutex.h" 9 | 10 | using namespace kmem; 11 | using namespace kmem_private; 12 | 13 | // The slabcache descriptor to allocate slabcaches (For bootstrapping) 14 | static slabcache slabcache_cache_node{ 15 | "slabcache cache", align_up(sizeof(slabcache), default_order), default_order}; 16 | static slabcache* const slabcache_cache = &slabcache_cache_node; 17 | core::Mutex cache_list_lock{}; 18 | slabcache_list_head cache_list GUARDED_BY(cache_list_lock){}; 19 | 20 | static slabcache* find_best_cache(unsigned int size); 21 | static slab* find_slab_from_obj(void* obj); 22 | static slab* slab_create(slabcache* cache); 23 | static slabcache* cache_create(const char* name, unsigned int size, uint16_t order); 24 | static void* cache_alloc(slabcache* cache); 25 | static void cache_free(slab* node, objctl* obj); 26 | static void cache_destroy(slabcache* cache); 27 | static void cache_reap(slabcache* cache); 28 | 29 | //===----------------------------------------------------------------------===// 30 | 31 | slabcache* find_best_cache(unsigned int size) REQUIRES(cache_list_lock) { 32 | for(auto cache : cache_list) { 33 | // FIXME: Invent a better way to find best-fit cache (instead of exact match) 34 | if(cache->slab_objsize == size) return cache; 35 | } 36 | return nullptr; 37 | } 38 | 39 | slab* find_slab_from_obj(void* obj) { return (slab*)arch::page_align_down((vaddr_t)obj); } 40 | 41 | /** 42 | * @brief Set up a new page as a slab and requires cache->lock to be held 43 | * 44 | * @param cache The parent 
slabcache 45 | * @return The slab's list node struct 46 | */ 47 | slab* slab_create(slabcache* cache) REQUIRES(cache->lock) { 48 | // Page Layout: 49 | // [struct slab_node] [color padding] [objctl] ... [objctl] [end padding] 50 | // ^ ^ 51 | // This struct size is aligned to 2^order bytes 52 | 53 | void* page = arch::grow_heap(1); 54 | if(!page) return nullptr; 55 | 56 | // Calculate the color padding 57 | const auto slab_node_sz = align_up(sizeof(slab), cache->order); 58 | const auto free_space = arch::page_size - slab_node_sz; 59 | const auto padding = free_space - cache->slab_nobjects * cache->slab_objsize; 60 | const auto max_color = padding / (1 << cache->order); 61 | const auto color = (cache->slab_last_color + 1) % max_color; 62 | const auto color_padding = color * (1 << cache->order); 63 | 64 | // Update the color tracking 65 | cache->slab_last_color = color; 66 | 67 | // Initialize the objctl structs 68 | vaddr_t ptr = (vaddr_t)page + slab_node_sz + color_padding; 69 | for(unsigned int i = 0; i < cache->slab_nobjects; i++) { 70 | objctl* obj = (objctl*)ptr; 71 | ptr += cache->slab_objsize; 72 | if(cache->slab_nobjects == i + 1) 73 | obj->next_free_obj = nullptr; 74 | else 75 | obj->next_free_obj = (objctl*)ptr; 76 | } 77 | 78 | // Initialize the slab struct 79 | auto* node = static_cast(page); 80 | node = new(node) slab{cache, (objctl*)ptr, (void*)ptr}; 81 | 82 | return node; 83 | } 84 | 85 | slabcache* cache_create(const char* name, unsigned int size, uint16_t order) { 86 | auto* node = (slabcache*)cache_alloc(slabcache_cache); 87 | if(!node) return nullptr; 88 | node = new(node) slabcache{size, order}; 89 | // Copy over the name 90 | ebl::memcpy(&node->name, name, cache_name_max); 91 | node->name[cache_name_max - 1] = '\0'; 92 | // Add the new cache to the cache list 93 | { 94 | ebl::Guard guard_{&cache_list_lock}; 95 | cache_list.push_back(node); 96 | } 97 | return node; 98 | } 99 | 100 | void* cache_alloc(slabcache* cache) { 101 | ebl::Guard 
guard_cache_{&cache->lock}; 102 | slab* node = nullptr; 103 | if(cache->partiallist.empty()) { 104 | // No partial slabs, grab one from the free list 105 | node = cache->freelist.pop_front(); 106 | cache->partiallist.push_back(node); 107 | } else if(cache->freelist.empty()) { 108 | // No free slabs, create a new one 109 | node = slab_create(cache); 110 | if(!node) return nullptr; 111 | cache->partiallist.push_back(node); 112 | } else { 113 | // Grab the first partial slab 114 | node = cache->partiallist.pop_front(); 115 | } 116 | 117 | // Grab the first free object from the slab 118 | ebl::Guard guard_slab_{&node->lock}; 119 | if(node->inuse + 1 == cache->slab_nobjects) cache->fulllist.push_back(node); 120 | node->inuse++; 121 | node->last_alloc_time = 0; 122 | node->objctl = node->objctl->next_free_obj; 123 | 124 | return (void*)node->objctl; 125 | } 126 | 127 | void cache_free(slab* slab, objctl* obj) { 128 | ebl::Guard guard1_{&slab->parent->lock}; 129 | ebl::Guard guard2_{&slab->lock}; 130 | if(slab->inuse == slab->parent->slab_nobjects) { 131 | // Slab was full, move to the partial list 132 | slab->parent->fulllist.remove(slab); 133 | slab->parent->partiallist.push_back(slab); 134 | } else if(slab->inuse == 1) { 135 | // Slab will be empty, move to the free list 136 | slab->parent->partiallist.remove(slab); 137 | slab->parent->freelist.push_back(slab); 138 | } else { 139 | // Slab is still partially full, do nothing 140 | } 141 | slab->inuse--; 142 | obj->next_free_obj = slab->objctl; 143 | slab->objctl = obj; 144 | } 145 | 146 | void cache_destroy(slabcache* cache) { 147 | (void)cache; 148 | (void)cache_reap; 149 | } 150 | 151 | void cache_reap(slabcache* cache) { 152 | (void)cache; 153 | (void)cache_destroy; 154 | } 155 | 156 | //===----------------------------------------------------------------------===// 157 | 158 | void kmem::bootstrap() NO_THREAD_SAFETY_ANALYSIS { 159 | // Init cache for struct slabcache 160 | 
cache_list.push_back(&slabcache_cache_node); 161 | } 162 | 163 | void* kmem::alloc(unsigned int size) { 164 | size = align_up(size, default_order); 165 | struct slabcache* cache = nullptr; 166 | { 167 | ebl::Guard guard_{&cache_list_lock}; 168 | cache = find_best_cache(size); 169 | } 170 | if(cache == nullptr) { 171 | cache = cache_create("", size, default_order); 172 | } 173 | return cache_alloc(cache); 174 | } 175 | 176 | void kmem::free(void* obj) { 177 | assert(obj != nullptr, "Tried to free nullptr"); 178 | slab* node = find_slab_from_obj(obj); 179 | assert(arch::is_heap_address((vaddr_t)node), "Object is not in the heap"); 180 | slabcache* cache = node->parent; 181 | assert(align_up((vaddr_t)obj, cache->order) == (vaddr_t)obj, 182 | "Object is not within the expected alignment"); 183 | assert((vaddr_t)obj >= (vaddr_t)node->objects, "Object is not within the expected slab range"); 184 | cache_free(node, (objctl*)obj); 185 | } 186 | -------------------------------------------------------------------------------- /core/spinlock.cc: -------------------------------------------------------------------------------- 1 | #include "core/spinlock.h" 2 | 3 | using namespace core; 4 | 5 | void Spinlock::lock(arch::SpinlockState& x) { 6 | arch::spin_save_state(&x); 7 | arch::spin_lock(&backend_); 8 | } 9 | 10 | void Spinlock::unlock(arch::SpinlockState const& x) { 11 | arch::spin_restore_state(&x); 12 | arch::spin_unlock(&backend_); 13 | } 14 | 15 | void SpinlockNoIrq::lock() { arch::spin_lock(&backend_); } 16 | 17 | void SpinlockNoIrq::unlock() { arch::spin_unlock(&backend_); } 18 | -------------------------------------------------------------------------------- /core/thread.cc: -------------------------------------------------------------------------------- 1 | #include "core/thread.h" 2 | 3 | #include 4 | 5 | #include "core/mp.h" 6 | #include "core/spinlock.h" 7 | 8 | using namespace core; 9 | 10 | void core::thread_preempt() { 11 | // FIXME: Improve this alongside 
// Carve a new child REGION out of the first hole large enough to hold
// `size` bytes at the requested alignment (first-fit scan over the holes
// reported by foreach_hole).
Result<RefPtr<VmRegion>> VmRegion::allocate_vmr_compact(size_t size,
                                                        uint8_t align_pow2,
                                                        VmRegionFlags flags) {
   // Ensure child VMR capability is a subset of parent VMR capability.
   if((flags.capability & flags_.capability) != flags.capability) {
      return E::PERMISSION_DENIED;
   }

   // Ensure this is a region VMR.
   if(flags.type != VmRegionType::REGION) {
      return E::INVALID_ARGUMENT;
   }

   bool found_hole = false;
   vaddr_t new_hole_base;
   size_t new_hole_size;

   // The child VMR to insert before, to keep sorted order of children_
   VmRegion* child = nullptr;

   // TODO: Currently, this is just a first-fit allocator.
   // NOTE(review): 1U << align_pow2 overflows for align_pow2 >= 32 --
   // confirm callers never pass such values.
   const vaddr_t align = ebl::max(1U << align_pow2, arch::page_size);
   assert(size % arch::page_size == 0, "Size is not page-aligned!");
   foreach_hole([&](VmRegion* x, vaddr_t hole_base, size_t hole_size) -> bool {
      // Align the hole base up; skip the hole (return true = keep
      // scanning) if the aligned request no longer fits inside it.
      auto aligned_base = ebl::align_up(hole_base, align);
      if(aligned_base + size > hole_base + hole_size) return true;
      // Found it.
      new_hole_base = aligned_base;
      new_hole_size = size;
      found_hole = true;
      child = x;  // child immediately above the hole; nullptr for tail hole
      return false;
   });

   if(!found_hole) return E::ALLOCATION_FAILED;

   // Allocate the new VMR and insert it into the children_ list
   // NOTE(review): foreach_hole reports hole offsets RELATIVE to this
   // region's base, while VmRegion::base_ is documented as absolute --
   // confirm whether this should be this->base_ + new_hole_base.
   VmRegionFlags new_flags{};
   new_flags.type = VmRegionType::REGION;
   new_flags.capability = flags.capability;
   new_flags.is_root = 0;
   auto result = MakeRefPtr<VmRegion>(new_hole_base, new_hole_size, new_flags, aspace_);
   if(!result) return result.status();
   auto new_vmr = result.unwrap();
   new_vmr->parent_ = this;
   // A nullptr anchor means the hole was at the tail -> append.
   if(child == nullptr) {
      children_.push_back(new_vmr);
   } else {
      children_.insert_before(child, new_vmr);
   }
   return new_vmr;
}
child; 119 | } 120 | return nullptr; 121 | } 122 | 123 | bool VmRegion::does_overlap_child(vaddr_t offset, size_t size) { 124 | auto end = this->base_ + offset + size; 125 | auto child = find_child_above(offset); 126 | if(!child) return false; 127 | return child->base_ <= end; 128 | } 129 | -------------------------------------------------------------------------------- /lib/ebl/assert.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | void assert(bool condition, std::source_location const& l = std::source_location::current()); 6 | void assert(bool condition, 7 | const char* message, 8 | std::source_location const& l = std::source_location::current()); 9 | void panic(const char* message, std::source_location const& l = std::source_location::current()); 10 | -------------------------------------------------------------------------------- /lib/ebl/atomic.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ebl/util.h" 4 | 5 | // Implement typesafe C++ atomics on integral types 6 | // A lot of the functions are re-implementations from libcxx 7 | // ref: https://github.com/llvm-mirror/libcxx 8 | 9 | namespace ebl { 10 | enum memory_order : int { 11 | memory_order_relaxed = __ATOMIC_RELAXED, 12 | memory_order_acquire = __ATOMIC_ACQUIRE, 13 | memory_order_release = __ATOMIC_RELEASE, 14 | memory_order_acq_rel = __ATOMIC_ACQ_REL, 15 | memory_order_seq_cst = __ATOMIC_SEQ_CST, 16 | }; 17 | 18 | template 19 | class atomic { 20 | static_assert(is_integral_v && !is_same_v, 21 | "Atomic only supports integral types (except bool)"); 22 | 23 | public: 24 | void store(T desr, memory_order m = memory_order_seq_cst) volatile noexcept { 25 | __atomic_store_n(&value_, desr, m); 26 | } 27 | void store(T desr, memory_order m = memory_order_seq_cst) noexcept { 28 | __atomic_store_n(&value_, desr, m); 29 | } 30 | T load(memory_order m = 
memory_order_seq_cst) const volatile noexcept { 31 | return __atomic_load_n(&value_, m); 32 | } 33 | T load(memory_order m = memory_order_seq_cst) const noexcept { 34 | return __atomic_load_n(&value_, m); 35 | } 36 | operator T() const volatile noexcept { return load(); } 37 | operator T() const noexcept { return load(); } 38 | T exchange(T desr, memory_order m = memory_order_seq_cst) volatile noexcept { 39 | return __atomic_exchange_n(&value_, desr, m); 40 | } 41 | T exchange(T desr, memory_order m = memory_order_seq_cst) noexcept { 42 | return __atomic_exchange_n(&value_, desr, m); 43 | } 44 | bool compare_exchange_weak(T& expc, 45 | T desr, 46 | memory_order s, 47 | memory_order f) volatile noexcept { 48 | return __atomic_compare_exchange_n(&value_, &expc, desr, true, s, f); 49 | } 50 | bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) noexcept { 51 | return __atomic_compare_exchange_n(&value_, &expc, desr, true, s, f); 52 | } 53 | bool compare_exchange_strong(T& expc, 54 | T desr, 55 | memory_order s, 56 | memory_order f) volatile noexcept { 57 | return __atomic_compare_exchange_n(&value_, &expc, desr, false, s, f); 58 | } 59 | bool compare_exchange_strong(T& expc, T desr, memory_order s, memory_order f) noexcept { 60 | return __atomic_compare_exchange_n(&value_, &expc, desr, false, s, f); 61 | } 62 | T fetch_add(T op, memory_order m = memory_order_seq_cst) volatile noexcept { 63 | return __atomic_fetch_add(&value_, op, m); 64 | } 65 | T fetch_add(T op, memory_order m = memory_order_seq_cst) noexcept { 66 | return __atomic_fetch_add(&value_, op, m); 67 | } 68 | T fetch_sub(T op, memory_order m = memory_order_seq_cst) volatile noexcept { 69 | return __atomic_fetch_sub(&value_, op, m); 70 | } 71 | T fetch_sub(T op, memory_order m = memory_order_seq_cst) noexcept { 72 | return __atomic_fetch_sub(&value_, op, m); 73 | } 74 | T fetch_and(T op, memory_order m = memory_order_seq_cst) volatile noexcept { 75 | return __atomic_fetch_and(&value_, 
op, m); 76 | } 77 | T fetch_and(T op, memory_order m = memory_order_seq_cst) noexcept { 78 | return __atomic_fetch_and(&value_, op, m); 79 | } 80 | T fetch_or(T op, memory_order m = memory_order_seq_cst) volatile noexcept { 81 | return __atomic_fetch_or(&value_, op, m); 82 | } 83 | T fetch_or(T op, memory_order m = memory_order_seq_cst) noexcept { 84 | return __atomic_fetch_or(&value_, op, m); 85 | } 86 | T fetch_xor(T op, memory_order m = memory_order_seq_cst) volatile noexcept { 87 | return __atomic_fetch_xor(&value_, op, m); 88 | } 89 | T fetch_xor(T op, memory_order m = memory_order_seq_cst) noexcept { 90 | return __atomic_fetch_xor(&value_, op, m); 91 | } 92 | 93 | public: 94 | atomic() noexcept = default; 95 | atomic(T value) noexcept : value_{value} {} 96 | DELETE_COPY(atomic); 97 | DELETE_MOVE(atomic); 98 | atomic& operator=(const atomic&) volatile = delete; 99 | atomic& operator=(atomic&&) volatile = delete; 100 | 101 | private: 102 | T value_; 103 | }; 104 | 105 | inline void atomic_thread_fence(memory_order m) noexcept { __atomic_thread_fence(m); } 106 | 107 | inline void atomic_signal_fence(memory_order m) noexcept { __atomic_signal_fence(m); } 108 | } // namespace ebl 109 | -------------------------------------------------------------------------------- /lib/ebl/bit.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | 5 | namespace ebl { 6 | 7 | /** 8 | * @brief Implements platform-independent bitfields. 9 | * 10 | * @tparam T The containing type for the bitfield. 11 | * @tparam Index The bit offset of the field in the containing type. 12 | * @tparam Bits The number of bits this field occupies. 13 | */ 14 | template 15 | struct BitField { 16 | private: 17 | static constexpr int Size = sizeof(T) * 8; 18 | // Underlying type of T is T2 19 | // TODO: This needs extra error checking. 
20 | typedef ebl::conditional_t< 21 | Size == 0, 22 | void, 23 | ebl::conditional_t< 24 | Size <= 8, 25 | uint8_t, 26 | ebl::conditional_t< 27 | Size <= 16, 28 | uint16_t, 29 | ebl::conditional_t>>>> 32 | T2; 33 | // Mask of Bits bits starting at Index 34 | static constexpr T2 Mask = (1u << Bits) - 1u; 35 | 36 | public: 37 | // Assignment operator from T to T2 38 | BitField operator=(T value) { 39 | value_ = (value_ & ~(Mask << Index)) | (((T2)value & Mask) << Index); 40 | return *this; 41 | } 42 | // Get value as T 43 | operator T() const { return (T)((value_ >> Index) & Mask); } 44 | // Get value as bool 45 | explicit operator bool() const { return value_ & (Mask << Index); } 46 | 47 | private: 48 | // Bitfield underlying value T2 49 | T2 value_; 50 | }; 51 | 52 | } // namespace ebl 53 | -------------------------------------------------------------------------------- /lib/ebl/cxxabi.cc: -------------------------------------------------------------------------------- 1 | // Required functions for C++ ABI to link against 2 | // Most of these are just dummy functions that do nothing 3 | 4 | unsigned __atexit_funcs_count = 0; 5 | void *__dso_handle = 0; 6 | 7 | namespace __cxxabiv1 { 8 | 9 | extern "C" int __cxa_atexit(void (*f)(void *), void *objptr, void *dso) { 10 | (void)f; 11 | (void)objptr; 12 | (void)dso; 13 | return 0; 14 | } 15 | 16 | extern "C" void __cxa_finalize(void *f) { (void)f; } 17 | 18 | void __cxa_pure_virtual() { 19 | // Do nothing 20 | for(;;) 21 | ; 22 | } 23 | 24 | // The ABI requires a 64-bit type. 
// The ABI requires a 64-bit type.
__extension__ typedef int __guard __attribute__((mode(__DI__)));

extern "C" int __cxa_guard_acquire(__guard *);
extern "C" void __cxa_guard_release(__guard *);
extern "C" void __cxa_guard_abort(__guard *);

// Guard functions for function-local static initialization.
// These are the non-thread-safe dummy versions (see file header): no
// atomics and no blocking. acquire returns nonzero ("run the initializer")
// iff the first byte of the guard is still clear.
extern "C" int __cxa_guard_acquire(__guard *g) { return !*(char *)(g); }

// Mark initialization complete by setting the guard's first byte.
extern "C" void __cxa_guard_release(__guard *g) { *(char *)g = 1; }

// Called if the initializer throws; nothing to undo in this dummy version.
extern "C" void __cxa_guard_abort(__guard *g) { (void)g; }
#include "ebl/util.h"

// NOTE(review): template parameter lists were lost in extraction; the
// <...> forms below are reconstructions — verify against the original.

// Kernel heap hooks used by RefPtr/MakeRefPtr for object storage.
namespace kmem {
void* alloc(unsigned int size);
void free(void* obj);
}  // namespace kmem

namespace ebl {

/**
 * @brief A reference-counted pointer to an object. Similar to std::shared_ptr.
 *
 * @tparam T Type of the object.
 */
template <typename T>
class RefPtr;

template <typename T>
class IntrusiveList;

/**
 * @brief Intrusively reference-counted object. Objects wishing to be ref-counted
 *        (i.e., be used with RefPtr) should inherit from this class.
 *
 * @tparam T Unused type parameter to ensure base class uniqueness (i.e.,
 *         unique ref-countable objects won't be upcasted to the same base class)
 */
template <typename T>
class RefCountable {
    template <typename U>
    friend class RefPtr;

    template <typename U>
    friend class IntrusiveList;

public:
    DELETE_COPY(RefCountable);
    DELETE_MOVE(RefCountable);
    RefCountable() noexcept : ref_count_(0) {}
    ~RefCountable() noexcept {}
    // Diagnostic read of the count; relaxed is sufficient for observation.
    int32_t ref_count() const noexcept { return ref_count_.load(memory_order_relaxed); }

private:
    // Increment may be relaxed: the caller already holds a reference that
    // keeps the object alive.
    void add_ref() const noexcept { ref_count_.fetch_add(1, memory_order_relaxed); }
    // Returns true when the caller dropped the LAST reference and must
    // destroy the object. The release decrement plus the acquire fence on
    // the 1->0 transition makes all prior writes visible to the destroyer.
    bool release() const noexcept {
        auto ref = ref_count_.fetch_sub(1, memory_order_release);
        if(ref == 1) {
            atomic_thread_fence(memory_order_acquire);
            return true;
        }
        return false;
    }
    // Used once when adopting a freshly constructed object (count 0 -> 1).
    void adopt() const noexcept { ref_count_.store(1, memory_order_relaxed); }

private:
    mutable atomic<int32_t> ref_count_;
};

/**
 * @brief Wraps an RefPtr around a newly-allocated RefCountable object.
 *
 * @tparam T Object type.
 * @param ptr Pointer to the object.
 * @return RefPtr Reference-counted pointer to the object.
 */
template <typename T>
RefPtr<T> AdoptRef(T* ptr);

template <typename T>
class RefPtr final {
public:
    constexpr RefPtr() noexcept : ptr_{nullptr} {}
    constexpr RefPtr(ebl::nullptr_t) noexcept : ptr_{nullptr} {}
    // Construct from raw pointer, pointer must already be adopted
    RefPtr(T* ptr) noexcept : ptr_{ptr} {
        if(ptr_ != nullptr) {
            ptr_->add_ref();
        }
    }
    // Copy constructor
    RefPtr(RefPtr const& other) : RefPtr{other.ptr_} {}
    // Move constructor
    RefPtr(RefPtr&& other) noexcept : ptr_{other.ptr_} { other.ptr_ = nullptr; }
    // Copy assignment. Safe under self-assignment: the new reference is
    // taken before the old one is released.
    RefPtr& operator=(RefPtr const& other) {
        if(other.ptr_ != nullptr) other.ptr_->add_ref();
        T* old = ptr_;
        ptr_ = other.ptr_;
        destroy(old);
        return *this;
    }
    // Move assignment (use move-construct + swap idiom)
    RefPtr& operator=(RefPtr&& other) noexcept {
        RefPtr{ebl::move(other)}.swap(*this);
        return *this;
    }
    // Destructor: drops this reference, destroying the object on 1 -> 0.
    ~RefPtr() {
        destroy(ptr_);
        ptr_ = nullptr;
    }
    // Swap pointers (this does not affect reference count)
    void swap(RefPtr& other) noexcept {
        T* tmp = ptr_;
        ptr_ = other.ptr_;
        other.ptr_ = tmp;
    }
    T* get() const { return ptr_; }
    T& operator*() const { return *ptr_; }
    T* operator->() const { return ptr_; }
    explicit operator bool() const { return !!ptr_; }
    bool operator==(RefPtr const& other) const { return ptr_ == other.ptr_; }
    bool operator!=(RefPtr const& other) const { return ptr_ != other.ptr_; }
    bool operator==(ebl::nullptr_t) const { return ptr_ == nullptr; }
    bool operator!=(ebl::nullptr_t) const { return ptr_ != nullptr; }

private:
    friend RefPtr<T> AdoptRef<T>(T*);
    enum AdoptTag { ADOPT };
    // Adopting constructor: takes ownership of a count-0 object, setting
    // its count to exactly 1 (no add_ref).
    RefPtr(T* ptr, AdoptTag) : ptr_{ptr} {
        if(ptr_ != nullptr) {
            ptr_->adopt();
        }
    }
    // Releases one reference and frees the object on the last release.
    static void destroy(T* ptr) {
        if(ptr != nullptr && ptr->release()) {
            // The #if below is to avoid valgrind complaining about
            // mismatched new/delete pairs.
            // NOTE(review): freestanding uses `delete`, hosted destroys
            // manually and calls kmem::free — confirm this polarity matches
            // the hosted test harness's allocator.
#if __STDC_HOSTED__ == 0
            delete ptr;
#else
            ptr->~T();
            kmem::free(ptr);
#endif
        }
    }

private:
    T* ptr_;
};

template <typename T>
inline RefPtr<T> AdoptRef(T* ptr) {
    return RefPtr<T>(ptr, RefPtr<T>::ADOPT);
}

template <typename T, typename R>
struct MakeRefPtrHelper;

// Helper specialization: allocate from the kernel heap, placement-construct,
// then adopt the resulting count-0 object into a RefPtr.
template <typename T>
struct MakeRefPtrHelper<T, Result<RefPtr<T>>> {
    template <typename... Args>
    static Result<RefPtr<T>> make_ref(Args&&... args) {
        T* ptr = (T*)kmem::alloc(sizeof(T));
        if(ptr == nullptr) return E::OUT_OF_MEMORY;
        ptr = new(ptr) T{ebl::forward<Args>(args)...};
        return AdoptRef(ptr);
    }
};

// Public factory: heap-allocates a T and returns it ref-counted, or
// E::OUT_OF_MEMORY when the kernel heap is exhausted.
template <typename T, typename... Args>
Result<RefPtr<T>> MakeRefPtr(Args&&... args) {
    return MakeRefPtrHelper<T, Result<RefPtr<T>>>::make_ref(ebl::forward<Args>(args)...);
}

}  // namespace ebl
#pragma once

#include <stdint.h>

// Freestanding implementation of std::source_location on top of the
// compiler builtin.
// ref:
// https://github.com/llvm/llvm-project/blob/llvmorg-17.0.6/libcxx/include/source_location#L44

namespace std {
class source_location {
    // The names source_location::__impl, _M_file_name, _M_function_name, _M_line,
    // and _M_column are hard-coded in the compiler and must not be changed here.
    struct __impl {
        const char *_M_file_name;
        const char *_M_function_name;
        unsigned _M_line;
        unsigned _M_column;
    };
    const __impl *__ptr_ = nullptr;
    // GCC returns the type 'const void*' from the builtin, while clang returns
    // `const __impl*`. Per C++ [expr.const], casts from void* are not permitted
    // in constant evaluation, so we don't want to use `void*` as the argument
    // type unless the builtin returned that, anyhow, and the invalid cast is
    // unavoidable.
    using __bsl_ty = decltype(__builtin_source_location());

public:
    // The defaulted __ptr argument is necessary so that the builtin is evaluated
    // in the context of the caller. An explicit value should never be provided.
    static consteval source_location current(
        __bsl_ty __ptr = __builtin_source_location()) noexcept {
        source_location __sl;
        __sl.__ptr_ = static_cast<const __impl *>(__ptr);
        return __sl;
    }
    constexpr source_location() noexcept = default;

    // Accessors degrade gracefully to 0/"" for a default-constructed
    // (pointer-less) source_location.
    constexpr uint32_t line() const noexcept { return __ptr_ != nullptr ? __ptr_->_M_line : 0; }
    constexpr uint32_t column() const noexcept {
        return __ptr_ != nullptr ? __ptr_->_M_column : 0;
    }
    constexpr const char *file_name() const noexcept {
        return __ptr_ != nullptr ? __ptr_->_M_file_name : "";
    }
    constexpr const char *function_name() const noexcept {
        return __ptr_ != nullptr ? __ptr_->_M_function_name : "";
    }
};
}  // namespace std
#pragma once

// NOTE(review): two #include targets were lost in extraction (one of them
// presumably supplies panic()); "ebl/type_traits.h" is the one that survived.
#include "ebl/type_traits.h"

/**
 * @brief These error codes are used by the kernel to indicate the status of
 *        an operation. They are used instead of exceptions because C++
 *        exceptions are not supported in the kernel itself.
 */
enum E {
    // No error.
    OK = 0,

    // Generic error code.
    INVALID,

    // The requested operation is not implemented.
    NOT_IMPLEMENTED,

    // The caller passed an invalid argument to the function.
    INVALID_ARGUMENT,

    // Physical memory allocation failed. Most likely returned by the kernel
    // slab allocator. Will trigger the kernel paging process and attempt
    // to free up some memory.
    OUT_OF_MEMORY,

    // The caller does not have permission to perform the requested operation
    PERMISSION_DENIED,

    // Returned by VmRegion::* when virtual memory allocation fails. This is
    // technically a out-of-memory error, but we want to distinguish it from
    // the kernel heap running out of memory.
    ALLOCATION_FAILED
};

// Value-or-error return type. `armed_` tracks whether the caller has
// inspected the status yet: status()/operator bool disarm it; unwrap()
// panics while the Result is still armed, forcing a status check first.
template <typename T>
struct Result {
public:
    Result(T&& v) : value_{ebl::move(v)}, error_{E::OK}, armed_{true} {}

    /* implicit */ Result(E e) : unused_{0}, error_{e}, armed_{true} {}

    // Destroy the payload only when one was constructed (error_ == OK).
    // NOTE(review): unlike Result<void>, this does not panic on destruction
    // while still armed — confirm the asymmetry is intended.
    ~Result() {
        if(error_ == E::OK)
            value_.~T();
    }

    operator bool() { return this->status() == E::OK; }

    // Reading the status disarms the Result.
    E status() {
        armed_ = false;
        return error_;
    }

    // Moves the value out. Requires a prior status()/bool check and may be
    // called only once.
    T unwrap() {
        if(armed_ == true) panic("Attempted to unwrap an armed Result");
        if(error_ != E::OK) panic("Attempted to unwrap an error Result");
        // Set these to avoid double unwrapping Result
        error_ = E::INVALID;
        armed_ = true;
        // Let go of our ownership of the value
        return ebl::move(value_);
    }

private:
    // Payload lives in a union so an error Result constructs no T.
    union {
        char unused_;
        T value_;
    };
    E error_;
    bool armed_;
};

// Specialization for operations that return no value: status-only.
template <>
struct Result<void> {
public:
    /* implicit */ Result(E e) : error_{e}, armed_{true} {}
    Result() : error_{E::OK}, armed_{false} {}
    operator bool() { return this->status() == E::OK; }
    E status() {
        armed_ = false;
        return error_;
    }
    // Dropping an unchecked (armed) status is a bug: panic loudly.
    ~Result() {
        if(armed_ == true)
            panic("Attempted to destroy an armed Result");
    }

private:
    E error_;
    bool armed_;
};
#define SMALL 0b100000  // Lower-case digits and 'x' prefix for hex printout
#define LEFT 0b1000000  // Left justify within field width

constexpr bool is_digit(char c) { return (c) >= '0' && (c) <= '9'; }

// Reads a decimal number from the format string, advancing *c past it.
static int parse_num(const char** c) {
    int num = 0;
    while(is_digit(**c)) num = num * 10 + *((*c)++) - '0';
    return num;
}

// Emits one character through the sink g and reports 1 character written.
static inline int f(void (*g)(char, char**), char c, char** buf) {
    g(c, buf);
    return 1;
}

// Formats an integer through sink g: base conversion, sign, 0x/0 prefix,
// and space/zero field padding. `flags` is a bitmask of the #defines above.
static int num(void (*g)(char, char**),
               char** _buf,
               long long num,
               char base,
               uint8_t flags,
               int width,
               int percision) {
    char str[23];
    memset(str, 0, 23);  // 22 digits of uint64_t in oct + 1 EOL
    // SMALL selects the lower-case digit set.
    const char* base_str = flags & SMALL ? "0123456789abcdef" : "0123456789ABCDEF";
    int written = 0;
    char sign = 0;
    int w = 0, p = percision == -1 ? 0 : percision;
    // Magnitude to convert; negate only for signed negative values.
    unsigned long long divide = (num < 0) && (flags & SIGNED) ? -num : (unsigned long long)num;
    // Base conversion; digits are produced least-significant first.
    for(int i = 0; divide > 0; i++) {
        str[i] = base_str[divide % base];
        divide /= base;
    }
    if(num == 0) str[0] = '0';
    // Account for the prefix (0x / 0) in the field width.
    if((flags & PREFIX) && (base == 16)) {
        w += 2;
    } else if((flags & PREFIX) && (base == 8)) {
        w++;
    }
    // Account for a sign character ('-', '+', or ' ').
    if(((flags & SIGNED) && (num < 0)) || ((flags & SIGNED) && (flags & FPLUS)) ||
       ((flags & SIGNED) && (flags & SPLUS))) {
        w++;
    }
    // Remaining pad = field width minus everything that will be printed.
    w = strlen(str) + w;
    w = width - w > 0 ? width - w : 0;
    // Deal with signage
    sign = (num < 0) && (flags & SIGNED) ? '-' : sign;
    sign = (num >= 0) && (flags & FPLUS) ? '+' : sign;
    sign = (num >= 0) && (flags & SPLUS) ? ' ' : sign;
    // Space padding (right-justified, not zero-padded) goes first.
    if(!(flags & ZEROPAD) && !(flags & LEFT))
        while(w-- > 0) written += f(g, ' ', _buf);
    // Sign and prefixes go before the zero padding and after the space padding
    written = sign != 0 ? written + f(g, sign, _buf) : written;
    if((flags & PREFIX) && (base == 16)) {
        written += f(g, '0', _buf);
        written += flags & SMALL ? f(g, 'x', _buf) : f(g, 'X', _buf);
    } else if((flags & PREFIX) && (base == 8)) {
        written += f(g, '0', _buf);
    }
    // Precision zeros. NOTE(review): this emits `percision` EXTRA zeros
    // rather than padding the digit count up to `percision` as standard
    // printf does — confirm this is intended.
    while(p-- > 0) {
        written += f(g, '0', _buf);
    }
    if(flags & ZEROPAD)
        while(w-- > 0) written += f(g, '0', _buf);
    // Digits were stored in reverse; emit most-significant first.
    for(int i = strlen(str); i > 0; i--) {
        written += f(g, str[i - 1], _buf);
    }
    // Left-justify: trailing space padding.
    if(!(flags & ZEROPAD) && (flags & LEFT))
        while(w-- > 0) written += f(g, ' ', _buf);
    return written;
}

// Core printf engine: walks fmt, emitting characters through the sink g.
// Memory sinks advance *buf (see swrite_sbuf__); stream sinks ignore it.
// Returns the number of characters written, or -1 on an unknown conversion.
static int ssprintf(void (*g)(char, char**), char* buf, const char* fmt, va_list args) {
    int written = 0;
    uint8_t flags;
    int width;
    int percision;
    char length;

    while(*fmt != '\0') {
        long long val = 0;
        char base = 0;

        if(*fmt == '%') {
            // Collect and parse the flags
            flags = 0;
        repeat:
            fmt++;
            switch(*fmt) {
                case '0':
                    flags |= ZEROPAD;
                    goto repeat;
                case ' ':
                    flags |= SPLUS;
                    goto repeat;
                case '+':
                    flags |= FPLUS;
                    goto repeat;
                case '#':
                    flags |= PREFIX;
                    goto repeat;
                case '-':
                    flags |= LEFT;
                    goto repeat;
            }

            // Parse the width; '*' consumes an int argument, and a negative
            // value means left-justify with the absolute width.
            width = -1;
            if(*fmt == '*') {
                fmt++;
                width = va_arg(args, int);
                if(width < 0) {
                    width = -width;
                    flags |= LEFT;
                }
            } else if(is_digit(*fmt))
                width = parse_num(&fmt);

            // Parse percision ('.N' or '.*'); clamped to >= 0
            percision = -1;
            if(*fmt == '.') {
                fmt++;
                if(*fmt == '*') {
                    percision = va_arg(args, int);
                    fmt++;
                } else if(is_digit(*fmt))
                    percision = parse_num(&fmt);
                percision = percision < 0 ? 0 : percision;
            }

            // Parse length ('h', 'l', or 'L')
            length = 0;
            if((*fmt == 'h') || (*fmt == 'l') || (*fmt == 'L')) {
                length = *fmt;
                fmt++;
            }

            // Ignore llX vs lX
            if((*fmt == 'l')) fmt++;

            // Parse the type
            char* str = nullptr;
            int* foo = nullptr;
            int w = 0;
            switch(*fmt) {
                case 's':;
                    str = va_arg(args, char*);
                    // NOTE(review): width - strlen() mixes int with size_t,
                    // so the comparison is unsigned; it happens to work for
                    // width == -1 only because the result wraps — verify.
                    w = (width - strlen(str) > 0) ? width - strlen(str) : 0;
                    // Deal with padding before the text
                    if(!(flags & LEFT))
                        while(w-- > 0) written += f(g, ' ', &buf);
                    for(int i = 0; str[i] != '\0'; i++) written += f(g, str[i], &buf);
                    // Deal with padding after the text
                    while(w-- > 0) written += f(g, ' ', &buf);
                    break;
                case 'c':
                    // Deal with padding before the character
                    if(!(flags & LEFT))
                        while(--width > 0) written += f(g, ' ', &buf);
                    written += f(g, (uint8_t)va_arg(args, int), &buf);
                    // Deal with padding after the character
                    while(--width > 0) written += f(g, ' ', &buf);
                    break;
                // The unsigned conversions deliberately fall through to
                // accumulate SMALL and the base before the shared emit.
                case 'x':
                    flags |= SMALL;
                case 'X':
                    base = 16;
                case 'o':
                    base = base == 0 ? 8 : base;
                case 'u':
                    base = base == 0 ? 10 : base;
                    // NOTE(review): default-promoted varargs are int-sized;
                    // reading uint32_t/uint64_t here relies on the x86-64
                    // SysV calling convention — confirm for other targets.
                    if(length == 'l')
                        val = va_arg(args, uint64_t);
                    else if(length == 'h')
                        val = (unsigned short)va_arg(args, uint32_t);
                    else
                        val = va_arg(args, uint32_t);
                    written += num(g, &buf, val, base, flags, width, percision);
                    break;
                case 'p':
                    // Pointers default to zero-padded 8-wide hex.
                    if(width == -1) {
                        width = 8;
                        flags |= ZEROPAD;
                    }
                    written += num(
                        g, &buf, (unsigned long long)va_arg(args, void*), 16, flags, width, percision);
                    break;
                case 'i':
                case 'd':
                    flags |= SIGNED;
                    if(length == 'l')
                        val = va_arg(args, long long);
                    else if(length == 'h')
                        val = (short)va_arg(args, long);
                    else
                        val = va_arg(args, long);
                    written += num(g, &buf, val, 10, flags, width, percision);
                    break;
                case 'n':;
                    // Get the pointer
                    foo = va_arg(args, int*);
                    // Store the amount of characters written in an integer pointed to by a pointer
                    *foo = written;
                    break;
                case '%':
                    written += f(g, '%', &buf);
                    break;
                default:
                    return -1;
            }
            fmt++;
        } else {
            written += f(g, *fmt, &buf);
            fmt++;
        }
    }
    return written;
}

// Memory sink: store the character and advance the output pointer.
static void swrite_sbuf__(char c, char** buf) {
    **buf = c;
    *buf += 1;
}
{ 249 | va_list args; 250 | va_start(args, fmt); 251 | int ret = vsprintf(buf, fmt, args); 252 | va_end(args); 253 | return ret; 254 | } 255 | 256 | int vsprintf(char* buf, const char* fmt, va_list args) { 257 | char* modbuf = buf; 258 | int ret = ssprintf(swrite_sbuf__, modbuf, fmt, args); 259 | return ret; 260 | } 261 | 262 | //===----------------------------------------------------------------------===// 263 | // Helper functions 264 | 265 | using namespace platform; 266 | 267 | static char stream_buffer_org[2][512]; 268 | static char* stream_buffer[] = {&stream_buffer_org[0][0], &stream_buffer_org[1][0]}; 269 | 270 | static void fflush(int fd) { 271 | switch(fd) { 272 | case ebl::CLOG: 273 | *stream_buffer[1] = 0; // Null terminator 274 | #ifdef LOG_ENABLE 275 | console_log(&stream_buffer_org[1][0]); 276 | #else 277 | console_puts(&stream_buffer_org[1][0]); 278 | #endif 279 | stream_buffer[1] = &stream_buffer_org[1][0]; 280 | break; 281 | case ebl::COUT: 282 | *stream_buffer[0] = 0; // Null terminator 283 | stream_buffer[0] = &stream_buffer_org[0][0]; // Reset buffer ptr 284 | while((*stream_buffer[0]) && ((stream_buffer[0] - &stream_buffer_org[0][0]) < 512)) 285 | console_emit(*stream_buffer[0]++); 286 | #ifdef LOG_ALL_OUTPUT 287 | console_log(&stream_buffer_org[0][0]); 288 | #endif 289 | stream_buffer[0] = &stream_buffer_org[0][0]; 290 | break; 291 | } 292 | } 293 | 294 | static void stream_log(char c, char** buf) { 295 | (void)buf; 296 | *stream_buffer[1]++ = c; 297 | if(c == '\n' || (stream_buffer[1] - &stream_buffer_org[1][0]) > 512) fflush(ebl::CLOG); 298 | } 299 | 300 | static void stream_out(char c, char** buf) { 301 | (void)buf; 302 | *stream_buffer[0]++ = c; 303 | if(c == '\n' || (stream_buffer[0] - &stream_buffer_org[0][0]) > 512) fflush(ebl::COUT); 304 | } 305 | 306 | static void stream_err(char c, char** buf) { 307 | (void)buf; 308 | console_emit(c); 309 | } 310 | 311 | 
//===----------------------------------------------------------------------===// 312 | // printf and fprintf 313 | 314 | int printf(const char* format, ...) { 315 | va_list args; 316 | va_start(args, format); 317 | int ret = ssprintf(stream_out, 0, format, args); 318 | va_end(args); 319 | return ret; 320 | } 321 | 322 | int fprintf(char file, const char* format, ...) { 323 | va_list args; 324 | va_start(args, format); 325 | int ret = -1; 326 | switch(file) { 327 | // TODO: Implement actual real streams for these 328 | case ebl::CLOG: 329 | ret = ssprintf(stream_log, 0, format, args); 330 | break; 331 | case ebl::CERR: 332 | ret = ssprintf(stream_err, 0, format, args); 333 | break; 334 | default: 335 | ret = ssprintf(stream_out, 0, format, args); 336 | break; 337 | } 338 | va_end(args); 339 | return ret; 340 | } 341 | 342 | //===----------------------------------------------------------------------===// 343 | // C++ wrapper functions 344 | 345 | int ebl::kout(const char* format, ...) { 346 | va_list args; 347 | va_start(args, format); 348 | int ret = -1; 349 | ret = ssprintf(stream_out, 0, format, args); 350 | va_end(args); 351 | return ret; 352 | } 353 | 354 | int ebl::kerr(const char* format, ...) { 355 | va_list args; 356 | va_start(args, format); 357 | int ret = -1; 358 | ret = ssprintf(stream_err, 0, format, args); 359 | va_end(args); 360 | return ret; 361 | } 362 | 363 | int ebl::klog(const char* format, ...) 
#pragma once
// NOTE(review): the #include target was lost in extraction; va_list below
// implies <stdarg.h> — confirm.
#include <stdarg.h>

// Kernel console stream IDs and printf-family entry points.
namespace ebl {
constexpr int COUT = 0;  // Buffered console output
constexpr int CERR = 1;  // Unbuffered console output
constexpr int CLOG = 2;  // Buffered log output

int kout(const char* format, ...);
int kerr(const char* format, ...);
int klog(const char* format, ...);
};  // namespace ebl

// Low-level console hooks implemented by the platform layer.
namespace platform {
void console_emit(char c);
void console_puts(char const* c);
void console_log(char const* c);
}  // namespace platform

/**
 * Prints a formatted string to the screen
 * @param format Format string
 * @param ... Arguments for format
 * @return Length of string written
 */
int printf(const char* format, ...);

/**
 * Writes a formatted string to a kernel output stream
 * @param file Stream ID. See constants in ebl namespace above.
 * @param format Format string
 * @param ... Arguments for format
 * @return Length of string written
 */
int fprintf(char file, const char* format, ...);

/**
 * Formats a string
 * @param buf The buffer to contain the formatted string
 * @param fmt The string containing the formats
 * @param args The arguments for the formats
 * @return The number of characters written
 */
int vsprintf(char* buf, const char* fmt, va_list args);

/**
 * Formats a string
 * @param buf The buffer to contain the formatted string
 * @param fmt The string containing the formats
 * @param ... The arguments for the formats
 * @return The number of characters written
 */
int sprintf(char* buf, const char* fmt, ...);
#include <stddef.h>

namespace ebl {

// Compares len bytes; returns <0, 0, or >0 like the C standard memcmp.
int memcmp(const void* buf1, const void* buf2, size_t len) {
    const unsigned char* p1 = static_cast<const unsigned char*>(buf1);
    const unsigned char* p2 = static_cast<const unsigned char*>(buf2);
    while(len--) {
        if(*p1 != *p2) return *p1 - *p2;
        p1++, p2++;
    }
    return 0;
}

// Copies len bytes; regions must not overlap (use memmove for that).
// BUG FIX: `#if ARCH_TYPE == ARCH_x86_32` evaluated 0 == 0 (true) when the
// macros were undefined, wrongly selecting the inline-asm path; guard with
// defined() so undefined macros fall back to the portable loop.
void* memcpy(void* dest, const void* src, size_t len) {
#if defined(ARCH_TYPE) && defined(ARCH_x86_32) && ARCH_TYPE == ARCH_x86_32
    asm volatile("rep movsb" : : "D"(dest), "S"(src), "c"(len) : "memory");
    return dest;
#else
    char* d = static_cast<char*>(dest);
    const char* s = static_cast<const char*>(src);
    while(len--) *d++ = *s++;
    return dest;
#endif
}

// Fills len bytes of dest with (unsigned char)c.
void* memset(void* dest, int c, size_t len) {
#if defined(ARCH_TYPE) && defined(ARCH_x86_32) && ARCH_TYPE == ARCH_x86_32
    asm volatile("rep stosb" : : "a"(c), "D"(dest), "c"(len) : "memory");
    return dest;
#else
    unsigned char* ptr = static_cast<unsigned char*>(dest);
    while(len-- > 0) *ptr++ = static_cast<unsigned char>(c);
    return dest;
#endif
}

// Overlap-safe copy: chooses copy direction based on pointer order.
void* memmove(void* dest, void* src, size_t len) {
    char* d = static_cast<char*>(dest);
    char* s = static_cast<char*>(src);
    if(d < s) {
        while(len--) *d++ = *s++;
    } else {
        char* lasts = s + (len - 1);
        char* lastd = d + (len - 1);
        while(len--) *lastd-- = *lasts--;
    }
    return dest;
}

// Copies src into dest INCLUDING the '\0' terminator; returns dest.
// BUG FIX: the previous implementation copied only strlen(src) bytes via
// memcpy, leaving dest unterminated (and breaking strcat below).
char* strcpy(char* dest, const char* src) {
    char* d = dest;
    while((*d++ = *src++) != '\0') {
    }
    return dest;
}

// Appends src (with terminator) to the end of dest; returns dest.
char* strcat(char* dest, const char* src) {
    char* end = dest;
    while(*end != '\0') end++;  // find the current terminator
    strcpy(end, src);
    return dest;
}

}  // namespace ebl
// Lexicographic compare of two NUL-terminated strings, as unsigned bytes.
int strcmp(const char* str1, const char* str2) {
    while(*str1 && (*str1 == *str2)) str1++, str2++;
    return *(const unsigned char*)str1 - *(const unsigned char*)str2;
}

// Word-at-a-time strlen.
// See https://www.strchr.com/optimized_strlen_function for algorithm details
// Byte-scans until the pointer is word-aligned, then tests a whole word per
// iteration with the (w - 0x0101..) & ~w & 0x8080.. zero-byte trick. Reads
// never cross an alignment boundary past the terminator's word.
size_t strlen(const char* str) {
    const char* ptr;
    const unsigned long int* longword_ptr;
    unsigned long int longword, himagic, lomagic;
    // Scan byte-by-byte up to the first word-aligned address.
    for(ptr = str; ((unsigned long int)ptr & (sizeof(longword) - 1)) != 0; ++ptr)
        if(*ptr == '\0') return ptr - str;
    longword_ptr = (unsigned long int*)ptr;
    himagic = 0x80808080L;
    lomagic = 0x01010101L;
    // Widen the magic constants to 64-bit when words are 8 bytes.
    if(sizeof(longword) > 4) {
        himagic = ((himagic << 16) << 16) | himagic;
        lomagic = ((lomagic << 16) << 16) | lomagic;
    }
    while(true) {
        longword = *longword_ptr++;

        // Non-zero iff some byte in longword is 0x00.
        if(((longword - lomagic) & ~longword & himagic) != 0) {
            const char* cp = (const char*)(longword_ptr - 1);
            if(cp[0] == '\0') return cp - str;
            if(cp[1] == '\0') return cp - str + 1;
            if(cp[2] == '\0') return cp - str + 2;
            if(cp[3] == '\0') return cp - str + 3;
            if(sizeof(longword) > 4) {
                if(cp[4] == '\0') return cp - str + 4;
                if(cp[5] == '\0') return cp - str + 5;
                if(cp[6] == '\0') return cp - str + 6;
                if(cp[7] == '\0') return cp - str + 7;
            }
        }
    }
}

// Bounded compare of at most n bytes; stops early at a terminator.
// This function is in the public domain.
int strncmp(const char* s1, const char* s2, size_t n) {
    unsigned char u1, u2;

    while(n-- > 0) {
        u1 = (unsigned char)*s1++;
        u2 = (unsigned char)*s2++;
        if(u1 != u2) return u1 - u2;
        if(u1 == '\0') return 0;
    }
    return 0;
}
namespace ebl {

// Locates the first occurrence of s2 within s1, or nullptr.
// Note: mirrors the historical implementation (anchored on s2's first
// character), so an EMPTY s2 yields a pointer to s1's terminator.
// This function is in the public domain.
char* strstr(const char* s1, const char* s2) {
    // Needle length, computed once up front.
    size_t n = 0;
    while(s2[n] != '\0') n++;
    for(const char* p = s1;; p++) {
        // Advance to the next position matching s2's first character
        // (this matches the terminator itself when s2 is empty).
        while(*p != *s2) {
            if(*p == '\0') return 0;
            p++;
        }
        // Candidate anchor found: compare the first n characters.
        size_t i = 0;
        while(i < n && p[i] == s2[i]) i++;
        if(i == n) return (char*)p;
    }
}

// Returns a pointer to the first occurrence of (char)c in s, or nullptr.
// Searching for '\0' yields a pointer to the terminator itself.
// This function is in the public domain.
char* strchr(const char* s, int c) {
    const char target = (char)c;
    for(;; s++) {
        if(*s == target) return (char*)s;
        if(*s == '\0') return 0;
    }
}

}  // namespace ebl
// The attributes can be safely erased when compiling with other compilers.
#if defined(__clang__) && (!defined(SWIG))
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
#endif

// --- Declaring capabilities -------------------------------------------------

#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))

#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)

// --- Data-member guards -----------------------------------------------------

#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))

#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))

// --- Lock-ordering constraints ----------------------------------------------

#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))

#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))

// --- Function requirements and acquire/release contracts --------------------

#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

#define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))

#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))

#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))

#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))

#define RELEASE_GENERIC(...) THREAD_ANNOTATION_ATTRIBUTE__(release_generic_capability(__VA_ARGS__))

#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))

#define TRY_ACQUIRE_SHARED(...) \
    THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#pragma once

// A lot of the functions are re-implementations from libcxx
// ref: https://github.com/llvm-mirror/libcxx
// NOTE(review): template parameter lists were lost in extraction; the
// <...> forms below are reconstructions — verify against the original.

namespace ebl {

namespace internal {
template <typename T, T v>
struct integral_constant {
    static constexpr const T value = v;
};
template <bool B>
using bool_constant = integral_constant<bool, B>;
// Overload pair backing declval: the int overload (T&&) is preferred;
// the long overload (plain T) is the fallback where T&& is ill-formed.
template <typename T>
T&& __declval(int) {}
template <typename T>
T __declval(long) {}
}  // namespace internal

typedef internal::bool_constant<true> true_type;
typedef internal::bool_constant<false> false_type;
template <typename...>
using void_t = void;

template <typename T, typename U>
struct is_same : public false_type {};
template <typename T>
struct is_same<T, T> : public true_type {};
template <typename T, typename U>
inline constexpr bool is_same_v = is_same<T, U>::value;

// Unevaluated-context-only "make me a T" helper (never actually called).
template <typename T>
decltype(internal::__declval<T>(0)) declval() noexcept {}

// Expression-SFINAE constructibility probe: the partial specialization is
// viable only when T(declval<Args>()...) is well-formed.
template <typename, typename T, typename... Args>
struct is_constructible_ : false_type {};

template <typename T, typename... Args>
struct is_constructible_<void_t<decltype(T(declval<Args>()...))>, T, Args...> : true_type {};

template <typename T, typename... Args>
using is_constructible = is_constructible_<void_t<>, T, Args...>;

template <typename T, typename... Args>
inline constexpr bool is_constructible_v = is_constructible<T, Args...>::value;

template <bool, typename T = void>
struct enable_if {};
template <typename T>
struct enable_if<true, T> {
    typedef T type;
};

// Extracts the member type M from a pointer-to-member M T::*.
template <typename M, typename T>
M get_member_type(M T::*);
template 59 | struct always_false : ebl::false_type {}; 60 | 61 | // conditional 62 | template 63 | struct conditional { 64 | typedef T type; 65 | }; 66 | template 67 | struct conditional { 68 | typedef F type; 69 | }; 70 | template 71 | using conditional_t = typename conditional::type; 72 | 73 | // is_base_of 74 | template 75 | struct is_base_of : public internal::integral_constant {}; 76 | template 77 | inline constexpr bool is_base_of_v = is_base_of::value; 78 | 79 | // is_instance 80 | template class> 81 | struct is_instance : public false_type {}; 82 | template class U> 83 | struct is_instance, U> : public true_type {}; 84 | template class U> 85 | inline constexpr bool is_instance_v = is_instance::value; 86 | 87 | // is_class 88 | template 89 | struct is_class : public internal::integral_constant {}; 90 | template 91 | inline constexpr bool is_class_v = is_class::value; 92 | 93 | // has_virtual_destructor 94 | template 95 | struct has_virtual_destructor 96 | : public internal::integral_constant {}; 97 | template 98 | inline constexpr bool has_virtual_destructor_v = has_virtual_destructor::value; 99 | 100 | // is_integral 101 | template 102 | struct is_integral : public false_type {}; 103 | template <> 104 | struct is_integral : public true_type {}; 105 | template <> 106 | struct is_integral : public true_type {}; 107 | template <> 108 | struct is_integral : public true_type {}; 109 | template <> 110 | struct is_integral : public true_type {}; 111 | template <> 112 | struct is_integral : public true_type {}; 113 | template <> 114 | struct is_integral : public true_type {}; 115 | template <> 116 | struct is_integral : public true_type {}; 117 | template <> 118 | struct is_integral : public true_type {}; 119 | template <> 120 | struct is_integral : public true_type {}; 121 | template <> 122 | struct is_integral : public true_type {}; 123 | template <> 124 | struct is_integral : public true_type {}; 125 | template <> 126 | struct is_integral : public true_type {}; 
127 | template <> 128 | struct is_integral : public true_type {}; 129 | template 130 | inline constexpr bool is_integral_v = is_integral::value; 131 | 132 | /*===----------------------------------------------------------------------===*/ 133 | // ref: ??? (I forgot where I got this from) 134 | 135 | template 136 | struct remove_reference { 137 | typedef T type; 138 | }; 139 | template 140 | struct remove_reference { 141 | typedef T type; 142 | }; 143 | template 144 | struct remove_reference { 145 | typedef T type; 146 | }; 147 | 148 | /*===----------------------------------------------------------------------===*/ 149 | // ref: https://en.cppreference.com/w/cpp/experimental/is_detected 150 | 151 | namespace internal { 152 | template class Op, class... Args> 153 | struct detector { 154 | using value_t = false_type; 155 | using type = Default; 156 | }; 157 | 158 | template class Op, class... Args> 159 | struct detector>, Op, Args...> { 160 | using value_t = true_type; 161 | using type = Op; 162 | }; 163 | } // namespace internal 164 | 165 | struct nonesuch { 166 | ~nonesuch() = delete; 167 | nonesuch(nonesuch const&) = delete; 168 | void operator=(nonesuch const&) = delete; 169 | }; 170 | 171 | template