├── .gitignore ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── Xargo.toml ├── src ├── acpi │ ├── madt.rs │ ├── mod.rs │ ├── rsdp.rs │ ├── rsdt.rs │ ├── sdt.rs │ └── xsdt.rs ├── arch │ ├── mod.rs │ └── x86_64 │ │ ├── asm │ │ ├── boot.asm │ │ ├── grub.cfg │ │ ├── linker.ld │ │ ├── long_mode_init.asm │ │ └── multiboot_header.asm │ │ ├── init.rs │ │ ├── interrupts │ │ ├── exceptions.rs │ │ ├── gdt.rs │ │ ├── irq.rs │ │ ├── mod.rs │ │ ├── syscall │ │ │ ├── mod.rs │ │ │ └── x86_64.rs │ │ └── utils.rs │ │ ├── memory │ │ ├── area_frame_allocator.rs │ │ ├── heap_allocator.rs │ │ ├── mod.rs │ │ ├── paging │ │ │ ├── entry.rs │ │ │ ├── mapper.rs │ │ │ ├── mod.rs │ │ │ ├── table.rs │ │ │ └── temporary_page.rs │ │ └── stack_allocator.rs │ │ └── mod.rs ├── device │ ├── ahci │ │ ├── fis.rs │ │ ├── hba.rs │ │ └── mod.rs │ ├── apic.rs │ ├── io │ │ ├── cpuio.rs │ │ ├── dma.rs │ │ ├── mmio.rs │ │ └── mod.rs │ ├── keyboard │ │ ├── keyboard.rs │ │ ├── layout │ │ │ ├── mod.rs │ │ │ ├── uk_std.rs │ │ │ └── us_std.rs │ │ ├── mod.rs │ │ └── ps2_keyboard.rs │ ├── mod.rs │ ├── pci.rs │ ├── pic.rs │ ├── pit.rs │ ├── ps2_8042.rs │ ├── serial.rs │ └── vga │ │ ├── buffer.rs │ │ ├── mod.rs │ │ └── vga.rs ├── lib.rs ├── macros.rs ├── runtime_glue.rs ├── syscall │ ├── mod.rs │ └── process.rs └── task │ ├── context.rs │ ├── coop_sched.rs │ ├── mod.rs │ ├── proc_list.rs │ └── process.rs └── x86_64-lambda.json /.gitignore: -------------------------------------------------------------------------------- 1 | /.gdbinit 2 | build 3 | target 4 | Cargo.lock 5 | ./*.*.swp 6 | ./*/*/*.*.swp 7 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["William Huddie "] 3 | description = "A simple operating system written in Rust" 4 | license = "GPL-3.0" 5 | name = "lambda_os" 6 | version = "0.1.0" 7 | 8 | [dependencies] 9 | bit_field = "0.7.0" 10 | bitflags = "1.0.0" 11 | linked_list_allocator = "0.5.0" 12 | multiboot2 = "0.5.0" 13 | once = "0.3.3" 14 | raw-cpuid = "*" 15 | rlibc = "1.0" 16 | spin = "0.4.5" 17 | volatile = "0.1.0" 18 | x86_64 = "0.1.2" 19 | heapless = "0.2.4" 20 | 21 | [dependencies.lazy_static] 22 | features = ["spin_no_std"] 23 | version = "0.2.4" 24 | 25 | [features] 26 | default = ["uk"] 27 | uk = [] 28 | us = [] 29 | 30 | [lib] 31 | crate-type = ["staticlib"] 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 
21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. 
The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. 
For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 
204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 
268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. 
But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 
387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. 
You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. 
"Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 
564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 
628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 
675 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | QEMU = qemu 2 | GRUB = grub 3 | NASM = nasm 4 | LD = ld 5 | 6 | arch ?= x86_64 7 | kernel := build/lambda-$(arch).bin 8 | iso := build/os-$(arch).iso 9 | target ?= $(arch)-lambda 10 | rust_os := target/$(target)/debug/liblambda_os.a 11 | 12 | linker_script := src/arch/$(arch)/asm/linker.ld 13 | grub_cfg := src/arch/$(arch)/asm/grub.cfg 14 | assembly_source_files := $(wildcard src/arch/$(arch)/asm/*.asm) 15 | assembly_object_files := $(patsubst src/arch/$(arch)/asm/%.asm, \ 16 | build/arch/$(arch)/%.o, $(assembly_source_files)) 17 | 18 | CARGOFLAGS := 19 | 20 | ifdef FEATURES 21 | CARGOFLAGS += --no-default-features --features $(FEATURES) 22 | endif 23 | 24 | .PHONY: all clean run iso kernel 25 | 26 | all: $(kernel) 27 | 28 | clean: 29 | @rm -r build 30 | @cargo clean 31 | 32 | run: $(iso) 33 | @$(QEMU)-system-x86_64 -cdrom $(iso) -m 4G -serial stdio 34 | 35 | iso: $(iso) 36 | 37 | $(iso): $(kernel) $(grub_cfg) 38 | @mkdir -p build/isofiles/boot/grub 39 | @cp $(kernel) build/isofiles/boot/kernel.bin 40 | @cp $(grub_cfg) build/isofiles/boot/grub 41 | @$(GRUB)-mkrescue -o $(iso) build/isofiles 2> /dev/null 42 | @rm -r build/isofiles 43 | 44 | $(kernel): kernel $(rust_os) $(assembly_object_files) $(linker_script) 45 | @$(LD) -n --gc-sections -T $(linker_script) -o $(kernel) \ 46 | $(assembly_object_files) $(rust_os) 47 | 48 | kernel: 49 | @RUST_TARGET_PATH="$(shell pwd)" xargo build --target $(target) $(CARGOFLAGS) 50 | 51 | # compile assembly files 52 | build/arch/$(arch)/%.o: src/arch/$(arch)/asm/%.asm 53 | @mkdir -p $(shell dirname $@) 54 | @$(NASM) -felf64 $< -o $@ 55 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # lambdaOS 2 | An OS written in Rust and Assembly. It currently only supports the amd64 architecture. 3 | ## Features 4 | **Completed** 5 | - VGA driver. 6 | - Paging. 7 | - Keyboard input / PS/2 driver. 8 | - Basic support for PCI devices. 9 | - Basic pre-emptive scheduling. 10 | 11 | ## Building 12 | ```bash 13 | # Install Rust - follow on-screen instructions. Note - you may have to reload your shell to be able to use Rust 14 | # commands. 15 | curl https://sh.rustup.rs -sSf | sh 16 | # Clone repo. 17 | git clone https://github.com/too-r/lambdaOS.git && cd ~/lambdaOS #Or wherever you put it. 18 | # We need to be using the nightly toolchain. 19 | rustup override set nightly 20 | # Install rust-src and xargo for cross-compilation. 21 | rustup component add rust-src && cargo install xargo 22 | # Install dependencies from package manager. 
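# (The pacman command below assumes an Arch-based system; on other distros, install the equivalent make, qemu, xorriso, grub, nasm and mtools packages.)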
23 | sudo pacman -S make qemu xorriso grub nasm mtools 24 | # Build and run lambdaOS 25 | make run 26 | ``` 27 | -------------------------------------------------------------------------------- /Xargo.toml: -------------------------------------------------------------------------------- 1 | [target.x86_64-lambda.dependencies] 2 | alloc = {} 3 | -------------------------------------------------------------------------------- /src/acpi/madt.rs: -------------------------------------------------------------------------------- 1 | use acpi::sdt::SdtHeader; 2 | use arch::memory::paging::ActivePageTable; 3 | use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; 4 | use core::mem; 5 | use spin::Mutex; 6 | use heapless::Vec as StaticVec; 7 | use alloc::Vec; 8 | use device::{apic, pic}; 9 | use raw_cpuid::CpuId; 10 | 11 | static CPUS: AtomicUsize = ATOMIC_USIZE_INIT; 12 | 13 | #[derive(Debug, Clone, Copy)] 14 | pub struct Madt { 15 | pub sdt: &'static SdtHeader, 16 | /// Address of LAPIC. 17 | pub address: u32, 18 | /// Flags - 1 indicates that dual legacy PICs are installed. 19 | pub flags: u32, 20 | } 21 | 22 | impl Madt { 23 | /// Initialise all the MADT entries. 24 | pub fn init(&mut self, active_table: &mut ActivePageTable) { 25 | let mut local_apics: StaticVec<&'static LapicEntry, [&'static LapicEntry; 20]> 26 | = StaticVec::new(); 27 | let mut nmis: StaticVec<&'static ApicNMI, [&'static ApicNMI; 10]> = StaticVec::new(); 28 | let mut io_apics: StaticVec<&'static IoApic, [&'static IoApic; 10]> = StaticVec::new(); 29 | let mut isos: StaticVec<&'static InterruptSourceOverride, 30 | [&'static InterruptSourceOverride; 10]> 31 | = StaticVec::new(); 32 | 33 | let mut apic_manager = apic::ApicManager::new(); 34 | 35 | for entry in self.iter() { 36 | match entry { 37 | MadtEntry::Lapic(local_apic) => { 38 | use x86_64::registers::msr::{rdmsr, IA32_APIC_BASE}; 39 | 40 | // Check if this local APIC corresponds to an active application processor. 
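// (Bit 0 of the MADT local-APIC flags field is the "Processor Enabled" bit, so a set bit means this core can be used.)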
41 | if local_apic.flags & 1 == 1 { 42 | println!( 43 | "[ dev ] Found local APIC, id: {}, processor id: {}", 44 | local_apic.id, local_apic.processor_id 45 | ); 46 | if rdmsr(IA32_APIC_BASE) & (1 << 8) == local_apic.id as u64 { 47 | println!("[ dev ] Found the BSP local APIC, id: {}", local_apic.id); 48 | } else { 49 | CPUS.fetch_add(1, Ordering::SeqCst); 50 | } 51 | } 52 | else { 53 | println!("Found disabled core, id: {}", local_apic.id); 54 | } 55 | 56 | local_apics.push(local_apic).expect("Failed to push element to static vector"); 57 | } 58 | 59 | MadtEntry::IoApic(io_apic) => { 60 | println!( 61 | "[ dev ] Found I/O APIC, id: {}, register base: {:#x}, gsib: {}", 62 | io_apic.id, io_apic.address, io_apic.gsib 63 | ); 64 | io_apics.push(io_apic).expect("Failed to push element to static vector"); 65 | } 66 | 67 | MadtEntry::Iso(iso) => { 68 | println!( 69 | "[ dev ] Found interrupt source override,\n overrides IRQ {},\n gsi: {}", 70 | iso.irq_source, iso.gsi 71 | ); 72 | isos.push(iso).expect("Failed to push element to static vector"); 73 | } 74 | 75 | MadtEntry::Nmi(nmi) => { 76 | println!("[ dev ] APIC NMI with flags: {}, LINT: {}", 77 | nmi.flags, 78 | nmi.lint_no); 79 | nmis.push(nmi).expect("Failed to push element to static vector."); 80 | } 81 | 82 | _ => { 83 | println!("[ acpi ] No more MADT entries..."); 84 | return; 85 | } 86 | } 87 | } 88 | 89 | apic_manager.lapic_base = self.address; 90 | 91 | apic_manager.local_apics = local_apics; 92 | apic_manager.io_apics = io_apics; 93 | apic_manager.nmis = nmis; 94 | apic_manager.isos = isos; 95 | 96 | 97 | *apic::APIC_MANAGER.lock() = Some(apic_manager); 98 | 99 | unsafe { pic::PICS.lock().init() }; 100 | 101 | if CpuId::new().get_feature_info().unwrap().has_apic() { 102 | apic::init(active_table); 103 | } 104 | 105 | println!("[ smp ] Found {} APs", CPUS.load(Ordering::SeqCst)); 106 | } 107 | 108 | pub fn new(sdt: &'static SdtHeader) -> Self { 109 | let local_address = unsafe { *(sdt.data_address() as *const u32) }; 110 | let flags = unsafe { *(sdt.data_address() as *const u32).offset(1) }; 111 | 112 | Madt { 113 | sdt: sdt, 114 | address: local_address, 115 | flags: flags, 116 | } 117 | } 118 | 119 | fn iter(&self) -> MadtIter { 120 | MadtIter { 121 | sdt: self.sdt, 122 | i: 8, /* Skip laddr and flags */ 123 | } 124 | } 125 | } 126 | 127 | /// The Local APIC. 128 | #[repr(packed)] 129 | pub struct LapicEntry { 130 | /// The ID of the parent AP. 131 | pub processor_id: u8, 132 | /// The ID of this APIC. 133 | pub id: u8, 134 | /// Flags - 1 means that the AP is enabled. 135 | pub flags: u32, 136 | } 137 | 138 | #[repr(packed)] 139 | pub struct IoApic { 140 | /// The ID of this I/O APIC. 141 | pub id: u8, 142 | _resv: u8, 143 | /// Address of this I/O APIC. 144 | pub address: u32, 145 | /// The first interrupt number this APIC handles. 146 | pub gsib: u32, 147 | } 148 | 149 | /// Mapping of IRQ source to interrupt. 150 | #[repr(packed)] 151 | pub struct InterruptSourceOverride { 152 | pub bus_source: u8, 153 | pub irq_source: u8, 154 | pub gsi: u32, 155 | pub flags: u16, 156 | } 157 | 158 | /// Non-maskable interrupts. 
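/// (MADT entry type 4: records which local interrupt pin, LINT0 or LINT1, the NMI is wired to for a given processor.)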
159 | #[repr(packed)] 160 | pub struct ApicNMI { 161 | pub processor_id: u8, 162 | pub flags: u16, 163 | pub lint_no: u8, 164 | } 165 | 166 | pub enum MadtEntry { 167 | Lapic(&'static LapicEntry), 168 | InvalidLapic(usize), 169 | IoApic(&'static IoApic), 170 | InvalidIoApic(usize), 171 | Iso(&'static InterruptSourceOverride), 172 | InvalidIso(usize), 173 | Nmi(&'static ApicNMI), 174 | InvalidNmi(usize), 175 | Unknown(u8), 176 | } 177 | 178 | struct MadtIter { 179 | sdt: &'static SdtHeader, 180 | i: usize, 181 | } 182 | 183 | impl Iterator for MadtIter { 184 | type Item = MadtEntry; 185 | fn next(&mut self) -> Option<Self::Item> { 186 | if self.i + 1 < self.sdt.data_len() { 187 | let ty = unsafe { *(self.sdt.data_address() as *const u8).offset(self.i as isize) }; 188 | let len = unsafe { *(self.sdt.data_address() as *const u8).offset(self.i as isize + 1) } 189 | as usize; 190 | 191 | if self.i + len <= self.sdt.data_len() { 192 | let item = match ty { 193 | 0 => if len == mem::size_of::<LapicEntry>() + 2 { 194 | MadtEntry::Lapic(unsafe { 195 | &*((self.sdt.data_address() + self.i + 2) as *const LapicEntry) 196 | }) 197 | } else { 198 | MadtEntry::InvalidLapic(len) 199 | }, 200 | 1 => if len == mem::size_of::<IoApic>() + 2 { 201 | MadtEntry::IoApic(unsafe { 202 | &*((self.sdt.data_address() + self.i + 2) as *const IoApic) 203 | }) 204 | } else { 205 | MadtEntry::InvalidIoApic(len) 206 | }, 207 | 2 => if len == mem::size_of::<InterruptSourceOverride>() + 2 { 208 | MadtEntry::Iso(unsafe { 209 | &*((self.sdt.data_address() + self.i + 2) 210 | as *const InterruptSourceOverride) 211 | }) 212 | } else { 213 | MadtEntry::InvalidIso(len) 214 | }, 215 | 4 => if len == mem::size_of::<ApicNMI>() + 2 { 216 | MadtEntry::Nmi(unsafe { 217 | &*((self.sdt.data_address() + self.i + 2) 218 | as *const ApicNMI) 219 | }) 220 | } else { 221 | MadtEntry::InvalidNmi(len) 222 | }, 223 | _ => MadtEntry::Unknown(ty), 224 | }; 225 | 226 | println!("[ acpi ] MADT entry at address: {:#x}", self.sdt.data_address() + self.i + 2); 227 | 228 | self.i += len; 229 | 230 | Some(item) 231 | } else { 232 | None 233 | } 234 | } else { 235 | None 236 | } 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /src/acpi/mod.rs: -------------------------------------------------------------------------------- 1 | use arch::memory::paging::{ActivePageTable, Page, PhysicalAddress, VirtualAddress}; 2 | use arch::memory::Frame; 3 | use arch::memory::paging::entry::EntryFlags; 4 | use core::mem; 5 | 6 | pub mod rsdp; 7 | pub mod sdt; 8 | pub mod rsdt; 9 | pub mod xsdt; 10 | pub mod madt; 11 | 12 | /// Retrieve an SDT from a pointer found using the RSDP 13 | fn get_sdt(address: usize, active_table: &mut ActivePageTable) -> &'static sdt::SdtHeader { 14 | { 15 | let page = Page::containing_address(VirtualAddress::new(address)); 16 | if active_table.translate_page(page).is_none() { 17 | let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get())); 18 | let result = 19 | active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE); 20 | result.flush(active_table); 21 | } 22 | } 23 | 24 | let sdt = unsafe { &*(address as *const sdt::SdtHeader) }; 25 | 26 | { 27 | // Map next page, and all pages within the range occupied by the data table.
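// The mapping above only guaranteed the page containing `address`; the table body can run past that first 4 KiB, so map everything up to `address + sdt.length`.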
28 | let start_page = Page::containing_address(VirtualAddress::new(address + 4096)); 29 | let end_page = Page::containing_address(VirtualAddress::new(address + sdt.length as usize)); 30 | for page in Page::range_inclusive(start_page, end_page) { 31 | // Check if this page has already been mapped to a frame. 32 | if active_table.translate_page(page).is_none() { 33 | let frame = 34 | Frame::containing_address(PhysicalAddress::new(page.start_address().get())); 35 | let result = 36 | active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE); 37 | result.flush(active_table); 38 | } 39 | } 40 | } 41 | 42 | sdt 43 | } 44 | 45 | pub unsafe fn init(active_table: &mut ActivePageTable) { 46 | let rsdp = rsdp::RsdpDescriptor::init(active_table).expect("Could not find rsdp, aborting ..."); 47 | let sdt = get_sdt(rsdp.sdt(), active_table); 48 | let rsdt = rsdt::Rsdt::new(sdt); 49 | 50 | println!( 51 | "[ acpi ] Found RSDT at address {:#x}", 52 | rsdt.sdt as *const sdt::SdtHeader as usize 53 | ); 54 | 55 | println!( 56 | "[ acpi ] RSDT length {}, data length {}", 57 | rsdt.sdt.length, 58 | rsdt.sdt.length as usize - mem::size_of::<sdt::SdtHeader>() 59 | ); 60 | 61 | println!( 62 | "[ acpi ] RSDT points to {} tables", 63 | rsdt.other_entries.len() 64 | ); 65 | 66 | // let mut madt: madt::Madt = unsafe { *(&*(0 as *const madt::Madt)) }; 67 | match rsdt.find_sdt(b"APIC") { 68 | Some(rsdt::TableType::Madt(mut m)) => { 69 | println!( 70 | "[ acpi ] Found MADT at address {:#x}", 71 | m.sdt as *const sdt::SdtHeader as usize 72 | ); 73 | 74 | m.init(active_table); 75 | } 76 | _ => println!("Could not find MADT."), 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/acpi/rsdp.rs: -------------------------------------------------------------------------------- 1 | use arch::memory::Frame; 2 | use arch::memory::paging::{Page, PhysicalAddress, VirtualAddress}; 3 | use arch::memory::paging::ActivePageTable; 4 | use arch::memory::paging::entry::EntryFlags; 5 | 6 | #[derive(Copy, Clone, Debug)] 7 | #[repr(packed)] 8 | pub struct RsdpDescriptor { 9 | signature: [u8; 8], 10 | checksum: u8, 11 | oem_id: [u8; 6], 12 | pub revision: u8, 13 | rsdt_address: u32, 14 | length: u32, 15 | xsdt_address: u64, 16 | extended_checksum: u8, 17 | reserved: [u8; 3], 18 | } 19 | 20 | impl RsdpDescriptor { 21 | /// Map RSDP address space, search for RSDP. 22 | pub fn init(active_table: &mut ActivePageTable) -> Option<RsdpDescriptor> { 23 | // TODO: Search in EBDA as well. 24 | 25 | let rsdp_start: usize = 0xe0000; 26 | let rsdp_end: usize = 0xf_ffff; 27 | 28 | // Map address space. 29 | { 30 | let start_frame = Frame::containing_address(PhysicalAddress::new(rsdp_start)); 31 | let end_frame = Frame::containing_address(PhysicalAddress::new(rsdp_end)); 32 | 33 | for frame in Frame::range_inclusive(start_frame, end_frame) { 34 | let page = 35 | Page::containing_address(VirtualAddress::new(frame.start_address().get())); 36 | let res = 37 | active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE); 38 | 39 | res.flush(active_table); 40 | } 41 | } 42 | 43 | RsdpDescriptor::search(rsdp_start, rsdp_end) 44 | } 45 | 46 | /// Find and parse the RSDP.
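/// The RSDP signature "RSD PTR " always sits on a 16-byte boundary, which is why the search below steps through the region 16 bytes at a time.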
47 | fn search(start_addr: usize, end_addr: usize) -> Option<RsdpDescriptor> { 48 | for i in 0..(end_addr + 1 - start_addr) / 16 { 49 | let rsdp = unsafe { &*((start_addr + i * 16) as *const RsdpDescriptor) }; 50 | if &rsdp.signature == b"RSD PTR " { 51 | println!( 52 | "[ acpi ] Found RSDP at {:#x}", 53 | rsdp as *const RsdpDescriptor as usize 54 | ); 55 | return Some(*rsdp); 56 | } 57 | } 58 | 59 | None 60 | } 61 | 62 | /// Dependent on ACPI version, return the address of the XSDT/RSDT. 63 | pub fn sdt(&self) -> usize { 64 | if self.revision >= 2 { 65 | self.xsdt_address as usize 66 | } else { 67 | self.rsdt_address as usize 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/acpi/rsdt.rs: -------------------------------------------------------------------------------- 1 | use super::sdt::SdtHeader; 2 | use core::slice; 3 | 4 | use super::madt::Madt; 5 | 6 | #[derive(Debug)] 7 | pub struct Rsdt<'a> { 8 | pub sdt: &'static SdtHeader, 9 | pub other_entries: &'a [u32], 10 | } 11 | 12 | impl<'a> Rsdt<'a> { 13 | pub fn new(sdt: &'static SdtHeader) -> Self { 14 | match &sdt.signature { 15 | b"RSDT" => { 16 | let array = Rsdt::data(sdt); 17 | 18 | Rsdt { 19 | sdt: sdt, 20 | other_entries: array, 21 | } 22 | } 23 | _ => panic!("Non-matching signature, aborting ..."), 24 | } 25 | } 26 | 27 | /// Retrieve a pointed-to table using a byte signature. 28 | pub fn find_sdt(&self, signature: &[u8]) -> Option<TableType> { 29 | // Iterate over all the pointers to other tables. 30 | for i in self.other_entries.iter() { 31 | let sdt = *i as *const SdtHeader; 32 | let sdt = unsafe { &*sdt }; 33 | 34 | let sig: &[u8] = &sdt.signature; 35 | 36 | if sig != signature { 37 | continue; 38 | } else { 39 | match signature { 40 | // TODO: Support more tables. 41 | b"APIC" => return Some(TableType::Madt(Madt::new(sdt))), 42 | _ => return None, 43 | } 44 | } 45 | } 46 | 47 | None 48 | } 49 | 50 | /// Return RSDT data. 51 | pub fn data(sdt: &'static SdtHeader) -> &[u32] { 52 | // len - sizeof(header) / 4. 53 | unsafe { slice::from_raw_parts(sdt.data_address() as *const u32, sdt.data_len() / 4) } 54 | } 55 | } 56 | 57 | pub enum TableType { 58 | Madt(Madt), 59 | Facp, 60 | Hpet, 61 | } 62 | -------------------------------------------------------------------------------- /src/acpi/sdt.rs: -------------------------------------------------------------------------------- 1 | use core::mem; 2 | use core::slice; 3 | 4 | #[derive(Copy, Clone, Debug)] 5 | #[repr(packed)] 6 | /// A standard ACPI descriptor table header. 7 | pub struct SdtHeader { 8 | /// String signature that describes what this table is. 9 | pub signature: [u8; 4], 10 | /// The length of the table. 11 | pub length: u32, 12 | /// ACPI revision that this table is using. 13 | pub revision: u8, 14 | /// Checksum that determines whether the table is valid. 15 | pub checksum: u8, 16 | /// OEM-specific string. 17 | pub oem_id: [u8; 6], 18 | pub oem_table_id: [u8; 8], 19 | pub oem_revision: u32, 20 | pub creator_id: u32, 21 | pub creator_rev: u32, 22 | } 23 | 24 | impl SdtHeader { 25 | /// Return the starting address of this table's data. 26 | pub fn data_address(&self) -> usize { 27 | self as *const _ as usize + mem::size_of::<SdtHeader>() 28 | } 29 | 30 | /// Return the length of this table's data. 31 | pub fn data_len(&self) -> usize { 32 | let total_size = self.length as usize; 33 | let header_size = mem::size_of::<SdtHeader>(); 34 | 35 | // Check if the length is bigger than the header itself. If it is, other data exists.
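// `length` counts the header as well as its payload, so the payload size is simply the difference between the two.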
36 | if total_size >= header_size { 37 | return total_size - header_size; 38 | } else { 39 | 0 // No extra data. 40 | } 41 | } 42 | 43 | /// Return a slice of this table's data. 44 | pub unsafe fn data(&self) -> &[u8] { 45 | slice::from_raw_parts(self.data_address() as *const u8, self.data_len()) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/acpi/xsdt.rs: -------------------------------------------------------------------------------- 1 | use super::sdt::SdtHeader; 2 | 3 | #[derive(Debug)] 4 | pub struct Xsdt(&'static SdtHeader); 5 | 6 | impl Xsdt { 7 | pub fn new(sdt: &'static SdtHeader) -> Option { 8 | match &sdt.signature { 9 | b"XSDT" => Some(Xsdt(sdt)), 10 | _ => None, 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/arch/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(target_arch = "x86_64")] 2 | pub mod x86_64; 3 | 4 | #[cfg(target_arch = "x86_64")] 5 | pub use self::x86_64::*; 6 | -------------------------------------------------------------------------------- /src/arch/x86_64/asm/boot.asm: -------------------------------------------------------------------------------- 1 | global start 2 | extern long_mode_start 3 | 4 | section .text 5 | bits 32 6 | start: 7 | mov esp, stack_top 8 | ; Move Multiboot info pointer to edi to pass it to the kernel. We must not 9 | ; modify the `edi` register until the kernel it called. 10 | mov edi, ebx 11 | 12 | call check_multiboot 13 | call check_cpuid 14 | call check_long_mode 15 | 16 | call set_up_page_tables 17 | call enable_paging 18 | call set_up_SSE 19 | 20 | ; load the 64-bit GDT 21 | lgdt [gdt64.pointer] 22 | 23 | jmp gdt64.code:long_mode_start 24 | 25 | set_up_page_tables: 26 | ; recursively map P4 27 | mov eax, p4_table 28 | or eax, 0b11 ; present + writable 29 | mov [p4_table + 511 * 8], eax 30 | 31 | ; map first P4 entry to P3 table 32 | mov eax, p3_table 33 | or eax, 0b11 ; present + writable 34 | mov [p4_table], eax 35 | 36 | ; map first P3 entry to P2 table 37 | mov eax, p2_table 38 | or eax, 0b11 ; present + writable 39 | mov [p3_table], eax 40 | 41 | ; map each P2 entry to a huge 2MiB page 42 | mov ecx, 0 ; counter variable 43 | .map_p2_table: 44 | ; map ecx-th P2 entry to a huge page that starts at address (2MiB * ecx) 45 | mov eax, 0x200000 ; 2MiB 46 | mul ecx ; start address of ecx-th page 47 | or eax, 0b10000011 ; present + writable + huge 48 | mov [p2_table + ecx * 8], eax ; map ecx-th entry 49 | 50 | inc ecx ; increase counter 51 | cmp ecx, 512 ; if counter == 512, the whole P2 table is mapped 52 | jne .map_p2_table ; else map the next entry 53 | 54 | ret 55 | 56 | enable_paging: 57 | ; load P4 to cr3 register (cpu uses this to access the P4 table) 58 | mov eax, p4_table 59 | mov cr3, eax 60 | 61 | ; enable PAE-flag in cr4 (Physical Address Extension) 62 | mov eax, cr4 63 | or eax, 1 << 5 64 | mov cr4, eax 65 | 66 | ; set the long mode bit in the EFER MSR (model specific register) 67 | mov ecx, 0xC0000080 68 | rdmsr 69 | or eax, 1 << 8 70 | wrmsr 71 | 72 | ; enable paging in the cr0 register 73 | mov eax, cr0 74 | or eax, 1 << 31 75 | mov cr0, eax 76 | 77 | ret 78 | 79 | ; Prints `ERR: ` and the given error code to screen and hangs. 
80 | ; parameter: error code (in ascii) in al 81 | error: 82 | mov dword [0xb8000], 0x4f524f45 83 | mov dword [0xb8004], 0x4f3a4f52 84 | mov dword [0xb8008], 0x4f204f20 85 | mov byte [0xb800a], al 86 | hlt 87 | 88 | ; Throw error 0 if eax doesn't contain the Multiboot 2 magic value (0x36d76289). 89 | check_multiboot: 90 | cmp eax, 0x36d76289 91 | jne .no_multiboot 92 | ret 93 | .no_multiboot: 94 | mov al, "0" 95 | jmp error 96 | 97 | ; Throw error 1 if the CPU doesn't support the CPUID command. 98 | check_cpuid: 99 | ; Check if CPUID is supported by attempting to flip the ID bit (bit 21) in 100 | ; the FLAGS register. If we can flip it, CPUID is available. 101 | 102 | ; Copy FLAGS in to EAX via stack 103 | pushfd 104 | pop eax 105 | 106 | ; Copy to ECX as well for comparing later on 107 | mov ecx, eax 108 | 109 | ; Flip the ID bit 110 | xor eax, 1 << 21 111 | 112 | ; Copy EAX to FLAGS via the stack 113 | push eax 114 | popfd 115 | 116 | ; Copy FLAGS back to EAX (with the flipped bit if CPUID is supported) 117 | pushfd 118 | pop eax 119 | 120 | ; Restore FLAGS from the old version stored in ECX (i.e. flipping the ID bit 121 | ; back if it was ever flipped). 122 | push ecx 123 | popfd 124 | 125 | ; Compare EAX and ECX. If they are equal then that means the bit wasn't 126 | ; flipped, and CPUID isn't supported. 127 | cmp eax, ecx 128 | je .no_cpuid 129 | ret 130 | .no_cpuid: 131 | mov al, "1" 132 | jmp error 133 | 134 | ; Throw error 2 if the CPU doesn't support Long Mode. 135 | check_long_mode: 136 | ; test if extended processor info in available 137 | mov eax, 0x80000000 ; implicit argument for cpuid 138 | cpuid ; get highest supported argument 139 | cmp eax, 0x80000001 ; it needs to be at least 0x80000001 140 | jb .no_long_mode ; if it's less, the CPU is too old for long mode 141 | 142 | ; use extended info to test if long mode is available 143 | mov eax, 0x80000001 ; argument for extended processor info 144 | cpuid ; returns various feature bits in ecx and edx 145 | test edx, 1 << 29 ; test if the LM-bit is set in the D-register 146 | jz .no_long_mode ; If it's not set, there is no long mode 147 | ret 148 | .no_long_mode: 149 | mov al, "2" 150 | jmp error 151 | 152 | ; Check for SSE and enable it. If it's not supported throw error "a". 153 | set_up_SSE: 154 | ; check for SSE 155 | mov eax, 0x1 156 | cpuid 157 | test edx, 1<<25 158 | jz .no_SSE 159 | 160 | ; enable SSE 161 | mov eax, cr0 162 | and ax, 0xFFFB ; clear coprocessor emulation CR0.EM 163 | or ax, 0x2 ; set coprocessor monitoring CR0.MP 164 | mov cr0, eax 165 | mov eax, cr4 166 | or ax, 3 << 9 ; set CR4.OSFXSR and CR4.OSXMMEXCPT at the same time 167 | mov cr4, eax 168 | 169 | ret 170 | .no_SSE: 171 | mov al, "a" 172 | jmp error 173 | 174 | section .bss 175 | align 4096 176 | ; Each page table occupies a single 4KiB physical frame. 177 | p4_table: 178 | resb 4096 179 | p3_table: 180 | resb 4096 181 | p2_table: 182 | resb 4096 183 | stack_bottom: 184 | ; Reserve a 32 KiB stack for the kernel main function. 
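With the frames reserved above, set_up_page_tables builds the smallest mapping long mode will accept: every one of P2's 512 entries maps a 2 MiB huge page, so 512 × 2 MiB = 1 GiB of physical memory is identity-mapped through P4[0] → P3[0] → P2, and entry 511 of P4 is pointed back at the P4 frame itself. That recursive slot is what the Rust-side Mapper later walks to reach page-table frames through virtual addresses. The kernel stack reserved next is eight 4 KiB pages (32 KiB) with no guard page at this stage; per its doc comment, paging::init later turns the previous kernel stack page into a guard page so an overflow faults instead of silently corrupting memory.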
185 | resb 4096 * 8 186 | stack_top: 187 | 188 | section .rodata 189 | gdt64: 190 | dq 0 ; zero entry 191 | .code: equ $ - gdt64 192 | dq (1<<44) | (1<<47) | (1<<43) | (1<<53) ; code segment 193 | .pointer: 194 | dw $ - gdt64 - 1 195 | dq gdt64 196 | -------------------------------------------------------------------------------- /src/arch/x86_64/asm/grub.cfg: -------------------------------------------------------------------------------- 1 | set timeout=10 2 | set default=0 3 | 4 | menuentry "lambdaOS" { 5 | multiboot2 /boot/kernel.bin 6 | boot 7 | } 8 | -------------------------------------------------------------------------------- /src/arch/x86_64/asm/linker.ld: -------------------------------------------------------------------------------- 1 | ENTRY(start) 2 | 3 | SECTIONS { 4 | . = 1M; 5 | 6 | .rodata : 7 | { 8 | /* ensure that the multiboot header is at the beginning */ 9 | KEEP(*(.multiboot_header)) 10 | *(.rodata .rodata.*) 11 | . = ALIGN(4K); 12 | } 13 | 14 | .text : 15 | { 16 | *(.text .text.*) 17 | . = ALIGN(4K); 18 | } 19 | 20 | .data : 21 | { 22 | *(.data .data.*) 23 | . = ALIGN(4K); 24 | } 25 | 26 | .bss : 27 | { 28 | *(.bss .bss.*) 29 | . = ALIGN(4K); 30 | } 31 | 32 | .got : 33 | { 34 | *(.got) 35 | . = ALIGN(4K); 36 | } 37 | 38 | .got.plt : 39 | { 40 | *(.got.plt) 41 | . = ALIGN(4K); 42 | } 43 | 44 | .data.rel.ro : ALIGN(4K) { 45 | *(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*) 46 | . = ALIGN(4K); 47 | } 48 | 49 | .gcc_except_table : ALIGN(4K) { 50 | *(.gcc_except_table) 51 | . = ALIGN(4K); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/arch/x86_64/asm/long_mode_init.asm: -------------------------------------------------------------------------------- 1 | global long_mode_start 2 | extern kmain 3 | 4 | section .text 5 | bits 64 6 | long_mode_start: 7 | ; load 0 into all data segment registers 8 | mov ax, 0 9 | mov ss, ax 10 | mov ds, ax 11 | mov es, ax 12 | mov fs, ax 13 | mov gs, ax 14 | 15 | ; call rust main (with multiboot pointer in rdi) 16 | call kmain 17 | .os_returned: 18 | ; rust main returned, print `OS returned!` 19 | mov rax, 0x4f724f204f534f4f 20 | mov [0xb8000], rax 21 | mov rax, 0x4f724f754f744f65 22 | mov [0xb8008], rax 23 | mov rax, 0x4f214f644f654f6e 24 | mov [0xb8010], rax 25 | hlt 26 | -------------------------------------------------------------------------------- /src/arch/x86_64/asm/multiboot_header.asm: -------------------------------------------------------------------------------- 1 | section .multiboot_header 2 | header_start: 3 | dd 0xe85250d6 ; magic number (multiboot 2) 4 | dd 0 ; architecture 0 (protected mode i386) 5 | dd header_end - header_start ; header length 6 | ; checksum 7 | dd 0x100000000 - (0xe85250d6 + 0 + (header_end - header_start)) 8 | 9 | ; insert optional multiboot tags here 10 | 11 | ; required end tag 12 | dw 0 ; type 13 | dw 0 ; flags 14 | dd 8 ; size 15 | header_end: 16 | -------------------------------------------------------------------------------- /src/arch/x86_64/init.rs: -------------------------------------------------------------------------------- 1 | use super::interrupts; 2 | use super::memory; 3 | use device; 4 | 5 | /// Main kernel init function. This sets everything up for us. 6 | pub unsafe fn init(multiboot_info: usize) { 7 | use device::serial; 8 | use device::pic; 9 | 10 | pic::PICS.lock().disable_8259_pic(); 11 | 12 | // Enable serial for printing. 
13 | serial::init(); 14 | 15 | asm!("cli"); 16 | { 17 | device::vga::buffer::clear_screen(); 18 | println!("[ INFO ] lambdaOS: Begin init."); 19 | 20 | let boot_info = ::multiboot2::load(multiboot_info); 21 | 22 | // Set safety bits in certain registers. 23 | enable_nxe_bit(); 24 | enable_write_protect_bit(); 25 | 26 | // Setup memory management. 27 | let mut memory_controller = memory::init(&boot_info); 28 | interrupts::init(&mut memory_controller); 29 | 30 | // Setup hardware devices. 31 | device::init(); 32 | } 33 | asm!("sti"); 34 | 35 | println!("[ OK ] Init successful, you may now type.") 36 | } 37 | 38 | pub fn enable_nxe_bit() { 39 | use x86_64::registers::msr::{rdmsr, wrmsr, IA32_EFER}; 40 | 41 | let nxe_bit = 1 << 11; 42 | unsafe { 43 | let efer = rdmsr(IA32_EFER); 44 | wrmsr(IA32_EFER, efer | nxe_bit); 45 | } 46 | } 47 | 48 | pub fn enable_write_protect_bit() { 49 | use x86_64::registers::control_regs::{Cr0, cr0, cr0_write}; 50 | 51 | unsafe { cr0_write(cr0() | Cr0::WRITE_PROTECT) }; 52 | } 53 | -------------------------------------------------------------------------------- /src/arch/x86_64/interrupts/exceptions.rs: -------------------------------------------------------------------------------- 1 | //! Handlers for internal CPU exceptions. Currently, when an exception occurs, we just print some 2 | //! debug information and then spin the CPU. TODO: Figure out which exceptions are safe to return 3 | //! from. 4 | 5 | use x86_64::structures::idt::{ExceptionStackFrame, PageFaultErrorCode}; 6 | use super::disable_interrupts_and_then; 7 | 8 | /// Handler for the #DE Exception. This exception occurs when divinding any number by zero using 9 | /// either the DIV or IDIV instructions. 10 | pub extern "x86-interrupt" fn divide_by_zero_handler(stack_frame: &mut ExceptionStackFrame) { 11 | disable_interrupts_and_then(|| { 12 | println!("\nEXCEPTION: DIVIDE BY ZERO\n{:#?}", stack_frame); 13 | loop {} 14 | }); 15 | } 16 | 17 | /// The Debug exception occurs under the following conditions. It is either a fault or a trap. 18 | /// - Instruction fetch breakpoint (Fault). 19 | /// - General detect condition (Fault). 20 | /// - Data r/w breakpoint (Trap). 21 | /// - I/O r/w breakpoint (Trap). 22 | /// - Single-step (Trap). 23 | /// - Task switch (Trap). 24 | pub extern "x86-interrupt" fn debug_handler(stack_frame: &mut ExceptionStackFrame) { 25 | disable_interrupts_and_then(|| { 26 | println!("\nEXCEPTION: DEBUG\n{:#?}", stack_frame); 27 | loop {} 28 | }); 29 | } 30 | 31 | /// A non-maskable interrupt is a hardware-driven interrupt much like those sent by the PIC, except 32 | /// an NMI either goes directly to the CPU or via another controller. An NMI occurs for hardware 33 | /// errors, which are something we can do nothing about. TODO: Investigate how we might discover 34 | /// which piece of hardware is faulty. 35 | pub extern "x86-interrupt" fn nmi_handler(stack_frame: &mut ExceptionStackFrame) { 36 | disable_interrupts_and_then(|| { 37 | println!("\nEXCEPTION: NON-MASKABLE INTERRUPT\n{:#?}", stack_frame); 38 | loop {} 39 | }); 40 | } 41 | 42 | /// Hardware breakpoint exception. This can return without issues. 
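Because the breakpoint handler below just prints the stack frame and returns, it doubles as a cheap end-to-end check that the IDT is loaded and wired correctly. A smoke test along these lines is not in this repo, but could be dropped into kmain after interrupts::init has run:

    // Raise vector 3; with the IDT installed this prints the BREAKPOINT
    // message and execution resumes on the following line.
    unsafe { asm!("int3"); }
    println!("survived the breakpoint");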
43 | pub extern "x86-interrupt" fn breakpoint_handler(stack_frame: &mut ExceptionStackFrame) { 44 | println!( 45 | "\nEXCEPTION: BREAKPOINT at {:#x}\n{:#?}", 46 | stack_frame.instruction_pointer, stack_frame 47 | ); 48 | } 49 | 50 | /// An overflow exception occurs in two situations - where an INTO instruction is executed and the 51 | /// OVERFLOW bit in RFLAGS is set to 1, or when the result of `DIV/IDIV` instruction is greater 52 | /// than the maximum value of a 64-bit integer. 53 | pub extern "x86-interrupt" fn overflow_handler(stack_frame: &mut ExceptionStackFrame) { 54 | disable_interrupts_and_then(|| { 55 | println!("\nEXCEPTION: OVERFLOW\n{:#?}", stack_frame); 56 | loop {} 57 | }); 58 | } 59 | 60 | /// A Bound Range Exceeded exception occurs when the `BOUND` instruction is executed and the index is 61 | /// out of bounds. The `BOUND` instruction takes an index into an array, and compares it with the 62 | /// upper and lower bounds of the array. If the index is out of bounds, this exception is thrown. 63 | pub extern "x86-interrupt" fn bound_range_handler(stack_frame: &mut ExceptionStackFrame) { 64 | disable_interrupts_and_then(|| { 65 | println!("\nEXCEPTION: BOUND RANGE EXCEEDED\n{:#?}", stack_frame); 66 | loop {} 67 | }); 68 | } 69 | 70 | /// If the processor tries to execute an instruction with an invalid or undefined exception (or if 71 | /// the instruction exceeds 15 bytes), an `INVALID OPCODE` exception is thrown. 72 | pub extern "x86-interrupt" fn invalid_opcode_handler(stack_frame: &mut ExceptionStackFrame) { 73 | disable_interrupts_and_then(|| { 74 | println!( 75 | "\nEXCEPTION: INVALID OPCODE at {:#x}\n{:#?}", 76 | stack_frame.instruction_pointer, stack_frame 77 | ); 78 | loop {} 79 | }); 80 | } 81 | 82 | /// This exception occurs when the processor tries to execute an FPU-related instruction but there 83 | /// is no x87 present. This is a very rare occurence, as only very old hardware will not have an 84 | /// FPU. 85 | pub extern "x86-interrupt" fn device_not_available_handler(stack_frame: &mut ExceptionStackFrame) { 86 | disable_interrupts_and_then(|| { 87 | println!("\nEXCEPTION: FPU NOT AVAILABLE\n{:#?}", stack_frame); 88 | loop {} 89 | }); 90 | } 91 | 92 | /// A Double Fault occurs when a) an exception is unhandled, b) when an exception occurs whilst the 93 | /// CPU is in the process of calling the exception handler for the first exception. This is an 94 | /// Abort, meaning it is not possible to recover from a Double Fault. 95 | pub extern "x86-interrupt" fn double_fault_handler( 96 | stack_frame: &mut ExceptionStackFrame, 97 | _error_code: u64, 98 | ) { 99 | disable_interrupts_and_then(|| { 100 | println!("\nEXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame); 101 | loop {} 102 | }); 103 | } 104 | 105 | /// The Invalid TSS exception occurs when an invalid segment selector is referenced during 106 | /// control transfer through a gate descriptor. 107 | pub extern "x86-interrupt" fn invalid_tss_handler( 108 | stack_frame: &mut ExceptionStackFrame, 109 | error_code: u64, 110 | ) { 111 | disable_interrupts_and_then(|| { 112 | println!( 113 | "\nEXCEPTION: INVALID TSS with code: {:?}\n{:#?}", 114 | error_code, stack_frame 115 | ); 116 | loop {} 117 | }); 118 | } 119 | 120 | /// A Segment Not Present exception occurs when an attempt is made to load a segment which has its 121 | /// present bit set to 0. 
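The double fault handler above is the one handler that runs on its own stack: interrupts::init allocates a dedicated stack, stores its top in slot DOUBLE_FAULT_IST_INDEX of the TSS interrupt stack table, and the IDT entry selects that slot. Without it, a kernel stack overflow into a guard page would fault again while the CPU tries to push the exception frame onto the same bad stack, escalating a recoverable bug into a triple fault and reset.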
122 | pub extern "x86-interrupt" fn seg_not_present_handler( 123 | stack_frame: &mut ExceptionStackFrame, 124 | error_code: u64, 125 | ) { 126 | disable_interrupts_and_then(|| { 127 | println!( 128 | "\nEXCEPTION: SEGMENT NOT PRESENT\nerror code: \ 129 | {:?}\n{:#?}", 130 | error_code, stack_frame 131 | ); 132 | 133 | loop {} 134 | }); 135 | } 136 | 137 | /// A Stack Segment Fault exception occurs when: 138 | /// - Loading a stack segment referencing a non-present segment descriptor. 139 | /// - PUSH/POP using ESP/EBP where the referenced address is non-canonical. 140 | /// - The stack limit check fails. 141 | pub extern "x86-interrupt" fn stack_seg_fault_handler( 142 | stack_frame: &mut ExceptionStackFrame, 143 | error_code: u64, 144 | ) { 145 | disable_interrupts_and_then(|| { 146 | println!( 147 | "\nEXCEPTION: STACK SEGMENT FAULT\nerror code: \ 148 | {:?}\n{:#?}", 149 | error_code, stack_frame 150 | ); 151 | 152 | loop {} 153 | }); 154 | } 155 | 156 | /// A General Protection Fault can occur for several different reasons. 157 | /// - Segment error (privilege, type, limit, read/write rights). 158 | /// - Executing a privileged instruction (IRET, INT, OUT, etc.), while CPL != 0. 159 | /// - Writing 1 into a reserved register field. 160 | /// - Referencing the null segment descriptor. 161 | /// - Trying to access an unimplemented register (i.e in Protected Mode: `mov cr6, eax` is 162 | /// illegal). 163 | pub extern "x86-interrupt" fn gpf_handler(stack_frame: &mut ExceptionStackFrame, _error_code: u64) { 164 | disable_interrupts_and_then(|| { 165 | println!("\nEXCEPTION: GPF\n{:#?}", stack_frame); 166 | loop {} 167 | }); 168 | } 169 | 170 | /// A Page Fault occurs when: 171 | /// - a page directory or table entry is not present in physical memory. 172 | /// - An attempt to load the TLB with a translation for a non-executable page occurs. 173 | /// - A protection check on the page (r/w, priveleges) failed. 174 | /// - A reserved bit in the page directory or table entries is set to 1. 175 | /// The address that the CPU tried to access is saved in register `cr2`. 176 | pub extern "x86-interrupt" fn page_fault_handler( 177 | stack_frame: &mut ExceptionStackFrame, 178 | error_code: PageFaultErrorCode, 179 | ) { 180 | disable_interrupts_and_then(|| { 181 | use x86_64::registers::control_regs; 182 | println!( 183 | "\nEXCEPTION: PAGE FAULT while accessing {:#x}\nerror code: \ 184 | {:?}\n{:#?}", 185 | control_regs::cr2(), 186 | error_code, 187 | stack_frame 188 | ); 189 | loop {} 190 | }); 191 | } 192 | 193 | /// An x87-floating point exception occurs when any waiting floating point instruction (e.g, FWAIT 194 | /// or WAIT.), and the following conditions are true: 195 | /// - CR0.NE = 1, 196 | /// - an unmasked x87 floating point exception is pending. 197 | pub extern "x86-interrupt" fn x87_fp_exception_handler(stack_frame: &mut ExceptionStackFrame) { 198 | disable_interrupts_and_then(|| { 199 | println!("\nX87 FLOATING POINT EXCEPTION\n{:#?}", stack_frame); 200 | loop {} 201 | }); 202 | } 203 | 204 | /// The Alignment Check exception occurs when alignment checking is enabled and a instruction 205 | /// attempts to reference an unaligned memory address. Alignment checking is only performed if 206 | /// `CPL = 3`. 
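When the page fault handler above fires, the raw error code is worth decoding alongside cr2, since the CPU packs the cause into the low bits. A hedged sketch (the helper and its name are mine; only the bit positions are architectural):

    /// Decode an x86-64 page-fault error code from its raw bits.
    fn describe_page_fault(raw: u64) -> (&'static str, &'static str, &'static str) {
        let cause = if raw & 1 != 0 { "protection violation" } else { "non-present page" };
        let access = if raw & (1 << 1) != 0 { "write" } else { "read" };
        let mode = if raw & (1 << 2) != 0 { "user" } else { "kernel" };
        // Bit 3 flags a reserved bit set in a paging entry, bit 4 an instruction fetch.
        (cause, access, mode)
    }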
207 | pub extern "x86-interrupt" fn alignment_check_handler( 208 | stack_frame: &mut ExceptionStackFrame, 209 | _error_code: u64, 210 | ) { 211 | disable_interrupts_and_then(|| { 212 | println!("\nEXCEPTION: ALIGNMENT CHECK\n{:#?}", stack_frame); 213 | loop {} 214 | }); 215 | } 216 | 217 | /// The Machine Check exception is an exception that occurs when the CPU detects that it has 218 | /// internal errors - i.e, bad memory, bad cache, faulty timings etc. The error information is 219 | /// placed in the model-specific registers. 220 | pub extern "x86-interrupt" fn machine_check_handler(stack_frame: &mut ExceptionStackFrame) { 221 | disable_interrupts_and_then(|| { 222 | // TODO: use the MSRs to get error information about the MC. 223 | println!("\nEXCEPTION: MACHINE CHECK\n{:#?}", stack_frame); 224 | loop {} 225 | }); 226 | } 227 | 228 | /// If the `CR4.OSXMMEXCEPT` bit is set to 1 in `cr4`, then an unmasked 128-bit media instruction 229 | /// will cause this exception. Otherwise, an `Invalid Opcode` exception occurs. 230 | pub extern "x86-interrupt" fn simd_fp_exception_handler(stack_frame: &mut ExceptionStackFrame) { 231 | disable_interrupts_and_then(|| { 232 | println!( 233 | "\nEXCEPTION: SIMD FLOATING POINT EXCEPTION\n{:#?}", 234 | stack_frame 235 | ); 236 | loop {} 237 | }); 238 | } 239 | -------------------------------------------------------------------------------- /src/arch/x86_64/interrupts/gdt.rs: -------------------------------------------------------------------------------- 1 | use x86_64::structures::tss::TaskStateSegment; 2 | use x86_64::structures::gdt::SegmentSelector; 3 | use x86_64::PrivilegeLevel; 4 | 5 | pub struct Gdt { 6 | table: [u64; 8], 7 | next_free: usize, 8 | } 9 | 10 | impl Gdt { 11 | pub fn new() -> Gdt { 12 | Gdt { 13 | table: [0; 8], 14 | next_free: 1, 15 | } 16 | } 17 | 18 | pub fn add_entry(&mut self, entry: Descriptor) -> SegmentSelector { 19 | let index = match entry { 20 | Descriptor::UserSegment(value) => self.push(value), 21 | Descriptor::SystemSegment(value_low, value_high) => { 22 | let index = self.push(value_low); 23 | self.push(value_high); 24 | index 25 | } 26 | }; 27 | SegmentSelector::new(index as u16, PrivilegeLevel::Ring0) 28 | } 29 | 30 | fn push(&mut self, value: u64) -> usize { 31 | if self.next_free < self.table.len() { 32 | let index = self.next_free; 33 | self.table[index] = value; 34 | self.next_free += 1; 35 | index 36 | } else { 37 | panic!("GDT full"); 38 | } 39 | } 40 | 41 | pub fn load(&'static self) { 42 | use x86_64::instructions::tables::{lgdt, DescriptorTablePointer}; 43 | use core::mem::size_of; 44 | 45 | let ptr = DescriptorTablePointer { 46 | base: self.table.as_ptr() as u64, 47 | limit: (self.table.len() * size_of::() - 1) as u16, 48 | }; 49 | 50 | unsafe { lgdt(&ptr) }; 51 | } 52 | } 53 | 54 | pub enum Descriptor { 55 | UserSegment(u64), 56 | SystemSegment(u64, u64), 57 | } 58 | 59 | impl Descriptor { 60 | pub fn kernel_code_segment() -> Descriptor { 61 | let flags = DescriptorFlags::USER_SEGMENT | DescriptorFlags::PRESENT 62 | | DescriptorFlags::EXECUTABLE | DescriptorFlags::LONG_MODE; 63 | Descriptor::UserSegment(flags.bits()) 64 | } 65 | 66 | pub fn tss_segment(tss: &'static TaskStateSegment) -> Descriptor { 67 | use core::mem::size_of; 68 | use bit_field::BitField; 69 | 70 | let ptr = tss as *const _ as u64; 71 | 72 | let mut low = DescriptorFlags::PRESENT.bits(); 73 | // base 74 | low.set_bits(16..40, ptr.get_bits(0..24)); 75 | low.set_bits(56..64, ptr.get_bits(24..32)); 76 | // limit (the `-1` in needed since 
the bound is inclusive) 77 | low.set_bits(0..16, (size_of::() - 1) as u64); 78 | // type (0b1001 = available 64-bit tss) 79 | low.set_bits(40..44, 0b1001); 80 | 81 | let mut high = 0; 82 | high.set_bits(0..32, ptr.get_bits(32..64)); 83 | 84 | Descriptor::SystemSegment(low, high) 85 | } 86 | } 87 | 88 | bitflags! { 89 | pub struct DescriptorFlags: u64 { 90 | const CONFORMING = 1 << 42; 91 | const EXECUTABLE = 1 << 43; 92 | const USER_SEGMENT = 1 << 44; 93 | const PRESENT = 1 << 47; 94 | const LONG_MODE = 1 << 53; 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/arch/x86_64/interrupts/irq.rs: -------------------------------------------------------------------------------- 1 | use device::pic::PICS; 2 | use device::keyboard::ps2_keyboard::parse_key; 3 | use device::ps2_8042::read_char; 4 | use x86_64::structures::idt::ExceptionStackFrame; 5 | use super::disable_interrupts_and_then; 6 | use device::apic; 7 | 8 | /// Timer handler checks the tick counter and if it exceeds 10, performs a round-robin context 9 | /// switch to the next process. 10 | pub extern "x86-interrupt" fn timer_handler(_stack_frame: &mut ExceptionStackFrame) { 11 | use core::sync::atomic::Ordering; 12 | use device::pit::PIT_TICKS; 13 | use task::{Scheduling, SCHEDULER}; 14 | 15 | println!("timer interrupt."); 16 | 17 | apic::eoi(); 18 | 19 | // Check if allocated timeslice finished (~20ms). 20 | if PIT_TICKS.fetch_add(1, Ordering::SeqCst) >= 10 { 21 | PIT_TICKS.store(0, Ordering::SeqCst); 22 | 23 | unsafe { 24 | // Call scheduler. 25 | disable_interrupts_and_then(|| { 26 | SCHEDULER.resched(); 27 | }); 28 | } 29 | } 30 | } 31 | 32 | pub extern "x86-interrupt" fn keyboard_handler(_stack_frame: &mut ExceptionStackFrame) { 33 | println!("keyboard interrupt."); 34 | let code = read_char(); 35 | 36 | parse_key(code); 37 | 38 | apic::eoi(); 39 | } 40 | -------------------------------------------------------------------------------- /src/arch/x86_64/interrupts/mod.rs: -------------------------------------------------------------------------------- 1 | use arch::memory::MemoryController; 2 | use x86_64::structures::tss::TaskStateSegment; 3 | use x86_64::structures::idt::{Idt, ExceptionStackFrame}; 4 | use spin::Once; 5 | 6 | pub mod gdt; 7 | pub mod exceptions; 8 | pub mod irq; 9 | pub mod utils; 10 | 11 | pub use self::utils::*; 12 | 13 | const DOUBLE_FAULT_IST_INDEX: usize = 0; 14 | 15 | lazy_static! 
{ 16 | static ref IDT: Idt = { 17 | let mut idt = Idt::new(); 18 | 19 | println!("[ interrupts ] Installing exception handlers."); 20 | idt.divide_by_zero.set_handler_fn(exceptions::divide_by_zero_handler); 21 | idt.debug.set_handler_fn(exceptions::debug_handler); 22 | idt.non_maskable_interrupt.set_handler_fn(exceptions::nmi_handler); 23 | idt.breakpoint.set_handler_fn(exceptions::breakpoint_handler); 24 | idt.overflow.set_handler_fn(exceptions::overflow_handler); 25 | idt.bound_range_exceeded.set_handler_fn(exceptions::bound_range_handler); 26 | idt.invalid_opcode.set_handler_fn(exceptions::invalid_opcode_handler); 27 | idt.device_not_available.set_handler_fn(exceptions::device_not_available_handler); 28 | unsafe { 29 | idt.double_fault.set_handler_fn(exceptions::double_fault_handler) 30 | .set_stack_index(DOUBLE_FAULT_IST_INDEX as u16); 31 | } 32 | idt.invalid_tss.set_handler_fn(exceptions::invalid_tss_handler); 33 | idt.segment_not_present.set_handler_fn(exceptions::seg_not_present_handler); 34 | idt.stack_segment_fault.set_handler_fn(exceptions::stack_seg_fault_handler); 35 | idt.general_protection_fault.set_handler_fn(exceptions::gpf_handler); 36 | idt.page_fault.set_handler_fn(exceptions::page_fault_handler); 37 | idt.x87_floating_point.set_handler_fn(exceptions::x87_fp_exception_handler); 38 | idt.alignment_check.set_handler_fn(exceptions::alignment_check_handler); 39 | idt.machine_check.set_handler_fn(exceptions::machine_check_handler); 40 | idt.simd_floating_point.set_handler_fn(exceptions::simd_fp_exception_handler); 41 | 42 | println!("[ interrupts ] Installing IRQs."); 43 | idt.interrupts[0].set_handler_fn(irq::timer_handler); 44 | // idt.interrupts[1].set_handler_fn(irq::keyboard_handler); 45 | 46 | idt.interrupts[0x30 - 0x20].set_handler_fn(irq::timer_handler); 47 | // idt.interrupts[17].set_handler_fn(irq::keyboard_handler); 48 | 49 | // APIC NMI. 50 | for vec in (0x90-0x20)..(0x97-0x20) { 51 | idt.interrupts[vec].set_handler_fn(apic_nmi_handler); 52 | } 53 | idt.interrupts[0xff - 0x20].set_handler_fn(spurious_interrupt_handler); 54 | 55 | idt 56 | }; 57 | } 58 | 59 | static TSS: Once = Once::new(); 60 | static GDT: Once = Once::new(); 61 | 62 | /// Loads an IDT, GDT and TSS and reloads code segment registers. 63 | pub fn init(memory_controller: &mut MemoryController) { 64 | use x86_64::structures::gdt::SegmentSelector; 65 | use x86_64::instructions::segmentation::set_cs; 66 | use x86_64::instructions::tables::load_tss; 67 | use x86_64::VirtualAddress; 68 | 69 | let double_fault_stack = memory_controller 70 | .alloc_stack(1) 71 | .expect("could not allocate double fault stack"); 72 | 73 | let tss = TSS.call_once(|| { 74 | let mut tss = TaskStateSegment::new(); 75 | tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX] = 76 | VirtualAddress(double_fault_stack.top()); 77 | //TODO allocate privilege stacks. 78 | tss 79 | }); 80 | 81 | let mut code_selector = SegmentSelector(0); 82 | let mut tss_selector = SegmentSelector(0); 83 | let gdt = GDT.call_once(|| { 84 | let mut gdt = gdt::Gdt::new(); 85 | println!("[ tables ] Loading GDT entries."); 86 | code_selector = gdt.add_entry(gdt::Descriptor::kernel_code_segment()); 87 | tss_selector = gdt.add_entry(gdt::Descriptor::tss_segment(&tss)); 88 | gdt 89 | }); 90 | 91 | // Load a new GDT in the CPU. 92 | gdt.load(); 93 | println!("[ tables ] Successfully loaded GDT."); 94 | 95 | unsafe { 96 | // reload code segment register. 
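// The CS register still holds a selector that indexes the old GDT; `lgdt`
// alone does not refresh it. `set_cs` below reloads CS against the new
// table, and only after that is it safe to `load_tss`, because `ltr` looks
// the TSS selector up in whichever GDT is currently active.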
97 | println!("[ tables ] Reloading CS."); 98 | set_cs(code_selector); 99 | // load TSS 100 | println!("[ tables ] Loading TSS."); 101 | load_tss(tss_selector); 102 | } 103 | 104 | // Load the IDT 105 | IDT.load(); 106 | println!("[ tables ] Successfully loaded IDT.") 107 | } 108 | 109 | pub extern "x86-interrupt" fn apic_nmi_handler(stack_frame: &mut ExceptionStackFrame) { 110 | println!("NON-MASKABLE APIC INTERRUPT!"); 111 | loop {} 112 | } 113 | 114 | pub extern "x86-interrupt" fn spurious_interrupt_handler(stack_frame: &mut ExceptionStackFrame) { 115 | println!("SPURIOUS INTERRUPT!"); 116 | } 117 | -------------------------------------------------------------------------------- /src/arch/x86_64/interrupts/syscall/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod x86_64; 2 | -------------------------------------------------------------------------------- /src/arch/x86_64/interrupts/syscall/x86_64.rs: -------------------------------------------------------------------------------- 1 | pub unsafe fn syscall0(mut a: usize) -> usize { 2 | asm!("int 0x80" 3 | : "={rax}"(a) 4 | : "{rax}"(a) 5 | : "memory" 6 | : "intel", "volatile"); 7 | a 8 | } 9 | 10 | pub unsafe fn syscall1(mut a: usize, b: usize) -> usize { 11 | asm!("int 0x80" 12 | : "={rax}"(a) 13 | : "{rax}"(a), "{rbx}"(b) 14 | : "memory" 15 | : "intel", "volatile"); 16 | 17 | a 18 | } 19 | 20 | pub unsafe fn syscall1_clobber(mut a: usize, b: usize) -> usize { 21 | asm!("int 0x80" 22 | : "={rax}"(a) 23 | : "{rax}"(a), "{rbx}"(b) 24 | : "memory", "rbx", "rcx", "rdx", "rsi", "rdi", "r8", 25 | "r9", "r10", "r11", "r12", "r13", "r14", "r15" 26 | : "intel", "volatile"); 27 | 28 | a 29 | } 30 | 31 | pub unsafe fn syscall2(mut a: usize, b: usize, c: usize) -> usize { 32 | asm!("int 0x80" 33 | : "={rax}"(a) 34 | : "{rax}"(a), "{rbx}"(b), "{rcx}"(c) 35 | : "memory" 36 | : "intel", "volatile"); 37 | 38 | a 39 | } 40 | 41 | pub unsafe fn syscall3(mut a: usize, b: usize, c: usize, d: usize) -> usize { 42 | asm!("int 0x80" 43 | : "={rax}"(a) 44 | : "{rax}"(a), "{rbx}"(b), "{rcx}"(c), "{rdx}"(d) 45 | : "memory" 46 | : "intel", "volatile"); 47 | a 48 | } 49 | 50 | pub unsafe fn syscall4(mut a: usize, b: usize, c: usize, d: usize, e: usize) -> usize { 51 | asm!("int 0x80" 52 | : "={rax}"(a) 53 | : "{rax}"(a), "{rbx}"(b), "{rcx}"(c), "{rdx}"(d), "{rsi}"(e) 54 | : "memory" 55 | : "intel", "volatile"); 56 | 57 | a 58 | } 59 | 60 | pub unsafe fn syscall5(mut a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) 61 | -> usize { 62 | asm!("int 0x80" 63 | : "={rax}"(a) 64 | : "{rax}"(a), "{rbx}"(b), "{rcx}"(c), "{rdx}"(d), "{rsi}"(e), "{rdi}"(f) 65 | : "memory" 66 | : "intel", "volatile"); 67 | 68 | a 69 | } 70 | -------------------------------------------------------------------------------- /src/arch/x86_64/interrupts/utils.rs: -------------------------------------------------------------------------------- 1 | unsafe fn disable() { 2 | asm!("cli"); 3 | } 4 | 5 | unsafe fn enable() { 6 | asm!("sti"); 7 | } 8 | 9 | /// Disable all interrupts and save the PIC masks 10 | pub fn disable_interrupts() -> (u8, u8) { 11 | use device::pic::PICS; 12 | 13 | unsafe { 14 | disable(); 15 | } 16 | 17 | let saved_masks: (u8, u8) = { 18 | let mask_pic0 = PICS.lock().pics[0].data.read(); 19 | let mask_pic1 = PICS.lock().pics[1].data.read(); 20 | 21 | (mask_pic0, mask_pic1) 22 | }; 23 | 24 | PICS.lock().pics[0].data.write(0xff); 25 | PICS.lock().pics[1].data.write(0xff); 26 | 27 | saved_masks 28 | } 29 | 30 | /// Enable 
all interrupts 31 | pub fn enable_interrupts() { 32 | use device::pic::PICS; 33 | 34 | // Ensure that PIC manipulation is not interrupted 35 | unsafe { 36 | disable(); 37 | } 38 | 39 | { 40 | // Clear all interrupt masks 41 | // PICS.lock().pics[0].data.write(0); 42 | // PICS.lock().pics[1].data.write(0); 43 | } 44 | 45 | unsafe { 46 | enable(); 47 | } 48 | } 49 | 50 | /// Restore interrupts to previous state 51 | pub fn restore_interrupts(saved_masks: (u8, u8)) { 52 | use device::pic::PICS; 53 | 54 | // Ensure PIC manipulation is not interrupted 55 | unsafe { 56 | disable(); 57 | } 58 | 59 | let (mask_pic0, mask_pic1) = saved_masks; 60 | 61 | PICS.lock().pics[0].data.write(mask_pic0); 62 | PICS.lock().pics[1].data.write(mask_pic1); 63 | 64 | unsafe { 65 | enable(); 66 | } 67 | } 68 | 69 | // Stolen from Robert Gries. 70 | // This function disables interrupts, allows a function to run without them enabled, and then 71 | // reenables interrupts. 72 | pub fn disable_interrupts_and_then(f: F) -> T 73 | where 74 | F: FnOnce() -> T, 75 | { 76 | let saved_masks = disable_interrupts(); 77 | 78 | let result: T = f(); 79 | 80 | restore_interrupts(saved_masks); 81 | 82 | result 83 | } 84 | -------------------------------------------------------------------------------- /src/arch/x86_64/memory/area_frame_allocator.rs: -------------------------------------------------------------------------------- 1 | use arch::memory::{Frame, FrameAllocator}; 2 | use multiboot2::{MemoryArea, MemoryAreaIter}; 3 | use arch::memory::paging::PhysicalAddress; 4 | 5 | /// A frame allocator that uses the memory areas from the multiboot information structure as 6 | /// source. The {kernel, multiboot}_{start, end} fields are used to avoid returning memory that is 7 | /// already in use. 8 | /// 9 | /// `kernel_end` and `multiboot_end` are _inclusive_ bounds. 10 | pub struct AreaFrameAllocator { 11 | /// The next available physical frame. 12 | next_free_frame: Frame, 13 | /// The current memory area, detected by multiboot using the e820. 14 | current_area: Option<&'static MemoryArea>, 15 | /// An iterator over all memory areas. 16 | areas: MemoryAreaIter, 17 | /// The starting frame of the kernel in physical memory. 18 | /// frame.start_address().get() == 1MiB. 19 | kernel_start: Frame, 20 | /// The end frame of the kernel in physical memory. 21 | kernel_end: Frame, 22 | /// The starting frame of the multiboot structure in physical memory, 23 | multiboot_start: Frame, 24 | /// The end frame of the multiboot data structure in physical memory. 25 | multiboot_end: Frame, 26 | } 27 | 28 | impl AreaFrameAllocator { 29 | pub fn new( 30 | kernel_start: usize, 31 | kernel_end: usize, 32 | multiboot_start: usize, 33 | multiboot_end: usize, 34 | memory_areas: MemoryAreaIter, 35 | ) -> AreaFrameAllocator { 36 | let mut allocator = AreaFrameAllocator { 37 | next_free_frame: Frame::containing_address(PhysicalAddress::new(0)), 38 | current_area: None, 39 | areas: memory_areas, 40 | kernel_start: Frame::containing_address(PhysicalAddress::new(kernel_start)), 41 | kernel_end: Frame::containing_address(PhysicalAddress::new(kernel_end)), 42 | multiboot_start: Frame::containing_address(PhysicalAddress::new(multiboot_start)), 43 | multiboot_end: Frame::containing_address(PhysicalAddress::new(multiboot_end)), 44 | }; 45 | allocator.choose_next_area(); 46 | allocator.allocate_frame(1); 47 | allocator 48 | } 49 | 50 | /// Choose the next available memory area. 
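disable_interrupts_and_then, defined in utils.rs above, is the critical-section primitive that both the heap allocator and the timer IRQ's scheduling path funnel through. A usage sketch (the static and the function here are illustrative, not part of the repo):

    use core::sync::atomic::{AtomicUsize, Ordering};

    static EVENTS_SEEN: AtomicUsize = AtomicUsize::new(0); // illustrative only

    fn record_event() -> usize {
        disable_interrupts_and_then(|| {
            // IF is clear and both PICs are fully masked in here, so no IRQ
            // handler can interleave with this read-modify-write sequence.
            let next = EVENTS_SEEN.load(Ordering::SeqCst) + 1;
            EVENTS_SEEN.store(next, Ordering::SeqCst);
            next
        })
    }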
51 | fn choose_next_area(&mut self) { 52 | self.current_area = self.areas 53 | .clone() 54 | .filter(|area| { 55 | let address = area.start_address() + area.size() - 1; 56 | Frame::containing_address(PhysicalAddress::new(address as usize)) 57 | >= self.next_free_frame 58 | }) 59 | .min_by_key(|area| area.start_address()); 60 | 61 | if let Some(area) = self.current_area { 62 | let start_frame = Frame::containing_address(PhysicalAddress::new(area.start_address())); 63 | println!( 64 | "First area starts at address: {:#x}", 65 | start_frame.start_address().get() 66 | ); 67 | if self.next_free_frame < start_frame { 68 | self.next_free_frame = start_frame; 69 | } 70 | } 71 | } 72 | } 73 | 74 | impl FrameAllocator for AreaFrameAllocator { 75 | /// Allocate a single frame. Return `None` if we are out of memory. 76 | fn allocate_frame(&mut self, count: usize) -> Option { 77 | if count == 0 { 78 | return None; 79 | } else if let Some(area) = self.current_area { 80 | // "clone" the frame to return it if it's free. Frame doesn't 81 | // implement Clone, but we can construct an identical frame. 82 | let start_frame = Frame { 83 | number: self.next_free_frame.number, 84 | }; 85 | 86 | let end_frame = Frame { 87 | number: self.next_free_frame.number + (count - 1), 88 | }; 89 | 90 | // the last frame of the current area 91 | let current_area_last_frame = { 92 | let address = area.start_address() + area.size() - 1; 93 | Frame::containing_address(PhysicalAddress::new(address)) 94 | }; 95 | 96 | if end_frame > current_area_last_frame { 97 | // all frames of current area are used, switch to next area 98 | self.choose_next_area(); 99 | } else if (start_frame >= self.kernel_start && start_frame <= self.kernel_end) 100 | || (end_frame >= self.kernel_start && start_frame <= self.kernel_end) 101 | { 102 | // frame range is used by the kernel. 103 | self.next_free_frame = Frame { 104 | number: self.kernel_end.number + 1, 105 | }; 106 | } else if (start_frame >= self.multiboot_start && start_frame <= self.multiboot_end) 107 | || (end_frame >= self.multiboot_start && end_frame <= self.multiboot_end) 108 | { 109 | // `frame` is used by the multiboot information structure 110 | self.next_free_frame = Frame { 111 | number: self.multiboot_end.number + 1, 112 | }; 113 | } else { 114 | // frame is unused, increment `next_free_frame` and return it 115 | self.next_free_frame.number += 1; 116 | return Some(start_frame); 117 | } 118 | // `frame` was not valid, try it again with the updated `next_free_frame` 119 | self.allocate_frame(count) 120 | } else { 121 | None // no free frames left 122 | } 123 | } 124 | 125 | fn deallocate_frame(&mut self, _frame: Frame) { 126 | unimplemented!() 127 | } 128 | 129 | /// Get a count of available free frames. 130 | fn free_frames(&mut self) -> usize { 131 | let mut count = 0; 132 | 133 | for area in self.areas.clone() { 134 | let start_frame = Frame::containing_address(PhysicalAddress::new(area.start_address())); 135 | let end_frame = Frame::containing_address(PhysicalAddress::new( 136 | area.start_address() + area.size() - 1, 137 | )); 138 | 139 | for frame in Frame::range_inclusive(start_frame, end_frame) { 140 | if frame >= self.kernel_start && frame <= self.kernel_end { 141 | // Frame is used by the kernel. 142 | } else if frame >= self.next_free_frame { 143 | count += 1; 144 | } else { 145 | // Inside of used range. 
146 | } 147 | } 148 | } 149 | 150 | count 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/arch/x86_64/memory/heap_allocator.rs: -------------------------------------------------------------------------------- 1 | use alloc::allocator::{Alloc, AllocErr, Layout}; 2 | use linked_list_allocator::LockedHeap; 3 | use arch::interrupts::disable_interrupts_and_then; 4 | 5 | pub const HEAP_START: usize = 0o_000_001_000_000_0000; 6 | pub const HEAP_SIZE: usize = 500 * 1024; 7 | 8 | pub struct HeapAllocator { 9 | inner: LockedHeap, 10 | } 11 | 12 | impl HeapAllocator { 13 | /// Creates an empty heap. All allocate calls will return `None`. 14 | pub const fn new() -> Self { 15 | HeapAllocator { 16 | inner: LockedHeap::empty(), 17 | } 18 | } 19 | 20 | /// Initializes an empty heap 21 | /// 22 | /// # Unsafety 23 | /// 24 | /// This function must be called at most once and must only be used on an 25 | /// empty heap. Also, it is assumed that interrupts are disabled. 26 | pub unsafe fn init(&self, heap_bottom: usize, heap_size: usize) { 27 | self.inner.lock().init(heap_bottom, heap_size); 28 | } 29 | 30 | pub unsafe fn extend(&mut self, by: usize) { 31 | self.inner.lock().extend(by); 32 | } 33 | } 34 | 35 | /// Wrappers for inner Alloc implementation 36 | unsafe impl<'a> Alloc for &'a HeapAllocator { 37 | unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { 38 | disable_interrupts_and_then(|| -> Result<*mut u8, AllocErr> { 39 | self.inner.lock().alloc(layout) 40 | }) 41 | } 42 | 43 | unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { 44 | disable_interrupts_and_then(|| { 45 | self.inner.lock().dealloc(ptr, layout); 46 | }); 47 | } 48 | 49 | fn oom(&mut self, _: AllocErr) -> ! { 50 | panic!("Out of memory"); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/arch/x86_64/memory/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::area_frame_allocator::AreaFrameAllocator; 2 | pub use self::paging::ActivePageTable; 3 | pub use self::stack_allocator::Stack; 4 | use self::paging::{PhysicalAddress, VirtualAddress}; 5 | use self::paging::entry::EntryFlags; 6 | use acpi; 7 | use multiboot2::BootInformation; 8 | use spin::Mutex; 9 | 10 | pub mod area_frame_allocator; 11 | pub mod heap_allocator; 12 | pub mod paging; 13 | pub mod stack_allocator; 14 | 15 | /// The size of a physical page on x86. 
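HeapAllocator simply forwards to linked_list_allocator's LockedHeap with interrupts masked around every call, and HEAP_SIZE gives the kernel a 500 KiB heap. Once memory::init below has mapped the heap pages and called HEAP_ALLOCATOR.init, the usual alloc-crate types work; a minimal sketch of what later code can then do:

    // Anywhere that runs after memory::init (e.g. later in kmain):
    use alloc::boxed::Box;
    use alloc::vec::Vec;

    let boxed = Box::new(0xdead_beef_u64);          // one heap allocation
    let mut log: Vec<&'static str> = Vec::new();
    log.push("heap online");
    println!("{} log entries, boxed value {:#x}", log.len(), *boxed);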
16 | pub const PAGE_SIZE: usize = 4096; 17 | 18 | pub static ALLOCATOR: Mutex> = Mutex::new(None); 19 | 20 | pub fn init(boot_info: &BootInformation) -> MemoryController { 21 | assert_has_not_been_called!("memory::init must be called only once"); 22 | 23 | let memory_map_tag = boot_info.memory_map_tag().expect("Memory map tag required"); 24 | let elf_sections_tag = boot_info 25 | .elf_sections_tag() 26 | .expect("Elf sections tag required"); 27 | 28 | let kernel_start = elf_sections_tag 29 | .sections() 30 | .filter(|s| s.is_allocated()) 31 | .map(|s| s.start_address()) 32 | .min() 33 | .unwrap(); 34 | let kernel_end = elf_sections_tag 35 | .sections() 36 | .filter(|s| s.is_allocated()) 37 | .map(|s| s.start_address() + s.size()) 38 | .max() 39 | .unwrap(); 40 | 41 | println!( 42 | "[ pmm ] Kernel start: {:#x}, kernel end: {:#x}", 43 | kernel_start, kernel_end 44 | ); 45 | println!( 46 | "[ pmm ] Multiboot data structure start: {:#x}, end: {:#x}", 47 | boot_info.start_address(), 48 | boot_info.end_address() 49 | ); 50 | 51 | // Construct a physical frame allocator based on parameters passed to the main kernel. 52 | let frame_allocator = AreaFrameAllocator::new( 53 | kernel_start as usize, 54 | kernel_end as usize, 55 | boot_info.start_address(), 56 | boot_info.end_address(), 57 | memory_map_tag.memory_areas(), 58 | ); 59 | 60 | *ALLOCATOR.lock() = Some(frame_allocator); 61 | 62 | let mut active_table = paging::init(&boot_info); 63 | 64 | use self::paging::Page; 65 | use self::heap_allocator::{HEAP_SIZE, HEAP_START}; 66 | 67 | // The beginning and end of the heap. 68 | let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START)); 69 | let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE - 1)); 70 | 71 | println!("[ vmm ] Mapping heap pages ..."); 72 | 73 | for page in Page::range_inclusive(heap_start_page, heap_end_page) { 74 | let result = active_table.map(page, EntryFlags::PRESENT | EntryFlags::WRITABLE); 75 | // Flush this vaddr translation from the TLB. 76 | result.flush(&mut active_table); 77 | } 78 | 79 | unsafe { ::HEAP_ALLOCATOR.init(HEAP_START, HEAP_SIZE) }; 80 | 81 | let stack_allocator = { 82 | let stack_start_page = heap_end_page + 1; 83 | let stack_end_page = stack_start_page + 100; 84 | let stack_alloc_range = Page::range_inclusive(stack_start_page, stack_end_page); 85 | stack_allocator::StackAllocator::new(stack_alloc_range) 86 | }; 87 | unsafe { acpi::init(&mut active_table) }; 88 | MemoryController { 89 | active_table: active_table, 90 | stack_allocator: stack_allocator, 91 | } 92 | } 93 | 94 | pub struct MemoryController { 95 | active_table: paging::ActivePageTable, 96 | stack_allocator: stack_allocator::StackAllocator, 97 | } 98 | 99 | impl MemoryController { 100 | pub fn alloc_stack(&mut self, size_in_pages: usize) -> Option { 101 | let &mut MemoryController { 102 | ref mut active_table, 103 | ref mut stack_allocator, 104 | } = self; 105 | stack_allocator.alloc_stack(active_table, size_in_pages) 106 | } 107 | 108 | /* pub fn allocate_frame(&mut self, count: usize) -> Option { 109 | let &mut MemoryController { 110 | ref mut active_table, 111 | ref mut frame_allocator, 112 | ref mut stack_allocator, 113 | } = self; 114 | 115 | frame_allocator.allocate_frame(count) 116 | } */ 117 | } 118 | 119 | #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] 120 | pub struct Frame { 121 | number: usize, 122 | } 123 | 124 | impl Frame { 125 | /// Return the frame that contains the given physical address. 
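Frames are just indices into physical memory: with PAGE_SIZE = 4096, the VGA text buffer at physical 0xb8000 lives in frame 0xb8000 / 0x1000 = 0xb8, whose start_address is 0xb8 * 0x1000 = 0xb8000 again, and every address up to 0xb8fff maps to that same frame. containing_address below is that division; start_address is the multiplication back.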
126 | pub fn containing_address(address: PhysicalAddress) -> Frame { 127 | Frame { 128 | number: address.get() / PAGE_SIZE, 129 | } 130 | } 131 | 132 | /// Return the starting address of this frame. 133 | pub fn start_address(&self) -> PhysicalAddress { 134 | PhysicalAddress::new(self.number * PAGE_SIZE) 135 | } 136 | 137 | fn clone(&self) -> Frame { 138 | Frame { 139 | number: self.number, 140 | } 141 | } 142 | 143 | pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter { 144 | FrameIter { 145 | start: start, 146 | end: end, 147 | } 148 | } 149 | } 150 | 151 | pub struct FrameIter { 152 | start: Frame, 153 | end: Frame, 154 | } 155 | 156 | impl Iterator for FrameIter { 157 | type Item = Frame; 158 | 159 | fn next(&mut self) -> Option { 160 | if self.start <= self.end { 161 | let frame = self.start.clone(); 162 | self.start.number += 1; 163 | Some(frame) 164 | } else { 165 | None 166 | } 167 | } 168 | } 169 | 170 | pub trait FrameAllocator { 171 | fn allocate_frame(&mut self, count: usize) -> Option; 172 | fn deallocate_frame(&mut self, frame: Frame); 173 | fn free_frames(&mut self) -> usize; 174 | } 175 | 176 | /// Allocate a frame. 177 | pub fn allocate_frames(count: usize) -> Option { 178 | if let Some(ref mut frame_allocator) = *ALLOCATOR.lock() { 179 | return frame_allocator.allocate_frame(count); 180 | } else { 181 | panic!("Frame allocator called before init."); 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /src/arch/x86_64/memory/paging/entry.rs: -------------------------------------------------------------------------------- 1 | use arch::memory::Frame; 2 | use multiboot2::ElfSection; 3 | use arch::memory::paging::PhysicalAddress; 4 | 5 | /// A page table entry. 6 | pub struct Entry(u64); 7 | 8 | impl Entry { 9 | /// Check if the entry is used or not. 10 | pub fn is_unused(&self) -> bool { 11 | self.0 == 0 12 | } 13 | 14 | /// Set this entry as unused. 15 | pub fn set_unused(&mut self) { 16 | self.0 = 0; 17 | } 18 | 19 | /// Return the current flags on the page. 20 | pub fn flags(&self) -> EntryFlags { 21 | EntryFlags::from_bits_truncate(self.0) 22 | } 23 | 24 | /// Return the physical frame that this page points to. 25 | pub fn pointed_frame(&self) -> Option { 26 | if self.flags().contains(EntryFlags::PRESENT) { 27 | Some(Frame::containing_address(PhysicalAddress::new( 28 | self.0 as usize & 0x000fffff_fffff000, 29 | ))) 30 | } else { 31 | None 32 | } 33 | } 34 | 35 | /// Set some flags on an entry. 36 | pub fn set(&mut self, frame: Frame, flags: EntryFlags) { 37 | assert!(frame.start_address().get() & !0x000fffff_fffff000 == 0); 38 | self.0 = (frame.start_address().get() as u64) | flags.bits(); 39 | } 40 | } 41 | 42 | bitflags! { 43 | pub struct EntryFlags: u64 { 44 | /// Page is present. 45 | const PRESENT = 1 << 0; 46 | /// Page is writable-to. 47 | const WRITABLE = 1 << 1; 48 | /// Page is accesible from ring-3 49 | const USER_ACCESSIBLE = 1 << 2; 50 | /// Write through caching is performed 51 | /// on this page. 52 | const WRITE_THROUGH = 1 << 3; 53 | /// This page should not be cached. 54 | const NO_CACHE = 1 << 4; 55 | /// This page has been accessed. 56 | const ACCESSED = 1 << 5; 57 | /// This page has been written to. 58 | const DIRTY = 1 << 6; 59 | /// Page is a hugepage. 60 | const HUGE_PAGE = 1 << 7; 61 | /// This page's address will not be updated in the TLB, 62 | /// if CR3 is reset. 63 | const GLOBAL = 1 << 8; 64 | /// Non-executable page. 
65 | const NO_EXECUTE = 1 << 63; 66 | } 67 | } 68 | 69 | impl EntryFlags { 70 | /// Parse the flags on an ELF section to our `EntryFlags` struct. 71 | pub fn from_elf_section_flags(section: &ElfSection) -> EntryFlags { 72 | use multiboot2::ElfSectionFlags; 73 | 74 | let mut flags = EntryFlags::empty(); 75 | 76 | if section.flags().contains(ElfSectionFlags::ALLOCATED) { 77 | // section is loaded to memory 78 | flags = flags | EntryFlags::PRESENT; 79 | } 80 | if section.flags().contains(ElfSectionFlags::WRITABLE) { 81 | flags = flags | EntryFlags::WRITABLE; 82 | } 83 | if !section.flags().contains(ElfSectionFlags::EXECUTABLE) { 84 | flags = flags | EntryFlags::NO_EXECUTE; 85 | } 86 | 87 | flags 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/arch/x86_64/memory/paging/mapper.rs: -------------------------------------------------------------------------------- 1 | use super::{ActivePageTable, Page, PhysicalAddress, VirtualAddress, ENTRY_COUNT}; 2 | use super::entry::EntryFlags; 3 | use super::table::{self, Level4, Table}; 4 | use arch::memory::{allocate_frames, Frame, PAGE_SIZE}; 5 | use core::ptr::Unique; 6 | use core::mem; 7 | 8 | /// A helper struct which does most of the paging gruntwork. 9 | pub struct Mapper { 10 | p4: Unique>, 11 | } 12 | 13 | impl Mapper { 14 | pub unsafe fn new() -> Mapper { 15 | Mapper { 16 | p4: Unique::new_unchecked(table::P4), 17 | } 18 | } 19 | 20 | pub fn p4(&self) -> &Table { 21 | unsafe { self.p4.as_ref() } 22 | } 23 | 24 | pub fn p4_mut(&mut self) -> &mut Table { 25 | unsafe { self.p4.as_mut() } 26 | } 27 | 28 | /// Translate a virtual address to a physical address. 29 | pub fn translate(&self, virtual_address: VirtualAddress) -> Option { 30 | let offset = virtual_address.get() % PAGE_SIZE; 31 | self.translate_page(Page::containing_address(virtual_address)) 32 | .map(|frame| PhysicalAddress::new(frame.number * PAGE_SIZE + offset)) 33 | } 34 | 35 | /// Walk the page tables to find the physical frame that a passed `page` is mapped to. 36 | pub fn translate_page(&self, page: Page) -> Option { 37 | // Get reference to the P3 table. 38 | let p3 = self.p4().next_table(page.p4_index()); 39 | 40 | // Check if this page is a huge page. 41 | let huge_page = || { 42 | p3.and_then(|p3| { 43 | let p3_entry = &p3[page.p3_index()]; 44 | // 1GiB page? 45 | if let Some(start_frame) = p3_entry.pointed_frame() { 46 | if p3_entry.flags().contains(EntryFlags::HUGE_PAGE) { 47 | // address must be 1GiB aligned 48 | assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0); 49 | return Some(Frame { 50 | number: start_frame.number + page.p2_index() * ENTRY_COUNT 51 | + page.p1_index(), 52 | }); 53 | } 54 | } 55 | if let Some(p2) = p3.next_table(page.p3_index()) { 56 | let p2_entry = &p2[page.p2_index()]; 57 | // 2MiB page? 58 | if let Some(start_frame) = p2_entry.pointed_frame() { 59 | if p2_entry.flags().contains(EntryFlags::HUGE_PAGE) { 60 | // address must be 2MiB aligned 61 | assert!(start_frame.number % ENTRY_COUNT == 0); 62 | return Some(Frame { 63 | number: start_frame.number + page.p1_index(), 64 | }); 65 | } 66 | } 67 | } 68 | None 69 | }) 70 | }; 71 | 72 | p3.and_then(|p3| p3.next_table(page.p3_index())) 73 | .and_then(|p2| p2.next_table(page.p2_index())) 74 | .and_then(|p1| p1[page.p1_index()].pointed_frame()) 75 | .or_else(huge_page) 76 | } 77 | 78 | /// Map a page to a frame by getting reference to the page tables and setting the index in the 79 | /// P1 table to the given frame. 
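Callers always pair map_to with the MapperFlush it returns. As an illustration, identity-mapping one page of a device's registers would look like the sketch below; the 0xfee0_0000 local-APIC base is the architectural default rather than something taken from this file, and active_table is assumed to be an &mut ActivePageTable:

    let mmio: usize = 0xfee0_0000; // assumed LAPIC MMIO base
    let page = Page::containing_address(VirtualAddress::new(mmio));
    let frame = Frame::containing_address(PhysicalAddress::new(mmio));
    let result = active_table.map_to(
        page,
        frame,
        EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_CACHE,
    );
    result.flush(&mut active_table); // invalidate any cached translation for this page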
80 | pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) -> MapperFlush { 81 | let p3 = self.p4_mut().next_table_create(page.p4_index()); 82 | let p2 = p3.next_table_create(page.p3_index()); 83 | let p1 = p2.next_table_create(page.p2_index()); 84 | 85 | assert!(p1[page.p1_index()].is_unused()); 86 | p1[page.p1_index()].set(frame, flags | EntryFlags::PRESENT); 87 | 88 | MapperFlush::new(page) 89 | } 90 | 91 | /// Map a page by allocating a free frame and mapping a page to that frame. 92 | pub fn map(&mut self, page: Page, flags: EntryFlags) -> MapperFlush { 93 | let frame = allocate_frames(1).expect("out of memory"); 94 | self.map_to(page, frame, flags) 95 | } 96 | 97 | /// Map a page by translating a given `Frame` to a `Page`. 98 | pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) -> MapperFlush { 99 | let page = Page::containing_address(VirtualAddress::new(frame.start_address().get())); 100 | self.map_to(page, frame, flags) 101 | } 102 | 103 | /// Unmap a page from a physical frame. 104 | pub fn unmap(&mut self, page: Page) -> MapperFlush { 105 | use x86_64; 106 | use x86_64::instructions::tlb; 107 | 108 | // Check if the page is already unmapped (page not mapped to frame, translation failed). 109 | assert!(self.translate(page.start_address()).is_some()); 110 | 111 | let p1 = self.p4_mut() 112 | .next_table_mut(page.p4_index()) 113 | .and_then(|p3| p3.next_table_mut(page.p3_index())) 114 | .and_then(|p2| p2.next_table_mut(page.p2_index())) 115 | .expect("mapping code does not support huge pages"); 116 | let _frame = p1[page.p1_index()].pointed_frame().unwrap(); 117 | p1[page.p1_index()].set_unused(); 118 | tlb::flush(x86_64::VirtualAddress(page.start_address().get())); 119 | // TODO free p(1,2,3) table if empty 120 | // allocator.deallocate_frame(frame); 121 | MapperFlush::new(page) 122 | } 123 | } 124 | 125 | /// A promise to flush a virtual address. 126 | #[must_use = "The page must be flushed, or the changes are ignored."] 127 | pub struct MapperFlush(Page); 128 | 129 | impl Drop for MapperFlush { 130 | fn drop(&mut self) { 131 | panic!("Flush not consumed!"); 132 | } 133 | } 134 | 135 | impl MapperFlush { 136 | pub fn new(page: Page) -> Self { 137 | MapperFlush(page) 138 | } 139 | 140 | pub fn flush(self, table: &mut ActivePageTable) { 141 | table.flush(self.0); 142 | mem::forget(self); 143 | } 144 | 145 | pub unsafe fn ignore(self) { 146 | mem::forget(self); 147 | } 148 | } 149 | 150 | /// A way to flush the entire active page table. 
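Both flush types deliberately panic in Drop so that a forgotten TLB invalidation becomes a loud bug rather than a silent one. MapperFlushAll, defined next, lets a loop batch many mappings and pay for a single full flush at the end instead of an invlpg per page; the intended pattern looks roughly like this (the page range and flags are placeholders):

    let mut flush_all = MapperFlushAll::new();
    for page in Page::range_inclusive(first_page, last_page) { // placeholder range
        let single = active_table.map(page, EntryFlags::PRESENT | EntryFlags::WRITABLE);
        flush_all.consume(single); // swallow the per-page flush promise
    }
    flush_all.flush(&mut active_table); // one cr3 reload covers every new mapping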
151 | #[must_use = "The active page table must be flushed, or the changes ignored"] 152 | pub struct MapperFlushAll(bool); 153 | 154 | impl Drop for MapperFlushAll { 155 | fn drop(&mut self) { 156 | panic!("FlushAll not consumed!"); 157 | } 158 | } 159 | 160 | impl MapperFlushAll { 161 | pub fn new() -> Self { 162 | MapperFlushAll(false) 163 | } 164 | 165 | pub fn consume(&mut self, flush: MapperFlush) { 166 | self.0 = true; 167 | mem::forget(flush); 168 | } 169 | 170 | pub fn flush(self, table: &mut ActivePageTable) { 171 | if self.0 { 172 | unsafe { table.flush_all() }; 173 | } 174 | 175 | mem::forget(self); 176 | } 177 | 178 | pub unsafe fn forget(self) { 179 | mem::forget(self); 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/arch/x86_64/memory/paging/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::entry::EntryFlags; 2 | pub use self::mapper::Mapper; 3 | use arch::memory::{Frame, PAGE_SIZE}; 4 | use arch::memory::allocate_frames; 5 | use self::temporary_page::TemporaryPage; 6 | use core::ops::{Add, Deref, DerefMut}; 7 | use multiboot2::BootInformation; 8 | 9 | pub mod entry; 10 | mod table; 11 | mod temporary_page; 12 | pub mod mapper; 13 | 14 | /// Maximum number of entries a page table can hold. 15 | const ENTRY_COUNT: usize = 512; 16 | 17 | /// A physical memory address. 18 | pub struct PhysicalAddress(pub usize); 19 | 20 | impl PhysicalAddress { 21 | pub fn new(addr: usize) -> Self { 22 | PhysicalAddress(addr) 23 | } 24 | 25 | /// Return the inner address this `PhysicalAddress` wraps. 26 | pub fn get(&self) -> usize { 27 | self.0 28 | } 29 | } 30 | 31 | /// A virtual memory address. 32 | pub struct VirtualAddress(pub usize); 33 | 34 | impl VirtualAddress { 35 | /// Create a new virtual address. 36 | pub fn new(addr: usize) -> Self { 37 | VirtualAddress(addr) 38 | } 39 | 40 | /// Return the inner address this `VirtualAddress` wraps. 41 | pub fn get(&self) -> usize { 42 | self.0 43 | } 44 | } 45 | 46 | /// A 4KiB page. 47 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 48 | pub struct Page { 49 | number: usize, 50 | } 51 | 52 | impl Page { 53 | /// Return the number of the page which contains the given `VirtualAddress`. 54 | pub fn containing_address(address: VirtualAddress) -> Page { 55 | assert!( 56 | address.get() < 0x0000_8000_0000_0000 || address.get() >= 0xffff_8000_0000_0000, 57 | "invalid address: 0x{:x}", 58 | address.get() 59 | ); 60 | Page { 61 | number: address.get() / PAGE_SIZE, 62 | } 63 | } 64 | 65 | /// Return the starting address of a page. 66 | pub fn start_address(&self) -> VirtualAddress { 67 | VirtualAddress::new(self.number * PAGE_SIZE) 68 | } 69 | 70 | fn p4_index(&self) -> usize { 71 | (self.number >> 27) & 0o777 72 | } 73 | fn p3_index(&self) -> usize { 74 | (self.number >> 18) & 0o777 75 | } 76 | fn p2_index(&self) -> usize { 77 | (self.number >> 9) & 0o777 78 | } 79 | fn p1_index(&self) -> usize { 80 | (self.number >> 0) & 0o777 81 | } 82 | 83 | /// Return an iterator between the given two pages. 84 | pub fn range_inclusive(start: Page, end: Page) -> PageIter { 85 | PageIter { 86 | start: start, 87 | end: end, 88 | } 89 | } 90 | } 91 | 92 | impl Add for Page { 93 | type Output = Page; 94 | 95 | fn add(self, rhs: usize) -> Page { 96 | Page { 97 | number: self.number + rhs, 98 | } 99 | } 100 | } 101 | 102 | /// An iterator over pages between `start` and `end`. 
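The index helpers above carve the page number up 9 bits at a time: p1_index is bits 0-8 of the number (address bits 12-20), p2_index the next 9 bits, and so on up to p4_index. That is also why HEAP_START is written in octal: grouping 0o_000_001_000_000_0000 three digits at a time reads off directly as p4 = 0, p3 = 1, p2 = 0, p1 = 0 with a zero page offset. The identity-mapped VGA page at 0xb8000 has page number 0xb8 = 184, so it sits at p1 index 184 with p4, p3 and p2 all zero.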
103 | #[derive(Copy, Clone)] 104 | pub struct PageIter { 105 | start: Page, 106 | end: Page, 107 | } 108 | 109 | impl Iterator for PageIter { 110 | type Item = Page; 111 | 112 | fn next(&mut self) -> Option { 113 | if self.start <= self.end { 114 | let page = self.start; 115 | self.start.number += 1; 116 | Some(page) 117 | } else { 118 | None 119 | } 120 | } 121 | } 122 | 123 | /// The system's active page table. 124 | pub struct ActivePageTable { 125 | mapper: Mapper, 126 | } 127 | 128 | impl Deref for ActivePageTable { 129 | type Target = Mapper; 130 | 131 | fn deref(&self) -> &Mapper { 132 | &self.mapper 133 | } 134 | } 135 | 136 | impl DerefMut for ActivePageTable { 137 | fn deref_mut(&mut self) -> &mut Mapper { 138 | &mut self.mapper 139 | } 140 | } 141 | 142 | impl ActivePageTable { 143 | pub unsafe fn new() -> ActivePageTable { 144 | ActivePageTable { 145 | mapper: Mapper::new(), 146 | } 147 | } 148 | 149 | /// Get the start address of the current P4 table as stored in `cr3`. 150 | pub fn address(&self) -> usize { 151 | use x86_64::registers::control_regs; 152 | control_regs::cr3().0 as usize 153 | } 154 | 155 | pub fn with( 156 | &mut self, 157 | table: &mut InactivePageTable, 158 | temporary_page: &mut temporary_page::TemporaryPage, 159 | f: F, 160 | ) where 161 | F: FnOnce(&mut Mapper), 162 | { 163 | use x86_64::registers::control_regs; 164 | use x86_64::instructions::tlb; 165 | 166 | { 167 | // Get reference to current P4 table. 168 | let backup = 169 | Frame::containing_address(PhysicalAddress::new(control_regs::cr3().0 as usize)); 170 | 171 | // map temporary_page to current P4 table 172 | let p4_table = temporary_page.map_table_frame(backup.clone(), self); 173 | 174 | // overwrite recursive mapping 175 | self.p4_mut()[511].set( 176 | table.p4_frame.clone(), 177 | EntryFlags::PRESENT | EntryFlags::WRITABLE, 178 | ); 179 | tlb::flush_all(); 180 | 181 | // execute f in the new context 182 | f(self); 183 | 184 | // restore recursive mapping to original P4 table 185 | p4_table[511].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE); 186 | tlb::flush_all(); 187 | } 188 | 189 | temporary_page.unmap(self); 190 | } 191 | 192 | /// Switch the active page table, and return the old page table. 193 | pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable { 194 | use x86_64; 195 | use x86_64::registers::control_regs; 196 | 197 | let old_table = InactivePageTable { 198 | p4_frame: Frame::containing_address(PhysicalAddress::new( 199 | control_regs::cr3().0 as usize, 200 | )), 201 | }; 202 | 203 | unsafe { 204 | control_regs::cr3_write(x86_64::PhysicalAddress( 205 | new_table.p4_frame.start_address().get() as u64, 206 | )); 207 | } 208 | old_table 209 | } 210 | 211 | pub fn flush(&mut self, page: Page) { 212 | unsafe { asm!("invlpg ($0)" :: "r"(page.start_address().get())) }; 213 | } 214 | 215 | pub unsafe fn flush_all(&mut self) { 216 | use x86_64::registers::control_regs::{cr3, cr3_write}; 217 | 218 | cr3_write(cr3()); 219 | } 220 | } 221 | 222 | /// A page table which has a frame wherein the P4 table lives. 
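///
/// A sketch of how a fresh table is built and activated, mirroring `init` below
/// (assumes `active_table` and `temporary_page` are in scope):
///
/// ```ignore
/// let frame = allocate_frames(1).expect("out of memory");
/// let new_table = InactivePageTable::new(frame, &mut active_table, &mut temporary_page);
/// // Populate `new_table` via `active_table.with(...)`, then make it current:
/// let old_table = active_table.switch(new_table);
/// ```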
223 | pub struct InactivePageTable { 224 | p4_frame: Frame, 225 | } 226 | 227 | impl InactivePageTable { 228 | pub fn new( 229 | frame: Frame, 230 | active_table: &mut ActivePageTable, 231 | temporary_page: &mut TemporaryPage, 232 | ) -> InactivePageTable { 233 | { 234 | let table = temporary_page.map_table_frame(frame.clone(), active_table); 235 | table.zero(); 236 | table[511].set(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE); 237 | } 238 | temporary_page.unmap(active_table); 239 | 240 | InactivePageTable { p4_frame: frame } 241 | } 242 | } 243 | 244 | /// Identity map important sections and switch the page table, remapping the kernel one page above 245 | /// and turning the previous kernel stack into a guard page - this prevents silent stack overflows, as 246 | /// given that the guard page is unmapped, any stack overflow into this page will instantly cause a 247 | /// page fault. Returns the currently active kernel page table. 248 | pub fn init(boot_info: &BootInformation) -> ActivePageTable { 249 | let mut temporary_page = TemporaryPage::new(Page { number: 0xcafebabe }); 250 | let mut active_table = unsafe { ActivePageTable::new() }; 251 | let mut new_table = { 252 | // Allocate a frame for the PML4. 253 | let frame = allocate_frames(1).expect("out of memory"); 254 | InactivePageTable::new(frame, &mut active_table, &mut temporary_page) 255 | }; 256 | 257 | // Do important mapping work. 258 | active_table.with(&mut new_table, &mut temporary_page, |mapper| { 259 | println!("[ vmm ] Initialising paging."); 260 | 261 | let elf_sections_tag = boot_info 262 | .elf_sections_tag() 263 | .expect("Memory map tag required"); 264 | 265 | // identity map the entire kernel. 266 | for section in elf_sections_tag.sections() { 267 | if !section.is_allocated() { 268 | // section is not loaded to memory 269 | continue; 270 | } 271 | 272 | assert!( 273 | section.start_address() as usize % PAGE_SIZE == 0, 274 | "sections need to be page aligned" 275 | ); 276 | println!( 277 | "[ vmm ] Identity mapping kernel section at addr: {:#x}, size: {} KiB", 278 | section.start_address(), 279 | section.size() / 1024, 280 | ); 281 | 282 | // Translate ELF section flags to paging flags, and map the kernel sections 283 | // into the virtual address space using these flags. 284 | let flags = EntryFlags::from_elf_section_flags(§ion); 285 | 286 | let start_frame = 287 | Frame::containing_address(PhysicalAddress::new(section.start_address() as usize)); 288 | let end_frame = Frame::containing_address(PhysicalAddress::new( 289 | (section.end_address() - 1) as usize, 290 | )); 291 | for frame in Frame::range_inclusive(start_frame, end_frame) { 292 | let result = mapper.identity_map(frame, flags); 293 | // Ignore this result since this table is not currently active. 294 | unsafe { result.ignore() }; 295 | } 296 | } 297 | 298 | // identity map the VGA text buffer 299 | println!("[ vmm ] Identity mapping the VGA text buffer."); 300 | let vga_buffer_frame = Frame::containing_address(PhysicalAddress::new(0xb8000)); 301 | let res = mapper.identity_map(vga_buffer_frame, EntryFlags::WRITABLE); 302 | unsafe { res.ignore() }; 303 | 304 | // identity map the multiboot info structure. 
305 | println!("[ vmm ] Identity mapping multiboot structures."); 306 | let multiboot_start = 307 | Frame::containing_address(PhysicalAddress::new(boot_info.start_address())); 308 | let multiboot_end = 309 | Frame::containing_address(PhysicalAddress::new(boot_info.end_address() - 1)); 310 | for frame in Frame::range_inclusive(multiboot_start, multiboot_end) { 311 | let result = mapper.identity_map(frame, EntryFlags::PRESENT); 312 | unsafe { result.ignore() }; 313 | } 314 | }); 315 | 316 | let old_table = active_table.switch(new_table); 317 | println!( 318 | "[ vmm ] Switched to new page table. PML4 at {:#x}", 319 | active_table.address() 320 | ); 321 | 322 | // Create a guard page. 323 | let old_p4_page = Page::containing_address(VirtualAddress::new( 324 | old_table.p4_frame.start_address().get(), 325 | )); 326 | 327 | let result = active_table.unmap(old_p4_page); 328 | // Flush old p4 in TLB. 329 | result.flush(&mut active_table); 330 | 331 | println!( 332 | "[ vmm ] Guard page at {:#x}.", 333 | old_p4_page.start_address().get() 334 | ); 335 | 336 | active_table 337 | } 338 | -------------------------------------------------------------------------------- /src/arch/x86_64/memory/paging/table.rs: -------------------------------------------------------------------------------- 1 | use arch::memory::paging::entry::EntryFlags; 2 | use arch::memory::paging::entry::*; 3 | use arch::memory::paging::ENTRY_COUNT; 4 | use arch::memory::allocate_frames; 5 | use core::ops::{Index, IndexMut}; 6 | use core::marker::PhantomData; 7 | 8 | /// This physical address will point to the highest-level P4 table. 9 | pub const P4: *mut Table = 0xffffffff_fffff000 as *mut _; 10 | 11 | pub struct Table { 12 | entries: [Entry; ENTRY_COUNT], 13 | level: PhantomData, 14 | } 15 | 16 | impl Table 17 | where 18 | L: TableLevel, 19 | { 20 | /// Set each entry of the page table as unused - leaves them free. 21 | pub fn zero(&mut self) { 22 | for entry in self.entries.iter_mut() { 23 | entry.set_unused(); 24 | } 25 | } 26 | } 27 | 28 | impl Table 29 | where 30 | L: HierarchicalLevel, 31 | { 32 | /// Get the address of the next-lowest page table, using the passed index which should be the 33 | /// index of the next page table in the current-level page table. 34 | fn next_table_address(&self, index: usize) -> Option { 35 | let entry_flags = self[index].flags(); 36 | if entry_flags.contains(EntryFlags::PRESENT) && !entry_flags.contains(EntryFlags::HUGE_PAGE) 37 | { 38 | let table_address = self as *const _ as usize; 39 | Some((table_address << 9) | (index << 12)) 40 | } else { 41 | None 42 | } 43 | } 44 | 45 | /// Return a reference to the next table. 46 | pub fn next_table(&self, index: usize) -> Option<&Table> { 47 | self.next_table_address(index) 48 | .map(|address| unsafe { &*(address as *const _) }) 49 | } 50 | 51 | /// Return a mutable reference to the next table. 
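///
/// Illustrative walk from the recursively mapped P4 down to a P1 table, mirroring
/// the lookup in `Mapper::unmap` (a sketch; `page` is assumed to be in scope):
///
/// ```ignore
/// let p1 = unsafe { &mut *P4 }
///     .next_table_mut(page.p4_index())
///     .and_then(|p3| p3.next_table_mut(page.p3_index()))
///     .and_then(|p2| p2.next_table_mut(page.p2_index()));
/// ```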
52 | pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table> { 53 | self.next_table_address(index) 54 | .map(|address| unsafe { &mut *(address as *mut _) }) 55 | } 56 | 57 | pub fn next_table_create(&mut self, index: usize) -> &mut Table { 58 | if self.next_table(index).is_none() { 59 | assert!( 60 | !self.entries[index].flags().contains(EntryFlags::HUGE_PAGE), 61 | "mapping code does not support huge pages" 62 | ); 63 | let frame = allocate_frames(1).expect("no frames available"); 64 | self.entries[index].set(frame, EntryFlags::PRESENT | EntryFlags::WRITABLE); 65 | self.next_table_mut(index).unwrap().zero(); 66 | } 67 | self.next_table_mut(index).unwrap() 68 | } 69 | } 70 | 71 | impl Index for Table 72 | where 73 | L: TableLevel, 74 | { 75 | type Output = Entry; 76 | 77 | fn index(&self, index: usize) -> &Entry { 78 | &self.entries[index] 79 | } 80 | } 81 | 82 | impl IndexMut for Table 83 | where 84 | L: TableLevel, 85 | { 86 | fn index_mut(&mut self, index: usize) -> &mut Entry { 87 | &mut self.entries[index] 88 | } 89 | } 90 | 91 | pub trait TableLevel {} 92 | 93 | pub enum Level4 {} 94 | #[allow(dead_code)] 95 | pub enum Level3 {} 96 | #[allow(dead_code)] 97 | pub enum Level2 {} 98 | pub enum Level1 {} 99 | 100 | impl TableLevel for Level4 {} 101 | impl TableLevel for Level3 {} 102 | impl TableLevel for Level2 {} 103 | impl TableLevel for Level1 {} 104 | 105 | pub trait HierarchicalLevel: TableLevel { 106 | type NextLevel: TableLevel; 107 | } 108 | 109 | impl HierarchicalLevel for Level4 { 110 | type NextLevel = Level3; 111 | } 112 | 113 | impl HierarchicalLevel for Level3 { 114 | type NextLevel = Level2; 115 | } 116 | 117 | impl HierarchicalLevel for Level2 { 118 | type NextLevel = Level1; 119 | } 120 | -------------------------------------------------------------------------------- /src/arch/x86_64/memory/paging/temporary_page.rs: -------------------------------------------------------------------------------- 1 | use super::{ActivePageTable, Page, VirtualAddress}; 2 | use super::table::{Level1, Table}; 3 | use arch::memory::Frame; 4 | 5 | pub struct TemporaryPage { 6 | page: Page, 7 | } 8 | 9 | impl TemporaryPage { 10 | pub fn new(page: Page) -> TemporaryPage { 11 | TemporaryPage { page: page } 12 | } 13 | 14 | /// Maps the temporary page to the given frame in the active table. 15 | /// Returns the start address of the temporary page. 16 | pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable) -> VirtualAddress { 17 | use super::entry::EntryFlags; 18 | 19 | assert!( 20 | active_table.translate_page(self.page).is_none(), 21 | "temporary page is already mapped" 22 | ); 23 | let result = active_table.map_to(self.page, frame, EntryFlags::WRITABLE); 24 | result.flush(active_table); 25 | self.page.start_address() 26 | } 27 | 28 | /// Maps the temporary page to the given page table frame in the active table. 29 | /// Returns a reference to the now mapped table. 30 | pub fn map_table_frame( 31 | &mut self, 32 | frame: Frame, 33 | active_table: &mut ActivePageTable, 34 | ) -> &mut Table { 35 | unsafe { &mut *(self.map(frame, active_table).get() as *mut Table) } 36 | } 37 | 38 | /// Unmaps the temporary page in the active table. 
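///
/// Typical map/use/unmap cycle, as `InactivePageTable::new` does (a sketch;
/// `frame` and `active_table` are assumed to be in scope):
///
/// ```ignore
/// let table = temporary_page.map_table_frame(frame.clone(), active_table);
/// table.zero();
/// temporary_page.unmap(active_table);
/// ```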
39 | pub fn unmap(&mut self, active_table: &mut ActivePageTable) { 40 | let result = active_table.unmap(self.page); 41 | result.flush(active_table); 42 | } 43 | } 44 | 45 | /* struct TinyAllocator([Option; 3]); 46 | 47 | impl TinyAllocator { 48 | fn new(allocator: &mut A) -> TinyAllocator 49 | where 50 | A: FrameAllocator, 51 | { 52 | let mut f = || allocator.allocate_frame(1); 53 | let frames = [f(), f(), f()]; 54 | TinyAllocator(frames) 55 | } 56 | } 57 | 58 | impl FrameAllocator for TinyAllocator { 59 | /// Allocate the frames that have been borrowed from the main allocator. 60 | fn allocate_frame(&mut self, _count: usize) -> Option { 61 | for frame_option in &mut self.0 { 62 | if frame_option.is_some() { 63 | return frame_option.take(); 64 | } 65 | } 66 | None 67 | } 68 | 69 | /// Mark any `None` frames as `Some`, and return. 70 | fn deallocate_frame(&mut self, frame: Frame) { 71 | for frame_option in &mut self.0 { 72 | if frame_option.is_none() { 73 | *frame_option = Some(frame); 74 | return; 75 | } 76 | } 77 | panic!("Tiny allocator can hold only 3 frames."); 78 | } 79 | 80 | fn free_frames(&mut self) -> usize { 81 | let mut count: usize = 0; 82 | 83 | for frame_option in &mut self.0 { 84 | if frame_option.is_some() { 85 | count += 1; 86 | } 87 | } 88 | 89 | count 90 | } 91 | } */ 92 | -------------------------------------------------------------------------------- /src/arch/x86_64/memory/stack_allocator.rs: -------------------------------------------------------------------------------- 1 | use arch::memory::paging::{ActivePageTable, Page, PageIter}; 2 | use arch::memory::PAGE_SIZE; 3 | use arch::memory::paging::EntryFlags; 4 | 5 | /// A stack allocator. 6 | #[derive(Copy, Clone)] 7 | pub struct StackAllocator { 8 | range: PageIter, 9 | } 10 | 11 | impl StackAllocator { 12 | pub fn new(page_range: PageIter) -> StackAllocator { 13 | StackAllocator { range: page_range } 14 | } 15 | } 16 | 17 | impl StackAllocator { 18 | /// Allocate a range of pages to use as a stack. 19 | pub fn alloc_stack( 20 | &mut self, 21 | active_table: &mut ActivePageTable, 22 | size_in_pages: usize, 23 | ) -> Option { 24 | if size_in_pages == 0 { 25 | return None; /* a zero sized stack makes no sense */ 26 | } 27 | 28 | // clone the range, since we only want to change it on success 29 | let mut range = self.range.clone(); 30 | 31 | // try to allocate the stack pages and a guard page 32 | let guard_page = range.next(); 33 | let stack_start = range.next(); 34 | let stack_end = if size_in_pages == 1 { 35 | stack_start 36 | } else { 37 | // choose the (size_in_pages-2)th element, since index 38 | // starts at 0 and we already allocated the start page 39 | range.nth(size_in_pages - 2) 40 | }; 41 | 42 | match (guard_page, stack_start, stack_end) { 43 | (Some(_), Some(start), Some(end)) => { 44 | // success! write back updated range 45 | self.range = range; 46 | 47 | // map stack pages to physical frames 48 | for page in Page::range_inclusive(start, end) { 49 | let result = active_table.map(page, EntryFlags::PRESENT); 50 | result.flush(active_table); 51 | } 52 | 53 | // create a new stack 54 | let top_of_stack = end.start_address().get() + PAGE_SIZE; 55 | Some(Stack::new(top_of_stack, start.start_address().get())) 56 | } 57 | _ => None, /* not enough pages */ 58 | } 59 | } 60 | } 61 | 62 | /// A stack that grows downwards. 
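///
/// A sketch of how a `Stack` is obtained from the allocator above (assumes a
/// `stack_allocator` and `active_table` are in scope):
///
/// ```ignore
/// let stack = stack_allocator
///     .alloc_stack(&mut active_table, 4)
///     .expect("could not allocate a 4-page stack");
/// // The stack spans 4 pages; `stack.top()` is the highest address, `stack.bottom()` the lowest.
/// ```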
63 | #[derive(Debug)] 64 | pub struct Stack { 65 | top: usize, 66 | bottom: usize, 67 | } 68 | 69 | impl Stack { 70 | fn new(top: usize, bottom: usize) -> Stack { 71 | assert!(top > bottom); 72 | Stack { 73 | top: top, 74 | bottom: bottom, 75 | } 76 | } 77 | 78 | pub fn top(&self) -> usize { 79 | self.top 80 | } 81 | 82 | #[allow(dead_code)] 83 | pub fn bottom(&self) -> usize { 84 | self.bottom 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/arch/x86_64/mod.rs: -------------------------------------------------------------------------------- 1 | //! Architecture-specific code for AMD64. 2 | 3 | pub mod interrupts; 4 | pub mod memory; 5 | pub mod init; 6 | 7 | pub use self::init::init; 8 | -------------------------------------------------------------------------------- /src/device/ahci/fis.rs: -------------------------------------------------------------------------------- 1 | /// All of these structures are pretty much just adaptations of 2 | /// http://wiki.osdev.org/AHCI#SATA_basic. 3 | 4 | use device::io::mmio::Mmio; 5 | 6 | /// Types of FIS. 7 | #[repr(u8)] 8 | pub enum FisType { 9 | RegH2D = 0x27, 10 | RegD2H = 0x34, 11 | DmaAct = 0x39, 12 | DmaSetup = 0x41, 13 | Data = 0x46, 14 | Bist = 0x58, 15 | PioSetup = 0x5F, 16 | DevBits = 0xA1, 17 | } 18 | 19 | #[repr(packed)] 20 | pub struct FisRegH2D { 21 | // DWORD 0 22 | pub fis_type: Mmio, // FIS_TYPE_REG_H2D 23 | 24 | pub pm: Mmio, // Port multiplier, 1: Command, 0: Control 25 | 26 | pub command: Mmio, // Command register 27 | pub featurel: Mmio, // Feature register, 7:0 28 | 29 | // DWORD 1 30 | pub lba0: Mmio, // LBA low register, 7:0 31 | pub lba1: Mmio, // LBA mid register, 15:8 32 | pub lba2: Mmio, // LBA high register, 23:16 33 | pub device: Mmio, // Device register 34 | 35 | // DWORD 2 36 | pub lba3: Mmio, // LBA register, 31:24 37 | pub lba4: Mmio, // LBA register, 39:32 38 | pub lba5: Mmio, // LBA register, 47:40 39 | pub featureh: Mmio, // Feature register, 15:8 40 | 41 | // DWORD 3 42 | pub countl: Mmio, // Count register, 7:0 43 | pub counth: Mmio, // Count register, 15:8 44 | pub icc: Mmio, // Isochronous command completion 45 | pub control: Mmio, // Control register 46 | 47 | // DWORD 4 48 | pub rsv1: [Mmio; 4], // Reserved 49 | } 50 | 51 | #[repr(packed)] 52 | pub struct FisRegD2H { 53 | // DWORD 0 54 | pub fis_type: Mmio, // FIS_TYPE_REG_D2H 55 | 56 | pub pm: Mmio, // Port multiplier, Interrupt bit: 2 57 | 58 | pub status: Mmio, // Status register 59 | pub error: Mmio, // Error register 60 | 61 | // DWORD 1 62 | pub lba0: Mmio, // LBA low register, 7:0 63 | pub lba1: Mmio, // LBA mid register, 15:8 64 | pub lba2: Mmio, // LBA high register, 23:16 65 | pub device: Mmio, // Device register 66 | 67 | // DWORD 2 68 | pub lba3: Mmio, // LBA register, 31:24 69 | pub lba4: Mmio, // LBA register, 39:32 70 | pub lba5: Mmio, // LBA register, 47:40 71 | pub rsv2: Mmio, // Reserved 72 | 73 | // DWORD 3 74 | pub countl: Mmio, // Count register, 7:0 75 | pub counth: Mmio, // Count register, 15:8 76 | pub rsv3: [Mmio; 2], // Reserved 77 | 78 | // DWORD 4 79 | pub rsv4: [Mmio; 4], // Reserved 80 | } 81 | 82 | #[repr(packed)] 83 | pub struct FisData { 84 | // DWORD 0 85 | pub fis_type: Mmio, // FIS_TYPE_DATA 86 | 87 | pub pm: Mmio, // Port multiplier 88 | 89 | pub rsv1: [Mmio; 2], // Reserved 90 | 91 | // DWORD 1 ~ N 92 | pub data: [Mmio; 252], // Payload 93 | } 94 | 95 | #[repr(packed)] 96 | pub struct FisPioSetup { 97 | // DWORD 0 98 | pub fis_type: Mmio, // FIS_TYPE_PIO_SETUP 99 | 
100 | pub pm: Mmio, // Port multiplier, direction: 4 - device to host, interrupt: 2 101 | 102 | pub status: Mmio, // Status register 103 | pub error: Mmio, // Error register 104 | 105 | // DWORD 1 106 | pub lba0: Mmio, // LBA low register, 7:0 107 | pub lba1: Mmio, // LBA mid register, 15:8 108 | pub lba2: Mmio, // LBA high register, 23:16 109 | pub device: Mmio, // Device register 110 | 111 | // DWORD 2 112 | pub lba3: Mmio, // LBA register, 31:24 113 | pub lba4: Mmio, // LBA register, 39:32 114 | pub lba5: Mmio, // LBA register, 47:40 115 | pub rsv2: Mmio, // Reserved 116 | 117 | // DWORD 3 118 | pub countl: Mmio, // Count register, 7:0 119 | pub counth: Mmio, // Count register, 15:8 120 | pub rsv3: Mmio, // Reserved 121 | pub e_status: Mmio, // New value of status register 122 | 123 | // DWORD 4 124 | pub tc: Mmio, // Transfer count 125 | pub rsv4: [Mmio; 2], // Reserved 126 | } 127 | 128 | #[repr(packed)] 129 | pub struct FisDmaSetup { 130 | // DWORD 0 131 | pub fis_type: Mmio, // FIS_TYPE_DMA_SETUP 132 | 133 | pub pm: Mmio, // Port multiplier, direction: 4 - device to host, interrupt: 2, auto-activate: 1 134 | 135 | pub rsv1: [Mmio; 2], // Reserved 136 | 137 | // DWORD 1&2 138 | pub dma_buffer_id: Mmio, 139 | 140 | // DWORD 3 141 | pub rsv3: Mmio, // More reserved 142 | 143 | // DWORD 4 144 | pub dma_buffer_offset: Mmio, // Byte offset into buffer. First 2 bits must be 0 145 | 146 | // DWORD 5 147 | pub transfer_count: Mmio, // Number of bytes to transfer. Bit 0 must be 0 148 | 149 | // DWORD 6 150 | pub rsv6: Mmio, // Reserved 151 | } 152 | -------------------------------------------------------------------------------- /src/device/ahci/hba.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; 2 | 3 | pub static AHCI_BASE: AtomicUsize = ATOMIC_USIZE_INIT; 4 | -------------------------------------------------------------------------------- /src/device/ahci/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod fis; 2 | pub mod hba; 3 | -------------------------------------------------------------------------------- /src/device/apic.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused_imports)] 2 | use x86_64::registers::msr::{rdmsr, wrmsr, IA32_APIC_BASE}; 3 | use core::ptr; 4 | use core::sync::atomic::{AtomicU32, Ordering}; 5 | use arch::memory::paging::{Page, VirtualAddress, PhysicalAddress, ActivePageTable}; 6 | use arch::memory::paging::entry::EntryFlags; 7 | use arch::memory::Frame; 8 | use heapless::Vec as StaticVec; 9 | use spin::Mutex; 10 | use acpi::madt; 11 | 12 | /// This will manage all the apic hardware on the system. 13 | pub struct ApicManager { 14 | /// The base address of the local APIC register space. 15 | pub lapic_base: u32, 16 | pub local_apics: StaticVec<&'static madt::LapicEntry, [&'static madt::LapicEntry; 20]>, 17 | /// All the I/O APICs on a system. FIXME: Figure out how to set the size of the backing 18 | /// array dynamically. 19 | pub io_apics: StaticVec<&'static madt::IoApic, [&'static madt::IoApic; 10]>, 20 | /// All the non-maskable interrupts, specified by the MADT. 21 | pub nmis: StaticVec<&'static madt::ApicNMI, [&'static madt::ApicNMI; 10]>, 22 | /// Interrupt source overrides. 
23 | pub isos: StaticVec<&'static madt::InterruptSourceOverride, [&'static madt::InterruptSourceOverride; 10]>,
24 | }
25 | 
26 | impl ApicManager {
27 | pub fn new() -> Self {
28 | ApicManager {
29 | lapic_base: 0,
30 | local_apics: StaticVec::new(),
31 | io_apics: StaticVec::new(),
32 | nmis: StaticVec::new(),
33 | isos: StaticVec::new(),
34 | }
35 | }
36 | 
37 | pub fn lapic_read(&self, register: u32) -> u32 {
38 | unsafe { ptr::read_volatile((self.lapic_base + register) as usize as *const u32) }
39 | }
40 | 
41 | pub fn lapic_write(&self, register: u32, value: u32) {
42 | unsafe { ptr::write_volatile((self.lapic_base + register) as usize as *mut u32, value) }
43 | }
44 | 
45 | pub fn lapic_set_nmi(&self, vec: u8, flags: u16, lint: u8) {
46 | // Set as NMI.
47 | let mut nmi: u32 = (800 | vec) as u32;
48 | // Active low.
49 | if flags & 2 != 0 {
50 | nmi |= 1 << 13;
51 | }
52 | 
53 | // Level triggered.
54 | if flags & 8 != 0 {
55 | nmi |= 1 << 15;
56 | }
57 | 
58 | println!("[ dev ] Setting NMI, {:#x}", nmi);
59 | 
60 | match lint {
61 | 1 => {
62 | self.lapic_write(0x360, nmi);
63 | },
64 | 0 => {
65 | self.lapic_write(0x350, nmi);
66 | },
67 | _ => {},
68 | }
69 | }
70 | 
71 | pub fn install_nmis(&self) {
72 | for (i, nmi) in self.nmis.iter().enumerate() {
73 | println!("[ dev ] Installing NMI {}, vector offset: {:#x}", i, 0x90 + i);
74 | println!("[ dev ] NMI has flags: {}, using register LVT{}", nmi.flags, nmi.lint_no);
75 | self.lapic_set_nmi(0x90 + i as u8, nmi.flags, nmi.lint_no);
76 | }
77 | }
78 | 
79 | /// Enable the Local APIC and set the spurious interrupt vector to 0xff (255).
80 | pub fn lapic_enable(&self) {
81 | let read = self.lapic_read(0xf0);
82 | self.lapic_write(0xf0, read | (0x100 | 0xff));
83 | }
84 | 
85 | pub fn io_apic_read(&self, reg: u32, num: usize) -> u32 {
86 | // First, find the base address of the I/O APIC referenced by `num`
87 | // in our list of entries.
88 | let addr: u32 = self.io_apics[num].address;
89 | 
90 | unsafe {
91 | // IOREGSEL lives at the I/O APIC base, IOWIN at base + 0x10.
92 | let ioregsel = addr as usize as *mut u32;
93 | // Tell the apic which register we wish to use.
94 | ptr::write_volatile(ioregsel, reg);
95 | 
96 | let ioregwin = (addr + 0x10) as usize as *const u32;
97 | ptr::read_volatile(ioregwin)
98 | }
99 | }
100 | 
101 | pub fn io_apic_write(&self, reg: u32, num: usize, data: u32) {
102 | let addr: u32 = self.io_apics[num].address;
103 | 
104 | unsafe {
105 | // Select the register via IOREGSEL, then write the data through IOWIN (base + 0x10).
106 | let ioregsel = addr as usize as *mut u32;
107 | ptr::write_volatile(ioregsel, reg);
108 | 
109 | let ioregwin = (addr + 0x10) as usize as *mut u32;
110 | ptr::write_volatile(ioregwin, data);
111 | };
112 | }
113 | 
114 | pub fn io_apic_from_gsi(&self, gsi: u32) -> Option<usize> {
115 | for (i, apic) in self.io_apics.iter().enumerate() {
116 | if apic.gsib < gsi && apic.gsib + self.get_max_redirect(i) > gsi {
117 | return Some(i);
118 | } else {
119 | continue;
120 | }
121 | }
122 | 
123 | None
124 | }
125 | 
126 | pub fn get_max_redirect(&self, num: usize) -> u32 {
127 | (self.io_apic_read(1, num) & 0xff0000) >> 16
128 | }
129 | 
130 | /// Set the redirect for a given IRQ and GSI.
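///
/// For example, routing the legacy keyboard interrupt (a sketch; mirrors what
/// `install_redirects` below does for each interrupt source override):
///
/// ```ignore
/// // IRQ 1 / GSI 1, default flags, delivered to the first local APIC.
/// apic_manager.set_redirect(1, 1, 0, apic_manager.local_apics[0].id);
/// ```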
131 | pub fn set_redirect(&self, irq: u8, gsi: u32, flags: u16, id: u8) { 132 | let apic = self.io_apic_from_gsi(gsi); 133 | 134 | if apic.is_none() { 135 | println!("[ apic ] Error: Could not find an I/O APIC that handles GSI: {}", gsi); 136 | // return; 137 | } else { 138 | let io_apic = apic.unwrap(); 139 | 140 | let mut redirection: u64 = irq as u64 + 0x30; 141 | if flags & 2 != 0 { 142 | redirection |= 1<<13; 143 | } else if flags & 8 != 0 { 144 | redirection |= 1<<15; 145 | } 146 | 147 | redirection |= (id as u64) << 56; 148 | 149 | let ioredtbl: u32 = (gsi - self.io_apics[io_apic].gsib) * 2 + 16; 150 | 151 | println!("[ dev ] Redirecting IRQ {}, redirection data: {}", irq, redirection); 152 | 153 | self.io_apic_write(ioredtbl, io_apic, redirection as u32); 154 | self.io_apic_write(ioredtbl + 1, io_apic, redirection as u32); 155 | } 156 | } 157 | 158 | pub fn install_redirects(&self) { 159 | for iso in self.isos.iter() { 160 | self.set_redirect(iso.irq_source, iso.gsi, iso.flags, self.local_apics[0].id) 161 | } 162 | } 163 | 164 | pub fn eoi(&self) { 165 | self.lapic_write(0xb0, 0); 166 | } 167 | } 168 | 169 | pub fn init(active_table: &mut ActivePageTable) { 170 | if let Some(ref mut apic_manager) = *APIC_MANAGER.lock() { 171 | println!("[ dev ] Initialising APIC, lapic base at {:#x}", apic_manager.lapic_base); 172 | println!("[ dev ] Mapping local APIC address space..."); 173 | 174 | for (i, _) in apic_manager.io_apics.iter().enumerate() { 175 | println!("Max redirect for this i/o apic is {}", apic_manager.get_max_redirect(i)); 176 | } 177 | 178 | { 179 | let page = Page::containing_address(VirtualAddress::new(apic_manager.lapic_base as usize)); 180 | let frame = Frame::containing_address(PhysicalAddress::new(apic_manager.lapic_base as usize)); 181 | let result = active_table.map_to(page, frame, 182 | EntryFlags::PRESENT | 183 | EntryFlags::WRITABLE | 184 | EntryFlags::NO_EXECUTE); 185 | result.flush(active_table); 186 | } 187 | 188 | { 189 | for io_apic in apic_manager.io_apics.iter() { 190 | let page = Page::containing_address(VirtualAddress::new(io_apic.address as usize)); 191 | let frame = Frame::containing_address(PhysicalAddress::new(io_apic.address as usize)); 192 | let result = active_table.map_to(page, frame, 193 | EntryFlags::PRESENT | 194 | EntryFlags::WRITABLE | 195 | EntryFlags::NO_EXECUTE); 196 | result.flush(active_table); 197 | } 198 | } 199 | 200 | println!("[ dev ] Installing non-maskable interrupts..."); 201 | apic_manager.install_nmis(); 202 | println!("[ dev ] Installing interrupt source overrides..."); 203 | apic_manager.install_redirects(); 204 | println!("[ dev ] Enabling Local APIC"); 205 | apic_manager.lapic_enable(); 206 | } 207 | } 208 | 209 | pub fn eoi() { 210 | if let Some(ref mut apic_manager) = *APIC_MANAGER.lock() { 211 | apic_manager.eoi(); 212 | } else { 213 | panic!("apic not initialised"); 214 | } 215 | } 216 | 217 | lazy_static! { 218 | pub static ref APIC_MANAGER: Mutex> = Mutex::new(None); 219 | } 220 | -------------------------------------------------------------------------------- /src/device/io/cpuio.rs: -------------------------------------------------------------------------------- 1 | use core::marker::PhantomData; 2 | 3 | pub mod x86_io { 4 | /// Read a single byte from the port. 5 | pub unsafe fn inb(port: u16) -> u8 { 6 | let result: u8; 7 | asm!("inb %dx, %al" : "={al}"(result) : "{dx}"(port) :: "volatile"); 8 | result 9 | } 10 | 11 | /// Write a single byte to the port. 
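///
/// Note the argument order: value first, then port. For example (illustrative only):
///
/// ```ignore
/// // Send an end-of-interrupt (0x20) to the master 8259 PIC's command port (0x20).
/// unsafe { outb(0x20, 0x20) };
/// ```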
12 | pub unsafe fn outb(value: u8, port: u16) { 13 | asm!("outb %al, %dx" :: "{dx}"(port), "{al}"(value) :: "volatile"); 14 | } 15 | 16 | /// Read a word from the port. 17 | pub unsafe fn inw(port: u16) -> u16 { 18 | let result: u16; 19 | asm!("inw %dx, %ax" : "={ax}"(result) : "{dx}"(port) :: "volatile"); 20 | result 21 | } 22 | 23 | /// Write a word to the port. 24 | pub unsafe fn outw(value: u16, port: u16) { 25 | asm!("outw %ax, %dx" :: "{dx}"(port), "{ax}"(value) :: "volatile"); 26 | } 27 | 28 | /// Read a dword from the port. 29 | pub unsafe fn inl(port: u16) -> u32 { 30 | let result: u32; 31 | asm!("inl %dx, %eax" : "={eax}"(result) : "{dx}"(port) :: "volatile"); 32 | result 33 | } 34 | 35 | /// Write a dword to the port. 36 | pub unsafe fn outl(value: u32, port: u16) { 37 | asm!("outl %eax, %dx" :: "{dx}"(port), "{eax}"(value) :: "volatile"); 38 | } 39 | } 40 | 41 | use self::x86_io::{inb, inl, inw, outb, outl, outw}; 42 | 43 | /// Nice little type that allows us to specify the size of the value read without using inb 44 | /// directly. 45 | pub trait InOut { 46 | unsafe fn port_in(port: u16) -> Self; 47 | unsafe fn port_out(port: u16, value: Self); 48 | } 49 | 50 | impl InOut for u8 { 51 | unsafe fn port_in(port: u16) -> u8 { 52 | inb(port) 53 | } 54 | unsafe fn port_out(port: u16, value: u8) { 55 | outb(value, port); 56 | } 57 | } 58 | 59 | impl InOut for u16 { 60 | unsafe fn port_in(port: u16) -> u16 { 61 | inw(port) 62 | } 63 | unsafe fn port_out(port: u16, value: u16) { 64 | outw(value, port); 65 | } 66 | } 67 | 68 | impl InOut for u32 { 69 | unsafe fn port_in(port: u16) -> u32 { 70 | inl(port) 71 | } 72 | unsafe fn port_out(port: u16, value: u32) { 73 | outl(value, port); 74 | } 75 | } 76 | 77 | /// An `InOut`sized port. This could be any of the type implementors for `InOut`. 78 | #[derive(Debug)] 79 | pub struct Port { 80 | /// Port address. 81 | pub port: u16, 82 | 83 | /// Zero-byte placeholder. This is only here so that we can have a 84 | /// type parameter `T` without a compiler error. 85 | phantom: PhantomData, 86 | } 87 | 88 | impl Port { 89 | /// Create a port which can handle values of `T` size. 90 | pub const unsafe fn new(port: u16) -> Port { 91 | Port { 92 | port: port, 93 | phantom: PhantomData, 94 | } 95 | } 96 | 97 | /// Read a value from the port. 98 | pub fn read(&mut self) -> T { 99 | unsafe { T::port_in(self.port) } 100 | } 101 | 102 | /// Write a value to the port. 103 | pub fn write(&mut self, value: T) { 104 | unsafe { 105 | T::port_out(self.port, value); 106 | } 107 | } 108 | } 109 | 110 | #[derive(Debug)] 111 | pub struct UnsafePort { 112 | port: u16, 113 | phantom: PhantomData, 114 | } 115 | 116 | impl UnsafePort { 117 | /// Create a new unsafe port. 118 | pub const unsafe fn new(port: u16) -> UnsafePort { 119 | UnsafePort { 120 | port: port, 121 | phantom: PhantomData, 122 | } 123 | } 124 | 125 | /// Read a value from the port. 126 | pub unsafe fn read(&mut self) -> T { 127 | T::port_in(self.port) 128 | } 129 | 130 | /// Write a value to the port. 131 | pub unsafe fn write(&mut self, value: T) { 132 | T::port_out(self.port, value); 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /src/device/io/dma.rs: -------------------------------------------------------------------------------- 1 | use core::{mem, ptr}; 2 | use arch::memory; 3 | 4 | // TODO: Add Drop impl. 
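/// A physically contiguous allocation used by the DMA code in this module.
///
/// Illustrative usage (a sketch; the error type is whatever `memory::physalloc`
/// propagates through `PhysBox::new` below):
///
/// ```ignore
/// let buf = PhysBox::new(4096)?;
/// // `buf.address` is the physical start address handed to the device.
/// ```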
5 | struct PhysBox { 6 | address: usize, 7 | size: usize, 8 | } 9 | 10 | impl PhysBox { 11 | /// Allocate some physical memory and return the start address of the allocated frame. 12 | fn new(size: usize) -> Result { 13 | let address = unsafe { memory::physalloc(size)? }; 14 | 15 | Ok(PhysBox { 16 | address: address, 17 | size: size, 18 | }) 19 | 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/device/io/mmio.rs: -------------------------------------------------------------------------------- 1 | use core::intrinsics::{volatile_load, volatile_store}; 2 | use core::mem::uninitialized; 3 | use core::ops::{BitAnd, BitOr, Not}; 4 | 5 | #[repr(packed)] 6 | pub struct Mmio { 7 | value: T, 8 | } 9 | 10 | impl Mmio 11 | where 12 | T: Copy + PartialEq + BitAnd + BitOr + Not, 13 | { 14 | pub fn new() -> Self { 15 | Mmio { 16 | value: unsafe { uninitialized() }, 17 | } 18 | } 19 | 20 | pub fn read(&self) -> T { 21 | unsafe { volatile_load(&self.value) } 22 | } 23 | 24 | pub fn write(&mut self, value: T) { 25 | unsafe { volatile_store(&mut self.value, value) } 26 | } 27 | 28 | pub fn readf(&self, flags: T) -> bool { 29 | (self.value & flags) as T == flags 30 | } 31 | 32 | pub fn writef(&mut self, flags: T, value: bool) { 33 | let tmp: T = match value { 34 | true => self.read() | flags, 35 | false => self.read() & !flags, 36 | }; 37 | 38 | self.write(tmp); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/device/io/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cpuio; 2 | pub mod mmio; 3 | 4 | pub use self::cpuio::Port; 5 | -------------------------------------------------------------------------------- /src/device/keyboard/keyboard.rs: -------------------------------------------------------------------------------- 1 | use device::keyboard::ps2_keyboard::{Key, KeyEvent}; 2 | use device::keyboard::ps2_keyboard::Key::*; 3 | use device::keyboard::ps2_keyboard::Modifiers::*; 4 | 5 | macro_rules! key_press { 6 | ($x:expr) => (Some(KeyEvent::Pressed($x))) 7 | } 8 | 9 | macro_rules! key_release { 10 | ($x:expr) => (Some(KeyEvent::Released($x))) 11 | } 12 | 13 | /// Gets a key from a given keyboard event. 14 | pub fn get_key(scancode: u64) -> Option { 15 | match get_key_event(scancode) { 16 | Some(KeyEvent::Pressed(key)) => Some(key), 17 | Some(KeyEvent::Released(key)) => Some(key), 18 | _ => None, 19 | } 20 | } 21 | 22 | /// Calls `match_scancode` and is then matched on itself to retrieve a `Key` based on the returned 23 | /// `KeyEvent`. 24 | pub fn get_key_event(scancode: u64) -> Option { 25 | match_scancode(scancode) 26 | } 27 | 28 | /// Special keys that are part of a byte sequence. 
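///
/// For example (illustrative): `0x48` is the extended "cursor up" make code that
/// follows an `0xE0` prefix, so it is kept, while a plain `0x1E` ('a') is not:
///
/// ```ignore
/// assert_eq!(is_special_key(0x48), Some(0x48));
/// assert_eq!(is_special_key(0x1E), None);
/// ```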
29 | pub fn is_special_key(byte: u8) -> Option { 30 | match byte { 31 | 0x5B | 0xDB => Some(byte), 32 | 0x1D | 0x9D => Some(byte), 33 | 0x5C | 0xDC => Some(byte), 34 | 0x38 | 0xB8 => Some(byte), 35 | 0x5D | 0xDD => Some(byte), 36 | 0x52 | 0xD2 => Some(byte), 37 | 0x47 | 0x97 => Some(byte), 38 | 0x49 | 0xC9 => Some(byte), 39 | 0x53 | 0xD3 => Some(byte), 40 | 0x4F | 0xCF => Some(byte), 41 | 0x51 | 0xD1 => Some(byte), 42 | 0x48 | 0xC8 => Some(byte), 43 | 0x4B | 0xCB => Some(byte), 44 | 0x50 | 0xD0 => Some(byte), 45 | 0x4D | 0xCD => Some(byte), 46 | 0x35 | 0xB5 => Some(byte), 47 | 0x1C | 0x9C => Some(byte), 48 | _ => None, 49 | } 50 | } 51 | 52 | /// Use range matching to convert our passed scancode to some type of ASCII or to update modifiers, 53 | /// and return a key-event based on whether this was a key press/release (only relevant for 54 | /// modifiers). 55 | fn match_scancode(scancode: u64) -> Option { 56 | let idx = scancode as usize; 57 | match scancode { 58 | // ASCII Keys by keyboard row. 59 | 0x02...0x0D => key_press!(LowerAscii(b"1234567890-="[idx - 0x02])), 60 | 0x10...0x1B => key_press!(LowerAscii(b"qwertyuiop[]"[idx - 0x10])), 61 | 0x1E...0x28 => key_press!(LowerAscii(b"asdfghjkl;'"[idx - 0x1E])), 62 | 0x2C...0x35 => key_press!(LowerAscii(b"zxcvbnm,./"[idx - 0x2C])), 63 | 0x29 => key_press!(LowerAscii(b'`')), 64 | 0x2B => key_press!(LowerAscii(b'\\')), 65 | 66 | // Non-modifiable ASCII keys 67 | 0x01 => key_press!(Ascii(0x1B)), // escape 68 | 0x0E => key_press!(Ascii(0x8)), // backspace 69 | 0x0F => key_press!(Ascii(b'\t')), // tab 70 | 0x1C => key_press!(Ascii(b'\n')), // newline 71 | 0x39 => key_press!(Ascii(b' ')), // space 72 | 73 | // Meta keys 74 | 0x1D => key_press!(Meta(ControlLeft(true))), 75 | 0xE01D => key_press!(Meta(ControlRight(true))), 76 | 0x2A => key_press!(Meta(ShiftLeft(true))), 77 | 0x36 => key_press!(Meta(ShiftRight(true))), 78 | 0x38 => key_press!(Meta(AltLeft(true))), 79 | 0xE038 => key_press!(Meta(AltRight(false))), 80 | 0x3A => key_press!(Meta(CapsLock)), 81 | 0x45 => key_press!(Meta(NumLock)), 82 | 0x46 => key_press!(Meta(ScrollLock)), 83 | // F1 .. F10 84 | 0x3B => key_press!(Meta(FunctionKeys(0))), 85 | 0x3C => key_press!(Meta(FunctionKeys(1))), 86 | 0x3D => key_press!(Meta(FunctionKeys(2))), 87 | 0x3E => key_press!(Meta(FunctionKeys(3))), 88 | 0x3F => key_press!(Meta(FunctionKeys(4))), 89 | 0x40 => key_press!(Meta(FunctionKeys(5))), 90 | 0x41 => key_press!(Meta(FunctionKeys(6))), 91 | 0x42 => key_press!(Meta(FunctionKeys(7))), 92 | 0x43 => key_press!(Meta(FunctionKeys(8))), 93 | 0x44 => key_press!(Meta(FunctionKeys(9))), 94 | // F11, F12 95 | 0x57 => key_press!(Meta(FunctionKeys(10))), 96 | 0x58 => key_press!(Meta(FunctionKeys(11))), 97 | 98 | 0xAA => key_release!(Meta(ShiftLeft(false))), 99 | 0xB6 => key_release!(Meta(ShiftRight(false))), 100 | 0x9D => key_release!(Meta(ControlLeft(false))), 101 | 0xE09D => key_release!(Meta(ControlRight(false))), 102 | 0xB8 => key_release!(Meta(AltLeft(false))), 103 | 0xE0B8 => key_release!(Meta(AltRight(false))), 104 | // F1 .. 
F10 105 | 0xBB => key_release!(Meta(FunctionKeys(0))), 106 | 0xBC => key_release!(Meta(FunctionKeys(1))), 107 | 0xBD => key_release!(Meta(FunctionKeys(2))), 108 | 0xBE => key_release!(Meta(FunctionKeys(3))), 109 | 0xBF => key_release!(Meta(FunctionKeys(4))), 110 | 0xC0 => key_release!(Meta(FunctionKeys(5))), 111 | 0xC1 => key_release!(Meta(FunctionKeys(6))), 112 | 0xC2 => key_release!(Meta(FunctionKeys(7))), 113 | 0xC3 => key_release!(Meta(FunctionKeys(8))), 114 | 0xC4 => key_release!(Meta(FunctionKeys(9))), 115 | 0xD7 => key_release!(Meta(FunctionKeys(10))), 116 | 0xD8 => key_release!(Meta(FunctionKeys(11))), 117 | 118 | _ => None, 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/device/keyboard/layout/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "uk")] 2 | pub mod uk_std; 3 | #[cfg(feature = "us")] 4 | pub mod us_std; 5 | 6 | #[cfg(feature = "uk")] 7 | pub use self::uk_std::map_to_upper; 8 | #[cfg(feature = "us")] 9 | pub use self::us_std::map_to_upper; 10 | -------------------------------------------------------------------------------- /src/device/keyboard/layout/uk_std.rs: -------------------------------------------------------------------------------- 1 | use alloc::Vec; 2 | 3 | pub fn map_to_upper(lower: char) -> Vec { 4 | if lower.is_alphabetic() { 5 | lower.to_uppercase().collect() 6 | } else { 7 | let upper = match lower { 8 | '`' => '¬', 9 | '1' => '!', 10 | '2' => '"', 11 | '3' => '£', 12 | '4' => '$', 13 | '5' => '%', 14 | '6' => '^', 15 | '7' => '&', 16 | '8' => '*', 17 | '9' => '(', 18 | '0' => ')', 19 | '-' => '_', 20 | '=' => '+', 21 | '[' => '{', 22 | ']' => '}', 23 | '\\' => '|', 24 | ';' => ':', 25 | '\'' => '@', 26 | '#' => '~', 27 | ',' => '<', 28 | '.' => '>', 29 | '/' => '?', 30 | _ => 0x0 as char, 31 | }; 32 | 33 | vec![upper] 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/device/keyboard/layout/us_std.rs: -------------------------------------------------------------------------------- 1 | use alloc::Vec; 2 | 3 | pub fn map_to_upper(lower: char) -> Vec { 4 | if lower.is_alphabetic() { 5 | lower.to_uppercase().collect() 6 | } else { 7 | let upper = match lower { 8 | '`' => '~', 9 | '1' => '!', 10 | '2' => '@', 11 | '3' => '#', 12 | '4' => '$', 13 | '5' => '%', 14 | '6' => '^', 15 | '7' => '&', 16 | '8' => '*', 17 | '9' => '(', 18 | '0' => ')', 19 | '-' => '_', 20 | '=' => '+', 21 | '[' => '{', 22 | ']' => '}', 23 | '\\' => '|', 24 | ';' => ':', 25 | '\'' => '"', 26 | ',' => '<', 27 | '.' => '>', 28 | '/' => '?', 29 | _ => 0x0 as char, 30 | }; 31 | 32 | vec![upper] 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/device/keyboard/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod keyboard; 2 | pub mod layout; 3 | pub mod ps2_keyboard; 4 | 5 | pub use self::keyboard::*; 6 | pub use self::ps2_keyboard::*; 7 | -------------------------------------------------------------------------------- /src/device/keyboard/ps2_keyboard.rs: -------------------------------------------------------------------------------- 1 | use device::ps2_8042; 2 | use device::keyboard; 3 | use alloc::Vec; 4 | use alloc::string::{String, ToString}; 5 | use spin::Mutex; 6 | 7 | /// A pair of keys on the left and the right of the keyboard. 
8 | #[derive(Debug)] 9 | struct KeyPair { 10 | left: bool, 11 | right: bool, 12 | } 13 | 14 | impl KeyPair { 15 | const fn new() -> Self { 16 | KeyPair { 17 | left: false, 18 | right: false, 19 | } 20 | } 21 | 22 | fn is_pressed(&self) -> bool { 23 | self.left || self.right 24 | } 25 | } 26 | 27 | /// Possible modifications to state we could have. 28 | pub enum Modifiers { 29 | AltLeft(bool), 30 | AltRight(bool), 31 | CapsLock, 32 | ControlLeft(bool), 33 | ControlRight(bool), 34 | NumLock, 35 | ScrollLock, 36 | ShiftLeft(bool), 37 | ShiftRight(bool), 38 | /// Function keys, the usize represents the index 39 | /// of the key in the array `functions` under `ModifierState`. 40 | FunctionKeys(usize), 41 | } 42 | 43 | struct ModifierState { 44 | shift: KeyPair, 45 | control: KeyPair, 46 | alt: KeyPair, 47 | caps_lock: bool, 48 | num_lock: bool, 49 | scroll_lock: bool, 50 | function_keys: [bool; 12], 51 | } 52 | 53 | impl ModifierState { 54 | const fn new() -> Self { 55 | ModifierState { 56 | shift: KeyPair::new(), 57 | control: KeyPair::new(), 58 | alt: KeyPair::new(), 59 | caps_lock: false, 60 | num_lock: false, 61 | scroll_lock: false, 62 | function_keys: [false; 12], 63 | } 64 | } 65 | 66 | /// Should we use uppercase letters? 67 | fn use_uppercase_letters(&self) -> bool { 68 | self.shift.is_pressed() ^ self.caps_lock 69 | } 70 | 71 | fn should_switch_tty(&self) -> (bool, usize) { 72 | let is_ctrl: bool = self.control.left || self.control.right; 73 | 74 | for i in 0..12 { 75 | // If any one of the function keys is pressed 76 | if self.function_keys[i] && is_ctrl { 77 | return (true, i); 78 | } else { 79 | continue; 80 | } 81 | } 82 | 83 | (false, 0) 84 | } 85 | 86 | /// Apply modifiers to ascii and return updated ascii. 87 | fn apply_to(&self, ascii: char) -> String { 88 | if self.use_uppercase_letters() { 89 | use device::keyboard::layout::map_to_upper; 90 | 91 | map_to_upper(ascii).iter().collect() 92 | } else if self.should_switch_tty().0 { 93 | let index = self.should_switch_tty().1; 94 | tty_switch!(index); 95 | 96 | ascii.to_string() 97 | } else { 98 | ascii.to_string() 99 | } 100 | } 101 | 102 | /// Update modifier state. 103 | fn update(&mut self, modifier: Modifiers) { 104 | use self::Modifiers::*; 105 | 106 | match modifier { 107 | AltLeft(m) => self.alt.left = m, 108 | AltRight(m) => self.alt.right = m, 109 | CapsLock => self.caps_lock = !self.caps_lock, 110 | ControlLeft(m) => self.control.left = m, 111 | ControlRight(m) => self.control.right = m, 112 | NumLock => self.num_lock = !self.num_lock, 113 | ScrollLock => self.num_lock = !self.scroll_lock, 114 | ShiftLeft(m) => self.shift.left = m, 115 | ShiftRight(m) => self.shift.right = m, 116 | FunctionKeys(m) => self.function_keys[m] = true, 117 | } 118 | } 119 | } 120 | 121 | /// Possible types of keyboard input we might receive. 122 | pub enum Key { 123 | Ascii(u8), 124 | Meta(Modifiers), 125 | LowerAscii(u8), 126 | } 127 | 128 | /// A key can be pressed or released and there are different scancodes as such. 129 | pub enum KeyEvent { 130 | Pressed(Key), 131 | Released(Key), 132 | } 133 | 134 | static STATE: Mutex = Mutex::new(ModifierState::new()); 135 | 136 | /// Parse the retrieved key and print the output or update modifier state dependant on the type of 137 | /// key received. This is called by our keyboard IRQ handler. 
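///
/// Illustrative scancode-set-1 sequence (a sketch, not part of the original source):
///
/// ```ignore
/// parse_key(0x2A); // left shift pressed: only updates the modifier state
/// parse_key(0x1E); // 'a' pressed while shift is held: prints "A"
/// parse_key(0xAA); // left shift released
/// ```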
138 | pub fn parse_key(scancode: u8) { 139 | let sequence: u64 = retrieve_bytes(scancode); 140 | 141 | if let Some(key) = keyboard::get_key(sequence) { 142 | match key { 143 | Key::Ascii(k) => print_char(k as char), 144 | Key::Meta(modifier) => STATE.lock().update(modifier), 145 | Key::LowerAscii(byte) => print_str(STATE.lock().apply_to(byte as char)), 146 | } 147 | } 148 | } 149 | 150 | /// Read bytes until end of sequence and combine into a number. 151 | fn retrieve_bytes(scancode: u8) -> u64 { 152 | let mut byte_sequence: Vec = vec![scancode]; 153 | 154 | // These scancodes are special - they indicate the start of a byte sequence which is sent when 155 | // some keys are pressed. If they are the byte we receive, read until the end of the sequence. 156 | if scancode == 0xE0 || scancode == 0xE1 { 157 | // Read another byte from the keyboard. 158 | let check: u8 = ps2_8042::read_char(); 159 | 160 | if let Some(byte) = keyboard::is_special_key(check) { 161 | byte_sequence.push(byte); 162 | } 163 | } 164 | 165 | byte_sequence 166 | .iter() 167 | .rev() 168 | .fold(0, |acc, &b| (acc << 1) + b as u64) 169 | } 170 | 171 | /// Print an ascii character. 172 | pub fn print_char(character: char) { 173 | match character { 174 | '\n' | ' ' | '\t' | '\x08' => print!("{}", character), 175 | _ => (), 176 | } 177 | } 178 | 179 | pub fn print_str(string: String) { 180 | print!("{}", string); 181 | } 182 | -------------------------------------------------------------------------------- /src/device/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub mod io; 3 | pub mod keyboard; 4 | pub mod ps2_8042; 5 | pub mod vga; 6 | pub mod pic; 7 | pub mod pit; 8 | pub mod ahci; 9 | pub mod pci; 10 | pub mod apic; 11 | pub mod serial; 12 | 13 | pub use self::io::cpuio::{Port, UnsafePort}; 14 | pub use self::io::mmio; 15 | 16 | use raw_cpuid::CpuId; 17 | 18 | /// Perform hardware init. 19 | pub unsafe fn init() { 20 | vga::init(); 21 | pit::init(); 22 | ps2_8042::PS2.lock().init(); 23 | pci::init(); 24 | } 25 | -------------------------------------------------------------------------------- /src/device/pci.rs: -------------------------------------------------------------------------------- 1 | use device::io::Port; 2 | use spin::Mutex; 3 | use alloc::Vec; 4 | use core::fmt; 5 | // use core::num::Float; 6 | 7 | #[allow(dead_code)] 8 | const MAX_BUS: u8 = 255; 9 | 10 | #[allow(dead_code)] 11 | const MAX_DEVICE: u8 = 31; 12 | 13 | #[allow(dead_code)] 14 | const MAX_FUNCTION: u8 = 7; 15 | 16 | static PCI: Mutex = Mutex::new(Pci { 17 | cfg_address: unsafe { Port::new(0xCF8) }, 18 | cfg_data: unsafe { Port::new(0xCFC) }, 19 | }); 20 | 21 | lazy_static! { 22 | static ref DEVICES: Mutex> = Mutex::new(Vec::new()); 23 | } 24 | 25 | pub struct Pci { 26 | pub cfg_address: Port, 27 | pub cfg_data: Port, 28 | } 29 | 30 | impl Pci { 31 | /// Read an aligned dword from the PCI configuration space. 32 | pub unsafe fn read_config(&mut self, bus: u8, slot: u8, func: u8, offset: u8) -> u32 { 33 | let address: u32 = 0x80000000 | (bus as u32) << 16 | (slot as u32) << 11 34 | | (func as u32) << 8 | (offset & 0xFC) as u32; 35 | 36 | self.cfg_address.write(address); 37 | self.cfg_data.read() 38 | } 39 | 40 | /// Read data from `CFG_DATA` to determine unique info about a device. 
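///
/// For example, probing function 0 of device 0 on bus 0, usually the host bridge
/// (illustrative only):
///
/// ```ignore
/// if let Some(device) = unsafe { PCI.lock().probe(0, 0, 0) } {
///     println!("{}", device); // uses the Display impl below
/// }
/// ```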
41 | pub unsafe fn probe(&mut self, bus: u8, slot: u8, function: u8) -> Option { 42 | let config_0 = self.read_config(bus, slot, function, 0); 43 | 44 | if config_0 == 0xFFFFFFFF { 45 | return None; 46 | } 47 | 48 | let config_4 = self.read_config(bus, slot, function, 0x8); 49 | let config_c = self.read_config(bus, slot, function, 0xC); 50 | 51 | Some(Device { 52 | bus: bus, 53 | function: function, 54 | device: slot, 55 | device_id: (config_0 >> 16) as u16, 56 | vendor_id: config_0 as u16, 57 | rev_id: config_4 as u8, 58 | subclass: (config_4 >> 16) as u8, 59 | class: DeviceClass::from_u8((config_4 >> 24) as u8), 60 | multifunction: config_c & 0x800000 != 0, 61 | bars: [0; 6], 62 | }) 63 | } 64 | } 65 | 66 | impl fmt::Display for Device { 67 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 68 | write!( 69 | f, 70 | "{}.{}.{}: 0x{:04x} 0x{:04x} {:?} {:02x}", 71 | self.bus, 72 | self.device, 73 | self.function, 74 | self.vendor_id, 75 | self.device_id, 76 | self.class, 77 | self.subclass 78 | ) 79 | } 80 | } 81 | 82 | #[derive(Debug, Copy, Clone, PartialEq)] 83 | #[repr(u8)] 84 | #[allow(dead_code)] 85 | pub enum DeviceClass { 86 | Legacy = 0x00, 87 | MassStorage = 0x01, 88 | Network = 0x02, 89 | Display = 0x03, 90 | Multimedia = 0x04, 91 | Memory = 0x05, 92 | BridgeDevice = 0x06, 93 | SimpleCommunication = 0x07, 94 | BaseSystemPeripheral = 0x08, 95 | InputDevice = 0x09, 96 | DockingStation = 0x0A, 97 | Processor = 0x0B, 98 | SerialBus = 0x0C, 99 | Wireless = 0x0D, 100 | IntelligentIO = 0x0E, 101 | SatelliteCommunication = 0x0F, 102 | EncryptionDecryption = 0x10, 103 | DataAndSignalProcessing = 0x11, 104 | Unknown, 105 | } 106 | 107 | impl DeviceClass { 108 | /// Convert a given device code to a possible `DeviceClass` variant. 109 | fn from_u8(c: u8) -> Self { 110 | if c <= DeviceClass::DataAndSignalProcessing as u8 { 111 | unsafe { ::core::mem::transmute(c) } 112 | } else { 113 | DeviceClass::Unknown 114 | } 115 | } 116 | } 117 | 118 | /// A PCI device. 119 | #[derive(Debug, Copy, Clone)] 120 | pub struct Device { 121 | bus: u8, 122 | function: u8, 123 | device: u8, 124 | device_id: u16, 125 | vendor_id: u16, 126 | rev_id: u8, 127 | subclass: u8, 128 | class: DeviceClass, 129 | /// Whether this device is multifunction or not. 130 | multifunction: bool, 131 | /// Base addresses. 132 | bars: [u32; 6], 133 | } 134 | 135 | impl Device { 136 | fn address(&self, offset: u32) -> u32 { 137 | return 1 << 31 | (self.bus as u32) << 16 | (self.device as u32) << 11 138 | | (self.function as u32) << 8 | (offset as u32 & 0xFC); 139 | } 140 | 141 | /// Read. 142 | pub unsafe fn read(&self, offset: u32) -> u32 { 143 | let address = self.address(offset); 144 | PCI.lock().cfg_address.write(address); 145 | return PCI.lock().cfg_data.read(); 146 | } 147 | 148 | /// Write. 
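///
/// Writes a dword into this device's configuration space at `offset`; for example,
/// reading BAR0 at offset 0x10 and writing it back, as `load_bars` below does
/// (illustrative):
///
/// ```ignore
/// let bar0 = unsafe { device.read(0x10) };
/// unsafe { device.write(0x10, bar0) };
/// ```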
149 | pub unsafe fn write(&self, offset: u32, value: u32) { 150 | let address = self.address(offset); 151 | PCI.lock().cfg_address.write(address); 152 | PCI.lock().cfg_data.write(value); 153 | } 154 | 155 | /// Set a certain flag 156 | pub unsafe fn set_flag(&self, offset: u32, flag: u32, toggle: bool) { 157 | let mut value = self.read(offset); 158 | 159 | if toggle { 160 | value |= flag 161 | } else { 162 | value &= 0xFFFFFFFF - flag; 163 | } 164 | self.write(offset, value); 165 | } 166 | 167 | unsafe fn load_bars(&mut self) { 168 | for i in 0..6 { 169 | let bar = self.read(i * 4 + 0x10); 170 | if bar > 0 { 171 | self.bars[i as usize] = bar; 172 | self.write(i * 4 + 0x10, 0xFFFFFFFF); 173 | let size = (0xFFFFFFFF - (self.read(i * 4 + 0x10) & 0xFFFFFFF0)) + 1; 174 | self.write(i * 4 + 0x10, bar); 175 | if size > 0 { 176 | self.bars[i as usize] = size; 177 | } 178 | } 179 | } 180 | } 181 | 182 | pub fn bar(&self, index: usize) -> u32 { 183 | self.bars[index] 184 | } 185 | } 186 | 187 | fn init_dev(bus: u8, dev: u8) { 188 | for func in 0..MAX_FUNCTION { 189 | unsafe { 190 | let device = PCI.lock().probe(bus, dev, func); 191 | 192 | match device { 193 | // Device found, load bars. 194 | Some(mut d) => { 195 | d.load_bars(); 196 | DEVICES.lock().push(d); 197 | } 198 | 199 | None => {} 200 | } 201 | } 202 | } 203 | } 204 | 205 | fn init_bus(bus: u8) { 206 | for dev in 0..MAX_DEVICE { 207 | init_dev(bus, dev); 208 | } 209 | } 210 | 211 | pub fn init() { 212 | for bus in 0..MAX_BUS { 213 | init_bus(bus); 214 | } 215 | 216 | println!("[ dev ] Discovered {} PCI devices.", DEVICES.lock().len()); 217 | 218 | for dev in DEVICES.lock().iter_mut() { 219 | // Check the type of device, in order to identify important stuff that we will use. 220 | match dev.class { 221 | DeviceClass::Legacy => {} 222 | DeviceClass::MassStorage => { 223 | match dev.subclass { 224 | 0x06 => { 225 | use device::ahci::hba::AHCI_BASE; 226 | use core::sync::atomic::Ordering; 227 | 228 | // Read header offset 24h to get reference to the ABAR. 229 | let mut bar = unsafe { dev.read(0x24) }; 230 | 231 | // Read bits 31-34, these point to the ABAR. 232 | let address = bar & 0xFFFFFFF0; 233 | 234 | AHCI_BASE.store(address as usize, Ordering::SeqCst); 235 | 236 | println!( 237 | "[ dev ] Found AHCI controller. Controller mapped at {:#x}", 238 | address 239 | ); 240 | } 241 | _ => {} 242 | } 243 | } 244 | _ => {} 245 | } 246 | } 247 | } 248 | -------------------------------------------------------------------------------- /src/device/pic.rs: -------------------------------------------------------------------------------- 1 | use spin::Mutex; 2 | use device::Port; 3 | 4 | /// Global interface to the PIC. 5 | pub static PICS: Mutex = Mutex::new(unsafe { ChainedPics::new(0x20, 0x28) }); 6 | 7 | /// Command to begin init of the PIC chip. 8 | const CMD_INIT: u8 = 0x11; 9 | 10 | /// EOI command, that tells the PIC it can begin receiving other interrupts again. 11 | const CMD_END_OF_INTERRUPT: u8 = 0x20; 12 | 13 | /// PIC mode. 14 | const MODE_8086: u8 = 0x01; 15 | 16 | /// A single interrupt controller. 17 | /// The `offset` is set to the value from which the handled IRQs begin. 18 | pub struct Pic { 19 | offset: u8, 20 | command: Port, 21 | pub data: Port, 22 | } 23 | 24 | impl Pic { 25 | /// The offset is less than or equal to the interrupt id and the interrupt id is less than the 26 | /// offset + 8. This is done because the master PIC handles IRQs 0-7, where the vector number of 27 | /// IRQ 0 is the offset of the master PIC. 
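///
/// For example, with the offsets used by `PICS` (0x20 for the master and 0x28 for
/// the slave), the master answers for vectors 0x20..=0x27 and the slave for
/// 0x28..=0x2F (illustrative; `ChainedPics::handles_interrupt` below delegates here):
///
/// ```ignore
/// assert!(PICS.lock().handles_interrupt(0x21));  // IRQ 1, the keyboard
/// assert!(!PICS.lock().handles_interrupt(0x40)); // not a PIC vector
/// ```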
28 | fn handles_interrupt(&self, interrupt_id: u8) -> bool { 29 | self.offset <= interrupt_id && interrupt_id < self.offset + 8 30 | } 31 | 32 | /// Write the EOI command for a single PIC. 33 | unsafe fn end_of_interrupt(&mut self) { 34 | self.command.write(CMD_END_OF_INTERRUPT); 35 | } 36 | } 37 | 38 | /// A master and slave PIC. 39 | pub struct ChainedPics { 40 | pub pics: [Pic; 2], 41 | } 42 | 43 | impl ChainedPics { 44 | /// Create a new pair of controllers. 45 | pub const unsafe fn new(offset1: u8, offset2: u8) -> ChainedPics { 46 | ChainedPics { 47 | pics: [ 48 | Pic { 49 | // The data port has an offset of 1 from the command ports of both the Master and 50 | // Slave PICS. 51 | offset: offset1, 52 | command: Port::new(0x20), 53 | data: Port::new(0x21), 54 | }, 55 | Pic { 56 | offset: offset2, 57 | command: Port::new(0xA0), 58 | data: Port::new(0xA1), 59 | }, 60 | ], 61 | } 62 | } 63 | 64 | /// Initialize PICS. We remap the IRQs to begin at 0x20, and the slave IRQs to begin at 0x28. 65 | pub unsafe fn init(&mut self) { 66 | // Write garbage data to a port as a method of telling the CPU to wait for a bit in-between 67 | // commands. 68 | let mut wait_port: Port = Port::new(0x80); 69 | let mut wait = || wait_port.write(0); 70 | 71 | // Send each PIC the 0x11 byte to tell them to expect initialization 72 | self.pics[0].command.write(CMD_INIT); 73 | wait(); 74 | self.pics[1].command.write(CMD_INIT); 75 | wait(); 76 | 77 | // Master PIC Vector offset. 78 | self.pics[0].data.write(self.pics[0].offset); 79 | wait(); 80 | // Slave PIC Vector offset. 81 | self.pics[1].data.write(self.pics[1].offset); 82 | wait(); 83 | 84 | // Tell the Master PIC there is a slave PIC at IRQ 2. 85 | self.pics[0].data.write(4); 86 | wait(); 87 | // Tell the Slave PIC its cascade identity (IRQ 2) 88 | self.pics[1].data.write(2); 89 | wait(); 90 | 91 | // Byte 3: set the mode 92 | self.pics[0].data.write(MODE_8086); 93 | wait(); 94 | self.pics[1].data.write(MODE_8086); 95 | 96 | println!("[ dev ] Initialised master and slave 8259 PICs."); 97 | println!("[ dev ] PIC0 has vector offset: {:#x}", self.pics[0].offset); 98 | println!("[ dev ] PIC1 has vector offset: {:#x}", self.pics[1].offset); 99 | } 100 | 101 | /// Cycle through the PICS until we find one that can handle this interrupt. 102 | pub fn handles_interrupt(&self, interrupt_id: u8) -> bool { 103 | self.pics.iter().any(|p| p.handles_interrupt(interrupt_id)) 104 | } 105 | 106 | /// Notify EOI for master and slave. 107 | pub unsafe fn notify_end_of_interrupt(&mut self, interrupt_id: u8) { 108 | if self.handles_interrupt(interrupt_id) { 109 | // If the slave can handle this interrupt, tell it the interrupt has ended. 110 | if self.pics[1].handles_interrupt(interrupt_id) { 111 | self.pics[1].end_of_interrupt(); 112 | } 113 | 114 | // Notify the Master PIC that the interrupt has ended. 115 | self.pics[0].end_of_interrupt(); 116 | } 117 | } 118 | 119 | /// Disables both PIC0 and PIC1. 120 | pub unsafe fn disable_8259_pic(&mut self) { 121 | self.pics[0].data.write(0xff); 122 | self.pics[1].data.write(0xff); 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/device/pit.rs: -------------------------------------------------------------------------------- 1 | use device::Port; 2 | use spin::Mutex; 3 | use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; 4 | 5 | /// Configuration data. Use channel 0 and mode 3, square wave generator. Use lohi operation. 
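///
/// Bit layout of 0x36 (0b0011_0110) on the 8253/8254 command register:
/// bits 7-6 = 00 (channel 0), bits 5-4 = 11 (lobyte/hibyte access),
/// bits 3-1 = 011 (mode 3, square wave), bit 0 = 0 (binary counting).
/// With `DIVISOR` = 2685 the output rate is 1193182 / 2685 ~= 444 Hz,
/// i.e. one IRQ0 roughly every 2.25 ms.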
6 | const PIT_SET: u8 = 0x36; 7 | static DIVISOR: u16 = 2685; 8 | 9 | /// Simple interface to the PIT. 10 | pub static PIT: Mutex<[Port; 2]> = Mutex::new(unsafe { [Port::new(0x43), Port::new(0x40)] }); 11 | 12 | pub fn init() { 13 | println!("[ dev ] Setting pit mode."); 14 | PIT.lock()[0].write(PIT_SET); 15 | println!("[ dev ] Setting up frequency."); 16 | PIT.lock()[1].write((DIVISOR & 0xFF) as u8); 17 | PIT.lock()[1].write((DIVISOR >> 8) as u8); 18 | 19 | let frequency: u32 = 1193182 / 2685; 20 | 21 | let irq0_int_timeout = { 22 | let val = 1 / frequency; 23 | val * 1000 24 | }; 25 | 26 | println!( 27 | "[ dev ] Initialising PIT, setup to interrupt every {} ms", 28 | irq0_int_timeout 29 | ); 30 | } 31 | 32 | pub static PIT_TICKS: AtomicUsize = ATOMIC_USIZE_INIT; 33 | -------------------------------------------------------------------------------- /src/device/ps2_8042.rs: -------------------------------------------------------------------------------- 1 | use spin::Mutex; 2 | use device::io::Port; 3 | 4 | pub struct Ps2 { 5 | pub controller: Port, 6 | pub device: Port, 7 | } 8 | 9 | impl Ps2 { 10 | pub const unsafe fn new(controller: u16, device: u16) -> Ps2 { 11 | Ps2 { 12 | controller: Port::new(controller), 13 | device: Port::new(device), 14 | } 15 | } 16 | 17 | /// Poll bit 0 of status register: "Output buffer empty/full" 18 | pub fn wait_then_read(&mut self) -> u8 { 19 | while self.controller.read() & 0x1 == 0 {} 20 | self.device.read() 21 | } 22 | 23 | /// Poll bit 1 of status register: "Input buffer empty/full" 24 | pub fn wait_then_write(&mut self, data: u8) { 25 | while self.controller.read() & 0x2 == 1 {} 26 | self.device.write(data); 27 | } 28 | 29 | pub fn init(&mut self) { 30 | println!("[ dev ] Initialising PS/2 8042 controller."); 31 | // Disable devices. 32 | self.controller.write(0xAD); 33 | self.controller.write(0xA7); 34 | 35 | // Flush output buffer. 36 | self.device.read(); 37 | 38 | // Setup Controller Config Byte. 39 | self.controller.write(0x20); 40 | let mut config_byte: u8 = self.wait_then_read(); 41 | 42 | // Disable IRQs. 43 | config_byte &= !(1 << 0); 44 | config_byte &= !(1 << 1); 45 | 46 | // Write back the modified config. 47 | self.controller.write(0x60); 48 | self.wait_then_write(config_byte); 49 | 50 | // Controller self test. 51 | self.controller.write(0xAA); 52 | assert!(self.wait_then_read() == 0x55, "PS/2 self test failed"); 53 | 54 | // Interface tests. 55 | self.controller.write(0xAB); 56 | assert!(self.wait_then_read() == 0x0, "Interface tests failed",); 57 | 58 | // Enable devices. 59 | self.controller.write(0xAE); 60 | 61 | // Config byte. 62 | self.controller.write(0x20); 63 | let mut enable: u8 = self.wait_then_read(); 64 | 65 | // Re-enable IRQs. 66 | enable |= 1 << 0; 67 | 68 | self.controller.write(0x60); 69 | self.wait_then_write(enable); 70 | 71 | // Clear output buffer. 
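// (Any stale byte sitting in the controller's output buffer is read and discarded below,
// so later reads only see fresh data.)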
72 | self.device.read(); 73 | 74 | println!("[ dev ] PS/2 8042 initialised."); 75 | } 76 | 77 | pub fn read_char(&mut self) -> u8 { 78 | self.device.read() 79 | } 80 | } 81 | 82 | pub static PS2: Mutex = Mutex::new(unsafe { Ps2::new(0x64, 0x60) }); 83 | 84 | pub fn read_char() -> u8 { 85 | PS2.lock().read_char() 86 | } 87 | -------------------------------------------------------------------------------- /src/device/serial.rs: -------------------------------------------------------------------------------- 1 | use device::io::cpuio::Port; 2 | use self::Register::*; 3 | use spin::Mutex; 4 | use core::fmt::{self, Write}; 5 | 6 | #[repr(C, u8)] 7 | #[allow(dead_code)] 8 | /// Serial port registers. 9 | enum Register { 10 | DataOrBaudLsb = 0, 11 | IntEnableOrMsb = 1, 12 | InterruptIdentAndFifo = 2, 13 | LineControl = 3, 14 | ModemControl = 4, 15 | LineStatus = 5, 16 | ModemStatus = 6, 17 | Scratch = 7, 18 | } 19 | 20 | /// An interface to a serial port. 21 | pub struct SerialPort { 22 | base: u16, 23 | is_initialized: bool, 24 | } 25 | 26 | impl SerialPort { 27 | const unsafe fn new(base: u16) -> SerialPort { 28 | SerialPort { 29 | base: base, 30 | is_initialized: false, 31 | } 32 | } 33 | 34 | pub fn do_init(&mut self) { 35 | // Check if this function has already been called. 36 | if self.is_initialized == true { 37 | return; 38 | } 39 | self.is_initialized = true; 40 | 41 | // Disable interrupts. 42 | self.port(IntEnableOrMsb).write(0x00); 43 | // Enable DLAB. 44 | self.port(LineControl).write(0x80); 45 | // Set divisor as 2. 46 | self.port(DataOrBaudLsb).write(0x02); 47 | self.port(IntEnableOrMsb).write(0x00); 48 | // 8 bits, no parity, one stop bit. 49 | self.port(LineControl).write(0x03); 50 | self.port(InterruptIdentAndFifo).write(0xc7); 51 | self.port(ModemControl).write(0x0b); 52 | // Done! 53 | } 54 | 55 | /// Check if it is safe to read from this port. 56 | fn can_read(&mut self) -> bool { 57 | (self.port(LineStatus).read() & 1) == 0 58 | } 59 | 60 | /// Wait until we can get a hold on the data register, and then read from the serial port. 61 | pub fn read(&mut self) -> u8 { 62 | while self.can_read() {} 63 | 64 | self.port(DataOrBaudLsb).read() 65 | } 66 | 67 | /// Check if we can safely write the data to the serial port. 68 | fn is_transmit_empty(&mut self) -> bool { 69 | (self.port(LineStatus).read() & 0x20) == 0 70 | } 71 | 72 | /// Wait until we can get a hold on the data register, and then write to the serial port. 73 | pub fn write(&mut self, data: u8) { 74 | while self.is_transmit_empty() {} 75 | 76 | self.port(DataOrBaudLsb).write(data); 77 | } 78 | 79 | fn port(&mut self, register: Register) -> Port { 80 | unsafe { Port::new(self.base + (register as u8 as u16)) } 81 | } 82 | } 83 | 84 | impl Write for SerialPort { 85 | fn write_str(&mut self, s: &str) -> fmt::Result { 86 | for byte in s.bytes() { 87 | self.write(byte); 88 | } 89 | 90 | Ok(()) 91 | } 92 | } 93 | 94 | pub static COM1: Mutex = Mutex::new(unsafe { SerialPort::new(0x3f8) }); 95 | 96 | pub fn init() { 97 | COM1.lock().do_init(); 98 | } 99 | -------------------------------------------------------------------------------- /src/device/vga/buffer.rs: -------------------------------------------------------------------------------- 1 | use spin::Mutex; 2 | use device::vga::vga::{Color, ColorCode, VGA}; 3 | use core::fmt; 4 | 5 | /// Main print interface. 6 | pub fn print(args: fmt::Arguments) { 7 | use core::fmt::Write; 8 | SCREEN.lock().write_fmt(args).unwrap(); 9 | } 10 | 11 | /// The width of the VGA text buffer. 
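/// (80 columns by 25 rows is the standard VGA text mode geometry; a cell at (row, col)
/// lives at index `row * BUFFER_WIDTH + col`, i.e. byte offset
/// `(row * BUFFER_WIDTH + col) * 2` from 0xb8000, since each cell is one character byte
/// plus one colour byte.)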
12 | pub const BUFFER_WIDTH: usize = 80; 13 | /// The height of the VGA text buffer. 14 | pub const BUFFER_HEIGHT: usize = 25; 15 | 16 | #[derive(Copy, Clone)] 17 | /// A virtual text buffer. 18 | pub struct TextBuffer { 19 | /// Array of rows of characters. 20 | pub chars: [[u8; BUFFER_WIDTH]; BUFFER_HEIGHT], 21 | /// How far along a row we are. 22 | pub column_position: usize, 23 | /// Represents the colour of the TTY buffer. 24 | pub color_code: ColorCode, 25 | pub active: bool, 26 | } 27 | 28 | /// Clear the VGA buffer. 29 | pub fn clear_screen() { 30 | for _row in 0..BUFFER_HEIGHT { 31 | SCREEN.lock().new_line(); 32 | } 33 | } 34 | 35 | impl TextBuffer { 36 | /// Sync this virtual text buffer with the actual VGA buffer at 0xb8000. 37 | fn sync(&self) { 38 | VGA.lock().sync_buffer(&self); 39 | VGA.lock() 40 | .update_cursor(BUFFER_HEIGHT - 1, self.column_position); 41 | } 42 | 43 | /// Return the current character array. 44 | pub fn chars(&self) -> &[[u8; BUFFER_WIDTH]; BUFFER_HEIGHT] { 45 | &self.chars 46 | } 47 | 48 | /// Return the current colour code. 49 | pub fn color_code(&self) -> ColorCode { 50 | self.color_code 51 | } 52 | 53 | /// Write a byte to the VGA buffer. 54 | pub fn write_byte(&mut self, byte: u8) { 55 | match byte { 56 | // Newline character. 57 | b'\n' => self.new_line(), 58 | // Backspace. 59 | 0x8 => self.delete_byte(), 60 | // Tab escape. 61 | b'\t' => for _ in 0..4 { 62 | self.write_byte(b' '); 63 | }, 64 | // Catch-all pattern that just updates the character array with the given byte. 65 | byte => { 66 | if self.column_position >= BUFFER_WIDTH { 67 | // At end of row. 68 | self.new_line(); 69 | } 70 | 71 | let row = BUFFER_HEIGHT - 1; 72 | let col = self.column_position; 73 | self.chars[row][col] = byte; 74 | self.column_position += 1; 75 | } 76 | } 77 | 78 | if self.active { 79 | self.sync(); 80 | } 81 | } 82 | 83 | /// Delete a single byte from the buffer. 84 | pub fn delete_byte(&mut self) { 85 | if self.column_position == 0 { 86 | //At start of row, no bytes to delete. 87 | return; 88 | } 89 | 90 | let col = self.column_position - 1; 91 | 92 | self.chars[BUFFER_HEIGHT - 1][col] = b' '; 93 | self.column_position -= 1; 94 | 95 | if self.active { 96 | self.sync(); 97 | } 98 | } 99 | 100 | /// Newline. This method will be called when a `\n` character is written 101 | /// to the virtual buffer. 102 | pub fn new_line(&mut self) { 103 | for row in 1..BUFFER_HEIGHT { 104 | for col in 0..BUFFER_WIDTH { 105 | self.chars[row - 1][col] = self.chars[row][col] 106 | } 107 | } 108 | 109 | self.clear_row(BUFFER_HEIGHT - 1); 110 | //Set position to start of row. 111 | self.column_position = 0; 112 | 113 | if self.active { 114 | self.sync(); 115 | } 116 | } 117 | 118 | /// Clear a single row by stepping across the entire width of the current row, and writing a 119 | /// blank character to each position. 120 | pub fn clear_row(&mut self, row: usize) { 121 | for col in 0..BUFFER_WIDTH { 122 | self.chars[row][col] = b' '; 123 | } 124 | } 125 | } 126 | 127 | impl ::core::fmt::Write for TextBuffer { 128 | fn write_str(&mut self, s: &str) -> ::core::fmt::Result { 129 | for byte in s.bytes() { 130 | self.write_byte(byte) 131 | } 132 | 133 | Ok(()) 134 | } 135 | } 136 | 137 | /// Global interface to the VGA text mode. 
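/// A minimal usage sketch: callers normally go through `print()` above, which takes this
/// lock and calls `write_fmt`; something like `SCREEN.lock().write_str("hello")` works the
/// same way, and the bytes are mirrored into VGA memory by `sync()` while `active` is true.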
138 | pub static SCREEN: Mutex = Mutex::new(TextBuffer { 139 | column_position: 0, 140 | color_code: ColorCode::new(Color::LightGray, Color::Black), 141 | chars: [[b' '; BUFFER_WIDTH]; BUFFER_HEIGHT], 142 | active: true, 143 | }); 144 | 145 | pub static TTYS: Mutex> = Mutex::new(None); 146 | 147 | /// Switch `SCREEN` to `ttys[index]`. 148 | pub fn switch(index: usize) { 149 | let inner = |idx: usize, list: &mut [TextBuffer; 6]| { 150 | list[idx].active = true; 151 | *SCREEN.lock() = list[idx]; 152 | }; 153 | 154 | let mut list = *TTYS.lock(); 155 | 156 | let list = match list { 157 | Some(ref mut t) => t, 158 | None => panic!("TTY list called before init."), 159 | }; 160 | 161 | // Only gets called if `list` is Some 162 | inner(index, list); 163 | } 164 | 165 | /// Initialise all the TTYS. 166 | pub fn tty_init() { 167 | // Create six identical TTYS. 168 | let buffers: [TextBuffer; 6] = [TextBuffer { 169 | column_position: 0, 170 | color_code: ColorCode::new(Color::LightGray, Color::Black), 171 | chars: [[b' '; BUFFER_WIDTH]; BUFFER_HEIGHT], 172 | active: false, 173 | }; 6]; 174 | 175 | *TTYS.lock() = Some(buffers); 176 | } 177 | -------------------------------------------------------------------------------- /src/device/vga/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod buffer; 2 | pub mod vga; 3 | 4 | pub fn init() { 5 | self::buffer::tty_init(); 6 | } 7 | -------------------------------------------------------------------------------- /src/device/vga/vga.rs: -------------------------------------------------------------------------------- 1 | //! VGA - Interface to the VGA text buffer at physical address 0xb8000. 2 | 3 | use device::vga::buffer::{TextBuffer, BUFFER_HEIGHT, BUFFER_WIDTH}; 4 | use core::ptr::Unique; 5 | use spin::Mutex; 6 | use volatile::Volatile; 7 | 8 | #[repr(u8)] 9 | #[derive(Debug, Clone, Copy)] 10 | /// The possible colours that characters on the VGA buffer can be. 11 | pub enum Color { 12 | Black = 0, 13 | Blue = 1, 14 | Green = 2, 15 | Cyan = 3, 16 | Red = 4, 17 | Magenta = 5, 18 | Brown = 6, 19 | LightGray = 7, 20 | DarkGray = 8, 21 | LightBlue = 9, 22 | LightGreen = 10, 23 | LightCyan = 11, 24 | LightRed = 12, 25 | Pink = 13, 26 | Yellow = 14, 27 | White = 15, 28 | } 29 | 30 | #[derive(Debug, Clone, Copy)] 31 | /// A representation of a VGA bg/fg colour code, calculated from byte-sized 32 | /// bg/fg data. 33 | pub struct ColorCode(u8); 34 | 35 | impl ColorCode { 36 | /// Create a new `ColorCode`. 37 | pub const fn new(foreground: Color, background: Color) -> ColorCode { 38 | ColorCode((background as u8) << 4 | (foreground as u8)) 39 | } 40 | } 41 | 42 | #[repr(C)] 43 | #[derive(Debug, Clone, Copy)] 44 | /// A single character on the VGA text buffer. 45 | pub struct ScreenChar { 46 | /// The 7-bit ascii character this `ScreenChar` represents. 47 | pub ascii_character: u8, 48 | /// The colour of this `ScreenChar`. 49 | pub color_code: ColorCode, 50 | } 51 | 52 | /// A 2D array of `ScreenChar`s, with 80*25 elements. 53 | struct ScreenBuffer { 54 | chars: [[Volatile; BUFFER_WIDTH]; BUFFER_HEIGHT], 55 | } 56 | 57 | /// Interface to the VGA buffer. 58 | pub struct Vga { 59 | /// Unique pointer to a screen buffer in physical memory. 60 | frame: Unique, 61 | } 62 | 63 | /// Static VGA interface. We cast the base address `0xb8000` of VGA memory to a `ScreenBuffer` 64 | /// struct, which makes it useful to us. 
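/// (Writes go through `Volatile` so the compiler cannot optimise away or reorder stores to
/// this memory-mapped region, and `Unique` marks this as the sole owning pointer. This
/// assumes 0xb8000 is mapped - e.g. identity-mapped - by the paging code before the lock
/// is first used.)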
65 | pub static VGA: Mutex = Mutex::new(Vga { 66 | frame: unsafe { Unique::new_unchecked(0xb8000 as *mut _) }, 67 | }); 68 | 69 | impl Vga { 70 | /// Return a reference to the `ScreenBuffer` pointer. 71 | fn frame(&mut self) -> &mut ScreenBuffer { 72 | unsafe { self.frame.as_mut() } 73 | } 74 | 75 | /// Sync the virtual `buffer` with the `ScreenBuffer` pointer. 76 | pub fn sync_buffer(&mut self, buffer: &TextBuffer) { 77 | let frame = self.frame(); 78 | 79 | for row in 0..BUFFER_HEIGHT { 80 | for col in 0..BUFFER_WIDTH { 81 | // Update using the text buffer. 82 | let character = ScreenChar { 83 | ascii_character: buffer.chars()[row][col], 84 | color_code: buffer.color_code(), 85 | }; 86 | 87 | frame.chars[row][col].write(character); 88 | } 89 | } 90 | } 91 | 92 | #[allow(exceeding_bitshifts)] 93 | /// Update the text mode cursor to coordinates (row, col). 94 | pub fn update_cursor(&self, row: usize, col: usize) { 95 | let pos = ((BUFFER_WIDTH as u16) * (row as u16)) + col as u16; 96 | use device::Port; 97 | 98 | unsafe { 99 | let mut control_port: Port = Port::new(0x3D4); 100 | let mut value_port: Port = Port::new(0x3D5); 101 | 102 | control_port.write(0x0F); 103 | value_port.write((pos & 0xFF) as u8); 104 | control_port.write(0x0E); 105 | value_port.write(((pos >> 8) & 0xFF) as u8); 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(lang_items)] 2 | #![feature(const_fn, unique)] 3 | #![feature(alloc)] 4 | #![feature(allocator_api)] 5 | #![feature(asm)] 6 | #![feature(naked_functions)] 7 | #![feature(abi_x86_interrupt)] 8 | #![feature(const_unique_new)] 9 | #![feature(const_max_value)] 10 | #![feature(core_intrinsics)] 11 | #![feature(global_allocator)] 12 | #![feature(ptr_internals)] 13 | #![feature(integer_atomics)] 14 | #![no_std] 15 | 16 | #[macro_use] 17 | extern crate alloc; 18 | extern crate bit_field; 19 | #[macro_use] 20 | extern crate bitflags; 21 | #[macro_use] 22 | extern crate lazy_static; 23 | extern crate linked_list_allocator; 24 | extern crate multiboot2; 25 | #[macro_use] 26 | extern crate once; 27 | extern crate raw_cpuid; 28 | extern crate rlibc; 29 | extern crate spin; 30 | extern crate volatile; 31 | extern crate x86_64; 32 | extern crate heapless; 33 | 34 | #[macro_use] 35 | mod macros; 36 | pub mod device; 37 | pub mod task; 38 | pub mod syscall; 39 | pub mod arch; 40 | pub mod acpi; 41 | mod runtime_glue; 42 | 43 | pub use runtime_glue::*; 44 | 45 | #[no_mangle] 46 | pub extern "C" fn kmain(multiboot_information_address: usize) { 47 | unsafe { arch::init(multiboot_information_address) }; 48 | 49 | loop {} 50 | } 51 | 52 | // TODO: Move this to the memory module once some bugs with Rust get figured out. 53 | use arch::memory::heap_allocator::HeapAllocator; 54 | 55 | #[global_allocator] 56 | static HEAP_ALLOCATOR: HeapAllocator = HeapAllocator::new(); 57 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | macro_rules! print { 2 | ($($arg:tt)*) => ({ 3 | use device::serial; 4 | use core::fmt::Write; 5 | 6 | let _ = write!(serial::COM1.lock(), $($arg)*); 7 | }); 8 | } 9 | 10 | macro_rules! println { 11 | ($fmt:expr) => (print!(concat!($fmt, "\n"))); 12 | ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*)); 13 | } 14 | 15 | macro_rules! 
format { 16 | ($($arg:tt)*) => ({ 17 | use alloc::string::String; 18 | use core::fmt; 19 | let mut output = String::new(); 20 | fmt::write(&mut output, format_args!($($arg)*)).unwrap(); 21 | output 22 | }); 23 | } 24 | 25 | macro_rules! tty_switch { 26 | ($x:expr) => ({ 27 | use device::vga::buffer::switch; 28 | 29 | switch($x); 30 | }); 31 | } 32 | -------------------------------------------------------------------------------- /src/runtime_glue.rs: -------------------------------------------------------------------------------- 1 | use core; 2 | 3 | #[cfg(not(test))] 4 | #[lang = "eh_personality"] 5 | #[no_mangle] 6 | pub extern "C" fn eh_personality() {} 7 | 8 | #[cfg(not(test))] 9 | #[lang = "panic_fmt"] 10 | #[no_mangle] 11 | pub extern "C" fn panic_fmt(fmt: core::fmt::Arguments, file: &'static str, line: u32) -> ! { 12 | println!("\n\nPANIC in {} at line {}:", file, line); 13 | println!(" {}", fmt); 14 | loop {} 15 | } 16 | 17 | #[allow(non_snake_case)] 18 | #[no_mangle] 19 | pub extern "C" fn _Unwind_Resume() -> ! { 20 | loop {} 21 | } 22 | -------------------------------------------------------------------------------- /src/syscall/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod process; 2 | 3 | pub use self::process::*; 4 | -------------------------------------------------------------------------------- /src/syscall/process.rs: -------------------------------------------------------------------------------- 1 | use alloc::String; 2 | use task::{ProcessId, Scheduling, SCHEDULER}; 3 | use arch::interrupts::disable_interrupts_and_then; 4 | 5 | /// Simple system call that wraps creating a process and marking it as ready. 6 | pub fn create(new: extern "C" fn(), name: String) -> ProcessId { 7 | disable_interrupts_and_then(|| -> ProcessId { 8 | let pid = SCHEDULER 9 | .create(new, name) 10 | .expect("Could not create new process!"); 11 | SCHEDULER.ready(pid.clone()); 12 | pid 13 | }) 14 | } 15 | -------------------------------------------------------------------------------- /src/task/context.rs: -------------------------------------------------------------------------------- 1 | #[derive(Clone, Debug)] 2 | /// Register context. 3 | pub struct Context { 4 | pub cr3: usize, 5 | rbp: usize, 6 | rflags: usize, 7 | pub rsp: usize, 8 | rbx: usize, 9 | r12: usize, 10 | r13: usize, 11 | r14: usize, 12 | r15: usize, 13 | } 14 | 15 | impl Context { 16 | pub fn new() -> Self { 17 | Context { 18 | // Init all fields as 0. 19 | cr3: 0, 20 | rbp: 0, 21 | rflags: 0, 22 | rsp: 0, 23 | rbx: 0, 24 | r12: 0, 25 | r13: 0, 26 | r14: 0, 27 | r15: 0, 28 | } 29 | } 30 | 31 | /// Switch to the new context. 
32 | #[naked] 33 | #[inline(never)] 34 | pub unsafe extern "C" fn switch_to(&mut self, next: &mut Context) { 35 | asm!("pushfq ; pop $0" : "=r"(self.rflags) : : "memory" : "intel", "volatile"); 36 | asm!("push $0 ; popfq" : : "r"(next.rflags) : "memory" : "intel", "volatile"); 37 | 38 | asm!("mov $0, cr3" : "=r"(self.cr3) : : "memory" : "intel", "volatile"); 39 | asm!("mov $0, rbx" : "=r"(self.rbx) : : "memory" : "intel", "volatile"); 40 | asm!("mov $0, r12" : "=r"(self.r12) : : "memory" : "intel", "volatile"); 41 | asm!("mov $0, r13" : "=r"(self.r13) : : "memory" : "intel", "volatile"); 42 | asm!("mov $0, r14" : "=r"(self.r14) : : "memory" : "intel", "volatile"); 43 | asm!("mov $0, r15" : "=r"(self.r15) : : "memory" : "intel", "volatile"); 44 | asm!("mov $0, rsp" : "=r"(self.rsp) : : "memory" : "intel", "volatile"); 45 | asm!("mov $0, rbp" : "=r"(self.rbp) : : "memory" : "intel", "volatile"); 46 | 47 | if next.cr3 != self.cr3 { 48 | asm!("mov cr3, $0" : : "r"(next.cr3) : "memory" : "intel", "volatile"); 49 | } 50 | 51 | asm!("mov rbx, $0" : : "r"(next.rbx) : "memory" : "intel", "volatile"); 52 | asm!("mov r12, $0" : : "r"(next.r12) : "memory" : "intel", "volatile"); 53 | asm!("mov r13, $0" : : "r"(next.r13) : "memory" : "intel", "volatile"); 54 | asm!("mov r14, $0" : : "r"(next.r14) : "memory" : "intel", "volatile"); 55 | asm!("mov r15, $0" : : "r"(next.r15) : "memory" : "intel", "volatile"); 56 | asm!("mov rsp, $0" : : "r"(next.rsp) : "memory" : "intel", "volatile"); 57 | asm!("mov rbp, $0" : : "r"(next.rbp) : "memory" : "intel", "volatile"); 58 | } 59 | 60 | /// Set the active page table of this context. 61 | pub fn set_page_table(&mut self, address: usize) { 62 | self.cr3 = address; 63 | } 64 | 65 | /// Set stack pointer. 66 | pub fn set_stack(&mut self, address: usize) { 67 | self.rsp = address; 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/task/coop_sched.rs: -------------------------------------------------------------------------------- 1 | use alloc::VecDeque; 2 | use alloc::vec::Vec; 3 | use alloc::String; 4 | use core::mem; 5 | use core::ops::DerefMut; 6 | use core::sync::atomic::{AtomicUsize, Ordering}; 7 | use task::{Process, ProcessId, ProcessList, Scheduling, State, INITIAL_STACK}; 8 | use task::process; 9 | use spin::RwLock; 10 | 11 | /// Global kernel scheduler type. 12 | pub type Scheduler = CoopScheduler; 13 | 14 | /// A simple cooperative scheduler. It uses round-robin scheduling, where the next available, ready 15 | /// process is the next process to be ran. 16 | pub struct CoopScheduler { 17 | current_pid: AtomicUsize, 18 | task_table: RwLock, 19 | ready_list: RwLock>, 20 | } 21 | 22 | impl Scheduling for CoopScheduler { 23 | /// Create a process using a C-declared function pointer as an argument. This function allocates a 24 | /// 1 KiB stack. 25 | fn create(&self, func: extern "C" fn(), name: String) -> Result { 26 | use arch::memory::paging; 27 | 28 | let mut stack: Vec = vec![0; INITIAL_STACK]; 29 | 30 | let proc_top: usize = stack.len() - 3; 31 | 32 | let proc_sp = stack.as_ptr() as usize + (proc_top * mem::size_of::()); 33 | 34 | use alloc::boxed::Box; 35 | let self_ptr: Box<&Scheduling> = Box::new(self); 36 | 37 | // Reserve three elements on the stack. 38 | // stack.len() - 3 -> pointer to the entry point of the process. This is what RSP is set to 39 | // under Context::switch_to(). 40 | // stack.len() - 2 -> function that we jump to after process return. 
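// stack.len() - 1 -> raw pointer to the boxed scheduler handle (`Box::into_raw(self_ptr)`),
// which `process::process_return()` pops back off the stack once the process function
// returns.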
41 | 42 | let stack_vals: Vec = vec![ 43 | func as usize, 44 | process::process_return as usize, 45 | Box::into_raw(self_ptr) as usize, 46 | ]; 47 | 48 | for (i, val) in stack_vals.iter().enumerate() { 49 | stack[proc_top + i] = *val; 50 | } 51 | 52 | let mut task_table_lock = self.task_table.write(); 53 | 54 | let proc_lock = task_table_lock.add()?; 55 | { 56 | let mut process = proc_lock.write(); 57 | 58 | process.stack = Some(stack); 59 | process.name = name; 60 | 61 | // Create a new page table. This saves the address placed in cr3 after page table 62 | // creation for a context switch later on. 63 | process 64 | .ctx 65 | .set_page_table(unsafe { paging::ActivePageTable::new().address() }); 66 | 67 | // Set the stack pointer. 68 | process.ctx.set_stack(proc_sp); 69 | 70 | Ok(process.pid) 71 | } 72 | } 73 | 74 | /// Returns the PID of the current process. 75 | fn get_id(&self) -> ProcessId { 76 | ProcessId(self.current_pid.load(Ordering::SeqCst)) 77 | } 78 | 79 | /// Kill the process. We do this by marking it as free in the task table. 80 | /// To free memory held by the process, we drop the String that holds the process name, 81 | /// and mark the Option stack as None - this causes the memory held by the Some() to be 82 | /// dropped. 83 | fn kill(&self, id: ProcessId) { 84 | { 85 | let task_table_lock = self.task_table.read(); 86 | let mut proc_lock = task_table_lock 87 | .get(id) 88 | .expect("Cannot kill a non-existent process") 89 | .write(); 90 | 91 | proc_lock.set_state(State::Free); 92 | proc_lock.stack = None; 93 | drop(&mut proc_lock.name); 94 | } 95 | 96 | unsafe { 97 | self.resched(); 98 | } 99 | } 100 | 101 | /// Mark a process as ready which enables it to be ran under resched(). 102 | fn ready(&self, id: ProcessId) { 103 | self.ready_list.write().push_back(id); 104 | } 105 | 106 | /// Perform a context switch to the new process. This method will deadlock if any software 107 | /// locks are still held - it is therefore important to scope locking of data structures to 108 | /// ensure that these locks will be dropped. 109 | unsafe fn resched(&self) { 110 | { 111 | if self.ready_list.read().is_empty() { 112 | return; 113 | } 114 | } 115 | 116 | let mut prev_ptr = 0 as *mut Process; 117 | let mut next_ptr = 0 as *mut Process; 118 | 119 | // Separate the locks from the context switch through scoping 120 | { 121 | let task_table_lock = self.task_table.read(); 122 | let mut ready_list_lock = self.ready_list.write(); 123 | 124 | let curr_id: ProcessId = self.get_id(); 125 | 126 | let mut prev = task_table_lock 127 | .get(curr_id) 128 | .expect("Could not find old process") 129 | .write(); 130 | 131 | if prev.state == State::Current { 132 | prev.set_state(State::Ready); 133 | ready_list_lock.push_back(curr_id); 134 | } 135 | 136 | if let Some(next_id) = ready_list_lock.pop_front() { 137 | if next_id != self.get_id() { 138 | let mut next = task_table_lock 139 | .get(next_id) 140 | .expect("Could not find new process") 141 | .write(); 142 | 143 | next.set_state(State::Current); 144 | 145 | self.current_pid.store(next.pid.inner(), Ordering::SeqCst); 146 | 147 | // Save process pointers for out of scope context switch 148 | prev_ptr = prev.deref_mut() as *mut Process; 149 | next_ptr = next.deref_mut() as *mut Process; 150 | } 151 | } 152 | } 153 | 154 | if next_ptr as usize != 0 { 155 | assert!( 156 | prev_ptr as usize != 0, 157 | "Pointer to new proc has not been set!" 
158 | ); 159 | 160 | let prev: &mut Process = &mut *prev_ptr; 161 | let next: &mut Process = &mut *next_ptr; 162 | 163 | prev.ctx.switch_to(&mut next.ctx); 164 | } 165 | } 166 | } 167 | 168 | impl CoopScheduler { 169 | /// Initialise the cooperative scheduler. This sets the current PID as the null kernel process, 170 | /// and creates an empty task table and ready list. 171 | pub fn new() -> Self { 172 | CoopScheduler { 173 | current_pid: AtomicUsize::new(ProcessId::NULL_PROC.inner()), 174 | task_table: RwLock::new(ProcessList::new()), 175 | ready_list: RwLock::new(VecDeque::::new()), 176 | } 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /src/task/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod context; 2 | pub mod process; 3 | pub mod proc_list; 4 | pub mod coop_sched; 5 | 6 | use self::coop_sched as scheduler; 7 | 8 | pub use self::process::{Process, ProcessId, State}; 9 | pub use self::proc_list::ProcessList; 10 | pub use self::scheduler::Scheduler; 11 | use core::result::Result; 12 | use alloc::string::String; 13 | 14 | /// Methods a scheduler should impl. 15 | pub trait Scheduling { 16 | fn create(&self, func: extern "C" fn(), name: String) -> Result; 17 | fn get_id(&self) -> ProcessId; 18 | fn kill(&self, id: ProcessId); 19 | fn ready(&self, id: ProcessId); 20 | unsafe fn resched(&self); 21 | } 22 | 23 | /// Max no. of processes we can handle. 24 | pub const MAX_PROCS: usize = usize::max_value() - 1; 25 | 26 | /// Initial size of vector stack. 27 | pub const INITIAL_STACK: usize = 1024; 28 | 29 | lazy_static! { 30 | /// Global kernel scheduler. 31 | pub static ref SCHEDULER: Scheduler = Scheduler::new(); 32 | } 33 | -------------------------------------------------------------------------------- /src/task/proc_list.rs: -------------------------------------------------------------------------------- 1 | use alloc::btree_map::{self, BTreeMap}; 2 | use alloc::vec::Vec; 3 | use alloc::arc::Arc; 4 | use core::result::Result; 5 | use spin::RwLock; 6 | use task::{Process, ProcessId, State}; 7 | 8 | /// System task table. 9 | pub struct ProcessList { 10 | /// Each entry is a PID attached to a locked process. 11 | procs: BTreeMap>>, 12 | /// The next process in the list. 13 | next: usize, 14 | } 15 | 16 | impl ProcessList { 17 | /// Create an initial `ProcessList`. 18 | pub fn new() -> Self { 19 | let mut list: BTreeMap>> = BTreeMap::new(); 20 | 21 | // The inital kernel thread, with pid 0. 22 | let mut null_proc: Process = Process::new(ProcessId::NULL_PROC); 23 | null_proc.state = State::Current; 24 | null_proc.stack = Some(Vec::new()); 25 | 26 | // Insert this process into the list. 27 | list.insert(ProcessId::NULL_PROC, Arc::new(RwLock::new(null_proc))); 28 | 29 | ProcessList { 30 | procs: list, 31 | next: 1, 32 | } 33 | } 34 | 35 | /// Retrieve the given process from the task table. Returns None if the passed PID does not 36 | /// exist. 37 | pub fn get(&self, id: ProcessId) -> Option<&Arc>> { 38 | self.procs.get(&id) 39 | } 40 | 41 | /// Transform process collection into iterator. 42 | pub fn iter(&self) -> btree_map::Iter>> { 43 | self.procs.iter() 44 | } 45 | 46 | /// Add a process to the task table. 47 | pub fn add(&mut self) -> Result<&Arc>, i16> { 48 | // Reset search if we're at the end of the table. 
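// PID allocation is a linear probe starting at `next`: PID 0 is reserved for the null
// kernel process, so the search restarts from 1 and skips over any PIDs still present in
// the table.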
49 | if self.next >= super::MAX_PROCS { 50 | self.next = 1; 51 | } 52 | 53 | while self.procs.contains_key(&ProcessId(self.next)) { 54 | self.next += 1; 55 | } 56 | 57 | if self.next >= super::MAX_PROCS { 58 | Err(-1) 59 | } else { 60 | let id: ProcessId = ProcessId(self.next); 61 | self.next += 1; 62 | 63 | assert!( 64 | self.procs 65 | .insert(id, Arc::new(RwLock::new(Process::new(id)))) 66 | .is_none(), 67 | "Process already exists" 68 | ); 69 | 70 | Ok(self.procs.get(&id).expect("Failed to add new process.")) 71 | } 72 | } 73 | 74 | /// Remove a process from the task table. 75 | pub fn remove(&mut self, id: ProcessId) -> Option>> { 76 | self.procs.remove(&id) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/task/process.rs: -------------------------------------------------------------------------------- 1 | use alloc::string::String; 2 | use alloc::vec::Vec; 3 | use task::context::Context; 4 | 5 | #[derive(Clone, Debug, Eq, PartialEq)] 6 | /// Current state of the process. 7 | pub enum State { 8 | /// Process is free. 9 | Free, 10 | /// Process is the current process. 11 | Current, 12 | /// Process has been stopped. 13 | Suspended, 14 | /// Process is ready to be ran by the scheduler. 15 | Ready, 16 | } 17 | 18 | #[derive(Clone, Debug)] 19 | /// Process priority. 20 | pub struct Priority(pub u64); 21 | 22 | #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] 23 | /// Tuple type for PID. 24 | pub struct ProcessId(pub usize); 25 | 26 | impl ProcessId { 27 | /// Null kernel process. 28 | pub const NULL_PROC: ProcessId = ProcessId(0); 29 | 30 | pub fn inner(&self) -> usize { 31 | self.0 32 | } 33 | } 34 | 35 | #[derive(Clone, Debug)] 36 | /// A single process on the system. 37 | /// It has register context, id, name and an Optional process stack. 38 | pub struct Process { 39 | pub pid: ProcessId, 40 | pub name: String, 41 | pub state: State, 42 | pub priority: Priority, 43 | pub ctx: Context, 44 | pub stack: Option>, 45 | } 46 | 47 | impl Process { 48 | pub fn new(id: ProcessId) -> Self { 49 | Process { 50 | pid: id, 51 | name: String::from("new_proc"), 52 | state: State::Suspended, 53 | priority: Priority(0), 54 | ctx: Context::new(), 55 | stack: None, 56 | } 57 | } 58 | 59 | /// Set the state of the process. 60 | pub fn set_state(&mut self, new: State) { 61 | self.state = new; 62 | } 63 | 64 | /// Set `cr3` to point to the address specified by `addr`. 65 | pub fn set_page_table(&mut self, addr: usize) { 66 | self.ctx.set_page_table(addr); 67 | } 68 | 69 | /// Set the stack pointer register. 70 | pub fn set_stack(&mut self, addr: usize) { 71 | self.ctx.set_stack(addr); 72 | } 73 | } 74 | 75 | ///A returned process pops an instruction pointer off the stack then jumps to it. 76 | /// The IP from the stack will point to this function. 77 | #[naked] 78 | pub unsafe extern "C" fn process_return() { 79 | use task::Scheduling; 80 | use alloc::boxed::Box; 81 | 82 | // Pop a pointer to the self object off the stack. 
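// This is the third value pushed by `CoopScheduler::create()` (`Box::into_raw(self_ptr)`);
// rebuilding the Box with `Box::from_raw` below gives back a usable `&Scheduling` handle
// for killing the finished process.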
83 | let scheduler_ptr: *mut &Scheduling; 84 | asm!("pop $0" : "=r"(scheduler_ptr) : : "memory" : "intel", "volatile"); 85 | 86 | let scheduler = Box::from_raw(scheduler_ptr); 87 | 88 | let current: ProcessId = scheduler.get_id(); 89 | 90 | // Process returned, we kill it 91 | scheduler.kill(current); 92 | } 93 | -------------------------------------------------------------------------------- /x86_64-lambda.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "x86_64-unknown-none", 3 | "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128", 4 | "linker-flavor": "gcc", 5 | "target-endian": "little", 6 | "target-pointer-width": "64", 7 | "target-c-int-width": "32", 8 | "arch": "x86_64", 9 | "os": "none", 10 | "disable-redzone": true, 11 | "features": "-mmx,-sse,+soft-float", 12 | "panic-strategy": "abort" 13 | } 14 | --------------------------------------------------------------------------------