├── Makefile
├── biblio.bib
├── style
│   └── template.tex
└── state_harmful.md
/Makefile: -------------------------------------------------------------------------------- 1 | TITLE := state_harmful 2 | 3 | all: $(TITLE).pdf 4 | 5 | $(TITLE).pdf: $(TITLE).tex 6 | pdflatex -interaction batchmode -halt-on-error $(TITLE) && \ 7 | bibtex $(TITLE) && \ 8 | pdflatex -interaction batchmode -halt-on-error $(TITLE) && \ 9 | pdflatex -interaction batchmode -halt-on-error $(TITLE) 10 | 11 | $(TITLE).tex: $(TITLE).md biblio.bib style/template.tex 12 | pandoc $(TITLE).md \ 13 | -f markdown+footnotes \ 14 | --toc \ 15 | --biblio biblio.bib \ 16 | --natbib \ 17 | --template style/template.tex \ 18 | -V documentclass=report \ 19 | -V papersize=a4paper \ 20 | -V fontsize=12pt \ 21 | -V biblio-style=plain \ 22 | -o $(TITLE).tex 23 | 24 | clean: 25 | rm -f $(TITLE).tex $(TITLE).aux $(TITLE).log $(TITLE).toc $(TITLE).bbl $(TITLE).blg $(TITLE).out 26 | 27 | clean-all: clean 28 | rm -f $(TITLE).pdf 29 | -------------------------------------------------------------------------------- /biblio.bib: -------------------------------------------------------------------------------- 1 | % HW 2 | 3 | @MISC { usbarmory_internal_bootrom, 4 | author = "Andrea Barisani", 5 | title = "Internal Boot {ROM}", 6 | howpublished = "USB Armory Wiki, \url{https://github.com/inversepath/usbarmory/wiki/Internal-Boot-ROM}" 7 | } 8 | 9 | @MISC { chromium_ec, 10 | author = "{Google Chromium Project}", 11 | title = "Chromium Embedded Controller ({EC}) Development", 12 | howpublished = "\url{https://www.chromium.org/chromium-os/ec-development}", 13 | } 14 | 15 | @MISC { olpc_kb3930_datasheet, 16 | author = "{ENE} Technology Inc.", 17 | title = "{KB3930} for {OLPC} Keyboard Controller Data Sheet", 18 | howpublished = "\url{http://dev.laptop.org/~rsmith/KB3930_OLPC_v02_20100503.pdf}", 19 | } 20 | 21 | @MISC{ x86_harmful, 22 | author = "Joanna Rutkowska", 23 | title = "Intel x86 considered harmful", 24 | howpublished = "\url{http://blog.invisiblethings.org/papers/2015/x86_harmful.pdf}", 25 | year = 2015, 26 | } 27 | 28 | @MISC{ openssd_project, 29 | author = "{The OpenSSD Project}", 30 | title = "{OpenSSD} Wiki", 31 | howpublished = "\url{http://www.openssd-project.org/wiki/The_OpenSSD_Project}", 32 | } 33 | 34 | @MISC{ thegrugq_portal, 35 | author = "thaddeus t. 
grugq", 36 | title = "{P.O.R.T.A.L.}: Personal Onion Router To Assure Liberty", 37 | howpublished = "\url{https://github.com/grugq/portal}", 38 | year = 2012, 39 | } 40 | 41 | @MISC{ appelbaum_technical_action_plan, 42 | author = "Jacob Appelbaum", 43 | title = "A technical action plan", 44 | howpublished = "Video archives for {Security in Times of Surveillance} conference, \url{https://projectbullrun.org/surveillance/2015/video-2015.html#appelbaum}", 45 | year = 2015, 46 | } 47 | 48 | @MISC{ wikipedia_arm_arch, 49 | author = "Wikipedia", 50 | title = "{ARM} architecture", 51 | howpublished = "\url{https://en.wikipedia.org/wiki/ARM_architecture}", 52 | } 53 | 54 | @MISC{ lowrisc, 55 | title = "The {lowRISC} project", 56 | howpublished = "\url{http://www.lowrisc.org/}", 57 | } 58 | 59 | @MISC{ Opf, 60 | title = "Open Processor Foundation", 61 | howpublished = "\url{http://0pf.org/}", 62 | } 63 | 64 | @MISC{ genode_armvirt, 65 | author = "Genode developers", 66 | title = "An in-depth look into the {ARM} virtualization extensions", 67 | howpublished = "\url{http://genode.org/documentation/articles/arm_virtualization}", 68 | year = 2015, 69 | } 70 | 71 | % Operating Systems 72 | 73 | @MISC{ tails, 74 | author = "The {TAILS} Project", 75 | title = "{Tails: The Amnesic Incognito Live System}", 76 | howpublished = "\url{https://tails.boum.org/}", 77 | } 78 | 79 | @MISC{ qubes_os, 80 | author = "{The Qubes OS Project}", 81 | title = "{Qubes OS}: A reasonably secure desktop OS", 82 | howpublished = "\url{https://qubes-os.org}", 83 | } 84 | 85 | @MISC{ qubes_arch, 86 | author = "Joanna Rutkowska and Rafał Wojtczuk", 87 | title = "{Qubes OS} Architecture", 88 | howpublished = "\url{http://files.qubes-os.org/files/doc/arch-spec-0.3.pdf}", 89 | year = 2010, 90 | } 91 | 92 | @MISC{ qubes_compartmentalization, 93 | author = "Joanna Rutkowska", 94 | title = "Software compartmentalization vs. physical separation (Or why {Qubes OS} is more than just a random collection of {VMs})", 95 | howpublished = "\url{http://invisiblethingslab.com/resources/2014/Software_compartmentalization_vs_physical_separation.pdf}", 96 | year = 2014, 97 | } 98 | 99 | @MISC{ coreboot, 100 | author = "{The coreboot project}", 101 | title = "{coreboot}: fast and flexible Open Source firmware", 102 | howpublished = "\url{http://coreboot.org/}", 103 | } 104 | 105 | 106 | % Attacks & weaknesses analysis 107 | 108 | @MISC { coldboot_attack, 109 | author = "J. Alex Halderman and Seth D. Schoen and Nadia Heninger and William Clarkson and William Paul and Joseph A. Calandrino and Ariel J. Feldman and Jacob Appelbaum and Edward W. Felten", 110 | title = "Lest We Remember: Cold Boot Attacks on Encryption Keys", 111 | howpublished = "In Proc. 
2008 USENIX Security Symposium, \url{https://citp.princeton.edu/research/memory/}", 112 | year = "2008" 113 | } 114 | 115 | @MISC{ itl_evil_maid_attack, 116 | author = "Joanna Rutkowska and Alexander Tereshkin", 117 | title = "{Evil Maid} goes after {TrueCrypt}!", 118 | howpublished = "The Invisible Things Blog, \url{http://blog.invisiblethings.org/2009/10/15/evil-maid-goes-after-truecrypt.html}", 119 | year = 2009, 120 | } 121 | 122 | @MISC{ rutkowska_cpu_backdoors_thoughts, 123 | author = "Joanna Rutkowska", 124 | title = "More Thoughts on {CPU} backdoors", 125 | howpublished = "The Invisible Things Blog, \url{http://blog.invisiblethings.org/2009/06/01/more-thoughts-on-cpu-backdoors.html}", 126 | year = 2009, 127 | } 128 | 129 | @MISC{ rutkowska_nushu, 130 | author = "Joanna Rutkowska", 131 | title = "{Nushu}: Passive Covert Channels Implementation in {Linux} Kernel", 132 | howpublished = "Presented at the Chaos Communication Congress, \url{https://events.ccc.de/congress/2004/fahrplan/files/319-passive-covert-channels-slides.pdf}", 133 | year = 2004, 134 | } 135 | 136 | % vim: set nospell nowrap cc=0 : 137 | -------------------------------------------------------------------------------- /style/template.tex: -------------------------------------------------------------------------------- 1 | \documentclass[$if(fontsize)$$fontsize$,$endif$$if(lang)$$lang$,$endif$$if(papersize)$$papersize$,$endif$$for(classoption)$$classoption$$sep$,$endfor$]{$documentclass$} 2 | $if(fontfamily)$ 3 | \usepackage{$fontfamily$} 4 | $else$ 5 | \usepackage{lmodern} 6 | $endif$ 7 | \renewcommand{\familydefault}{\sfdefault} 8 | $if(linestretch)$ 9 | \usepackage{setspace} 10 | \setstretch{$linestretch$} 11 | $endif$ 12 | \usepackage{amssymb,amsmath} 13 | \usepackage{ifxetex,ifluatex} 14 | \usepackage{fixltx2e} % provides \textsubscript 15 | \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex 16 | \usepackage[T1]{fontenc} 17 | \usepackage[utf8]{inputenc} 18 | $if(euro)$ 19 | \usepackage{eurosym} 20 | $endif$ 21 | \else % if luatex or xelatex 22 | \ifxetex 23 | \usepackage{mathspec} 24 | \usepackage{xltxtra,xunicode} 25 | \else 26 | \usepackage{fontspec} 27 | \fi 28 | \defaultfontfeatures{Mapping=tex-text,Scale=MatchLowercase} 29 | \newcommand{\euro}{€} 30 | $if(mainfont)$ 31 | \setmainfont{$mainfont$} 32 | $endif$ 33 | $if(sansfont)$ 34 | \setsansfont{$sansfont$} 35 | $endif$ 36 | $if(monofont)$ 37 | \setmonofont[Mapping=tex-ansi]{$monofont$} 38 | $endif$ 39 | $if(mathfont)$ 40 | \setmathfont(Digits,Latin,Greek){$mathfont$} 41 | $endif$ 42 | \fi 43 | % use upquote if available, for straight quotes in verbatim environments 44 | \IfFileExists{upquote.sty}{\usepackage{upquote}}{} 45 | % use microtype if available 46 | \IfFileExists{microtype.sty}{% 47 | \usepackage{microtype} 48 | \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts 49 | }{} 50 | $if(geometry)$ 51 | \usepackage[$for(geometry)$$geometry$$sep$,$endfor$]{geometry} 52 | $endif$ 53 | \ifxetex 54 | \usepackage[setpagesize=false, % page size defined by xetex 55 | unicode=false, % unicode breaks when used with xetex 56 | xetex]{hyperref} 57 | \else 58 | \usepackage[unicode=true]{hyperref} 59 | \fi 60 | \hypersetup{breaklinks=true, 61 | bookmarks=true, 62 | pdfauthor={$author-meta$}, 63 | pdftitle={$title-meta$}, 64 | colorlinks=true, 65 | citecolor=$if(citecolor)$$citecolor$$else$blue$endif$, 66 | urlcolor=$if(urlcolor)$$urlcolor$$else$blue$endif$, 67 | linkcolor=$if(linkcolor)$$linkcolor$$else$magenta$endif$, 68 | pdfborder={0 0 0}} 69 | % 
\urlstyle{same} % don't use monospace font for urls 70 | \usepackage{fancyhdr} 71 | \pagestyle{fancy} 72 | \pagenumbering{arabic} 73 | \lhead{\itshape $title$} 74 | \chead{} 75 | \rhead{\itshape{\nouppercase{\leftmark}}} 76 | \cfoot{} 77 | \rfoot{\thepage} 78 | $if(lang)$ 79 | \ifxetex 80 | \usepackage{polyglossia} 81 | \setmainlanguage{$mainlang$} 82 | \setotherlanguages{$for(otherlang)$$otherlang$$sep$,$endfor$} 83 | \else 84 | \usepackage[shorthands=off,$lang$]{babel} 85 | \fi 86 | $endif$ 87 | $if(natbib)$ 88 | \usepackage{natbib} 89 | \bibliographystyle{$if(biblio-style)$$biblio-style$$else$plainnat$endif$} 90 | $endif$ 91 | $if(biblatex)$ 92 | \usepackage{biblatex} 93 | $for(bibliography)$ 94 | \addbibresource{$bibliography$} 95 | $endfor$ 96 | $endif$ 97 | $if(listings)$ 98 | \usepackage{listings} 99 | $endif$ 100 | $if(lhs)$ 101 | \lstnewenvironment{code}{\lstset{language=Haskell,basicstyle=\small\ttfamily}}{} 102 | $endif$ 103 | $if(highlighting-macros)$ 104 | $highlighting-macros$ 105 | $endif$ 106 | $if(verbatim-in-note)$ 107 | \usepackage{fancyvrb} 108 | \VerbatimFootnotes 109 | $endif$ 110 | $if(tables)$ 111 | \usepackage{longtable,booktabs} 112 | $endif$ 113 | $if(graphics)$ 114 | \usepackage{graphicx,grffile} 115 | \makeatletter 116 | \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} 117 | \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} 118 | \makeatother 119 | % Scale images if necessary, so that they will not overflow the page 120 | % margins by default, and it is still possible to overwrite the defaults 121 | % using explicit options in \includegraphics[width, height, ...]{} 122 | \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} 123 | $endif$ 124 | $if(links-as-notes)$ 125 | % Make links footnotes instead of hotlinks: 126 | \renewcommand{\href}[2]{#2\footnote{\url{#1}}} 127 | $endif$ 128 | $if(strikeout)$ 129 | \usepackage[normalem]{ulem} 130 | % avoid problems with \sout in headers with hyperref: 131 | \pdfstringdefDisableCommands{\renewcommand{\sout}{}} 132 | $endif$ 133 | \setlength{\parindent}{0pt} 134 | \setlength{\parskip}{6pt plus 2pt minus 1pt} 135 | \setlength{\emergencystretch}{3em} % prevent overfull lines 136 | \providecommand{\tightlist}{% 137 | \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} 138 | $if(numbersections)$ 139 | \setcounter{secnumdepth}{5} 140 | $else$ 141 | \setcounter{secnumdepth}{0} 142 | $endif$ 143 | $if(verbatim-in-note)$ 144 | \VerbatimFootnotes % allows verbatim text in footnotes 145 | $endif$ 146 | 147 | $if(title)$ 148 | \title{$title$$if(subtitle)$\\\vspace{0.5em}{\large $subtitle$}$endif$} 149 | $endif$ 150 | $if(author)$ 151 | \author{$for(author)$$author$$sep$ \and $endfor$} 152 | $endif$ 153 | \date{$date$} 154 | $for(header-includes)$ 155 | $header-includes$ 156 | $endfor$ 157 | 158 | % Redefines (sub)paragraphs to behave more like sections 159 | \ifx\paragraph\undefined\else 160 | \let\oldparagraph\paragraph 161 | \renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} 162 | \fi 163 | \ifx\subparagraph\undefined\else 164 | \let\oldsubparagraph\subparagraph 165 | \renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} 166 | \fi 167 | 168 | \begin{document} 169 | $if(title)$ 170 | \maketitle 171 | $endif$ 172 | $if(abstract)$ 173 | \begin{abstract} 174 | $abstract$ 175 | \end{abstract} 176 | $endif$ 177 | 178 | $if(version)$ 179 | Version: $version$ 180 | $endif$ 181 | 182 | $for(include-before)$ 183 | $include-before$ 184 | 185 | $endfor$ 186 
| $if(toc)$ 187 | { 188 | \hypersetup{linkcolor=$if(toccolor)$$toccolor$$else$black$endif$} 189 | \setcounter{tocdepth}{$toc-depth$} 190 | \tableofcontents 191 | } 192 | $endif$ 193 | $if(lot)$ 194 | \listoftables 195 | $endif$ 196 | $if(lof)$ 197 | \listoffigures 198 | $endif$ 199 | $body$ 200 | 201 | $if(natbib)$ 202 | $if(bibliography)$ 203 | $if(biblio-title)$ 204 | $if(book-class)$ 205 | \renewcommand\bibname{$biblio-title$} 206 | $else$ 207 | \renewcommand\refname{$biblio-title$} 208 | $endif$ 209 | $endif$ 210 | 211 | \renewcommand{\chapter}[2]{} 212 | \bibliography{$for(bibliography)$$bibliography$$sep$,$endfor$} 213 | 214 | $endif$ 215 | $endif$ 216 | $if(biblatex)$ 217 | \printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$ 218 | 219 | $endif$ 220 | $for(include-after)$ 221 | $include-after$ 222 | 223 | $endfor$ 224 | \end{document} 225 | -------------------------------------------------------------------------------- /state_harmful.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: State considered harmful 3 | subtitle: A proposal for a stateless laptop 4 | author: Joanna Rutkowska 5 | date: December 2015 6 | version: 1.0 7 | --- 8 | 9 | # Introduction 10 | 11 | Modern Intel x86-based endpoint systems, such as laptops, are plagued by a 12 | number of security-related problems. Additionally, with the recent introduction 13 | of the Intel Management Engine (ME) microcontroller into _all_ new Intel processors, 14 | the trustworthiness of the Intel platform has been seriously questioned. 15 | 16 | In a recently published paper [@x86_harmful] the author has presented an 17 | in-depth survey of these topics. In this paper the author proposes what she 18 | believes might be a reasonable, practical and relatively simple solution to most 19 | of these problems. 20 | 21 | The main principle introduced below is the requirement for the laptop hardware 22 | to be _stateless_, i.e. lacking any persistent storage. This includes having 23 | no firmware-carrying flash memory chips. All the state is to be kept on an 24 | external, trusted device. This trusted device is envisioned to be of a small USB 25 | stick or SD card form factor. 26 | 27 | This clean separation of state-carrying vs. stateless silicon is, however, only 28 | one of the requirements, itself not enough to address many of the problems 29 | discussed in the article referenced above. There are a number of additional 30 | requirements: for the endpoint (laptop) hardware, for the trusted "stick", and 31 | for the host OS. We discuss them in this paper. 32 | 33 | The author thinks the solution proposed here is not limited to solving the 34 | Intel-specific challenges and might also be useful for other, future platforms. 35 | 36 | Those readers who can't help but think that the (Intel) x86 is already a lost 37 | battle, and that we should be moving to other architectures, are advised to have 38 | a look at the end of the paper where such alternatives are quickly discussed, 39 | and then... potentially jump back here to continue reading. 40 | 41 | # State(-carrying) considered harmful 42 | 43 | There are several fundamental reasons why endpoint computing devices, such as 44 | laptops, without clearly defined separation of state-carrying elements are 45 | problematic: 46 | 47 | 1. 
The presence of persistent storage intermixed with the hardware makes it 48 | possible for the attacker to persist malware on the platform, without the 49 | user having any simple way of learning about it, or of removing it (e.g. via 50 | OS re-installation). 51 | 52 | 2. This also allows dishonest vendors, such as the OEMs or shipping agents, to 53 | deliver already infected hardware without the user being able to easily find 54 | out. The same applies to Evil Maid attacks, which might be executed by 55 | actors other than vendors. 56 | 57 | 3. The malware, once installed on the platform somehow, is given places to 58 | store secrets stolen from the user. This is especially worrisome in the 59 | context of disk encryption keys, which could be exfiltrated this way even on 60 | air-gapped machines.^[In such cases the leaked keys would become readily 61 | available to whoever has seized the laptop and is able to decrypt the 62 | backdoor-created data containing the keys.] 63 | 64 | 4. Finally, these state-carrying elements make it possible to identify platforms 65 | and ultimately their users, due to various personally identifiable 66 | information, such as MAC addresses for WiFi or BT, which make the hardware 67 | unique. 68 | 69 | It's worth stressing that modern computer architectures make it very hard, 70 | sometimes impossible, for the user to inspect what firmware has really been 71 | programmed into the flash memory on the platform[^inspecting_firmware]. This is 72 | especially true for any so-called tamper-proof chips. The use of such "secure" 73 | chips on endpoint computing devices should be avoided at all costs (see also the 74 | discussion at the end of the paper). Any tamper-proof electronics on client 75 | systems should be considered harmful to the user as they jeopardize any form of 76 | transparency or verification. 77 | 78 | [^inspecting_firmware]: Indeed, by merely asking a flash-hosting device, such as 79 | the SPI flash chip, or some other u-controller such as the one used on a NIC, to 80 | tell us what firmware it has inside, we can only get as trustworthy a response 81 | as the device itself or, even worse, only as trustworthy as the current firmware 82 | used to serve our request... A classic chicken-and-egg problem. 83 | 84 | # The Stateless Laptop 85 | 86 | In this chapter a vision for the stateless laptop is described. The author 87 | believes the clean separation of state introduced by these modifications would be 88 | attractive not just on current x86-based platforms, but also on any future 89 | platforms, be they based on ARM or any other processor architectures. 90 | 91 | Also discussed are additional modifications needed to make the laptop more 92 | trustworthy, or to state it in a more direct way: much less of a threat to its 93 | user. 94 | 95 | ## State-carrying elements on modern laptops 96 | 97 | We start by identifying the state-carrying (persistence-carrying) elements on a 98 | modern x86 laptop. These are: 99 | 100 | 1. The SPI flash chip carrying the BIOS, ME, and other firmware. 101 | 102 | 2. The Embedded Controller (EC), which is an OEM-specific microcontroller (uC), 103 | and which requires its own flash memory, implemented either 104 | inside the uC itself or as a discrete chip on the board (potentially shared 105 | with the SPI flash chip mentioned in the previous point). 106 | 107 | 3. Additional discrete devices, such as the WiFi or BT modules. 
Typically 108 | they would contain their own flash memories to hold their own firmware. 109 | 110 | 4. Finally, there is the hard disk.^[The disk should not be confused with the 111 | SATA controller, which is part of the processor package on the latest Intel 112 | models.] 113 | 114 | Notably, the above list does not contain the processor package, which includes 115 | the actual processor (CPU), and what was previously known as "the chipset", 116 | comprising the MCH (formerly the "northbridge"), and ICH/PCH (formerly the 117 | "southbridge"). Indeed, it seems that none of the modern processors are 118 | equipped with flash-able memory. The reason for this seems to lie in 119 | the limitations of the manufacturing technology used for modern processors. If 120 | it were otherwise, we would likely not see discrete SPI flash chips for holding 121 | the BIOS, ME, and other firmware on notebook motherboards anymore... 122 | 123 | It's worth mentioning, though, that Intel processor packages still contain a 124 | residual form of persistent state storage: so-called fuses. It's unclear to the 125 | author whether it's possible for the processor itself to blow its own fuses.^[More 126 | specifically: for either the instructions running on the host CPU, or those 127 | running on the ME processor, to blow the fuses.] Even if that were possible, 128 | however, the usefulness of this form of state storage to an attacker would be 129 | very limited: it could potentially only be used once, and only 130 | for storing very short secrets. Notably, it doesn't seem it could be used for 131 | platform re-infection.^[Although it could be used to implement replay protection 132 | for hypothetical CPU-based backdoors, as discussed in 133 | [@rutkowska_cpu_backdoors_thoughts].] 134 | 135 | Because we don't have any control over the processor package, i.e. we must 136 | accept it the way it is, at least if we want to build an x86 laptop today, and 137 | also because of the limitations mentioned above, we will treat the processor 138 | package as a stateless element in the rest of this paper. Nevertheless, it would 139 | be desirable if processor vendors used a fuse-implementation 140 | technology that made it impossible for the processor itself to 141 | blow its own fuses. 142 | 143 | Let us now discuss what we could potentially do about all the above-mentioned 144 | state-carrying elements. 145 | 146 | ## The SPI flash chip 147 | 148 | The platform's firmware-carrying flash chip (the SPI flash, as it's often called) 149 | presents the biggest challenge for us, the stateless laptop proponents. The SPI 150 | flash chip serves several crucial roles on modern Intel x86 laptops: 151 | 152 | 1. It provides the firmware to the Intel ME processor. Failure to do so would, 153 | most likely, result in the platform shutting down.^[While there is no clear 154 | official statement about this in the Intel platform specs, it's considered 155 | tribal knowledge among many experts.] 156 | 157 | 2. It provides the BIOS firmware. Failure to provide a valid BIOS firmware would 158 | render the platform unbootable. 159 | 160 | 3. It provides the firmware to some of the integrated devices, such as the Ethernet 161 | network controller, and potentially other devices. Also, it might provide 162 | some of the crucial personally identifiable information, such as the MAC 163 | address(es) to be used by the networking device(s). 164 | 165 | 4. 
Additionally, the flash chip serves as a storage space for various persistent 166 | platform configuration settings, for both the BIOS and the Intel ME. 167 | 168 | The general idea is to remove the SPI flash chip from the motherboard, and route 169 | the wiring to one of the external ports, such as either a standard SD or a USB 170 | port, or perhaps even to a custom connector[^custom_connector]. A Trusted Stick 171 | (discussed in the next chapter) would then be plugged into this port before the 172 | platform boots, and would deliver all the firmware requested by 173 | the processor, as well as other firmware and, optionally, all the software for 174 | the platform. 175 | 176 | [^custom_connector]: While the use of a custom connector might increase the 177 | manufacturing cost of a Stateless Laptop, it might have some advantages related to 178 | usability (a clear indication to the user where to plug the Trusted Stick), and 179 | messaging ("This laptop is _supposed_ to be implementing the stateless laptop 180 | standard"). 181 | 182 | One problem is that when the system wants to read the ME or the BIOS firmware, 183 | none of the devices, not even the DRAM, is initialized yet. This 184 | means we cannot easily use e.g. a USB controller, and consequently a USB "stick", 185 | to provide the firmware at this stage. What we can do, however, is to reuse some 186 | of the pins in a USB port for the purpose of passing the SPI connections to our 187 | Trusted Stick. Ideally these could be multiplexed with the original USB port 188 | connections, so that after the platform boot is complete, the USB port could be 189 | used as a fully featured USB port.^[Without dynamic multiplexing of these extra 190 | signals, we would need to downgrade a USB 3.0 port to USB 2.0, as we would 191 | likely need to use the 4 "Super Speed" signals to pipe SPI over them.] 192 | 193 | In either case, the goal is to relocate the SPI flash element from the 194 | motherboard -- where it can be neither properly protected (e.g. against 195 | software-based reflashing attacks, or physical Evil Maid attacks) nor reliably 196 | verified by the user. By relocating it to the Trusted Stick, we 197 | 198 | 1. provide a reliable way to enforce the read-only property of (select) firmware 199 | partitions, 200 | 201 | 2. allow the user to reliably inspect its content, perhaps using some other 202 | (more trusted) machine, 203 | 204 | 3. also allow the user to reliably write content to the stick (e.g. an image of 205 | a trustworthy BIOS the user decides to use). 206 | 207 | 208 | ## The Embedded Controller's flash memory 209 | 210 | The Embedded Controller (which should not be confused with the Intel ME) is a little 211 | auxiliary, discrete microcontroller connected to the chipset through an LPC 212 | bus. It is responsible for 1) keyboard operation, 2) thermal management, 3) 213 | battery charging control, and 4) various other OEM-specific things, such as LEDs, 214 | custom switches, etc. More discussion of how this uC might compromise the 215 | platform's safety has been provided in [@x86_harmful]. 216 | 217 | In this paper, however, we're more concerned with the fact that the uC 218 | used to implement the EC would typically also contain an internal flash memory (e.g. 
219 | see [@chromium_ec]), making it a state-carrying element on the platform, 220 | something we would like to avoid.^[Admittedly the EC, no matter how evil the 221 | firmware it executes, would not be able to interfere with the platform boot 222 | sequence, and thus would not be able to compromise the system or any other 223 | software execution directly. However, as already discussed in [@x86_harmful], 224 | the EC might pull off a few other, more subtle attacks, such as injecting 225 | keystrokes that could trigger actions that might be just as fatal. Or, as one 226 | of the reviewers noted, it might pretend the system is off when it really is not, 227 | which might be problematic e.g. when switching between supposedly separate boot 228 | environments, or trying to prevent potential Cold Boot attacks.] 229 | 230 | We would thus like to use a uC without a built-in flash-able memory, one that 231 | expects the firmware for execution to be provided by an external chip. One 232 | example of such a chip is the one used by the OLPC and Purism laptops 233 | [@olpc_kb3930_datasheet]. 234 | 235 | ## The hard disk 236 | 237 | The internal hard disk is an obvious device capable of storing 238 | state. In fact, this is the very reason disks are made. 239 | 240 | What might be less obvious, though, is that disks typically contain their own uC 241 | with their own internal flash-able memory. This naturally breaks the stateless 242 | requirement for the platform even further... 243 | 244 | Also, due to potentially backdoored firmware, or just due to how modern 245 | solid-state disks work (wear-leveling mechanisms), some information stored 246 | directly on the disk by malware (such as the stolen user disk encryption key) 247 | might not be easy for the user to wipe using traditional disk shredding methods. 248 | 249 | There are two ways to solve the above problems: 250 | 251 | 1. Get rid of the hard disk, and rely on external storage only (perhaps also 252 | implemented on the Trusted Stick) connected e.g. through USB or SD protocols. 253 | Of course, this solution is neither elegant nor convenient. Also, an internal 254 | disk will always excel in terms of speed and capacity for a given cost. 255 | 256 | 2. Use an internal disk with _trusted_ firmware satisfying the requirements 257 | discussed in the next section. 258 | 259 | ## The trusted internal hard disk requirements 260 | 261 | The first requirement for using an internal disk would be for it to be flash-less, 262 | of course. The disk uC would need to obtain its firmware from the Trusted Stick, 263 | just like the EC described above is expected to do. 264 | 265 | Additionally, the firmware that would power the disk would need to be _trusted_ 266 | (this is in contrast to e.g. the ME firmware, which we do not assume to be 267 | trusted!). Trusted to do a few things: 268 | 269 | 1. Implement reliable read-only protection for select partitions on the disk 270 | (e.g. those containing the /boot and root filesystems), 271 | 272 | 2. Implement reliable transparent encryption for anything that is ever written 273 | to the disk. In other words, it should make it impossible (e.g. for malware in the 274 | ME or on the host) to store anything on the disk that would not be encrypted 275 | with a _user_-controllable key. This requirement has an added advantage: 276 | wiping all the user data on the disk can be implemented by simply throwing 277 | away the encryption key, something that could be done very quickly and 278 | easily (see the sketch below). 
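To make requirement 2 concrete, here is a minimal sketch of the per-sector transparent encryption the disk uC could perform. It is written against OpenSSL's EVP API purely for illustration (a real disk controller would more likely drive a hardware AES-XTS engine), and all names are illustrative rather than taken from any existing disk firmware:

```c
#include <openssl/evp.h>
#include <stdint.h>
#include <string.h>

#define SECTOR_SIZE 4096

/* 512-bit AES-256-XTS key, held only in the disk controller's RAM;
 * wiping all user data amounts to erasing these 64 bytes. */
static uint8_t sector_key[64];

/* Encrypt one logical sector before it ever touches the flash. */
int sector_encrypt(uint64_t lba, const uint8_t *plain, uint8_t *cipher)
{
    uint8_t tweak[16] = {0};
    int len = 0, ok = 0;

    memcpy(tweak, &lba, sizeof lba);   /* sector number as the XTS tweak */

    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
    if (ctx &&
        EVP_EncryptInit_ex(ctx, EVP_aes_256_xts(), NULL, sector_key, tweak) &&
        EVP_EncryptUpdate(ctx, cipher, &len, plain, SECTOR_SIZE))
        ok = 1;
    EVP_CIPHER_CTX_free(ctx);
    return ok ? 0 : -1;
}
```

Because `sector_key` would live only in the controller's RAM (provisioned from the Trusted Stick at boot), the quick-wipe property falls out for free: erase the key, and every sector on the flash becomes unreadable ciphertext.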
279 | 280 | The above requirements demand, in practice, that the disk hardware be of an 281 | open-hardware design, running open firmware. Fortunately, it appears significant 282 | work has already been done in this area [@openssd_project], which should be a 283 | good starting point. 284 | 285 | It should be reiterated here that the requirement for the trusted internal disk is an 286 | optional one, and it is envisioned that, meanwhile, an external disk could be used, 287 | ideally integrated into the Trusted Stick. 288 | 289 | ## Other discrete elements 290 | 291 | Occasionally there might be additional discrete devices on the laptop, such as a 292 | discrete GPU. Such devices will likely come with their own internal flash 293 | memory, thus breaking the stateless principle. In most cases these discrete 294 | devices would also be bus-mastering devices (capable of issuing DMA to host 295 | memory), which means they could not only be used as secret storage, but also 296 | interfere with the platform boot process if it is not properly secured against 297 | DMA from devices.^[The author is not aware of any BIOS implementation that 298 | would actively try to protect itself against potential DMA attacks 299 | originating from devices during boot, especially early boot.] 300 | 301 | It's thus best to ensure no discrete devices are present on the laptop, 302 | especially no discrete GPUs. We talk more about the discrete wireless cards, 303 | such as WiFi and cellular modem, below.^[One of the reviewers also pointed to 304 | the battery as a potential element containing an embedded microcontroller with its own 305 | flash memory. Needless to say, such "smart batteries" should be avoided, and all 306 | the charging/monitoring logic implemented by the EC, or using "dumb" electronics 307 | without persistent state storage.] 308 | 309 | ## Other modifications to the laptop 310 | 311 | In addition to removing persistent state-carrying elements from the laptop, 312 | there are also a few other minor, yet important, modifications that are needed 313 | to assure the laptop is not harmful to the user. We discuss these below. 314 | 315 | ## The wireless devices 316 | 317 | All the wireless devices (WiFi, BT, 3G/LTE modems, etc.) deserve special 318 | consideration (even if they do not have their own flash memory) because they 319 | provide a very convenient way for the malware that runs on the platform (e.g. in 320 | the ME or SMM, or even on the host OS) to leak information using a wireless 321 | channel (so, a channel very difficult to block or notice). This could happen 322 | irrespective of whether the user decided to consciously enable and use the 323 | actual device or not (e.g. turned on WiFi in the host OS and connected to a WiFi 324 | network). 325 | 326 | Additionally, any wireless device could be used to gather information about the 327 | user's surroundings, such as the list of active WiFi networks (SSIDs) or the 328 | MAC addresses of nearby BT devices. 329 | 330 | Admittedly though, most such exfiltration channels would require the attacker to 331 | be physically close to the user's laptop, so for some users this might 332 | not be a realistic threat^[Although the adversary might use e.g. the user's 333 | phone as a relaying device.]. Notably, with one exception -- if the malware 334 | managed to interpose on legitimate traffic generated by the user, e.g. 
by 335 | finding and modifying network buffers in the host memory, it might then easily 336 | leak the stolen secrets at least to the user's ISP, or, with some luck, to 337 | whatever server on the Internet the user chose to connect to. We 338 | discuss this problem as well as potential countermeasures later in this paper. 339 | 340 | Similarly, not every user would be concerned about their physical location being 341 | leaked (through the information sniffed by the wireless devices). But for those 342 | who care, a mechanism is needed to prevent this from happening. 343 | 344 | The easiest way to address all the above-mentioned problems is to fit a physical 345 | kill switch for each (or all) of the wireless devices. Care should be taken for 346 | the switch(es) to control the actual power supply wires to the devices, rather 347 | than merely asking the devices to disable themselves, a request which a 348 | malicious device (or one with infected firmware) might simply ignore. 349 | 350 | Of course, physical kill switches are not an elegant solution, as in most cases 351 | the user would like to have some form of wireless connectivity. After all, there 352 | is a reason we want to have these networking devices in the first place... As 353 | mentioned, we will consider this problem in more detail later in this paper. For 354 | now, suffice it to say that it would be beneficial to either: 1) not have any 355 | internal WiFi or BT card, or 2) have a simple networking proxy implemented on an 356 | external (trusted) uC, not directly connected to the host processor. 357 | 358 | It should be pointed out, for completeness, that a GPS receiver (if fitted to the 359 | laptop), while a one-way radio device, should also be fitted with a kill switch, 360 | for the reasons discussed above. 361 | 362 | ## The audio and camera devices 363 | 364 | The audio and video (camera) devices can compromise the user's privacy similarly to 365 | the wireless devices discussed above. In addition to the obvious threats posed 366 | by these devices, it's perhaps worth mentioning the possibility of using the mic 367 | and the camera not only to sniff the conversations in the room where the laptop 368 | is kept, but also to allow the attacker to capture the user's disk and login 369 | passwords. Also, it seems possible, in theory at least, for the malware to use 370 | the speakers to communicate with other devices (such as the user's phone or even 371 | an internet-connected TV) in order to exfiltrate some low-bandwidth information 372 | (e.g. a disk decryption key stolen from host DRAM or registers). 373 | 374 | For this reason it seems only reasonable to put all the audio and video devices 375 | behind physical kill switches, just like it was recommended for all the wireless 376 | ones. Again, it should be stressed that the physical switches should cut the 377 | actual power or signal lines to the devices, accounting for potentially 378 | misbehaving ones. 379 | 380 | ## Volatile memory quick wiping 381 | 382 | Finally, one additional aspect of building a stateless laptop is to account for 383 | all the state accumulated in the _volatile_ memory, specifically DRAM and the 384 | processor-internal SRAM used by the ME. Even though we're talking about volatile 385 | memory, it's a well-known fact that residual information might remain there for a 386 | surprisingly long time [@coldboot_attack]. 
Additionally, the ME internal memory 387 | (SRAM) is believed to remain powered even in the normal platform shutdown 388 | state, as the ME is still in operation then, albeit possibly in sleep mode (again, 389 | the platform does not need to be in e.g. S3 for this). 390 | 391 | Thus a mechanism is needed to ensure, upon the user's request, a reliable and quick 392 | clearing of all the volatile memories fitted on the platform. This might be the 393 | default behaviour every time the platform is shut down or 394 | hibernated. One of the reviewers suggested that short-circuiting the Vcc and GND pins 395 | might do the trick for the processor and DRAM. 396 | 397 | \newpage 398 | 399 | ## Putting it all together 400 | 401 | The diagram below wraps up our discussion so far: 402 |
403 | +----------------------------------------------+
404 | |   +--------+                 +-----+         |
405 | |   |  HDD   |<----SPI(?)------| SPI |         |
406 | |   +--------+                 | MUX |         |
407 | |        | USB/PCIe/           |     |         |   +---------------+
408 | |        | SATA                |     |<------------| Trusted Stick |
409 | |   +-----------+              |     |         |   +---------------+
410 | |   | Processor |<----SPI------|     |         |
411 | |   |   Chip    |              |     |         |
412 | |   +-----------+              +-----+         |
413 | ((+))     | USB   | LPC            |           |
414 |  |        |       |                |           |
415 |  |  +------+       |   +------+    |           |
416 |  +--| WiFi |--+----+---|  EC  |<---+ (SPI)     |
417 |     +------+ USB       +------+                |
418 | +----------------------------------------------+
419 | 420 | Fig 1. The stateless laptop with trusted internal disk and network proxy. 421 | 422 | A new element introduced in the above diagram is the "SPI MUX" box. It's a 423 | multiplexer for the SPI buses coming from the different devices, each of which 424 | normally expects to be the only one talking to the SPI flash chip. It should be 425 | possible to implement this using an FPGA (for a prototype) or an inexpensive 426 | ASIC (for production models). 427 | 428 | The next diagram shows a simplified laptop: without a trusted internal disk 429 | (implemented according to the requirements laid out above), and also without a 430 | networking proxy (as discussed later), instead using an external 431 | USB-connected WiFi device. 432 | 433 | \newpage 434 |
435 |    +-----+
436 |    | SD/ |
437 |    | USB |
438 |    \--+--+  USB/SD
439 |       |     ..............................
440 |       |     :                            :
441 | +-----|-----:------------------------+   :
442 | |     |     :          +-----+       |   :
443 | |     |     :          | SPI |       |   :
444 | |     |     :          | MUX |       |   :
445 | |     |     :          |     |       |   +---------------+
446 | |     v     V          |     |<----------| Trusted Stick |
447 | |  +-----------+       |     |       |   +---------------+
448 | |  | Processor |<-SPI--|     |       |
449 | |  |   Chip    |       |     |       |
450 | |  +-----------+       +-----+       |
451 | ((+))   | USB      | LPC      |      |
452 |  |      |          |          |      |
453 |  |  +------+    +------+      |      |
454 |  +--| USB  |    |  EC  |<-SPI-+      |
455 |     +------+    +------+             |
456 | +--------------------------------------+
457 | 458 | Fig 2. The simplified stateless laptop (no internal disk, USB WiFi). 459 | 460 | It might also be possible to use a microcontroller with one-time-programmable 461 | (OTP) memory in order to avoid the need for the SPI multiplexing, and so to 462 | further simplify the construction of the laptop and the Trusted Stick. While an 463 | OTP uC would not introduce state persistence, it would still be a sub-optimal 464 | solution because the user would (likely) not be able to inspect the firmware, or 465 | load the one they want. 
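Before moving on, it is worth sketching what the stick-side SPI flash emulation assumed in both diagrams could look like: serve the standard SPI NOR commands out of the firmware image, and silently drop program/erase commands unless the user has flipped a physical write-enable switch. This is only a minimal sketch; all the bus and board helpers (`spi_rx_byte`, `spi_tx_byte`, `chip_selected`, `write_switch_on`) are hypothetical:

```c
#include <stdint.h>
#include <stdbool.h>

#define CMD_READ 0x03               /* standard SPI NOR opcodes */
#define CMD_PP   0x02               /* page program */
#define CMD_SE   0x20               /* 4 KiB sector erase */
#define FLASH_SIZE (8u * 1024 * 1024)

extern uint8_t spi_rx_byte(void);           /* hypothetical bus helpers */
extern void    spi_tx_byte(uint8_t b);
extern bool    chip_selected(void);         /* CS# still asserted? */
extern bool    write_switch_on(void);       /* physical switch on the stick */
extern uint8_t firmware_image[FLASH_SIZE];  /* ME/BIOS/EC/GbE partitions */

void spi_service_loop(void)
{
    for (;;) {
        switch (spi_rx_byte()) {
        case CMD_READ: {
            uint32_t a = (uint32_t)spi_rx_byte() << 16;  /* 24-bit address */
            a |= (uint32_t)spi_rx_byte() << 8;
            a |= spi_rx_byte();
            while (chip_selected())          /* stream until CS# deasserts */
                spi_tx_byte(firmware_image[a++ % FLASH_SIZE]);
            break;
        }
        case CMD_PP:
        case CMD_SE:
            if (!write_switch_on())
                break;                       /* read-only: drop the write */
            /* ...user-authorized update path would go here... */
            break;
        default:
            break;                           /* WREN, RDID, etc. omitted */
        }
    }
}
```

The key point is that the read-only policy is enforced by the stick's own firmware behind a physical switch, so nothing running on the platform can override it.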
466 | 467 | # The Trusted Stick 468 | 469 | The Trusted Stick, a small device of a "USB stick" or an SD card form factor, is 470 | an element that the user always carries with them and which contains all 471 | the "state" for the platform. This includes the (encrypted) user files and 472 | platform configuration. It is also expected to carry all the software and -- 473 | what is unique as of today -- firmware for the platform, and also to enforce 474 | the read-only-ness of these.^[A mechanism for updating the software and firmware on 475 | the stick should be explicitly under the control of the user. One can easily 476 | imagine this to be implemented using a physical switch on the stick, i.e. 477 | something that software would not be able to interfere with.] 478 | 479 | As the name suggests, it is assumed the device is to be _trusted_. In other 480 | words, should this device malfunction (due to a bug in its own firmware), or get 481 | compromised by the attacker somehow, the security of the user data is in 482 | jeopardy. 483 | 484 | It is thus expected this device should be as simple as possible, to assure it's 485 | reasonably secure, and also to make it possible for various vendors, or ideally 486 | the users themselves, to build it. It goes without saying the device 487 | should be an open-source, open-hardware device. The author believes there is no 488 | excuse for entrusting proprietary products with such important things as 489 | one's digital life. 490 | 491 | Let us now consider what functionality the Trusted Stick should implement. 492 | 493 | ## Firmware Storage 494 | 495 | First of all, it should provide read-only (from the host's perspective, at least) 496 | storage for all the platform firmware. This includes the Intel ME, the BIOS 497 | (including any blobs it might depend on, such as the FSP, ACMs, etc.), any of 498 | the standard integrated devices' firmware (e.g. the GbE firmware), as well as 499 | firmware for the OEM-specific Embedded Controller, and potentially other 500 | devices, such as the already discussed (optional) internal disk, and perhaps any 501 | discrete networking devices. 502 | 503 | The above (read-only) firmware storage should also cover any platform 504 | configuration. Typically the BIOS, ME, and potentially other devices would want 505 | to use some parts of the flash partitions to store their own configuration (e.g. 506 | which devices to boot from, the MAC address, etc.). 507 | 508 | It should be stressed that all this firmware should be exposed to the platform 509 | (e.g. to the host processor or the EC u-controller) using the standard protocols 510 | that would normally be used to fetch the firmware. In most cases this is the SPI 511 | protocol. 512 | 513 | ## Disk Storage 514 | 515 | In addition to playing the role of firmware storage (in practice: an SPI flash 516 | device), the Trusted Stick might also act as a normal mass storage device, seen 517 | by the host as e.g. a USB mass storage device, or an SD card. 518 | 519 | Here we should further distinguish between two types of storage that are going to 520 | be exposed to the platform (the same applies also in the scenario with an 521 | internal trusted disk): 522 | 523 | 1. A read-only non-encrypted storage containing the system code (i.e. the 524 | bootloader, the boot partition, and the root filesystem), 525 | 526 | 2. A writeable (but encrypted) partition for the user files (i.e. the home 527 | directory and perhaps some additional system configuration). The key for the 528 | encryption could be derived from: 1) the user-provided passphrase (typed on the 529 | keyboard), optionally combined with: 2) a TPM-released secret, which can 530 | be used, to some extent, to prevent laptop-replacing Evil Maid attacks (which 531 | we discuss at the end of this paper in more detail), and 3) a secret 532 | generated by the Trusted Stick and subject to wiping in case the user 533 | requests secure deletion of all user-specific data (such a derivation is sketched below).
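A minimal sketch of such a key derivation, assuming OpenSSL's PBKDF2 is available on the stick; the exact construction (the two device-held secrets concatenated into the salt) is illustrative rather than a vetted design:

```c
#include <openssl/evp.h>
#include <string.h>

/* Derive the 512-bit disk key from the three inputs named above:
 * 1) the user passphrase, 2) the TPM-released secret, 3) the secret
 * generated (and wipeable) by the Trusted Stick. */
int derive_user_key(const char *passphrase,
                    const unsigned char tpm_secret[32],
                    const unsigned char stick_secret[32],
                    unsigned char key_out[64])
{
    unsigned char salt[64];

    /* The two device-held secrets act as the salt; erasing stick_secret
     * makes the key underivable even given the passphrase and the TPM
     * secret, which the Self-destruct section below relies on. */
    memcpy(salt,      tpm_secret,   32);
    memcpy(salt + 32, stick_secret, 32);

    /* Stretch the user passphrase over the combined salt. */
    return PKCS5_PBKDF2_HMAC(passphrase, (int)strlen(passphrase),
                             salt, sizeof salt, 100000,
                             EVP_sha256(), 64, key_out) ? 0 : -1;
}
```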
534 | 535 | It should be noted that it might not be possible to obtain the user passphrase 536 | using the standard keyboard during the early phase of the platform boot. This is not 537 | expected to be necessary, because the early boot firmware should not be 538 | encrypted, only read-only-protected by the Trusted Stick. However, in case 539 | it turned out that e.g. the Intel ME refused to run with only read-only access 540 | to its flash partition, then we might need to encrypt the flash partitions on 541 | the Trusted Stick holding this early boot firmware. More on this at the end of 542 | the paper. 543 | 544 | An alert user might be wondering what a TPM device is doing on a stateless 545 | laptop. After all, the TPM has its own non-volatile memory, doesn't it? 546 | Interestingly, on recent Intel platforms the TPM has been integrated into the 547 | processor package (it's in fact an application running on the ME processor), and 548 | so it uses the system's SPI flash memory as its own non-volatile storage. 549 | Of course, everything that is written there is encrypted with a key that is 550 | tamper-proof-protected inside the processor, so the mere fact that the attacker is 551 | able to read the SPI flash content with an external programmer does not 552 | compromise the safety of this TPM's storage. While it hasn't been confirmed 553 | experimentally whether such a processor-internal TPM would work with read-only 554 | storage exposed by the Trusted Stick, it seems plausible to expect it 555 | would^[And in case it didn't work with a read-only flash, we might still be 556 | able to use it with an encrypted writeable flash, as discussed later in the 557 | paper]. Of course, the user would be expected to let the TPM write its generated 558 | keys during the platform initialization, by operating the write-protect switch 559 | on the Trusted Stick. 560 | 561 | 562 | ## The variant with internal trusted disk 563 | 564 | As discussed earlier, if a trusted, open implementation of an 565 | internal hard disk were available, then the stick would not need to act as (fast) 566 | storage. It would only have to provide the decryption key to the (trusted) 567 | internal disk device.^[Potentially it might also provide the /boot 568 | partition, although the benefit of this is unclear.] 569 | 570 | The primary benefit in this case would be the simplification of the stick: no 571 | need to fit high-capacity, high-performance flash memory. Depending on the 572 | application, this could be an important advantage. 573 | 574 | 575 | ## Self-destruct 576 | 577 | Optionally, at least for some groups of users, it might be desirable for the 578 | Trusted Stick to implement quick and reliable wiping of its content, especially 579 | of the user partition.^[Although there might be scenarios extending this 580 | requirement also to other partitions, i.e. those holding the firmware and 581 | system image.] 
This could easily be implemented by securely erasing just the 582 | encryption key, for which even a small battery, or perhaps just a capacitor, 583 | should be enough. 584 | 585 | 586 | # Addressing leaks through networking 587 | 588 | Assuming the platform might be compromised with sophisticated rootkits, e.g. 589 | running in SMM or the ME, that are actively trying to steal e.g. GPG private keys 590 | from the host memory, it is important to ensure the malware cannot leak the data 591 | using networking. It should be realized that for malware running in the ME or SMM it 592 | might be possible to leak data using networking irrespective of what specific 593 | networking hardware is in use by the host OS. It should be enough for the 594 | malware to (asynchronously) find pages containing what look like specific data 595 | structures (e.g. Linux sk_buff structures) and modify just a few fields there in 596 | order to implement some form of covert channel for exfiltration (see 597 | e.g. [@rutkowska_nushu]). 598 | 599 | On the other hand, such advanced malware (especially when running in the 600 | ME) might be reluctant to (somehow blindly) modify outgoing networking packets 601 | without fully understanding the specifics of the 602 | environment and the user's setup. This is because such modifications might easily 603 | be spotted by more inquisitive users or admins using more or less standard 604 | network analysis tools, risking detection of the malware. Again, for malware 605 | located that deep in the hardware, in the processor itself, this might not be 606 | acceptable. Nevertheless, let's discuss what we could do to prevent such leaks 607 | anyway. We will do that starting from the simplified scenario of an air-gapped 608 | system, then move on to increasingly more connected scenarios. 609 | 610 | ## Scenario 0: An air-gapped system (no network) 611 | 612 | Contrary to what it might seem at first sight, merely keeping 613 | the laptop disconnected from any network does not automatically make it a truly 614 | air-gapped system! If there is malware on the laptop it can still establish 615 | communication with the outside world through a number of channels: it might use 616 | the existing WiFi or BT, or LTE/3G devices to send packets to other 617 | attacker-controlled devices^[Which might be the user's phone or a smart TV, for 618 | example.], ostensibly without connecting to any network. It might even use more 619 | exotic means of establishing covert channels, such as the audio spectrum using 620 | the built-in speakers, as mentioned previously in this document. 621 | 622 | Also, even if the system is not yet compromised (i.e. no malware or backdoors 623 | running on it yet), it might get compromised when devices such as WiFi or BT are 624 | exposed to the environment and are processing the (untrusted) input "from the 625 | air" around the laptop.^[This is especially true if the host OS does not 626 | explicitly try to sandbox the devices, drivers, and corresponding stacks, which 627 | is often the case.] 628 | 629 | Thus, to keep the laptop truly air-gapped, one must ensure access to all these 630 | devices is forbidden, not just to the host OS, but also to any of the 631 | hardware on the platform, including the processor. The physical kill switches 632 | seem to be a reliable way of guaranteeing this, as discussed previously. 
633 | Obviously, assuming such kill switches have been fitted (and set to the "off" 634 | positions), and that the stateless laptop is indeed lacking any 635 | persistent memory, then even if the ME (or any other rootkit) managed to 636 | steal any of the user data, it would not be able to leak it anywhere.^[One 637 | reviewer pointed out that the malware might try to e.g. modulate CPU usage, thus 638 | indirectly leaking the data via the electromagnetic field...] 639 | 640 | A truly world-disconnected computer is of very limited use, however. In practice 641 | we would like to transfer some files from/to such an air-gapped system. One 642 | popular approach is to use a USB storage device (stick) for that purpose. Such 643 | an approach, however, exposes the air-gapped computer to potential infections 644 | when its host OS is processing the device, volume, and filesystem metadata 645 | brought by this device. Additionally, and more importantly, a potential 646 | backdoor, e.g. in the ME, might then dump all the previously stolen data onto the 647 | stick (and these blobs might not be easily discoverable by the user, thanks to 648 | e.g. the wear-levelling mechanisms used on the stick, or potentially backdoored 649 | firmware on said USB device). 650 | 651 | A better approach is to use physically read-only media, such as DVD-R. While 652 | such a medium can still bring infection to the air-gapped system, it wouldn't be 653 | possible to use it to exfiltrate the stolen data^[Although one should remember 654 | the DVD-R drive will likely be fitted with its own uC featuring its own flash 655 | memory, which might be a good place for malware to store stolen secrets 656 | in.]. Of course, this would result in a "black-hole" use model -- the air-gapped 657 | system can only accept files from the outside world, but never give anything 658 | back to the universe -- again, possibly a sub-optimal use of computer 659 | technology... 660 | 661 | ## Scenario 1: A closed network of trusted peers 662 | 663 | Now, let's consider a closed network of trusted peers who would like to 664 | communicate securely with each other, also exchanging files.^[Again, this means, 665 | by definition, that any of these "trusted peers" is able to compromise the whole 666 | network.] Of course, humankind has researched this problem extensively over 667 | the last couple of decades, resulting in an abundance of cryptographic 668 | protocols for building secure tunnels over insecure networks. 669 | 670 | However, assuming a rootkit running in the ME or SMM, we're suddenly facing a 671 | significantly more difficult challenge. This is because the ME might now be 672 | piggybacking stolen information (such as the session keys for the crypto 673 | tunnels we're trying to build) on the existing network packets, allowing an 674 | adversary -- who e.g. controls the user's ISP -- to receive them on a plate. 675 | 676 | In order to prevent this from happening we need to move the actual networking 677 | device away from the jurisdiction of the ME and the host processor. It seems 678 | convenient, at first thought, to place the networking device on the Trusted 679 | Stick. Indeed, if the trusted module were implemented as a USB-pluggable device, 680 | then it would be able to provide an emulated Ethernet device to the host. The 681 | Trusted Stick would then perform simple tunneling to establish the virtual 682 | trusted network with other peers (hopefully also using similarly designed 683 | laptops). This way, even if a hypothetical ME rootkit tried to leak some 684 | information over the network, it would get encapsulated into the encrypted 685 | tunnel, which only the trusted peers would be able to see.^[Admittedly, as several 686 | reviewers noted, the rootkit might try to leak the stolen keys by interfering 687 | with the timings of packet transmissions, or using some other sophisticated 688 | side-channel attack...] 689 | 690 | Implementing Ethernet emulation and a networking proxy on the Trusted Stick has 691 | several disadvantages, though: 692 | 693 | 1. It complicates the Trusted Stick design, increasing its cost, as well as its 694 | size (which is an important factor given the stick is assumed to be carried 695 | by the user all the time, perhaps in the form of a necklace, or 696 | maybe even a ring in the future). 697 | 698 | 2. Even more importantly: it significantly enlarges the attack surface on this 699 | trusted device. Admittedly, the uC used for the networking proxy implementation 700 | might be a physically different one from the chip used for SPI firmware 701 | exposure, although this would now complicate the host-stick interface, in 702 | addition to further increasing the cost and size. 703 | 704 | However, just as we discussed the use of a stateless internal disk (which runs 705 | trusted firmware from the stick), we could similarly envision a simple 706 | networking proxy implemented using a stateless (i.e. flash-less) uC, which would 707 | then connect to a traditional WiFi card. The WiFi, however, would _not_ be 708 | directly connected to the host CPU. 709 | 710 | Incidentally, we have already outlined the need for a stateless uC on the laptop 711 | -- to implement the Embedded Controller. It thus seems logical to use 712 | this same uC both for the realization of the EC and for the (trusted) 713 | networking proxy (a sketch of such a proxy loop closes this section). 714 | 715 | Obviously it would take time to write firmware implementing the envisioned 716 | proxy, and before it is ready, a temporary solution could be to use an 717 | external, USB-connected or Ethernet-connected network proxy (similar in nature 718 | to e.g. [@thegrugq_portal]). 719 | 
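To make the envisioned proxy more concrete, here is a minimal sketch of its main loop, in which every Ethernet frame from the host is sealed into the encrypted tunnel before it ever reaches the WiFi card. All helpers (`host_rx_frame`, `wifi_tx`, `aead_seal`) are hypothetical, and a real design would additionally need peer key exchange, the reverse (receive) path, replay protection, and padding against the timing side channels mentioned above:

```c
#include <stdint.h>
#include <stddef.h>

/* Hypothetical board-support and crypto helpers. */
extern size_t host_rx_frame(uint8_t *buf, size_t max); /* frame from host */
extern void   wifi_tx(const uint8_t *buf, size_t len); /* to the WiFi card */
extern size_t aead_seal(const uint8_t key[32], uint64_t nonce,
                        const uint8_t *in, size_t n, uint8_t *out);

void proxy_loop(const uint8_t peer_key[32])
{
    uint8_t frame[1514], sealed[1514 + 64];
    uint64_t seq = 0;                  /* monotonic nonce, never reused */

    for (;;) {
        size_t n = host_rx_frame(frame, sizeof frame);
        /* Everything the host emits, including whatever a rootkit may have
         * piggybacked on it, leaves the laptop only as tunnel ciphertext. */
        size_t m = aead_seal(peer_key, seq++, frame, n, sealed);
        wifi_tx(sealed, m);
    }
}
```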
720 | ## Scenario 2: Tor-ified or VPN-ed open Internet 721 | 722 | Let's now consider the traditional scenario in which the user wants to interact 723 | with any computer on the Internet, whether trusted or not. 724 | 725 | In this scenario we would also like to use the previously discussed networking 726 | tunneling proxy. Of course, at some point the tunnel would need to be terminated, 727 | and the user's connection will become visible to some 3rd party Internet 728 | infrastructure, including the final 3rd party server (e.g. a cat-photo-serving 729 | website the user might be addicted to). The termination of the tunnel would take 730 | place at a VPN service provider (which we assume to be a trusted service 731 | provider for the user), or at a Tor exit node (which itself is not assumed to be 732 | trusted, but the Tor network, as a whole, should be in that case). 733 | 734 | Now, assuming the malware has modified the content of the user-generated packets 735 | at a high enough layer (OSI-wise), e.g. modified some of the HTTP(S) headers 736 | or data payloads, the 3rd party infrastructure or the final server would be able 737 | to read any covertly transmitted data from the compromised machine. 
But the attacker (who controls the cat-pictures server), even though receiving some of the user's sensitive data, e.g. a disk encryption key, might not be able to figure out which user it belongs to. Of course the user might have plenty of identifiable information on their laptop, and the malware might be smart enough to search around for it and include it with the blobs sent over the covert channel. Theoretically, if the user was careful enough, this might not be the case, but in reality expecting the user to be so careful with regards to _all_ of the activity performed on their laptop seems unrealistic for most users.

Typically users would be willing to be careful only with regard to some of their _domains_, while wanting to "live a normal life" in the others. Operating systems such as Qubes OS [@qubes_os] try to resolve this problem by using Virtual Machine-based compartmentalization. Sadly, in the case of malware operating in the ME or SMM^[Although systems that properly use compartmentalization might make it very hard for the SMM to ever get infected. On the other hand, they can do nothing against backdoors built in by vendors right from the start.], Virtual Machine technology (even augmented by technologies such as Intel VT-x and VT-d) is of little help.^[Admittedly Intel VT-x allows for SMM sandboxing using Dual Monitor Mode, although in practice there seem to be lots of problems with this approach, as the author has discussed in [@x86_harmful].]

On the other hand, forcing the attacker's malware to modify only high-level protocol payloads in order to leak data might already be considered a significant win. The higher the protocol layer the attacker needs to intercept, the higher the complexity of the malware, which increases the probability of it getting caught by curious users or administrators.

In addition, the attacker has little choice over which servers or infrastructure she must control in order to be able to receive the stolen data from a given user.

## Scenario 3: Unconstrained Internet access?

Not every user would like to forward all their networking through Tor or even a fast VPN gateway. The primary reason not to do so might be the limitations on bandwidth and latency imposed by such proxies.

A user might typically want to use such proxies for only some of their activities (say, to follow the news surrounding anti-government protests), while still enjoying "un-handicapped" Internet for other activities (such as watching full HD cat movies).

The problem with such an approach, again, is that the potential malware might choose to piggyback the stolen information onto this innocent traffic.

About the only solution left here is to keep an eye on the traffic generated by the user. An adversary knowing that the user might be closely monitoring their traffic should be reluctant to (somehow blindly) piggyback a covert channel on top of it, for fear of getting caught. Thus, it would seem more reasonable for the adversary to target higher-level protocols in this scenario too, facing the same problems as discussed in the previous section.
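Such "keeping an eye on the traffic" does not have to be sophisticated to raise the attacker's risk. A rough sketch, assuming a capture file of the laptop's egress traffic and the `scapy` library, might simply baseline which destinations the user knowingly talks to and flag everything else for manual inspection (the threshold set and filenames are illustrative):

```python
# Rough sketch: flag egress flows towards destinations the user has never
# knowingly contacted. Assumes a pcap capture and scapy; the KNOWN_GOOD
# set would be populated from the user's own observed browsing habits.
from collections import Counter

from scapy.all import IP, TCP, UDP, rdpcap

KNOWN_GOOD = {"93.184.216.34"}  # illustrative allow-list

def suspicious_flows(pcap_path: str) -> Counter:
    flows = Counter()
    for pkt in rdpcap(pcap_path):
        if IP not in pkt:
            continue
        dst = pkt[IP].dst
        if dst in KNOWN_GOOD:
            continue
        dport = pkt[TCP].dport if TCP in pkt else (
            pkt[UDP].dport if UDP in pkt else None)
        # Count bytes per unexpected (destination, port) pair.
        flows[(dst, dport)] += len(pkt)
    return flows

if __name__ == "__main__":
    for (dst, port), nbytes in suspicious_flows("egress.pcap").most_common(20):
        print(f"{dst}:{port}  {nbytes} bytes -- inspect manually")
```

Of course, a covert channel hidden in timing, or one riding on traffic to servers the user does visit, would evade such a naive filter; the point is only to increase the cost and the risk of blind exfiltration.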
# (Un)trusting firmware and the host OS

## Firmware considerations

We would like to treat most of the platform firmware as untrusted. This applies to the Intel ME, other devices, and the BIOS. While it should be obvious why the Intel ME should be considered untrusted, it's also prudent to treat the BIOS as untrusted, even if we decided to use an open-source implementation such as coreboot [@coreboot]. This is because creating a truly secure, i.e. attack-resistant, BIOS implementation for the Intel x86 platform seems very challenging. Not to mention that it is currently very difficult (impossible?) to have a truly open source BIOS which would not need to execute Intel-provided blobs such as the Intel FSP.

The trick of keeping the platform's firmware on the trusted stick is a game-changer here, because we can be reasonably confident the stick will: 1) implement proper read-only protection, thus stopping any potential flash-persisting attacks originating from the platform, and 2) even if the firmware was somehow malicious, the construction of our stateless laptop leaves no place for this malware to store any data stolen from the user. (It could still try to leak it through networking, a problem we discussed in more detail in the previous chapter.)

There are two important exceptions with regard to trusting the firmware, though:

1. If we decided to use an internal disk, as discussed earlier, then we would need to trust the disk's firmware to properly implement encryption, as well as read-only protection for select sectors/partitions,

2. If we decided to use the Embedded Controller (again, not to be confused with the Intel ME) to implement the internal networking proxy (as discussed in the previous chapter), then we would need to trust its firmware too.

Of course, as already discussed, both of these devices would be fetching their firmware from the Trusted Stick.

## Host OS considerations

It's tempting to assume the host OS could also be treated as untrusted, using argumentation similar to that which we just used to convince ourselves we didn't need to trust the Intel ME or the BIOS...

Indeed, at least for the networking scenarios #0 (air-gap) and #1 (closed network of trusted peers), as discussed in the previous chapter, that might indeed be a justified assumption.

However, for the more open networking scenarios #2 and #3, this is no longer the case. Indeed, an insecure OS might allow malware infections that could then use all the convenience of a locally-executing program to steal user data, collect additional personally identifying information, and exfiltrate all this to some remote server using one of the million ways modern malware typically does that. This would naturally lower the bar for the adversary significantly, almost negating the benefits of a stateless laptop...

This means it is still prudent to run a secure OS on the stateless laptop.

## Reconsidering BIOS and ME (un)trusting?

An alert reader might, however, now point out that we cannot assume the host OS to provide any security if we don't trust the BIOS or the ME. In theory this is true, of course.
In practice, however, we should consider how a malicious ME or BIOS could actually inject malware into our (otherwise secure) host OS.

The only way for such an infection to occur would be for either the Intel ME or the BIOS to inject malware into the host memory. In practice this means that Intel would have to release a processor which, under certain circumstances (yet not depending on any persistent state), writes malware to the host memory pages.

Alternatively, this might be done by the Intel FSP blob.^[Here we assume a mostly open source BIOS has been used. Such a BIOS will still likely need to execute the Intel FSP blob, and this blob would then be the only place which might inject the malware.]

The author believes such a move would be extremely risky for a vendor like Intel. Again, we should remember that such malware insertion (by either the processor or the FSP blob) could not be conditioned on any persistent state, and so would be subject to a replay "attack". In other words, once the processor or the FSP got caught pulling this off, it should be possible for the user to reproduce and demonstrate this malicious behaviour an arbitrary number of times afterwards.

Of course the Intel ME, or a malicious SMM, instead of injecting malware into the host memory, might choose a more subtle approach and merely expose a privilege escalation backdoor, which could then be used by some malware to undermine the security isolation offered by the host OS.^[E.g. the backdoor might allow escaping a virtual machine, allowing some more-or-less standard malware -- which came through some standard channel, such as an email attachment, and which would otherwise be contained in some untrusted VM -- to spread over the whole system.]

Again, by using a largely open source BIOS implementation we can practically rule out such a backdoor in the SMM.^[Indeed, it's hardly imaginable for the FSP blob to bring such a backdoor into the SMM.] This leaves us with the possibility of the Intel ME providing this hidden escalation trap. That, however, is something that a processor vendor could always do _trivially_, without introducing technology such as the Intel ME, as discussed e.g. in [@rutkowska_cpu_backdoors_thoughts]. In that case, again, our only hope is that Intel would not risk being caught red-handed, given the hypothetical backdoor would need to be _stateless_.

We thus see that, while we cannot fully eliminate the problem of subversion of the host OS security by a potentially malicious processor, the construction of the stateless laptop allows us to force the adversary into very dangerous territory, requiring them to take high risks and also making the attack very complex.

It's worth noting, however, that we have silently started assuming we need a largely open source (and so largely trustworthy) BIOS, even on our stateless laptop. Needless to say, the coreboot project [@coreboot] is a natural candidate for such a BIOS, and we are very lucky such a project already exists in the wild.

# Addressing Evil Maid Attacks

Originally the term Evil Maid Attack [@itl_evil_maid_attack] was used to describe attacks on full disk encryption schemes.
In such scenarios the attacker (the Evil Maid) replaced or infected the part of the code which asks the user for the disk decryption passphrase. Once the passphrase was obtained from the unsuspecting user (who thought they were providing it to the legitimate system software), the malicious code could store it somewhere (e.g. on unused disk sectors), or leak it through networking, allowing the attacker to decrypt the disk once they somehow got access to it later (e.g. after physically stealing the laptop from the user, or perhaps covertly making a copy of the hard disk).

But the old Evil Maid Attack concept can easily be generalized and applied to the stateless laptop scenario. Now the Evil Maid would be replacing the whole laptop, rather than just the software on it (because, of course, there is no software to be replaced on the laptop in this case). The new, fake laptop would look identical to the user from the outside, but might be a completely different machine on the inside. E.g. it might be full of persistent memory, and also feature an army of wireless devices to leak all the user's secrets to everybody within a radius of miles.

A special case of such an Evil Maid attack would be the laptop being replaced during shipment, or simply the vendor of the laptop turning out to be (or being forced to be) malicious.

What could we do about such attacks?

First, we should stress that the primary reason behind introducing the stateless laptop idea is _not_ to prevent sophisticated physical attacks, such as "full" Evil Maid attacks which replace the whole laptop with an identically-looking one.

Having said that, the author is of the opinion that the stateless laptop design makes lots of physical attacks difficult, or simply infeasible. This applies to the "classic" Evil Maid attacks, as well as to various attacks targeting the firmware.

Still, in order to somehow address (or at least significantly increase the cost of) the full laptop-replacing Evil Maid attacks, one can think of several solutions involving traditional physical protection applied to the laptop whenever it is left unattended by the user. These are things such as custom, personalized stickers, which make it more difficult to substitute an identically-looking laptop, as well as more classic means in the form of a vault or strongbox, or a monitoring camera.

An inquisitive reader might wonder why we would need all this hassle with stateless laptops if the user is expected to implement physical protection anyway. As already mentioned several times in this paper, there are many more problems with the x86 platform, which we try to resolve with the stateless laptop, than just physical attacks. These other problems include: software attacks on firmware, malicious firmware (backdoored by the vendor, or somewhere along the shipment route), and software attacks against secure boot mechanisms. The reader is, again, directed to [@x86_harmful] for a more complete discussion.

The physical protections mentioned above do not, however, resolve the problem of attackers subverting the laptop hardware at the manufacturing or shipment stages. This includes, naturally, a potentially conspiring laptop vendor.

In order to address this latter problem we -- the industry -- need to come up with reliable and simple methods for comparing PCBs with each other. A tool analogous to `diff`, only working on PCBs rather than on files. Such a tool, implemented as software, could e.g. take two (sets of) photos, taken by the user, of the two boards to compare. The photos might be taken with an ordinary camera, or, in a more sophisticated setup, using X-ray imaging to also reveal the internal layer wiring. This initiative has already been proposed by other researchers recently (e.g. [@appelbaum_technical_action_plan]), so it is not unreasonable to expect some progress in this area in the near future.

Admittedly such an approach would not be able to detect sophisticated attacks which replace the original laptop board with an identically-looking one (connection- and chip-geometry-wise), yet with different chips. The author thinks that such attacks might be very difficult to pull off in practice, and probably extremely expensive due to the need to manufacture small series of custom integrated circuits.
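As a toy illustration of the "diff for PCBs" idea, here is a sketch using the Pillow imaging library that reports the region where two (already aligned, same-scale) board photos visibly disagree. Real tooling would of course need image registration, perspective correction, and tolerance for lighting; the filenames and threshold are illustrative:

```python
# Toy "diff for PCBs": compare two pre-aligned, same-resolution board
# photos and report the bounding box of any region where they differ.
# Assumes the Pillow library; real tooling would first register/align
# the images and normalize the lighting.
from PIL import Image, ImageChops

def pcb_diff(reference_path: str, suspect_path: str, threshold: int = 32):
    ref = Image.open(reference_path).convert("L")
    sus = Image.open(suspect_path).convert("L")
    if ref.size != sus.size:
        raise ValueError("photos must be pre-aligned to the same size")
    # Per-pixel absolute difference, then drop sub-threshold camera noise.
    diff = ImageChops.difference(ref, sus)
    mask = diff.point(lambda px: 255 if px >= threshold else 0)
    return mask.getbbox()  # None means no significant difference found

if __name__ == "__main__":
    box = pcb_diff("board_reference.jpg", "board_suspect.jpg")
    print("boards match" if box is None else f"inspect region: {box}")
```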
# Select implementation considerations

Here we briefly list some of the potential challenges, and some other aspects that are still left open for further discussion and research.

## SPI Flash emulation challenges

One anticipated complication for emulation of the SPI flash by the trusted stick is that the processor (chipset) expects specific timings to be met by the SPI chip when reading the firmware, so it's unlikely one could use a general-purpose uC on the stick to emulate the flash chip. The timing requirements also make it unlikely that a regular SD storage card will work for us here.^[Which otherwise sounds like a great solution, at least for prototyping, as most of these cards should implement the simple SPI protocol.] Rather, we need a real SPI flash chip located on the trusted stick, or better: an FPGA-based implementation.^[The reason to use an FPGA-based implementation of an SPI flash is transparency, required to assure that our Trusted Stick indeed implements read-only protection for certain parts of the flash, as well as reliable encryption for other partitions, as discussed earlier in the paper.]

It also does not seem trivial to use one and the same SPI chip to both serve the firmware (i.e. ME, BIOS, other) to the host processor and, at the same time, act as a flash provider to the EC, and optionally also to the internal disk. The primary reason for this is the lack of a good multiplexing mechanism built into the SPI protocol. This seems, however, to be merely a technical complication that, in the worst case, could be resolved by having the Trusted Stick expose two separate SPI interfaces: one for the host processor, another for the EC uC. Of course, such an approach is far from ideal, as it increases the number of signals required on the port into which the Trusted Stick is inserted.^[And we would like to keep these down to a minimum in order to be able to re-use existing USB or SD ports.] As mentioned earlier, a temporary solution might be to use a uC with OTP memory for firmware storage.
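The read-only enforcement itself is conceptually simple. The sketch below models, in Python pseudo-firmware (a real implementation would be FPGA logic, as argued above), how the stick-side flash front-end could refuse every write- or erase-class SPI request touching a protected region, no matter what the host asks for. The opcodes are the common ones used by typical 25-series SPI NOR flash chips; the region layout and class interface are illustrative:

```python
# Conceptual model of the Trusted Stick's SPI flash front-end: serve reads
# from the firmware image, refuse any write/erase that touches a protected
# region. A real design would express this in FPGA logic.

# Common SPI NOR opcodes (as used by typical 25-series flash chips).
OP_PAGE_PROGRAM = 0x02
OP_SECTOR_ERASE = 0x20   # 4 KiB granularity on most parts
OP_CHIP_ERASE = 0xC7

SECTOR = 4096

class StickFlash:
    def __init__(self, image: bytearray, protected_ranges):
        self.image = image                 # the BIOS/ME/EC firmware image
        self.protected = protected_ranges  # [(start, end)], end exclusive

    def _protected(self, start: int, end: int) -> bool:
        return any(start < p_end and end > p_start
                   for p_start, p_end in self.protected)

    def read(self, addr: int, length: int) -> bytes:
        # Reads are always allowed -- the host must be able to boot.
        return bytes(self.image[addr:addr + length])

    def modify(self, opcode: int, addr: int, data: bytes = b"") -> bool:
        """Apply a write/erase request; return False if it was refused."""
        if opcode == OP_CHIP_ERASE:
            return False                   # never acceptable in this design
        if opcode == OP_SECTOR_ERASE:
            start, end = addr, addr + SECTOR
            data = b"\xff" * SECTOR        # erased NOR flash reads as 0xFF
        elif opcode == OP_PAGE_PROGRAM:
            start, end = addr, addr + len(data)
        else:
            return False                   # unknown modify opcode: refuse
        if self._protected(start, end):
            return False                   # BIOS/EC regions stay immutable
        self.image[start:end] = data       # e.g. an encrypted ME data region
        return True
```

A write from the platform that targets, say, the BIOS region simply never reaches the backing storage, regardless of what the platform firmware does.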
It's also not yet clear whether the Intel ME (which is part of the processor) would be happy in an environment where the SPI flash it gets access to is externally forced to be read-only. Should this be a problem, it might be necessary for the Trusted Stick to allow selective write access to the ME partition. In that case this region should be encrypted by the Trusted Stick, as already discussed earlier. This is to assure that, in case the processor wanted to store some user-compromising secrets there, these secrets would not fall into the hands of an adversary. While this solution might seem simple enough, a slight complication arises from the inability to ask the user for a passphrase (at least using the standard keyboard) upon early platform boot. In that case we would likely need to protect these partitions using a key kept on the Trusted Stick which is not conditioned on a user passphrase. It might even be possible to use auto-generated, discardable keys for this purpose. Further research is needed.

## Host OS implementation considerations

As previously noted, the host OS should be engineered so that it is able to boot and operate efficiently from read-only storage. This is generally not a problem today: many Linux distributions support such a mode of operation (LiveUSB). It does, however, present some challenges for systems which aggressively try to decompose their TCB, such as Qubes OS [@qubes_os]. Such systems would like to keep the whole USB subsystem, drivers, and devices in separate, de-privileged domains (VMs). In order to keep such USB-hosting domain(s) truly untrusted, while at the same time using one of them as a provider (backend) for the system root storage, special additional mechanisms would have to be used [@qubes_arch]. This complication could be avoided, however, if an internal trusted disk was used on the stateless laptop.

## User partition encryption considerations

It seems tempting to delegate the user partition encryption to the host -- after all, it runs the user-approved trusted code from the stick's read-only partition -- and doing so would simplify the construction of the stick significantly.

Unfortunately, by running the encryption on the host processor we expose it to potential malicious interference from the ME processor. The ME could e.g. steal the encryption key from the host registers or memory pages and then try to leak it through some of the user's networking activity, although this might be very difficult in practice, as discussed earlier in the paper. What the ME can do very simply, however, is to store some of the leaked user-sensitive information (such as the email private keys) on the user's private partition _without_ encrypting it with the user key, but rather with some other key. This would then look like random garbage to the user, if they ever decided to examine the sectors on the partition, but for the attacker who (physically) obtained access to the user's stick it might be immediately readable.

On the other hand, if it was the Trusted Stick that performed the encryption, then there would be no way for a hypothetical ME rootkit to write anything onto the user partition bypassing the forced encryption with the user key. A rough sketch of this write path follows.
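The sketch assumes AES-XTS with the sector number as the tweak, which is the conventional choice for sector-level encryption; the class interface and sector size are invented for illustration. The essential property is that every sector the host submits is encrypted by the stick itself, under the user key, before it touches the backing flash:

```python
# Sketch of stick-enforced sector encryption: the host never gets to
# write raw bytes to the user partition; everything passes through
# AES-XTS under the user's key, with the sector number as the tweak.
import struct

from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

SECTOR_SIZE = 512

class EncryptedPartition:
    def __init__(self, xts_key: bytes, backing: bytearray):
        # XTS takes a double-length key (64 bytes for AES-256-XTS),
        # derived on the stick from the user's passphrase.
        self.key = xts_key
        self.backing = backing

    def _cipher(self, sector: int) -> Cipher:
        tweak = struct.pack("<Q", sector) + b"\x00" * 8
        return Cipher(algorithms.AES(self.key), modes.XTS(tweak))

    def write_sector(self, sector: int, plaintext: bytes) -> None:
        # Whatever the host (or a hypothetical ME rootkit) submits is
        # unconditionally encrypted with the *user* key -- there is no
        # bypass path to the backing store.
        enc = self._cipher(sector).encryptor()
        off = sector * SECTOR_SIZE
        self.backing[off:off + SECTOR_SIZE] = (
            enc.update(plaintext) + enc.finalize())

    def read_sector(self, sector: int) -> bytes:
        dec = self._cipher(sector).decryptor()
        off = sector * SECTOR_SIZE
        block = bytes(self.backing[off:off + SECTOR_SIZE])
        return dec.update(block) + dec.finalize()
```

Note that this, like most sector-level schemes, provides confidentiality but not integrity; authenticating the stored data is a separate discussion.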
## Tamper-resistance considerations

The use of tamper-resistance technology is often thought of as a beneficial means to improve the physical security of an endpoint device. Care must be taken, however, that it does not compromise the ultimate trustworthiness of the product.

In the author's opinion it is unacceptable for any _code_ that the user is forced to entrust their digital life to, to be tamper-proof-protected, if that results in an inability for the user to dump and analyze the code that runs on the device at any time the user feels the need to do so.^[And it is completely irrelevant whether the user would, in practice, be willing or capable of doing that -- it's a matter of having an _opportunity_ to do it. This is very similar to guarantees of civil liberties, such as free speech.]

Thus a tamper-proof mechanism might only be acceptable for the actual (small) persistent memory which holds the bits of the user keys, and for nothing more; particularly not for the memory which holds the firmware for the device. Also, any tamper-proof protection of volatile memory (RAM) is unnecessary, as such protection only makes sense if the threat model assumes the legitimate user to be a potential attacker. This admittedly is the case for various Digital Rights Management (DRM) or payment processing systems. For these systems the end user is considered a potential enemy, who might want to illegally make a copy of a movie, or clone credit card information. Indeed, only then would the device want to protect its _runtime_ processing; otherwise an attacker who managed to steal the device would not be able to get it to start processing sensitive data in its RAM without providing a proper unlock password or key in the first place. It's worrying that the industry has been aggressively advertising various DRM-friendly technologies as protecting the user, while in fact they have the opposite effect, degrading the trustworthiness of the user's devices (from the user's point of view, that is).

An exception would be a tamper-proof design which allowed reliable read-only access to all the firmware (preventing access only to the key-holding storage), but it seems that existing devices (specifically microcontrollers) do not support such a mode today -- at least the author is not aware of any.

# Alternative solutions?

Many people voice the concern that perhaps a much better strategy is to ditch the (Intel) x86 platform, and look for an alternative architecture as a foundation for secure and trustworthy personal computers... In this chapter we quickly review what options we might, in practice, have here.

## ARM-based platforms?

The ARM architecture [@wikipedia_arm_arch] seems like a natural candidate to replace x86 in desktop computers, including laptops.
Indeed, it has already dominated the smartphone and tablet markets, and it doesn't seem like the performance gap between these devices and laptops is that great anymore. This might seem like a plausible direction at first sight, but there are at least two problems here:

First, there is no such thing as an "ARM processor" -- rather, ARM releases only a set of specifications and other IP, which are then licensed by various vendors, such as NVIDIA, Samsung, Texas Instruments, and so forth. These vendors then combine the licensed ARM IP with their own, creating unique final products: the actual processors, customarily called Systems-on-Chip (SoCs).

This large diversity of "ARM processors", while undoubtedly beneficial in some respects, is also problematic -- e.g. it presents multiple research targets for security researchers, as well as for system architects and developers. For example, some of the SoCs implement IOMMU functionality adhering to the ARM-published specification, while others use a completely different technology, invented by the OEM that makes the SoC [@genode_armvirt].

Also, most of the ARM-based SoCs implement the so-called TrustZone (TZ) extension. Of course, as with most technologies on ARM, TZ is just a specification and is not malicious in itself. However, it opens up the possibility for the vendors who produce TZ-compatible SoCs (which most do) to lock down their processors in such a way that their TZ implementation would not differ significantly from the Intel ME.

Also, there is nothing special about the ARM architecture that could prevent a vendor from introducing backdoors into the SoCs they produce.

## FPGA-based, true open source processors and platforms?

There are also efforts to create fully open processor designs ([@lowrisc], [@Opf]). This surely is the proper way to go for our civilization, long term. The important question is how much time it will take for such processors to become performant enough for typical desktop workflows (e.g. watching HD movies, running modern Web browsers or an office suite).

But performance is only part of the story -- another question is what security technologies these processors should offer: technologies such as an IOMMU, and potentially also CPU and memory virtualization.^[Arguably, virtualization technologies might not be needed for such new processors. On the other hand, it might turn out more practical to port e.g. the existing Linux kernel and recompile many of the currently used POSIX applications for these new processors than to write everything from scratch. In that case we would need virtualization in order to implement reasonably strong compartmentalization.]

Sadly, it seems we are at least several years away from having consumer-grade laptops based on such processors, and perhaps more than a decade away from having these systems offer isolation technologies on par with what current Intel processors offer.

# Summary

Personal computers have become extensions of our brains. This symbiosis is only going to strengthen in the years to come, and not just metaphorically!
The author believes it should be paramount for humankind to ensure we can trust our personal computers. Unfortunately, the industry does not seem to share this opinion. Not only do we not see much effort to create secure and trustworthy hardware and operating systems, but we also witness the introduction of technologies, such as the Intel ME, that could undermine our trust in computers (especially personal computers) more than ever before.

The strict separation of the state-carrying (trusted) element from the rest of the hardware, proposed in this paper, is an attempt to change this game in favour of the user. While this solution might appeal to many as simple and elegant, care should be exercised in understanding the various implementation-specific subtleties, many of which, hopefully, have been discussed in this paper.

The author thinks this clean separation of state might be beneficial not just for Intel x86 systems, but also for other architectures underlying our future personal computers.

# Credits {.unnumbered}

I would like to thank the following people for many insightful discussions, as well as for reviewing this paper: Rop Gonggrijp (especially for turning my attention to the problem of "state"), Peter Stuge (for sharing his rich hardware expertise), and Rafał Wojtczuk (for being a great sparring partner in many discussions).

# Contacting the author {.unnumbered}

Joanna Rutkowska can be contacted by email at: `joanna@invisiblethings.org`

Her personal master key fingerprint^[See http://blog.invisiblethings.org/keys/] is also provided here for additional verification:

    ED72 7C30 6E76 6BC8 5E62 1AA6 5FA6 C3E4 D9AF BB99

# References {.unnumbered}