├── .github
│   └── workflows
│       └── c-cpp.yml
├── .gitignore
├── License.txt
├── Makefile
├── README.md
├── SECURITY.md
├── inker2ext
│   ├── internal-to-external-tree-changes.patch
│   ├── kernel_2_extern.sh
│   └── sgx2.patch
├── sgx.h
├── sgx_arch.h
├── sgx_asm.h
├── sgx_encl.c
├── sgx_encl2.c
├── sgx_ioctl.c
├── sgx_main.c
├── sgx_page_cache.c
├── sgx_user.h
├── sgx_util.c
└── sgx_vma.c
/.github/workflows/c-cpp.yml:
--------------------------------------------------------------------------------
1 | name: C/C++ CI
2 |
3 | on:
4 |   push:
5 |     branches: [ "main" ]
6 |   pull_request:
7 |     branches: [ "main" ]
8 |
9 | permissions:
10 |   actions: read
11 |   checks: read
12 |   contents: read
13 |   issues: write
14 |   pull-requests: write
15 |
16 | jobs:
17 |   build:
18 |
19 |     runs-on: ubuntu-20.04
20 |
21 |     steps:
22 |     - uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4.1.3
23 |     - name: build
24 |       run: make
25 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.ko
2 | *.o
3 | *.cmd
4 | *.mod
5 | *.mod.*
6 | *.symvers
7 | *.order
8 | *.tmp_versions
9 |
--------------------------------------------------------------------------------
/License.txt:
--------------------------------------------------------------------------------
1 | Copyright (C) 2018 Intel Corporation
2 |
3 | This software is licensed under
4 | (a) a 3-clause BSD license; or alternatively
5 | (b) the GPL v2 license
6 |
7 | -- A. BSD-3-Clause ----------------------------
8 | Redistribution and use in source and binary forms, with or without modification,
9 | are permitted provided that the following conditions are met:
10 | 1. Redistributions of source code must retain the above copyright notice,
11 | this list of conditions and the following disclaimer.
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 | 3. Neither the name of the copyright holder nor the names of its contributors
16 | may be used to endorse or promote products derived from this software
17 | without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
23 | BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
24 | OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
25 | OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 | OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 | WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
28 | OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
31 | -- B. GPL-2.0 ----------------------------
32 | This program is free software; you can redistribute it and/or modify it
33 | under the terms of the GNU General Public License, as published
34 | by the Free Software Foundation; either version 2 of the License,
35 | or (at your option) any later version.
36 |
37 | This program is distributed in the hope that it will be useful,
38 | but WITHOUT ANY WARRANTY; without even the implied warranty of
39 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
40 | GNU General Public License for more details.
41 |
42 | You should have received a copy of the GNU General Public License
43 | along with this program; if not, see <http://www.gnu.org/licenses/>.
44 | ------------------------------
45 |
46 | SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
47 |
48 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | ifneq ($(KERNELRELEASE),)
2 |     isgx-y := \
3 |         sgx_main.o \
4 |         sgx_page_cache.o \
5 |         sgx_ioctl.o \
6 |         sgx_vma.o \
7 |         sgx_util.o \
8 |         sgx_encl.o \
9 |         sgx_encl2.o
10 |     obj-m += isgx.o
11 | else
12 | KDIR := /lib/modules/$(shell uname -r)/build
13 | PWD := $(shell pwd)
14 |
15 | default:
16 | 	$(MAKE) -C $(KDIR) M=$(PWD) modules
17 |
18 | install: default
19 | 	$(MAKE) INSTALL_MOD_DIR=kernel/drivers/intel/sgx -C $(KDIR) M=$(PWD) modules_install
20 | 	depmod -A
21 | 	sh -c "cat /etc/modules | grep -Fxq isgx || echo isgx >> /etc/modules"
22 |
23 | endif
24 |
25 | clean:
26 | 	rm -vrf *.o *.ko *.order *.symvers *.mod.c .tmp_versions .*o.cmd *.mod
27 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PROJECT NOT UNDER ACTIVE MANAGEMENT #
2 | This project will no longer be maintained by Intel.
3 | Intel has ceased development of and contributions to this project, including, but not limited to, maintenance, bug fixes, new releases, and updates.
4 | Intel no longer accepts patches to this project.
5 | If you have an ongoing need to use this project, are interested in independently developing it, or would like to maintain patches for the open source software community, please create your own fork of this project.
6 |
7 | Intel(R) Software Guard Extensions for Linux\* OS
8 | ================================================
9 |
10 | # linux-sgx-driver
11 |
12 | Introduction
13 | ------------
14 | Intel(R) Software Guard Extensions (Intel(R) SGX) is an Intel technology for application developers seeking to protect select code and data from disclosure or modification.
15 |
16 | The Linux SGX software stack comprises the Intel(R) SGX driver, the Intel(R) SGX SDK, and the Intel(R) SGX Platform Software (PSW). The Intel(R) SGX SDK and Intel(R) SGX PSW are hosted in the [linux-sgx](https://github.com/01org/linux-sgx) project.
17 |
18 | The [linux-sgx-driver](https://github.com/01org/linux-sgx-driver) project hosts the out-of-tree driver for the Linux Intel(R) SGX software stack, which was used until the driver upstreaming process was complete.
19 |
20 | IMPORTANT:
21 | ---------
22 | This driver is deprecated and no longer maintained by Intel. We recommend that the SGX community use the SGX driver that was upstreamed into the Linux kernel. If that is not possible, you may still use the [DCAP driver](https://github.com/intel/SGXDataCenterAttestationPrimitives/tree/master/driver), which closely tracks the upstreamed kernel driver. Note that both the kernel and DCAP drivers require SGX CPUs with Flexible Launch Control (FLC) support.
23 |
24 | For new feature requests or patches, please submit them directly to the [linux-sgx mailing list](http://vger.kernel.org/vger-lists.html#linux-sgx).
25 |
26 | License
27 | -------
28 | See License.txt for details.
29 |
30 | Documentation
31 | -------------
32 | - [Intel(R) SGX for Linux\* OS](https://www.intel.com/content/www/us/en/developer/tools/software-guard-extensions/linux-overview.html)
33 | - [Intel(R) SGX Programming Reference](https://software.intel.com/en-us/articles/intel-sdm)
34 |
35 | Build and Install the Intel(R) SGX Driver
36 | -----------------------------------------
37 |
38 | ### Prerequisites
39 | - Ensure that you have an operating system version supported for the specific release, as listed here: [https://01.org/intel-software-guard-extensions/downloads](https://www.intel.com/content/www/us/en/developer/tools/software-guard-extensions/linux-overview.html#downloads)
40 | - Ensure that you have the following required hardware:
41 |     * 6th Generation Intel(R) Core(TM) Processor or newer
42 | - Configure the system with the **SGX hardware enabled** option.
43 | - To build the driver, the version of the installed kernel headers must match the active kernel version on the system.
44 |     * On Ubuntu
45 |         * To check if matching kernel headers are installed:
46 |             ```
47 |             $ dpkg-query -s linux-headers-$(uname -r)
48 |             ```
49 |         * To install matching headers:
50 |             ```
51 |             $ sudo apt-get install linux-headers-$(uname -r)
52 |             ```
53 |     * On CentOS, RHEL or Fedora
54 |         * To check if matching kernel headers are installed:
55 |             ```
56 |             $ ls /usr/src/kernels/$(uname -r)
57 |             ```
58 |         * To install matching headers:
59 |             ```
60 |             $ sudo yum install kernel-devel
61 |             ```
62 |         * If, after the above command, the matching headers are still missing in /usr/src/kernels, try updating the kernel and rebooting using the commands below, then choose the updated kernel on the boot menu.
63 |             ```
64 |             $ sudo yum install kernel
65 |             $ sudo reboot
66 |             ```
67 |     * On RHEL 8.0, the elfutils-libelf-devel package is required:
68 |         ```
69 |         $ sudo yum install elfutils-libelf-devel
70 |         ```
71 |
72 |
73 | **Note:** Refer to the *"Intel® SGX Resource Enumeration Leaves"* section in the [Intel SGX Programming Reference guide](https://software.intel.com/en-us/articles/intel-sdm) to make sure your CPU has the SGX feature.
74 |
75 |
76 | ### Build the Intel(R) SGX Driver
77 |
78 | To build the Intel(R) SGX driver, change to the driver directory and enter the following command:
79 | ```
80 | $ make
81 | ```
82 | You can find the driver *isgx.ko* generated in the same directory.
83 |
84 | ### Install the Intel(R) SGX Driver
85 | To install the Intel(R) SGX driver, enter the following commands with root privilege:
86 | ```
87 | $ sudo mkdir -p "/lib/modules/"`uname -r`"/kernel/drivers/intel/sgx"
88 | $ sudo cp isgx.ko "/lib/modules/"`uname -r`"/kernel/drivers/intel/sgx"
89 | $ sudo sh -c "cat /etc/modules | grep -Fxq isgx || echo isgx >> /etc/modules"
90 | $ sudo /sbin/depmod
91 | $ sudo /sbin/modprobe isgx
92 | ```
93 | On Red Hat Enterprise Linux Server or CentOS, you need to run the command below on each reboot:
94 | ```
95 | $ sudo /sbin/modprobe isgx
96 | ```
97 | On SUSE, you need to add the '--allow-unsupported' flag when executing the 'modprobe' command during SGX driver installation and on each reboot:
98 | ```
99 | $ sudo /sbin/modprobe isgx --allow-unsupported
100 | ```
101 |
102 | ### Uninstall the Intel(R) SGX Driver
103 | Before uninstalling the Intel(R) SGX driver, make sure the aesmd service is stopped. See the topic *Start or Stop aesmd Service* for how to stop the aesmd service.
104 | To uninstall the Intel(R) SGX driver, enter the following commands:
105 | ```
106 | $ sudo /sbin/modprobe -r isgx
107 | $ sudo rm -rf "/lib/modules/"`uname -r`"/kernel/drivers/intel/sgx"
108 | $ sudo /sbin/depmod
109 | $ sudo /bin/sed -i '/^isgx$/d' /etc/modules
110 | ```
111 |
--------------------------------------------------------------------------------
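The prerequisite note in README.md above points at the SDM for confirming that the CPU exposes SGX. As a minimal sketch of that check, assuming a GCC/Clang toolchain that provides `<cpuid.h>` (this program is illustrative and not part of the repository), the following reads the SGX feature flag (CPUID leaf 7, EBX bit 2) and, if present, the SGX capability leaf 0x12 that the driver itself queries as `SGX_CPUID`. It only reflects CPU support; BIOS enablement is verified separately by the driver at probe time through the IA32_FEATURE_CONTROL MSR.

```c
/*
 * Illustrative userspace check that the CPU reports SGX before building and
 * loading isgx.ko. CPUID.(EAX=07H,ECX=0):EBX bit 2 is the SGX feature flag;
 * leaf 0x12 (SGX_CPUID in sgx_main.c) enumerates capabilities.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) ||
	    !(ebx & (1u << 2))) {
		puts("SGX is not reported by CPUID");
		return 1;
	}

	/* Sub-leaf 0 of leaf 0x12: bit 0 = SGX1, bit 1 = SGX2 support. */
	__get_cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx);
	printf("SGX supported (SGX1=%u, SGX2=%u)\n", eax & 1, (eax >> 1) & 1);
	return 0;
}
```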
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Report a Vulnerability
4 |
5 | Please report security issues or vulnerabilities to the [Intel Security Center].
6 |
7 | For more information on how Intel works to resolve security issues, see
8 | [Vulnerability Handling Guidelines].
9 |
10 | [Intel Security Center]:https://www.intel.com/security
11 |
12 | [Vulnerability Handling Guidelines]:https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html
13 |
--------------------------------------------------------------------------------
/inker2ext/internal-to-external-tree-changes.patch:
--------------------------------------------------------------------------------
1 | From 2ff2ed01f25da0e87eb7891594f32469eae1b0bc Mon Sep 17 00:00:00 2001
2 | From: Serge Ayoun
3 | Date: Mon, 23 Oct 2017 17:30:10 +0300
4 | Subject: [PATCH] in kernel code to out of tree driver patch.
5 |
6 | ---
7 | drivers/platform/x86/intel_sgx/Makefile | 37 ++--
8 | drivers/platform/x86/intel_sgx/sgx.h | 6 +-
9 | drivers/platform/x86/intel_sgx/sgx_arch.h | 269 ++++++++++++++++++++++++
10 | drivers/platform/x86/intel_sgx/sgx_asm.h | 233 ++++++++++++++++++++
11 | drivers/platform/x86/intel_sgx/sgx_encl.c | 13 +-
12 | drivers/platform/x86/intel_sgx/sgx_ioctl.c | 6 +-
13 | drivers/platform/x86/intel_sgx/sgx_main.c | 151 ++++---------
14 | drivers/platform/x86/intel_sgx/sgx_page_cache.c | 6 +-
15 | drivers/platform/x86/intel_sgx/sgx_user.h | 139 ++++++++++++
16 | drivers/platform/x86/intel_sgx/sgx_util.c | 6 +-
17 | drivers/platform/x86/intel_sgx/sgx_vma.c | 11 +
18 | 11 files changed, 749 insertions(+), 128 deletions(-)
19 | create mode 100644 drivers/platform/x86/intel_sgx/sgx_arch.h
20 | create mode 100644 drivers/platform/x86/intel_sgx/sgx_asm.h
21 | create mode 100644 drivers/platform/x86/intel_sgx/sgx_user.h
22 |
23 | diff --git a/drivers/platform/x86/intel_sgx/Makefile b/drivers/platform/x86/intel_sgx/Makefile
24 | index 92af946..4b5edaf 100644
25 | --- a/drivers/platform/x86/intel_sgx/Makefile
26 | +++ b/drivers/platform/x86/intel_sgx/Makefile
27 | @@ -1,13 +1,24 @@
28 | -#
29 | -# Intel SGX
30 | -#
31 | -
32 | -obj-$(CONFIG_INTEL_SGX) += intel_sgx.o
33 | -
34 | -intel_sgx-$(CONFIG_INTEL_SGX) += \
35 | - sgx_ioctl.o \
36 | - sgx_encl.o \
37 | - sgx_main.o \
38 | - sgx_page_cache.o \
39 | - sgx_util.o \
40 | - sgx_vma.o \
41 | +ifneq ($(KERNELRELEASE),)
42 | + isgx-y := \
43 | + sgx_main.o \
44 | + sgx_page_cache.o \
45 | + sgx_ioctl.o \
46 | + sgx_vma.o \
47 | + sgx_util.o\
48 | + sgx_encl.o
49 | + obj-m += isgx.o
50 | +else
51 | +KDIR := /lib/modules/$(shell uname -r)/build
52 | +PWD := $(shell pwd)
53 | +
54 | +default:
55 | + $(MAKE) -C $(KDIR) SUBDIRS=$(PWD) CFLAGS_MODULE="-DDEBUG -g -O0" modules
56 | +
57 | +install: default
58 | + $(MAKE) INSTALL_MOD_DIR=kernel/drivers/intel/sgx -C $(KDIR) M=$(PWD) modules_install
59 | + sh -c "cat /etc/modules | grep -Fxq isgx || echo isgx >> /etc/modules"
60 | +
61 | +endif
62 | +
63 | +clean:
64 | + rm -vrf *.o *.ko *.order *.symvers *.mod.c .tmp_versions .*o.cmd
65 | diff --git a/drivers/platform/x86/intel_sgx/sgx.h b/drivers/platform/x86/intel_sgx/sgx.h
66 | index 24140a3..bfc24c0 100644
67 | --- a/drivers/platform/x86/intel_sgx/sgx.h
68 | +++ b/drivers/platform/x86/intel_sgx/sgx.h
69 | @@ -60,15 +60,17 @@
70 | #ifndef __ARCH_INTEL_SGX_H__
71 | #define __ARCH_INTEL_SGX_H__
72 |
73 | -#include
74 | +#include "sgx_asm.h"
75 | #include
76 | +#include
77 | #include
78 | #include
79 | #include
80 | #include
81 | #include
82 | #include
83 | -#include
84 | +#include "sgx_arch.h"
85 | +#include "sgx_user.h"
86 |
87 | #define SGX_EINIT_SPIN_COUNT 20
88 | #define SGX_EINIT_SLEEP_COUNT 50
89 | diff --git a/drivers/platform/x86/intel_sgx/sgx_arch.h b/drivers/platform/x86/intel_sgx/sgx_arch.h
90 | new file mode 100644
91 | index 0000000..dcb620e
92 | --- /dev/null
93 | +++ b/drivers/platform/x86/intel_sgx/sgx_arch.h
94 | @@ -0,0 +1,269 @@
95 | +/*
96 | + * This file is provided under a dual BSD/GPLv2 license. When using or
97 | + * redistributing this file, you may do so under either license.
98 | + *
99 | + * GPL LICENSE SUMMARY
100 | + *
101 | + * Copyright(c) 2016-2017 Intel Corporation.
102 | + *
103 | + * This program is free software; you can redistribute it and/or modify
104 | + * it under the terms of version 2 of the GNU General Public License as
105 | + * published by the Free Software Foundation.
106 | + *
107 | + * This program is distributed in the hope that it will be useful, but
108 | + * WITHOUT ANY WARRANTY; without even the implied warranty of
109 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
110 | + * General Public License for more details.
111 | + *
112 | + * Contact Information:
113 | + * Jarkko Sakkinen
114 | + * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
115 | + *
116 | + * BSD LICENSE
117 | + *
118 | + * Copyright(c) 2016-2017 Intel Corporation.
119 | + *
120 | + * Redistribution and use in source and binary forms, with or without
121 | + * modification, are permitted provided that the following conditions
122 | + * are met:
123 | + *
124 | + * * Redistributions of source code must retain the above copyright
125 | + * notice, this list of conditions and the following disclaimer.
126 | + * * Redistributions in binary form must reproduce the above copyright
127 | + * notice, this list of conditions and the following disclaimer in
128 | + * the documentation and/or other materials provided with the
129 | + * distribution.
130 | + * * Neither the name of Intel Corporation nor the names of its
131 | + * contributors may be used to endorse or promote products derived
132 | + * from this software without specific prior written permission.
133 | + *
134 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
135 | + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
136 | + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
137 | + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
138 | + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
139 | + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
140 | + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
141 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
142 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
143 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
144 | + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
145 | + *
146 | + * Authors:
147 | + *
148 | + * Jarkko Sakkinen
149 | + */
150 | +
151 | +#include
152 | +#ifndef _ASM_X86_SGX_ARCH_H
153 | +#define _ASM_X86_SGX_ARCH_H
154 | +
155 | +#define SGX_SSA_GPRS_SIZE 182
156 | +#define SGX_SSA_MISC_EXINFO_SIZE 16
157 | +
158 | +enum sgx_misc {
159 | + SGX_MISC_EXINFO = 0x01,
160 | +};
161 | +
162 | +#define SGX_MISC_RESERVED_MASK 0xFFFFFFFFFFFFFFFEL
163 | +
164 | +enum sgx_attribute {
165 | + SGX_ATTR_DEBUG = 0x02,
166 | + SGX_ATTR_MODE64BIT = 0x04,
167 | + SGX_ATTR_PROVISIONKEY = 0x10,
168 | + SGX_ATTR_EINITTOKENKEY = 0x20,
169 | +};
170 | +
171 | +#define SGX_ATTR_RESERVED_MASK 0xFFFFFFFFFFFFFFC9L
172 | +
173 | +#define SGX_SECS_RESERVED1_SIZE 24
174 | +#define SGX_SECS_RESERVED2_SIZE 32
175 | +#define SGX_SECS_RESERVED3_SIZE 96
176 | +#define SGX_SECS_RESERVED4_SIZE 3836
177 | +
178 | +struct sgx_secs {
179 | + uint64_t size;
180 | + uint64_t base;
181 | + uint32_t ssaframesize;
182 | + uint32_t miscselect;
183 | + uint8_t reserved1[SGX_SECS_RESERVED1_SIZE];
184 | + uint64_t attributes;
185 | + uint64_t xfrm;
186 | + uint32_t mrenclave[8];
187 | + uint8_t reserved2[SGX_SECS_RESERVED2_SIZE];
188 | + uint32_t mrsigner[8];
189 | + uint8_t reserved3[SGX_SECS_RESERVED3_SIZE];
190 | + uint16_t isvvprodid;
191 | + uint16_t isvsvn;
192 | + uint8_t reserved4[SGX_SECS_RESERVED4_SIZE];
193 | +};
194 | +
195 | +enum sgx_tcs_flags {
196 | + SGX_TCS_DBGOPTIN = 0x01, /* cleared on EADD */
197 | +};
198 | +
199 | +#define SGX_TCS_RESERVED_MASK 0xFFFFFFFFFFFFFFFEL
200 | +
201 | +struct sgx_tcs {
202 | + uint64_t state;
203 | + uint64_t flags;
204 | + uint64_t ossa;
205 | + uint32_t cssa;
206 | + uint32_t nssa;
207 | + uint64_t oentry;
208 | + uint64_t aep;
209 | + uint64_t ofsbase;
210 | + uint64_t ogsbase;
211 | + uint32_t fslimit;
212 | + uint32_t gslimit;
213 | + uint64_t reserved[503];
214 | +};
215 | +
216 | +struct sgx_pageinfo {
217 | + uint64_t linaddr;
218 | + uint64_t srcpge;
219 | + union {
220 | + uint64_t secinfo;
221 | + uint64_t pcmd;
222 | + };
223 | + uint64_t secs;
224 | +} __attribute__((aligned(32)));
225 | +
226 | +
227 | +#define SGX_SECINFO_PERMISSION_MASK 0x0000000000000007L
228 | +#define SGX_SECINFO_PAGE_TYPE_MASK 0x000000000000FF00L
229 | +#define SGX_SECINFO_RESERVED_MASK 0xFFFFFFFFFFFF00F8L
230 | +
231 | +enum sgx_page_type {
232 | + SGX_PAGE_TYPE_SECS = 0x00,
233 | + SGX_PAGE_TYPE_TCS = 0x01,
234 | + SGX_PAGE_TYPE_REG = 0x02,
235 | + SGX_PAGE_TYPE_VA = 0x03,
236 | +};
237 | +
238 | +enum sgx_secinfo_flags {
239 | + SGX_SECINFO_R = 0x01,
240 | + SGX_SECINFO_W = 0x02,
241 | + SGX_SECINFO_X = 0x04,
242 | + SGX_SECINFO_SECS = (SGX_PAGE_TYPE_SECS << 8),
243 | + SGX_SECINFO_TCS = (SGX_PAGE_TYPE_TCS << 8),
244 | + SGX_SECINFO_REG = (SGX_PAGE_TYPE_REG << 8),
245 | +};
246 | +
247 | +struct sgx_secinfo {
248 | + uint64_t flags;
249 | + uint64_t reserved[7];
250 | +} __attribute__((aligned(64)));
251 | +
252 | +struct sgx_pcmd {
253 | + struct sgx_secinfo secinfo;
254 | + uint64_t enclave_id;
255 | + uint8_t reserved[40];
256 | + uint8_t mac[16];
257 | +};
258 | +
259 | +#define SGX_MODULUS_SIZE 384
260 | +
261 | +struct sgx_sigstruct_header {
262 | + uint64_t header1[2];
263 | + uint32_t vendor;
264 | + uint32_t date;
265 | + uint64_t header2[2];
266 | + uint32_t swdefined;
267 | + uint8_t reserved1[84];
268 | +};
269 | +
270 | +struct sgx_sigstruct_body {
271 | + uint32_t miscselect;
272 | + uint32_t miscmask;
273 | + uint8_t reserved2[20];
274 | + uint64_t attributes;
275 | + uint64_t xfrm;
276 | + uint8_t attributemask[16];
277 | + uint8_t mrenclave[32];
278 | + uint8_t reserved3[32];
279 | + uint16_t isvprodid;
280 | + uint16_t isvsvn;
281 | +} __attribute__((__packed__));
282 | +
283 | +struct sgx_sigstruct {
284 | + struct sgx_sigstruct_header header;
285 | + uint8_t modulus[SGX_MODULUS_SIZE];
286 | + uint32_t exponent;
287 | + uint8_t signature[SGX_MODULUS_SIZE];
288 | + struct sgx_sigstruct_body body;
289 | + uint8_t reserved4[12];
290 | + uint8_t q1[SGX_MODULUS_SIZE];
291 | + uint8_t q2[SGX_MODULUS_SIZE];
292 | +};
293 | +
294 | +struct sgx_sigstruct_payload {
295 | + struct sgx_sigstruct_header header;
296 | + struct sgx_sigstruct_body body;
297 | +};
298 | +
299 | +struct sgx_einittoken_payload {
300 | + uint32_t valid;
301 | + uint32_t reserved1[11];
302 | + uint64_t attributes;
303 | + uint64_t xfrm;
304 | + uint8_t mrenclave[32];
305 | + uint8_t reserved2[32];
306 | + uint8_t mrsigner[32];
307 | + uint8_t reserved3[32];
308 | +};
309 | +
310 | +struct sgx_einittoken {
311 | + struct sgx_einittoken_payload payload;
312 | + uint8_t cpusvnle[16];
313 | + uint16_t isvprodidle;
314 | + uint16_t isvsvnle;
315 | + uint8_t reserved2[24];
316 | + uint32_t maskedmiscselectle;
317 | + uint64_t maskedattributesle;
318 | + uint64_t maskedxfrmle;
319 | + uint8_t keyid[32];
320 | + uint8_t mac[16];
321 | +};
322 | +
323 | +struct sgx_report {
324 | + uint8_t cpusvn[16];
325 | + uint32_t miscselect;
326 | + uint8_t reserved1[28];
327 | + uint64_t attributes;
328 | + uint64_t xfrm;
329 | + uint8_t mrenclave[32];
330 | + uint8_t reserved2[32];
331 | + uint8_t mrsigner[32];
332 | + uint8_t reserved3[96];
333 | + uint16_t isvprodid;
334 | + uint16_t isvsvn;
335 | + uint8_t reserved4[60];
336 | + uint8_t reportdata[64];
337 | + uint8_t keyid[32];
338 | + uint8_t mac[16];
339 | +};
340 | +
341 | +struct sgx_targetinfo {
342 | + uint8_t mrenclave[32];
343 | + uint64_t attributes;
344 | + uint64_t xfrm;
345 | + uint8_t reserved1[4];
346 | + uint32_t miscselect;
347 | + uint8_t reserved2[456];
348 | +};
349 | +
350 | +struct sgx_keyrequest {
351 | + uint16_t keyname;
352 | + uint16_t keypolicy;
353 | + uint16_t isvsvn;
354 | + uint16_t reserved1;
355 | + uint8_t cpusvn[16];
356 | + uint64_t attributemask;
357 | + uint64_t xfrmmask;
358 | + uint8_t keyid[32];
359 | + uint32_t miscmask;
360 | + uint8_t reserved2[436];
361 | +};
362 | +
363 | +#endif /* _ASM_X86_SGX_ARCH_H */
364 | diff --git a/drivers/platform/x86/intel_sgx/sgx_asm.h b/drivers/platform/x86/intel_sgx/sgx_asm.h
365 | new file mode 100644
366 | index 0000000..b786f34
367 | --- /dev/null
368 | +++ b/drivers/platform/x86/intel_sgx/sgx_asm.h
369 | @@ -0,0 +1,233 @@
370 | +/*
371 | + * This file is provided under a dual BSD/GPLv2 license. When using or
372 | + * redistributing this file, you may do so under either license.
373 | + *
374 | + * GPL LICENSE SUMMARY
375 | + *
376 | + * Copyright(c) 2016-2017 Intel Corporation.
377 | + *
378 | + * This program is free software; you can redistribute it and/or modify
379 | + * it under the terms of version 2 of the GNU General Public License as
380 | + * published by the Free Software Foundation.
381 | + *
382 | + * This program is distributed in the hope that it will be useful, but
383 | + * WITHOUT ANY WARRANTY; without even the implied warranty of
384 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
385 | + * General Public License for more details.
386 | + *
387 | + * Contact Information:
388 | + * Jarkko Sakkinen
389 | + * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
390 | + *
391 | + * BSD LICENSE
392 | + *
393 | + * Copyright(c) 2016-2017 Intel Corporation.
394 | + *
395 | + * Redistribution and use in source and binary forms, with or without
396 | + * modification, are permitted provided that the following conditions
397 | + * are met:
398 | + *
399 | + * * Redistributions of source code must retain the above copyright
400 | + * notice, this list of conditions and the following disclaimer.
401 | + * * Redistributions in binary form must reproduce the above copyright
402 | + * notice, this list of conditions and the following disclaimer in
403 | + * the documentation and/or other materials provided with the
404 | + * distribution.
405 | + * * Neither the name of Intel Corporation nor the names of its
406 | + * contributors may be used to endorse or promote products derived
407 | + * from this software without specific prior written permission.
408 | + *
409 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
410 | + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
411 | + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
412 | + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
413 | + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
414 | + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
415 | + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
416 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
417 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
418 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
419 | + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
420 | + *
421 | + * Authors:
422 | + *
423 | + * Jarkko Sakkinen
424 | + * Suresh Siddha
425 | + */
426 | +
427 | +#ifndef _ASM_X86_SGX_H
428 | +#define _ASM_X86_SGX_H
429 | +
430 | +#include "sgx_arch.h"
431 | +#include
432 | +#include
433 | +#include
434 | +#include
435 | +
436 | +#define SGX_CPUID 0x12
437 | +
438 | +enum sgx_cpuid {
439 | + SGX_CPUID_CAPABILITIES = 0,
440 | + SGX_CPUID_ATTRIBUTES = 1,
441 | + SGX_CPUID_EPC_BANKS = 2,
442 | +};
443 | +
444 | +enum sgx_commands {
445 | + ECREATE = 0x0,
446 | + EADD = 0x1,
447 | + EINIT = 0x2,
448 | + EREMOVE = 0x3,
449 | + EDGBRD = 0x4,
450 | + EDGBWR = 0x5,
451 | + EEXTEND = 0x6,
452 | + ELDU = 0x8,
453 | + EBLOCK = 0x9,
454 | + EPA = 0xA,
455 | + EWB = 0xB,
456 | + ETRACK = 0xC,
457 | + EAUG = 0xD,
458 | + EMODPR = 0xE,
459 | + EMODT = 0xF,
460 | +};
461 | +
462 | +#ifdef CONFIG_X86_64
463 | +#define XAX "%%rax"
464 | +#else
465 | +#define XAX "%%eax"
466 | +#endif
467 | +
468 | +#define __encls_ret(rax, rbx, rcx, rdx) \
469 | + ({ \
470 | + int ret; \
471 | + asm volatile( \
472 | + "1: .byte 0x0f, 0x01, 0xcf;\n\t" \
473 | + "2:\n" \
474 | + ".section .fixup,\"ax\"\n" \
475 | + "3: mov $-14,"XAX"\n" \
476 | + " jmp 2b\n" \
477 | + ".previous\n" \
478 | + _ASM_EXTABLE(1b, 3b) \
479 | + : "=a"(ret) \
480 | + : "a"(rax), "b"(rbx), "c"(rcx), "d"(rdx) \
481 | + : "memory"); \
482 | + ret; \
483 | + })
484 | +
485 | +#define __encls(rax, rbx, rcx, rdx...) \
486 | + ({ \
487 | + int ret; \
488 | + asm volatile( \
489 | + "1: .byte 0x0f, 0x01, 0xcf;\n\t" \
490 | + " xor "XAX","XAX"\n" \
491 | + "2:\n" \
492 | + ".section .fixup,\"ax\"\n" \
493 | + "3: mov $-14,"XAX"\n" \
494 | + " jmp 2b\n" \
495 | + ".previous\n" \
496 | + _ASM_EXTABLE(1b, 3b) \
497 | + : "=a"(ret), "=b"(rbx), "=c"(rcx) \
498 | + : "a"(rax), "b"(rbx), "c"(rcx), rdx \
499 | + : "memory"); \
500 | + ret; \
501 | + })
502 | +
503 | +static inline unsigned long __ecreate(struct sgx_pageinfo *pginfo, void *secs)
504 | +{
505 | + return __encls(ECREATE, pginfo, secs, "d"(0));
506 | +}
507 | +
508 | +static inline int __eextend(void *secs, void *epc)
509 | +{
510 | + return __encls(EEXTEND, secs, epc, "d"(0));
511 | +}
512 | +
513 | +static inline int __eadd(struct sgx_pageinfo *pginfo, void *epc)
514 | +{
515 | + return __encls(EADD, pginfo, epc, "d"(0));
516 | +}
517 | +
518 | +static inline int __einit(void *sigstruct, struct sgx_einittoken *einittoken,
519 | + void *secs)
520 | +{
521 | + return __encls_ret(EINIT, sigstruct, secs, einittoken);
522 | +}
523 | +
524 | +static inline int __eremove(void *epc)
525 | +{
526 | + unsigned long rbx = 0;
527 | + unsigned long rdx = 0;
528 | +
529 | + return __encls_ret(EREMOVE, rbx, epc, rdx);
530 | +}
531 | +
532 | +static inline int __edbgwr(void *epc, unsigned long *data)
533 | +{
534 | + return __encls(EDGBWR, *data, epc, "d"(0));
535 | +}
536 | +
537 | +static inline int __edbgrd(void *epc, unsigned long *data)
538 | +{
539 | + unsigned long rbx = 0;
540 | + int ret;
541 | +
542 | + ret = __encls(EDGBRD, rbx, epc, "d"(0));
543 | + if (!ret)
544 | + *(unsigned long *) data = rbx;
545 | +
546 | + return ret;
547 | +}
548 | +
549 | +static inline int __etrack(void *epc)
550 | +{
551 | + unsigned long rbx = 0;
552 | + unsigned long rdx = 0;
553 | +
554 | + return __encls_ret(ETRACK, rbx, epc, rdx);
555 | +}
556 | +
557 | +static inline int __eldu(unsigned long rbx, unsigned long rcx,
558 | + unsigned long rdx)
559 | +{
560 | + return __encls_ret(ELDU, rbx, rcx, rdx);
561 | +}
562 | +
563 | +static inline int __eblock(unsigned long rcx)
564 | +{
565 | + unsigned long rbx = 0;
566 | + unsigned long rdx = 0;
567 | +
568 | + return __encls_ret(EBLOCK, rbx, rcx, rdx);
569 | +}
570 | +
571 | +static inline int __epa(void *epc)
572 | +{
573 | + unsigned long rbx = SGX_PAGE_TYPE_VA;
574 | +
575 | + return __encls(EPA, rbx, epc, "d"(0));
576 | +}
577 | +
578 | +static inline int __ewb(struct sgx_pageinfo *pginfo, void *epc, void *va)
579 | +{
580 | + return __encls_ret(EWB, pginfo, epc, va);
581 | +}
582 | +
583 | +static inline int __eaug(struct sgx_pageinfo *pginfo, void *epc)
584 | +{
585 | + return __encls(EAUG, pginfo, epc, "d"(0));
586 | +}
587 | +
588 | +static inline int __emodpr(struct sgx_secinfo *secinfo, void *epc)
589 | +{
590 | + unsigned long rdx = 0;
591 | +
592 | + return __encls_ret(EMODPR, secinfo, epc, rdx);
593 | +}
594 | +
595 | +static inline int __emodt(struct sgx_secinfo *secinfo, void *epc)
596 | +{
597 | + unsigned long rdx = 0;
598 | +
599 | + return __encls_ret(EMODT, secinfo, epc, rdx);
600 | +}
601 | +
602 | +#endif /* _ASM_X86_SGX_H */
603 | diff --git a/drivers/platform/x86/intel_sgx/sgx_encl.c b/drivers/platform/x86/intel_sgx/sgx_encl.c
604 | index 6f69126..2669509 100644
605 | --- a/drivers/platform/x86/intel_sgx/sgx_encl.c
606 | +++ b/drivers/platform/x86/intel_sgx/sgx_encl.c
607 | @@ -64,7 +64,12 @@
608 | #include
609 | #include
610 | #include
611 | -#include
612 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
613 | + #include
614 | +#else
615 | + #include
616 | +#endif
617 | +#include "linux/file.h"
618 | #include
619 | #include
620 | #include
621 | @@ -576,7 +581,7 @@ int sgx_encl_create(struct sgx_secs *secs)
622 | long ret;
623 |
624 | encl = sgx_encl_alloc(secs);
625 | - if (IS_ERR(secs))
626 | + if (IS_ERR(encl))
627 | return PTR_ERR(encl);
628 |
629 | secs_epc = sgx_alloc_page(0);
630 | @@ -634,8 +639,8 @@ int sgx_encl_create(struct sgx_secs *secs)
631 | }
632 |
633 | if (vma->vm_start != secs->base ||
634 | - vma->vm_end != (secs->base + secs->size) ||
635 | - vma->vm_pgoff != 0) {
636 | + vma->vm_end != (secs->base + secs->size)
637 | + /* vma->vm_pgoff != 0 */) {
638 | ret = -EINVAL;
639 | up_read(¤t->mm->mmap_sem);
640 | goto out;
641 | diff --git a/drivers/platform/x86/intel_sgx/sgx_ioctl.c b/drivers/platform/x86/intel_sgx/sgx_ioctl.c
642 | index af8b6b6..f7540fc 100644
643 | --- a/drivers/platform/x86/intel_sgx/sgx_ioctl.c
644 | +++ b/drivers/platform/x86/intel_sgx/sgx_ioctl.c
645 | @@ -64,7 +64,11 @@
646 | #include
647 | #include
648 | #include
649 | -#include
650 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
651 | + #include
652 | +#else
653 | + #include
654 | +#endif
655 | #include
656 | #include
657 | #include
658 | diff --git a/drivers/platform/x86/intel_sgx/sgx_main.c b/drivers/platform/x86/intel_sgx/sgx_main.c
659 | index dc50410..7ff3864 100644
660 | --- a/drivers/platform/x86/intel_sgx/sgx_main.c
661 | +++ b/drivers/platform/x86/intel_sgx/sgx_main.c
662 | @@ -58,16 +58,17 @@
663 | * Sean Christopherson
664 | */
665 |
666 | +#include "asm/msr-index.h"
667 | #include "sgx.h"
668 | #include
669 | #include
670 | #include
671 | +#include
672 | #include
673 | #include
674 | #include
675 | #include
676 | #include
677 | -#include
678 |
679 | #define DRV_DESCRIPTION "Intel SGX Driver"
680 | #define DRV_VERSION "0.10"
681 | @@ -75,6 +76,11 @@
682 | MODULE_DESCRIPTION(DRV_DESCRIPTION);
683 | MODULE_AUTHOR("Jarkko Sakkinen ");
684 | MODULE_VERSION(DRV_VERSION);
685 | +#ifndef X86_FEATURE_SGX
686 | + #define X86_FEATURE_SGX (9 * 32 + 2)
687 | +#endif
688 | +
689 | +#define FEATURE_CONTROL_SGX_ENABLE (1<<18)
690 |
691 | /*
692 | * Global data.
693 | @@ -152,6 +158,13 @@ static const struct file_operations sgx_fops = {
694 | .get_unmapped_area = sgx_get_unmapped_area,
695 | };
696 |
697 | +static struct miscdevice sgx_dev = {
698 | + .minor = MISC_DYNAMIC_MINOR,
699 | + .name = "isgx",
700 | + .fops = &sgx_fops,
701 | + .mode = 0666,
702 | +};
703 | +
704 | static int sgx_pm_suspend(struct device *dev)
705 | {
706 | struct sgx_tgid_ctx *ctx;
707 | @@ -170,71 +183,8 @@ static int sgx_pm_suspend(struct device *dev)
708 |
709 | static SIMPLE_DEV_PM_OPS(sgx_drv_pm, sgx_pm_suspend, NULL);
710 |
711 | -static struct bus_type sgx_bus_type = {
712 | - .name = "sgx",
713 | -};
714 | -
715 | -struct sgx_context {
716 | - struct device dev;
717 | - struct cdev cdev;
718 | -};
719 | -
720 | -static dev_t sgx_devt;
721 | -
722 | -static void sgx_dev_release(struct device *dev)
723 | -{
724 | - struct sgx_context *ctx = container_of(dev, struct sgx_context, dev);
725 | -
726 | - kfree(ctx);
727 | -}
728 | -
729 | -static struct sgx_context *sgx_ctx_alloc(struct device *parent)
730 | -{
731 | - struct sgx_context *ctx;
732 | -
733 | - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
734 | - if (!ctx)
735 | - return ERR_PTR(-ENOMEM);
736 | -
737 | - device_initialize(&ctx->dev);
738 | -
739 | - ctx->dev.bus = &sgx_bus_type;
740 | - ctx->dev.parent = parent;
741 | - ctx->dev.devt = MKDEV(MAJOR(sgx_devt), 0);
742 | - ctx->dev.release = sgx_dev_release;
743 | -
744 | - dev_set_name(&ctx->dev, "sgx");
745 | -
746 | - cdev_init(&ctx->cdev, &sgx_fops);
747 | - ctx->cdev.owner = THIS_MODULE;
748 | -
749 | - dev_set_drvdata(parent, ctx);
750 | -
751 | - return ctx;
752 | -}
753 | -
754 | -static struct sgx_context *sgxm_ctx_alloc(struct device *parent)
755 | -{
756 | - struct sgx_context *ctx;
757 | - int rc;
758 | -
759 | - ctx = sgx_ctx_alloc(parent);
760 | - if (IS_ERR(ctx))
761 | - return ctx;
762 | -
763 | - rc = devm_add_action_or_reset(parent, (void (*)(void *))put_device,
764 | - &ctx->dev);
765 | - if (rc) {
766 | - kfree(ctx);
767 | - return ERR_PTR(rc);
768 | - }
769 | -
770 | - return ctx;
771 | -}
772 | -
773 | static int sgx_dev_init(struct device *parent)
774 | {
775 | - struct sgx_context *sgx_dev;
776 | unsigned int eax, ebx, ecx, edx;
777 | unsigned long pa;
778 | unsigned long size;
779 | @@ -243,8 +193,6 @@ static int sgx_dev_init(struct device *parent)
780 |
781 | pr_info("intel_sgx: " DRV_DESCRIPTION " v" DRV_VERSION "\n");
782 |
783 | - sgx_dev = sgxm_ctx_alloc(parent);
784 | -
785 | cpuid_count(SGX_CPUID, SGX_CPUID_CAPABILITIES, &eax, &ebx, &ecx, &edx);
786 | /* Only allow misc bits supported by the driver. */
787 | sgx_misc_reserved = ~ebx | SGX_MISC_RESERVED_MASK;
788 | @@ -313,7 +261,13 @@ static int sgx_dev_init(struct device *parent)
789 | goto out_iounmap;
790 | }
791 |
792 | - ret = cdev_device_add(&sgx_dev->cdev, &sgx_dev->dev);
793 | + sgx_dev.parent = parent;
794 | + ret = misc_register(&sgx_dev);
795 | + if (ret) {
796 | + pr_err("intel_sgx: misc_register() failed\n");
797 | + goto out_workqueue;
798 | + }
799 | +
800 | if (ret)
801 | goto out_workqueue;
802 |
803 | @@ -328,10 +282,16 @@ static int sgx_dev_init(struct device *parent)
804 | return ret;
805 | }
806 |
807 | +static atomic_t sgx_init_flag = ATOMIC_INIT(0);
808 | static int sgx_drv_probe(struct platform_device *pdev)
809 | {
810 | unsigned int eax, ebx, ecx, edx;
811 | unsigned long fc;
812 | + if (atomic_cmpxchg(&sgx_init_flag, 0, 1)) {
813 | + pr_warn("intel_sgx: second initialization call skipped\n");
814 | + return 0;
815 | + }
816 | +
817 |
818 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
819 | return -ENODEV;
820 | @@ -370,11 +330,14 @@ static int sgx_drv_probe(struct platform_device *pdev)
821 |
822 | static int sgx_drv_remove(struct platform_device *pdev)
823 | {
824 | - struct device *parent = &pdev->dev;
825 | - struct sgx_context *ctx = dev_get_drvdata(parent);
826 | int i;
827 |
828 | - cdev_device_del(&ctx->cdev, &ctx->dev);
829 | + if (!atomic_cmpxchg(&sgx_init_flag, 1, 0)) {
830 | + pr_warn("intel_sgx: second release call skipped\n");
831 | + return 0;
832 | + }
833 | +
834 | + misc_deregister(&sgx_dev);
835 |
836 | destroy_workqueue(sgx_add_page_wq);
837 | #ifdef CONFIG_X86_64
838 | @@ -404,48 +367,24 @@ static struct platform_driver sgx_drv = {
839 | },
840 | };
841 |
842 | -static int __init sgx_drv_subsys_init(void)
843 | +static struct platform_device *pdev;
844 | +int init_sgx_module(void)
845 | {
846 | - int ret;
847 | -
848 | - ret = bus_register(&sgx_bus_type);
849 | - if (ret)
850 | - return ret;
851 | -
852 | - ret = alloc_chrdev_region(&sgx_devt, 0, 1, "sgx");
853 | - if (ret < 0) {
854 | - bus_unregister(&sgx_bus_type);
855 | - return ret;
856 | - }
857 | -
858 | + platform_driver_register(&sgx_drv);
859 | + pdev = platform_device_register_simple("intel_sgx", 0, NULL, 0);
860 | + if (IS_ERR(pdev))
861 | + pr_err("platform_device_register_simple failed\n");
862 | return 0;
863 | }
864 |
865 | -static void sgx_drv_subsys_exit(void)
866 | -{
867 | - bus_unregister(&sgx_bus_type);
868 | - unregister_chrdev_region(sgx_devt, 1);
869 | -}
870 | -
871 | -static int __init sgx_drv_init(void)
872 | -{
873 | - int ret;
874 | -
875 | - ret = sgx_drv_subsys_init();
876 | -
877 | - ret = platform_driver_register(&sgx_drv);
878 | - if (ret)
879 | - sgx_drv_subsys_exit();
880 | -
881 | - return ret;
882 | -}
883 | -module_init(sgx_drv_init);
884 | -
885 | -static void __exit sgx_drv_exit(void)
886 | +void cleanup_sgx_module(void)
887 | {
888 | + dev_set_uevent_suppress(&pdev->dev, true);
889 | + platform_device_unregister(pdev);
890 | platform_driver_unregister(&sgx_drv);
891 | - sgx_drv_subsys_exit();
892 | }
893 | -module_exit(sgx_drv_exit);
894 | +
895 | +module_init(init_sgx_module);
896 | +module_exit(cleanup_sgx_module);
897 |
898 | MODULE_LICENSE("Dual BSD/GPL");
899 | diff --git a/drivers/platform/x86/intel_sgx/sgx_page_cache.c b/drivers/platform/x86/intel_sgx/sgx_page_cache.c
900 | index f8883d2..8472037 100644
901 | --- a/drivers/platform/x86/intel_sgx/sgx_page_cache.c
902 | +++ b/drivers/platform/x86/intel_sgx/sgx_page_cache.c
903 | @@ -63,7 +63,11 @@
904 | #include
905 | #include
906 | #include
907 | -#include
908 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
909 | + #include
910 | +#else
911 | + #include
912 | +#endif
913 | #include
914 |
915 | #define SGX_NR_LOW_EPC_PAGES_DEFAULT 32
916 | diff --git a/drivers/platform/x86/intel_sgx/sgx_user.h b/drivers/platform/x86/intel_sgx/sgx_user.h
917 | new file mode 100644
918 | index 0000000..a15f87b
919 | --- /dev/null
920 | +++ b/drivers/platform/x86/intel_sgx/sgx_user.h
921 | @@ -0,0 +1,139 @@
922 | +/*
923 | + * This file is provided under a dual BSD/GPLv2 license. When using or
924 | + * redistributing this file, you may do so under either license.
925 | + *
926 | + * GPL LICENSE SUMMARY
927 | + *
928 | + * Copyright(c) 2016 Intel Corporation.
929 | + *
930 | + * This program is free software; you can redistribute it and/or modify
931 | + * it under the terms of version 2 of the GNU General Public License as
932 | + * published by the Free Software Foundation.
933 | + *
934 | + * This program is distributed in the hope that it will be useful, but
935 | + * WITHOUT ANY WARRANTY; without even the implied warranty of
936 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
937 | + * General Public License for more details.
938 | + *
939 | + * Contact Information:
940 | + * Jarkko Sakkinen
941 | + * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
942 | + *
943 | + * BSD LICENSE
944 | + *
945 | + * Copyright(c) 2016 Intel Corporation.
946 | + *
947 | + * Redistribution and use in source and binary forms, with or without
948 | + * modification, are permitted provided that the following conditions
949 | + * are met:
950 | + *
951 | + * * Redistributions of source code must retain the above copyright
952 | + * notice, this list of conditions and the following disclaimer.
953 | + * * Redistributions in binary form must reproduce the above copyright
954 | + * notice, this list of conditions and the following disclaimer in
955 | + * the documentation and/or other materials provided with the
956 | + * distribution.
957 | + * * Neither the name of Intel Corporation nor the names of its
958 | + * contributors may be used to endorse or promote products derived
959 | + * from this software without specific prior written permission.
960 | + *
961 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
962 | + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
963 | + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
964 | + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
965 | + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
966 | + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
967 | + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
968 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
969 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
970 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
971 | + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
972 | + *
973 | + * Authors:
974 | + *
975 | + * Jarkko Sakkinen
976 | + * Suresh Siddha
977 | + */
978 | +
979 | +#ifndef _UAPI_ASM_X86_SGX_H
980 | +#define _UAPI_ASM_X86_SGX_H
981 | +
982 | +#include
983 | +#include
984 | +
985 | +#define SGX_MAGIC 0xA4
986 | +
987 | +#define SGX_IOC_ENCLAVE_CREATE \
988 | + _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
989 | +#define SGX_IOC_ENCLAVE_ADD_PAGE \
990 | + _IOW(SGX_MAGIC, 0x01, struct sgx_enclave_add_page)
991 | +#define SGX_IOC_ENCLAVE_INIT \
992 | + _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
993 | +
994 | +/* SGX leaf instruction return values */
995 | +#define SGX_SUCCESS 0
996 | +#define SGX_INVALID_SIG_STRUCT 1
997 | +#define SGX_INVALID_ATTRIBUTE 2
998 | +#define SGX_BLKSTATE 3
999 | +#define SGX_INVALID_MEASUREMENT 4
1000 | +#define SGX_NOTBLOCKABLE 5
1001 | +#define SGX_PG_INVLD 6
1002 | +#define SGX_LOCKFAIL 7
1003 | +#define SGX_INVALID_SIGNATURE 8
1004 | +#define SGX_MAC_COMPARE_FAIL 9
1005 | +#define SGX_PAGE_NOT_BLOCKED 10
1006 | +#define SGX_NOT_TRACKED 11
1007 | +#define SGX_VA_SLOT_OCCUPIED 12
1008 | +#define SGX_CHILD_PRESENT 13
1009 | +#define SGX_ENCLAVE_ACT 14
1010 | +#define SGX_ENTRYEPOCH_LOCKED 15
1011 | +#define SGX_INVALID_EINITTOKEN 16
1012 | +#define SGX_PREV_TRK_INCMPL 17
1013 | +#define SGX_PG_IS_SECS 18
1014 | +#define SGX_INVALID_CPUSVN 32
1015 | +#define SGX_INVALID_ISVSVN 64
1016 | +#define SGX_UNMASKED_EVENT 128
1017 | +#define SGX_INVALID_KEYNAME 256
1018 | +
1019 | +/* IOCTL return values */
1020 | +#define SGX_POWER_LOST_ENCLAVE 0x40000000
1021 | +#define SGX_LE_ROLLBACK 0x40000001
1022 | +
1023 | +/**
1024 | + * struct sgx_enclave_create - parameter structure for the
1025 | + * %SGX_IOC_ENCLAVE_CREATE ioctl
1026 | + * @src: address for the SECS page data
1027 | + */
1028 | +struct sgx_enclave_create {
1029 | + __u64 src;
1030 | +} __attribute__((__packed__));
1031 | +
1032 | +/**
1033 | + * struct sgx_enclave_add_page - parameter structure for the
1034 | + * %SGX_IOC_ENCLAVE_ADD_PAGE ioctl
1035 | + * @addr: address in the ELRANGE
1036 | + * @src: address for the page data
1037 | + * @secinfo: address for the SECINFO data
1038 | + * @mrmask: bitmask for the 256 byte chunks that are to be measured
1039 | + */
1040 | +struct sgx_enclave_add_page {
1041 | + __u64 addr;
1042 | + __u64 src;
1043 | + __u64 secinfo;
1044 | + __u16 mrmask;
1045 | +} __attribute__((__packed__));
1046 | +
1047 | +/**
1048 | + * struct sgx_enclave_init - parameter structure for the
1049 | + * %SGX_IOC_ENCLAVE_INIT ioctl
1050 | + * @addr: address in the ELRANGE
1051 | + * @sigstruct: address for the page data
1052 | + * @einittoken: EINITTOKEN
1053 | + */
1054 | +struct sgx_enclave_init {
1055 | + __u64 addr;
1056 | + __u64 sigstruct;
1057 | + __u64 einittoken;
1058 | +} __attribute__((__packed__));
1059 | +
1060 | +#endif /* _UAPI_ASM_X86_SGX_H */
1061 | diff --git a/drivers/platform/x86/intel_sgx/sgx_util.c b/drivers/platform/x86/intel_sgx/sgx_util.c
1062 | index 6ef7949..ff0e40a 100644
1063 | --- a/drivers/platform/x86/intel_sgx/sgx_util.c
1064 | +++ b/drivers/platform/x86/intel_sgx/sgx_util.c
1065 | @@ -61,7 +61,11 @@
1066 | #include "sgx.h"
1067 | #include
1068 | #include
1069 | -#include
1070 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
1071 | + #include
1072 | +#else
1073 | + #include
1074 | +#endif
1075 |
1076 | struct page *sgx_get_backing(struct sgx_encl *encl,
1077 | struct sgx_encl_page *entry,
1078 | diff --git a/drivers/platform/x86/intel_sgx/sgx_vma.c b/drivers/platform/x86/intel_sgx/sgx_vma.c
1079 | index 54b588f..dae9eb9 100644
1080 | --- a/drivers/platform/x86/intel_sgx/sgx_vma.c
1081 | +++ b/drivers/platform/x86/intel_sgx/sgx_vma.c
1082 | @@ -96,10 +96,21 @@ static void sgx_vma_close(struct vm_area_struct *vma)
1083 | kref_put(&encl->refcount, sgx_encl_release);
1084 | }
1085 |
1086 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
1087 | static int sgx_vma_fault(struct vm_fault *vmf)
1088 | {
1089 | struct vm_area_struct *vma = vmf->vma;
1090 | +#else
1091 | +static int sgx_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1092 | +{
1093 | +#endif
1094 | +
1095 | +
1096 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
1097 | unsigned long addr = (unsigned long)vmf->address;
1098 | +#else
1099 | + unsigned long addr = (unsigned long) vmf->virtual_address;
1100 | +#endif
1101 | struct sgx_encl_page *entry;
1102 |
1103 | entry = sgx_fault_page(vma, addr, 0);
1104 | --
1105 | 2.7.4
1106 |
1107 |
--------------------------------------------------------------------------------
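For orientation, the sgx_user.h hunk above defines the driver's userspace ABI (ioctl numbers built from `SGX_MAGIC` plus packed parameter structs), and the sgx_main.c hunk registers a misc device named "isgx", i.e. /dev/isgx. The sketch below is illustrative rather than repository code: it shows the minimal shape of an `SGX_IOC_ENCLAVE_CREATE` call, assumes the caller has prepared a valid 4 KiB SECS image per sgx_arch.h (the zeroed buffer here would be rejected), and omits the ADD_PAGE/INIT steps and enclave memory mapping.

```c
/* Illustrative userspace caller for the UAPI shown in the patch above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define SGX_MAGIC 0xA4
#define SGX_IOC_ENCLAVE_CREATE _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)

/* Mirrors struct sgx_enclave_create from the sgx_user.h hunk. */
struct sgx_enclave_create {
	uint64_t src;			/* userspace address of the SECS page */
} __attribute__((__packed__));

int main(void)
{
	/* Placeholder SECS image; a real runtime fills size, base, attributes, ... */
	static uint8_t secs[4096] __attribute__((aligned(4096)));
	struct sgx_enclave_create parms = { .src = (uint64_t)(uintptr_t)secs };

	int fd = open("/dev/isgx", O_RDWR);	/* misc device registered in sgx_main.c */
	if (fd < 0) {
		perror("open /dev/isgx");
		return 1;
	}

	if (ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &parms) < 0)
		perror("SGX_IOC_ENCLAVE_CREATE");	/* expected with a zeroed SECS */

	close(fd);
	return 0;
}
```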
/inker2ext/kernel_2_extern.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # Should be run from the GitHub (out-of-tree) sgx driver root directory.
3 | # Assumes the in-kernel sgx master branch repo has been cloned.
4 | #
5 | # Usage:
6 | # kernel_2_extern.sh <in-kernel sgx driver directory>
7 | pa=`pwd`
8 |
9 | patchfile="$pa/inker2ext/internal-to-external-tree-changes.patch"
10 |
11 | if [ ! -f "$patchfile" ]; then
12 |     echo "Missing patch file: $patchfile"
13 |     echo "You should run the script from the out-of-tree driver repository root directory"
14 |     exit 1
15 | fi
16 |
17 | cd $1
18 | git apply $patchfile
19 |
20 | cp *.c $pa
21 | cp *.h $pa
22 | cp Makefile $pa
23 |
24 | cd $pa
25 |
26 |
27 |
--------------------------------------------------------------------------------
/inker2ext/sgx2.patch:
--------------------------------------------------------------------------------
1 | From fc0cb151466bc6197506ab7e6dc5e3f516ca8d7a Mon Sep 17 00:00:00 2001
2 | From: Angie Chinchilla
3 | Date: Mon, 27 Nov 2017 00:15:00 -0500
4 | Subject: [PATCH] SGX 2.0 Implementation
5 |
6 | Rebased 2.0 patch applies to linux-sgx-driver:master
7 | commit-id 03435d33de0bcca6c5777f23ac161249b9158f1e
8 |
9 | Authors:
10 | Serge Ayoun
11 | Angie Chinchilla
12 | Shay Katz-zamir
13 | Cedric Xing
14 |
15 | Signed-off-by: Angie Chinchilla
16 | ---
17 | Makefile | 3 +-
18 | sgx.h | 29 +++-
19 | sgx_arch.h | 2 +
20 | sgx_encl.c | 30 ++--
21 | sgx_encl2.c | 446 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
22 | sgx_ioctl.c | 134 ++++++++++++++++-
23 | sgx_main.c | 5 +-
24 | sgx_page_cache.c | 2 +-
25 | sgx_user.h | 25 ++++
26 | sgx_util.c | 40 +++--
27 | sgx_vma.c | 10 +-
28 | 11 files changed, 698 insertions(+), 28 deletions(-)
29 | create mode 100644 sgx_encl2.c
30 |
31 | diff --git a/Makefile b/Makefile
32 | index 4b5edaf..c88f38c 100644
33 | --- a/Makefile
34 | +++ b/Makefile
35 | @@ -5,7 +5,8 @@ ifneq ($(KERNELRELEASE),)
36 | sgx_ioctl.o \
37 | sgx_vma.o \
38 | sgx_util.o\
39 | - sgx_encl.o
40 | + sgx_encl.o \
41 | + sgx_encl2.o
42 | obj-m += isgx.o
43 | else
44 | KDIR := /lib/modules/$(shell uname -r)/build
45 | diff --git a/sgx.h b/sgx.h
46 | index bfc24c0..46dfc0f 100644
47 | --- a/sgx.h
48 | +++ b/sgx.h
49 | @@ -69,12 +69,14 @@
50 | #include
51 | #include
52 | #include
53 | +#include
54 | #include "sgx_arch.h"
55 | #include "sgx_user.h"
56 |
57 | #define SGX_EINIT_SPIN_COUNT 20
58 | #define SGX_EINIT_SLEEP_COUNT 50
59 | #define SGX_EINIT_SLEEP_TIME 20
60 | +#define SGX_EDMM_SPIN_COUNT 20
61 |
62 | #define SGX_VA_SLOT_COUNT 512
63 |
64 | @@ -110,9 +112,21 @@ static inline void sgx_free_va_slot(struct sgx_va_page *page,
65 | clear_bit(offset >> 3, page->slots);
66 | }
67 |
68 | +static inline bool sgx_va_slots_empty(struct sgx_va_page *page)
69 | +{
70 | + int slot = find_first_bit(page->slots, SGX_VA_SLOT_COUNT);
71 | +
72 | + if (slot == SGX_VA_SLOT_COUNT)
73 | + return true;
74 | +
75 | + return false;
76 | +}
77 | +
78 | enum sgx_encl_page_flags {
79 | SGX_ENCL_PAGE_TCS = BIT(0),
80 | SGX_ENCL_PAGE_RESERVED = BIT(1),
81 | + SGX_ENCL_PAGE_TRIM = BIT(2),
82 | + SGX_ENCL_PAGE_ADDED = BIT(3),
83 | };
84 |
85 | struct sgx_encl_page {
86 | @@ -160,6 +174,7 @@ struct sgx_encl {
87 | struct sgx_tgid_ctx *tgid_ctx;
88 | struct list_head encl_list;
89 | struct mmu_notifier mmu_notifier;
90 | + unsigned int shadow_epoch;
91 | };
92 |
93 | struct sgx_epc_bank {
94 | @@ -178,6 +193,7 @@ extern u64 sgx_encl_size_max_64;
95 | extern u64 sgx_xfrm_mask;
96 | extern u32 sgx_misc_reserved;
97 | extern u32 sgx_xsave_size_tbl[64];
98 | +extern bool sgx_has_sgx2;
99 |
100 | extern const struct vm_operations_struct sgx_vm_ops;
101 |
102 | @@ -205,6 +221,8 @@ int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr, void *data,
103 | struct sgx_secinfo *secinfo, unsigned int mrmask);
104 | int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
105 | struct sgx_einittoken *einittoken);
106 | +struct sgx_encl_page *sgx_encl_augment(struct vm_area_struct *vma,
107 | + unsigned long addr, bool write);
108 | void sgx_encl_release(struct kref *ref);
109 |
110 | long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
111 | @@ -234,7 +252,8 @@ enum sgx_fault_flags {
112 |
113 | struct sgx_encl_page *sgx_fault_page(struct vm_area_struct *vma,
114 | unsigned long addr,
115 | - unsigned int flags);
116 | + unsigned int flags,
117 | + struct vm_fault *vmf);
118 |
119 |
120 | extern struct mutex sgx_tgid_ctx_mutex;
121 | @@ -249,6 +268,12 @@ void sgx_free_page(struct sgx_epc_page *entry, struct sgx_encl *encl);
122 | void *sgx_get_page(struct sgx_epc_page *entry);
123 | void sgx_put_page(void *epc_page_vaddr);
124 | void sgx_eblock(struct sgx_encl *encl, struct sgx_epc_page *epc_page);
125 | -void sgx_etrack(struct sgx_encl *encl);
126 | +void sgx_etrack(struct sgx_encl *encl, unsigned int epoch);
127 | +void sgx_ipi_cb(void *info);
128 | +int sgx_eldu(struct sgx_encl *encl, struct sgx_encl_page *encl_page,
129 | + struct sgx_epc_page *epc_page, bool is_secs);
130 | +long modify_range(struct sgx_range *rg, unsigned long flags);
131 | +int remove_page(struct sgx_encl *encl, unsigned long address, bool trim);
132 | +int sgx_get_encl(unsigned long addr, struct sgx_encl **encl);
133 |
134 | #endif /* __ARCH_X86_INTEL_SGX_H__ */
135 | diff --git a/sgx_arch.h b/sgx_arch.h
136 | index dcb620e..a609fd6 100644
137 | --- a/sgx_arch.h
138 | +++ b/sgx_arch.h
139 | @@ -139,6 +139,7 @@ enum sgx_page_type {
140 | SGX_PAGE_TYPE_TCS = 0x01,
141 | SGX_PAGE_TYPE_REG = 0x02,
142 | SGX_PAGE_TYPE_VA = 0x03,
143 | + SGX_PAGE_TYPE_TRIM = 0x04,
144 | };
145 |
146 | enum sgx_secinfo_flags {
147 | @@ -148,6 +149,7 @@ enum sgx_secinfo_flags {
148 | SGX_SECINFO_SECS = (SGX_PAGE_TYPE_SECS << 8),
149 | SGX_SECINFO_TCS = (SGX_PAGE_TYPE_TCS << 8),
150 | SGX_SECINFO_REG = (SGX_PAGE_TYPE_REG << 8),
151 | + SGX_SECINFO_TRIM = (SGX_PAGE_TYPE_TRIM << 8),
152 | };
153 |
154 | struct sgx_secinfo {
155 | diff --git a/sgx_encl.c b/sgx_encl.c
156 | index 2669509..ccab446 100644
157 | --- a/sgx_encl.c
158 | +++ b/sgx_encl.c
159 | @@ -279,6 +279,7 @@ static bool sgx_process_add_page_req(struct sgx_add_page_req *req,
160 | encl_page->epc_page = epc_page;
161 | sgx_test_and_clear_young(encl_page, encl);
162 | list_add_tail(&epc_page->list, &encl->load_list);
163 | + encl_page->flags |= SGX_ENCL_PAGE_ADDED;
164 |
165 | return true;
166 | }
167 | @@ -431,8 +432,9 @@ static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
168 | .release = sgx_mmu_notifier_release,
169 | };
170 |
171 | -static int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
172 | - unsigned long addr, unsigned int alloc_flags)
173 | +int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
174 | + unsigned long addr, unsigned int alloc_flags,
175 | + struct sgx_epc_page **va_src, bool already_locked)
176 | {
177 | struct sgx_va_page *va_page;
178 | struct sgx_epc_page *epc_page = NULL;
179 | @@ -451,10 +453,15 @@ static int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
180 | if (!va_page)
181 | return -ENOMEM;
182 |
183 | - epc_page = sgx_alloc_page(alloc_flags);
184 | - if (IS_ERR(epc_page)) {
185 | - kfree(va_page);
186 | - return PTR_ERR(epc_page);
187 | + if (va_src) {
188 | + epc_page = *va_src;
189 | + *va_src = NULL;
190 | + } else {
191 | + epc_page = sgx_alloc_page(alloc_flags);
192 | + if (IS_ERR(epc_page)) {
193 | + kfree(va_page);
194 | + return PTR_ERR(epc_page);
195 | + }
196 | }
197 |
198 | vaddr = sgx_get_page(epc_page);
199 | @@ -481,9 +488,11 @@ static int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
200 | va_page->epc_page = epc_page;
201 | va_offset = sgx_alloc_va_slot(va_page);
202 |
203 | - mutex_lock(&encl->lock);
204 | + if (!already_locked)
205 | + mutex_lock(&encl->lock);
206 | list_add(&va_page->list, &encl->va_pages);
207 | - mutex_unlock(&encl->lock);
208 | + if (!already_locked)
209 | + mutex_unlock(&encl->lock);
210 | }
211 |
212 | entry->va_page = va_page;
213 | @@ -596,7 +605,8 @@ int sgx_encl_create(struct sgx_secs *secs)
214 | if (ret)
215 | goto out;
216 |
217 | - ret = sgx_init_page(encl, &encl->secs, encl->base + encl->size, 0);
218 | + ret = sgx_init_page(encl, &encl->secs, encl->base + encl->size, 0,
219 | + NULL, false);
220 | if (ret)
221 | goto out;
222 |
223 | @@ -766,7 +776,7 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
224 | return ret;
225 | }
226 |
227 | - ret = sgx_init_page(encl, encl_page, addr, 0);
228 | + ret = sgx_init_page(encl, encl_page, addr, 0, NULL, false);
229 | if (ret)
230 | return ret;
231 |
232 | diff --git a/sgx_encl2.c b/sgx_encl2.c
233 | new file mode 100644
234 | index 0000000..6a5556d
235 | --- /dev/null
236 | +++ b/sgx_encl2.c
237 | @@ -0,0 +1,446 @@
238 | +/*
239 | + * This file is provided under a dual BSD/GPLv2 license. When using or
240 | + * redistributing this file, you may do so under either license.
241 | + *
242 | + * GPL LICENSE SUMMARY
243 | + *
244 | + * Copyright(c) 2016-2017 Intel Corporation.
245 | + *
246 | + * This program is free software; you can redistribute it and/or modify
247 | + * it under the terms of version 2 of the GNU General Public License as
248 | + * published by the Free Software Foundation.
249 | + *
250 | + * This program is distributed in the hope that it will be useful, but
251 | + * WITHOUT ANY WARRANTY; without even the implied warranty of
252 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
253 | + * General Public License for more details.
254 | + *
255 | + * Contact Information:
256 | + * Jarkko Sakkinen
257 | + * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
258 | + *
259 | + * BSD LICENSE
260 | + *
261 | + * Copyright(c) 2016-2017 Intel Corporation.
262 | + *
263 | + * Redistribution and use in source and binary forms, with or without
264 | + * modification, are permitted provided that the following conditions
265 | + * are met:
266 | + *
267 | + * * Redistributions of source code must retain the above copyright
268 | + * notice, this list of conditions and the following disclaimer.
269 | + * * Redistributions in binary form must reproduce the above copyright
270 | + * notice, this list of conditions and the following disclaimer in
271 | + * the documentation and/or other materials provided with the
272 | + * distribution.
273 | + * * Neither the name of Intel Corporation nor the names of its
274 | + * contributors may be used to endorse or promote products derived
275 | + * from this software without specific prior written permission.
276 | + *
277 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
278 | + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
279 | + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
280 | + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
281 | + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
282 | + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
283 | + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
284 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
285 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
286 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
287 | + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
288 | + *
289 | + * Authors:
290 | + *
291 | + * Serge Ayoun
292 | + * Angie Chinchilla
293 | + * Shay Katz-zamir
294 | + * Cedric Xing
295 | + */
296 | +
297 | +#include "sgx.h"
298 | +#include
299 | +#include
300 | +
301 | +#define SGX_NR_MOD_CHUNK_PAGES 16
302 | +
303 | +int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
304 | + unsigned long addr, unsigned int alloc_flags,
305 | + struct sgx_epc_page **va_src, bool already_locked);
306 | +/**
307 | + * sgx_encl_augment() - adds a page to an enclave
308 | + * @addr: virtual address where the page should be added
309 | + *
310 | + * The address is checked against the dynamic ranges defined for
311 | + * the enclave. If it matches one, a page is added at the
312 | + * corresponding location.
313 | + *
314 | + * Note: the invoking function must already hold encl->lock.
315 | + */
316 | +struct sgx_encl_page *sgx_encl_augment(struct vm_area_struct *vma,
317 | + unsigned long addr,
318 | + bool write)
319 | +{
320 | + struct sgx_pageinfo pginfo;
321 | + struct sgx_epc_page *epc_page, *va_page = NULL;
322 | + struct sgx_epc_page *secs_epc_page = NULL;
323 | + struct sgx_encl_page *encl_page;
324 | + struct sgx_encl *encl = (struct sgx_encl *) vma->vm_private_data;
325 | + void *epc_va;
326 | + void *secs_va;
327 | + int ret = -EFAULT;
328 | +
329 | + if (!sgx_has_sgx2)
330 | + return ERR_PTR(-EFAULT);
331 | +
332 | +	/* if the VMA is not writable, do not EAUG */
333 | + if (unlikely(!(vma->vm_flags & VM_WRITE)))
334 | + return ERR_PTR(-EFAULT);
335 | +
336 | + addr &= ~(PAGE_SIZE-1);
337 | +
338 | + /* Note: Invoking function holds the encl->lock */
339 | +
340 | + epc_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
341 | + if (IS_ERR(epc_page)) {
342 | + return ERR_PTR(PTR_ERR(epc_page));
343 | + }
344 | +
345 | + va_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
346 | + if (IS_ERR(va_page)) {
347 | + sgx_free_page(epc_page, encl);
348 | + return ERR_PTR(PTR_ERR(va_page));
349 | + }
350 | +
351 | + encl_page = kzalloc(sizeof(struct sgx_encl_page), GFP_KERNEL);
352 | + if (!encl_page) {
353 | + sgx_free_page(epc_page, encl);
354 | + sgx_free_page(va_page, encl);
355 | + return ERR_PTR(-EFAULT);
356 | + }
357 | +
358 | + if (!(encl->flags & SGX_ENCL_INITIALIZED))
359 | + goto out;
360 | +
361 | + if (encl->flags & (SGX_ENCL_SUSPEND | SGX_ENCL_DEAD))
362 | + goto out;
363 | +
364 | + /*
365 | + if ((rg->rg_desc.flags & SGX_GROW_DOWN_FLAG) && !write)
366 | + goto out;
367 | + */
368 | +
369 | + /* Start the augmenting process */
370 | + ret = sgx_init_page(encl, encl_page, addr, 0, &va_page, true);
371 | + if (ret)
372 | + goto out;
373 | +
374 | + /* If SECS is evicted then reload it first */
375 | + /* Same steps as in sgx_do_fault */
376 | + if (encl->flags & SGX_ENCL_SECS_EVICTED) {
377 | + secs_epc_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
378 | + if (IS_ERR(secs_epc_page)) {
379 | + ret = PTR_ERR(secs_epc_page);
380 | + secs_epc_page = NULL;
381 | + goto out;
382 | + }
383 | +
384 | + ret = sgx_eldu(encl, &encl->secs, secs_epc_page, true);
385 | + if (ret)
386 | + goto out;
387 | +
388 | + encl->secs.epc_page = secs_epc_page;
389 | + encl->flags &= ~SGX_ENCL_SECS_EVICTED;
390 | +
391 | + /* Do not free */
392 | + secs_epc_page = NULL;
393 | + }
394 | +
395 | + secs_va = sgx_get_page(encl->secs.epc_page);
396 | + epc_va = sgx_get_page(epc_page);
397 | +
398 | + pginfo.srcpge = 0;
399 | + pginfo.secinfo = 0;
400 | + pginfo.linaddr = addr;
401 | + pginfo.secs = (unsigned long) secs_va;
402 | +
403 | + ret = __eaug(&pginfo, epc_va);
404 | + if (ret) {
405 | + pr_err("sgx: eaug failure with ret=%d\n", ret);
406 | + goto out;
407 | + }
408 | +
409 | + ret = vm_insert_pfn(vma, encl_page->addr, PFN_DOWN(epc_page->pa));
410 | + sgx_put_page(epc_va);
411 | + sgx_put_page(secs_va);
412 | + if (ret) {
413 | + pr_err("sgx: vm_insert_pfn failure with ret=%d\n", ret);
414 | + goto out;
415 | + }
416 | +
417 | + epc_page->encl_page = encl_page;
418 | + encl_page->epc_page = epc_page;
419 | + encl->secs_child_cnt++;
420 | +
421 | + ret = radix_tree_insert(&encl->page_tree, encl_page->addr >> PAGE_SHIFT,
422 | + encl_page);
423 | + if (ret) {
424 | + pr_err("sgx: radix_tree_insert failed with ret=%d\n", ret);
425 | + goto out;
426 | + }
427 | + sgx_test_and_clear_young(encl_page, encl);
428 | + list_add_tail(&encl_page->epc_page->list, &encl->load_list);
429 | + encl_page->flags |= SGX_ENCL_PAGE_ADDED;
430 | +
431 | + if (va_page)
432 | + sgx_free_page(va_page, encl);
433 | + if (secs_epc_page)
434 | + sgx_free_page(secs_epc_page, encl);
435 | +
436 | + /*
437 | +	 * A write operation corresponds to stack extension.
438 | +	 * In that case the #PF is caused by a write access,
439 | +	 * most probably a push.
440 | +	 * We return SIGBUS so that the OS invokes the enclave's exception
441 | +	 * handler, which will execute eaccept.
442 | + */
443 | + if (write)
444 | + return ERR_PTR(-EFAULT);
445 | +
446 | + return encl_page;
447 | +
448 | +out:
449 | + if (encl_page->va_offset)
450 | + sgx_free_va_slot(encl_page->va_page, encl_page->va_offset);
451 | + sgx_free_page(epc_page, encl);
452 | + if (va_page)
453 | + sgx_free_page(va_page, encl);
454 | + kfree(encl_page);
455 | + if (secs_epc_page)
456 | + sgx_free_page(secs_epc_page, encl);
457 | +
458 | + if ((ret == -EBUSY)||(ret == -ERESTARTSYS))
459 | + return ERR_PTR(ret);
460 | +
461 | + return ERR_PTR(-EFAULT);
462 | +}
463 | +
464 | +static int isolate_range(struct sgx_encl *encl,
465 | + struct sgx_range *rg, struct list_head *list)
466 | +{
467 | + unsigned long address, end;
468 | + struct sgx_encl_page *encl_page;
469 | + struct vm_area_struct *vma;
470 | + int ret;
471 | +
472 | + address = rg->start_addr;
473 | + end = address + rg->nr_pages * PAGE_SIZE;
474 | +
475 | + ret = sgx_encl_find(encl->mm, address, &vma);
476 | + if (ret || encl != vma->vm_private_data)
477 | + return -EINVAL;
478 | +
479 | + for (; address < end; address += PAGE_SIZE) {
480 | + encl_page = ERR_PTR(-EBUSY);
481 | + while (encl_page == ERR_PTR(-EBUSY))
482 | + /* bring back page in case it was evicted */
483 | + encl_page = sgx_fault_page(vma, address,
484 | + SGX_FAULT_RESERVE, NULL);
485 | +
486 | + if (IS_ERR(encl_page)) {
487 | + sgx_err(encl, "sgx: No page found at address 0x%lx\n",
488 | + address);
489 | + return PTR_ERR(encl_page);
490 | + }
491 | +
492 | +		/* We do not need the reserved bit anymore as the page
493 | +		 * is removed from the load list.
494 | + */
495 | + mutex_lock(&encl->lock);
496 | + list_move_tail(&encl_page->epc_page->list, list);
497 | + encl_page->flags &= ~SGX_ENCL_PAGE_RESERVED;
498 | + mutex_unlock(&encl->lock);
499 | + }
500 | +
501 | + return 0;
502 | +}
503 | +
504 | +static int __modify_range(struct sgx_encl *encl,
505 | + struct sgx_range *rg, struct sgx_secinfo *secinfo)
506 | +{
507 | + struct sgx_encl_page *encl_page;
508 | + struct sgx_epc_page *epc_page, *tmp;
509 | + LIST_HEAD(list);
510 | + bool emodt = secinfo->flags & (SGX_SECINFO_TRIM | SGX_SECINFO_TCS);
511 | + unsigned int epoch = 0;
512 | + void *epc_va;
513 | + int ret = 0, cnt, status = 0;
514 | +
515 | + ret = isolate_range(encl, rg, &list);
516 | + if (ret)
517 | + goto out;
518 | +
519 | + if (list_empty(&list))
520 | + goto out;
521 | +
522 | + /* EMODT / EMODPR */
523 | + list_for_each_entry_safe(epc_page, tmp, &list, list) {
524 | + encl_page = epc_page->encl_page;
525 | + if (!emodt && (encl_page->flags & SGX_ENCL_PAGE_TCS)) {
526 | + sgx_err(encl, "sgx: illegal request: page at\
527 | + address=0x%lx is a TCS, req flags=0x%llx\n",
528 | + encl_page->addr, secinfo->flags);
529 | + ret = -EINVAL;
530 | + continue;
531 | + }
532 | + mutex_lock(&encl->lock);
533 | + epc_va = sgx_get_page(epc_page);
534 | + status = SGX_LOCKFAIL;
535 | + cnt = 0;
536 | + while (SGX_LOCKFAIL == status && cnt < SGX_EDMM_SPIN_COUNT) {
537 | + if (emodt) {
538 | + status = __emodt(secinfo, epc_va);
539 | + if (!status)
540 | + encl_page->flags |= SGX_ENCL_PAGE_TCS;
541 | + } else
542 | + status = __emodpr(secinfo, epc_va);
543 | + cnt++;
544 | + }
545 | +
546 | + epoch = encl->shadow_epoch;
547 | + sgx_put_page(epc_va);
548 | + mutex_unlock(&encl->lock);
549 | +
550 | + if (status) {
551 | + sgx_err(encl, "sgx: Page at address=0x%lx \
552 | + can't be modified err=%d req flags=0x%llx\n",
553 | + encl_page->addr, status, secinfo->flags);
554 | + ret = (ret) ? ret : status;
555 | + } else {
556 | + if (SGX_SECINFO_TRIM == secinfo->flags)
557 | + encl_page->flags |= SGX_ENCL_PAGE_TRIM;
558 | + }
559 | + }
560 | +
561 | + /* ETRACK */
562 | + mutex_lock(&encl->lock);
563 | + sgx_etrack(encl, epoch);
564 | + mutex_unlock(&encl->lock);
565 | +
566 | + smp_call_function(sgx_ipi_cb, NULL, 1);
567 | +
568 | +out:
569 | + if (!list_empty(&list)) {
570 | + mutex_lock(&encl->lock);
571 | + list_splice(&list, &encl->load_list);
572 | + mutex_unlock(&encl->lock);
573 | + }
574 | +
575 | + return ret;
576 | +}
577 | +
578 | +long modify_range(struct sgx_range *rg, unsigned long flags)
579 | +{
580 | + struct sgx_encl *encl;
581 | + struct sgx_secinfo secinfo;
582 | + struct sgx_range _rg;
583 | + unsigned long end = rg->start_addr + rg->nr_pages * PAGE_SIZE;
584 | + int ret = 0;
585 | +
586 | + if (!sgx_has_sgx2)
587 | + return -ENOSYS;
588 | +
589 | + if (rg->start_addr & (PAGE_SIZE - 1))
590 | + return -EINVAL;
591 | +
592 | + if (!rg->nr_pages)
593 | + return -EINVAL;
594 | +
595 | + ret = sgx_get_encl(rg->start_addr, &encl);
596 | + if (ret) {
597 | + pr_warn("sgx: No enclave found at start addr 0x%lx ret=%d\n",
598 | + rg->start_addr, ret);
599 | + return ret;
600 | + }
601 | +
602 | + if (end > encl->base + encl->size) {
603 | + ret = -EINVAL;
604 | + goto out;
605 | + }
606 | +
607 | + memset(&secinfo, 0, sizeof(secinfo));
608 | + secinfo.flags = flags;
609 | +
610 | + /*
611 | +	 * Modify the range in chunks of 16 pages: these pages are
612 | +	 * removed from the load list while being modified. Larger chunks
613 | +	 * may empty the EPC load lists and stall SGX.
614 | + */
615 | + for (_rg.start_addr = rg->start_addr;
616 | + _rg.start_addr < end;
617 | + rg->nr_pages -= SGX_NR_MOD_CHUNK_PAGES,
618 | + _rg.start_addr += SGX_NR_MOD_CHUNK_PAGES*PAGE_SIZE) {
619 | + _rg.nr_pages = rg->nr_pages > 0x10 ? 0x10 : rg->nr_pages;
620 | + ret = __modify_range(encl, &_rg, &secinfo);
621 | + if (ret)
622 | + break;
623 | + }
624 | +
625 | +out:
626 | + kref_put(&encl->refcount, sgx_encl_release);
627 | + return ret;
628 | +}
629 | +
630 | +int remove_page(struct sgx_encl *encl, unsigned long address, bool trim)
631 | +{
632 | + struct sgx_encl_page *encl_page;
633 | + struct vm_area_struct *vma;
634 | + struct sgx_va_page *va_page;
635 | + int ret;
636 | +
637 | + ret = sgx_encl_find(encl->mm, address, &vma);
638 | + if (ret || encl != vma->vm_private_data)
639 | + return -EINVAL;
640 | +
641 | + encl_page = sgx_fault_page(vma, address, SGX_FAULT_RESERVE, NULL);
642 | + if (IS_ERR(encl_page))
643 | + return (PTR_ERR(encl_page) == -EBUSY) ? -EBUSY : -EINVAL;
644 | +
645 | + if (trim && !(encl_page->flags & SGX_ENCL_PAGE_TRIM)) {
646 | + encl_page->flags &= ~SGX_ENCL_PAGE_RESERVED;
647 | + return -EINVAL;
648 | + }
649 | +
650 | + if (!(encl_page->flags & SGX_ENCL_PAGE_ADDED)) {
651 | + encl_page->flags &= ~SGX_ENCL_PAGE_RESERVED;
652 | + return -EINVAL;
653 | + }
654 | +
655 | + mutex_lock(&encl->lock);
656 | +
657 | + radix_tree_delete(&encl->page_tree, encl_page->addr >> PAGE_SHIFT);
658 | + va_page = encl_page->va_page;
659 | +
660 | + if (va_page) {
661 | + sgx_free_va_slot(va_page, encl_page->va_offset);
662 | +
663 | + if (sgx_va_slots_empty(va_page)) {
664 | + list_del(&va_page->list);
665 | + sgx_free_page(va_page->epc_page, encl);
666 | + kfree(va_page);
667 | + }
668 | + }
669 | +
670 | + if (encl_page->epc_page) {
671 | + list_del(&encl_page->epc_page->list);
672 | + encl_page->epc_page->encl_page = NULL;
673 | + zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
674 | + sgx_free_page(encl_page->epc_page, encl);
675 | + encl->secs_child_cnt--;
676 | + }
677 | +
678 | + mutex_unlock(&encl->lock);
679 | +
680 | + kfree(encl_page);
681 | +
682 | + return 0;
683 | +}
684 | diff --git a/sgx_ioctl.c b/sgx_ioctl.c
685 | index f7540fc..0b3476d 100644
686 | --- a/sgx_ioctl.c
687 | +++ b/sgx_ioctl.c
688 | @@ -73,7 +73,7 @@
689 | #include
690 | #include
691 |
692 | -static int sgx_get_encl(unsigned long addr, struct sgx_encl **encl)
693 | +int sgx_get_encl(unsigned long addr, struct sgx_encl **encl)
694 | {
695 | struct mm_struct *mm = current->mm;
696 | struct vm_area_struct *vma;
697 | @@ -251,6 +251,123 @@ out:
698 | return ret;
699 | }
700 |
701 | +long sgx_ioc_page_modpr(struct file *filep, unsigned int cmd,
702 | + unsigned long arg)
703 | +{
704 | + struct sgx_modification_param *p =
705 | + (struct sgx_modification_param *) arg;
706 | +
707 | + /*
708 | +	 * Only the R, W and X flags are allowed in the mask.
709 | +	 * Restricting W without R is not allowed.
710 | + */
711 | + if (p->flags & ~(SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X))
712 | + return -EINVAL;
713 | + if (!(p->flags & SGX_SECINFO_R) &&
714 | + (p->flags & SGX_SECINFO_W))
715 | + return -EINVAL;
716 | + return modify_range(&p->range, p->flags);
717 | +}
718 | +
719 | +/**
720 | + * sgx_ioc_page_to_tcs() - Pages defined in range are switched to TCS.
721 | + * These pages should be of type REG.
722 | + * eaccept needs to be invoked after return.
723 | + * @arg range address of pages to be switched
724 | + */
725 | +long sgx_ioc_page_to_tcs(struct file *filep, unsigned int cmd,
726 | + unsigned long arg)
727 | +{
728 | + return modify_range((struct sgx_range *)arg, SGX_SECINFO_TCS);
729 | +}
730 | +
731 | +/**
732 | + * sgx_ioc_trim_page() - Pages defined in range are being trimmed.
733 | + * These pages still belong to the enclave and cannot be removed until
734 | + * eaccept has been invoked.
735 | + * @arg range address of pages to be trimmed
736 | + */
737 | +long sgx_ioc_trim_page(struct file *filep, unsigned int cmd,
738 | + unsigned long arg)
739 | +{
740 | + return modify_range((struct sgx_range *)arg, SGX_SECINFO_TRIM);
741 | +}
742 | +
743 | +/**
744 | + * sgx_ioc_page_notify_accept() - Pages defined in range will be moved to
745 | + * the trimmed list, i.e. they can be freely removed from now on. These pages
746 | + * should have the PT_TRIM page type and should already have been eaccepted.
747 | + * @arg range address of pages
748 | + */
749 | +long sgx_ioc_page_notify_accept(struct file *filep, unsigned int cmd,
750 | + unsigned long arg)
751 | +{
752 | + struct sgx_range *rg;
753 | + unsigned long address, end;
754 | + struct sgx_encl *encl;
755 | + int ret, tmp_ret = 0;
756 | +
757 | + if (!sgx_has_sgx2)
758 | + return -ENOSYS;
759 | +
760 | + rg = (struct sgx_range *)arg;
761 | +
762 | + address = rg->start_addr;
763 | + address &= ~(PAGE_SIZE-1);
764 | + end = address + rg->nr_pages * PAGE_SIZE;
765 | +
766 | + ret = sgx_get_encl(address, &encl);
767 | + if (ret) {
768 | + pr_warn("sgx: No enclave found at start address 0x%lx\n",
769 | + address);
770 | + return ret;
771 | + }
772 | +
773 | + for (; address < end; address += PAGE_SIZE) {
774 | + tmp_ret = remove_page(encl, address, true);
775 | + if (tmp_ret) {
776 | + sgx_dbg(encl, "sgx: remove failed, addr=0x%lx ret=%d\n",
777 | + address, tmp_ret);
778 | + ret = tmp_ret;
779 | + continue;
780 | + }
781 | + }
782 | +
783 | + kref_put(&encl->refcount, sgx_encl_release);
784 | +
785 | + return ret;
786 | +}
787 | +
788 | +/**
789 | + * sgx_ioc_page_remove() - The page defined by address will be removed.
790 | + * @arg address of page
791 | + */
792 | +long sgx_ioc_page_remove(struct file *filep, unsigned int cmd,
793 | + unsigned long arg)
794 | +{
795 | + struct sgx_encl *encl;
796 | + unsigned long address = *((unsigned long *) arg);
797 | + int ret;
798 | +
799 | + if (!sgx_has_sgx2)
800 | + return -ENOSYS;
801 | +
802 | + if (sgx_get_encl(address, &encl)) {
803 | + pr_warn("sgx: No enclave found at start address 0x%lx\n",
804 | + address);
805 | + return -EINVAL;
806 | + }
807 | +
808 | + ret = remove_page(encl, address, false);
809 | + if (ret) {
810 | + pr_warn("sgx: Failed to remove page, address=0x%lx ret=%d\n",
811 | + address, ret);
812 | + }
813 | +
814 | + kref_put(&encl->refcount, sgx_encl_release);
815 | + return ret;
816 | +}
817 | +
818 | typedef long (*sgx_ioc_t)(struct file *filep, unsigned int cmd,
819 | unsigned long arg);
820 |
821 | @@ -270,6 +387,21 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
822 | case SGX_IOC_ENCLAVE_INIT:
823 | handler = sgx_ioc_enclave_init;
824 | break;
825 | + case SGX_IOC_ENCLAVE_EMODPR:
826 | + handler = sgx_ioc_page_modpr;
827 | + break;
828 | + case SGX_IOC_ENCLAVE_MKTCS:
829 | + handler = sgx_ioc_page_to_tcs;
830 | + break;
831 | + case SGX_IOC_ENCLAVE_TRIM:
832 | + handler = sgx_ioc_trim_page;
833 | + break;
834 | + case SGX_IOC_ENCLAVE_NOTIFY_ACCEPT:
835 | + handler = sgx_ioc_page_notify_accept;
836 | + break;
837 | + case SGX_IOC_ENCLAVE_PAGE_REMOVE:
838 | + handler = sgx_ioc_page_remove;
839 | + break;
840 | default:
841 | return -ENOIOCTLCMD;
842 | }
843 | diff --git a/sgx_main.c b/sgx_main.c
844 | index 7ff3864..7b7c384 100644
845 | --- a/sgx_main.c
846 | +++ b/sgx_main.c
847 | @@ -71,7 +71,7 @@
848 | #include
849 |
850 | #define DRV_DESCRIPTION "Intel SGX Driver"
851 | -#define DRV_VERSION "0.10"
852 | +#define DRV_VERSION "0.11"
853 |
854 | MODULE_DESCRIPTION(DRV_DESCRIPTION);
855 | MODULE_AUTHOR("Jarkko Sakkinen ");
856 | @@ -95,6 +95,7 @@ u64 sgx_encl_size_max_64;
857 | u64 sgx_xfrm_mask = 0x3;
858 | u32 sgx_misc_reserved;
859 | u32 sgx_xsave_size_tbl[64];
860 | +bool sgx_has_sgx2;
861 |
862 | #ifdef CONFIG_COMPAT
863 | long sgx_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
864 | @@ -325,6 +326,8 @@ static int sgx_drv_probe(struct platform_device *pdev)
865 | return -ENODEV;
866 | }
867 |
868 | + sgx_has_sgx2 = (eax & 2) != 0;
869 | +
870 | return sgx_dev_init(&pdev->dev);
871 | }
872 |
873 | diff --git a/sgx_page_cache.c b/sgx_page_cache.c
874 | index 8472037..b4bc985 100644
875 | --- a/sgx_page_cache.c
876 | +++ b/sgx_page_cache.c
877 | @@ -335,7 +335,7 @@ static void sgx_write_pages(struct sgx_encl *encl, struct list_head *src)
878 | }
879 |
880 | /* ETRACK */
881 | - sgx_etrack(encl);
882 | + sgx_etrack(encl, encl->shadow_epoch);
883 |
884 | /* EWB */
885 | while (!list_empty(src)) {
886 | diff --git a/sgx_user.h b/sgx_user.h
887 | index a15f87b..50f0931 100644
888 | --- a/sgx_user.h
889 | +++ b/sgx_user.h
890 | @@ -69,6 +69,16 @@
891 | _IOW(SGX_MAGIC, 0x01, struct sgx_enclave_add_page)
892 | #define SGX_IOC_ENCLAVE_INIT \
893 | _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
894 | +#define SGX_IOC_ENCLAVE_EMODPR \
895 | + _IOW(SGX_MAGIC, 0x09, struct sgx_modification_param)
896 | +#define SGX_IOC_ENCLAVE_MKTCS \
897 | + _IOW(SGX_MAGIC, 0x0a, struct sgx_range)
898 | +#define SGX_IOC_ENCLAVE_TRIM \
899 | + _IOW(SGX_MAGIC, 0x0b, struct sgx_range)
900 | +#define SGX_IOC_ENCLAVE_NOTIFY_ACCEPT \
901 | + _IOW(SGX_MAGIC, 0x0c, struct sgx_range)
902 | +#define SGX_IOC_ENCLAVE_PAGE_REMOVE \
903 | + _IOW(SGX_MAGIC, 0x0d, unsigned long)
904 |
905 | /* SGX leaf instruction return values */
906 | #define SGX_SUCCESS 0
907 | @@ -90,6 +100,7 @@
908 | #define SGX_INVALID_EINITTOKEN 16
909 | #define SGX_PREV_TRK_INCMPL 17
910 | #define SGX_PG_IS_SECS 18
911 | +#define SGX_PAGE_NOT_MODIFIABLE 20
912 | #define SGX_INVALID_CPUSVN 32
913 | #define SGX_INVALID_ISVSVN 64
914 | #define SGX_UNMASKED_EVENT 128
915 | @@ -136,4 +147,18 @@ struct sgx_enclave_init {
916 | __u64 einittoken;
917 | } __attribute__((__packed__));
918 |
919 | +/*
920 | + * SGX2.0 definitions
921 | + */
922 | +
923 | +struct sgx_range {
924 | + unsigned long start_addr;
925 | + unsigned int nr_pages;
926 | +};
927 | +
928 | +struct sgx_modification_param {
929 | + struct sgx_range range;
930 | + unsigned long flags;
931 | +};
932 | +
933 | #endif /* _UAPI_ASM_X86_SGX_H */
934 | diff --git a/sgx_util.c b/sgx_util.c
935 | index ff0e40a..fd5022a 100644
936 | --- a/sgx_util.c
937 | +++ b/sgx_util.c
938 | @@ -135,7 +135,7 @@ void sgx_invalidate(struct sgx_encl *encl, bool flush_cpus)
939 | sgx_flush_cpus(encl);
940 | }
941 |
942 | -static void sgx_ipi_cb(void *info)
943 | +void sgx_ipi_cb(void *info)
944 | {
945 | }
946 |
947 | @@ -144,10 +144,10 @@ void sgx_flush_cpus(struct sgx_encl *encl)
948 | on_each_cpu_mask(mm_cpumask(encl->mm), sgx_ipi_cb, NULL, 1);
949 | }
950 |
951 | -static int sgx_eldu(struct sgx_encl *encl,
952 | - struct sgx_encl_page *encl_page,
953 | - struct sgx_epc_page *epc_page,
954 | - bool is_secs)
955 | +int sgx_eldu(struct sgx_encl *encl,
956 | + struct sgx_encl_page *encl_page,
957 | + struct sgx_epc_page *epc_page,
958 | + bool is_secs)
959 | {
960 | struct page *backing;
961 | struct page *pcmd;
962 | @@ -212,7 +212,8 @@ out:
963 |
964 | static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma,
965 | unsigned long addr,
966 | - unsigned int flags)
967 | + unsigned int flags,
968 | + struct vm_fault *vmf)
969 | {
970 | struct sgx_encl *encl = vma->vm_private_data;
971 | struct sgx_encl_page *entry;
972 | @@ -220,6 +221,7 @@ static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma,
973 | struct sgx_epc_page *secs_epc_page = NULL;
974 | bool reserve = (flags & SGX_FAULT_RESERVE) != 0;
975 | int rc = 0;
976 | + bool write = (vmf) ? (FAULT_FLAG_WRITE & vmf->flags) : false;
977 |
978 | /* If process was forked, VMA is still there but vm_private_data is set
979 | * to NULL.
980 | @@ -230,6 +232,14 @@ static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma,
981 | mutex_lock(&encl->lock);
982 |
983 | entry = radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT);
984 | + if (vmf && !entry) {
985 | + entry = sgx_encl_augment(vma, addr, write);
986 | + goto out;
987 | + }
988 | +
989 | +	/* A missing entry cannot happen in the 'reloading an evicted
990 | +	 * page' flow.
991 | + */
992 | if (!entry) {
993 | rc = -EFAULT;
994 | goto out;
995 | @@ -331,12 +341,13 @@ out:
996 |
997 | struct sgx_encl_page *sgx_fault_page(struct vm_area_struct *vma,
998 | unsigned long addr,
999 | - unsigned int flags)
1000 | + unsigned int flags,
1001 | + struct vm_fault *vmf)
1002 | {
1003 | struct sgx_encl_page *entry;
1004 |
1005 | do {
1006 | - entry = sgx_do_fault(vma, addr, flags);
1007 | + entry = sgx_do_fault(vma, addr, flags, vmf);
1008 | if (!(flags & SGX_FAULT_RESERVE))
1009 | break;
1010 | } while (PTR_ERR(entry) == -EBUSY);
1011 | @@ -360,16 +371,25 @@ void sgx_eblock(struct sgx_encl *encl, struct sgx_epc_page *epc_page)
1012 |
1013 | }
1014 |
1015 | -void sgx_etrack(struct sgx_encl *encl)
1016 | +void sgx_etrack(struct sgx_encl *encl, unsigned int epoch)
1017 | {
1018 | void *epc;
1019 | int ret;
1020 |
1021 | + /* If someone already called etrack in the meantime */
1022 | + if (epoch < encl->shadow_epoch)
1023 | + return;
1024 | +
1025 | epc = sgx_get_page(encl->secs.epc_page);
1026 | ret = __etrack(epc);
1027 | sgx_put_page(epc);
1028 | + encl->shadow_epoch++;
1029 |
1030 | - if (ret) {
1031 | + if (ret == SGX_PREV_TRK_INCMPL) {
1032 | + sgx_dbg(encl, "ETRACK returned %d\n", ret);
1033 | + smp_call_function(sgx_ipi_cb, NULL, 1);
1034 | + BUG_ON(__etrack(epc));
1035 | + } else if (ret) {
1036 | sgx_crit(encl, "ETRACK returned %d\n", ret);
1037 | sgx_invalidate(encl, true);
1038 | }
1039 | diff --git a/sgx_vma.c b/sgx_vma.c
1040 | index dae9eb9..bcd1047 100644
1041 | --- a/sgx_vma.c
1042 | +++ b/sgx_vma.c
1043 | @@ -76,6 +76,12 @@ static void sgx_vma_open(struct vm_area_struct *vma)
1044 | if (!encl)
1045 | return;
1046 |
1047 | + /* protect from fork */
1048 | + if (encl->mm != current->mm) {
1049 | + vma->vm_private_data = NULL;
1050 | + return;
1051 | + }
1052 | +
1053 | /* kref cannot underflow because ECREATE ioctl checks that there is only
1054 | * one single VMA for the enclave before proceeding.
1055 | */
1056 | @@ -113,7 +119,7 @@ static int sgx_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1057 | #endif
1058 | struct sgx_encl_page *entry;
1059 |
1060 | - entry = sgx_fault_page(vma, addr, 0);
1061 | + entry = sgx_fault_page(vma, addr, 0, vmf);
1062 |
1063 | if (!IS_ERR(entry) || PTR_ERR(entry) == -EBUSY)
1064 | return VM_FAULT_NOPAGE;
1065 | @@ -212,7 +218,7 @@ static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
1066 | entry->flags &= ~SGX_ENCL_PAGE_RESERVED;
1067 |
1068 | entry = sgx_fault_page(vma, (addr + i) & PAGE_MASK,
1069 | - SGX_FAULT_RESERVE);
1070 | + SGX_FAULT_RESERVE, NULL);
1071 | if (IS_ERR(entry)) {
1072 | ret = PTR_ERR(entry);
1073 | entry = NULL;
1074 | --
1075 | 2.7.4
1076 |
1077 |
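
The patch above adds the SGX2/EDMM user interface: pages can be EAUGed on demand from the page-fault handler, and five new ioctls (EMODPR, MKTCS, TRIM, NOTIFY_ACCEPT, PAGE_REMOVE) drive permission and type changes on existing pages. Below is a minimal user-space sketch of the trim-and-release flow described in the sgx_ioc_trim_page() and sgx_ioc_page_notify_accept() comments; the /dev/isgx device path is an assumption not defined in this patch, and the EACCEPT step between the two calls has to be executed by the enclave itself.

/* Hedged sketch: releasing enclave pages with the new SGX2 ioctls.
 * Assumptions: the driver is reachable at /dev/isgx (not defined in the
 * patch) and the enclave runtime EACCEPTs every trimmed page before
 * SGX_IOC_ENCLAVE_NOTIFY_ACCEPT is issued.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sgx_user.h"        /* struct sgx_range, SGX_IOC_ENCLAVE_* */

int sgx_trim_and_release(unsigned long addr, unsigned int nr_pages)
{
	struct sgx_range rg = { .start_addr = addr, .nr_pages = nr_pages };
	int fd = open("/dev/isgx", O_RDWR);

	if (fd < 0)
		return -1;

	/* 1. Switch the pages to PT_TRIM; they stay in the enclave for now. */
	if (ioctl(fd, SGX_IOC_ENCLAVE_TRIM, &rg))
		goto err;

	/* 2. The enclave must now EACCEPT each page in [addr, addr + nr_pages). */

	/* 3. Tell the driver the pages were accepted so it can free the EPC. */
	if (ioctl(fd, SGX_IOC_ENCLAVE_NOTIFY_ACCEPT, &rg))
		goto err;

	close(fd);
	return 0;
err:
	perror("sgx trim/release");
	close(fd);
	return -1;
}

On the driver side, modify_range() walks the request in SGX_NR_MOD_CHUNK_PAGES (16-page) chunks, so a single TRIM or MKTCS ioctl may cover an arbitrarily large range.
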
--------------------------------------------------------------------------------
/sgx.h:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016-2017 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016-2017 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | * Suresh Siddha
56 | * Serge Ayoun
57 | * Shay Katz-zamir
58 | */
59 |
60 | #ifndef __ARCH_INTEL_SGX_H__
61 | #define __ARCH_INTEL_SGX_H__
62 |
63 | #include "sgx_asm.h"
64 | #include
65 | #include
66 | #include
67 | #include
68 | #include
69 | #include
70 | #include
71 | #include
72 | #include
73 | #include "sgx_arch.h"
74 | #include "sgx_user.h"
75 |
76 | #define SGX_EINIT_SPIN_COUNT 20
77 | #define SGX_EINIT_SLEEP_COUNT 50
78 | #define SGX_EINIT_SLEEP_TIME 20
79 | #define SGX_EDMM_SPIN_COUNT 20
80 |
81 | #define SGX_VA_SLOT_COUNT 512
82 | #ifndef MSR_IA32_SGXLEPUBKEYHASH0
83 | #define MSR_IA32_SGXLEPUBKEYHASH0 0x0000008C
84 | #define MSR_IA32_SGXLEPUBKEYHASH1 0x0000008D
85 | #define MSR_IA32_SGXLEPUBKEYHASH2 0x0000008E
86 | #define MSR_IA32_SGXLEPUBKEYHASH3 0x0000008F
87 | #endif
88 |
89 | struct sgx_epc_page {
90 | resource_size_t pa;
91 | struct list_head list;
92 | struct sgx_encl_page *encl_page;
93 | };
94 |
95 | enum sgx_alloc_flags {
96 | SGX_ALLOC_ATOMIC = BIT(0),
97 | };
98 |
99 | struct sgx_va_page {
100 | struct sgx_epc_page *epc_page;
101 | DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT);
102 | struct list_head list;
103 | };
104 |
105 | static inline unsigned int sgx_alloc_va_slot(struct sgx_va_page *page)
106 | {
107 | int slot = find_first_zero_bit(page->slots, SGX_VA_SLOT_COUNT);
108 |
109 | if (slot < SGX_VA_SLOT_COUNT)
110 | set_bit(slot, page->slots);
111 |
112 | return slot << 3;
113 | }
114 |
115 | static inline void sgx_free_va_slot(struct sgx_va_page *page,
116 | unsigned int offset)
117 | {
118 | clear_bit(offset >> 3, page->slots);
119 | }
120 |
121 | static inline bool sgx_va_slots_empty(struct sgx_va_page *page)
122 | {
123 | int slot = find_first_bit(page->slots, SGX_VA_SLOT_COUNT);
124 |
125 | if (slot == SGX_VA_SLOT_COUNT)
126 | return true;
127 |
128 | return false;
129 | }
130 |
131 | enum sgx_encl_page_flags {
132 | SGX_ENCL_PAGE_TCS = BIT(0),
133 | SGX_ENCL_PAGE_RESERVED = BIT(1),
134 | SGX_ENCL_PAGE_TRIM = BIT(2),
135 | SGX_ENCL_PAGE_ADDED = BIT(3),
136 | };
137 |
138 | struct sgx_encl_page {
139 | unsigned long addr;
140 | unsigned int flags;
141 | struct sgx_epc_page *epc_page;
142 | struct sgx_va_page *va_page;
143 | unsigned int va_offset;
144 | };
145 |
146 | struct sgx_tgid_ctx {
147 | struct pid *tgid;
148 | struct kref refcount;
149 | struct list_head encl_list;
150 | struct list_head list;
151 | };
152 |
153 | enum sgx_encl_flags {
154 | SGX_ENCL_INITIALIZED = BIT(0),
155 | SGX_ENCL_DEBUG = BIT(1),
156 | SGX_ENCL_SECS_EVICTED = BIT(2),
157 | SGX_ENCL_SUSPEND = BIT(3),
158 | SGX_ENCL_DEAD = BIT(4),
159 | };
160 |
161 | struct sgx_encl {
162 | unsigned int flags;
163 | uint64_t attributes;
164 | uint64_t xfrm;
165 | unsigned int secs_child_cnt;
166 | struct mutex lock;
167 | struct mm_struct *mm;
168 | struct file *backing;
169 | struct file *pcmd;
170 | struct list_head load_list;
171 | struct kref refcount;
172 | unsigned long base;
173 | unsigned long size;
174 | unsigned long ssaframesize;
175 | struct list_head va_pages;
176 | struct radix_tree_root page_tree;
177 | struct list_head add_page_reqs;
178 | struct work_struct add_page_work;
179 | struct sgx_encl_page secs;
180 | struct sgx_tgid_ctx *tgid_ctx;
181 | struct list_head encl_list;
182 | struct mmu_notifier mmu_notifier;
183 | unsigned int shadow_epoch;
184 | };
185 |
186 | struct sgx_epc_bank {
187 | unsigned long pa;
188 | #ifdef CONFIG_X86_64
189 | unsigned long va;
190 | #endif
191 | unsigned long size;
192 | };
193 |
194 | extern struct workqueue_struct *sgx_add_page_wq;
195 | extern struct sgx_epc_bank sgx_epc_banks[];
196 | extern int sgx_nr_epc_banks;
197 | extern u64 sgx_encl_size_max_32;
198 | extern u64 sgx_encl_size_max_64;
199 | extern u64 sgx_xfrm_mask;
200 | extern u32 sgx_misc_reserved;
201 | extern u32 sgx_xsave_size_tbl[64];
202 | extern bool sgx_has_sgx2;
203 |
204 | extern const struct vm_operations_struct sgx_vm_ops;
205 |
206 | #define sgx_pr_ratelimited(level, encl, fmt, ...) \
207 | pr_ ## level ## _ratelimited("intel_sgx: [%d:0x%p] " fmt, \
208 | pid_nr((encl)->tgid_ctx->tgid), \
209 | (void *)(encl)->base, ##__VA_ARGS__)
210 |
211 | #define sgx_dbg(encl, fmt, ...) \
212 | sgx_pr_ratelimited(debug, encl, fmt, ##__VA_ARGS__)
213 | #define sgx_info(encl, fmt, ...) \
214 | sgx_pr_ratelimited(info, encl, fmt, ##__VA_ARGS__)
215 | #define sgx_warn(encl, fmt, ...) \
216 | sgx_pr_ratelimited(warn, encl, fmt, ##__VA_ARGS__)
217 | #define sgx_err(encl, fmt, ...) \
218 | sgx_pr_ratelimited(err, encl, fmt, ##__VA_ARGS__)
219 | #define sgx_crit(encl, fmt, ...) \
220 | sgx_pr_ratelimited(crit, encl, fmt, ##__VA_ARGS__)
221 |
222 | int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
223 | struct vm_area_struct **vma);
224 | void sgx_tgid_ctx_release(struct kref *ref);
225 | int sgx_encl_create(struct sgx_secs *secs);
226 | int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr, void *data,
227 | struct sgx_secinfo *secinfo, unsigned int mrmask);
228 | int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
229 | struct sgx_einittoken *einittoken);
230 | struct sgx_encl_page *sgx_encl_augment(struct vm_area_struct *vma,
231 | unsigned long addr, bool write);
232 | void sgx_encl_release(struct kref *ref);
233 |
234 | long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
235 | #ifdef CONFIG_COMPAT
236 | long sgx_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
237 | #endif
238 |
239 | /* Utility functions */
240 | int sgx_test_and_clear_young(struct sgx_encl_page *page, struct sgx_encl *encl);
241 | struct page *sgx_get_backing(struct sgx_encl *encl,
242 | struct sgx_encl_page *entry,
243 | bool pcmd);
244 | void sgx_put_backing(struct page *backing, bool write);
245 | void sgx_insert_pte(struct sgx_encl *encl,
246 | struct sgx_encl_page *encl_page,
247 | struct sgx_epc_page *epc_page,
248 | struct vm_area_struct *vma);
249 | int sgx_eremove(struct sgx_epc_page *epc_page);
250 | void sgx_zap_tcs_ptes(struct sgx_encl *encl,
251 | struct vm_area_struct *vma);
252 | void sgx_invalidate(struct sgx_encl *encl, bool flush_cpus);
253 | void sgx_flush_cpus(struct sgx_encl *encl);
254 |
255 | enum sgx_fault_flags {
256 | SGX_FAULT_RESERVE = BIT(0),
257 | };
258 |
259 | struct sgx_encl_page *sgx_fault_page(struct vm_area_struct *vma,
260 | unsigned long addr,
261 | unsigned int flags,
262 | struct vm_fault *vmf);
263 |
264 |
265 | extern struct mutex sgx_tgid_ctx_mutex;
266 | extern struct list_head sgx_tgid_ctx_list;
267 | extern atomic_t sgx_va_pages_cnt;
268 |
269 | int sgx_add_epc_bank(resource_size_t start, unsigned long size, int bank);
270 | int sgx_page_cache_init(void);
271 | void sgx_page_cache_teardown(void);
272 | struct sgx_epc_page *sgx_alloc_page(unsigned int flags);
273 | void sgx_free_page(struct sgx_epc_page *entry, struct sgx_encl *encl);
274 | void *sgx_get_page(struct sgx_epc_page *entry);
275 | void sgx_put_page(void *epc_page_vaddr);
276 | void sgx_eblock(struct sgx_encl *encl, struct sgx_epc_page *epc_page);
277 | void sgx_etrack(struct sgx_encl *encl, unsigned int epoch);
278 | void sgx_ipi_cb(void *info);
279 | int sgx_eldu(struct sgx_encl *encl, struct sgx_encl_page *encl_page,
280 | struct sgx_epc_page *epc_page, bool is_secs);
281 | long modify_range(struct sgx_range *rg, unsigned long flags);
282 | int remove_page(struct sgx_encl *encl, unsigned long address, bool trim);
283 | int sgx_get_encl(unsigned long addr, struct sgx_encl **encl);
284 | int sgx_vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, resource_size_t pa);
285 | #endif /* __ARCH_INTEL_SGX_H__ */
286 |
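
sgx_alloc_va_slot(), sgx_free_va_slot() and sgx_va_slots_empty() above treat a version-array EPC page as 512 slots of 8 bytes: the allocator returns a byte offset (slot index << 3) and the free path recovers the index with offset >> 3. The stand-alone sketch below re-does the same arithmetic in plain user-space C purely for illustration; it is not the kernel implementation, which uses find_first_zero_bit()/set_bit() on a DECLARE_BITMAP.

/* Hedged sketch: VA-slot bookkeeping (512 slots x 8 bytes = one 4 KiB page),
 * mirroring the helpers in sgx.h with a plain bit array instead of the
 * kernel bitmap API.
 */
#include <limits.h>
#include <stdio.h>

#define VA_SLOT_COUNT 512
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

static unsigned long va_slots[VA_SLOT_COUNT / BITS_PER_WORD];

/* Returns the byte offset of a free slot; the in-kernel helper returns
 * SGX_VA_SLOT_COUNT << 3 when the page is full instead of -1. */
static int alloc_va_slot(void)
{
	for (unsigned int i = 0; i < VA_SLOT_COUNT; i++) {
		unsigned long *word = &va_slots[i / BITS_PER_WORD];
		unsigned long bit = 1UL << (i % BITS_PER_WORD);

		if (!(*word & bit)) {
			*word |= bit;
			return i << 3;            /* each slot covers 8 bytes */
		}
	}
	return -1;
}

static void free_va_slot(unsigned int offset)
{
	unsigned int i = offset >> 3;             /* byte offset back to slot index */

	va_slots[i / BITS_PER_WORD] &= ~(1UL << (i % BITS_PER_WORD));
}

int main(void)
{
	int off0 = alloc_va_slot();
	int off1 = alloc_va_slot();

	printf("offsets: %d %d\n", off0, off1);   /* 0 8 */
	free_va_slot(off0);
	printf("reused:  %d\n", alloc_va_slot()); /* 0 again */
	return 0;
}

Each slot holds the 8-byte version counter that EWB/ELDU use for an evicted page, which is why sgx_init_page() reserves one slot per enclave page.
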
--------------------------------------------------------------------------------
/sgx_arch.h:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016-2017 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016-2017 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | */
56 |
57 | #include
58 | #ifndef _ASM_X86_SGX_ARCH_H
59 | #define _ASM_X86_SGX_ARCH_H
60 |
61 | #define SGX_SSA_GPRS_SIZE 182
62 | #define SGX_SSA_MISC_EXINFO_SIZE 16
63 |
64 | enum sgx_misc {
65 | SGX_MISC_EXINFO = 0x01,
66 | };
67 |
68 | #define SGX_MISC_RESERVED_MASK 0xFFFFFFFFFFFFFFFEL
69 |
70 | enum sgx_attribute {
71 | SGX_ATTR_DEBUG = 0x02,
72 | SGX_ATTR_MODE64BIT = 0x04,
73 | SGX_ATTR_PROVISIONKEY = 0x10,
74 | SGX_ATTR_EINITTOKENKEY = 0x20,
75 | };
76 |
77 | #define SGX_ATTR_RESERVED_MASK 0xFFFFFFFFFFFFFF49L
78 |
79 | #define SGX_SECS_RESERVED1_SIZE 24
80 | #define SGX_SECS_RESERVED2_SIZE 32
81 | #define SGX_SECS_RESERVED3_SIZE 32
82 | #define SGX_SECS_RESERVED4_SIZE 3834
83 |
84 | struct sgx_secs {
85 | uint64_t size;
86 | uint64_t base;
87 | uint32_t ssaframesize;
88 | uint32_t miscselect;
89 | uint8_t reserved1[SGX_SECS_RESERVED1_SIZE];
90 | uint64_t attributes;
91 | uint64_t xfrm;
92 | uint32_t mrenclave[8];
93 | uint8_t reserved2[SGX_SECS_RESERVED2_SIZE];
94 | uint32_t mrsigner[8];
95 | uint8_t reserved3[SGX_SECS_RESERVED3_SIZE];
96 | uint32_t configid[16];
97 | uint16_t isvvprodid;
98 | uint16_t isvsvn;
99 | uint16_t configsvn;
100 | uint8_t reserved4[SGX_SECS_RESERVED4_SIZE];
101 | };
102 |
103 | enum sgx_tcs_flags {
104 | SGX_TCS_DBGOPTIN = 0x01, /* cleared on EADD */
105 | };
106 |
107 | #define SGX_TCS_RESERVED_MASK 0xFFFFFFFFFFFFFFFEL
108 |
109 | struct sgx_tcs {
110 | uint64_t state;
111 | uint64_t flags;
112 | uint64_t ossa;
113 | uint32_t cssa;
114 | uint32_t nssa;
115 | uint64_t oentry;
116 | uint64_t aep;
117 | uint64_t ofsbase;
118 | uint64_t ogsbase;
119 | uint32_t fslimit;
120 | uint32_t gslimit;
121 | uint64_t reserved[503];
122 | };
123 |
124 | struct sgx_pageinfo {
125 | uint64_t linaddr;
126 | uint64_t srcpge;
127 | union {
128 | uint64_t secinfo;
129 | uint64_t pcmd;
130 | };
131 | uint64_t secs;
132 | } __attribute__((aligned(32)));
133 |
134 |
135 | #define SGX_SECINFO_PERMISSION_MASK 0x0000000000000007L
136 | #define SGX_SECINFO_PAGE_TYPE_MASK 0x000000000000FF00L
137 | #define SGX_SECINFO_RESERVED_MASK 0xFFFFFFFFFFFF00F8L
138 |
139 | enum sgx_page_type {
140 | SGX_PAGE_TYPE_SECS = 0x00,
141 | SGX_PAGE_TYPE_TCS = 0x01,
142 | SGX_PAGE_TYPE_REG = 0x02,
143 | SGX_PAGE_TYPE_VA = 0x03,
144 | SGX_PAGE_TYPE_TRIM = 0x04,
145 | };
146 |
147 | enum sgx_secinfo_flags {
148 | SGX_SECINFO_R = 0x01,
149 | SGX_SECINFO_W = 0x02,
150 | SGX_SECINFO_X = 0x04,
151 | SGX_SECINFO_SECS = (SGX_PAGE_TYPE_SECS << 8),
152 | SGX_SECINFO_TCS = (SGX_PAGE_TYPE_TCS << 8),
153 | SGX_SECINFO_REG = (SGX_PAGE_TYPE_REG << 8),
154 | SGX_SECINFO_TRIM = (SGX_PAGE_TYPE_TRIM << 8),
155 | };
156 |
157 | struct sgx_secinfo {
158 | uint64_t flags;
159 | uint64_t reserved[7];
160 | } __attribute__((aligned(64)));
161 |
162 | struct sgx_pcmd {
163 | struct sgx_secinfo secinfo;
164 | uint64_t enclave_id;
165 | uint8_t reserved[40];
166 | uint8_t mac[16];
167 | };
168 |
169 | #define SGX_MODULUS_SIZE 384
170 |
171 | struct sgx_sigstruct_header {
172 | uint64_t header1[2];
173 | uint32_t vendor;
174 | uint32_t date;
175 | uint64_t header2[2];
176 | uint32_t swdefined;
177 | uint8_t reserved1[84];
178 | };
179 |
180 | struct sgx_sigstruct_body {
181 | uint32_t miscselect;
182 | uint32_t miscmask;
183 | uint8_t reserved2[20];
184 | uint64_t attributes;
185 | uint64_t xfrm;
186 | uint8_t attributemask[16];
187 | uint8_t mrenclave[32];
188 | uint8_t reserved3[32];
189 | uint16_t isvprodid;
190 | uint16_t isvsvn;
191 | } __attribute__((__packed__));
192 |
193 | struct sgx_sigstruct {
194 | struct sgx_sigstruct_header header;
195 | uint8_t modulus[SGX_MODULUS_SIZE];
196 | uint32_t exponent;
197 | uint8_t signature[SGX_MODULUS_SIZE];
198 | struct sgx_sigstruct_body body;
199 | uint8_t reserved4[12];
200 | uint8_t q1[SGX_MODULUS_SIZE];
201 | uint8_t q2[SGX_MODULUS_SIZE];
202 | };
203 |
204 | struct sgx_sigstruct_payload {
205 | struct sgx_sigstruct_header header;
206 | struct sgx_sigstruct_body body;
207 | };
208 |
209 | struct sgx_einittoken_payload {
210 | uint32_t valid;
211 | uint32_t reserved1[11];
212 | uint64_t attributes;
213 | uint64_t xfrm;
214 | uint8_t mrenclave[32];
215 | uint8_t reserved2[32];
216 | uint8_t mrsigner[32];
217 | uint8_t reserved3[32];
218 | };
219 |
220 | struct sgx_einittoken {
221 | struct sgx_einittoken_payload payload;
222 | uint8_t cpusvnle[16];
223 | uint16_t isvprodidle;
224 | uint16_t isvsvnle;
225 | uint8_t reserved2[24];
226 | uint32_t maskedmiscselectle;
227 | uint64_t maskedattributesle;
228 | uint64_t maskedxfrmle;
229 | uint8_t keyid[32];
230 | uint8_t mac[16];
231 | };
232 |
233 | struct sgx_report {
234 | uint8_t cpusvn[16];
235 | uint32_t miscselect;
236 | uint8_t reserved1[28];
237 | uint64_t attributes;
238 | uint64_t xfrm;
239 | uint8_t mrenclave[32];
240 | uint8_t reserved2[32];
241 | uint8_t mrsigner[32];
242 | uint8_t reserved3[96];
243 | uint16_t isvprodid;
244 | uint16_t isvsvn;
245 | uint8_t reserved4[60];
246 | uint8_t reportdata[64];
247 | uint8_t keyid[32];
248 | uint8_t mac[16];
249 | };
250 |
251 | struct sgx_targetinfo {
252 | uint8_t mrenclave[32];
253 | uint64_t attributes;
254 | uint64_t xfrm;
255 | uint8_t reserved1[4];
256 | uint32_t miscselect;
257 | uint8_t reserved2[456];
258 | };
259 |
260 | struct sgx_keyrequest {
261 | uint16_t keyname;
262 | uint16_t keypolicy;
263 | uint16_t isvsvn;
264 | uint16_t reserved1;
265 | uint8_t cpusvn[16];
266 | uint64_t attributemask;
267 | uint64_t xfrmmask;
268 | uint8_t keyid[32];
269 | uint32_t miscmask;
270 | uint8_t reserved2[436];
271 | };
272 |
273 | #endif /* _ASM_X86_SGX_ARCH_H */
274 |
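
The SECINFO flag layout above packs the page permissions into bits 2:0 and the page type into bits 15:8 (SGX_SECINFO_TCS, _REG and _TRIM are the sgx_page_type values shifted left by 8). That is why sgx_ioc_page_modpr() in the patch only accepts R/W/X bits, while the MKTCS and TRIM ioctls pass a pure page-type value. A self-contained sketch of the encoding (constants restated here so the example compiles outside the driver):

/* Hedged sketch: how the SECINFO flag values in sgx_arch.h are laid out.
 * The constants are re-stated so the example is self-contained; in the
 * driver they come from sgx_arch.h.
 */
#include <stdint.h>
#include <stdio.h>

#define SECINFO_R        0x01ULL                 /* SGX_SECINFO_R            */
#define SECINFO_W        0x02ULL                 /* SGX_SECINFO_W            */
#define PAGE_TYPE_TCS    0x01ULL                 /* SGX_PAGE_TYPE_TCS        */
#define PAGE_TYPE_TRIM   0x04ULL                 /* SGX_PAGE_TYPE_TRIM       */
#define SECINFO_TCS      (PAGE_TYPE_TCS << 8)    /* SGX_SECINFO_TCS  = 0x100 */
#define SECINFO_TRIM     (PAGE_TYPE_TRIM << 8)   /* SGX_SECINFO_TRIM = 0x400 */

int main(void)
{
	/* EMODPR request: permission bits only (bits 2:0), e.g. read-only.     */
	uint64_t emodpr_flags = SECINFO_R;
	/* EMODT request: page type only (bits 15:8), e.g. REG -> TCS or TRIM.  */
	uint64_t emodt_flags = SECINFO_TCS;

	printf("EMODPR flags 0x%llx, EMODT flags 0x%llx, TRIM 0x%llx\n",
	       (unsigned long long)emodpr_flags,
	       (unsigned long long)emodt_flags,
	       (unsigned long long)SECINFO_TRIM);
	return 0;
}

A request that mixes permission and type bits would be rejected by sgx_ioc_page_modpr(), which masks anything outside SGX_SECINFO_R|W|X.
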
--------------------------------------------------------------------------------
/sgx_asm.h:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016-2017 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016-2017 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | * Suresh Siddha
56 | */
57 |
58 | #ifndef _ASM_X86_SGX_H
59 | #define _ASM_X86_SGX_H
60 |
61 | #include "sgx_arch.h"
62 | #include
63 | #include
64 | #include
65 | #include
66 |
67 | #define SGX_CPUID 0x12
68 |
69 | enum sgx_cpuid {
70 | SGX_CPUID_CAPABILITIES = 0,
71 | SGX_CPUID_ATTRIBUTES = 1,
72 | SGX_CPUID_EPC_BANKS = 2,
73 | };
74 |
75 | enum sgx_commands {
76 | ECREATE = 0x0,
77 | EADD = 0x1,
78 | EINIT = 0x2,
79 | EREMOVE = 0x3,
80 | EDGBRD = 0x4,
81 | EDGBWR = 0x5,
82 | EEXTEND = 0x6,
83 | ELDU = 0x8,
84 | EBLOCK = 0x9,
85 | EPA = 0xA,
86 | EWB = 0xB,
87 | ETRACK = 0xC,
88 | EAUG = 0xD,
89 | EMODPR = 0xE,
90 | EMODT = 0xF,
91 | };
92 |
93 | #ifdef CONFIG_X86_64
94 | #define XAX "%%rax"
95 | #else
96 | #define XAX "%%eax"
97 | #endif
98 |
99 | #define __encls_ret(rax, rbx, rcx, rdx) \
100 | ({ \
101 | int ret; \
102 | asm volatile( \
103 | "1: .byte 0x0f, 0x01, 0xcf;\n\t" \
104 | "2:\n" \
105 | ".section .fixup,\"ax\"\n" \
106 | "3: mov $-14,"XAX"\n" \
107 | " jmp 2b\n" \
108 | ".previous\n" \
109 | _ASM_EXTABLE(1b, 3b) \
110 | : "=a"(ret) \
111 | : "a"(rax), "b"(rbx), "c"(rcx), "d"(rdx) \
112 | : "memory"); \
113 | ret; \
114 | })
115 |
116 | #define __encls(rax, rbx, rcx, rdx...) \
117 | ({ \
118 | int ret; \
119 | asm volatile( \
120 | "1: .byte 0x0f, 0x01, 0xcf;\n\t" \
121 | " xor "XAX","XAX"\n" \
122 | "2:\n" \
123 | ".section .fixup,\"ax\"\n" \
124 | "3: mov $-14,"XAX"\n" \
125 | " jmp 2b\n" \
126 | ".previous\n" \
127 | _ASM_EXTABLE(1b, 3b) \
128 | : "=a"(ret), "=b"(rbx), "=c"(rcx) \
129 | : "a"(rax), "b"(rbx), "c"(rcx), rdx \
130 | : "memory"); \
131 | ret; \
132 | })
133 |
134 | static inline unsigned long __ecreate(struct sgx_pageinfo *pginfo, void *secs)
135 | {
136 | return __encls(ECREATE, pginfo, secs, "d"(0));
137 | }
138 |
139 | static inline int __eextend(void *secs, void *epc)
140 | {
141 | return __encls(EEXTEND, secs, epc, "d"(0));
142 | }
143 |
144 | static inline int __eadd(struct sgx_pageinfo *pginfo, void *epc)
145 | {
146 | return __encls(EADD, pginfo, epc, "d"(0));
147 | }
148 |
149 | static inline int __einit(void *sigstruct, struct sgx_einittoken *einittoken,
150 | void *secs)
151 | {
152 | return __encls_ret(EINIT, sigstruct, secs, einittoken);
153 | }
154 |
155 | static inline int __eremove(void *epc)
156 | {
157 | unsigned long rbx = 0;
158 | unsigned long rdx = 0;
159 |
160 | return __encls_ret(EREMOVE, rbx, epc, rdx);
161 | }
162 |
163 | static inline int __edbgwr(void *epc, unsigned long *data)
164 | {
165 | return __encls(EDGBWR, *data, epc, "d"(0));
166 | }
167 |
168 | static inline int __edbgrd(void *epc, unsigned long *data)
169 | {
170 | unsigned long rbx = 0;
171 | int ret;
172 |
173 | ret = __encls(EDGBRD, rbx, epc, "d"(0));
174 | if (!ret)
175 | *(unsigned long *) data = rbx;
176 |
177 | return ret;
178 | }
179 |
180 | static inline int __etrack(void *epc)
181 | {
182 | unsigned long rbx = 0;
183 | unsigned long rdx = 0;
184 |
185 | return __encls_ret(ETRACK, rbx, epc, rdx);
186 | }
187 |
188 | static inline int __eldu(unsigned long rbx, unsigned long rcx,
189 | unsigned long rdx)
190 | {
191 | return __encls_ret(ELDU, rbx, rcx, rdx);
192 | }
193 |
194 | static inline int __eblock(unsigned long rcx)
195 | {
196 | unsigned long rbx = 0;
197 | unsigned long rdx = 0;
198 |
199 | return __encls_ret(EBLOCK, rbx, rcx, rdx);
200 | }
201 |
202 | static inline int __epa(void *epc)
203 | {
204 | unsigned long rbx = SGX_PAGE_TYPE_VA;
205 |
206 | return __encls(EPA, rbx, epc, "d"(0));
207 | }
208 |
209 | static inline int __ewb(struct sgx_pageinfo *pginfo, void *epc, void *va)
210 | {
211 | return __encls_ret(EWB, pginfo, epc, va);
212 | }
213 |
214 | static inline int __eaug(struct sgx_pageinfo *pginfo, void *epc)
215 | {
216 | return __encls(EAUG, pginfo, epc, "d"(0));
217 | }
218 |
219 | static inline int __emodpr(struct sgx_secinfo *secinfo, void *epc)
220 | {
221 | unsigned long rdx = 0;
222 |
223 | return __encls_ret(EMODPR, secinfo, epc, rdx);
224 | }
225 |
226 | static inline int __emodt(struct sgx_secinfo *secinfo, void *epc)
227 | {
228 | unsigned long rdx = 0;
229 |
230 | return __encls_ret(EMODT, secinfo, epc, rdx);
231 | }
232 |
233 | #endif /* _ASM_X86_SGX_H */
234 |
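
All the wrappers above emit the three-byte ENCLS opcode (0x0f, 0x01, 0xcf) with the leaf number in RAX; the .fixup entry turns a faulting leaf into -14 (-EFAULT), and the __encls_ret() variants return the leaf's own status code, such as SGX_PREV_TRK_INCMPL for ETRACK or SGX_LOCKFAIL for EMODPR/EMODT. The snippet below is only a condensed restatement of the retry loop that __modify_range() in sgx_encl2.c builds around __emodpr(); it assumes the driver's headers and adds no new behaviour.

/* Hedged sketch: the SGX_LOCKFAIL retry pattern used around __emodpr()/__emodt()
 * in __modify_range(), shown here in isolation and assuming the driver's sgx.h.
 */
#include "sgx.h"

static int sgx_emodpr_spin(struct sgx_secinfo *secinfo, void *epc_va)
{
	int status = SGX_LOCKFAIL;
	int cnt = 0;

	/* EMODPR can fail transiently while the page is locked by another
	 * leaf; spin a bounded number of times before giving up. */
	while (status == SGX_LOCKFAIL && cnt < SGX_EDMM_SPIN_COUNT) {
		status = __emodpr(secinfo, epc_va);	/* ENCLS[EMODPR] */
		cnt++;
	}

	return status;	/* 0 on success, SGX leaf status code otherwise */
}
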
--------------------------------------------------------------------------------
/sgx_encl.c:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016-2017 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016-2017 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | * Suresh Siddha
56 | * Serge Ayoun
57 | * Shay Katz-zamir
58 | * Sean Christopherson
59 | */
60 |
61 | #include "sgx.h"
62 | #include
63 | #include
64 | #include
65 | #include
66 | #include
67 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
68 | #include
69 | #else
70 | #include
71 | #endif
72 | #include "linux/file.h"
73 | #include
74 | #include
75 | #include
76 |
77 | struct sgx_add_page_req {
78 | struct sgx_encl *encl;
79 | struct sgx_encl_page *encl_page;
80 | struct sgx_secinfo secinfo;
81 | u16 mrmask;
82 | struct list_head list;
83 | };
84 |
85 | /**
86 | * sgx_encl_find - find an enclave
87 | * @mm: mm struct of the current process
88 | * @addr: address in the ELRANGE
89 | * @vma: the resulting VMA
90 | *
91 |  * Finds the enclave that contains the given address and returns the VMA of
92 |  * the enclave that covers that address. The VMA is returned even if a
93 |  * &struct sgx_encl instance does not exist yet (enclave creation has not
94 |  * been performed), as long as it is a proper enclave VMA.
95 | *
96 | * Return:
97 | * 0 on success,
98 | * -EINVAL if an enclave was not found,
99 | * -ENOENT if the enclave has not been created yet
100 | */
101 | int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
102 | struct vm_area_struct **vma)
103 | {
104 | struct vm_area_struct *result;
105 | struct sgx_encl *encl;
106 |
107 | result = find_vma(mm, addr);
108 | if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
109 | return -EINVAL;
110 |
111 | encl = result->vm_private_data;
112 | *vma = result;
113 |
114 | return encl ? 0 : -ENOENT;
115 | }
116 |
117 | static struct sgx_tgid_ctx *sgx_find_tgid_ctx(struct pid *tgid)
118 | {
119 | struct sgx_tgid_ctx *ctx;
120 |
121 | list_for_each_entry(ctx, &sgx_tgid_ctx_list, list)
122 | if (pid_nr(ctx->tgid) == pid_nr(tgid))
123 | return ctx;
124 |
125 | return NULL;
126 | }
127 |
128 | static int sgx_add_to_tgid_ctx(struct sgx_encl *encl)
129 | {
130 | struct sgx_tgid_ctx *ctx;
131 | struct pid *tgid = get_pid(task_tgid(current));
132 |
133 | mutex_lock(&sgx_tgid_ctx_mutex);
134 |
135 | ctx = sgx_find_tgid_ctx(tgid);
136 | if (ctx) {
137 | if (kref_get_unless_zero(&ctx->refcount)) {
138 | encl->tgid_ctx = ctx;
139 | mutex_unlock(&sgx_tgid_ctx_mutex);
140 | put_pid(tgid);
141 | return 0;
142 | } else {
143 | list_del_init(&ctx->list);
144 | }
145 | }
146 |
147 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
148 | if (!ctx) {
149 | mutex_unlock(&sgx_tgid_ctx_mutex);
150 | put_pid(tgid);
151 | return -ENOMEM;
152 | }
153 |
154 | ctx->tgid = tgid;
155 | kref_init(&ctx->refcount);
156 | INIT_LIST_HEAD(&ctx->encl_list);
157 |
158 | list_add(&ctx->list, &sgx_tgid_ctx_list);
159 |
160 | encl->tgid_ctx = ctx;
161 |
162 | mutex_unlock(&sgx_tgid_ctx_mutex);
163 | return 0;
164 | }
165 |
166 | void sgx_tgid_ctx_release(struct kref *ref)
167 | {
168 | struct sgx_tgid_ctx *pe =
169 | container_of(ref, struct sgx_tgid_ctx, refcount);
170 | mutex_lock(&sgx_tgid_ctx_mutex);
171 | list_del(&pe->list);
172 | mutex_unlock(&sgx_tgid_ctx_mutex);
173 | put_pid(pe->tgid);
174 | kfree(pe);
175 | }
176 |
177 | static int sgx_measure(struct sgx_epc_page *secs_page,
178 | struct sgx_epc_page *epc_page,
179 | u16 mrmask)
180 | {
181 | void *secs;
182 | void *epc;
183 | int ret = 0;
184 | int i, j;
185 |
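/*
 * Each set bit in mrmask selects one 256-byte chunk of the 4 KiB page
 * (16 chunks, matching the u16 mask): the loop below runs EEXTEND only
 * on the selected chunks so that they contribute to the enclave
 * measurement.
 */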
186 | for (i = 0, j = 1; i < 0x1000 && !ret; i += 0x100, j <<= 1) {
187 | if (!(j & mrmask))
188 | continue;
189 |
190 | secs = sgx_get_page(secs_page);
191 | epc = sgx_get_page(epc_page);
192 |
193 | ret = __eextend(secs, (void *)((unsigned long)epc + i));
194 |
195 | sgx_put_page(epc);
196 | sgx_put_page(secs);
197 | }
198 |
199 | return ret;
200 | }
201 |
202 | static int sgx_eadd(struct sgx_epc_page *secs_page,
203 | struct sgx_epc_page *epc_page,
204 | unsigned long linaddr,
205 | struct sgx_secinfo *secinfo,
206 | struct page *backing)
207 | {
208 | struct sgx_pageinfo pginfo;
209 | void *epc_page_vaddr;
210 | int ret;
211 |
212 | pginfo.srcpge = (unsigned long)kmap_atomic(backing);
213 | pginfo.secs = (unsigned long)sgx_get_page(secs_page);
214 | epc_page_vaddr = sgx_get_page(epc_page);
215 |
216 | pginfo.linaddr = linaddr;
217 | pginfo.secinfo = (unsigned long)secinfo;
218 | ret = __eadd(&pginfo, epc_page_vaddr);
219 |
220 | sgx_put_page(epc_page_vaddr);
221 | sgx_put_page((void *)(unsigned long)pginfo.secs);
222 | kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
223 |
224 | return ret;
225 | }
226 |
227 | static bool sgx_process_add_page_req(struct sgx_add_page_req *req,
228 | struct sgx_epc_page *epc_page)
229 | {
230 | struct page *backing;
231 | struct sgx_encl_page *encl_page = req->encl_page;
232 | struct sgx_encl *encl = req->encl;
233 | struct vm_area_struct *vma;
234 | int ret;
235 |
236 | if (encl->flags & (SGX_ENCL_SUSPEND | SGX_ENCL_DEAD))
237 | return false;
238 |
239 | ret = sgx_encl_find(encl->mm, encl_page->addr, &vma);
240 | if (ret)
241 | return false;
242 |
243 | backing = sgx_get_backing(encl, encl_page, false);
244 | if (IS_ERR(backing))
245 | return false;
246 |
247 | /* Do not race with do_exit() */
248 | if (!atomic_read(&encl->mm->mm_users)) {
249 | sgx_put_backing(backing, 0);
250 | return false;
251 | }
252 |
253 | ret = sgx_vm_insert_pfn(vma, encl_page->addr, epc_page->pa);
254 | if (ret != VM_FAULT_NOPAGE) {
255 | sgx_put_backing(backing, 0);
256 | return false;
257 | }
258 |
259 | ret = sgx_eadd(encl->secs.epc_page, epc_page, encl_page->addr,
260 | &req->secinfo, backing);
261 |
262 | sgx_put_backing(backing, 0);
263 | if (ret) {
264 | sgx_warn(encl, "EADD returned %d\n", ret);
265 | zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
266 | return false;
267 | }
268 |
269 | encl->secs_child_cnt++;
270 |
271 | ret = sgx_measure(encl->secs.epc_page, epc_page, req->mrmask);
272 | if (ret) {
273 | sgx_warn(encl, "EEXTEND returned %d\n", ret);
274 | zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
275 | return false;
276 | }
277 |
278 | epc_page->encl_page = encl_page;
279 | encl_page->epc_page = epc_page;
280 | sgx_test_and_clear_young(encl_page, encl);
281 | list_add_tail(&epc_page->list, &encl->load_list);
282 | encl_page->flags |= SGX_ENCL_PAGE_ADDED;
283 |
284 | return true;
285 | }
286 |
287 | static void sgx_add_page_worker(struct work_struct *work)
288 | {
289 | struct sgx_encl *encl;
290 | struct sgx_add_page_req *req;
291 | struct sgx_epc_page *epc_page;
292 | bool skip_rest = false;
293 | bool is_empty = false;
294 |
295 | encl = container_of(work, struct sgx_encl, add_page_work);
296 |
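/*
 * Drain the add-page queue. Every queued request holds a reference on
 * the enclave, taken in __sgx_encl_add_page(); it is dropped in the
 * loop condition once the request has been processed, so the enclave
 * stays alive for as long as work remains.
 */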
297 | do {
298 | schedule();
299 |
300 | if (encl->flags & SGX_ENCL_DEAD)
301 | skip_rest = true;
302 |
303 | mutex_lock(&encl->lock);
304 | req = list_first_entry(&encl->add_page_reqs,
305 | struct sgx_add_page_req, list);
306 | list_del(&req->list);
307 | is_empty = list_empty(&encl->add_page_reqs);
308 | mutex_unlock(&encl->lock);
309 |
310 | if (skip_rest)
311 | goto next;
312 |
313 | epc_page = sgx_alloc_page(0);
314 | if (IS_ERR(epc_page)) {
315 | skip_rest = true;
316 | goto next;
317 | }
318 |
319 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
320 | mmap_read_lock(encl->mm);
321 | #else
322 | down_read(&encl->mm->mmap_sem);
323 | #endif
324 | mutex_lock(&encl->lock);
325 |
326 | if (!sgx_process_add_page_req(req, epc_page)) {
327 | sgx_free_page(epc_page, encl);
328 | skip_rest = true;
329 | }
330 |
331 | mutex_unlock(&encl->lock);
332 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
333 | mmap_read_unlock(encl->mm);
334 | #else
335 | up_read(&encl->mm->mmap_sem);
336 | #endif
337 |
338 | next:
339 | kfree(req);
340 | } while (!kref_put(&encl->refcount, sgx_encl_release) && !is_empty);
341 | }
342 |
343 | static u32 sgx_calc_ssaframesize(u32 miscselect, u64 xfrm)
344 | {
345 | u32 size_max = PAGE_SIZE;
346 | u32 size;
347 | int i;
348 |
349 | for (i = 2; i < 64; i++) {
350 | if (!((1UL << i) & xfrm))
351 | continue;
352 |
353 | size = SGX_SSA_GPRS_SIZE + sgx_xsave_size_tbl[i];
354 | if (miscselect & SGX_MISC_EXINFO)
355 | size += SGX_SSA_MISC_EXINFO_SIZE;
356 |
357 | if (size > size_max)
358 | size_max = size;
359 | }
360 |
361 | return (size_max + PAGE_SIZE - 1) >> PAGE_SHIFT;
362 | }
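/*
 * The returned value is the SSA frame size in pages. As a worked example
 * with hypothetical numbers: if the largest save area selected by xfrm
 * were SGX_SSA_GPRS_SIZE + sgx_xsave_size_tbl[i] = 0x11a0 bytes, the
 * frame would round up to (0x11a0 + 0xfff) >> PAGE_SHIFT = 2 pages,
 * which is the minimum SECS.SSAFRAMESIZE accepted by sgx_validate_secs()
 * below.
 */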
363 |
364 | static int sgx_validate_secs(const struct sgx_secs *secs,
365 | unsigned long ssaframesize)
366 | {
367 | int i;
368 |
369 | if (secs->size < (2 * PAGE_SIZE) ||
370 | (secs->size & (secs->size - 1)) != 0)
371 | return -EINVAL;
372 |
373 | if (secs->base & (secs->size - 1))
374 | return -EINVAL;
375 |
376 | if (secs->attributes & SGX_ATTR_RESERVED_MASK ||
377 | secs->miscselect & sgx_misc_reserved)
378 | return -EINVAL;
379 |
380 | if (secs->attributes & SGX_ATTR_MODE64BIT) {
381 | #ifdef CONFIG_X86_64
382 | if (secs->size > sgx_encl_size_max_64)
383 | return -EINVAL;
384 | #else
385 | return -EINVAL;
386 | #endif
387 | } else {
 388 | 		/* On a 64-bit kernel, allow 32-bit enclaves only for tasks
 389 | 		 * running in 32-bit compatibility mode.
 390 | 		 */
391 | #ifdef CONFIG_X86_64
392 | if (!test_thread_flag(TIF_ADDR32))
393 | return -EINVAL;
394 | #endif
395 | if (secs->size > sgx_encl_size_max_32)
396 | return -EINVAL;
397 | }
398 |
399 | if ((secs->xfrm & 0x3) != 0x3 || (secs->xfrm & ~sgx_xfrm_mask))
400 | return -EINVAL;
401 |
402 | /* Check that BNDREGS and BNDCSR are equal. */
403 | if (((secs->xfrm >> 3) & 1) != ((secs->xfrm >> 4) & 1))
404 | return -EINVAL;
405 |
406 | if (!secs->ssaframesize || ssaframesize > secs->ssaframesize)
407 | return -EINVAL;
408 |
409 | for (i = 0; i < SGX_SECS_RESERVED1_SIZE; i++)
410 | if (secs->reserved1[i])
411 | return -EINVAL;
412 |
413 | for (i = 0; i < SGX_SECS_RESERVED2_SIZE; i++)
414 | if (secs->reserved2[i])
415 | return -EINVAL;
416 |
417 | for (i = 0; i < SGX_SECS_RESERVED3_SIZE; i++)
418 | if (secs->reserved3[i])
419 | return -EINVAL;
420 |
421 | for (i = 0; i < SGX_SECS_RESERVED4_SIZE; i++)
422 | if (secs->reserved4[i])
423 | return -EINVAL;
424 |
425 | return 0;
426 | }
427 |
428 | static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
429 | struct mm_struct *mm)
430 | {
431 | struct sgx_encl *encl =
432 | container_of(mn, struct sgx_encl, mmu_notifier);
433 |
434 | mutex_lock(&encl->lock);
435 | encl->flags |= SGX_ENCL_DEAD;
436 | mutex_unlock(&encl->lock);
437 | }
438 |
439 | static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
440 | .release = sgx_mmu_notifier_release,
441 | };
442 |
443 | int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
444 | unsigned long addr, unsigned int alloc_flags,
445 | struct sgx_epc_page **va_src, bool already_locked)
446 | {
447 | struct sgx_va_page *va_page;
448 | struct sgx_epc_page *epc_page = NULL;
449 | unsigned int va_offset = PAGE_SIZE;
450 | void *vaddr;
451 | int ret = 0;
452 |
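/*
 * Reserve a Version Array (VA) slot for the page: reuse a free slot in
 * an existing VA page when possible, otherwise allocate a fresh EPC page
 * and turn it into a VA page with EPA. Each VA page provides 512 slots,
 * used to version enclave pages when they are evicted from the EPC.
 */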
453 | list_for_each_entry(va_page, &encl->va_pages, list) {
454 | va_offset = sgx_alloc_va_slot(va_page);
455 | if (va_offset < PAGE_SIZE)
456 | break;
457 | }
458 |
459 | if (va_offset == PAGE_SIZE) {
460 | va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
461 | if (!va_page)
462 | return -ENOMEM;
463 |
464 | if (va_src) {
465 | epc_page = *va_src;
466 | *va_src = NULL;
467 | } else {
468 | epc_page = sgx_alloc_page(alloc_flags);
469 | if (IS_ERR(epc_page)) {
470 | kfree(va_page);
471 | return PTR_ERR(epc_page);
472 | }
473 | }
474 |
475 | vaddr = sgx_get_page(epc_page);
476 | if (!vaddr) {
 477 | 			sgx_warn(encl,
 478 | 				 "kmap of a new VA page failed\n");
479 | sgx_free_page(epc_page, encl);
480 | kfree(va_page);
481 | return -EFAULT;
482 | }
483 |
484 | ret = __epa(vaddr);
485 | sgx_put_page(vaddr);
486 |
487 | if (ret) {
488 | sgx_warn(encl, "EPA returned %d\n", ret);
489 | sgx_free_page(epc_page, encl);
490 | kfree(va_page);
491 | return -EFAULT;
492 | }
493 |
494 | atomic_inc(&sgx_va_pages_cnt);
495 |
496 | va_page->epc_page = epc_page;
497 | va_offset = sgx_alloc_va_slot(va_page);
498 |
499 | if (!already_locked)
500 | mutex_lock(&encl->lock);
501 | list_add(&va_page->list, &encl->va_pages);
502 | if (!already_locked)
503 | mutex_unlock(&encl->lock);
504 | }
505 |
506 | entry->va_page = va_page;
507 | entry->va_offset = va_offset;
508 | entry->addr = addr;
509 |
510 | return 0;
511 | }
512 |
513 | /**
514 | * sgx_encl_alloc - allocate memory for an enclave and set attributes
515 | *
516 | * @secs: SECS data (must be page aligned)
517 | *
518 | * Allocates a new &struct sgx_encl instance. Validates SECS attributes, creates
519 | * backing storage for the enclave and sets enclave attributes to sane initial
520 | * values.
521 | *
522 | * Return:
523 | * &struct sgx_encl instance on success,
524 | * system error on failure
525 | */
526 | static struct sgx_encl *sgx_encl_alloc(struct sgx_secs *secs)
527 | {
528 | unsigned long ssaframesize;
529 | struct sgx_encl *encl;
530 | struct file *backing;
531 | struct file *pcmd;
532 |
533 | ssaframesize = sgx_calc_ssaframesize(secs->miscselect, secs->xfrm);
534 | if (sgx_validate_secs(secs, ssaframesize))
535 | return ERR_PTR(-EINVAL);
536 |
537 | backing = shmem_file_setup("[dev/sgx]", secs->size + PAGE_SIZE,
538 | VM_NORESERVE);
539 | if (IS_ERR(backing))
540 | return (void *)backing;
541 |
542 | pcmd = shmem_file_setup("[dev/sgx]", (secs->size + PAGE_SIZE) >> 5,
543 | VM_NORESERVE);
544 | if (IS_ERR(pcmd)) {
545 | fput(backing);
546 | return (void *)pcmd;
547 | }
548 |
549 | encl = kzalloc(sizeof(*encl), GFP_KERNEL);
550 | if (!encl) {
551 | fput(backing);
552 | fput(pcmd);
553 | return ERR_PTR(-ENOMEM);
554 | }
555 |
556 | encl->attributes = secs->attributes;
557 | encl->xfrm = secs->xfrm;
558 |
559 | kref_init(&encl->refcount);
560 | INIT_LIST_HEAD(&encl->add_page_reqs);
561 | INIT_LIST_HEAD(&encl->va_pages);
562 | INIT_RADIX_TREE(&encl->page_tree, GFP_KERNEL);
563 | INIT_LIST_HEAD(&encl->load_list);
564 | INIT_LIST_HEAD(&encl->encl_list);
565 | mutex_init(&encl->lock);
566 | INIT_WORK(&encl->add_page_work, sgx_add_page_worker);
567 |
568 | encl->mm = current->mm;
569 | encl->base = secs->base;
570 | encl->size = secs->size;
571 | encl->ssaframesize = secs->ssaframesize;
572 | encl->backing = backing;
573 | encl->pcmd = pcmd;
574 |
575 | return encl;
576 | }
577 |
578 | /**
579 | * sgx_encl_create - create an enclave
580 | *
581 | * @secs: page aligned SECS data
582 | *
583 | * Validates SECS attributes, allocates an EPC page for the SECS and creates
584 | * the enclave by performing ECREATE.
585 | *
586 | * Return:
587 | * 0 on success,
588 | * system error on failure
589 | */
590 | int sgx_encl_create(struct sgx_secs *secs)
591 | {
592 | struct sgx_pageinfo pginfo;
593 | struct sgx_secinfo secinfo;
594 | struct sgx_encl *encl;
595 | struct sgx_epc_page *secs_epc;
596 | struct vm_area_struct *vma;
597 | void *secs_vaddr;
598 | long ret;
599 |
600 | encl = sgx_encl_alloc(secs);
601 | if (IS_ERR(encl))
602 | return PTR_ERR(encl);
603 |
604 | secs_epc = sgx_alloc_page(0);
605 | if (IS_ERR(secs_epc)) {
606 | ret = PTR_ERR(secs_epc);
607 | goto out;
608 | }
609 |
610 | encl->secs.epc_page = secs_epc;
611 |
612 | ret = sgx_add_to_tgid_ctx(encl);
613 | if (ret)
614 | goto out;
615 |
616 | ret = sgx_init_page(encl, &encl->secs, encl->base + encl->size, 0,
617 | NULL, false);
618 | if (ret)
619 | goto out;
620 |
621 | secs_vaddr = sgx_get_page(secs_epc);
622 |
623 | pginfo.srcpge = (unsigned long)secs;
624 | pginfo.linaddr = 0;
625 | pginfo.secinfo = (unsigned long)&secinfo;
626 | pginfo.secs = 0;
627 | memset(&secinfo, 0, sizeof(secinfo));
628 | ret = __ecreate((void *)&pginfo, secs_vaddr);
629 |
630 | sgx_put_page(secs_vaddr);
631 |
632 | if (ret) {
633 | sgx_dbg(encl, "ECREATE returned %ld\n", ret);
634 | ret = -EFAULT;
635 | goto out;
636 | }
637 |
638 | if (secs->attributes & SGX_ATTR_DEBUG)
639 | encl->flags |= SGX_ENCL_DEBUG;
640 |
641 | encl->mmu_notifier.ops = &sgx_mmu_notifier_ops;
642 | ret = mmu_notifier_register(&encl->mmu_notifier, encl->mm);
643 | if (ret) {
644 | if (ret == -EINTR)
645 | ret = -ERESTARTSYS;
646 | encl->mmu_notifier.ops = NULL;
647 | goto out;
648 | }
649 |
650 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
651 | mmap_read_lock(current->mm);
652 | #else
 653 | 	down_read(&current->mm->mmap_sem);
654 | #endif
655 | ret = sgx_encl_find(current->mm, secs->base, &vma);
656 | if (ret != -ENOENT) {
657 | if (!ret)
658 | ret = -EINVAL;
659 | goto out_locked;
660 | }
661 |
662 | if (vma->vm_start != secs->base ||
663 | vma->vm_end != (secs->base + secs->size)
664 | /* vma->vm_pgoff != 0 */) {
665 | ret = -EINVAL;
666 | goto out_locked;
667 | }
668 |
669 | vma->vm_private_data = encl;
670 |
671 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
672 | mmap_read_unlock(current->mm);
673 | #else
 674 | 	up_read(&current->mm->mmap_sem);
675 | #endif
676 |
677 | mutex_lock(&sgx_tgid_ctx_mutex);
678 | list_add_tail(&encl->encl_list, &encl->tgid_ctx->encl_list);
679 | mutex_unlock(&sgx_tgid_ctx_mutex);
680 |
681 | return 0;
682 | out_locked:
683 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
684 | mmap_read_unlock(current->mm);
685 | #else
 686 | 	up_read(&current->mm->mmap_sem);
687 | #endif
688 |
689 | out:
690 | if (encl)
691 | kref_put(&encl->refcount, sgx_encl_release);
692 | return ret;
693 | }
694 |
695 | static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
696 | {
697 | u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
698 | u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
699 | int i;
700 |
701 | if ((secinfo->flags & SGX_SECINFO_RESERVED_MASK) ||
702 | ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R)) ||
703 | (page_type != SGX_SECINFO_TCS &&
704 | page_type != SGX_SECINFO_REG))
705 | return -EINVAL;
706 |
707 | for (i = 0; i < sizeof(secinfo->reserved) / sizeof(u64); i++)
708 | if (secinfo->reserved[i])
709 | return -EINVAL;
710 |
711 | return 0;
712 | }
713 |
714 | static bool sgx_validate_offset(struct sgx_encl *encl, unsigned long offset)
715 | {
716 | if (offset & (PAGE_SIZE - 1))
717 | return false;
718 |
719 | if (offset >= encl->size)
720 | return false;
721 |
722 | return true;
723 | }
724 |
725 | static int sgx_validate_tcs(struct sgx_encl *encl, struct sgx_tcs *tcs)
726 | {
727 | int i;
728 |
729 | if (tcs->flags & SGX_TCS_RESERVED_MASK) {
730 | sgx_dbg(encl, "%s: invalid TCS flags = 0x%lx\n",
731 | __func__, (unsigned long)tcs->flags);
732 | return -EINVAL;
733 | }
734 |
735 | if (tcs->flags & SGX_TCS_DBGOPTIN) {
736 | sgx_dbg(encl, "%s: DBGOPTIN TCS flag is set, EADD will clear it\n",
737 | __func__);
738 | return -EINVAL;
739 | }
740 |
741 | if (!sgx_validate_offset(encl, tcs->ossa)) {
742 | sgx_dbg(encl, "%s: invalid OSSA: 0x%lx\n", __func__,
743 | (unsigned long)tcs->ossa);
744 | return -EINVAL;
745 | }
746 |
747 | if (!sgx_validate_offset(encl, tcs->ofsbase)) {
748 | sgx_dbg(encl, "%s: invalid OFSBASE: 0x%lx\n", __func__,
749 | (unsigned long)tcs->ofsbase);
750 | return -EINVAL;
751 | }
752 |
753 | if (!sgx_validate_offset(encl, tcs->ogsbase)) {
754 | sgx_dbg(encl, "%s: invalid OGSBASE: 0x%lx\n", __func__,
755 | (unsigned long)tcs->ogsbase);
756 | return -EINVAL;
757 | }
758 |
759 | if ((tcs->fslimit & 0xFFF) != 0xFFF) {
760 | sgx_dbg(encl, "%s: invalid FSLIMIT: 0x%x\n", __func__,
761 | tcs->fslimit);
762 | return -EINVAL;
763 | }
764 |
765 | if ((tcs->gslimit & 0xFFF) != 0xFFF) {
766 | sgx_dbg(encl, "%s: invalid GSLIMIT: 0x%x\n", __func__,
767 | tcs->gslimit);
768 | return -EINVAL;
769 | }
770 |
771 | for (i = 0; i < sizeof(tcs->reserved) / sizeof(u64); i++)
772 | if (tcs->reserved[i])
773 | return -EINVAL;
774 |
775 | return 0;
776 | }
777 |
778 | static int __sgx_encl_add_page(struct sgx_encl *encl,
779 | struct sgx_encl_page *encl_page,
780 | unsigned long addr,
781 | void *data,
782 | struct sgx_secinfo *secinfo,
783 | unsigned int mrmask)
784 | {
785 | u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
786 | struct page *backing;
787 | struct sgx_add_page_req *req = NULL;
788 | int ret;
789 | int empty;
790 | void *backing_ptr;
791 |
792 | if (sgx_validate_secinfo(secinfo))
793 | return -EINVAL;
794 |
795 | if (page_type == SGX_SECINFO_TCS) {
796 | ret = sgx_validate_tcs(encl, data);
797 | if (ret)
798 | return ret;
799 | }
800 |
801 | ret = sgx_init_page(encl, encl_page, addr, 0, NULL, false);
802 | if (ret)
803 | return ret;
804 |
805 | mutex_lock(&encl->lock);
806 |
807 | if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD)) {
808 | ret = -EINVAL;
809 | goto out;
810 | }
811 |
812 | if (radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT)) {
813 | ret = -EEXIST;
814 | goto out;
815 | }
816 |
817 | req = kzalloc(sizeof(*req), GFP_KERNEL);
818 | if (!req) {
819 | ret = -ENOMEM;
820 | goto out;
821 | }
822 |
823 | backing = sgx_get_backing(encl, encl_page, false);
824 | if (IS_ERR((void *)backing)) {
825 | ret = PTR_ERR((void *)backing);
826 | goto out;
827 | }
828 |
829 | ret = radix_tree_insert(&encl->page_tree, encl_page->addr >> PAGE_SHIFT,
830 | encl_page);
831 | if (ret) {
832 | sgx_put_backing(backing, false /* write */);
833 | goto out;
834 | }
835 |
836 | backing_ptr = kmap(backing);
837 | memcpy(backing_ptr, data, PAGE_SIZE);
838 | kunmap(backing);
839 |
840 | if (page_type == SGX_SECINFO_TCS)
841 | encl_page->flags |= SGX_ENCL_PAGE_TCS;
842 |
843 | memcpy(&req->secinfo, secinfo, sizeof(*secinfo));
844 |
845 | req->encl = encl;
846 | req->encl_page = encl_page;
847 | req->mrmask = mrmask;
848 | empty = list_empty(&encl->add_page_reqs);
849 | kref_get(&encl->refcount);
850 | list_add_tail(&req->list, &encl->add_page_reqs);
851 | if (empty)
852 | queue_work(sgx_add_page_wq, &encl->add_page_work);
853 |
854 | sgx_put_backing(backing, true /* write */);
855 |
856 | mutex_unlock(&encl->lock);
857 | return 0;
858 | out:
859 | kfree(req);
860 | sgx_free_va_slot(encl_page->va_page, encl_page->va_offset);
861 | mutex_unlock(&encl->lock);
862 | return ret;
863 | }
864 |
865 | /**
866 | * sgx_encl_add_page - add a page to the enclave
867 | *
868 | * @encl: an enclave
869 | * @addr: page address in the ELRANGE
870 | * @data: page data
871 | * @secinfo: page permissions
872 | * @mrmask: bitmask to select the 256 byte chunks to be measured
873 | *
874 | * Creates a new enclave page and enqueues an EADD operation that will be
875 | * processed by a worker thread later on.
876 | *
877 | * Return:
878 | * 0 on success,
879 | * system error on failure
880 | */
881 | int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr, void *data,
882 | struct sgx_secinfo *secinfo, unsigned int mrmask)
883 | {
884 | struct sgx_encl_page *page;
885 | int ret;
886 |
887 | page = kzalloc(sizeof(*page), GFP_KERNEL);
888 | if (!page)
889 | return -ENOMEM;
890 |
891 | ret = __sgx_encl_add_page(encl, page, addr, data, secinfo, mrmask);
892 |
893 | if (ret)
894 | kfree(page);
895 |
896 | return ret;
897 | }
898 |
899 | static int sgx_einit(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
900 | struct sgx_einittoken *token)
901 | {
902 | struct sgx_epc_page *secs_epc = encl->secs.epc_page;
903 | void *secs_va;
904 | int ret;
905 |
906 | secs_va = sgx_get_page(secs_epc);
907 | ret = __einit(sigstruct, token, secs_va);
908 | sgx_put_page(secs_va);
909 |
910 | return ret;
911 | }
912 |
913 | /**
914 | * sgx_encl_init - perform EINIT for the given enclave
915 | *
916 | * @encl: an enclave
917 | * @sigstruct: SIGSTRUCT for the enclave
918 | * @token: EINITTOKEN for the enclave
919 | *
 920 |  * Retries the EINIT operation a few times because it may be repeatedly
 921 |  * interrupted, e.g. by an interrupt storm (SGX_UNMASKED_EVENT).
922 | *
923 | * Return:
924 | * 0 on success,
 925 |  *   -EFAULT on a CPU exception during EINIT,
926 | * SGX error code
927 | */
928 | int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
929 | struct sgx_einittoken *token)
930 | {
931 | int ret;
932 | int i;
933 | int j;
934 |
935 | flush_work(&encl->add_page_work);
936 |
937 | mutex_lock(&encl->lock);
938 |
939 | if (encl->flags & SGX_ENCL_INITIALIZED) {
940 | mutex_unlock(&encl->lock);
941 | return 0;
942 | }
943 |
944 | for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
945 | for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
946 | ret = sgx_einit(encl, sigstruct, token);
947 |
948 | if (ret == SGX_UNMASKED_EVENT)
949 | continue;
950 | else
951 | break;
952 | }
953 |
954 | if (ret != SGX_UNMASKED_EVENT)
955 | break;
956 |
957 | msleep_interruptible(SGX_EINIT_SLEEP_TIME);
958 | if (signal_pending(current)) {
959 | mutex_unlock(&encl->lock);
960 | return -ERESTARTSYS;
961 | }
962 | }
963 |
964 | mutex_unlock(&encl->lock);
965 |
966 | if (ret) {
967 | if (ret > 0)
968 | sgx_dbg(encl, "EINIT returned %d\n", ret);
969 | return ret;
970 | }
971 |
972 | encl->flags |= SGX_ENCL_INITIALIZED;
973 | return 0;
974 | }
975 |
976 | void sgx_encl_release(struct kref *ref)
977 | {
978 | struct sgx_encl_page *entry;
979 | struct sgx_va_page *va_page;
980 | struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
981 | struct radix_tree_iter iter;
982 | void **slot;
983 |
984 | mutex_lock(&sgx_tgid_ctx_mutex);
985 | if (!list_empty(&encl->encl_list))
986 | list_del(&encl->encl_list);
987 | mutex_unlock(&sgx_tgid_ctx_mutex);
988 |
989 | if (encl->mmu_notifier.ops)
990 | mmu_notifier_unregister(&encl->mmu_notifier, encl->mm);
991 |
992 | radix_tree_for_each_slot(slot, &encl->page_tree, &iter, 0) {
993 | entry = *slot;
994 | if (entry->epc_page) {
995 | list_del(&entry->epc_page->list);
996 | sgx_free_page(entry->epc_page, encl);
997 | }
998 | radix_tree_delete(&encl->page_tree, entry->addr >> PAGE_SHIFT);
999 | kfree(entry);
1000 | }
1001 |
1002 | while (!list_empty(&encl->va_pages)) {
1003 | va_page = list_first_entry(&encl->va_pages,
1004 | struct sgx_va_page, list);
1005 | list_del(&va_page->list);
1006 | sgx_free_page(va_page->epc_page, encl);
1007 | kfree(va_page);
1008 | atomic_dec(&sgx_va_pages_cnt);
1009 | }
1010 |
1011 | if (encl->secs.epc_page)
1012 | sgx_free_page(encl->secs.epc_page, encl);
1013 |
1014 | if (encl->tgid_ctx)
1015 | kref_put(&encl->tgid_ctx->refcount, sgx_tgid_ctx_release);
1016 |
1017 | if (encl->backing)
1018 | fput(encl->backing);
1019 |
1020 | if (encl->pcmd)
1021 | fput(encl->pcmd);
1022 |
1023 | kfree(encl);
1024 | }
1025 |
--------------------------------------------------------------------------------
/sgx_encl2.c:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016-2017 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016-2017 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Serge Ayoun
55 | * Angie Chinchilla
56 | * Shay Katz-zamir
57 | * Cedric Xing
58 | */
59 |
60 | #include "sgx.h"
61 | #include
62 | #include
63 |
64 | #define SGX_NR_MOD_CHUNK_PAGES 16
65 |
66 | int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
67 | unsigned long addr, unsigned int alloc_flags,
68 | struct sgx_epc_page **va_src, bool already_locked);
 69 | /**
 70 |  * sgx_encl_augment() - dynamically add a page to an enclave (EAUG)
 71 |  * @vma:	VMA covering the faulting address
 72 |  * @addr:	virtual address where the page should be added
 73 |  * @write:	true if the fault was triggered by a write access
 74 |  *
 75 |  * The address is checked against the dynamic ranges defined for the
 76 |  * enclave; if it matches one, a page is added (EAUG) at that location.
 77 |  * Note: the invoking function must already hold encl->lock.
 78 |  */
79 | struct sgx_encl_page *sgx_encl_augment(struct vm_area_struct *vma,
80 | unsigned long addr,
81 | bool write)
82 | {
83 | struct sgx_pageinfo pginfo;
84 | struct sgx_epc_page *epc_page, *va_page = NULL;
85 | struct sgx_epc_page *secs_epc_page = NULL;
86 | struct sgx_encl_page *encl_page;
87 | struct sgx_encl *encl = (struct sgx_encl *) vma->vm_private_data;
88 | void *epc_va;
89 | void *secs_va;
90 | int ret = -EFAULT;
91 |
92 | if (!sgx_has_sgx2)
93 | return ERR_PTR(-EFAULT);
94 |
95 | /* if vma area is not writable then we will not eaug */
96 | if (unlikely(!(vma->vm_flags & VM_WRITE)))
97 | return ERR_PTR(-EFAULT);
98 |
99 | addr &= ~(PAGE_SIZE-1);
100 |
101 | /* Note: Invoking function holds the encl->lock */
102 |
103 | epc_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
104 | if (IS_ERR(epc_page)) {
105 | return ERR_PTR(PTR_ERR(epc_page));
106 | }
107 |
108 | va_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
109 | if (IS_ERR(va_page)) {
110 | sgx_free_page(epc_page, encl);
111 | return ERR_PTR(PTR_ERR(va_page));
112 | }
113 |
114 | encl_page = kzalloc(sizeof(struct sgx_encl_page), GFP_KERNEL);
115 | if (!encl_page) {
116 | sgx_free_page(epc_page, encl);
117 | sgx_free_page(va_page, encl);
118 | return ERR_PTR(-EFAULT);
119 | }
120 |
121 | if (!(encl->flags & SGX_ENCL_INITIALIZED))
122 | goto out;
123 |
124 | if (encl->flags & (SGX_ENCL_SUSPEND | SGX_ENCL_DEAD))
125 | goto out;
126 |
127 | /*
128 | if ((rg->rg_desc.flags & SGX_GROW_DOWN_FLAG) && !write)
129 | goto out;
130 | */
131 |
132 | /* Start the augmenting process */
133 | ret = sgx_init_page(encl, encl_page, addr, 0, &va_page, true);
134 | if (ret)
135 | goto out;
136 |
137 | /* If SECS is evicted then reload it first */
138 | /* Same steps as in sgx_do_fault */
139 | if (encl->flags & SGX_ENCL_SECS_EVICTED) {
140 | secs_epc_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
141 | if (IS_ERR(secs_epc_page)) {
142 | ret = PTR_ERR(secs_epc_page);
143 | secs_epc_page = NULL;
144 | goto out;
145 | }
146 |
147 | ret = sgx_eldu(encl, &encl->secs, secs_epc_page, true);
148 | if (ret)
149 | goto out;
150 |
151 | encl->secs.epc_page = secs_epc_page;
152 | encl->flags &= ~SGX_ENCL_SECS_EVICTED;
153 |
154 | /* Do not free */
155 | secs_epc_page = NULL;
156 | }
157 |
158 | secs_va = sgx_get_page(encl->secs.epc_page);
159 | epc_va = sgx_get_page(epc_page);
160 |
161 | pginfo.srcpge = 0;
162 | pginfo.secinfo = 0;
163 | pginfo.linaddr = addr;
164 | pginfo.secs = (unsigned long) secs_va;
165 |
166 | ret = __eaug(&pginfo, epc_va);
167 | if (ret) {
168 | pr_err("sgx: eaug failure with ret=%d\n", ret);
169 | goto out;
170 | }
171 |
172 | ret = sgx_vm_insert_pfn(vma, encl_page->addr, epc_page->pa);
173 | sgx_put_page(epc_va);
174 | sgx_put_page(secs_va);
175 | if (ret != VM_FAULT_NOPAGE) {
176 | pr_err("sgx: vm_insert_pfn failure with ret=%d\n", ret);
177 | goto out;
178 | }
179 |
180 | epc_page->encl_page = encl_page;
181 | encl_page->epc_page = epc_page;
182 | encl->secs_child_cnt++;
183 |
184 | ret = radix_tree_insert(&encl->page_tree, encl_page->addr >> PAGE_SHIFT,
185 | encl_page);
186 | if (ret) {
187 | pr_err("sgx: radix_tree_insert failed with ret=%d\n", ret);
188 | goto out;
189 | }
190 | sgx_test_and_clear_young(encl_page, encl);
191 | list_add_tail(&encl_page->epc_page->list, &encl->load_list);
192 | encl_page->flags |= SGX_ENCL_PAGE_ADDED;
193 |
194 | if (va_page)
195 | sgx_free_page(va_page, encl);
196 | if (secs_epc_page)
197 | sgx_free_page(secs_epc_page, encl);
198 |
 199 | 	/*
 200 | 	 * A write operation corresponds to a stack extension.
 201 | 	 * In that case the #PF is caused by a write access,
 202 | 	 * most probably a push.
 203 | 	 * We return SIGBUS so that the OS invokes the enclave's exception
 204 | 	 * handler, which will execute EACCEPT.
 205 | 	 */
206 | if (write)
207 | return ERR_PTR(-EFAULT);
208 |
209 | return encl_page;
210 |
211 | out:
212 | if (encl_page->va_offset)
213 | sgx_free_va_slot(encl_page->va_page, encl_page->va_offset);
214 | sgx_free_page(epc_page, encl);
215 | if (va_page)
216 | sgx_free_page(va_page, encl);
217 | kfree(encl_page);
218 | if (secs_epc_page)
219 | sgx_free_page(secs_epc_page, encl);
220 |
 221 | 	if (ret == -EBUSY || ret == -ERESTARTSYS)
222 | return ERR_PTR(ret);
223 |
224 | return ERR_PTR(-EFAULT);
225 | }
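/*
 * User-visible flow implemented above, assuming an SGX2-capable CPU and
 * a writable dynamic range mapped for the enclave:
 *
 *   1. Enclave code touches an address in the dynamic range that has no
 *      EPC page behind it yet, which raises a #PF.
 *   2. The fault path lands here and EAUGs a fresh EPC page at the
 *      faulting address (pginfo.secs points at the SECS, srcpge and
 *      secinfo are zero).
 *   3. For write faults an error is returned so that the enclave's
 *      exception handler runs EACCEPT on the new page before the access
 *      is retried.
 */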
226 |
227 | static int isolate_range(struct sgx_encl *encl,
228 | struct sgx_range *rg, struct list_head *list)
229 | {
230 | unsigned long address, end;
231 | struct sgx_encl_page *encl_page;
232 | struct vm_area_struct *vma;
233 | int ret;
234 |
235 | address = rg->start_addr;
236 | end = address + rg->nr_pages * PAGE_SIZE;
237 |
238 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
239 | mmap_read_lock(encl->mm);
240 | #else
241 | down_read(&encl->mm->mmap_sem);
242 | #endif
243 |
244 |
245 | for (; address < end; address += PAGE_SIZE) {
246 | ret = sgx_encl_find(encl->mm, address, &vma);
247 | if (ret || encl != vma->vm_private_data) {
248 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
249 | mmap_read_unlock(encl->mm);
250 | #else
251 | up_read(&encl->mm->mmap_sem);
252 | #endif
253 | return -EINVAL;
254 | }
255 |
256 | encl_page = ERR_PTR(-EBUSY);
257 | while (encl_page == ERR_PTR(-EBUSY))
258 | /* bring back page in case it was evicted */
259 | encl_page = sgx_fault_page(vma, address,
260 | SGX_FAULT_RESERVE, NULL);
261 |
262 | if (IS_ERR(encl_page)) {
263 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
264 | mmap_read_unlock(encl->mm);
265 | #else
266 | up_read(&encl->mm->mmap_sem);
267 | #endif
268 | sgx_err(encl, "sgx: No page found at address 0x%lx\n",
269 | address);
270 | return PTR_ERR(encl_page);
271 | }
272 |
 273 | 		/* The reserved bit is no longer needed because the page
 274 | 		 * has been removed from the load list.
 275 | 		 */
276 | mutex_lock(&encl->lock);
277 | list_move_tail(&encl_page->epc_page->list, list);
278 | encl_page->flags &= ~SGX_ENCL_PAGE_RESERVED;
279 | mutex_unlock(&encl->lock);
280 | }
281 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
282 | mmap_read_unlock(encl->mm);
283 | #else
284 | up_read(&encl->mm->mmap_sem);
285 | #endif
286 | return 0;
287 | }
288 |
289 | static int __modify_range(struct sgx_encl *encl,
290 | struct sgx_range *rg, struct sgx_secinfo *secinfo)
291 | {
292 | struct sgx_encl_page *encl_page;
293 | struct sgx_epc_page *epc_page, *tmp;
294 | LIST_HEAD(list);
295 | bool emodt = secinfo->flags & (SGX_SECINFO_TRIM | SGX_SECINFO_TCS);
296 | unsigned int epoch = 0;
297 | void *epc_va;
298 | int ret = 0, cnt, status = 0;
299 |
300 | ret = isolate_range(encl, rg, &list);
301 | if (ret)
302 | goto out;
303 |
304 | if (list_empty(&list))
305 | goto out;
306 |
307 | /* EMODT / EMODPR */
308 | list_for_each_entry_safe(epc_page, tmp, &list, list) {
309 | encl_page = epc_page->encl_page;
310 | if (!emodt && (encl_page->flags & SGX_ENCL_PAGE_TCS)) {
 311 | 			sgx_err(encl, "sgx: illegal request: page at "
 312 | 				"address=0x%lx is a TCS, req flags=0x%llx\n",
313 | encl_page->addr, secinfo->flags);
314 | ret = -EINVAL;
315 | continue;
316 | }
317 | mutex_lock(&encl->lock);
318 | epc_va = sgx_get_page(epc_page);
319 | status = SGX_LOCKFAIL;
320 | cnt = 0;
321 | while (SGX_LOCKFAIL == status && cnt < SGX_EDMM_SPIN_COUNT) {
322 | if (emodt) {
323 | status = __emodt(secinfo, epc_va);
324 | if (!status)
325 | encl_page->flags |= SGX_ENCL_PAGE_TCS;
326 | } else
327 | status = __emodpr(secinfo, epc_va);
328 | cnt++;
329 | }
330 |
331 | epoch = encl->shadow_epoch;
332 | sgx_put_page(epc_va);
333 | mutex_unlock(&encl->lock);
334 |
335 | if (status) {
 336 | 			sgx_err(encl, "sgx: Page at address=0x%lx "
 337 | 				"can't be modified, err=%d req flags=0x%llx\n",
338 | encl_page->addr, status, secinfo->flags);
339 | ret = (ret) ? ret : status;
340 | } else {
341 | if (SGX_SECINFO_TRIM == secinfo->flags)
342 | encl_page->flags |= SGX_ENCL_PAGE_TRIM;
343 | }
344 | }
345 |
346 | /* ETRACK */
347 | mutex_lock(&encl->lock);
348 | sgx_etrack(encl, epoch);
349 | mutex_unlock(&encl->lock);
350 |
351 | smp_call_function(sgx_ipi_cb, NULL, 1);
352 |
353 | out:
354 | if (!list_empty(&list)) {
355 | mutex_lock(&encl->lock);
356 | list_splice(&list, &encl->load_list);
357 | mutex_unlock(&encl->lock);
358 | }
359 |
360 | return ret;
361 | }
362 |
363 | long modify_range(struct sgx_range *rg, unsigned long flags)
364 | {
365 | struct sgx_encl *encl;
366 | struct sgx_secinfo secinfo;
367 | struct sgx_range _rg;
368 | unsigned long end = rg->start_addr + rg->nr_pages * PAGE_SIZE;
369 | int ret = 0;
370 |
371 | if (!sgx_has_sgx2)
372 | return -ENOSYS;
373 |
374 | if (rg->start_addr & (PAGE_SIZE - 1))
375 | return -EINVAL;
376 |
377 | if (!rg->nr_pages)
378 | return -EINVAL;
379 |
380 | ret = sgx_get_encl(rg->start_addr, &encl);
381 | if (ret) {
382 | pr_warn("sgx: No enclave found at start addr 0x%lx ret=%d\n",
383 | rg->start_addr, ret);
384 | return ret;
385 | }
386 |
387 | if (end > encl->base + encl->size) {
388 | ret = -EINVAL;
389 | goto out;
390 | }
391 |
392 | memset(&secinfo, 0, sizeof(secinfo));
393 | secinfo.flags = flags;
394 |
 395 | 	/*
 396 | 	 * Modify the range in chunks of SGX_NR_MOD_CHUNK_PAGES (16) pages:
 397 | 	 * the pages in a chunk are taken off the load list, and bigger
 398 | 	 * chunks could empty the EPC load lists and stall SGX.
 399 | 	 */
400 | for (_rg.start_addr = rg->start_addr;
401 | _rg.start_addr < end;
402 | rg->nr_pages -= SGX_NR_MOD_CHUNK_PAGES,
403 | _rg.start_addr += SGX_NR_MOD_CHUNK_PAGES*PAGE_SIZE) {
 404 | 		_rg.nr_pages = rg->nr_pages > SGX_NR_MOD_CHUNK_PAGES ? SGX_NR_MOD_CHUNK_PAGES : rg->nr_pages;
405 | ret = __modify_range(encl, &_rg, &secinfo);
406 | if (ret)
407 | break;
408 | }
409 |
410 | out:
411 | kref_put(&encl->refcount, sgx_encl_release);
412 | return ret;
413 | }
414 |
415 | int remove_page(struct sgx_encl *encl, unsigned long address, bool trim)
416 | {
417 | struct sgx_encl_page *encl_page;
418 | struct vm_area_struct *vma;
419 | struct sgx_va_page *va_page;
420 | int ret;
421 |
422 | ret = sgx_encl_find(encl->mm, address, &vma);
423 | if (ret || encl != vma->vm_private_data)
424 | return -EINVAL;
425 |
426 | encl_page = sgx_fault_page(vma, address, SGX_FAULT_RESERVE, NULL);
427 | if (IS_ERR(encl_page))
428 | return (PTR_ERR(encl_page) == -EBUSY) ? -EBUSY : -EINVAL;
429 |
430 | if (trim && !(encl_page->flags & SGX_ENCL_PAGE_TRIM)) {
431 | encl_page->flags &= ~SGX_ENCL_PAGE_RESERVED;
432 | return -EINVAL;
433 | }
434 |
435 | if (!(encl_page->flags & SGX_ENCL_PAGE_ADDED)) {
436 | encl_page->flags &= ~SGX_ENCL_PAGE_RESERVED;
437 | return -EINVAL;
438 | }
439 |
440 | mutex_lock(&encl->lock);
441 |
442 | radix_tree_delete(&encl->page_tree, encl_page->addr >> PAGE_SHIFT);
443 | va_page = encl_page->va_page;
444 |
445 | if (va_page) {
446 | sgx_free_va_slot(va_page, encl_page->va_offset);
447 |
448 | if (sgx_va_slots_empty(va_page)) {
449 | list_del(&va_page->list);
450 | sgx_free_page(va_page->epc_page, encl);
451 | kfree(va_page);
452 | }
453 | }
454 |
455 | if (encl_page->epc_page) {
456 | list_del(&encl_page->epc_page->list);
457 | encl_page->epc_page->encl_page = NULL;
458 | zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
459 | sgx_free_page(encl_page->epc_page, encl);
460 | encl->secs_child_cnt--;
461 | }
462 |
463 | mutex_unlock(&encl->lock);
464 |
465 | kfree(encl_page);
466 |
467 | return 0;
468 | }
469 |
--------------------------------------------------------------------------------
/sgx_ioctl.c:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016-2017 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016-2017 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | * Suresh Siddha
56 | * Serge Ayoun
57 | * Shay Katz-zamir
58 | * Sean Christopherson
59 | */
60 |
61 | #include "sgx.h"
62 | #include
63 | #include
64 | #include
65 | #include
66 | #include
67 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
 68 | #include <linux/sched/signal.h>
69 | #else
 70 | #include <linux/signal.h>
71 | #endif
72 | #include
73 | #include
74 | #include
75 |
76 | int sgx_get_encl(unsigned long addr, struct sgx_encl **encl)
77 | {
78 | struct mm_struct *mm = current->mm;
79 | struct vm_area_struct *vma;
80 | int ret;
81 |
82 | if (addr & (PAGE_SIZE - 1))
83 | return -EINVAL;
84 |
85 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
86 | mmap_read_lock(mm);
87 | #else
88 | down_read(&mm->mmap_sem);
89 | #endif
90 |
91 | ret = sgx_encl_find(mm, addr, &vma);
92 | if (!ret) {
93 | *encl = vma->vm_private_data;
94 |
95 | if ((*encl)->flags & SGX_ENCL_SUSPEND)
96 | ret = SGX_POWER_LOST_ENCLAVE;
97 | else
98 | kref_get(&(*encl)->refcount);
99 | }
100 |
101 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
102 | mmap_read_unlock(mm);
103 | #else
104 | up_read(&mm->mmap_sem);
105 | #endif
106 |
107 | return ret;
108 | }
109 |
110 | /**
111 | * sgx_ioc_enclave_create - handler for %SGX_IOC_ENCLAVE_CREATE
112 | * @filep: open file to /dev/sgx
113 | * @cmd: the command value
114 | * @arg: pointer to the &struct sgx_enclave_create
115 | *
116 | * Validates SECS attributes, allocates an EPC page for the SECS and performs
117 | * ECREATE.
118 | *
119 | * Return:
120 | * 0 on success,
121 | * system error on failure
122 | */
123 | static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
124 | unsigned long arg)
125 | {
126 | struct sgx_enclave_create *createp = (struct sgx_enclave_create *)arg;
127 | void __user *src = (void __user *)createp->src;
128 | struct sgx_secs *secs;
129 | int ret;
130 |
131 | secs = kzalloc(sizeof(*secs), GFP_KERNEL);
132 | if (!secs)
133 | return -ENOMEM;
134 |
135 | ret = copy_from_user(secs, src, sizeof(*secs));
136 | if (ret) {
137 | kfree(secs);
138 | return ret;
139 | }
140 |
141 | ret = sgx_encl_create(secs);
142 |
143 | kfree(secs);
144 | return ret;
145 | }
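/*
 * Minimal userspace sketch of driving this handler. It assumes the
 * SGX_IOC_ENCLAVE_CREATE number and the struct sgx_enclave_create layout
 * (a single 'src' field holding the address of a page-aligned SECS) come
 * from sgx_user.h, and uses the /dev/isgx node registered in sgx_main.c;
 * 'size' is a placeholder for the enclave size (a power of two, at least
 * two pages), and error handling is mostly omitted:
 *
 *	int fd = open("/dev/isgx", O_RDWR);
 *	void *base = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
 *			  MAP_SHARED, fd, 0);
 *	struct sgx_secs *secs = aligned_alloc(4096, 4096);
 *	struct sgx_enclave_create parm;
 *
 *	memset(secs, 0, 4096);
 *	secs->base = (unsigned long)base;
 *	secs->size = size;
 *	secs->xfrm = 0x3;		// x87 | SSE, required by sgx_validate_secs()
 *	secs->attributes = SGX_ATTR_MODE64BIT;
 *	secs->ssaframesize = 1;		// at least the sgx_calc_ssaframesize() value
 *	parm.src = (unsigned long)secs;
 *	if (fd < 0 || ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &parm) < 0)
 *		perror("SGX_IOC_ENCLAVE_CREATE");
 */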
146 |
147 | /**
148 | * sgx_ioc_enclave_add_page - handler for %SGX_IOC_ENCLAVE_ADD_PAGE
149 | *
150 | * @filep: open file to /dev/sgx
151 | * @cmd: the command value
152 | * @arg: pointer to the &struct sgx_enclave_add_page
153 | *
154 | * Creates a new enclave page and enqueues an EADD operation that will be
155 | * processed by a worker thread later on.
156 | *
157 | * Return:
158 | * 0 on success,
159 | * system error on failure
160 | */
161 | static long sgx_ioc_enclave_add_page(struct file *filep, unsigned int cmd,
162 | unsigned long arg)
163 | {
164 | struct sgx_enclave_add_page *addp = (void *)arg;
165 | unsigned long secinfop = (unsigned long)addp->secinfo;
166 | struct sgx_secinfo secinfo;
167 | struct sgx_encl *encl;
168 | struct page *data_page;
169 | void *data;
170 | int ret;
171 |
172 | ret = sgx_get_encl(addp->addr, &encl);
173 | if (ret)
174 | return ret;
175 |
176 | if (copy_from_user(&secinfo, (void __user *)secinfop,
177 | sizeof(secinfo))) {
178 | kref_put(&encl->refcount, sgx_encl_release);
179 | return -EFAULT;
180 | }
181 |
182 | data_page = alloc_page(GFP_HIGHUSER);
183 | if (!data_page) {
184 | kref_put(&encl->refcount, sgx_encl_release);
185 | return -ENOMEM;
186 | }
187 |
188 | data = kmap(data_page);
189 |
190 | ret = copy_from_user((void *)data, (void __user *)addp->src, PAGE_SIZE);
191 | if (ret)
192 | goto out;
193 |
194 | ret = sgx_encl_add_page(encl, addp->addr, data, &secinfo, addp->mrmask);
195 | if (ret)
196 | goto out;
197 |
198 | out:
199 | kref_put(&encl->refcount, sgx_encl_release);
200 | kunmap(data_page);
201 | __free_page(data_page);
202 | return ret;
203 | }
204 |
205 | /**
206 | * sgx_ioc_enclave_init - handler for %SGX_IOC_ENCLAVE_INIT
207 | *
208 | * @filep: open file to /dev/sgx
209 | * @cmd: the command value
210 | * @arg: pointer to the &struct sgx_enclave_init
211 | *
212 | * Flushes the remaining enqueued EADD operations and performs EINIT.
213 | *
214 | * Return:
215 | * 0 on success,
216 | * system error on failure
217 | */
218 | static long sgx_ioc_enclave_init(struct file *filep, unsigned int cmd,
219 | unsigned long arg)
220 | {
221 | struct sgx_enclave_init *initp = (struct sgx_enclave_init *)arg;
222 | unsigned long sigstructp = (unsigned long)initp->sigstruct;
223 | unsigned long einittokenp = (unsigned long)initp->einittoken;
224 | unsigned long encl_id = initp->addr;
225 | struct sgx_sigstruct *sigstruct;
226 | struct sgx_einittoken *einittoken;
227 | struct sgx_encl *encl;
228 | struct page *initp_page;
229 | int ret;
230 |
231 | initp_page = alloc_page(GFP_HIGHUSER);
232 | if (!initp_page)
233 | return -ENOMEM;
234 |
235 | sigstruct = kmap(initp_page);
236 | einittoken = (struct sgx_einittoken *)
237 | ((unsigned long)sigstruct + PAGE_SIZE / 2);
238 |
239 | ret = copy_from_user(sigstruct, (void __user *)sigstructp,
240 | sizeof(*sigstruct));
241 | if (ret)
242 | goto out;
243 |
244 | ret = copy_from_user(einittoken, (void __user *)einittokenp,
245 | sizeof(*einittoken));
246 | if (ret)
247 | goto out;
248 |
249 | ret = sgx_get_encl(encl_id, &encl);
250 | if (ret)
251 | goto out;
252 |
253 | ret = sgx_encl_init(encl, sigstruct, einittoken);
254 |
255 | kref_put(&encl->refcount, sgx_encl_release);
256 |
257 | out:
258 | kunmap(initp_page);
259 | __free_page(initp_page);
260 | return ret;
261 | }
262 |
263 | long sgx_ioc_page_modpr(struct file *filep, unsigned int cmd,
264 | unsigned long arg)
265 | {
266 | struct sgx_modification_param *p =
267 | (struct sgx_modification_param *) arg;
268 |
269 | /*
270 | * Only RWX flags in mask are allowed
271 | * Restricting WR w/o RD is not allowed
272 | */
273 | if (p->flags & ~(SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X))
274 | return -EINVAL;
275 | if (!(p->flags & SGX_SECINFO_R) &&
276 | (p->flags & SGX_SECINFO_W))
277 | return -EINVAL;
278 | return modify_range(&p->range, p->flags);
279 | }
280 |
281 | /**
 282 |  * sgx_ioc_page_to_tcs() - switch the pages in a range to TCS type
 283 |  * @arg:	address of a &struct sgx_range describing pages of type REG
 284 |  *
 285 |  * EACCEPT must be invoked from within the enclave after this ioctl returns.
286 | */
287 | long sgx_ioc_page_to_tcs(struct file *filep, unsigned int cmd,
288 | unsigned long arg)
289 | {
290 | return modify_range((struct sgx_range *)arg, SGX_SECINFO_TCS);
291 | }
292 |
293 | /**
 294 |  * sgx_ioc_trim_page() - trim the pages in the given range
 295 |  * @arg:	address of a &struct sgx_range describing the pages to be trimmed
 296 |  * The pages still belong to the enclave and cannot be removed until
 297 |  * EACCEPT has been invoked from within the enclave.
298 | */
299 | long sgx_ioc_trim_page(struct file *filep, unsigned int cmd,
300 | unsigned long arg)
301 | {
302 | return modify_range((struct sgx_range *)arg, SGX_SECINFO_TRIM);
303 | }
304 |
305 | /**
 306 |  * sgx_ioc_page_notify_accept() - the pages in the given range are moved to
 307 |  * the trimmed list, i.e. they can be freely removed from now on. The pages
 308 |  * must have the PT_TRIM page type and must already have been EACCEPTed.
 309 |  * @arg:	address of a &struct sgx_range describing the pages
310 | */
311 | long sgx_ioc_page_notify_accept(struct file *filep, unsigned int cmd,
312 | unsigned long arg)
313 | {
314 | struct sgx_range *rg;
315 | unsigned long address, end;
316 | struct sgx_encl *encl;
317 | int ret, tmp_ret = 0;
318 |
319 | if (!sgx_has_sgx2)
320 | return -ENOSYS;
321 |
322 | rg = (struct sgx_range *)arg;
323 |
324 | address = rg->start_addr;
325 | address &= ~(PAGE_SIZE-1);
326 | end = address + rg->nr_pages * PAGE_SIZE;
327 |
328 | ret = sgx_get_encl(address, &encl);
329 | if (ret) {
330 | pr_warn("sgx: No enclave found at start address 0x%lx\n",
331 | address);
332 | return ret;
333 | }
334 |
335 | for (; address < end; address += PAGE_SIZE) {
336 | tmp_ret = remove_page(encl, address, true);
337 | if (tmp_ret) {
338 | sgx_dbg(encl, "sgx: remove failed, addr=0x%lx ret=%d\n",
339 | address, tmp_ret);
340 | ret = tmp_ret;
341 | continue;
342 | }
343 | }
344 |
345 | kref_put(&encl->refcount, sgx_encl_release);
346 |
347 | return ret;
348 | }
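/*
 * Expected SGX2 removal sequence for trimmed pages: userspace issues
 * SGX_IOC_ENCLAVE_TRIM for the range, the enclave EACCEPTs the trimmed
 * pages, and userspace then calls SGX_IOC_ENCLAVE_NOTIFY_ACCEPT (handled
 * above), which finally frees each page via remove_page().
 */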
349 |
350 | /**
 351 |  * sgx_ioc_page_remove() - remove the page at the given address
 352 |  * @arg:	pointer to the address of the page to be removed
353 | */
354 | long sgx_ioc_page_remove(struct file *filep, unsigned int cmd,
355 | unsigned long arg)
356 | {
357 | struct sgx_encl *encl;
358 | unsigned long address = *((unsigned long *) arg);
359 | int ret;
360 |
361 | if (!sgx_has_sgx2)
362 | return -ENOSYS;
363 |
364 | if (sgx_get_encl(address, &encl)) {
365 | pr_warn("sgx: No enclave found at start address 0x%lx\n",
366 | address);
367 | return -EINVAL;
368 | }
369 |
370 | ret = remove_page(encl, address, false);
371 | if (ret) {
372 | pr_warn("sgx: Failed to remove page, address=0x%lx ret=%d\n",
373 | address, ret);
374 | }
375 |
376 | kref_put(&encl->refcount, sgx_encl_release);
377 | return ret;
378 | }
379 |
380 | typedef long (*sgx_ioc_t)(struct file *filep, unsigned int cmd,
381 | unsigned long arg);
382 |
383 | long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
384 | {
385 | char data[256];
386 | sgx_ioc_t handler = NULL;
387 | long ret;
388 |
389 | switch (cmd) {
390 | case SGX_IOC_ENCLAVE_CREATE:
391 | handler = sgx_ioc_enclave_create;
392 | break;
393 | case SGX_IOC_ENCLAVE_ADD_PAGE:
394 | handler = sgx_ioc_enclave_add_page;
395 | break;
396 | case SGX_IOC_ENCLAVE_INIT:
397 | handler = sgx_ioc_enclave_init;
398 | break;
399 | case SGX_IOC_ENCLAVE_EMODPR:
400 | handler = sgx_ioc_page_modpr;
401 | break;
402 | case SGX_IOC_ENCLAVE_MKTCS:
403 | handler = sgx_ioc_page_to_tcs;
404 | break;
405 | case SGX_IOC_ENCLAVE_TRIM:
406 | handler = sgx_ioc_trim_page;
407 | break;
408 | case SGX_IOC_ENCLAVE_NOTIFY_ACCEPT:
409 | handler = sgx_ioc_page_notify_accept;
410 | break;
411 | case SGX_IOC_ENCLAVE_PAGE_REMOVE:
412 | handler = sgx_ioc_page_remove;
413 | break;
414 | default:
415 | return -ENOIOCTLCMD;
416 | }
417 |
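/*
 * Every command's argument structure is copied into the 256-byte stack
 * buffer before dispatch and, for commands with the IOC_OUT direction
 * bit, copied back to userspace on success. This relies on
 * _IOC_SIZE(cmd) never exceeding sizeof(data) for the commands accepted
 * above.
 */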
418 | if (copy_from_user(data, (void __user *)arg, _IOC_SIZE(cmd)))
419 | return -EFAULT;
420 |
421 | ret = handler(filep, cmd, (unsigned long)((void *)data));
422 | if (!ret && (cmd & IOC_OUT)) {
423 | if (copy_to_user((void __user *)arg, data, _IOC_SIZE(cmd)))
424 | return -EFAULT;
425 | }
426 |
427 | return ret;
428 | }
429 |
--------------------------------------------------------------------------------
/sgx_main.c:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016-2017 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016-2017 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | * Suresh Siddha
56 | * Serge Ayoun
57 | * Shay Katz-zamir
58 | * Sean Christopherson
59 | */
60 |
61 | #include "asm/msr-index.h"
62 | #include "sgx.h"
63 | #include
64 | #include
65 | #include
66 | #include
67 | #include
68 | #include
69 | #include
70 | #include
71 | #include
72 | #include
73 |
74 | #define DRV_DESCRIPTION "Intel SGX Driver"
75 | #define DRV_VERSION "2.11.0"
76 |
77 | #ifndef MSR_IA32_FEAT_CTL
78 | #define MSR_IA32_FEAT_CTL MSR_IA32_FEATURE_CONTROL
79 | #endif
80 |
81 | #ifndef FEAT_CTL_LOCKED
82 | #define FEAT_CTL_LOCKED FEATURE_CONTROL_LOCKED
83 | #endif
84 |
85 | MODULE_DESCRIPTION(DRV_DESCRIPTION);
86 | MODULE_AUTHOR("Jarkko Sakkinen ");
87 | MODULE_VERSION(DRV_VERSION);
88 | #ifndef X86_FEATURE_SGX
89 | #define X86_FEATURE_SGX (9 * 32 + 2)
90 | #endif
91 | #ifndef FEAT_CTL_SGX_ENABLED
92 | #define FEAT_CTL_SGX_ENABLED (1<<18)
93 | #endif
94 | /*
95 | * Global data.
96 | */
97 |
98 | struct workqueue_struct *sgx_add_page_wq;
99 | #define SGX_MAX_EPC_BANKS 8
100 | struct sgx_epc_bank sgx_epc_banks[SGX_MAX_EPC_BANKS];
101 | int sgx_nr_epc_banks;
102 | u64 sgx_encl_size_max_32;
103 | u64 sgx_encl_size_max_64;
104 | u64 sgx_xfrm_mask = 0x3;
105 | u32 sgx_misc_reserved;
106 | u32 sgx_xsave_size_tbl[64];
107 | bool sgx_has_sgx2;
108 |
109 | static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
110 | {
111 | vma->vm_ops = &sgx_vm_ops;
112 | vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO |
113 | VM_DONTCOPY;
114 |
115 | return 0;
116 | }
117 |
118 | static unsigned long sgx_get_unmapped_area(struct file *file,
119 | unsigned long addr,
120 | unsigned long len,
121 | unsigned long pgoff,
122 | unsigned long flags)
123 | {
124 | if (len < 2 * PAGE_SIZE || (len & (len - 1)) || flags & MAP_PRIVATE)
125 | return -EINVAL;
126 |
127 | /* On 64-bit architecture, allow mmap() to exceed 32-bit encl
128 | * limit only if the task is not running in 32-bit compatibility
129 | * mode.
130 | */
131 | if (len > sgx_encl_size_max_32)
132 | #ifdef CONFIG_X86_64
133 | if (test_thread_flag(TIF_ADDR32))
134 | return -EINVAL;
135 | #else
136 | return -EINVAL;
137 | #endif
138 |
139 | #ifdef CONFIG_X86_64
140 | if (len > sgx_encl_size_max_64)
141 | return -EINVAL;
142 | #endif
143 |
144 | addr = current->mm->get_unmapped_area(file, addr, 2 * len, pgoff,
145 | flags);
146 | if (IS_ERR_VALUE(addr))
147 | return addr;
148 |
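/*
 * Twice the requested length was reserved above so that the result can
 * be rounded up to a len-aligned base here: the enclave range must be
 * naturally aligned, matching the secs->base check in sgx_validate_secs().
 */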
149 | addr = (addr + (len - 1)) & ~(len - 1);
150 |
151 | return addr;
152 | }
153 |
154 | static const struct file_operations sgx_fops = {
155 | .owner = THIS_MODULE,
156 | .unlocked_ioctl = sgx_ioctl,
157 | #ifdef CONFIG_COMPAT
158 | .compat_ioctl = sgx_ioctl,
159 | #endif
160 | .mmap = sgx_mmap,
161 | .get_unmapped_area = sgx_get_unmapped_area,
162 | };
163 |
164 | static struct miscdevice sgx_dev = {
165 | .minor = MISC_DYNAMIC_MINOR,
166 | .name = "isgx",
167 | .fops = &sgx_fops,
168 | .mode = 0666,
169 | };
170 |
171 | static int sgx_pm_suspend(struct device *dev)
172 | {
173 | struct sgx_tgid_ctx *ctx;
174 | struct sgx_encl *encl;
175 |
176 | list_for_each_entry(ctx, &sgx_tgid_ctx_list, list) {
177 | list_for_each_entry(encl, &ctx->encl_list, encl_list) {
178 | sgx_invalidate(encl, false);
179 | encl->flags |= SGX_ENCL_SUSPEND;
180 | flush_work(&encl->add_page_work);
181 | }
182 | }
183 |
184 | return 0;
185 | }
186 |
187 | static void sgx_reset_pubkey_hash(void *failed)
188 | {
189 | if (wrmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, 0xa6053e051270b7acULL) ||
190 | wrmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH1, 0x6cfbe8ba8b3b413dULL) ||
191 | wrmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH2, 0xc4916d99f2b3735dULL) ||
192 | wrmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH3, 0xd4f8c05909f9bb3bULL))
193 | *(int *)failed = -EIO;
194 | }
195 |
196 | static SIMPLE_DEV_PM_OPS(sgx_drv_pm, sgx_pm_suspend, NULL);
197 |
198 | static int sgx_dev_init(struct device *parent)
199 | {
200 | unsigned int eax, ebx, ecx, edx;
201 | unsigned long pa;
202 | unsigned long size;
203 | int ret;
204 | int i;
205 | int msr_reset_failed = 0;
206 |
207 | pr_info("intel_sgx: " DRV_DESCRIPTION " v" DRV_VERSION "\n");
208 |
209 | cpuid_count(SGX_CPUID, SGX_CPUID_CAPABILITIES, &eax, &ebx, &ecx, &edx);
210 | /* Only allow misc bits supported by the driver. */
211 | sgx_misc_reserved = ~ebx | SGX_MISC_RESERVED_MASK;
212 | #ifdef CONFIG_X86_64
213 | sgx_encl_size_max_64 = 1ULL << ((edx >> 8) & 0xFF);
214 | #endif
215 | sgx_encl_size_max_32 = 1ULL << (edx & 0xFF);
216 |
217 | if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
218 | cpuid_count(SGX_CPUID, SGX_CPUID_ATTRIBUTES, &eax, &ebx, &ecx,
219 | &edx);
220 | sgx_xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
221 |
222 | for (i = 2; i < 64; i++) {
223 | cpuid_count(0x0D, i, &eax, &ebx, &ecx, &edx);
224 | if ((1 << i) & sgx_xfrm_mask)
225 | sgx_xsave_size_tbl[i] = eax + ebx;
226 | }
227 | }
228 |
229 | for (i = 0; i < SGX_MAX_EPC_BANKS; i++) {
230 | cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC_BANKS, &eax, &ebx,
231 | &ecx, &edx);
232 | if (!(eax & 0xf))
233 | break;
234 |
235 | pa = ((u64)(ebx & 0xfffff) << 32) + (u64)(eax & 0xfffff000);
236 | size = ((u64)(edx & 0xfffff) << 32) + (u64)(ecx & 0xfffff000);
237 |
238 | dev_info(parent, "EPC bank 0x%lx-0x%lx\n", pa, pa + size);
239 |
240 | sgx_epc_banks[i].pa = pa;
241 | sgx_epc_banks[i].size = size;
242 | }
243 |
244 | sgx_nr_epc_banks = i;
245 |
246 | for (i = 0; i < sgx_nr_epc_banks; i++) {
247 | #ifdef CONFIG_X86_64
248 | sgx_epc_banks[i].va = (unsigned long)
249 | ioremap_cache(sgx_epc_banks[i].pa,
250 | sgx_epc_banks[i].size);
251 | if (!sgx_epc_banks[i].va) {
252 | sgx_nr_epc_banks = i;
253 | ret = -ENOMEM;
254 | goto out_iounmap;
255 | }
256 | #endif
257 | ret = sgx_add_epc_bank(sgx_epc_banks[i].pa,
258 | sgx_epc_banks[i].size, i);
259 | if (ret) {
260 | sgx_nr_epc_banks = i + 1;
261 | goto out_iounmap;
262 | }
263 | }
264 |
265 | ret = sgx_page_cache_init();
266 | if (ret)
267 | goto out_iounmap;
268 |
269 | sgx_add_page_wq = alloc_workqueue("intel_sgx-add-page-wq",
270 | WQ_UNBOUND | WQ_FREEZABLE, 1);
271 | if (!sgx_add_page_wq) {
272 | pr_err("intel_sgx: alloc_workqueue() failed\n");
273 | ret = -ENOMEM;
274 | goto out_page_cache;
275 | }
276 |
277 | sgx_dev.parent = parent;
278 | ret = misc_register(&sgx_dev);
279 | if (ret) {
280 | pr_err("intel_sgx: misc_register() failed\n");
281 | goto out_workqueue;
282 | }
283 |
284 | on_each_cpu(sgx_reset_pubkey_hash, &msr_reset_failed, 1);
285 | if (msr_reset_failed) {
286 | 		pr_info("intel_sgx: cannot reset SGX LE public key hash MSRs\n");
287 | }
288 |
289 | return 0;
290 | out_workqueue:
291 | destroy_workqueue(sgx_add_page_wq);
292 | out_page_cache:
293 | sgx_page_cache_teardown();
294 | out_iounmap:
295 | #ifdef CONFIG_X86_64
296 | for (i = 0; i < sgx_nr_epc_banks; i++)
297 | iounmap((void *)sgx_epc_banks[i].va);
298 | #endif
299 | return ret;
300 | }
301 |
302 | static atomic_t sgx_init_flag = ATOMIC_INIT(0);
303 | static int sgx_drv_probe(struct platform_device *pdev)
304 | {
305 | unsigned int eax, ebx, ecx, edx;
306 | unsigned long fc;
307 | if (atomic_cmpxchg(&sgx_init_flag, 0, 1)) {
308 | pr_warn("intel_sgx: second initialization call skipped\n");
309 | return 0;
310 | }
311 |
312 |
313 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
314 | return -ENODEV;
315 |
316 | cpuid(7, &eax, &ebx, &ecx, &edx);
317 | 	if (!((ebx >> 2) & 0x1)) {
318 | pr_err("intel_sgx: the CPU is missing SGX\n");
319 | return -ENODEV;
320 | }
321 |
322 | rdmsrl(MSR_IA32_FEAT_CTL, fc);
323 |
324 | if (!(fc & FEAT_CTL_LOCKED)) {
325 | pr_err("intel_sgx: the feature control MSR is not locked\n");
326 | return -ENODEV;
327 | }
328 |
329 | if (!(fc & FEAT_CTL_SGX_ENABLED)) {
330 | pr_err("intel_sgx: SGX is not enabled\n");
331 | return -ENODEV;
332 | }
333 |
334 | cpuid(0, &eax, &ebx, &ecx, &edx);
335 | if (eax < SGX_CPUID) {
336 | pr_err("intel_sgx: CPUID is missing the SGX leaf\n");
337 | return -ENODEV;
338 | }
339 |
340 | cpuid_count(SGX_CPUID, SGX_CPUID_CAPABILITIES, &eax, &ebx, &ecx, &edx);
341 | if (!(eax & 1)) {
342 | pr_err("intel_sgx: CPU does not support the SGX1 instructions\n");
343 | return -ENODEV;
344 | }
345 |
346 | sgx_has_sgx2 = (eax & 2) != 0;
347 |
348 | return sgx_dev_init(&pdev->dev);
349 | }
350 |
351 | static int sgx_drv_remove(struct platform_device *pdev)
352 | {
353 | int i;
354 |
355 | if (!atomic_cmpxchg(&sgx_init_flag, 1, 0)) {
356 | pr_warn("intel_sgx: second release call skipped\n");
357 | return 0;
358 | }
359 |
360 | misc_deregister(&sgx_dev);
361 |
362 | destroy_workqueue(sgx_add_page_wq);
363 | #ifdef CONFIG_X86_64
364 | for (i = 0; i < sgx_nr_epc_banks; i++)
365 | iounmap((void *)sgx_epc_banks[i].va);
366 | #endif
367 | sgx_page_cache_teardown();
368 |
369 | return 0;
370 | }
371 |
372 | #ifdef CONFIG_ACPI
373 | static struct acpi_device_id sgx_device_ids[] = {
374 | {"INT0E0C", 0},
375 | {"", 0},
376 | };
377 | MODULE_DEVICE_TABLE(acpi, sgx_device_ids);
378 | #endif
379 |
380 | static struct platform_driver sgx_drv = {
381 | .probe = sgx_drv_probe,
382 | .remove = sgx_drv_remove,
383 | .driver = {
384 | .name = "intel_sgx",
385 | .pm = &sgx_drv_pm,
386 | .acpi_match_table = ACPI_PTR(sgx_device_ids),
387 | },
388 | };
389 |
390 | static struct platform_device *pdev;
391 | int init_sgx_module(void)
392 | {
393 | platform_driver_register(&sgx_drv);
394 | pdev = platform_device_register_simple("intel_sgx", 0, NULL, 0);
395 | if (IS_ERR(pdev))
396 | pr_err("platform_device_register_simple failed\n");
397 | return 0;
398 | }
399 |
400 | void cleanup_sgx_module(void)
401 | {
402 | dev_set_uevent_suppress(&pdev->dev, true);
403 | platform_device_unregister(pdev);
404 | platform_driver_unregister(&sgx_drv);
405 | }
406 |
407 | module_init(init_sgx_module);
408 | module_exit(cleanup_sgx_module);
409 |
410 | MODULE_LICENSE("Dual BSD/GPL");
411 |
--------------------------------------------------------------------------------
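
The EPC discovery in sgx_dev_init() above walks the SGX CPUID leaf one sub-leaf at a time and reassembles each bank's base address and size from two split register pairs. Below is a minimal user-space sketch of the same decoding, assuming the conventional leaf 0x12 with EPC sections starting at sub-leaf 2 (the driver's SGX_CPUID and SGX_CPUID_EPC_BANKS constants live in sgx_arch.h and are not repeated here); it illustrates the bit layout only and is not part of the driver.

/*
 * Hypothetical user-space sketch of the EPC enumeration performed by
 * sgx_dev_init(). Assumes SGX_CPUID == 0x12 and EPC sections starting
 * at sub-leaf 2.
 */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	for (i = 0; i < 8; i++) {
		__cpuid_count(0x12, i + 2, eax, ebx, ecx, edx);
		if (!(eax & 0xf))	/* sub-leaf type 0: no more sections */
			break;

		/* Base and size are split 20/32 bits across two registers each. */
		uint64_t pa   = ((uint64_t)(ebx & 0xfffff) << 32) | (eax & 0xfffff000);
		uint64_t size = ((uint64_t)(edx & 0xfffff) << 32) | (ecx & 0xfffff000);

		printf("EPC bank %d: 0x%llx-0x%llx\n", i,
		       (unsigned long long)pa,
		       (unsigned long long)(pa + size));
	}
	return 0;
}
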
/sgx_page_cache.c:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016-2017 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016-2017 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | * Suresh Siddha
56 | * Serge Ayoun
57 | * Shay Katz-zamir
58 | * Sean Christopherson
59 | */
60 |
61 | #include "sgx.h"
62 | #include
63 | #include
64 | #include
65 | #include
66 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
67 | #include
68 | #else
69 | #include
70 | #endif
71 | #include
72 |
73 | #define SGX_NR_LOW_EPC_PAGES_DEFAULT 32
74 | #define SGX_NR_SWAP_CLUSTER_MAX 16
75 |
76 | static LIST_HEAD(sgx_free_list);
77 | static DEFINE_SPINLOCK(sgx_free_list_lock);
78 |
79 | LIST_HEAD(sgx_tgid_ctx_list);
80 | DEFINE_MUTEX(sgx_tgid_ctx_mutex);
81 | atomic_t sgx_va_pages_cnt = ATOMIC_INIT(0);
82 | static unsigned int sgx_nr_total_epc_pages;
83 | static unsigned int sgx_nr_free_pages;
84 | static unsigned int sgx_nr_low_pages = SGX_NR_LOW_EPC_PAGES_DEFAULT;
85 | static unsigned int sgx_nr_high_pages;
86 | static struct task_struct *ksgxswapd_tsk;
87 | static DECLARE_WAIT_QUEUE_HEAD(ksgxswapd_waitq);
88 |
89 | static int sgx_test_and_clear_young_cb(pte_t *ptep,
90 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0))
91 | #if( defined(RHEL_RELEASE_VERSION) && defined(RHEL_RELEASE_CODE))
92 | #if (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8, 1))
93 | pgtable_t token,
94 | #endif
95 | #else
96 | pgtable_t token,
97 | #endif
98 | #endif
99 | unsigned long addr, void *data)
100 | {
101 | pte_t pte;
102 | int ret;
103 |
104 | ret = pte_young(*ptep);
105 | if (ret) {
106 | pte = pte_mkold(*ptep);
107 | set_pte_at((struct mm_struct *)data, addr, ptep, pte);
108 | }
109 |
110 | return ret;
111 | }
112 |
113 | /**
114 | * sgx_test_and_clear_young() - Test and reset the accessed bit
115 | * @page: enclave EPC page to be tested for recent access
116 | * @encl: enclave which owns @page
117 | *
118 | * Checks the Access (A) bit from the PTE corresponding to the
119 | * enclave page and clears it. Returns 1 if the page has been
120 | * recently accessed and 0 if not.
121 | */
122 | int sgx_test_and_clear_young(struct sgx_encl_page *page, struct sgx_encl *encl)
123 | {
124 | struct vm_area_struct *vma;
125 | int ret;
126 |
127 | ret = sgx_encl_find(encl->mm, page->addr, &vma);
128 | if (ret)
129 | return 0;
130 |
131 | if (encl != vma->vm_private_data)
132 | return 0;
133 |
134 | return apply_to_page_range(vma->vm_mm, page->addr, PAGE_SIZE,
135 | sgx_test_and_clear_young_cb, vma->vm_mm);
136 | }
137 |
138 | static struct sgx_tgid_ctx *sgx_isolate_tgid_ctx(unsigned long nr_to_scan)
139 | {
140 | struct sgx_tgid_ctx *ctx = NULL;
141 | int i;
142 |
143 | mutex_lock(&sgx_tgid_ctx_mutex);
144 |
145 | if (list_empty(&sgx_tgid_ctx_list)) {
146 | mutex_unlock(&sgx_tgid_ctx_mutex);
147 | return NULL;
148 | }
149 |
150 | for (i = 0; i < nr_to_scan; i++) {
151 | /* Peek TGID context from the head. */
152 | ctx = list_first_entry(&sgx_tgid_ctx_list,
153 | struct sgx_tgid_ctx,
154 | list);
155 |
156 | /* Move to the tail so that we do not encounter it in the
157 | * next iteration.
158 | */
159 | list_move_tail(&ctx->list, &sgx_tgid_ctx_list);
160 |
161 | /* Non-empty TGID context? */
162 | if (!list_empty(&ctx->encl_list) &&
163 | kref_get_unless_zero(&ctx->refcount))
164 | break;
165 |
166 | ctx = NULL;
167 | }
168 |
169 | mutex_unlock(&sgx_tgid_ctx_mutex);
170 |
171 | return ctx;
172 | }
173 |
174 | static struct sgx_encl *sgx_isolate_encl(struct sgx_tgid_ctx *ctx,
175 | unsigned long nr_to_scan)
176 | {
177 | struct sgx_encl *encl = NULL;
178 | int i;
179 |
180 | mutex_lock(&sgx_tgid_ctx_mutex);
181 |
182 | if (list_empty(&ctx->encl_list)) {
183 | mutex_unlock(&sgx_tgid_ctx_mutex);
184 | return NULL;
185 | }
186 |
187 | for (i = 0; i < nr_to_scan; i++) {
188 | /* Peek encl from the head. */
189 | encl = list_first_entry(&ctx->encl_list, struct sgx_encl,
190 | encl_list);
191 |
192 | /* Move to the tail so that we do not encounter it in the
193 | * next iteration.
194 | */
195 | list_move_tail(&encl->encl_list, &ctx->encl_list);
196 |
197 | /* Enclave with faulted pages? */
198 | if (!list_empty(&encl->load_list) &&
199 | kref_get_unless_zero(&encl->refcount))
200 | break;
201 |
202 | encl = NULL;
203 | }
204 |
205 | mutex_unlock(&sgx_tgid_ctx_mutex);
206 |
207 | return encl;
208 | }
209 |
210 | static void sgx_isolate_pages(struct sgx_encl *encl,
211 | struct list_head *dst,
212 | unsigned long nr_to_scan)
213 | {
214 | struct sgx_epc_page *entry;
215 | int i;
216 |
217 | mutex_lock(&encl->lock);
218 |
219 | if (encl->flags & SGX_ENCL_DEAD)
220 | goto out;
221 |
222 | for (i = 0; i < nr_to_scan; i++) {
223 | if (list_empty(&encl->load_list))
224 | break;
225 |
226 | entry = list_first_entry(&encl->load_list,
227 | struct sgx_epc_page,
228 | list);
229 |
230 | if (!sgx_test_and_clear_young(entry->encl_page, encl) &&
231 | !(entry->encl_page->flags & SGX_ENCL_PAGE_RESERVED)) {
232 | entry->encl_page->flags |= SGX_ENCL_PAGE_RESERVED;
233 | list_move_tail(&entry->list, dst);
234 | } else {
235 | list_move_tail(&entry->list, &encl->load_list);
236 | }
237 | }
238 | out:
239 | mutex_unlock(&encl->lock);
240 | }
241 |
242 | static int __sgx_ewb(struct sgx_encl *encl,
243 | struct sgx_encl_page *encl_page)
244 | {
245 | struct sgx_pageinfo pginfo;
246 | struct page *backing;
247 | struct page *pcmd;
248 | unsigned long pcmd_offset;
249 | void *epc;
250 | void *va;
251 | int ret;
252 |
253 | pcmd_offset = ((encl_page->addr >> PAGE_SHIFT) & 31) * 128;
254 |
255 | backing = sgx_get_backing(encl, encl_page, false);
256 | if (IS_ERR(backing)) {
257 | ret = PTR_ERR(backing);
258 | sgx_warn(encl, "pinning the backing page for EWB failed with %d\n",
259 | ret);
260 | return ret;
261 | }
262 |
263 | pcmd = sgx_get_backing(encl, encl_page, true);
264 | if (IS_ERR(pcmd)) {
265 | ret = PTR_ERR(pcmd);
266 | sgx_warn(encl, "pinning the pcmd page for EWB failed with %d\n",
267 | ret);
268 | goto out;
269 | }
270 |
271 | epc = sgx_get_page(encl_page->epc_page);
272 | va = sgx_get_page(encl_page->va_page->epc_page);
273 |
274 | pginfo.srcpge = (unsigned long)kmap_atomic(backing);
275 | pginfo.pcmd = (unsigned long)kmap_atomic(pcmd) + pcmd_offset;
276 | pginfo.linaddr = 0;
277 | pginfo.secs = 0;
278 | ret = __ewb(&pginfo, epc,
279 | (void *)((unsigned long)va + encl_page->va_offset));
280 | kunmap_atomic((void *)(unsigned long)(pginfo.pcmd - pcmd_offset));
281 | kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
282 |
283 | sgx_put_page(va);
284 | sgx_put_page(epc);
285 | sgx_put_backing(pcmd, true);
286 |
287 | out:
288 | sgx_put_backing(backing, true);
289 | return ret;
290 | }
291 |
292 | static bool sgx_ewb(struct sgx_encl *encl,
293 | struct sgx_encl_page *entry)
294 | {
295 | int ret = __sgx_ewb(encl, entry);
296 |
297 | if (ret == SGX_NOT_TRACKED) {
298 | /* slow path, IPI needed */
299 | sgx_flush_cpus(encl);
300 | ret = __sgx_ewb(encl, entry);
301 | }
302 |
303 | if (ret) {
304 | /* make enclave inaccessible */
305 | sgx_invalidate(encl, true);
306 | if (ret > 0)
307 | sgx_err(encl, "EWB returned %d, enclave killed\n", ret);
308 | return false;
309 | }
310 |
311 | return true;
312 | }
313 |
314 | static void sgx_evict_page(struct sgx_encl_page *entry,
315 | struct sgx_encl *encl)
316 | {
317 | sgx_ewb(encl, entry);
318 | sgx_free_page(entry->epc_page, encl);
319 | entry->epc_page = NULL;
320 | entry->flags &= ~SGX_ENCL_PAGE_RESERVED;
321 | }
322 |
323 | static void sgx_write_pages(struct sgx_encl *encl, struct list_head *src)
324 | {
325 | struct sgx_epc_page *entry;
326 | struct sgx_epc_page *tmp;
327 | struct vm_area_struct *vma;
328 | int ret;
329 |
330 | if (list_empty(src))
331 | return;
332 |
333 | entry = list_first_entry(src, struct sgx_epc_page, list);
334 |
335 | mutex_lock(&encl->lock);
336 |
337 | /* EBLOCK */
338 | list_for_each_entry_safe(entry, tmp, src, list) {
339 | ret = sgx_encl_find(encl->mm, entry->encl_page->addr, &vma);
340 | if (!ret && encl == vma->vm_private_data)
341 | zap_vma_ptes(vma, entry->encl_page->addr, PAGE_SIZE);
342 |
343 | sgx_eblock(encl, entry);
344 | }
345 |
346 | /* ETRACK */
347 | sgx_etrack(encl, encl->shadow_epoch);
348 |
349 | /* EWB */
350 | while (!list_empty(src)) {
351 | entry = list_first_entry(src, struct sgx_epc_page, list);
352 | list_del(&entry->list);
353 | sgx_evict_page(entry->encl_page, encl);
354 | encl->secs_child_cnt--;
355 | }
356 |
357 | if (!encl->secs_child_cnt && (encl->flags & SGX_ENCL_INITIALIZED)) {
358 | sgx_evict_page(&encl->secs, encl);
359 | encl->flags |= SGX_ENCL_SECS_EVICTED;
360 | }
361 |
362 | mutex_unlock(&encl->lock);
363 | }
364 |
365 | static void sgx_swap_pages(unsigned long nr_to_scan)
366 | {
367 | struct sgx_tgid_ctx *ctx;
368 | struct sgx_encl *encl;
369 | LIST_HEAD(cluster);
370 |
371 | ctx = sgx_isolate_tgid_ctx(nr_to_scan);
372 | if (!ctx)
373 | return;
374 |
375 | encl = sgx_isolate_encl(ctx, nr_to_scan);
376 | if (!encl)
377 | goto out;
378 |
379 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
380 | mmap_read_lock(encl->mm);
381 | #else
382 | down_read(&encl->mm->mmap_sem);
383 | #endif
384 |
385 | sgx_isolate_pages(encl, &cluster, nr_to_scan);
386 | sgx_write_pages(encl, &cluster);
387 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
388 | mmap_read_unlock(encl->mm);
389 | #else
390 | up_read(&encl->mm->mmap_sem);
391 | #endif
392 |
393 | kref_put(&encl->refcount, sgx_encl_release);
394 | out:
395 | kref_put(&ctx->refcount, sgx_tgid_ctx_release);
396 | }
397 |
398 | static int ksgxswapd(void *p)
399 | {
400 | set_freezable();
401 |
402 | while (!kthread_should_stop()) {
403 | if (try_to_freeze())
404 | continue;
405 |
406 | wait_event_freezable(ksgxswapd_waitq,
407 | kthread_should_stop() ||
408 | sgx_nr_free_pages < sgx_nr_high_pages);
409 |
410 | if (sgx_nr_free_pages < sgx_nr_high_pages)
411 | sgx_swap_pages(SGX_NR_SWAP_CLUSTER_MAX);
412 | }
413 |
414 | pr_info("%s: done\n", __func__);
415 | return 0;
416 | }
417 |
418 | int sgx_add_epc_bank(resource_size_t start, unsigned long size, int bank)
419 | {
420 | unsigned long i;
421 | struct sgx_epc_page *new_epc_page, *entry;
422 | struct list_head *parser, *temp;
423 |
424 | for (i = 0; i < size; i += PAGE_SIZE) {
425 | new_epc_page = kzalloc(sizeof(*new_epc_page), GFP_KERNEL);
426 | if (!new_epc_page)
427 | goto err_freelist;
428 | new_epc_page->pa = (start + i) | bank;
429 |
430 | spin_lock(&sgx_free_list_lock);
431 | list_add_tail(&new_epc_page->list, &sgx_free_list);
432 | sgx_nr_total_epc_pages++;
433 | sgx_nr_free_pages++;
434 | spin_unlock(&sgx_free_list_lock);
435 | }
436 |
437 | return 0;
438 | err_freelist:
439 | list_for_each_safe(parser, temp, &sgx_free_list) {
440 | spin_lock(&sgx_free_list_lock);
441 | entry = list_entry(parser, struct sgx_epc_page, list);
442 | list_del(&entry->list);
443 | spin_unlock(&sgx_free_list_lock);
444 | kfree(entry);
445 | }
446 | return -ENOMEM;
447 | }
448 |
449 | int sgx_page_cache_init(void)
450 | {
451 | struct task_struct *tmp;
452 |
453 | sgx_nr_high_pages = 2 * sgx_nr_low_pages;
454 |
455 | tmp = kthread_run(ksgxswapd, NULL, "ksgxswapd");
456 | if (!IS_ERR(tmp))
457 | ksgxswapd_tsk = tmp;
458 | return PTR_ERR_OR_ZERO(tmp);
459 | }
460 |
461 | void sgx_page_cache_teardown(void)
462 | {
463 | struct sgx_epc_page *entry;
464 | struct list_head *parser, *temp;
465 |
466 | if (ksgxswapd_tsk) {
467 | kthread_stop(ksgxswapd_tsk);
468 | ksgxswapd_tsk = NULL;
469 | }
470 |
471 | spin_lock(&sgx_free_list_lock);
472 | list_for_each_safe(parser, temp, &sgx_free_list) {
473 | entry = list_entry(parser, struct sgx_epc_page, list);
474 | list_del(&entry->list);
475 | kfree(entry);
476 | }
477 | spin_unlock(&sgx_free_list_lock);
478 | }
479 |
480 | static struct sgx_epc_page *sgx_alloc_page_fast(void)
481 | {
482 | struct sgx_epc_page *entry = NULL;
483 |
484 | spin_lock(&sgx_free_list_lock);
485 |
486 | if (!list_empty(&sgx_free_list)) {
487 | entry = list_first_entry(&sgx_free_list, struct sgx_epc_page,
488 | list);
489 | list_del(&entry->list);
490 | sgx_nr_free_pages--;
491 | }
492 |
493 | spin_unlock(&sgx_free_list_lock);
494 |
495 | return entry;
496 | }
497 |
498 | /**
499 | * sgx_alloc_page - allocate an EPC page
500 | * @flags: allocation flags
501 | *
502 | * Try to grab a page from the free EPC page list. If there is a free page
503 | * available, it is returned to the caller. If called with SGX_ALLOC_ATOMIC,
504 |  * the function returns immediately if the list is empty. Otherwise, it
505 |  * swaps out pages until a free page becomes available. Before returning,
506 |  * the low watermark is checked and ksgxswapd is woken up if we are below it.
507 | *
508 | * Return: an EPC page or a system error code
509 | */
510 | struct sgx_epc_page *sgx_alloc_page(unsigned int flags)
511 | {
512 | struct sgx_epc_page *entry;
513 |
514 | for ( ; ; ) {
515 | entry = sgx_alloc_page_fast();
516 | if (entry)
517 | break;
518 |
519 | /* We need at minimum two pages for the #PF handler. */
520 | if (atomic_read(&sgx_va_pages_cnt) >
521 | (sgx_nr_total_epc_pages - 2))
522 | return ERR_PTR(-ENOMEM);
523 |
524 | if (flags & SGX_ALLOC_ATOMIC) {
525 | entry = ERR_PTR(-EBUSY);
526 | break;
527 | }
528 |
529 | if (signal_pending(current)) {
530 | entry = ERR_PTR(-ERESTARTSYS);
531 | break;
532 | }
533 |
534 | sgx_swap_pages(SGX_NR_SWAP_CLUSTER_MAX);
535 | schedule();
536 | }
537 |
538 | if (sgx_nr_free_pages < sgx_nr_low_pages)
539 | wake_up(&ksgxswapd_waitq);
540 |
541 | return entry;
542 | }
543 |
544 | /**
545 | * sgx_free_page - free an EPC page
546 | *
547 |  * EREMOVE an EPC page and insert it back into the list of free pages.
548 |  * If EREMOVE fails, the error is logged as a critical error, since such
549 |  * a failure indicates a driver bug.
550 | *
551 | * @entry: any EPC page
552 | * @encl: enclave that owns the given EPC page
553 | */
554 | void sgx_free_page(struct sgx_epc_page *entry, struct sgx_encl *encl)
555 | {
556 | void *epc;
557 | int ret;
558 |
559 | epc = sgx_get_page(entry);
560 | ret = __eremove(epc);
561 | sgx_put_page(epc);
562 |
563 | if (ret)
564 | sgx_crit(encl, "EREMOVE returned %d\n", ret);
565 |
566 | spin_lock(&sgx_free_list_lock);
567 | list_add(&entry->list, &sgx_free_list);
568 | sgx_nr_free_pages++;
569 | spin_unlock(&sgx_free_list_lock);
570 | }
571 |
572 | void *sgx_get_page(struct sgx_epc_page *entry)
573 | {
574 | #ifdef CONFIG_X86_32
575 | return kmap_atomic_pfn(PFN_DOWN(entry->pa));
576 | #else
577 | int i = ((entry->pa) & ~PAGE_MASK);
578 |
579 | return (void *)(sgx_epc_banks[i].va +
580 | ((entry->pa & PAGE_MASK) - sgx_epc_banks[i].pa));
581 | #endif
582 | }
583 |
584 | void sgx_put_page(void *epc_page_vaddr)
585 | {
586 | #ifdef CONFIG_X86_32
587 | kunmap_atomic(epc_page_vaddr);
588 | #else
589 | #endif
590 | }
591 |
--------------------------------------------------------------------------------
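
sgx_add_epc_bank() tags every free EPC page by OR-ing the bank index into the low bits of the page-aligned physical address, and sgx_get_page() later recovers that index with `pa & ~PAGE_MASK` to pick the correct ioremapped bank window. A minimal stand-alone sketch of the encode/decode follows; the helper names are invented for illustration and only the bit layout mirrors the driver.

/*
 * Sketch of the bank-index encoding used by sgx_add_epc_bank() and
 * sgx_get_page(). epc_pa_pack()/epc_pa_unpack() are made-up names.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

/* Pack: page-aligned physical address in the high bits, bank index low. */
static uint64_t epc_pa_pack(uint64_t phys, unsigned int bank)
{
	return (phys & SKETCH_PAGE_MASK) | bank;
}

/* Unpack: recover the bank index and the page-aligned address. */
static void epc_pa_unpack(uint64_t pa, uint64_t *phys, unsigned int *bank)
{
	*bank = pa & ~SKETCH_PAGE_MASK;
	*phys = pa & SKETCH_PAGE_MASK;
}

int main(void)
{
	uint64_t pa = epc_pa_pack(0x80200000UL + 3 * SKETCH_PAGE_SIZE, 2);
	uint64_t phys;
	unsigned int bank;

	epc_pa_unpack(pa, &phys, &bank);
	printf("pa=0x%llx -> phys=0x%llx bank=%u\n",
	       (unsigned long long)pa, (unsigned long long)phys, bank);
	return 0;
}
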
/sgx_user.h:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | * Suresh Siddha
56 | */
57 |
58 | #ifndef _UAPI_ASM_X86_SGX_H
59 | #define _UAPI_ASM_X86_SGX_H
60 |
61 | #include
62 | #include
63 |
64 | #define SGX_MAGIC 0xA4
65 |
66 | #define SGX_IOC_ENCLAVE_CREATE \
67 | _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
68 | #define SGX_IOC_ENCLAVE_ADD_PAGE \
69 | _IOW(SGX_MAGIC, 0x01, struct sgx_enclave_add_page)
70 | #define SGX_IOC_ENCLAVE_INIT \
71 | _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
72 | #define SGX_IOC_ENCLAVE_EMODPR \
73 | _IOW(SGX_MAGIC, 0x09, struct sgx_modification_param)
74 | #define SGX_IOC_ENCLAVE_MKTCS \
75 | _IOW(SGX_MAGIC, 0x0a, struct sgx_range)
76 | #define SGX_IOC_ENCLAVE_TRIM \
77 | _IOW(SGX_MAGIC, 0x0b, struct sgx_range)
78 | #define SGX_IOC_ENCLAVE_NOTIFY_ACCEPT \
79 | _IOW(SGX_MAGIC, 0x0c, struct sgx_range)
80 | #define SGX_IOC_ENCLAVE_PAGE_REMOVE \
81 | _IOW(SGX_MAGIC, 0x0d, unsigned long)
82 |
83 | /* SGX leaf instruction return values */
84 | #define SGX_SUCCESS 0
85 | #define SGX_INVALID_SIG_STRUCT 1
86 | #define SGX_INVALID_ATTRIBUTE 2
87 | #define SGX_BLKSTATE 3
88 | #define SGX_INVALID_MEASUREMENT 4
89 | #define SGX_NOTBLOCKABLE 5
90 | #define SGX_PG_INVLD 6
91 | #define SGX_LOCKFAIL 7
92 | #define SGX_INVALID_SIGNATURE 8
93 | #define SGX_MAC_COMPARE_FAIL 9
94 | #define SGX_PAGE_NOT_BLOCKED 10
95 | #define SGX_NOT_TRACKED 11
96 | #define SGX_VA_SLOT_OCCUPIED 12
97 | #define SGX_CHILD_PRESENT 13
98 | #define SGX_ENCLAVE_ACT 14
99 | #define SGX_ENTRYEPOCH_LOCKED 15
100 | #define SGX_INVALID_EINITTOKEN 16
101 | #define SGX_PREV_TRK_INCMPL 17
102 | #define SGX_PG_IS_SECS 18
103 | #define SGX_PAGE_NOT_MODIFIABLE 20
104 | #define SGX_INVALID_CPUSVN 32
105 | #define SGX_INVALID_ISVSVN 64
106 | #define SGX_UNMASKED_EVENT 128
107 | #define SGX_INVALID_KEYNAME 256
108 |
109 | /* IOCTL return values */
110 | #define SGX_POWER_LOST_ENCLAVE 0x40000000
111 | #define SGX_LE_ROLLBACK 0x40000001
112 |
113 | /**
114 | * struct sgx_enclave_create - parameter structure for the
115 | * %SGX_IOC_ENCLAVE_CREATE ioctl
116 | * @src: address for the SECS page data
117 | */
118 | struct sgx_enclave_create {
119 | __u64 src;
120 | } __attribute__((__packed__));
121 |
122 | /**
123 | * struct sgx_enclave_add_page - parameter structure for the
124 | * %SGX_IOC_ENCLAVE_ADD_PAGE ioctl
125 | * @addr: address in the ELRANGE
126 | * @src: address for the page data
127 | * @secinfo: address for the SECINFO data
128 | * @mrmask: bitmask for the 256 byte chunks that are to be measured
129 | */
130 | struct sgx_enclave_add_page {
131 | __u64 addr;
132 | __u64 src;
133 | __u64 secinfo;
134 | __u16 mrmask;
135 | } __attribute__((__packed__));
136 |
137 | /**
138 | * struct sgx_enclave_init - parameter structure for the
139 | * %SGX_IOC_ENCLAVE_INIT ioctl
140 | * @addr: address in the ELRANGE
141 | * @sigstruct: address for the page data
142 | * @einittoken: EINITTOKEN
143 | */
144 | struct sgx_enclave_init {
145 | __u64 addr;
146 | __u64 sigstruct;
147 | __u64 einittoken;
148 | } __attribute__((__packed__));
149 |
150 | /*
151 | * SGX2.0 definitions
152 | */
153 |
154 | struct sgx_range {
155 | unsigned long start_addr;
156 | unsigned int nr_pages;
157 | };
158 |
159 | struct sgx_modification_param {
160 | struct sgx_range range;
161 | unsigned long flags;
162 | };
163 |
164 | #endif /* _UAPI_ASM_X86_SGX_H */
165 |
--------------------------------------------------------------------------------
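
The ioctls above are issued against the /dev/isgx node registered by sgx_main.c. The sketch below shows, under stated assumptions, how the three SGX1 build ioctls would be wired together from user space; the SECS, SECINFO, SIGSTRUCT and EINITTOKEN blobs must come from an SGX SDK or signing tool (their layouts live in sgx_arch.h), and mapping the enclave range with mmap() beforehand is omitted, so this is a call-sequence illustration only.

/*
 * Call-sequence sketch only: all enclave data structures are assumed
 * to be prepared elsewhere; error handling is minimal.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "sgx_user.h"

static int build_enclave(void *secs, unsigned long encl_addr, void *page,
			 void *secinfo, void *sigstruct, void *einittoken)
{
	struct sgx_enclave_create create = {
		.src = (__u64)(uintptr_t)secs,
	};
	struct sgx_enclave_add_page add = {
		.addr = encl_addr,
		.src = (__u64)(uintptr_t)page,
		.secinfo = (__u64)(uintptr_t)secinfo,
		.mrmask = 0xffff,		/* measure all 256-byte chunks */
	};
	struct sgx_enclave_init init = {
		.addr = encl_addr,
		.sigstruct = (__u64)(uintptr_t)sigstruct,
		.einittoken = (__u64)(uintptr_t)einittoken,
	};
	int fd = open("/dev/isgx", O_RDWR);

	if (fd < 0) {
		perror("open /dev/isgx");
		return -1;
	}

	if (ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &create) ||
	    ioctl(fd, SGX_IOC_ENCLAVE_ADD_PAGE, &add) ||
	    ioctl(fd, SGX_IOC_ENCLAVE_INIT, &init)) {
		perror("sgx ioctl");
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}
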
/sgx_util.c:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016-2017 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016-2017 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | * Suresh Siddha
56 | * Serge Ayoun
57 | * Shay Katz-zamir
58 | * Sean Christopherson
59 | */
60 |
61 | #include "sgx.h"
62 | #include
63 | #include
64 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
65 | #include
66 | #else
67 | #include
68 | #endif
69 | int sgx_vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, resource_size_t pa)
70 | {
71 | int rc;
72 |
73 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
74 | rc = vmf_insert_pfn(vma, addr, PFN_DOWN(pa));
75 | #else
76 | #if( defined(RHEL_RELEASE_VERSION) && defined(RHEL_RELEASE_CODE))
77 | #if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 1))
78 | rc = vmf_insert_pfn(vma, addr, PFN_DOWN(pa));
79 | #else //8.1 or below
80 | rc = vm_insert_pfn(vma, addr, PFN_DOWN(pa));
81 | if (!rc){
82 | rc = VM_FAULT_NOPAGE;
83 | }
84 | #endif
85 | #else
86 | rc = vm_insert_pfn(vma, addr, PFN_DOWN(pa));
87 | if (!rc){
88 | rc = VM_FAULT_NOPAGE;
89 | }
90 | #endif
91 | #endif
92 | return rc;
93 | }
94 |
95 | struct page *sgx_get_backing(struct sgx_encl *encl,
96 | struct sgx_encl_page *entry,
97 | bool pcmd)
98 | {
99 | struct inode *inode;
100 | struct address_space *mapping;
101 | gfp_t gfpmask;
102 | pgoff_t index;
103 |
104 | if (pcmd)
105 | inode = encl->pcmd->f_path.dentry->d_inode;
106 | else
107 | inode = encl->backing->f_path.dentry->d_inode;
108 |
109 | mapping = inode->i_mapping;
110 | gfpmask = mapping_gfp_mask(mapping);
111 |
112 | if (pcmd)
113 | index = (entry->addr - encl->base) >> (PAGE_SHIFT + 5);
114 | else
115 | index = (entry->addr - encl->base) >> PAGE_SHIFT;
116 |
117 | return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
118 | }
119 |
120 | void sgx_put_backing(struct page *backing_page, bool write)
121 | {
122 | if (write)
123 | set_page_dirty(backing_page);
124 |
125 | put_page(backing_page);
126 | }
127 |
128 | void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
129 | {
130 | struct sgx_epc_page *tmp;
131 | struct sgx_encl_page *entry;
132 |
133 | list_for_each_entry(tmp, &encl->load_list, list) {
134 | entry = tmp->encl_page;
135 | if ((entry->flags & SGX_ENCL_PAGE_TCS) &&
136 | entry->addr >= vma->vm_start &&
137 | entry->addr < vma->vm_end)
138 | zap_vma_ptes(vma, entry->addr, PAGE_SIZE);
139 | }
140 | }
141 |
142 | void sgx_invalidate(struct sgx_encl *encl, bool flush_cpus)
143 | {
144 | struct vm_area_struct *vma;
145 | unsigned long addr;
146 | int ret;
147 |
148 | for (addr = encl->base; addr < (encl->base + encl->size);
149 | addr = vma->vm_end) {
150 | ret = sgx_encl_find(encl->mm, addr, &vma);
151 | if (!ret && encl == vma->vm_private_data)
152 | sgx_zap_tcs_ptes(encl, vma);
153 | else
154 | break;
155 | }
156 |
157 | encl->flags |= SGX_ENCL_DEAD;
158 |
159 | if (flush_cpus)
160 | sgx_flush_cpus(encl);
161 | }
162 |
163 | void sgx_ipi_cb(void *info)
164 | {
165 | }
166 |
167 | void sgx_flush_cpus(struct sgx_encl *encl)
168 | {
169 | on_each_cpu_mask(mm_cpumask(encl->mm), sgx_ipi_cb, NULL, 1);
170 | }
171 |
172 | int sgx_eldu(struct sgx_encl *encl,
173 | struct sgx_encl_page *encl_page,
174 | struct sgx_epc_page *epc_page,
175 | bool is_secs)
176 | {
177 | struct page *backing;
178 | struct page *pcmd;
179 | unsigned long pcmd_offset;
180 | struct sgx_pageinfo pginfo;
181 | void *secs_ptr = NULL;
182 | void *epc_ptr;
183 | void *va_ptr;
184 | int ret;
185 |
186 | pcmd_offset = ((encl_page->addr >> PAGE_SHIFT) & 31) * 128;
187 |
188 | backing = sgx_get_backing(encl, encl_page, false);
189 | if (IS_ERR(backing)) {
190 | ret = PTR_ERR(backing);
191 | sgx_warn(encl, "pinning the backing page for ELDU failed with %d\n",
192 | ret);
193 | return ret;
194 | }
195 |
196 | pcmd = sgx_get_backing(encl, encl_page, true);
197 | if (IS_ERR(pcmd)) {
198 | ret = PTR_ERR(pcmd);
199 | 		sgx_warn(encl, "pinning the pcmd page for ELDU failed with %d\n",
200 | ret);
201 | goto out;
202 | }
203 |
204 | if (!is_secs)
205 | secs_ptr = sgx_get_page(encl->secs.epc_page);
206 |
207 | epc_ptr = sgx_get_page(epc_page);
208 | va_ptr = sgx_get_page(encl_page->va_page->epc_page);
209 | pginfo.srcpge = (unsigned long)kmap_atomic(backing);
210 | pginfo.pcmd = (unsigned long)kmap_atomic(pcmd) + pcmd_offset;
211 | pginfo.linaddr = is_secs ? 0 : encl_page->addr;
212 | pginfo.secs = (unsigned long)secs_ptr;
213 |
214 | ret = __eldu((unsigned long)&pginfo,
215 | (unsigned long)epc_ptr,
216 | (unsigned long)va_ptr +
217 | encl_page->va_offset);
218 | if (ret) {
219 | sgx_err(encl, "ELDU returned %d\n", ret);
220 | ret = -EFAULT;
221 | }
222 |
223 | kunmap_atomic((void *)(unsigned long)(pginfo.pcmd - pcmd_offset));
224 | kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
225 | sgx_put_page(va_ptr);
226 | sgx_put_page(epc_ptr);
227 |
228 | if (!is_secs)
229 | sgx_put_page(secs_ptr);
230 |
231 | sgx_put_backing(pcmd, false);
232 |
233 | out:
234 | sgx_put_backing(backing, false);
235 | return ret;
236 | }
237 |
238 | static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma,
239 | unsigned long addr,
240 | unsigned int flags,
241 | struct vm_fault *vmf)
242 | {
243 | struct sgx_encl *encl = vma->vm_private_data;
244 | struct sgx_encl_page *entry;
245 | struct sgx_epc_page *epc_page = NULL;
246 | struct sgx_epc_page *secs_epc_page = NULL;
247 | bool reserve = (flags & SGX_FAULT_RESERVE) != 0;
248 | int rc = 0;
249 | bool write = (vmf) ? (FAULT_FLAG_WRITE & vmf->flags) : false;
250 |
251 | /* If process was forked, VMA is still there but vm_private_data is set
252 | * to NULL.
253 | */
254 | if (!encl)
255 | return ERR_PTR(-EFAULT);
256 |
257 | mutex_lock(&encl->lock);
258 |
259 | entry = radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT);
260 | if (vmf && !entry) {
261 | entry = sgx_encl_augment(vma, addr, write);
262 | goto out;
263 | }
264 |
265 | 	/* A missing entry cannot happen in the 'reload an evicted page'
266 | 	 * flow.
267 | 	 */
268 | if (!entry) {
269 | rc = -EFAULT;
270 | goto out;
271 | }
272 |
273 | if (encl->flags & SGX_ENCL_DEAD) {
274 | rc = -EFAULT;
275 | goto out;
276 | }
277 |
278 | if (!(encl->flags & SGX_ENCL_INITIALIZED)) {
279 | 		sgx_dbg(encl, "cannot fault, uninitialized\n");
280 | rc = -EFAULT;
281 | goto out;
282 | }
283 |
284 | if (reserve && (entry->flags & SGX_ENCL_PAGE_RESERVED)) {
285 | sgx_dbg(encl, "cannot fault, 0x%p is reserved\n",
286 | (void *)entry->addr);
287 | rc = -EBUSY;
288 | goto out;
289 | }
290 |
291 | /* Legal race condition, page is already faulted. */
292 | if (entry->epc_page) {
293 | if (reserve)
294 | entry->flags |= SGX_ENCL_PAGE_RESERVED;
295 | goto out;
296 | }
297 |
298 | epc_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
299 | if (IS_ERR(epc_page)) {
300 | rc = PTR_ERR(epc_page);
301 | epc_page = NULL;
302 | goto out;
303 | }
304 |
305 | /* If SECS is evicted then reload it first */
306 | if (encl->flags & SGX_ENCL_SECS_EVICTED) {
307 | secs_epc_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
308 | if (IS_ERR(secs_epc_page)) {
309 | rc = PTR_ERR(secs_epc_page);
310 | secs_epc_page = NULL;
311 | goto out;
312 | }
313 |
314 | rc = sgx_eldu(encl, &encl->secs, secs_epc_page, true);
315 | if (rc)
316 | goto out;
317 |
318 | encl->secs.epc_page = secs_epc_page;
319 | encl->flags &= ~SGX_ENCL_SECS_EVICTED;
320 |
321 | /* Do not free */
322 | secs_epc_page = NULL;
323 | }
324 |
325 | rc = sgx_eldu(encl, entry, epc_page, false /* is_secs */);
326 | if (rc)
327 | goto out;
328 |
329 | /* Track the EPC page even if vm_insert_pfn fails; we need to ensure
330 | * the EPC page is properly freed and we can't do EREMOVE right away
331 | * because EREMOVE may fail due to an active cpu in the enclave. We
332 | * can't call vm_insert_pfn before sgx_eldu because SKL signals #GP
333 | * instead of #PF if the EPC page is invalid.
334 | */
335 | encl->secs_child_cnt++;
336 |
337 | epc_page->encl_page = entry;
338 | entry->epc_page = epc_page;
339 |
340 | if (reserve)
341 | entry->flags |= SGX_ENCL_PAGE_RESERVED;
342 |
343 | /* Do not free */
344 | epc_page = NULL;
345 | list_add_tail(&entry->epc_page->list, &encl->load_list);
346 | rc = sgx_vm_insert_pfn(vma, entry->addr, entry->epc_page->pa);
347 |
348 | if (rc != VM_FAULT_NOPAGE) {
349 | /* Kill the enclave if vm_insert_pfn fails; failure only occurs
350 | * if there is a driver bug or an unrecoverable issue, e.g. OOM.
351 | */
352 | sgx_crit(encl, "vm_insert_pfn returned %d\n", rc);
353 | sgx_invalidate(encl, true);
354 | goto out;
355 | }
356 |
357 | rc = 0;
358 | sgx_test_and_clear_young(entry, encl);
359 | out:
360 | mutex_unlock(&encl->lock);
361 | if (epc_page)
362 | sgx_free_page(epc_page, encl);
363 | if (secs_epc_page)
364 | sgx_free_page(secs_epc_page, encl);
365 | return rc ? ERR_PTR(rc) : entry;
366 | }
367 |
368 | struct sgx_encl_page *sgx_fault_page(struct vm_area_struct *vma,
369 | unsigned long addr,
370 | unsigned int flags,
371 | struct vm_fault *vmf)
372 | {
373 | struct sgx_encl_page *entry;
374 |
375 | do {
376 | entry = sgx_do_fault(vma, addr, flags, vmf);
377 | if (!(flags & SGX_FAULT_RESERVE))
378 | break;
379 | } while (PTR_ERR(entry) == -EBUSY);
380 |
381 | return entry;
382 | }
383 |
384 | void sgx_eblock(struct sgx_encl *encl, struct sgx_epc_page *epc_page)
385 | {
386 | void *vaddr;
387 | int ret;
388 |
389 | vaddr = sgx_get_page(epc_page);
390 | ret = __eblock((unsigned long)vaddr);
391 | sgx_put_page(vaddr);
392 |
393 | if (ret) {
394 | sgx_crit(encl, "EBLOCK returned %d\n", ret);
395 | sgx_invalidate(encl, true);
396 | }
397 |
398 | }
399 |
400 | void sgx_etrack(struct sgx_encl *encl, unsigned int epoch)
401 | {
402 | void *epc;
403 | int ret;
404 |
405 | /* If someone already called etrack in the meantime */
406 | if (epoch < encl->shadow_epoch)
407 | return;
408 |
409 | epc = sgx_get_page(encl->secs.epc_page);
410 | ret = __etrack(epc);
411 | sgx_put_page(epc);
412 | encl->shadow_epoch++;
413 |
414 | if (ret == SGX_PREV_TRK_INCMPL) {
415 | sgx_dbg(encl, "ETRACK returned %d\n", ret);
416 | smp_call_function(sgx_ipi_cb, NULL, 1);
417 | BUG_ON(__etrack(epc));
418 | } else if (ret) {
419 | sgx_crit(encl, "ETRACK returned %d\n", ret);
420 | sgx_invalidate(encl, true);
421 | }
422 | }
423 |
--------------------------------------------------------------------------------
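
sgx_get_backing() addresses the regular backing store one shmem page per enclave page, while PCMD data is packed 32 entries of 128 bytes to a page; that is where the `>> (PAGE_SHIFT + 5)` index and the `((addr >> PAGE_SHIFT) & 31) * 128` offset used by sgx_eldu() and __sgx_ewb() come from. A small worked sketch of that arithmetic, using hypothetical addresses:

/*
 * Worked sketch of the backing/PCMD index arithmetic in
 * sgx_get_backing(). The enclave base and page address are
 * hypothetical; only the shifts mirror the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SHIFT 12

int main(void)
{
	uint64_t base = 0x7f0000000000ULL;	/* hypothetical encl->base   */
	uint64_t addr = base + 37 * 4096;	/* hypothetical page address */

	/* One backing shmem page per enclave page. */
	uint64_t backing_index = (addr - base) >> SK_PAGE_SHIFT;

	/* 32 PCMD entries (128 bytes each) share one PCMD backing page. */
	uint64_t pcmd_index  = (addr - base) >> (SK_PAGE_SHIFT + 5);
	uint64_t pcmd_offset = ((addr >> SK_PAGE_SHIFT) & 31) * 128;

	printf("backing page %llu, pcmd page %llu, pcmd offset %llu\n",
	       (unsigned long long)backing_index,
	       (unsigned long long)pcmd_index,
	       (unsigned long long)pcmd_offset);
	return 0;
}
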
/sgx_vma.c:
--------------------------------------------------------------------------------
1 | /*
2 | * This file is provided under a dual BSD/GPLv2 license. When using or
3 | * redistributing this file, you may do so under either license.
4 | *
5 | * GPL LICENSE SUMMARY
6 | *
7 | * Copyright(c) 2016 Intel Corporation.
8 | *
9 | * This program is free software; you can redistribute it and/or modify
10 | * it under the terms of version 2 of the GNU General Public License as
11 | * published by the Free Software Foundation.
12 | *
13 | * This program is distributed in the hope that it will be useful, but
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 | * General Public License for more details.
17 | *
18 | * Contact Information:
19 | * Jarkko Sakkinen
20 | * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
21 | *
22 | * BSD LICENSE
23 | *
24 | * Copyright(c) 2016 Intel Corporation.
25 | *
26 | * Redistribution and use in source and binary forms, with or without
27 | * modification, are permitted provided that the following conditions
28 | * are met:
29 | *
30 | * * Redistributions of source code must retain the above copyright
31 | * notice, this list of conditions and the following disclaimer.
32 | * * Redistributions in binary form must reproduce the above copyright
33 | * notice, this list of conditions and the following disclaimer in
34 | * the documentation and/or other materials provided with the
35 | * distribution.
36 | * * Neither the name of Intel Corporation nor the names of its
37 | * contributors may be used to endorse or promote products derived
38 | * from this software without specific prior written permission.
39 | *
40 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
50 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | * Authors:
53 | *
54 | * Jarkko Sakkinen
55 | * Suresh Siddha
56 | * Serge Ayoun
57 | * Shay Katz-zamir
58 | * Sean Christopherson
59 | */
60 |
61 | #include "sgx.h"
62 | #include
63 | #include
64 | #include
65 | #include
66 | #include
67 | #include
68 | #include
69 | #include
70 | #include
71 |
72 | static void sgx_vma_open(struct vm_area_struct *vma)
73 | {
74 | struct sgx_encl *encl = vma->vm_private_data;
75 |
76 | if (!encl)
77 | return;
78 |
79 | /* protect from fork */
80 | if (encl->mm != current->mm) {
81 | vma->vm_private_data = NULL;
82 | return;
83 | }
84 |
85 | 	/* The kref cannot underflow because the ECREATE ioctl checks that
86 | 	 * there is only a single VMA for the enclave before proceeding.
87 | 	 */
88 | kref_get(&encl->refcount);
89 | }
90 |
91 | static void sgx_vma_close(struct vm_area_struct *vma)
92 | {
93 | struct sgx_encl *encl = vma->vm_private_data;
94 |
95 | if (!encl)
96 | return;
97 |
98 | mutex_lock(&encl->lock);
99 | zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
100 | encl->flags |= SGX_ENCL_DEAD;
101 | mutex_unlock(&encl->lock);
102 | kref_put(&encl->refcount, sgx_encl_release);
103 | }
104 |
105 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
106 | static unsigned int sgx_vma_fault(struct vm_fault *vmf)
107 | {
108 | struct vm_area_struct *vma = vmf->vma;
109 | #else
110 | #if( defined(RHEL_RELEASE_VERSION) && defined(RHEL_RELEASE_CODE))
111 | #if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 1))
112 | static unsigned int sgx_vma_fault(struct vm_fault *vmf)
113 | {
114 | struct vm_area_struct *vma = vmf->vma;
115 | #elif (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0))
116 | static int sgx_vma_fault(struct vm_fault *vmf)
117 | {
118 | struct vm_area_struct *vma = vmf->vma;
119 | #else // 7.x
120 | static int sgx_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
121 | {
122 | #endif
123 | #else
124 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
125 | static int sgx_vma_fault(struct vm_fault *vmf)
126 | {
127 | struct vm_area_struct *vma = vmf->vma;
128 | #else
129 | static int sgx_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
130 | {
131 | #endif
132 | #endif
133 | #endif
134 |
135 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
136 | unsigned long addr = (unsigned long)vmf->address;
137 | #else
138 | unsigned long addr = (unsigned long) vmf->virtual_address;
139 | #endif
140 | struct sgx_encl_page *entry;
141 |
142 | entry = sgx_fault_page(vma, addr, 0, vmf);
143 |
144 | if (!IS_ERR(entry) || PTR_ERR(entry) == -EBUSY)
145 | return VM_FAULT_NOPAGE;
146 | else
147 | return VM_FAULT_SIGBUS;
148 | }
149 |
150 | static inline int sgx_vma_access_word(struct sgx_encl *encl,
151 | unsigned long addr,
152 | void *buf,
153 | int len,
154 | int write,
155 | struct sgx_encl_page *encl_page,
156 | int i)
157 | {
158 | char data[sizeof(unsigned long)];
159 | int align, cnt, offset;
160 | void *vaddr;
161 | int ret;
162 |
163 | offset = ((addr + i) & (PAGE_SIZE - 1)) & ~(sizeof(unsigned long) - 1);
164 | align = (addr + i) & (sizeof(unsigned long) - 1);
165 | cnt = sizeof(unsigned long) - align;
166 | cnt = min(cnt, len - i);
167 |
168 | if (write) {
169 | if (encl_page->flags & SGX_ENCL_PAGE_TCS &&
170 | (offset < 8 || (offset + (len - i)) > 16))
171 | return -ECANCELED;
172 |
173 | if (align || (cnt != sizeof(unsigned long))) {
174 | vaddr = sgx_get_page(encl_page->epc_page);
175 | ret = __edbgrd((void *)((unsigned long)vaddr + offset),
176 | (unsigned long *)data);
177 | sgx_put_page(vaddr);
178 | if (ret) {
179 | sgx_dbg(encl, "EDBGRD returned %d\n", ret);
180 | return -EFAULT;
181 | }
182 | }
183 |
184 | memcpy(data + align, buf + i, cnt);
185 | vaddr = sgx_get_page(encl_page->epc_page);
186 | ret = __edbgwr((void *)((unsigned long)vaddr + offset),
187 | (unsigned long *)data);
188 | sgx_put_page(vaddr);
189 | if (ret) {
190 | sgx_dbg(encl, "EDBGWR returned %d\n", ret);
191 | return -EFAULT;
192 | }
193 | } else {
194 | if (encl_page->flags & SGX_ENCL_PAGE_TCS &&
195 | (offset + (len - i)) > 72)
196 | return -ECANCELED;
197 |
198 | vaddr = sgx_get_page(encl_page->epc_page);
199 | ret = __edbgrd((void *)((unsigned long)vaddr + offset),
200 | (unsigned long *)data);
201 | sgx_put_page(vaddr);
202 | if (ret) {
203 | sgx_dbg(encl, "EDBGRD returned %d\n", ret);
204 | return -EFAULT;
205 | }
206 |
207 | memcpy(buf + i, data + align, cnt);
208 | }
209 |
210 | return cnt;
211 | }
212 |
213 | static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
214 | void *buf, int len, int write)
215 | {
216 | struct sgx_encl *encl = vma->vm_private_data;
217 | struct sgx_encl_page *entry = NULL;
218 | const char *op_str = write ? "EDBGWR" : "EDBGRD";
219 | int ret = 0;
220 | int i;
221 |
222 | /* If process was forked, VMA is still there but vm_private_data is set
223 | * to NULL.
224 | */
225 | if (!encl)
226 | return -EFAULT;
227 |
228 | if (!(encl->flags & SGX_ENCL_DEBUG) ||
229 | !(encl->flags & SGX_ENCL_INITIALIZED) ||
230 | (encl->flags & SGX_ENCL_DEAD))
231 | return -EFAULT;
232 |
233 | sgx_dbg(encl, "%s addr=0x%lx, len=%d\n", op_str, addr, len);
234 |
235 | for (i = 0; i < len; i += ret) {
236 | if (!entry || !((addr + i) & (PAGE_SIZE - 1))) {
237 | if (entry)
238 | entry->flags &= ~SGX_ENCL_PAGE_RESERVED;
239 |
240 | entry = sgx_fault_page(vma, (addr + i) & PAGE_MASK,
241 | SGX_FAULT_RESERVE, NULL);
242 | if (IS_ERR(entry)) {
243 | ret = PTR_ERR(entry);
244 | entry = NULL;
245 | break;
246 | }
247 | }
248 |
249 | 		/* No locks are needed because the fields used here are immutable
250 | 		 * after initialization.
251 | 		 */
252 | ret = sgx_vma_access_word(encl, addr, buf, len, write,
253 | entry, i);
254 | if (ret < 0)
255 | break;
256 | }
257 |
258 | if (entry)
259 | entry->flags &= ~SGX_ENCL_PAGE_RESERVED;
260 |
261 | return (ret < 0 && ret != -ECANCELED) ? ret : i;
262 | }
263 |
264 | const struct vm_operations_struct sgx_vm_ops = {
265 | .close = sgx_vma_close,
266 | .open = sgx_vma_open,
267 | .fault = sgx_vma_fault,
268 | .access = sgx_vma_access,
269 | };
270 |
--------------------------------------------------------------------------------
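
sgx_vma_access() drives debugger reads and writes through sgx_vma_access_word(), which clips every step to one naturally aligned machine word so that EDBGRD/EDBGWR always operate on whole words. The stand-alone sketch below reproduces just that chunking arithmetic with hypothetical inputs; the EPC accesses themselves are omitted.

/*
 * Sketch of the chunking arithmetic in sgx_vma_access_word(): each
 * step handles at most one naturally aligned word, clipped to the
 * remaining buffer length. Address and length are hypothetical.
 */
#include <stdio.h>

#define SK_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long addr = 0x1003;	/* deliberately unaligned */
	int len = 21;			/* hypothetical access length */
	int i, cnt;

	for (i = 0; i < len; i += cnt) {
		/* Word-aligned offset of this chunk within its page. */
		int offset = ((addr + i) & (SK_PAGE_SIZE - 1)) &
			     ~(sizeof(unsigned long) - 1);
		/* Misalignment of the access within that word. */
		int align = (addr + i) & (sizeof(unsigned long) - 1);

		cnt = sizeof(unsigned long) - align;
		if (cnt > len - i)
			cnt = len - i;

		printf("i=%2d word offset=%4d align=%d copy %d byte(s)\n",
		       i, offset, align, cnt);
	}
	return 0;
}
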