├── .dockerignore ├── .gitignore ├── .gitmodules ├── ACKNOWLEDGEMENTS ├── AUTHORS ├── COPYRIGHT ├── README.md ├── docker-compose.yml ├── docker ├── actc │ ├── Dockerfile │ ├── development.sh │ ├── install_modules.sh │ ├── install_prebuilts.sh │ ├── patch_gcc.sh │ └── service │ │ ├── main.py │ │ └── uwsgi.ini ├── development.sh ├── mysql │ └── mysql-setup.sh ├── portal │ ├── Dockerfile │ ├── backends.json │ ├── main.py │ └── uwsgi.ini └── run.sh ├── projects └── .gitkeep └── setup.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | projects 2 | .git 3 | **/Dockerfile* 4 | **/*.pyc 5 | **/*.sw* 6 | **/*.vim 7 | **/*~ 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | modules/anti_debugging 2 | projects 3 | *.sublime* 4 | *swp 5 | *~ 6 | *.vim 7 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "diablo"] 2 | path = modules/diablo 3 | url = https://github.com/csl-ugent/diablo 4 | [submodule "code-mobility"] 5 | path = modules/code_mobility 6 | url = https://github.com/uel-aspire-fp7/code-mobility.git 7 | [submodule "ascl"] 8 | path = modules/ascl 9 | url = https://github.com/uel-aspire-fp7/ascl.git 10 | [submodule "accl"] 11 | path = modules/accl 12 | url = https://github.com/uel-aspire-fp7/accl.git 13 | [submodule "annotation_extractor"] 14 | path = modules/annotation_extractor 15 | url = https://github.com/aspire-fp7/annotation_extractor.git 16 | [submodule "code-guards"] 17 | path = modules/code_guards 18 | url = https://github.com/aspire-fp7/code-guards 19 | [submodule "renewability"] 20 | path = modules/renewability 21 | url = https://github.com/uel-aspire-fp7/renewability 22 | [submodule "remote-attestation"] 23 | path = modules/remote_attestation 24 | url = https://github.com/aspire-fp7/remote-attestation 25 | [submodule "actc"] 26 | path = modules/actc 27 | url = https://github.com/aspire-fp7/actc 28 | -------------------------------------------------------------------------------- /ACKNOWLEDGEMENTS: -------------------------------------------------------------------------------- 1 | The research is supported by the European Union Seventh Framework Programme 2 | (FP7/2007-2013), project ASPIRE (Advanced Software Protection: Integration, 3 | Research, and Exploitation), under grant agreement no. 609734; on-line at 4 | https://aspire-fp7.eu/. 5 | 6 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Bart Coppens 2 | Bert Abrath 3 | -------------------------------------------------------------------------------- /COPYRIGHT: -------------------------------------------------------------------------------- 1 | This is the license for the files in this central framework repository. The 2 | submodules are licensed under their own, different licenses. 3 | 4 | Copyright (c) 2016-2017, Ghent University 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are met: 9 | * Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 
11 | * Redistributions in binary form must reproduce the above copyright 12 | notice, this list of conditions and the following disclaimer in the 13 | documentation and/or other materials provided with the distribution. 14 | * Neither the name of Ghent University, nor the 15 | names of its contributors may be used to endorse or promote products 16 | derived from this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL GHENT UNIVERSITY BE LIABLE FOR ANY 22 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # How to use the ASPIRE framework 2 | In this manual, we will explain how to set up the ASPIRE tools and how to use 3 | them to protect a simple application. 4 | 5 | The set up of the open sourced version ASPIRE tools is simple thanks to the use 6 | of the [Docker virtualization framework](https://www.docker.com/). More 7 | specifically, we use [Docker 8 | Compose](https://docs.docker.com/compose/overview/) for running and combining 9 | all the different components. The rest of this manual describes how to set 10 | everything up, and how to apply different ASPIRE protections with the ASPIRE 11 | Compiler Tool Chain (ACTC). 12 | 13 | ## Docker & Setup 14 | We assume that you have already Docker installed on your machine. If not, you 15 | can follow the instructions from the [Docker website](https://www.docker.com/). 16 | At least version 1.13 of Docker Compose is required. 17 | 18 | To set up the ASPIRE framework, you just have to clone this repository which 19 | contains the base Docker file. As all the actual tools are linked into this 20 | repository with git submodules, you'll also have to initialize the git 21 | submodules in addition to cloning this repository. We use a separate script for 22 | this, which will also query you to (optionally) install additional support for 23 | anti_debugging. 24 | 25 | # git clone https://github.com/aspire-fp7/framework/ 26 | # cd framework 27 | # ./setup.sh 28 | 29 | No extra setup is required: all components will automatically be built the at 30 | the first run. Most ASPIRE projects are built from scratch from the source 31 | code. This allows you to immediately start developing and extending any 32 | existing tools, without having to worry about how to build the projects and how 33 | to overwrite the pre-built files. The only down side is that the initial build 34 | takes some time. This process takes about 6 minutes on a decently modern 35 | machine. 36 | 37 | The only files that are not built inside containers are: 38 | 39 | * The patched binary tool chains. 
These can be rebuilt from source by cloning 40 | Diablo's toolchains repository located at 41 | [https://github.com/csl-ugent/toolchains](https://github.com/csl-ugent/toolchains) 42 | and following the instructions in the `README.MD` file. 43 | * Versions of the OpenSSL, libwebsockets and libcurl libraries that have been 44 | built with Diablo-compatible tool chains. These can be rebuilt from source by 45 | cloning the 46 | [https://github.com/aspire-fp7/3rd_party/](https://github.com/aspire-fp7/3rd_party/) 47 | repository and following the instructions in the `README.MD` file. 48 | 49 | ## Running the ACTC without any protections 50 | During the first run of the ACTC the 'projects' directory is prepared by 51 | checking out the actc-demos repository into it. We will use one of the samples 52 | in this repository for the remainder of this manual. The `projects/` directory 53 | is mapped into the ACTC container as `/projects/`, so this demos repository is 54 | located inside the container at `/projects/actc-demos/`. We will be protecting 55 | the bzip2 compression utility in the repository. As an example, we have already 56 | added some annotations to the `bzip2.c` source file. 57 | 58 | First, we will use the ACTC to build an unprotected bzip2 binary for the ARM 59 | Linux platform. We have provided an ACTC configuration file for this purpose. 60 | You can view this file at 61 | `projects/actc-demos/bzip2-1.0.6/actc/bzip2_linux.json`. This configuration 62 | file instructs the ACTC in such a way that the ACTC will build the binary, but 63 | it will not apply any protections. To run the ACTC with this configuration 64 | file, do the following: 65 | 66 | # ./docker/run.sh -d -f /projects/actc-demos/bzip2-1.0.6/actc/bzip2_linux.json 67 | 68 | This produces a final binary in 69 | `projects/actc-demos/bzip2-1.0.6/actc/build/bzip2_linux/BC05/bzip2`. If you 70 | have an ARM development board, you can copy this file and run it as you would 71 | do any other binary. 72 | 73 | ## Running the ACTC with only offline protections 74 | The ACTC now has applied no protections at all to our binary. As a first step, 75 | we will apply some offline protection techniques. The annotations to instruct 76 | the protection tools which code fragments need to be protected have already 77 | been added in the source code (you can find these by looking for 78 | `_Pragma("ASPIRE begin` and `_Pragma ("ASPIRE end")` in the source files). As 79 | you can see, we have added annotations for *call stack checks*, *binary 80 | obfuscations*, and *code guards*. We only need to enable their application in 81 | the configuration file of the ACTC. 82 | 83 | Annotations can be added and modified at will. Their syntax and semantics are 84 | described in detail in the appendices of the [ASPIRE Framework 85 | report](https://aspire-fp7.eu/sites/default/files/D5.11-ASPIRE-Framework-Report.pdf). 86 | 87 | The configuration file is a JSON file that can be edited easily: 88 | 89 | # vim projects/actc-demos/bzip2-1.0.6/actc/bzip2_linux.json 90 | 91 | Code guards is a technique that consists of a source-to-source transformation 92 | and a binary transformation. First, we enable the source-to-source part. We 93 | search for `"SLP08"`, which is the name of the source step responsible for 94 | this. We see that the `"traverse"` is currently set to `true`, which means that 95 | the action for this step simply copies the files from the input directory to 96 | the output directory, rather than applying the action in this step. 
Thus, set 97 | this action to `false`. 98 | 99 | Next, we have to edit the configuration where the binary protection techniques 100 | are described. Search for `"BLP04"`, which is the name of the final binary 101 | protection step in the ACTC. In the configuration for this step, we will now 102 | enable all the aforementioned protections: set `"obfuscations"` and 103 | `"call_stack_check"` to `true` (the binary part of the code guards protection 104 | is automatically enabled or disabled depending on how we configured the 105 | `"SLP08"` step). We can now run the ACTC as before: 106 | 107 | # ./docker/run.sh -d -f /projects/actc-demos/bzip2-1.0.6/actc/bzip2_linux.json 108 | 109 | This again produces the protected binary in 110 | `projects/actc-demos/bzip2-1.0.6/actc/build/bzip2_linux/BC05/bzip2`. This 111 | time, you can check the log files in the 112 | `projects/actc-demos/bzip2-1.0.6/actc/build/bzip2_linux/BC05/` directory to 113 | verify that the protections have indeed been applied. For example, the file 114 | `bzip2.diablo.obfuscation.log` contains information of which binary 115 | obfuscations have been applied to which code fragments. 116 | 117 | ## Running the ACTC with code mobility 118 | Now that we have created a version of our binary with offline protections 119 | applied, it is time to apply our first online protection called code mobility. 120 | This will split off binary code fragments from the application, and replace 121 | them with stubs that at run time ask for these code fragments to be downloaded 122 | from a protection server. Only once they are downloaded at run time, are these 123 | code fragments executed. 124 | 125 | The first step is to enable this protection in the JSON configuration. There 126 | are two steps needed to enable this: enabling the technique itself, and 127 | configuring the server. To enable the technique, simply change the value of 128 | `"code_mobility"` to `true` in the `"BLP04"` section of the configuration file. 129 | 130 | To configure the server in the JSON configuration file, go to the 131 | `"COMPILE_ACCL"` section of the configuration, and modify the value of 132 | `"endpoint"` to be the IP of the machine on which your Docker container is 133 | running. 134 | 135 | Again, run the ACTC as before: 136 | 137 | # ./docker/run.sh -d -f /projects/actc-demos/bzip2-1.0.6/actc/bzip2_linux.json 138 | 139 | You can first verify that the code mobility protection was applied by 140 | inspecting the log file called `bzip2.diablo.code_mobility.log`. Next, verify 141 | that mobile code blocks were indeed produced. If you look up at the output of 142 | the ACTC, near the end it will write information to the terminal similar to: 143 | 144 | . SERVER_P20 145 | code mobility /projects/actc-demos/bzip2-1.0.6/actc/build/bzip2_linux/BC05/mobile_blocks 146 | 147 | /opt/code_mobility/deploy_application.sh -a D7846E47BB09D62A2824CA9CF5000AE8 -p 20 -i YOUR_IP_HERE /projects/actc-demos/bzip2-1.0.6/actc/build/bzip2_linux/BC05/mobile_blocks && touch /projects/actc-demos/bzip2-1.0.6/actc/build/bzip2_linux/BC05/mobile_blocks/.p20done 148 | 149 | APPLICATION ID = D7846E47BB09D62A2824CA9CF5000AE8 150 | . SERVER_RENEWABILITY_CREATE 151 | 152 | This indicates that the ACTC is deploying the mobile blocks to the location in 153 | which the code mobility server expects these blocks to be. 
This location 154 | depends on the `APPLICATION ID` (AID) mentioned in the output of the ACTC: they 155 | are located inside the container at 156 | `/opt/online_backends//code_mobility/00000000/`: 157 | 158 | # docker-compose exec actc bash 159 | root@b343a3897ad4:/projects# 160 | root@b343a3897ad4:/projects# ls /opt/online_backends/D7846E47BB09D62A2824CA9CF5000AE8/code_mobility/00000000/ 161 | mobile_dump_00000000 mobile_dump_00000001 mobile_dump_00000002 mobile_dump_00000003 mobile_dump_00000004 mobile_dump_00000005 mobile_dump_00000006 mobile_dump_00000007 mobile_dump_00000008 mobile_dump_00000009 mobile_dump_0000000a source.txt 162 | 163 | As you can see in the source code, the code mobility annotation was applied to 164 | the `uncompress` function. So if you now copy the protected file to an ARM 165 | board and try to decompress a file, the code mobility protection will be 166 | triggered. You can afterwards verify the logs in the server to see which blocks 167 | were requested: 168 | 169 | # scp projects/actc-demos/bzip2-1.0.6/actc/build/bzip2_linux/BC05/bzip2 user@armboard:. 170 | # ssh user@armboard 171 | user@armboard:~$ dd if=/dev/zero of=./zeroes bs=1M count=10 ; bzip2 zeroes 172 | 10+0 records in 173 | 10+0 records out 174 | 10485760 bytes (10 MB) copied, 0.080404 s, 130 MB/s 175 | user@armboard:~$ ./bzip2 -d zeroes.bz2 176 | user@armboard:~$ logout 177 | # tail /opt/online_backends/code_mobility/mobility_server.log 178 | Fri Nov 25 15:02:24 2016 [Code Mobility Server] Actual revision for app_id D7846E47BB09D62A2824CA9CF5000AE8 is 00000000 179 | 180 | Fri Nov 25 15:02:24 2016 [Code Mobility Server] BLOCK_ID 8 requested 181 | 182 | Fri Nov 25 15:02:24 2016 [Code Mobility Server] BLOCK_ID 8 (filename: /opt/online_backends/D7846E47BB09D62A2824CA9CF5000AE8/code_mobility/00000000/mobile_dump_00000008) is going to be served. 183 | 184 | Fri Nov 25 15:02:24 2016 [Code Mobility Server] BLOCK_ID 8 is 52 bytes long. 185 | 186 | Fri Nov 25 15:02:24 2016 [Code Mobility Server] BLOCK_ID 8 correctly sent to ASPIRE Portal. 187 | 188 | **Warning:** the server ports of the ASPIRE servers should not be firewalled on 189 | your Docker machine. Similarly, if your Docker is running inside a virtual 190 | machine, your virtual machine monitor should forward these ports to the VM 191 | itself. The ports are: port 8088, all ports between 8090 to 8099 (inclusive), 192 | and port 18001. 193 | 194 | ## Running the ACTC with remote attestation 195 | Now that we have protected the application with code mobility, we will enable 196 | another online protection technique called remote attestation. This technique 197 | uses a server to verify the integrity of code fragments during the application 198 | execution. The application connects to the server, which then instructs the 199 | application to send attestations of certain code regions back to the server. 200 | The results of these attestations can be linked to the code mobility protection 201 | technique to stop serving code to applications that have been compromised. 202 | 203 | First, we enable the remote attestation step in the ACTC configuration file. 204 | The name of the step source-to-source part of the remote attestation protection 205 | is named SLP07. So, to enable remote attestation, simply change the 206 | `"excluded"` value in the `"SLP07"` section of the configuration file to 207 | `false`. The binary part of this protection technique is again automatically 208 | enabled and disabled based on the value in `"SLP07"`. 
209 | 210 | Again, to build the protected application with the ACTC, just run as before: 211 | 212 | # ./docker/run.sh -d -f /projects/actc-demos/bzip2-1.0.6/actc/bzip2_linux.json 213 | 214 | The output of the ACTC immediately shows that the remote attestation was deployed: 215 | 216 | Add attestator in the DB (nr: 00000000000000000000, name: remote_ra_region, f: 10) 217 | Generating inital 100 prepared data for current attestator (launching extractor) 218 | Attestator inserted with ID: 5 219 | Inserting startup areas in the DB, found 1 areas 220 | Startup area: 0 221 | 222 | **** RA application components deployed on server successfully **** 223 | 224 | To verify that the binary itself indeed connects to the protection server of 225 | the remote attestation technique, and that this server indeed receives valid 226 | attestations, we can again copy the protected binary to a development board, 227 | run it to compress and decompress the file we created earlier to demonstrate 228 | code mobility, and check the attestation logs on the server: 229 | 230 | # scp projects/actc-demos/bzip2-1.0.6/actc/build/bzip2_linux/BC05/bzip2 user@armboard:. 231 | # ssh user@armboard 232 | user@armboard:~$ ./bzip2 zeroes ; ./bzip2 -d zeroes.bz2 233 | [1927760:9055] NOTICE: Initial logging level 7 234 | <... some additional logging information about the connection is displayed ...> 235 | user@armboard:~$ logout 236 | # tail /opt/online_backends/remote_attestation/verifier.log 237 | 03E2A03010E28DBB00E3922A020A02D3 238 | 75E190009DE39330F8E3FE305CE19F00 239 | 00E5933078EB014D9AE1FE2C45E39CCA 240 | 38EB00FFD40A5330AFEB02490AE19220 241 | 9DE30300BCE39CCA06E2A03027E1900A 242 | 03E24F3848E2911A00E58D1010E30D20 243 | 03E2900A0EE3A0DB 244 | (Verifier) Response verification result: SUCCESS 245 | (Verifier) Response verified in = 0.001435 s 246 | (Verifier) Execution finished at: Fri Nov 25 15:55:07 2016 247 | 248 | This shows that the protected application indeed connected to the server, and 249 | that the server sent an annotation request back to this client, and that this 250 | client successfully responded to that request. 251 | 252 | ## Doing development with this Docker container 253 | 254 | When you're playing with the Docker container and want to edit the sources of 255 | one of the tools, it can be handy to have changes made in your host immediately 256 | propagate to the Docker, and vice versa. To make this easier, we have provided 257 | the option of running the container in development mode. This can be started by 258 | running the development script, and then running the ACTC inside the container: 259 | 260 | # ./docker/development.sh 261 | root@b343a3897ad4:/projects# 262 | root@b343a3897ad4:/projects# /opt/ACTC/actc.py -d -f /projects/actc-demos/bzip2-1.0.6/actc/bzip2_linux.json 263 | 264 | The development script works by setting up Docker volumes volumes in 265 | `/opt/development` that refer to the directories of all the tools on your host. 266 | `/opt/framework` is then updated to be a symlink to `/opt/development`, and 267 | Diablo is built from (development) source. This build happens on a named volume 268 | (located at /build in the Docker) so that it has to happen only once, and 269 | incremental builds are easy. 
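As a concrete example of such an incremental build: after editing the Diablo sources on your host, you can rebuild Diablo and rerun the ACTC from inside the development shell. The commands below are a sketch that assumes `/build/diablo` was already created by a previous run of the development script (only that first run invokes cmake; later runs reuse the existing build tree on the named volume):

    root@b343a3897ad4:/projects# cd /build/diablo
    root@b343a3897ad4:/build/diablo# make -j$(nproc) install
    root@b343a3897ad4:/build/diablo# cd /projects
    root@b343a3897ad4:/projects# /opt/ACTC/actc.py -d -f /projects/actc-demos/bzip2-1.0.6/actc/bzip2_linux.json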
270 | 271 | ## Further reading 272 | 273 | These are some documents describing parts of the framework and its components in more detail: 274 | 275 | * [Reference architecture](https://aspire-fp7.eu/sites/default/files/D1.04-ASPIRE-Reference-Architecture-v2.1.pdf) Describes the architectural design of the ASPIRE protections and the communication logic for the client and server components of the protections. 276 | * [ASPIRE Framework report](https://aspire-fp7.eu/sites/default/files/D5.11-ASPIRE-Framework-Report.pdf) Documents the ASPIRE tool chain and decision support components in exhaustive detail. This document furthermore documents the supported protection annotations. 277 | * [ASPIRE Open Source Manual](https://aspire-fp7.eu/sites/default/files/D5.13-ASPIRE-Open-Source-Manual.pdf) Contains manuals for the framework (which is an older version of this README), and documentation of the ADSS Full. 278 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | actc: 4 | build: 5 | context: . 6 | dockerfile: ./docker/actc/Dockerfile 7 | expose: 8 | - "80" 9 | links: 10 | - mysql 11 | - portal 12 | - ra_manager 13 | - renewability_manager 14 | security_opt: 15 | - seccomp=unconfined # Allow gdb debugging 16 | volumes: 17 | - "./projects:/projects" 18 | - "build:/build" 19 | - "online_backends:/opt/online_backends" 20 | - "root_home:/root" 21 | working_dir: "/projects" 22 | mysql: 23 | environment: 24 | - MYSQL_ROOT_PASSWORD=aspire 25 | image: mysql:5.5 26 | expose: 27 | - "3306" 28 | # Set up databases. 29 | volumes: 30 | - "./modules/remote_attestation/setup/:/docker-entrypoint-initdb.d/remote_attestation" 31 | - "./modules/renewability/setup/:/docker-entrypoint-initdb.d/renewability" 32 | - "./docker/mysql/mysql-setup.sh:/docker-entrypoint-initdb.d/setup.sh" 33 | portal: 34 | build: 35 | context: . 36 | dockerfile: ./docker/portal/Dockerfile 37 | links: 38 | - mysql 39 | ports: 40 | - "8088:80" 41 | volumes: 42 | - "online_backends:/opt/online_backends" 43 | ra_manager: 44 | build: 45 | context: . 46 | dockerfile: ./modules/remote_attestation/Dockerfile 47 | links: 48 | - mysql 49 | ports: 50 | - "8090-8099:8090-8099" 51 | volumes: 52 | - "online_backends:/opt/online_backends" 53 | renewability_manager: 54 | build: 55 | context: . 56 | dockerfile: ./modules/renewability/Dockerfile 57 | links: 58 | - mysql 59 | ports: 60 | - "18001:18001" 61 | volumes: 62 | - "online_backends:/opt/online_backends" 63 | volumes: 64 | build: 65 | online_backends: 66 | root_home: 67 | -------------------------------------------------------------------------------- /docker/actc/Dockerfile: -------------------------------------------------------------------------------- 1 | # Build Diablo 2 | FROM ubuntu:16.04 as diablo-builder 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | 5 | # Install the required packages 6 | RUN \ 7 | apt-get update && \ 8 | apt-get install -y bison build-essential cmake flex g++-multilib 9 | 10 | COPY modules/diablo /tmp/diablo/ 11 | RUN \ 12 | mkdir -p /tmp/diablo/build/ && \ 13 | cd /tmp/diablo/build/ && \ 14 | cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/diablo -DUseInstallPrefixForLinkerScripts=on .. 
&& \ 15 | make -j$(nproc) install 16 | 17 | # Actual docker image 18 | FROM debian:stretch 19 | ARG DEBIAN_FRONTEND=noninteractive 20 | 21 | RUN \ 22 | # The i386, and installs of binutils-multiarch gcc-multilib zlib1g:i386 are workarounds for the 32 bit Android toolchain 23 | dpkg --add-architecture i386 && \ 24 | sed -i 's/# \(.*multiverse$\)/\1/g' /etc/apt/sources.list && \ 25 | apt-get update && \ 26 | apt-get install -y htop man ssh vim wget && \ 27 | apt-get install -y binutils-multiarch gcc-multilib gfortran zlib1g:i386 && \ 28 | # ACTC \ 29 | apt-get install -y binutils-dev default-libmysqlclient-dev libwebsockets-dev mysql-client openjdk-8-jre-headless python python-pip && \ 30 | pip install doit==0.29.0 && \ 31 | # UWSGI interface \ 32 | apt-get install -y uwsgi-plugin-python && \ 33 | # Development \ 34 | apt-get install -y bison cmake flex g++-multilib gdb 35 | 36 | # Install the prebuilts 37 | COPY docker/actc/install_prebuilts.sh docker/actc/patch_gcc.sh /tmp/ 38 | RUN /tmp/install_prebuilts.sh 39 | 40 | # Install Diablo 41 | COPY --from=diablo-builder /opt/diablo /opt/diablo 42 | 43 | # Copy the modules and install them 44 | RUN mkdir -p /opt/framework_buildtime && ln -s /opt/framework_buildtime /opt/framework 45 | COPY modules/ /opt/framework/ 46 | COPY docker/actc/install_modules.sh /tmp/ 47 | RUN /tmp/install_modules.sh 48 | 49 | # Install the UWSGI service 50 | COPY docker/actc/service/ /opt/service 51 | CMD [ "uwsgi", "--http-socket", ":80", "--ini", "/opt/service/uwsgi.ini" ] 52 | 53 | # Clean up 54 | RUN rm -rf /tmp/* 55 | -------------------------------------------------------------------------------- /docker/actc/development.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | set -o pipefail 4 | set -o nounset 5 | 6 | # Save PWD 7 | OLD_PWD=$PWD 8 | 9 | if [ ! -d /opt/development ] 10 | then 11 | echo "/opt/development needs to be mounted!" 12 | exit -1 13 | fi 14 | 15 | # Replace the /opt/framework link so we get all source code from the mounts 16 | rm /opt/framework 17 | ln -s /opt/development /opt/framework 18 | 19 | # Build diablo 20 | if [ ! -d /build/diablo ]; then 21 | mkdir -p /build/diablo 22 | cd /build/diablo 23 | cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=/opt/diablo -DUseInstallPrefixForLinkerScripts=on /opt/framework/diablo 24 | make -j$(nproc) install 25 | fi 26 | 27 | # Start the actual shell 28 | cd $OLD_PWD 29 | bash 30 | -------------------------------------------------------------------------------- /docker/actc/install_modules.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | set -o pipefail 4 | set -o nounset 5 | #set -o xtrace 6 | 7 | diablo_selfprofiling() { 8 | echo "Building Diablo-Selfprofiling..." 9 | /opt/framework/diablo/build_obj.sh /opt/diablo/obj 10 | } 11 | 12 | # Set up the symlinks for the modules that don't require anything special. 13 | setup_symlinks() { 14 | echo "Setting up symlinks..." 15 | ln -s /opt/framework/actc/src/ /opt/ACTC 16 | ln -s /opt/framework/annotation_extractor /opt/annotation_extractor 17 | ln -s /opt/framework/code_guards /opt/codeguard 18 | } 19 | 20 | accl() { 21 | echo " Building ACCL..." 22 | /opt/framework/accl/build.sh /opt/ACCL 23 | } 24 | 25 | anti_debugging() { 26 | echo "Building anti_debugging..." 27 | /opt/framework/anti_debugging/build.sh /opt/anti_debugging 28 | } 29 | 30 | code_mobility() { 31 | echo "Building code mobility..." 
32 | /opt/framework/code_mobility/build.sh /opt/code_mobility 33 | } 34 | 35 | renewability() { 36 | echo "Building renewability..." 37 | /opt/framework/renewability/build.sh /opt/renewability 38 | } 39 | 40 | remote_attestation() { 41 | echo "Building remote attestation..." 42 | /opt/framework/remote_attestation/build.sh /opt/remote_attestation 43 | } 44 | 45 | setup_symlinks 46 | 47 | [ -d /opt/framework/anti_debugging ] && anti_debugging 48 | diablo_selfprofiling 49 | accl 50 | code_mobility 51 | renewability 52 | remote_attestation 53 | -------------------------------------------------------------------------------- /docker/actc/install_prebuilts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | set -o pipefail 4 | set -o nounset 5 | #set -o xtrace 6 | 7 | toolchains() { 8 | echo "Installing toolchains..." 9 | 10 | if [ ! -f /opt/diablo-gcc-toolchain ] 11 | then 12 | wget -O /tmp/linux-gcc-4.8.1.tar.bz2 https://diablo.elis.ugent.be/sites/diablo/files/toolchains/diablo-binutils-2.23.2-gcc-4.8.1-eglibc-2.17.tar.bz2 && \ 13 | mkdir -p /opt/diablo-gcc-toolchain && \ 14 | cd /opt/diablo-gcc-toolchain && \ 15 | tar xvf /tmp/linux-gcc-4.8.1.tar.bz2 && \ 16 | /tmp/patch_gcc.sh /opt/diablo-gcc-toolchain/ 17 | fi 18 | 19 | if [ ! -f /opt/diablo-android-gcc-toolchain ] 20 | then 21 | wget -O /tmp/android-gcc-4.8.tar.bz2 https://diablo.elis.ugent.be/sites/diablo/files/toolchains/diablo-binutils-2.23.2-gcc-4.8.1-android-API-18.tar.bz2 && \ 22 | mkdir -p /opt/diablo-android-gcc-toolchain && \ 23 | cd /opt/diablo-android-gcc-toolchain && \ 24 | tar xvf /tmp/android-gcc-4.8.tar.bz2 && \ 25 | /tmp/patch_gcc.sh /opt/diablo-android-gcc-toolchain/ 26 | fi 27 | } 28 | 29 | thirdparty() { 30 | echo "Installing third-party libraries..." 
31 | 32 | mkdir -p /opt/3rd_party 33 | cd /opt/3rd_party 34 | wget -P /tmp/ https://diablo.elis.ugent.be/sites/diablo/files/prebuilt/curl-7.45.0-prebuilt.tar.bz2 35 | wget -P /tmp/ https://diablo.elis.ugent.be/sites/diablo/files/prebuilt/libwebsockets-1.5-prebuilt.tar.bz2 36 | wget -P /tmp/ https://diablo.elis.ugent.be/sites/diablo/files/prebuilt/openssl-1.0.2d-prebuilt.tar.bz2 37 | tar xvf /tmp/curl-7.45.0-prebuilt.tar.bz2 38 | tar xvf /tmp/libwebsockets-1.5-prebuilt.tar.bz2 39 | tar xvf /tmp/openssl-1.0.2d-prebuilt.tar.bz2 40 | } 41 | 42 | toolchains 43 | thirdparty 44 | -------------------------------------------------------------------------------- /docker/actc/patch_gcc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -u -e 4 | NEWPATH=$1 5 | 6 | for f in `grep -lr "DIABLO_TOOLCHAIN_PATH" .` 7 | do 8 | # only process text files 9 | if [ -n "`file $f | grep text`" ]; then 10 | echo "Patching file $f" 11 | 12 | sed -i "s:DIABLO_TOOLCHAIN_PATH:${NEWPATH}:g" $f 13 | fi 14 | done 15 | -------------------------------------------------------------------------------- /docker/actc/service/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | def response(start_response, status, message): 5 | response_status = status 6 | response_body = message 7 | response_headers = [('Content-Length', str(len(response_body)))] 8 | 9 | start_response(response_status, response_headers) 10 | return [response_body]; 11 | 12 | def application(env, start_response): 13 | path = env['PATH_INFO'].split(os.sep) 14 | request_type = path[1] 15 | if not request_type: 16 | return response(start_response, '400 Bad Request', 'No request type specified!') 17 | 18 | if request_type == 'renewability': 19 | # Get the path to the renewability script and validate it 20 | rs_path = os.path.join(os.sep, *path[2:-1]) 21 | name = os.path.basename(rs_path) 22 | if not name.startswith('generate_') or not name.endswith('.sh'): 23 | return response(start_response, '400 Bad Request', 'Invalid renewability script requested.') 24 | 25 | # Get the seed and validate it 26 | seed = path[-1] 27 | if not seed.isdigit(): 28 | return response(start_response, '400 Bad Request', 'Invalid seed for renewability script.') 29 | 30 | # Execute the script, and check its return code 31 | if subprocess.call([rs_path, seed]): 32 | return response(start_response, '500 Internal Server Error', 'Renewability script did not execute successfully.') 33 | 34 | return response(start_response, '200 OK', 'Script successfully executed.') 35 | else: 36 | return response(start_response, '400 Bad Request', 'Unknown request type.') 37 | -------------------------------------------------------------------------------- /docker/actc/service/uwsgi.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | logger = file:/opt/online_backends/actc.log 3 | master = true 4 | plugins = python,logfile 5 | processes = 10 6 | uid = root 7 | wsgi-file = /opt/service/main.py 8 | -------------------------------------------------------------------------------- /docker/development.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | set -o pipefail 4 | set -o nounset 5 | 6 | ADDITIONALVOLUMES="" 7 | add_volume() { 8 | vol=$1 9 | export ADDITIONALVOLUMES="$ADDITIONALVOLUMES -v ${PWD}/${vol}:/opt/development/${vol}" 10 | } 11 | 12 | # Add a 
volume for every module 13 | cd modules 14 | for module in $(ls); 15 | do 16 | add_volume $module 17 | done 18 | cd .. 19 | 20 | # Add a volume for docker 21 | add_volume docker 22 | 23 | # Start the ACTC container with extra volumes, then enter the development shell. 24 | container_name=$(docker-compose run -d ${ADDITIONALVOLUMES} actc) 25 | docker exec --interactive --tty ${container_name} /opt/development/docker/actc/development.sh 26 | docker stop ${container_name} > /dev/null 27 | -------------------------------------------------------------------------------- /docker/mysql/mysql-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Kind of hacky script: We're supposed to put DB setup scripts into the /docker-entrypoint-initdb.d/ 3 | # directory, but as these scripts aren't actually executed, but sourced, we have some issues with 4 | # them finding the extra files they depend on (such as SQL scripts). Therefore we use this intermediate 5 | # script to do the actual invoking of setup scripts. 6 | 7 | cd /docker-entrypoint-initdb.d/ 8 | 9 | for f in $(ls); do 10 | [ -f $f/database_setup.sh ] && $f/database_setup.sh || true 11 | done 12 | -------------------------------------------------------------------------------- /docker/portal/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiangolo/uwsgi-nginx:python2.7 2 | ARG DEBIAN_FRONTEND=noninteractive 3 | 4 | RUN apt-get update && apt-get install -y default-libmysqlclient-dev 5 | 6 | # Build code mobility components 7 | COPY modules/code_mobility /tmp/code_mobility 8 | RUN /tmp/code_mobility/build_portal.sh /opt/code_mobility 9 | 10 | # Build remote attestation components 11 | COPY modules/remote_attestation /tmp/remote_attestation 12 | RUN /tmp/remote_attestation/build_portal.sh /opt/remote_attestation 13 | 14 | # Install the app 15 | COPY docker/portal/ /app 16 | 17 | # Clean up 18 | RUN rm -rf /tmp/* 19 | -------------------------------------------------------------------------------- /docker/portal/backends.json: -------------------------------------------------------------------------------- 1 | { 2 | "20": { 3 | "exchange": "/opt/code_mobility/mobility_server" 4 | }, 5 | "90":{ 6 | "exchange": "", 7 | "send": "/opt/remote_attestation/ra_forwarder" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /docker/portal/main.py: -------------------------------------------------------------------------------- 1 | import json 2 | from subprocess import Popen, PIPE 3 | 4 | ACCL_TID_CODE_SPLITTING = 10 5 | ACCL_TID_CODE_MOBILITY = 20 6 | ACCL_TID_DATA_MOBILITY = 21 7 | ACCL_TID_WBS = 30 8 | ACCL_TID_MTC_CRYPTO_SERVER = 40 9 | ACCL_TID_DIVERSIFIED_CRYPTO = 41 10 | ACCL_TID_CG_HASH_RANDOMIZATION = 50 11 | ACCL_TID_CG_HASH_VERIFICATION = 55 12 | ACCL_TID_CFGT_REMOVE_VERIFIER = 60 13 | ACCL_TID_AC_DECISION_LOGIC = 70 14 | ACCL_TID_AC_STATUS_LOGIC = 75 15 | ACCL_TID_RA_REACTION_MANAGER = 80 16 | ACCL_TID_TEST = 9999 17 | 18 | def error_response(start_response, message): 19 | response_status = '500 KO' 20 | response_body = message 21 | response_headers = [('Content-Length', str(len(response_body)))] 22 | 23 | start_response(response_status, response_headers) 24 | 25 | return [response_body]; 26 | 27 | def success_response(start_response, response): 28 | response_status = '200 OK' 29 | response_body = response 30 | response_headers = [('Content-Type', 'application/octet-stream'), 31 | 
('Content-Length', str(len(response)))] 32 | 33 | start_response(response_status, response_headers) 34 | 35 | return [response_body]; 36 | 37 | def application(environ, start_response): 38 | # the environment variable CONTENT_LENGTH may be empty or missing 39 | try: 40 | request_body_size = int(environ.get('CONTENT_LENGTH', 0)) 41 | except (ValueError): 42 | request_body_size = 0 43 | 44 | if request_body_size == 0: 45 | return error_response(start_response, 'payload size error') 46 | 47 | error = 0 48 | response_headers = [] 49 | response_body = '' 50 | 51 | # first element of the path is the Technique ID 52 | path = environ['PATH_INFO'].split('/') 53 | 54 | # if an element was provided 55 | if (len(path) > 3): 56 | try: 57 | # read the request type 58 | request_type = path[1] 59 | 60 | # read the technique id 61 | technique_id = path[2] 62 | 63 | # read the application id 64 | application_id = path[3] 65 | 66 | # load techniques definition JSON file 67 | backends_file = open('/app/backends.json') 68 | backends = json.load(backends_file) 69 | 70 | if request_type != 'exchange' and request_type != 'send': 71 | return error_response(start_response, 'invalid request type'); 72 | 73 | if not backends.has_key(str(technique_id)): 74 | return error_response(start_response, 'invalid technique ' 75 | + str(technique_id)); 76 | 77 | # read POSTed data (payload) 78 | request_body = environ['wsgi.input'].read(request_body_size) 79 | 80 | # read the name of backend service 81 | technique_backend = backends[technique_id][request_type]; 82 | 83 | # launch backend service passing the payload size as first param 84 | process_res = Popen([technique_backend, 85 | request_type, 86 | str(len(request_body)), 87 | application_id], 88 | stdout=PIPE, 89 | stdin=PIPE) 90 | 91 | # send the buffer to the backend service 92 | out, err = process_res.communicate(request_body) 93 | 94 | if process_res.returncode == 0: 95 | return success_response(start_response, out) 96 | 97 | return error_response(start_response, 'technique backend failed') 98 | except Exception as err: 99 | return error_response(start_response, err.message) 100 | else: 101 | return error_response(start_response, 'invalid arguments') 102 | 103 | return error_response(start_response, 'unknown') 104 | -------------------------------------------------------------------------------- /docker/portal/uwsgi.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | harakiri = 30 3 | logger = file:/opt/online_backends/aspire_service.log 4 | master = true 5 | processes = 10 6 | uid = root 7 | wsgi-file = /app/main.py 8 | -------------------------------------------------------------------------------- /docker/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | set -o pipefail 4 | set -o nounset 5 | 6 | DEMO_PROJECTS="${DEMO_PROJECTS:-yes}" 7 | if [ "${DEMO_PROJECTS}" == "yes" ] 8 | then 9 | if [ ! -d projects/actc-demos ] 10 | then 11 | mkdir -p projects 12 | cd projects 13 | git clone https://github.com/aspire-fp7/actc-demos 14 | cd .. 15 | else 16 | echo "Demo projects are already present in projects/actc-demos. 
(These will not be updated/re-installed...)" 17 | fi 18 | fi 19 | 20 | # Make sure all services run 21 | docker-compose up -d 22 | 23 | COMMAND="${@:1}" 24 | docker-compose exec actc /opt/ACTC/actc.py ${COMMAND} 25 | -------------------------------------------------------------------------------- /projects/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aspire-fp7/framework/99c9c32a2be28105845c9d708fd723464395dad4/projects/.gitkeep -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | set -o pipefail 4 | set -o nounset 5 | #set -o xtrace 6 | 7 | git submodule update --init --recursive 8 | 9 | echo 10 | read -r -p "Would you like to include the anti_debugging protection? (y/N)" response 11 | if [[ $response =~ ^([yY][eE][sS]|[yY])$ ]] 12 | then 13 | cd modules/ 14 | MODULES_DIR=$PWD 15 | git clone https://github.com/csl-ugent/anti-debugging.git anti_debugging 16 | cd anti_debugging 17 | git checkout d545755048763f6cb0597776de22b141eaf03bf9 18 | cp diablo/* $MODULES_DIR/diablo/aspire/self_debugging/ 19 | fi 20 | --------------------------------------------------------------------------------
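A final note on `setup.sh`: the script reads its single answer from standard input, so unattended or scripted setups can pipe the answer in rather than typing it at the prompt. This is a sketch under that assumption and behaves the same as answering interactively; the first command includes the anti_debugging module, the second initializes only the submodules:

    # echo y | ./setup.sh
    # echo | ./setup.sh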