├── data-plane ├── src │ ├── acme │ │ ├── mocks │ │ │ ├── mod.rs │ │ │ └── client_mock.rs │ │ ├── provider.rs │ │ ├── mod.rs │ │ ├── utils.rs │ │ ├── error.rs │ │ ├── client.rs │ │ ├── lock.rs │ │ └── account.rs │ ├── mocks │ │ ├── mod.rs │ │ └── config_client_mock.rs │ ├── stats │ │ ├── mod.rs │ │ └── proxy.rs │ ├── server │ │ ├── tls │ │ │ ├── mod.rs │ │ │ ├── trusted_cert_container.rs │ │ │ └── tls_server.rs │ │ ├── layers │ │ │ ├── mod.rs │ │ │ └── forward.rs │ │ ├── mod.rs │ │ ├── error.rs │ │ └── http │ │ │ └── mod.rs │ ├── utils │ │ ├── mod.rs │ │ ├── nsm.rs │ │ └── trx_handler.rs │ ├── dns │ │ ├── mod.rs │ │ └── error.rs │ ├── crypto │ │ ├── common.rs │ │ ├── mod.rs │ │ ├── rand.rs │ │ └── token.rs │ ├── base_tls_client │ │ ├── error.rs │ │ ├── server_cert_verifier.rs │ │ ├── tls_client_config.rs │ │ └── mod.rs │ ├── connection.rs │ ├── cache.rs │ ├── configuration.rs │ ├── cert_provisioner_client │ │ ├── tls_verifier.rs │ │ └── mod.rs │ ├── e3client │ │ └── mock.rs │ ├── time │ │ └── mod.rs │ ├── error.rs │ └── health │ │ └── mod.rs ├── README.md └── Cargo.toml ├── control-plane ├── src │ ├── mocks │ │ ├── mod.rs │ │ └── storage_client_mock.rs │ ├── clients │ │ └── mod.rs │ ├── lib.rs │ ├── stats │ │ ├── mod.rs │ │ ├── client.rs │ │ └── proxy.rs │ ├── acme_account_details.rs │ ├── error.rs │ ├── dns.rs │ └── e3proxy.rs ├── Dockerfile └── Cargo.toml ├── shared ├── src │ ├── rpc │ │ ├── mod.rs │ │ ├── error.rs │ │ └── request.rs │ ├── acme │ │ ├── mod.rs │ │ ├── helpers.rs │ │ └── error.rs │ ├── server │ │ ├── error.rs │ │ ├── mod.rs │ │ ├── vsock.rs │ │ ├── tcp.rs │ │ └── sni.rs │ ├── lib.rs │ ├── utils.rs │ ├── stats.rs │ └── notify_shutdown.rs └── Cargo.toml ├── .cargo └── config.toml ├── e2e-tests ├── scripts │ ├── start_mock_process │ ├── start_mock_cert_provisioner │ ├── start-data-plane.sh │ ├── start_customer_process │ └── start-control-plane.sh ├── acme-key │ └── key.pem ├── wsCustomerProcess.js ├── noTlsTests.js ├── mock-crypto │ ├── Cargo.toml │ 
└── src │ │ ├── encrypt_mock.rs │ │ └── main.rs ├── health-check-tests.js ├── noAuthTests.js ├── package.json ├── generate-sample-ca.sh ├── apiKeyAuthTests.js ├── websocketTests.js ├── mtls-testing-certs │ └── ca │ │ └── generate-certs.sh ├── run-local-cage.sh ├── run-tls-disabled-tests.sh ├── httpCustomerProcess.js ├── run-all-feature-tests.sh └── mockCertProvisionerApi.js ├── scripts ├── insert-data-plane-version.sh ├── start-cage.sh ├── health-check.sh ├── export-dev-env-vars.sh ├── update-installer-version.sh └── update-runtime-version.sh ├── .github ├── pull_request_template.md ├── fixtures │ └── test-installer-update.json ├── CODEOWNERS ├── workflows │ ├── deploy-runtime-installer-staging.yml │ ├── test-shared-lib.yml │ ├── deploy-runtime-installer-production.yml │ ├── test-control-plane.yml │ ├── lint.yml │ ├── test-runtime-builder.yml │ ├── test-data-plane.yml │ ├── test-e2e.yml │ ├── vsock-proxy.yml │ └── deploy-runtime-installer.yml └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── installer ├── action.yml ├── test.Dockerfile ├── test-alpine.Dockerfile ├── test-abyss.Dockerfile ├── scripts │ ├── test-installer.sh │ ├── installer.sh │ └── compile-runtime-dependencies.sh ├── Dockerfile ├── README.md └── config │ └── net-tools.h ├── Cargo.toml ├── .gitignore ├── crates └── vsock-proxy │ ├── README.md │ ├── Cargo.toml │ └── src │ └── main.rs ├── SECURITY.md ├── docker-compose.yml ├── enclave.Dockerfile ├── control-plane.Dockerfile └── README.md /data-plane/src/acme/mocks/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod client_mock; 2 | -------------------------------------------------------------------------------- /control-plane/src/mocks/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod storage_client_mock; 2 | -------------------------------------------------------------------------------- /data-plane/src/mocks/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub mod config_client_mock; 2 | -------------------------------------------------------------------------------- /shared/src/rpc/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod error; 2 | pub mod request; 3 | -------------------------------------------------------------------------------- /data-plane/src/stats/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | pub mod proxy; 3 | -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "x86_64-unknown-linux-musl" 3 | -------------------------------------------------------------------------------- /shared/src/acme/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod error; 2 | pub mod helpers; 3 | pub mod jws; 4 | -------------------------------------------------------------------------------- /control-plane/src/clients/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cert_provisioner; 2 | pub mod mtls_config; 3 | -------------------------------------------------------------------------------- /e2e-tests/scripts/start_mock_process: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "Starting mock crypto process" 4 | 5 | /services/mock-crypto -------------------------------------------------------------------------------- /scripts/insert-data-plane-version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | pattern="s/0.0.0-dev/$1/" 3 | sed -i -e "$pattern" ./data-plane/Cargo.toml -------------------------------------------------------------------------------- 
/data-plane/src/server/tls/mod.rs: -------------------------------------------------------------------------------- 1 | mod cert_resolver; 2 | mod tls_server; 3 | pub mod trusted_cert_container; 4 | pub use tls_server::*; 5 | -------------------------------------------------------------------------------- /data-plane/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "enclave")] 2 | pub mod nsm; 3 | #[cfg(feature = "tls_termination")] 4 | pub mod trx_handler; 5 | -------------------------------------------------------------------------------- /data-plane/src/server/layers/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "enclave")] 2 | pub mod attest; 3 | pub mod auth; 4 | pub mod context_log; 5 | pub mod decrypt; 6 | pub mod forward; 7 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Why 2 | Add a short description about this PR. 3 | Add links to issues, tech plans etc. 4 | 5 | # How 6 | Describe how you've approached the problem 7 | -------------------------------------------------------------------------------- /data-plane/src/dns/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "network_egress")] 2 | pub mod egressproxy; 3 | #[cfg(feature = "network_egress")] 4 | pub mod enclavedns; 5 | #[cfg(feature = "network_egress")] 6 | pub mod error; 7 | -------------------------------------------------------------------------------- /installer/action.yml: -------------------------------------------------------------------------------- 1 | name: "Compile Cage Runtime Installer" 2 | description: "Action to pull in the Cage Runtime dependencies, compile them from source, and bundle their binaries with an installer." 
3 | runs: 4 | using: "docker" 5 | image: "Dockerfile" 6 | -------------------------------------------------------------------------------- /data-plane/src/crypto/common.rs: -------------------------------------------------------------------------------- 1 | use sha2::Digest; 2 | 3 | pub fn compute_sha256(input: impl AsRef<[u8]>) -> Vec { 4 | let mut hasher = sha2::Sha256::new(); 5 | hasher.update(input.as_ref()); 6 | let hash_digest = hasher.finalize(); 7 | hash_digest.to_vec() 8 | } 9 | -------------------------------------------------------------------------------- /e2e-tests/acme-key/key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgyO6X5xZelTjtwXWc 3 | yaYQzG0FqzDveU3mn8770FGljDuhRANCAARUNSzOKr/Pw8EUvc2IDQIOGCzkxdXU 4 | qcLLSTAjsJTixygLQGVy8kyS0GC89Eec4mT9XGSAbGst8H03IGRZoLA0 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = ["control-plane","data-plane","shared","crates/*"] 4 | exclude = ["./e2e-tests/mock-crypto"] 5 | 6 | [workspace.dependencies] 7 | openssl = { version = "0.10.72", features = ["vendored"] } 8 | cadence = "1.5.0" 9 | cadence-macros = "1.5.0" -------------------------------------------------------------------------------- /e2e-tests/wsCustomerProcess.js: -------------------------------------------------------------------------------- 1 | const WebSocket = require('ws'); 2 | const wss = new WebSocket.Server({ port: 8008 }); 3 | 4 | 5 | wss.on('connection', (ws) => { 6 | ws.on('message', (messageAsString) => { 7 | ws.send("SERVER RECIEVED MESSAGE: " + messageAsString); 8 | }) 9 | }) -------------------------------------------------------------------------------- /data-plane/src/server/tls/trusted_cert_container.rs: 
-------------------------------------------------------------------------------- 1 | use once_cell::sync::Lazy; 2 | use std::sync::{Arc, RwLock}; 3 | use tokio_rustls::rustls::sign::CertifiedKey; 4 | 5 | pub static TRUSTED_CERT_STORE: Lazy>>> = 6 | Lazy::new(|| Arc::new(RwLock::new(None))); 7 | -------------------------------------------------------------------------------- /data-plane/src/crypto/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | #[cfg(feature = "enclave")] 3 | pub mod attest; 4 | #[cfg(feature = "enclave")] 5 | pub mod common; 6 | #[cfg(feature = "tls_termination")] 7 | pub mod parser; 8 | pub mod rand; 9 | #[cfg(feature = "tls_termination")] 10 | pub mod stream; 11 | pub mod token; 12 | -------------------------------------------------------------------------------- /data-plane/src/server/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod error; 2 | #[cfg(feature = "tls_termination")] 3 | pub mod http; 4 | #[cfg(feature = "tls_termination")] 5 | pub mod layers; 6 | #[cfg(feature = "tls_termination")] 7 | #[allow(clippy::module_inception)] 8 | pub mod server; 9 | #[cfg(feature = "tls_termination")] 10 | pub mod tls; 11 | -------------------------------------------------------------------------------- /.github/fixtures/test-installer-update.json: -------------------------------------------------------------------------------- 1 | { 2 | "after": "", 3 | "base_ref": "", 4 | "before": "", 5 | "commits": [], 6 | "compare": "", 7 | "created": "", 8 | "deleted": "", 9 | "forced": "", 10 | "head_commit": null, 11 | "pusher": {}, 12 | "ref": "refs/tags/installer/v1.0.0", 13 | "repository": {} 14 | } 15 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # This file is used to automatically request reviews on PRs. 
2 | 3 | # These owners will be the default owners for everything in 4 | # the repo, unless a match on a later line takes precedence. 5 | # "@evervault/product-engineering" below requests review from all members of Product Engineering. 6 | * @evervault/product-engineering -------------------------------------------------------------------------------- /control-plane/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM amazonlinux:2023 2 | RUN yum -y update 3 | RUN yum install -y aws-nitro-enclaves-cli 4 | 5 | COPY ./control-plane . 6 | RUN chmod a+x ./control-plane 7 | 8 | COPY ./scripts/health-check.sh . 9 | COPY ./scripts/start-cage.sh . 10 | RUN chmod a+x ./start-cage.sh 11 | 12 | EXPOSE 443 13 | ENTRYPOINT ["./start-cage.sh"] 14 | -------------------------------------------------------------------------------- /shared/src/rpc/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | extern crate rmp_serde as rmps; 3 | 4 | #[derive(Error, Debug)] 5 | pub enum RpcError { 6 | #[error("An error occured while decoding the message - {0:?}")] 7 | DecodeError(#[from] rmps::decode::Error), 8 | #[error("An error occured while encoding the message - {0:?}")] 9 | EncodeError(#[from] rmps::encode::Error), 10 | } 11 | -------------------------------------------------------------------------------- /e2e-tests/scripts/start_mock_cert_provisioner: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "Running mock cert provisioner startup script." 
4 | 5 | export MOCK_CERT_PROVISIONER_SERVER_KEY=$MOCK_CERT_PROVISIONER_SERVER_KEY 6 | export MOCK_CERT_PROVISIONER_SERVER_CERT=$MOCK_CERT_PROVISIONER_SERVER_CERT 7 | export MOCK_CERT_PROVISIONER_ROOT_CERT=$MOCK_CERT_PROVISIONER_ROOT_CERT 8 | 9 | node /services/mockCertProvisionerApi.js -------------------------------------------------------------------------------- /installer/test.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=amd64 ubuntu 2 | 3 | RUN mkdir -p /opt/evervault 4 | COPY output/runtime-dependencies.tar.gz /opt/evervault 5 | RUN cd /opt/evervault ; \ 6 | gunzip runtime-dependencies.tar.gz ; \ 7 | tar -xf runtime-dependencies.tar ; \ 8 | sh installer.sh 9 | 10 | COPY scripts/test-installer.sh /test-installer.sh 11 | 12 | ENTRYPOINT [ "sh", "/test-installer.sh" ] -------------------------------------------------------------------------------- /installer/test-alpine.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=amd64 alpine 2 | 3 | RUN mkdir -p /opt/evervault 4 | COPY output/runtime-dependencies.tar.gz /opt/evervault 5 | RUN cd /opt/evervault ; \ 6 | gunzip runtime-dependencies.tar.gz ; \ 7 | tar -xf runtime-dependencies.tar ; \ 8 | sh installer.sh 9 | 10 | COPY scripts/test-installer.sh /test-installer.sh 11 | 12 | ENTRYPOINT [ "sh", "/test-installer.sh" ] -------------------------------------------------------------------------------- /installer/test-abyss.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=amd64 autamus/abyss:latest 2 | 3 | RUN mkdir -p /opt/evervault 4 | COPY output/runtime-dependencies.tar.gz /opt/evervault 5 | RUN cd /opt/evervault ; \ 6 | gunzip runtime-dependencies.tar.gz ; \ 7 | tar -xf runtime-dependencies.tar ; \ 8 | sh installer.sh 9 | 10 | COPY scripts/test-installer.sh /test-installer.sh 11 | 12 | ENTRYPOINT [ "sh", 
"/test-installer.sh" ] -------------------------------------------------------------------------------- /e2e-tests/noTlsTests.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const axios = require('axios').default; 3 | 4 | describe('GET env from enclave', () => { 5 | it('returns the injected environment var', async () => { 6 | const result = await axios.get('http://enclave.localhost:443/env', { headers: { 'api-key': 'placeholder' } }) 7 | expect("123").to.deep.equal(result.data.ANOTHER_ENV_VAR) 8 | }); 9 | }); -------------------------------------------------------------------------------- /scripts/start-cage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | TOKEN=`curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 60" -sL` 3 | INSTANCE_ID=$(curl http://169.254.169.254/latest/dynamic/instance-identity/document -H "X-aws-ec2-metadata-token: $TOKEN" | jq -r ."instanceId") 4 | 5 | export EC2_INSTANCE_ID=${INSTANCE_ID} 6 | 7 | # Boot control plane 8 | echo "[HOST] Starting control plane..." 
9 | exec ./control-plane -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | target 3 | */target 4 | e2e-tests/node_modules 5 | e2e-tests/customer-process/node_modules 6 | data-plane/*.pem 7 | e2e-tests/sample-ca 8 | e2e-tests/testing-certs/* 9 | e2e-tests/mtls-testing-certs/ca/* 10 | !e2e-tests/mtls-testing-certs/ca/*.sh 11 | e2e-tests/mock-crypto/Cargo.lock 12 | e2e-tests/health-check-response.txt 13 | ci/**/node_modules 14 | .vscode 15 | logs.txt 16 | certs/ 17 | installer/output 18 | -------------------------------------------------------------------------------- /e2e-tests/scripts/start-data-plane.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo {\"api_key_auth\":${EV_API_KEY_AUTH},\"egress\":{\"allow_list\": \"jsonplaceholder.typicode.com\"},\"trx_logging_enabled\":true,\"forward_proxy_protocol\":false,\"trusted_headers\": []} > /etc/dataplane-config.json 4 | 5 | iptables -A OUTPUT -t nat -p tcp --dport 443 ! 
-d 127.0.0.1 -j DNAT --to-destination 127.0.0.1:4444 6 | 7 | SYSTEM_STATS_INTERVAL=1 exec $DATA_PLANE_EXECUTABLE_PATH 8 | -------------------------------------------------------------------------------- /control-plane/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod acme_account_details; 2 | pub mod clients; 3 | pub mod config_server; 4 | pub mod configuration; 5 | pub mod dns; 6 | #[cfg(feature = "network_egress")] 7 | pub mod dnsproxy; 8 | pub mod e3proxy; 9 | #[cfg(feature = "network_egress")] 10 | pub mod egressproxy; 11 | pub mod error; 12 | pub mod health; 13 | pub mod orchestration; 14 | pub mod stats; 15 | pub mod tls_proxy; 16 | 17 | #[cfg(test)] 18 | pub mod mocks; 19 | -------------------------------------------------------------------------------- /installer/scripts/test-installer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | # Rudimentary test script to ensure all binaries are available on multiple distros 6 | 7 | # Cannot use runit directly 8 | command -v runit 9 | 10 | command -v ifconfig 11 | # Run base command (list interfaces) 12 | ifconfig 13 | 14 | command -v iptables 15 | iptables -h 16 | 17 | command -v ip6tables 18 | ip6tables -h 19 | 20 | command -v ip 21 | # List available addresses 22 | ip addr -------------------------------------------------------------------------------- /crates/vsock-proxy/README.md: -------------------------------------------------------------------------------- 1 | # vsock-proxy 2 | 3 | A utility crate for proxying connections between TCP and Vsock. 
4 | 5 | ## Install 6 | 7 | ```sh 8 | cargo install vsock-proxy 9 | ``` 10 | 11 | ## Examples 12 | 13 | ### Proxy from TCP to VSock 14 | 15 | ```sh 16 | vsock-proxy --tcp-source 127.0.0.1:8008 --vsock-dest 1234:5678 17 | ``` 18 | 19 | ### Proxy from VSock to TCP 20 | 21 | ```sh 22 | vsock-proxy --vsock-source 3:6789 --tcp-dest 127.0.0.1:5000 23 | ``` -------------------------------------------------------------------------------- /e2e-tests/scripts/start_customer_process: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | CUSTOMER_PROCESS="$1" 3 | 4 | # Wait for environment to be placed in env faile before starting process 5 | while ! grep -q "EV_INITIALIZED" /etc/customer-env; 6 | do 7 | echo "Environment not ready, sleeping user process for one second"; 8 | sleep 1; 9 | done 10 | 11 | echo "Environment ready.. Starting user process $CUSTOMER_PROCESS" 12 | 13 | source /etc/customer-env 14 | node /services/${CUSTOMER_PROCESS} -------------------------------------------------------------------------------- /data-plane/src/acme/mocks/client_mock.rs: -------------------------------------------------------------------------------- 1 | use crate::acme::{client::AcmeClientInterface, error::AcmeError}; 2 | use async_trait::async_trait; 3 | use hyper::{Body, Response}; 4 | use mockall::mock; 5 | 6 | mock! 
{ 7 | #[derive(Debug, Clone)] 8 | pub AcmeClientInterface {} 9 | 10 | #[async_trait] 11 | impl AcmeClientInterface for AcmeClientInterface { 12 | async fn send(&self, request: hyper::Request) -> Result, AcmeError>; 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /data-plane/src/base_tls_client/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | #[derive(Debug, Error)] 4 | pub enum ClientError { 5 | #[error("IO Error — {0:?}")] 6 | IoError(#[from] std::io::Error), 7 | #[error("Hyper Error — {0:?}")] 8 | HyperError(#[from] hyper::Error), 9 | #[error("Deserialization Error — {0:?}")] 10 | SerdeError(#[from] serde_json::Error), 11 | #[error("Request to server failed with status: {0:?}")] 12 | FailedRequest(hyper::StatusCode), 13 | #[error("Client Error {0:?}")] 14 | General(String), 15 | } 16 | -------------------------------------------------------------------------------- /data-plane/README.md: -------------------------------------------------------------------------------- 1 | # Enclave Data Plane 2 | 3 | The data plane is the Evervault managed process which runs within the Enclave to terminate TLS, perform decryption, and 4 | proxy any network egress. 5 | 6 | ## Local Development 7 | 8 | If running the data-plane locally, you will need to generate a cert and private key. 
9 | 10 | We suggest using [mkcert](https://github.com/FiloSottile/mkcert): 11 | 12 | ```shell 13 | # install mkcert as a trusted CA 14 | mkcert -install 15 | 16 | # generate a cert and key for the data-plane 17 | mkcert data-plane.localhost 18 | ``` 19 | -------------------------------------------------------------------------------- /crates/vsock-proxy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "vsock-proxy" 3 | version = "0.1.2" 4 | edition = "2021" 5 | license = "MIT" 6 | description = "A minimal CLI to proxy TCP traffic to or from VSock" 7 | authors = ["Evervault Engineering "] 8 | readme = "README.md" 9 | 10 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 11 | 12 | [dependencies] 13 | async-trait = "0.1.73" 14 | clap = "4.4.6" 15 | pin-project = "1.1.3" 16 | thiserror = "1.0.30" 17 | tokio = { version = "1.12.0", features = ["net", "rt", "io-util"] } 18 | tokio-vsock = "0.3.2" -------------------------------------------------------------------------------- /.github/workflows/deploy-runtime-installer-staging.yml: -------------------------------------------------------------------------------- 1 | name: Deploy installer to staging 2 | 3 | on: 4 | push: 5 | paths: 6 | - "installer/**" 7 | branches: 8 | - "main" 9 | permissions: 10 | id-token: write 11 | contents: read 12 | 13 | jobs: 14 | build-and-deploy: 15 | uses: ./.github/workflows/deploy-runtime-installer.yml 16 | with: 17 | stage: "staging" 18 | version: 1 19 | secrets: 20 | AWS_ENCLAVES_OIDC_ROLE_ARN: ${{ secrets.AWS_ENCLAVES_STAGING_OIDC_ROLE_ARN }} 21 | AWS_CLOUDFRONT_DISTRIBUTION_ID: ${{ secrets.AWS_CLOUDFRONT_DISTRIBUTION_STAGING }} 22 | -------------------------------------------------------------------------------- /control-plane/src/stats/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | pub mod proxy; 3 | 4 | 
/// Port with statsd listener for internal metrics 5 | pub const INTERNAL_METRIC_PORT: u16 = 8125; 6 | /// Port with statsd listener for external customer metrics 7 | pub const EXTERNAL_METRIC_PORT: u16 = 8126; 8 | 9 | #[cfg(not(feature = "enclave"))] 10 | pub fn get_stats_target_ip() -> std::net::IpAddr { 11 | std::net::IpAddr::V4(std::net::Ipv4Addr::new(172, 20, 0, 6)) 12 | } 13 | 14 | #[cfg(feature = "enclave")] 15 | pub fn get_stats_target_ip() -> std::net::IpAddr { 16 | std::net::IpAddr::V4(std::net::Ipv4Addr::new(127, 0, 0, 1)) 17 | } 18 | -------------------------------------------------------------------------------- /control-plane/src/mocks/storage_client_mock.rs: -------------------------------------------------------------------------------- 1 | use storage_client_interface::{StorageClientError, StorageClientInterface}; 2 | 3 | use async_trait::async_trait; 4 | use mockall::mock; 5 | 6 | mock! { 7 | #[derive(Debug, Clone)] 8 | pub StorageClientInterface {} 9 | #[async_trait] 10 | impl StorageClientInterface for StorageClientInterface { 11 | async fn get_object(&self, key: String) -> Result, StorageClientError>; 12 | async fn put_object(&self, key: String, body: String) -> Result<(), StorageClientError>; 13 | async fn delete_object(&self, key: String) -> Result<(), StorageClientError>; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /e2e-tests/mock-crypto/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mock-crypto" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | axum = "0.5.16" 10 | axum-server = { version = "0.5.0", features = ["tls-rustls"] } 11 | tokio = {version="1.24.2", features=["macros", "rt-multi-thread"]} 12 | lazy_static = "1.4.0" 13 | serde_json = "1.0.114" 14 | serde = { version="1.0.200", 
features = ["derive"] } 15 | rustls = "0.21.12" 16 | rustls-pemfile = "1.0.4" 17 | serde_cbor = "0.11.2" 18 | base64 = "0.13.0" 19 | rand = { version = "0.8.5" } 20 | thiserror = "1.0" -------------------------------------------------------------------------------- /scripts/health-check.sh: -------------------------------------------------------------------------------- 1 | rm -f health-check-response.txt 2 | # this writes the response to the request to a file, and stores the status code as a variable 3 | response_code=$(curl -s -o health-check-response.txt -w "%{http_code}" -H 'User-Agent: ECS-HealthCheck' localhost:3032) 4 | 5 | # curl will set the response code to 000 if the request errors 6 | if [ $response_code == "000" ]; then 7 | echo "Health check failed. Failed to connect to control plane." 8 | exit 1 9 | elif [ $response_code != "200" ]; then 10 | response=$(cat health-check-response.txt) 11 | echo "Health check failed. Response from control-plane: $response" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /shared/src/server/error.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Formatter; 2 | use thiserror::Error; 3 | 4 | #[derive(Error, Debug)] 5 | pub enum ServerError { 6 | IoError(#[from] std::io::Error), 7 | Hyper(#[from] hyper::Error), 8 | JsonError(#[from] serde_json::Error), 9 | InvalidPath(String), 10 | #[cfg(feature = "network_egress")] 11 | EgressError(#[from] super::egress::EgressError), 12 | UnexpectedEOF, 13 | } 14 | 15 | impl std::fmt::Display for ServerError { 16 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 17 | write!(f, "{self:?}") 18 | } 19 | } 20 | 21 | pub type ServerResult = std::result::Result; 22 | -------------------------------------------------------------------------------- /data-plane/src/connection.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(feature = 
"enclave"))] 2 | pub type Connection = tokio::net::TcpStream; 3 | 4 | #[cfg(feature = "enclave")] 5 | pub type Connection = tokio_vsock::VsockStream; 6 | 7 | #[cfg(not(feature = "enclave"))] 8 | pub async fn get_socket(port: u16) -> Result { 9 | Connection::connect(std::net::SocketAddr::new( 10 | std::net::IpAddr::V4(std::net::Ipv4Addr::new(172, 20, 0, 8)), 11 | port, 12 | )) 13 | .await 14 | } 15 | 16 | #[cfg(feature = "enclave")] 17 | pub async fn get_socket(port: u16) -> Result { 18 | Connection::connect(shared::PARENT_CID, port.into()).await 19 | } 20 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | To report a security issue, please email engineering@evervault.com with a description of the issue, the steps you took to create the issue, affected versions, and, if known, mitigations for the issue. 6 | 7 | This project follows a 90 day disclosure timeline. 
8 | 9 | Evervault pledge to: 10 | - Take all reported findings seriously and respond to you in a timely manner 11 | - Acknowledge and thank you once any legitimate vulnerabilities have been fixed 12 | - Publicly disclose significant vulnerabilities 13 | 14 | Evervault does not currently offer a bug bounty, however are appreciative of the efforts of the security researcher community. 15 | -------------------------------------------------------------------------------- /data-plane/src/cache.rs: -------------------------------------------------------------------------------- 1 | use cached::TimedSizedCache; 2 | use once_cell::sync::Lazy; 3 | use tokio::sync::Mutex; 4 | 5 | use crate::crypto::token::AttestationAuth; 6 | 7 | const E3_TOKEN_LIFETIME: u64 = 280; 8 | const ATTESTATION_DOC_LIFETIME: u64 = 300; // 5 minutes 9 | 10 | pub static E3_TOKEN: Lazy>> = Lazy::new(|| { 11 | Mutex::new(TimedSizedCache::with_size_and_lifespan( 12 | 1, 13 | E3_TOKEN_LIFETIME, 14 | )) 15 | }); 16 | 17 | pub static ATTESTATION_DOC: Lazy>> = Lazy::new(|| { 18 | Mutex::new(TimedSizedCache::with_size_and_lifespan( 19 | 1, 20 | ATTESTATION_DOC_LIFETIME, 21 | )) 22 | }); 23 | -------------------------------------------------------------------------------- /data-plane/src/base_tls_client/server_cert_verifier.rs: -------------------------------------------------------------------------------- 1 | use std::time::SystemTime; 2 | use tokio_rustls::rustls::client::ServerCertVerifier; 3 | use tokio_rustls::rustls::{ 4 | client::{ServerCertVerified, ServerName}, 5 | Certificate, Error, 6 | }; 7 | 8 | pub struct OpenServerCertVerifier; 9 | 10 | impl ServerCertVerifier for OpenServerCertVerifier { 11 | fn verify_server_cert( 12 | &self, 13 | _end_entity: &Certificate, 14 | _intermediates: &[Certificate], 15 | _server_name: &ServerName, 16 | _scts: &mut dyn Iterator, 17 | _ocsp_response: &[u8], 18 | _now: SystemTime, 19 | ) -> Result { 20 | Ok(ServerCertVerified::assertion()) 21 | } 22 | } 23 | 
-------------------------------------------------------------------------------- /scripts/export-dev-env-vars.sh: -------------------------------------------------------------------------------- 1 | export MOCK_CRYPTO_CERT=`cat certs/ca.crt` 2 | export MOCK_CRYPTO_KEY=`cat certs/ca.key` 3 | export MOCK_CERT_PROVISIONER_CLIENT_CERT=`cat certs/client_0.crt` 4 | export MOCK_CERT_PROVISIONER_CLIENT_KEY=`cat certs/client_0.key` 5 | export MOCK_CERT_PROVISIONER_ROOT_CERT=`cat certs/ca.crt` 6 | export MOCK_CERT_PROVISIONER_SERVER_KEY=`cat certs/localhost.key` 7 | export MOCK_CERT_PROVISIONER_ROOT_CERT=`cat certs/ca.crt` 8 | export MOCK_CERT_PROVISIONER_SERVER_CERT=`cat certs/localhost.crt` 9 | export EV_API_KEY_AUTH=true 10 | export CUSTOMER_PROCESS=httpCustomerProcess.js 11 | export ACME_ACCOUNT_EC_KEY=`cat ./e2e-tests/acme-key/key.pem` 12 | export ACME_ACCOUNT_HMAC_KEY="cGxhY2Vob2xkZXI=" 13 | export ACME_ACCOUNT_HMAC_KEY_ID="placeholder_id" -------------------------------------------------------------------------------- /e2e-tests/health-check-tests.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const { spawn } = require('child_process'); 3 | 4 | describe('Run health-check request', () => { 5 | 6 | const runHealthCheckScript = async () => { 7 | const cmd = spawn('sh', ['../scripts/health-check.sh']); 8 | return await new Promise((resolve) => cmd.on('exit', code => { 9 | resolve(code) 10 | })); 11 | }; 12 | 13 | it('should output success exit code', async () => { 14 | const exitCode = await runHealthCheckScript(); 15 | expect(exitCode).to.equal(0); 16 | }); 17 | 18 | it('should fail with non-zero exit code', async () => { 19 | const exitCode = await runHealthCheckScript(); 20 | expect(exitCode).to.not.equal(0); 21 | }); 22 | }); -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Note: Security vulnerabilities should follow the disclosure policy outlined in the [security policy](/SECURITY.md).** 11 | 12 | **Describe the bug** 13 | A clear and concise description of what the bug is. 14 | 15 | **To Reproduce** 16 | Steps to reproduce the behavior: 17 | 1. Go to '...' 18 | 2. Click on '....' 19 | 3. Scroll down to '....' 20 | 4. See error 21 | 22 | **Expected behavior** 23 | A clear and concise description of what you expected to happen. 24 | 25 | **Screenshots** 26 | If applicable, add screenshots to help explain your problem. 27 | 28 | **Additional context** 29 | Add any other context about the problem here. 30 | -------------------------------------------------------------------------------- /data-plane/src/mocks/config_client_mock.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use error::Result; 3 | use mockall::mock; 4 | use shared::server::config_server::requests::GetClockSyncResponse; 5 | use shared::server::config_server::requests::GetObjectResponse; 6 | 7 | use crate::{config_client::StorageConfigClientInterface, error}; 8 | 9 | mock! 
{ 10 | #[derive(Debug, Clone)] 11 | pub StorageConfigClientInterface {} 12 | 13 | #[async_trait] 14 | impl StorageConfigClientInterface for StorageConfigClientInterface { 15 | async fn get_object(&self, key: String) -> Result>; 16 | async fn put_object(&self, key: String, object: String) -> Result<()>; 17 | async fn delete_object(&self, key: String) -> Result<()>; 18 | async fn get_time_from_host(&self) -> Result; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /data-plane/src/configuration.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "enclave")] 2 | pub fn get_cert_provisioner_host() -> String { 3 | "provisioner.cages.internal".to_string() 4 | } 5 | 6 | #[cfg(not(feature = "enclave"))] 7 | pub fn get_cert_provisioner_host() -> String { 8 | "localhost".to_string() 9 | } 10 | 11 | pub fn get_acme_host() -> String { 12 | "acme-v02.api.letsencrypt.org".to_string() 13 | } 14 | 15 | pub fn get_acme_base_path() -> String { 16 | "/directory".to_string() 17 | } 18 | 19 | #[cfg(feature = "enclave")] 20 | pub fn get_e3_host() -> String { 21 | "e3.cages-e3.internal".to_string() 22 | } 23 | 24 | #[cfg(not(feature = "enclave"))] 25 | pub fn get_e3_host() -> String { 26 | "localhost".to_string() 27 | } 28 | 29 | pub fn should_forward_proxy_protocol() -> bool { 30 | std::env::var("FORWARD_PROXY_PROTOCOL").is_ok() 31 | } 32 | -------------------------------------------------------------------------------- /e2e-tests/scripts/start-control-plane.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "Running start control plane script" 4 | 5 | export EV_APP_UUID=app_12345678 6 | export EV_TEAM_UUID=team_12345678 7 | export EV_CAGE_NAME=test-cage 8 | export CAGE_UUID=cage_123456 9 | export EV_CAGE_VERSION_ID=10 10 | export ACME_S3_BUCKET="cages-acme-local" 11 | export ACME_ACCOUNT_HMAC_KEY="cGxhY2Vob2xkZXI" # base64 
url encoded "placeholder" 12 | export ACME_ACCOUNT_HMAC_KEY_ID="placeholder_id" 13 | 14 | export CERT_PROVISIONER_MTLS_CLIENT_CERT=$MOCK_CERT_PROVISIONER_CLIENT_CERT 15 | export CERT_PROVISIONER_MTLS_CLIENT_KEY=$MOCK_CERT_PROVISIONER_CLIENT_KEY 16 | export CERT_PROVISIONER_MTLS_ROOT_CERT=$MOCK_CERT_PROVISIONER_ROOT_CERT 17 | export ACME_ACCOUNT_EC_KEY=$ACME_ACCOUNT_EC_KEY 18 | export EXTERNAL_METRICS_ENABLED="true" 19 | 20 | 21 | exec $CONTROL_PLANE_EXECUTABLE_PATH 22 | 23 | -------------------------------------------------------------------------------- /data-plane/src/utils/nsm.rs: -------------------------------------------------------------------------------- 1 | use aws_nitro_enclaves_nsm_api as nitro; 2 | use thiserror::Error; 3 | 4 | /// Thin wrapper on the NSM API which handles initialization and cleanup. 5 | pub struct NsmConnection(i32); 6 | 7 | #[derive(Debug, Error)] 8 | pub enum NsmConnectionError { 9 | #[error("Failed to initialize NSM connection")] 10 | InitFailed, 11 | } 12 | 13 | impl NsmConnection { 14 | pub fn try_new() -> Result { 15 | let nsm_fd = nitro::driver::nsm_init(); 16 | if nsm_fd < 0 { 17 | return Err(NsmConnectionError::InitFailed); 18 | } 19 | Ok(Self(nsm_fd)) 20 | } 21 | 22 | pub fn fd(&self) -> i32 { 23 | self.0 24 | } 25 | } 26 | 27 | impl std::ops::Drop for NsmConnection { 28 | fn drop(&mut self) { 29 | nitro::driver::nsm_exit(self.fd()); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /.github/workflows/test-shared-lib.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - shared/** 5 | - .github/workflows/test-shared-lib.yml 6 | - Cargo.lock 7 | name: Test Shared Library 8 | jobs: 9 | test_shared_lib: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Install MUSL Tools 14 | run: | 15 | sudo apt-get update 16 | sudo apt-get install -y musl-tools 17 | - uses: 
dtolnay/rust-toolchain@stable 18 | with: 19 | targets: x86_64-unknown-linux-musl 20 | - uses: Swatinem/rust-cache@v2 21 | with: 22 | shared-key: "standard-cache" 23 | - name: Compile project 24 | run: cargo check -p shared --no-default-features --features network_egress 25 | - name: Test project 26 | run: cargo test -p shared --no-default-features --features network_egress 27 | -------------------------------------------------------------------------------- /e2e-tests/noAuthTests.js: -------------------------------------------------------------------------------- 1 | const { expect } = require("chai"); 2 | const axios = require("axios").default; 3 | const https = require("https"); 4 | 5 | describe("POST data to enclave", () => { 6 | const allowAllCerts = axios.create({ 7 | httpsAgent: new https.Agent({ 8 | rejectUnauthorized: false, 9 | }), 10 | }); 11 | context("api key auth is disabled", () => { 12 | it("returns successfully", () => { 13 | return allowAllCerts 14 | .post("https://enclave.localhost:443/hello", { secret: "ev:123" }) 15 | .then((result) => { 16 | console.log("Post request sent to the enclave"); 17 | expect(result.data).to.deep.equal({ 18 | response: "Hello from enclave", 19 | secret: "ev:123", 20 | }); 21 | }) 22 | .catch((err) => { 23 | console.error(err); 24 | throw err; 25 | }); 26 | }); 27 | }); 28 | }); 29 | -------------------------------------------------------------------------------- /shared/src/rpc/request.rs: -------------------------------------------------------------------------------- 1 | extern crate rmp_serde as rmps; 2 | extern crate serde; 3 | extern crate serde_derive; 4 | use std::net::IpAddr; 5 | 6 | use crate::rpc::error::RpcError; 7 | 8 | use rmps::{Deserializer, Serializer}; 9 | use serde::{Deserialize, Serialize}; 10 | 11 | #[derive(Debug, PartialEq, Deserialize, Serialize, Eq)] 12 | pub struct ExternalRequest { 13 | pub ip: IpAddr, 14 | pub data: Vec, 15 | pub port: u16, 16 | } 17 | 18 | impl ExternalRequest { 19 | pub fn 
to_bytes(&self) -> Result, RpcError> { 20 | let mut buf = Vec::new(); 21 | self.serialize(&mut Serializer::new(&mut buf))?; 22 | Ok(buf) 23 | } 24 | 25 | pub fn from_bytes(bytes: Vec) -> Result { 26 | let mut deserializer = Deserializer::new(&bytes[..]); 27 | let res = Deserialize::deserialize(&mut deserializer)?; 28 | Ok(res) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /.github/workflows/deploy-runtime-installer-production.yml: -------------------------------------------------------------------------------- 1 | name: Deploy installer to production 2 | 3 | on: 4 | push: 5 | tags: 6 | - "installer/v*.*.*" 7 | permissions: 8 | id-token: write 9 | contents: read 10 | 11 | jobs: 12 | get-version: 13 | runs-on: ubuntu-latest 14 | outputs: 15 | version: ${{ steps.get-version.outputs.version }} 16 | steps: 17 | - id: get-version 18 | run: | 19 | echo "using version tag ${GITHUB_REF:21}" 20 | echo ::set-output name=version::${GITHUB_REF:21} 21 | 22 | build-and-deploy: 23 | needs: [get-version] 24 | uses: ./.github/workflows/deploy-runtime-installer.yml 25 | with: 26 | stage: "production" 27 | version: ${{ needs.get-version.outputs.version }} 28 | secrets: 29 | AWS_ENCLAVES_OIDC_ROLE_ARN: ${{ secrets.AWS_ENCLAVES_PRODUCTION_OIDC_ROLE_ARN }} 30 | AWS_CLOUDFRONT_DISTRIBUTION_ID: ${{ secrets.AWS_CLOUDFRONT_DISTRIBUTION }} 31 | -------------------------------------------------------------------------------- /e2e-tests/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "e2e-tests", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "NODE_TLS_REJECT_UNAUTHORIZED=0 mocha e2e.js --timeout 10000", 8 | "health-check-tests": "NODE_TLS_REJECT_UNAUTHORIZED=0 mocha health-check-tests.js --grep", 9 | "customer": "node httpCustomerProcess.js", 10 | "no-auth-tests": "NODE_TLS_REJECT_UNAUTHORIZED=0 mocha noAuthTests.js", 11 | 
"api-key-auth-tests": "NODE_TLS_REJECT_UNAUTHORIZED=0 mocha apiKeyAuthTests.js", 12 | "no-tls-termination-tests": "NODE_TLS_REJECT_UNAUTHORIZED=0 mocha noTlsTests.js", 13 | "websocket-tests": "mocha websocketTests.js" 14 | }, 15 | "author": "", 16 | "license": "ISC", 17 | "dependencies": { 18 | "axios": "^1.6.0", 19 | "cbor-sync": "^1.0.4", 20 | "chai": "^4.3.10", 21 | "chai-http": "^5.1.1", 22 | "express": "^4.18.1", 23 | "mocha": "^10.0.0", 24 | "ws": "^8.13.0" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /data-plane/src/dns/error.rs: -------------------------------------------------------------------------------- 1 | use shared::server::{egress::EgressError, error::ServerError}; 2 | use thiserror::Error; 3 | 4 | #[derive(Debug, Error)] 5 | pub enum DNSError { 6 | #[error("{0}")] 7 | Io(#[from] std::io::Error), 8 | #[error("DNS query format error — no questions found")] 9 | DNSNoQuestionsFound, 10 | #[error("{0}")] 11 | RpcError(#[from] shared::rpc::error::RpcError), 12 | #[error("{0}")] 13 | MissingIP(String), 14 | #[error("{0}")] 15 | TlsParseError(String), 16 | #[error("Could not find a hostname in the TLS hello message. 
Perhaps SNI is not being used.")] 17 | NoHostnameFound, 18 | #[error("Egress error {0}")] 19 | EgressError(#[from] EgressError), 20 | #[error("DNS lookup failed due to a timeout after: {0}")] 21 | DNSTimeout(#[from] tokio::time::error::Elapsed), 22 | #[error("An error occurred while connecting to the host process - {0}")] 23 | BridgeNetworkingError(#[from] ServerError), 24 | } 25 | -------------------------------------------------------------------------------- /data-plane/src/acme/provider.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, Clone)] 2 | pub enum Provider { 3 | LetsEncrypt, 4 | ZeroSSL, 5 | } 6 | 7 | impl Provider { 8 | pub fn directory_path(&self) -> &str { 9 | match self { 10 | Provider::LetsEncrypt => "/directory", 11 | Provider::ZeroSSL => "/v2/DV90", 12 | } 13 | } 14 | 15 | pub fn hostname(&self) -> &str { 16 | match self { 17 | Provider::LetsEncrypt => "acme-v02.api.letsencrypt.org", 18 | Provider::ZeroSSL => "acme.zerossl.com", 19 | } 20 | } 21 | 22 | pub fn eab_required(&self) -> bool { 23 | match self { 24 | Provider::LetsEncrypt => false, 25 | Provider::ZeroSSL => true, 26 | } 27 | } 28 | 29 | pub fn get_stats_key(&self) -> &str { 30 | match &self { 31 | Self::LetsEncrypt => "acme.letsencrypt", 32 | Self::ZeroSSL => "acme.zerossl", 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /data-plane/src/base_tls_client/tls_client_config.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use tokio_rustls::rustls::client::ServerCertVerifier; 4 | use tokio_rustls::rustls::{ClientConfig, OwnedTrustAnchor}; 5 | 6 | pub fn get_tls_client_config(verifier: Arc) -> ClientConfig { 7 | let config_builder = tokio_rustls::rustls::ClientConfig::builder().with_safe_defaults(); 8 | let mut root_store = tokio_rustls::rustls::RootCertStore::empty(); 9 | 
root_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { 10 | OwnedTrustAnchor::from_subject_spki_name_constraints( 11 | ta.subject, 12 | ta.spki, 13 | ta.name_constraints, 14 | ) 15 | })); 16 | let mut client_config = config_builder 17 | .with_root_certificates(root_store) 18 | .with_no_client_auth(); 19 | let mut dangerous = client_config.dangerous(); 20 | dangerous.set_certificate_verifier(verifier); 21 | client_config 22 | } 23 | -------------------------------------------------------------------------------- /shared/src/acme/helpers.rs: -------------------------------------------------------------------------------- 1 | use crate::acme::error::*; 2 | use openssl::ec::EcGroup; 3 | use openssl::ec::EcKey; 4 | use openssl::nid::Nid; 5 | use openssl::pkey::PKey; 6 | use openssl::pkey::Private; 7 | 8 | pub fn b64(data: &[u8]) -> String { 9 | base64::encode_config(data, ::base64::URL_SAFE_NO_PAD) 10 | } 11 | 12 | pub fn b64_decode(data: &str) -> Result, AcmeError> { 13 | base64::decode_config(data, ::base64::URL_SAFE_NO_PAD).map_err(AcmeError::Base64DecodeError) 14 | } 15 | 16 | pub fn gen_ec_private_key() -> Result, AcmeError> { 17 | let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).expect("Infallible - Harcoded NID"); 18 | let ec_key: EcKey = EcKey::generate(&group)?; 19 | let key = PKey::from_ec_key(ec_key)?; 20 | Ok(key) 21 | } 22 | 23 | pub fn hmac_from_b64_string(key: &str) -> Result, AcmeError> { 24 | let key_bytes = b64_decode(key)?; 25 | let pkey = PKey::hmac(&key_bytes)?; 26 | Ok(pkey) 27 | } 28 | -------------------------------------------------------------------------------- /scripts/update-installer-version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ -z "$1" ]; then 5 | echo "Installer version is null. Exiting..." 
6 | exit 1 7 | fi 8 | 9 | release_version="$1" 10 | installer_hash="$2" 11 | stage="$3" 12 | 13 | echo installer_hash $installer_hash 14 | 15 | major_version=$(echo "$release_version" | cut -d '.' -f 1) 16 | 17 | 18 | echo "Release major version: $major_version" 19 | 20 | if [ stage="staging" ]; then 21 | domain="evervault.io" 22 | else 23 | domain="evervault.com" 24 | fi 25 | 26 | version_json=$(curl -s "https://enclave-build-assets.$domain/runtime/versions") 27 | echo "Version response: $version_json" 28 | 29 | 30 | if [ $? -eq 0 ]; then 31 | version_json=$(echo "$version_json" | jq --arg major_version "$major_version" --arg installer_hash "$installer_hash" '.versions[$major_version].installer = $installer_hash') 32 | echo "Updated versions: $version_json" 33 | echo "$version_json" > ./scripts/versions 34 | else 35 | echo "Couldn't get versions from S3 $version_json" 36 | fi -------------------------------------------------------------------------------- /data-plane/src/crypto/rand.rs: -------------------------------------------------------------------------------- 1 | use crate::error::{Error, Result}; 2 | 3 | #[cfg(feature = "enclave")] 4 | pub fn rand_bytes(buffer: &mut [u8]) -> Result<()> { 5 | use crate::utils::nsm::NsmConnection; 6 | use aws_nitro_enclaves_nsm_api as nitro; 7 | let nsm_conn = NsmConnection::try_new()?; 8 | match nitro::driver::nsm_process_request(nsm_conn.fd(), nitro::api::Request::GetRandom) { 9 | nitro::api::Response::GetRandom { random } => { 10 | buffer.copy_from_slice(&random); 11 | Ok(()) 12 | } 13 | nitro::api::Response::Error(e) => Err(Error::Crypto(format!( 14 | "Could not get entropy from the Nitro Secure Module! 
{e:?}" 15 | ))), 16 | _ => Err(Error::Crypto( 17 | "Received unknown response from Nitro Secure Module".to_string(), 18 | )), 19 | } 20 | } 21 | 22 | #[cfg(not(feature = "enclave"))] 23 | pub fn rand_bytes(buffer: &mut [u8]) -> Result<()> { 24 | openssl::rand::rand_bytes(buffer).map_err(|e| Error::Crypto(e.to_string())) 25 | } 26 | -------------------------------------------------------------------------------- /.github/workflows/test-control-plane.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - control-plane/** 5 | - shared/** 6 | - .github/workflows/test-control-plane.yml 7 | - Cargo.lock 8 | name: Test Control Plane 9 | jobs: 10 | test_control_plane: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Install MUSL Tools 15 | run: | 16 | sudo apt-get update 17 | sudo apt-get install -y musl-tools 18 | - uses: dtolnay/rust-toolchain@stable 19 | with: 20 | targets: x86_64-unknown-linux-musl 21 | - uses: Swatinem/rust-cache@v2 22 | with: 23 | shared-key: "standard-cache" 24 | - name: Compile project feature -enclave 25 | run: cargo build --features enclave,network_egress -p control-plane 26 | - name: Compile project feature -not_enclave 27 | run: cargo build --features not_enclave,network_egress -p control-plane 28 | - name: Test project 29 | run: cargo test --features enclave,network_egress -p control-plane 30 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - control-plane/** 5 | - data-plane/** 6 | - shared/** 7 | - acme-server 8 | - .github/workflows/lint.yml 9 | name: Lint 10 | jobs: 11 | clippy_check: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | - name: Install MUSL Tools 16 | run: | 17 | sudo apt-get update 18 | sudo apt-get install -y musl-tools 
19 | - uses: dtolnay/rust-toolchain@stable 20 | with: 21 | targets: x86_64-unknown-linux-musl 22 | components: clippy, rustfmt 23 | - uses: Swatinem/rust-cache@v2 24 | with: 25 | shared-key: "standard-cache" 26 | - name: Check formatting 27 | run: cargo fmt --check 28 | - name: Clippy 29 | uses: clechasseur/rs-clippy-check@v3 30 | with: 31 | args: --features enclave -- -D warnings 32 | - name: Clippy check not enclave 33 | uses: clechasseur/rs-clippy-check@v3 34 | with: 35 | args: --features not_enclave -- -D warnings 36 | -------------------------------------------------------------------------------- /e2e-tests/generate-sample-ca.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # If sample-ca directory doesn't exist, create it 4 | if [ ! -d "e2e-tests/sample-ca" ]; then 5 | mkdir -p e2e-tests/sample-ca 6 | fi 7 | # Generate self signed root CA 8 | openssl genrsa -out e2e-tests/sample-ca/sample-root-ca-key.pem 2048 9 | openssl req -x509 -extensions v3_ca -subj "/C=US/ST=CA/L=San Francisco/O=Evervault/OU=Cages/CN=Cage Test Root CA" -sha256 -new -nodes -key e2e-tests/sample-ca/sample-root-ca-key.pem -days 3650 -out e2e-tests/sample-ca/sample-root-ca-cert.pem 10 | # Generate intermediate CA 11 | openssl genrsa -out e2e-tests/sample-ca/sample-intermediate-key.pem 2048 12 | openssl req -new -subj "/C=US/ST=CA/L=San Francisco/O=Evervault/OU=Cages/CN=Cage Test Intermediate CA" -addext "basicConstraints=critical,CA:TRUE" -key e2e-tests/sample-ca/sample-intermediate-key.pem -out e2e-tests/sample-ca/intermediate.csr 13 | openssl x509 -copy_extensions copyall -req -in e2e-tests/sample-ca/intermediate.csr -CA e2e-tests/sample-ca/sample-root-ca-cert.pem -CAkey e2e-tests/sample-ca/sample-root-ca-key.pem -out e2e-tests/sample-ca/sample-intermediate-cert.pem -days 365 -sha256 14 | rm e2e-tests/sample-ca/intermediate.csr -------------------------------------------------------------------------------- 
/control-plane/src/stats/client.rs: -------------------------------------------------------------------------------- 1 | use super::INTERNAL_METRIC_PORT; 2 | use crate::configuration::EnclaveContext; 3 | use cadence::{BufferedUdpMetricSink, QueuingMetricSink, StatsdClient}; 4 | use cadence_macros::{set_global_default, statsd_count}; 5 | use shared::{publish_count, stats::StatsError}; 6 | use std::net::UdpSocket; 7 | 8 | pub struct StatsClient; 9 | 10 | impl StatsClient { 11 | pub fn init() { 12 | if let Err(e) = Self::initialize_sink() { 13 | log::error!("Couldn't init statsd client: {e}"); 14 | } 15 | } 16 | 17 | fn initialize_sink() -> Result<(), StatsError> { 18 | let target_ip = super::get_stats_target_ip(); 19 | let socket = UdpSocket::bind("0.0.0.0:0")?; 20 | let udp_sink = BufferedUdpMetricSink::from((target_ip, INTERNAL_METRIC_PORT), socket)?; 21 | let queuing_sink = QueuingMetricSink::from(udp_sink); 22 | let client = StatsdClient::from_sink("", queuing_sink); 23 | set_global_default(client); 24 | Ok(()) 25 | } 26 | 27 | pub fn record_request() { 28 | let context = EnclaveContext::from_env_vars(); 29 | publish_count!("request.count", 1, context); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /data-plane/src/stats/proxy.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use shared::bridge::{Bridge, BridgeInterface, Direction}; 3 | use shared::EXTERNAL_STATS_BRIDGE_PORT; 4 | use tokio::io::AsyncWriteExt; 5 | use tokio::net::UdpSocket; 6 | 7 | pub struct StatsProxy; 8 | 9 | impl StatsProxy { 10 | pub async fn listen() -> Result<(), shared::server::error::ServerError> { 11 | let socket = UdpSocket::bind("127.0.0.1:8125").await?; 12 | 13 | let mut stream = 14 | Bridge::get_client_connection(EXTERNAL_STATS_BRIDGE_PORT, Direction::EnclaveToHost) 15 | .await?; 16 | 17 | let mut buffer = [0; 512]; 18 | loop { 19 | let (amt, _) = match 
socket.recv_from(&mut buffer).await { 20 | Ok((amt, src)) => (amt, src), 21 | Err(e) => { 22 | log::error!("Error receiving stats: {e}"); 23 | buffer.fill(0); 24 | continue; 25 | } 26 | }; 27 | 28 | let buf = Bytes::copy_from_slice(&buffer[..amt]); 29 | let _ = stream.write_all(&buf).await; 30 | let _ = stream.flush().await; 31 | buffer.fill(0); 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /data-plane/src/cert_provisioner_client/tls_verifier.rs: -------------------------------------------------------------------------------- 1 | use std::time::SystemTime; 2 | use tokio_rustls::rustls::client::ServerCertVerifier; 3 | use tokio_rustls::rustls::{ 4 | client::{ServerCertVerified, ServerName}, 5 | Certificate, CertificateError, Error, 6 | }; 7 | 8 | use crate::configuration; 9 | 10 | pub struct CertProvisionerCertVerifier; 11 | 12 | impl ServerCertVerifier for CertProvisionerCertVerifier { 13 | fn verify_server_cert( 14 | &self, 15 | _end_entity: &Certificate, 16 | _intermediates: &[Certificate], 17 | server_name: &ServerName, 18 | _scts: &mut dyn Iterator, 19 | _ocsp_response: &[u8], 20 | _now: SystemTime, 21 | ) -> Result { 22 | let cert_provisioner_hostname = configuration::get_cert_provisioner_host(); 23 | let expected_server_name = 24 | ServerName::try_from(cert_provisioner_hostname.as_str()).expect("Infallible"); 25 | 26 | if &expected_server_name == server_name { 27 | Ok(ServerCertVerified::assertion()) 28 | } else { 29 | Err(Error::InvalidCertificate(CertificateError::NotValidForName)) 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /shared/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub const ENCLAVE_CERT_PORT: u16 = 7775; 2 | pub const ENCLAVE_CONFIG_PORT: u16 = 7776; 3 | pub const ENCLAVE_CONNECT_PORT: u16 = 7777; 4 | pub const ENCLAVE_CRYPTO_PORT: u16 = 7778; 5 | pub const 
ENCLAVE_HEALTH_CHECK_PORT: u16 = 7779; 6 | pub const EGRESS_PROXY_VSOCK_PORT: u16 = 4433; 7 | pub const EGRESS_PROXY_PORT: u16 = 4444; 8 | pub const INTERNAL_STATS_BRIDGE_PORT: u16 = 8128; 9 | pub const EXTERNAL_STATS_BRIDGE_PORT: u16 = 8129; 10 | pub const DNS_PROXY_VSOCK_PORT: u16 = 8585; 11 | pub const STATS_VSOCK_PORT: u16 = 8129; 12 | pub const ENCLAVE_ACME_PORT: u16 = 7780; 13 | pub const ENCLAVE_CID: u32 = 2021; 14 | pub const PARENT_CID: u32 = 3; 15 | 16 | pub mod acme; 17 | pub mod bridge; 18 | pub mod logging; 19 | pub mod notify_shutdown; 20 | pub mod rpc; 21 | pub mod server; 22 | pub mod stats; 23 | pub mod utils; 24 | 25 | lazy_static::lazy_static! { 26 | pub static ref CLIENT_VERSION: String = option_env!("CARGO_PKG_VERSION").map(|version| version.to_string()).unwrap_or_else(|| "unknown".to_string()); 27 | pub static ref CLIENT_MAJOR_VERSION: String = option_env!("CARGO_PKG_VERSION").and_then(|version| version.split('.').next().map(|major| major.to_string())).unwrap_or_else(|| "unknown".to_string()); 28 | } 29 | -------------------------------------------------------------------------------- /installer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 alpine:3.20.3 2 | 3 | RUN mkdir -p /packages 4 | RUN apk update ; apk add xz make gcc build-base bash linux-headers curl pkgconfig bison flex 5 | 6 | ADD http://smarden.org/runit/runit-2.1.2.tar.gz /packages/runit-2.1.2.tar.gz 7 | ADD https://downloads.sourceforge.net/project/net-tools/net-tools-2.10.tar.xz /packages/net-tools-2.10.tar.xz 8 | 9 | RUN curl -o /packages/libmnl-1.0.4.tar.bz2 https://www.netfilter.org/projects/libmnl/files/libmnl-1.0.4.tar.bz2 10 | RUN curl -o /packages/iptables-1.8.10.tar.xz https://www.netfilter.org/projects/iptables/files/iptables-1.8.10.tar.xz 11 | RUN curl -o /packages/libnftnl-1.2.6.tar.xz https://www.netfilter.org/projects/libnftnl/files/libnftnl-1.2.6.tar.xz 12 | RUN curl -o 
/packages/iproute2-6.7.0.tar.gz https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/snapshot/iproute2-6.7.0.tar.gz 13 | 14 | # Copy in minimal preset header file to configure net-tools compilation 15 | COPY config/net-tools.h /packages/ 16 | 17 | COPY scripts/installer.sh /packages 18 | RUN chmod +x /packages/installer.sh 19 | 20 | COPY scripts/compile-runtime-dependencies.sh /compile-runtime-dependencies.sh 21 | RUN chmod +x /compile-runtime-dependencies.sh 22 | 23 | ENTRYPOINT [ "/compile-runtime-dependencies.sh" ] 24 | -------------------------------------------------------------------------------- /data-plane/src/e3client/mock.rs: -------------------------------------------------------------------------------- 1 | use super::{AuthRequest, E3Api, E3Error, E3Payload}; 2 | use async_trait::async_trait; 3 | use hyper::http::HeaderValue; 4 | use mockall::mock; 5 | use serde::de::DeserializeOwned; 6 | 7 | mock! { 8 | #[derive(Debug, Clone)] 9 | pub E3TestClient {} 10 | 11 | #[async_trait] 12 | impl E3Api for E3TestClient { 13 | async fn decrypt(&self, payload: P) -> Result; 14 | 15 | async fn encrypt( 16 | &self, 17 | payload: P, 18 | data_role: Option, 19 | ) -> Result; 20 | 21 | async fn authenticate(&self, api_key: &HeaderValue, payload: AuthRequest) -> Result<(), E3Error>; 22 | 23 | async fn decrypt_with_retries( 24 | &self, 25 | retries: usize, 26 | payload: P, 27 | ) -> Result; 28 | 29 | async fn encrypt_with_retries( 30 | &self, 31 | retries: usize, 32 | payload: P, 33 | data_role: Option, 34 | ) -> Result; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /data-plane/src/acme/mod.rs: -------------------------------------------------------------------------------- 1 | use openssl::pkey::PKey; 2 | use tokio_rustls::rustls::sign::CertifiedKey; 3 | 4 | use crate::{config_client::ConfigClient, e3client::E3Client, EnclaveContext}; 5 | 6 | use self::{cert::AcmeCertificateRetreiver, error::AcmeError, 
key::AcmeKeyRetreiver}; 7 | 8 | pub mod account; 9 | pub mod authorization; 10 | pub mod cert; 11 | pub mod client; 12 | pub mod directory; 13 | pub mod error; 14 | pub mod key; 15 | pub mod lock; 16 | pub mod order; 17 | pub mod provider; 18 | pub mod raw_cert; 19 | pub mod utils; 20 | 21 | #[cfg(test)] 22 | pub mod mocks; 23 | 24 | pub async fn get_trusted_cert() -> Result<(Vec, CertifiedKey), AcmeError> { 25 | let config_client = ConfigClient::new(); 26 | let e3_client = E3Client::new(); 27 | let enclave_context = EnclaveContext::get()?; 28 | 29 | let trusted_key_pair: PKey = 30 | AcmeKeyRetreiver::new(config_client.clone(), e3_client.clone()) 31 | .get_or_create_enclave_key_pair() 32 | .await 33 | .expect("Failed to get key pair for trusted cert"); 34 | 35 | let cert = AcmeCertificateRetreiver::new(config_client, e3_client) 36 | .get_or_create_enclave_certificate(trusted_key_pair.clone(), enclave_context) 37 | .await?; 38 | Ok((trusted_key_pair.public_key_to_der()?, cert)) 39 | } 40 | -------------------------------------------------------------------------------- /data-plane/src/acme/utils.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, SystemTime}; 2 | 3 | use openssl::asn1::Asn1Time; 4 | use openssl::asn1::Asn1TimeRef; 5 | use rand::Rng; 6 | 7 | use super::error::AcmeError; 8 | 9 | pub const CERTIFICATE_LOCK_NAME: &str = "certificate-v1"; 10 | pub const CERTIFICATE_OBJECT_KEY: &str = "certificate-v1.pem"; 11 | pub const ONE_DAY_IN_SECONDS: u64 = 86400; 12 | pub const THIRTY_DAYS_IN_SECONDS: u64 = ONE_DAY_IN_SECONDS * 30; 13 | pub const SIXTY_DAYS_IN_SECONDS: u64 = ONE_DAY_IN_SECONDS * 60; 14 | 15 | pub(crate) fn asn1_time_to_system_time(time: &Asn1TimeRef) -> Result { 16 | let unix_time = Asn1Time::from_unix(0)?.diff(time)?; 17 | Ok(SystemTime::UNIX_EPOCH 18 | + Duration::from_secs(unix_time.days as u64 * 86400 + unix_time.secs as u64)) 19 | } 20 | 21 | pub(crate) fn 
get_jittered_time(base_time: SystemTime) -> SystemTime { 22 | let mut rng = rand::thread_rng(); 23 | let jitter_duration = Duration::from_secs(rng.gen_range(1..86400)); 24 | base_time + jitter_duration 25 | } 26 | 27 | pub(crate) fn seconds_with_jitter_to_time(seconds: u64) -> Result { 28 | let time = SystemTime::now() + Duration::from_secs(seconds); 29 | let jittered_time = get_jittered_time(time); 30 | jittered_time 31 | .duration_since(SystemTime::now()) 32 | .map_err(AcmeError::SystemTimeError) 33 | } 34 | -------------------------------------------------------------------------------- /shared/src/server/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod config_server; 2 | #[cfg(feature = "network_egress")] 3 | pub mod egress; 4 | pub mod error; 5 | pub mod health; 6 | pub mod proxy_protocol; 7 | pub mod sni; 8 | pub mod tcp; 9 | pub use tcp::{TcpServer, TcpServerWithProxyProtocol}; 10 | 11 | #[cfg(feature = "enclave")] 12 | pub mod vsock; 13 | use async_trait::async_trait; 14 | use tokio::io::{AsyncRead, AsyncWrite}; 15 | #[cfg(not(feature = "enclave"))] 16 | use tokio::net::TcpStream; 17 | #[cfg(feature = "enclave")] 18 | use tokio_vsock::VsockStream; 19 | #[cfg(feature = "enclave")] 20 | pub use vsock::{VsockServer, VsockServerWithProxyProtocol}; 21 | 22 | #[async_trait] 23 | pub trait Listener: Sized { 24 | type Connection: AsyncRead + AsyncWrite + Send + Sync + Unpin; 25 | type Error: std::fmt::Debug + std::fmt::Display; 26 | async fn accept(&mut self) -> Result; 27 | } 28 | 29 | #[cfg(not(feature = "enclave"))] 30 | impl proxy_protocol::ProxiedConnection for TcpStream {} 31 | 32 | #[cfg(feature = "enclave")] 33 | impl proxy_protocol::ProxiedConnection for VsockStream {} 34 | 35 | impl proxy_protocol::ProxiedConnection 36 | for tokio_rustls::server::TlsStream 37 | { 38 | fn proxy_protocol(&self) -> Option<&ppp::v2::Header<'_>> { 39 | self.get_ref().0.proxy_protocol() 40 | } 41 | 42 | fn 
has_proxy_protocol(&self) -> bool { 43 | self.get_ref().0.has_proxy_protocol() 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /.github/workflows/test-runtime-builder.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | branches: 4 | - main 5 | paths: 6 | - installer/** 7 | - .github/workflows/test-runtime-builder.yml 8 | - Cargo.lock 9 | name: Build runtime installer 10 | jobs: 11 | run-build-on-runtime-installer: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v3 15 | - name: Build bundle using local action 16 | uses: ./installer 17 | - name: Upload bundle as artifact 18 | uses: actions/upload-artifact@v4 19 | with: 20 | name: runtime-dependencies-${{github.sha}}.zip 21 | path: ./output/runtime-dependencies.tar.gz 22 | test-installer-on-multiple-runtimes: 23 | needs: run-build-on-runtime-installer 24 | runs-on: ubuntu-latest 25 | strategy: 26 | matrix: 27 | dockerfile: 28 | - test-abyss.Dockerfile 29 | - test-alpine.Dockerfile 30 | - test.Dockerfile 31 | steps: 32 | - uses: actions/checkout@v3 33 | - name: Download bundle artifact 34 | uses: actions/download-artifact@v4.1.7 35 | with: 36 | name: runtime-dependencies-${{github.sha}}.zip 37 | path: ./installer/output 38 | - name: Build image 39 | run: | 40 | cd ./installer 41 | docker build --platform linux/amd64 -t runtime-dependencies-test -f ${{matrix.dockerfile}} . 
42 | docker run --platform linux/amd64 -t runtime-dependencies-test 43 | -------------------------------------------------------------------------------- /shared/src/server/vsock.rs: -------------------------------------------------------------------------------- 1 | use tokio_vsock::{VsockListener, VsockStream}; 2 | 3 | use super::{error::ServerError, Listener}; 4 | use async_trait::async_trait; 5 | 6 | pub struct VsockServer { 7 | inner: VsockListener, 8 | } 9 | 10 | impl VsockServer { 11 | pub async fn bind(cid: u32, port: u32) -> super::error::ServerResult { 12 | let listener = VsockListener::bind(cid, port)?; 13 | Ok(Self { inner: listener }) 14 | } 15 | } 16 | 17 | #[async_trait] 18 | impl Listener for VsockServer { 19 | type Connection = VsockStream; 20 | type Error = ServerError; 21 | async fn accept(&mut self) -> Result { 22 | let (conn, _socket_addr) = self.inner.accept().await?; 23 | Ok(conn) 24 | } 25 | } 26 | 27 | pub struct VsockServerWithProxyProtocol { 28 | inner: VsockListener, 29 | } 30 | 31 | impl VsockServerWithProxyProtocol { 32 | pub async fn bind(cid: u32, port: u32) -> super::error::ServerResult { 33 | let listener = VsockListener::bind(cid, port)?; 34 | Ok(Self { inner: listener }) 35 | } 36 | } 37 | 38 | #[async_trait] 39 | impl Listener for VsockServerWithProxyProtocol { 40 | type Connection = super::proxy_protocol::AcceptedConn; 41 | type Error = ServerError; 42 | async fn accept(&mut self) -> Result { 43 | let (conn, _socket_addr) = self.inner.accept().await?; 44 | let proxy_protocol_conn = super::proxy_protocol::try_parse_proxy_protocol(conn).await?; 45 | Ok(proxy_protocol_conn) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /shared/src/server/tcp.rs: -------------------------------------------------------------------------------- 1 | use crate::server::error::ServerError; 2 | use tokio::net::{TcpListener, TcpStream, ToSocketAddrs}; 3 | 4 | use super::{proxy_protocol, Listener}; 5 | 
use async_trait::async_trait; 6 | 7 | pub struct TcpServer { 8 | inner: TcpListener, 9 | } 10 | 11 | impl TcpServer { 12 | pub async fn bind(addr: impl ToSocketAddrs) -> super::error::ServerResult { 13 | let listener = TcpListener::bind(addr).await?; 14 | Ok(Self { inner: listener }) 15 | } 16 | } 17 | 18 | #[async_trait] 19 | impl Listener for TcpServer { 20 | type Connection = TcpStream; 21 | type Error = ServerError; 22 | async fn accept(&mut self) -> Result { 23 | let (conn, _socket_addr) = self.inner.accept().await?; 24 | Ok(conn) 25 | } 26 | } 27 | 28 | pub struct TcpServerWithProxyProtocol { 29 | inner: TcpListener, 30 | } 31 | 32 | impl TcpServerWithProxyProtocol { 33 | pub async fn bind(addr: impl ToSocketAddrs) -> super::error::ServerResult { 34 | let listener = TcpListener::bind(addr).await?; 35 | Ok(Self { inner: listener }) 36 | } 37 | } 38 | 39 | #[async_trait] 40 | impl Listener for TcpServerWithProxyProtocol { 41 | type Connection = proxy_protocol::AcceptedConn; 42 | type Error = ServerError; 43 | async fn accept(&mut self) -> Result { 44 | let (conn, _socket_addr) = self.inner.accept().await?; 45 | let proxy_protocol_conn = proxy_protocol::try_parse_proxy_protocol(conn).await?; 46 | Ok(proxy_protocol_conn) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /installer/README.md: -------------------------------------------------------------------------------- 1 | # Enclave Runtime Dependencies Installer 2 | 3 | The Enclave runtime depends on the following packages: 4 | 5 | - runit 6 | - ifconfig 7 | - iptables 8 | - iproute2 9 | 10 | Typically, these would be installed using a package manager like `apk` or `apt-get`. However, to allow Enclave builds to be reproducible we cannot use these package managers. Installing dependencies using a package manager generates internal files which throw off the reproducible builds by including things like timestamps (such as the scripts tar file generated by apk). 
11 | 12 | To get around using the package managers, we need our own consistent installer for the runtime's dependencies. To make it consistent, the CLI will pull a pinned version of the installer (guaranteeing the tar file's timestamps won't change). 13 | 14 | The archive includes precompiled binaries of the runtimes dependencies. These binaries are built in a container defined in `Dockerfile` which outputs the binaries and an installer script in an archive. 15 | 16 | ## Testing 17 | 18 | The binaries and installer can be tested by building `test.Dockerfile`, and running the commands: runit, ifconfig 19 | 20 | ``` 21 | # Build the installer 22 | docker build --platform linux/amd64 -t build-installer . 23 | 24 | # Run the installer with volume attached to local dir 25 | docker run --platform=linux/amd64 -v output:/output build-installer 26 | 27 | # Build the test docker file 28 | docker build -f test.Dockerfile -t installer-test . 29 | 30 | # Run the docker file with command to check dependency has been properly installed 31 | docker run installer-test iptables -v 32 | ``` 33 | -------------------------------------------------------------------------------- /e2e-tests/apiKeyAuthTests.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const axios = require('axios').default; 3 | const https = require('https'); 4 | 5 | describe('POST data to enclave with api key auth enabled', async () => { 6 | const allowAllCerts = axios.create({ 7 | httpsAgent: new https.Agent({ 8 | rejectUnauthorized: false 9 | }) 10 | }); 11 | context('Valid api key is sent as header', () => { 12 | it('returns successfully', async () => { 13 | let result = await allowAllCerts.post('https://enclave.localhost:443/hello', { secret: 'ev:123' }, { headers: { 'api-key': 'placeholder' } }) 14 | expect(result.data).to.deep.equal({ response: 'Hello from enclave', secret: 'ev:123' }); 15 | }); 16 | }); 17 | 18 | context('Invalid 
api key is sent as header', () => { 19 | it('returns 401', async () => { 20 | try { 21 | let result = await allowAllCerts.post('https://enclave.localhost:443/hello', { secret: 'ev:123' }, { headers: { 'api-key': 'invalid' } }) 22 | expect(result.status).to.not.equal(200) 23 | } catch (err) { 24 | expect(err.response.status).to.equal(401); 25 | } 26 | }); 27 | }); 28 | 29 | context('No api key is sent as header', () => { 30 | it('returns 401', async () => { 31 | try { 32 | let result = await allowAllCerts.post('https://enclave.localhost:443/hello', { secret: 'ev:123' }) 33 | expect(result.status).to.not.equal(200) 34 | } catch (err) { 35 | expect(err.response.status).to.equal(401); 36 | } 37 | }); 38 | }); 39 | }); -------------------------------------------------------------------------------- /control-plane/src/acme_account_details.rs: -------------------------------------------------------------------------------- 1 | use crate::configuration; 2 | use openssl::pkey::{PKey, Private}; 3 | use shared::acme::error::AcmeError; 4 | use shared::acme::helpers; 5 | 6 | #[derive(Debug, Clone)] 7 | pub struct ExternalAccountBinding { 8 | key_id: String, 9 | //HMAC key 10 | private_key: PKey, 11 | } 12 | 13 | impl ExternalAccountBinding { 14 | pub fn new(key_id: String, private_key: PKey) -> Self { 15 | Self { 16 | key_id, 17 | private_key, 18 | } 19 | } 20 | 21 | pub fn key_id(&self) -> String { 22 | self.key_id.clone() 23 | } 24 | 25 | pub fn private_key(&self) -> PKey { 26 | self.private_key.clone() 27 | } 28 | } 29 | 30 | #[derive(Clone)] 31 | pub struct AcmeAccountDetails { 32 | pub account_ec_key: PKey, 33 | //Used for CA's that require External Account Bindings (eg: zeroSSL) 34 | pub eab_config: Option, 35 | } 36 | 37 | impl AcmeAccountDetails { 38 | pub fn new(account_ec_key: PKey, eab_config: Option) -> Self { 39 | Self { 40 | account_ec_key, 41 | eab_config, 42 | } 43 | } 44 | 45 | pub fn new_from_env() -> Result { 46 | let ec_key = 
configuration::get_acme_ec_key(); 47 | let hmac_key_id = configuration::get_acme_hmac_key_id(); 48 | let hmac_key_raw = configuration::get_acme_hmac_key(); 49 | let hmac_key = helpers::hmac_from_b64_string(&hmac_key_raw)?; 50 | 51 | let eab_config = ExternalAccountBinding::new(hmac_key_id, hmac_key); 52 | Ok(AcmeAccountDetails::new(ec_key, Some(eab_config))) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /control-plane/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "control-plane" 3 | version = "1.0.0-beta" 4 | edition = "2021" 5 | authors = ["Evervault "] 6 | 7 | [dependencies] 8 | tokio = { version = "1.24.2", features = ["net", "macros", "rt", "rt-multi-thread", "io-util", "time", "fs"] } 9 | dashmap = "4.0.2" 10 | trust-dns-resolver = { version = "*" } 11 | dns-message-parser = { version = "~0.7.0" } 12 | bytes = "1" 13 | thiserror = "1.0" 14 | tokio-vsock = { version = "0.3.2", optional = true } 15 | tokio-rustls = { version = "0.24.1", features = ["dangerous_configuration"] } 16 | tls-parser = "*" 17 | shared = { path = "../shared" } 18 | rand = { version = "0.8.5" } 19 | ctrlc = { version = "3.2.3", features = ["termination"] } 20 | serde = { version = "1.0.200", features = ["derive"] } 21 | serde_json = "1.0.64" 22 | hyper = { version = "0.14.4", features = ["server", "client", "http1", "tcp" ] } 23 | lazy_static = "1.4.0" 24 | rustls-pemfile = "1.0.1" 25 | aws-config = "1.5.8" 26 | aws-types = "1.3.3" 27 | semver = "1.0.17" 28 | cadence.workspace = true 29 | cadence-macros.workspace = true 30 | async-trait = "0.1.56" 31 | mockall = "0.11.4" 32 | axum = "0.6.19" 33 | openssl = { version = "0.10.48", features = ["vendored"] } 34 | base64 = "0.13.0" 35 | storage-client-interface = "0.3.0" 36 | log = { version = "0.4.19", features = ["max_level_debug"] } 37 | rand_chacha = "0.3.1" 38 | 39 | [dev-dependencies] 40 | tokio-test = "0.4.2" 41 
| ppp = "2.2.0" 42 | 43 | [features] 44 | default = [] 45 | network_egress = ["shared/network_egress"] 46 | enclave = ["dep:tokio-vsock", "shared/enclave"] 47 | not_enclave = ["network_egress"] 48 | release_logging = ["log/release_max_level_info"] 49 | 50 | [lints.rust] 51 | unexpected_cfgs = { level = "allow", check-cfg = ['cfg(staging)'] } -------------------------------------------------------------------------------- /e2e-tests/websocketTests.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const assert = require('assert'); 3 | const WebSocket = require('ws'); 4 | 5 | describe('Make websocket request', () => { 6 | 7 | it("should start websocket session when authorised", (done) => { 8 | const options = { 9 | rejectUnauthorized: false, 10 | headers: { 11 | "api-key": "placeholder", 12 | }, 13 | }; 14 | 15 | const serverUrl = "wss://localhost:443/hello"; 16 | 17 | const socket = new WebSocket(serverUrl, options); 18 | 19 | socket.on("open", () => { 20 | console.log("Connected to WebSocket server"); 21 | socket.send("test connection"); 22 | }); 23 | 24 | socket.on("message", (data) => { 25 | console.log("Received message from server:", data.toString("utf8")); 26 | expect(data.toString("utf8")).to.equal( 27 | "SERVER RECIEVED MESSAGE: test connection" 28 | ); 29 | socket.close(); 30 | done(); 31 | }); 32 | }); 33 | 34 | it("should not start websocket session when not authorised", (done) => { 35 | const options = { 36 | rejectUnauthorized: false, 37 | }; 38 | 39 | const serverUrl = "wss://localhost:443/hello"; 40 | 41 | const socket = new WebSocket(serverUrl, options); 42 | 43 | socket.on("open", () => { 44 | console.log("Connected to WebSocket server"); 45 | socket.send("test connection"); 46 | }); 47 | 48 | socket.on("error", (err) => { 49 | expect(err.message).to.equal("Unexpected server response: 401"); 50 | done(); 51 | }); 52 | 53 | socket.on("message", (data) => { 54 | 
assert.fail("Connection was sucessful, 401 expected"); 55 | }); 56 | }); 57 | 58 | }); -------------------------------------------------------------------------------- /shared/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "shared" 3 | version = "0.1.0" 4 | edition = "2021" 5 | authors = ["Evervault "] 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | byteorder = "1.4.2" 11 | rmp-serde = "1.1.1" 12 | serde = { version = "1.0.200", features = ["derive"] } 13 | serde_derive = "1.0.119" 14 | serde_json = "1.0.61" 15 | thiserror = "1.0.25" 16 | hyper = { version = "0.14.4", features = [ 17 | "server", 18 | "http1", 19 | "http2", 20 | "tcp", 21 | "stream", 22 | "client", 23 | ] } 24 | tokio = { version = "1.24.2", features = [ 25 | "net", 26 | "macros", 27 | "rt", 28 | "rt-multi-thread", 29 | "io-util", 30 | "time", 31 | ] } 32 | async-trait = "0.1.56" 33 | tokio-vsock = { version = "0.3.2", optional = true } 34 | lazy_static = "1.4.0" 35 | derive_builder = "0.12.0" 36 | chrono = "0.4.23" 37 | rand = "^0.8" 38 | tls-parser = { version = "0.12.2" } 39 | ppp = { version = "2.2.0" } 40 | tokio-rustls = { version = "0.24.1", features = ["dangerous_configuration"] } 41 | sys-info = "0.9.1" 42 | cadence.workspace = true 43 | httparse = "1.8.0" 44 | openssl = { workspace = true } 45 | base64 = "0.13.0" 46 | env_logger = "0.10.0" 47 | once_cell = { version = "1.19.0", optional = true } 48 | ttl_cache = { version = "0.5.1", optional = true } 49 | dns-parser = { version = "0.8.0", optional = true } 50 | pin-project = "1" 51 | log = { version = "0.4.19", features = ["max_level_debug"] } 52 | 53 | [dev-dependencies] 54 | tokio-test = "0.4.2" 55 | 56 | [lib] 57 | name = "shared" 58 | path = "src/lib.rs" 59 | doc = true 60 | crate-type = ["lib"] 61 | 62 | [features] 63 | default = [] 64 | network_egress = ["dep:once_cell", 
"dep:ttl_cache", "dep:dns-parser"] 65 | enclave = ["dep:tokio-vsock"] 66 | -------------------------------------------------------------------------------- /data-plane/src/server/error.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "enclave")] 2 | use crate::crypto::attest::AttestationError; 3 | use crate::ContextError; 4 | use std::{num::TryFromIntError, time::SystemTimeError}; 5 | use thiserror::Error; 6 | use tokio_rustls::rustls::sign::SignError; 7 | 8 | #[derive(Error, Debug)] 9 | pub enum TlsError { 10 | #[error("IoError - {0}")] 11 | IoError(#[from] std::io::Error), 12 | #[error("TlsError - {0}")] 13 | TlsError(#[from] tokio_rustls::rustls::Error), 14 | #[error("NoHostnameSpecified - Invalid config options given to generate certificate")] 15 | NoHostnameSpecified, 16 | #[error("NoCertFound - Failed to load TLS certificate for the Enclave")] 17 | NoCertFound, 18 | #[error("NoKeyFound - Failed to load the private key for the Enclave")] 19 | NoKeyFound, 20 | #[error(transparent)] 21 | ServerError(#[from] shared::server::error::ServerError), 22 | #[cfg(feature = "enclave")] 23 | #[error(transparent)] 24 | Attestation(#[from] AttestationError), 25 | #[error("OpensslError")] 26 | OpensslError(#[from] openssl::error::ErrorStack), 27 | #[error("SignError")] 28 | SignError(#[from] SignError), 29 | #[error("PemError")] 30 | PemError(#[from] pem::PemError), 31 | #[error("CertProvisionerError - {0}")] 32 | CertProvisionerError(String), 33 | #[error("ContextError - Failed to access Enclave context: {0}")] 34 | ContextError(#[from] ContextError), 35 | #[error("SystemTimeError - {0}")] 36 | SystemTimeError(#[from] SystemTimeError), 37 | #[error("TryFromIntError - {0}")] 38 | TryFromIntError(#[from] TryFromIntError), 39 | #[error("EnvError - an unexpected error occurred while preparing the Enclave environment")] 40 | EnvError(#[from] crate::env::EnvError), 41 | } 42 | 43 | pub type ServerResult = Result; 44 | 
-------------------------------------------------------------------------------- /control-plane/src/error.rs: -------------------------------------------------------------------------------- 1 | use shared::server::sni::SNIError; 2 | use thiserror::Error; 3 | use trust_dns_resolver::error::ResolveError; 4 | 5 | use storage_client_interface::StorageClientError; 6 | 7 | #[derive(Error, Debug)] 8 | pub enum ServerError { 9 | #[error(transparent)] 10 | Io(#[from] std::io::Error), 11 | #[error(transparent)] 12 | Rpc(#[from] shared::rpc::error::RpcError), 13 | #[error(transparent)] 14 | Server(#[from] shared::server::error::ServerError), 15 | #[error(transparent)] 16 | Hyper(#[from] hyper::Error), 17 | #[error(transparent)] 18 | HyperHttp(#[from] hyper::http::Error), 19 | #[error(transparent)] 20 | DNSError(#[from] ResolveError), 21 | #[error("Request to internal IP ({0}) blocked")] 22 | IllegalInternalIp(std::net::IpAddr), 23 | #[error("Invalid IP included in egress request — {0}")] 24 | InvalidIp(#[from] std::net::AddrParseError), 25 | #[error("Failed sending request - {0}")] 26 | FailedRequest(String), 27 | #[error(transparent)] 28 | JsonError(#[from] serde_json::Error), 29 | #[error("Error Setting up Mtls for Cert Provisioner: {0}")] 30 | CertProvisionerMtls(String), 31 | #[error(transparent)] 32 | EnvError(#[from] std::env::VarError), 33 | #[cfg(feature = "network_egress")] 34 | #[error("Egress error: {0}")] 35 | EgressError(#[from] shared::server::egress::EgressError), 36 | #[error("Storage Error - {0}")] 37 | StorageClientError(#[from] StorageClientError), 38 | #[error("Acme Error - {0}")] 39 | AcmeError(#[from] shared::acme::error::AcmeError), 40 | #[error("Invalid DNS Config provided - at least 2 valid DNS Servers must be provided")] 41 | InvalidDnsConfig, 42 | #[error(transparent)] 43 | SNIError(#[from] SNIError), 44 | } 45 | 46 | pub type Result = std::result::Result; 47 | -------------------------------------------------------------------------------- 
/control-plane/src/dns.rs: -------------------------------------------------------------------------------- 1 | use crate::error; 2 | use rand::prelude::IteratorRandom; 3 | use std::net::Ipv4Addr; 4 | use std::net::{IpAddr, SocketAddr}; 5 | use trust_dns_resolver::config::ResolverOpts; 6 | use trust_dns_resolver::config::{NameServerConfigGroup, ResolverConfig}; 7 | use trust_dns_resolver::TokioAsyncResolver; 8 | 9 | pub struct InternalAsyncDnsResolver {} 10 | pub struct ExternalAsyncDnsResolver {} 11 | 12 | impl InternalAsyncDnsResolver { 13 | pub fn new_resolver() -> TokioAsyncResolver { 14 | let dns_ip = IpAddr::V4(Ipv4Addr::new(169, 254, 169, 253)); 15 | get_dns_resolver(dns_ip) 16 | } 17 | } 18 | 19 | impl ExternalAsyncDnsResolver { 20 | pub fn new_resolver() -> TokioAsyncResolver { 21 | let dns_ip = IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)); 22 | get_dns_resolver(dns_ip) 23 | } 24 | } 25 | 26 | fn get_dns_resolver(dns_ip: IpAddr) -> TokioAsyncResolver { 27 | TokioAsyncResolver::tokio( 28 | ResolverConfig::from_parts( 29 | None, 30 | vec![], 31 | NameServerConfigGroup::from_ips_clear(&[dns_ip], 53, true), 32 | ), 33 | ResolverOpts::default(), 34 | ) 35 | } 36 | 37 | pub async fn get_ip_for_host_with_dns_resolver( 38 | dns_resolver: &TokioAsyncResolver, 39 | host: &str, 40 | port: u16, 41 | ) -> error::Result> { 42 | let addr = dns_resolver 43 | .lookup_ip(host) 44 | .await? 
45 | .iter() 46 | .choose(&mut rand::thread_rng()) 47 | .map(|ip| SocketAddr::new(ip, port)); 48 | Ok(addr) 49 | } 50 | 51 | #[cfg(not(feature = "enclave"))] 52 | pub fn get_ip_for_localhost(port: u16) -> error::Result> { 53 | let addr = Some(SocketAddr::new( 54 | IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 55 | port, 56 | )); 57 | 58 | Ok(addr) 59 | } 60 | -------------------------------------------------------------------------------- /shared/src/acme/error.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | use thiserror::Error; 3 | 4 | #[derive(Debug, Error)] 5 | pub enum AcmeError { 6 | #[error("IO Error — {0:?}")] 7 | IoError(#[from] std::io::Error), 8 | #[error("Hyper Error — {0:?}")] 9 | HyperError(#[from] hyper::Error), 10 | #[error("Deserialization Error — {0:?}")] 11 | SerdeError(#[from] serde_json::Error), 12 | #[error("Request to server failed with status: {0:?}")] 13 | FailedRequest(hyper::StatusCode), 14 | #[error("Client Error — {0}")] 15 | ClientError(String), 16 | #[error("HTTP Error — {0:?}")] 17 | HttpError(#[from] hyper::http::Error), 18 | #[error("No Nonce Found")] 19 | NoNonce, 20 | #[error("Nonce Mutex Poison Error - {0:?}")] 21 | PoisonError(String), 22 | #[error("Http Header Conversion Error")] 23 | HeaderConversionError(#[from] hyper::header::ToStrError), 24 | #[error("OpenSSL Error — {0:?}")] 25 | OpenSSLError(#[from] openssl::error::ErrorStack), 26 | #[error("Base64 Decode Error — {0:?}")] 27 | Base64DecodeError(#[from] base64::DecodeError), 28 | #[error("Error interpretting utf8 sequence — {0:?}")] 29 | Utf8Error(#[from] std::str::Utf8Error), 30 | #[error("No directory for acme account - {0:?}")] 31 | NoDirectory(String), 32 | #[error("Error creating CSR - {0:?}")] 33 | CsrError(String), 34 | #[error("{0:?} Field Not Found")] 35 | FieldNotFound(String), 36 | #[error("Private Key Not Set For Acme Account")] 37 | PrivateKeyNotSet, 38 | #[error("ACME Error {0:?}")] 39 | 
General(String), 40 | } 41 | 42 | /// This is an error as returned by the ACME server. 43 | #[derive(Deserialize, Debug, Clone)] 44 | #[serde(rename_all = "camelCase")] 45 | pub struct AcmeServerError { 46 | pub r#type: Option, 47 | pub title: Option, 48 | pub status: Option, 49 | pub detail: Option, 50 | } 51 | -------------------------------------------------------------------------------- /scripts/update-runtime-version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Script to update version json on release 5 | # Version json is stored in S3 and is used to determine the latest version of the runtime for each major 6 | # The json is structured as follows: 7 | # { 8 | # "latest": "2.0.4", 9 | # "versions": { 10 | # "0": { "latest": "0.0.4", "deprecationDate": "1697719181"}, 11 | # "1": { "latest": "1.0.4" }, 12 | # "2": { "latest": "2.0.4" } 13 | # } 14 | # } 15 | # The top level latest version is used to determine the latest version of the runtime overall 16 | 17 | if [ -z "$1" ]; then 18 | echo "Runtime version is null. Exiting..." 19 | exit 1 20 | fi 21 | 22 | release_version="$1" 23 | 24 | major_version=$(echo "$release_version" | cut -d '.' -f 1) 25 | 26 | echo "Release major version: $major_version" 27 | 28 | version_json=$(curl -s "https://${CAGE_BUILD_ASSETS_HOSTNAME:-enclave-build-assets.evervault.com}/runtime/versions") 29 | echo "Version response: $version_json" 30 | 31 | if [ $? 
-eq 0 ]; then 32 | highest_major_version=$(echo "$version_json" | jq '.versions | keys_unsorted[] | tonumber' | sort -nr | head -1) 33 | echo Highest current major version: $highest_major_version 34 | if [ "$major_version" -ge "$highest_major_version" ]; then 35 | #update overall latest version if release is current major 36 | version_json=$(echo "$version_json" | jq --arg release_version "$release_version" '.latest = $release_version') 37 | else 38 | echo "Major version is less than current highest, not updating top level latest version" 39 | fi 40 | 41 | version_json=$(echo "$version_json" | jq --arg major_version "$major_version" --arg new_version "$release_version" '.versions[$major_version].latest = $new_version') 42 | echo "Updated versions: $version_json" 43 | echo "$version_json" > ./scripts/versions 44 | else 45 | echo "Couldn't get versions from S3 $version_json" 46 | fi -------------------------------------------------------------------------------- /installer/config/net-tools.h: -------------------------------------------------------------------------------- 1 | /* 2 | * config.h Automatically generated configuration includefile 3 | * 4 | * NET-TOOLS A collection of programs that form the base set of the 5 | * NET-3 Networking Distribution for the LINUX operating 6 | * system. 7 | * 8 | * DO NOT EDIT DIRECTLY 9 | * 10 | */ 11 | 12 | /* 13 | * 14 | * Internationalization 15 | * 16 | * The net-tools package has currently been translated to French, 17 | * German and Brazilian Portugese. Other translations are, of 18 | * course, welcome. Answer `n' here if you have no support for 19 | * internationalization on your system. 20 | * 21 | */ 22 | #define I18N 0 23 | 24 | /* 25 | * 26 | * Protocol Families. 
27 | * 28 | */ 29 | #define HAVE_AFUNIX 1 30 | #define HAVE_AFINET 1 31 | #define HAVE_AFINET6 1 32 | #define HAVE_AFIPX 0 33 | #define HAVE_AFATALK 0 34 | #define HAVE_AFAX25 0 35 | #define HAVE_AFNETROM 0 36 | #define HAVE_AFROSE 0 37 | #define HAVE_AFX25 0 38 | #define HAVE_AFECONET 0 39 | #define HAVE_AFDECnet 0 40 | #define HAVE_AFASH 0 41 | #define HAVE_AFBLUETOOTH 0 42 | 43 | /* 44 | * 45 | * Device Hardware types. 46 | * 47 | */ 48 | #define HAVE_HWETHER 0 49 | #define HAVE_HWARC 0 50 | #define HAVE_HWSLIP 0 51 | #define HAVE_HWPPP 0 52 | #define HAVE_HWTUNNEL 0 53 | #define HAVE_HWSTRIP 0 54 | #define HAVE_HWTR 0 55 | #define HAVE_HWAX25 0 56 | #define HAVE_HWROSE 0 57 | #define HAVE_HWNETROM 0 58 | #define HAVE_HWX25 0 59 | #define HAVE_HWFR 0 60 | #define HAVE_HWSIT 0 61 | #define HAVE_HWFDDI 0 62 | #define HAVE_HWHIPPI 0 63 | #define HAVE_HWASH 0 64 | #define HAVE_HWHDLCLAPB 0 65 | #define HAVE_HWIRDA 0 66 | #define HAVE_HWEC 0 67 | #define HAVE_HWEUI64 0 68 | #define HAVE_HWIB 0 69 | 70 | /* 71 | * 72 | * Other Features. 
73 | * 74 | */ 75 | #define HAVE_FW_MASQUERADE 0 76 | #define HAVE_ARP_TOOLS 0 77 | #define HAVE_HOSTNAME_TOOLS 0 78 | #define HAVE_HOSTNAME_SYMLINKS 0 79 | #define HAVE_IP_TOOLS 0 80 | #define HAVE_MII 0 81 | #define HAVE_PLIP_TOOLS 0 82 | #define HAVE_SERIAL_TOOLS 0 83 | #define HAVE_SELINUX 0 -------------------------------------------------------------------------------- /shared/src/server/sni.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | use tls_parser::{ 3 | parse_tls_extensions, parse_tls_plaintext, TlsExtension, TlsMessage, TlsMessageHandshake, 4 | }; 5 | 6 | #[derive(Debug, Error)] 7 | 8 | pub enum SNIError { 9 | #[error("Failed to parse hostname from packet")] 10 | HostnameError, 11 | #[error("Attempted request to banned domain {0}")] 12 | EgressDomainNotAllowed(String), 13 | #[error("Client Hello not found")] 14 | ClientHelloMissing, 15 | #[error("TLS extension missing")] 16 | ExtensionMissing, 17 | #[error("Insufficient data received to parse client hello")] 18 | IncompleteHelloReceived, 19 | } 20 | 21 | impl std::convert::From> for SNIError { 22 | fn from(value: tls_parser::nom::Err) -> Self { 23 | match value { 24 | tls_parser::Err::Incomplete(_) => SNIError::IncompleteHelloReceived, 25 | tls_parser::Err::Error(_) | tls_parser::Err::Failure(_) => SNIError::HostnameError, 26 | } 27 | } 28 | } 29 | 30 | pub fn get_hostname(data: &[u8]) -> Result { 31 | let (_, parsed_request) = parse_tls_plaintext(data).map_err(SNIError::from)?; 32 | 33 | let client_hello = match &parsed_request.msg[0] { 34 | TlsMessage::Handshake(TlsMessageHandshake::ClientHello(client_hello)) => client_hello, 35 | _ => return Err(SNIError::ClientHelloMissing), 36 | }; 37 | 38 | let raw_extensions = match client_hello.ext { 39 | Some(raw_extensions) => raw_extensions, 40 | _ => return Err(SNIError::ExtensionMissing), 41 | }; 42 | 43 | let mut destination = "".to_string(); 44 | let (_, extensions) = 
parse_tls_extensions(raw_extensions).map_err(SNIError::from)?; 45 | 46 | for extension in extensions { 47 | if let TlsExtension::SNI(sni_vec) = extension { 48 | for (_, item) in sni_vec { 49 | if let Ok(hostname) = std::str::from_utf8(item) { 50 | destination = hostname.to_string(); 51 | } 52 | } 53 | } 54 | } 55 | Ok(destination) 56 | } 57 | -------------------------------------------------------------------------------- /e2e-tests/mtls-testing-certs/ca/generate-certs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DELIMETER="*****************************" 4 | 5 | emit_msg() { 6 | echo "$DELIMETER" 7 | echo "$1" 8 | echo "$DELIMETER" 9 | } 10 | 11 | PLATFORM=$(uname -s) 12 | if [ "$PLATFORM" = "Darwin" ]; then 13 | emit_msg "Detected MacOS, setting path to use homebrew openssl" 14 | export PATH=$(brew --prefix openssl)/bin:$PATH #make sure openssl is linked, not libressl 15 | fi 16 | 17 | mkdir -p certs 18 | cd certs 19 | 20 | emit_msg "BEGINNING PROCESS TO GENERATE CERTS" 21 | 22 | emit_msg "GENERATING KEY FOR CA" 23 | openssl genrsa -out ca.key 2048 24 | 25 | emit_msg "GENERATING A SELF SIGNED CERT FOR THE CA" 26 | openssl req -subj "/C=IE/ST=Leinster/L=Dublin/O=Evervault/OU=Engineering/CN=support@evervault.com" -new -x509 -key ca.key -out ca.crt 27 | 28 | emit_msg "GENERATING RSA KEY FOR SERVER AT localhost" 29 | openssl genrsa -out localhost.key 2048 30 | 31 | emit_msg "GENERATING CSR FOR SERVER CERT. 
MAKE SURE TO SET COMMON NAME AS localhost" 32 | openssl req -subj "/C=IE/ST=Leinster/L=Dublin/O=Evervault/OU=Engineering/CN=support@evervault.com" -new -key localhost.key -addext "subjectAltName = DNS:localhost" -out localhost.csr 33 | 34 | emit_msg "GENERATING A CERT SIGNED BY THE CA" 35 | printf "subjectAltName=DNS:localhost" > extfile.cnf 36 | openssl x509 -req -in localhost.csr -CA ca.crt -CAkey ca.key -CAcreateserial -extfile extfile.cnf -out localhost.crt 37 | 38 | emit_msg "GENERATING RSA KEY FOR CLIENT" 39 | openssl genrsa -out client_0.key 2048 40 | 41 | emit_msg "GENERATING CSR FOR CLIENT CERT. MAKE SURE TO SET COMMON NAME AS localhost" 42 | openssl req -subj "/C=IE/ST=Leinster/L=Dublin/O=Evervault/OU=Engineering/CN=support@evervault.com" -new -key client_0.key -addext "subjectAltName = DNS:localhost" -out client_0.csr 43 | 44 | emit_msg "GENERATING A CLIENT CERT SIGNED BY THE CA" 45 | openssl x509 -req -subj "/C=IE/ST=Leinster/L=Dublin/O=Evervault/OU=Engineering/CN=support@evervault.com" -in client_0.csr -CA ca.crt -CAkey ca.key -CAcreateserial -extfile extfile.cnf -out client_0.crt 46 | rm extfile.cnf 47 | 48 | emit_msg "GENERATED CERTS FOR MTLS TESTING" 49 | 50 | cd .. 
-------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | networks: 3 | mynetwork: 4 | ipam: 5 | config: 6 | - subnet: 172.20.0.0/24 7 | services: 8 | statsd: 9 | image: statsd/statsd 10 | restart: always 11 | ports: 12 | - 8125:8125/udp 13 | - 8126:8126 14 | networks: 15 | mynetwork: 16 | ipv4_address: 172.20.0.6 17 | platform: linux/amd64 18 | enclave: 19 | build: 20 | dockerfile: enclave.Dockerfile 21 | dns: 127.0.0.1 22 | cap_add: 23 | - NET_ADMIN 24 | privileged: true 25 | ports: 26 | - "7777:7777" 27 | - "7779:7779" 28 | depends_on: 29 | - statsd 30 | - control-plane 31 | networks: 32 | mynetwork: 33 | ipv4_address: 172.20.0.7 34 | environment: 35 | - EV_API_KEY_AUTH=${EV_API_KEY_AUTH:?No api key auth set, failing fast} 36 | - CUSTOMER_PROCESS=${CUSTOMER_PROCESS} 37 | - AWS_REGION=us-east-1 38 | - TEST_EGRESS_IP=${TEST_EGRESS_IP} 39 | control-plane: 40 | build: 41 | dockerfile: control-plane.Dockerfile 42 | ports: 43 | - "443:3031" 44 | - "3032:3032" 45 | depends_on: 46 | - statsd 47 | networks: 48 | mynetwork: 49 | ipv4_address: 172.20.0.8 50 | environment: 51 | - MOCK_CRYPTO_CERT=${MOCK_CRYPTO_CERT} 52 | - MOCK_CRYPTO_KEY=${MOCK_CRYPTO_KEY} 53 | - MOCK_CERT_PROVISIONER_CLIENT_CERT=${MOCK_CERT_PROVISIONER_CLIENT_CERT} 54 | - MOCK_CERT_PROVISIONER_CLIENT_KEY=${MOCK_CERT_PROVISIONER_CLIENT_KEY} 55 | - MOCK_CERT_PROVISIONER_ROOT_CERT=${MOCK_CERT_PROVISIONER_ROOT_CERT} 56 | - MOCK_CERT_PROVISIONER_SERVER_KEY=${MOCK_CERT_PROVISIONER_SERVER_KEY} 57 | - MOCK_CERT_PROVISIONER_SERVER_CERT=${MOCK_CERT_PROVISIONER_SERVER_CERT} 58 | - ACME_ACCOUNT_EC_KEY=${ACME_ACCOUNT_EC_KEY} 59 | - ACME_ACCOUNT_HMAC_KEY=${ACME_ACCOUNT_HMAC_KEY} 60 | - ACME_ACCOUNT_HMAC_KEY_ID=${ACME_ACCOUNT_HMAC_KEY_ID} 61 | - DEREGISTRATION_TOPIC_ARN=testarn 62 | - EV_API_KEY_AUTH=${EV_API_KEY_AUTH:?No api key auth set, failing fast} 63 | - 
CUSTOMER_PROCESS=${CUSTOMER_PROCESS} 64 | - AWS_REGION=us-east-1 -------------------------------------------------------------------------------- /enclave.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16-alpine3.18 2 | 3 | ENV DATA_PLANE_EXECUTABLE_PATH=/data-plane 4 | ENV DATA_PLANE_SERVICE_PATH=/etc/service/data-plane 5 | ENV START_EV_SERVICES_PATH=/etc/service/ev-services-entrypoint 6 | 7 | RUN apk update &&\ 8 | apk add runit && apk add curl && \ 9 | rm -rf /var/cache/apk/* 10 | RUN apk add iptables 11 | 12 | COPY ./target/x86_64-unknown-linux-musl/release/data-plane $DATA_PLANE_EXECUTABLE_PATH 13 | RUN chmod +x $DATA_PLANE_EXECUTABLE_PATH 14 | 15 | RUN mkdir $DATA_PLANE_SERVICE_PATH 16 | COPY ./e2e-tests/scripts/start-data-plane.sh $DATA_PLANE_SERVICE_PATH/run 17 | RUN chmod +x $DATA_PLANE_SERVICE_PATH/run 18 | 19 | ENV PCR0 000 20 | ENV PCR1 000 21 | ENV PCR2 000 22 | ENV PCR8 000 23 | 24 | # CERTS FOR CERT PROVISIONER 25 | ARG MOCK_CERT_PROVISIONER_CLIENT_CERT 26 | ARG MOCK_CERT_PROVISIONER_CLIENT_KEY 27 | ARG MOCK_CERT_PROVISIONER_ROOT_CERT 28 | ARG MOCK_CERT_PROVISIONER_SERVER_KEY 29 | ARG MOCK_CERT_PROVISIONER_SERVER_CERT 30 | ENV MOCK_CERT_PROVISIONER_CLIENT_CERT $MOCK_CERT_PROVISIONER_CLIENT_CERT 31 | ENV MOCK_CERT_PROVISIONER_CLIENT_KEY $MOCK_CERT_PROVISIONER_CLIENT_KEY 32 | ENV MOCK_CERT_PROVISIONER_ROOT_CERT $MOCK_CERT_PROVISIONER_ROOT_CERT 33 | ENV MOCK_CERT_PROVISIONER_SERVER_KEY $MOCK_CERT_PROVISIONER_SERVER_KEY 34 | ENV MOCK_CERT_PROVISIONER_SERVER_CERT $MOCK_CERT_PROVISIONER_SERVER_CERT 35 | 36 | # USE HTTP OR WS CUSTOMER SERVER 37 | ARG CUSTOMER_PROCESS=httpCustomerProcess.js 38 | 39 | COPY ./e2e-tests/sample-ca/* /services/ 40 | COPY ./e2e-tests/$CUSTOMER_PROCESS /services/$CUSTOMER_PROCESS 41 | COPY ./e2e-tests/package.json /services/package.json 42 | COPY ./e2e-tests/package-lock.json /services/package-lock.json 43 | 44 | RUN cd services && npm i 45 | 46 | RUN mkdir 
/etc/service/customer_process \ 47 | && /bin/sh -c "echo -e '"'#!/bin/sh\nexec /customer_process/customer_process ${CUSTOMER_PROCESS}\n'"' > /etc/service/customer_process/run" \ 48 | && chmod +x /etc/service/customer_process/run 49 | 50 | RUN mkdir /customer_process 51 | 52 | COPY ./e2e-tests/scripts/start_customer_process /customer_process/customer_process 53 | RUN chmod +x /customer_process/customer_process 54 | 55 | CMD ["runsvdir", "/etc/service"] 56 | -------------------------------------------------------------------------------- /control-plane/src/stats/proxy.rs: -------------------------------------------------------------------------------- 1 | use crate::error::Result; 2 | use shared::{ 3 | bridge::{Bridge, BridgeInterface, Direction}, 4 | server::Listener, 5 | }; 6 | use std::net::SocketAddr; 7 | use std::ops::Deref; 8 | use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite}; 9 | use tokio::net::UdpSocket; 10 | 11 | pub struct StatsProxy; 12 | impl StatsProxy { 13 | pub async fn spawn(port: u16, target_addrs: Vec) -> Result<()> { 14 | log::info!("Started control plane stats proxy"); 15 | let mut server = Bridge::get_listener(port, Direction::HostToEnclave).await?; 16 | let target_addrs = std::sync::Arc::new(target_addrs); 17 | while !crate::health::is_draining() { 18 | match server.accept().await { 19 | Ok(stream) => { 20 | let owned_addrs = target_addrs.clone(); 21 | tokio::spawn(async move { 22 | if let Err(e) = Self::proxy_connection(stream, owned_addrs).await { 23 | log::warn!("Error proxying stats connection: {e}"); 24 | } 25 | }); 26 | } 27 | Err(e) => log::error!("Error accepting connection in stats proxy - {e:?}"), 28 | } 29 | } 30 | Ok(()) 31 | } 32 | 33 | async fn proxy_connection( 34 | mut stream: T, 35 | target_addrs: std::sync::Arc>, 36 | ) -> Result<()> { 37 | let socket = UdpSocket::bind("0.0.0.0:0").await?; 38 | let mut request_buffer = [0; 512]; 39 | 40 | loop { 41 | let packet_size = stream.read(&mut request_buffer).await?; 42 | if 
packet_size == 0 { 43 | return Ok(()); 44 | } 45 | for addr in target_addrs.deref() { 46 | if let Err(e) = socket.send_to(&request_buffer[..packet_size], addr).await { 47 | log::error!( 48 | "An error occurred while forwarding metrics to the remote server: {e}" 49 | ); 50 | } 51 | } 52 | request_buffer.fill(0); 53 | } 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /e2e-tests/run-local-cage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | 5 | # kill container if it is left running by hanging test, then generate local testing certs 6 | if [ "${CI:-unset}" = "unset" ]; 7 | then 8 | docker compose down || true 9 | cargo build --release --target x86_64-unknown-linux-musl --features network_egress 10 | . e2e-tests/mtls-testing-certs/ca/generate-certs.sh & 11 | else 12 | # if in CI, generate certs and export them 13 | . e2e-tests/mtls-testing-certs/ca/generate-certs.sh 14 | 15 | MOCK_CRYPTO_CERT=`cat certs/ca.crt` && export MOCK_CRYPTO_CERT 16 | MOCK_CRYPTO_KEY=`cat certs/ca.key` && export MOCK_CRYPTO_KEY 17 | 18 | 19 | MOCK_CERT_PROVISIONER_CLIENT_CERT=`cat certs/client_0.crt` && export MOCK_CERT_PROVISIONER_CLIENT_CERT 20 | MOCK_CERT_PROVISIONER_CLIENT_KEY=`cat certs/client_0.key` && export MOCK_CERT_PROVISIONER_CLIENT_KEY 21 | MOCK_CERT_PROVISIONER_ROOT_CERT=`cat certs/ca.crt` && export MOCK_CERT_PROVISIONER_ROOT_CERT 22 | 23 | MOCK_CERT_PROVISIONER_SERVER_KEY=`cat certs/localhost.key` && export MOCK_CERT_PROVISIONER_SERVER_KEY 24 | MOCK_CERT_PROVISIONER_ROOT_CERT=`cat certs/ca.crt` && export MOCK_CERT_PROVISIONER_ROOT_CERT 25 | MOCK_CERT_PROVISIONER_SERVER_CERT=`cat certs/localhost.crt` && export MOCK_CERT_PROVISIONER_SERVER_CERT 26 | ACME_ACCOUNT_EC_KEY=`cat ./e2e-tests/acme-key/key.pem` && export ACME_ACCOUNT_EC_KEY 27 | ACME_ACCOUNT_HMAC_KEY="cGxhY2Vob2xkZXI=" && export ACME_ACCOUNT_HMAC_KEY 28 | ACME_ACCOUNT_HMAC_KEY_ID="placeholder_id" && 
export ACME_ACCOUNT_HMAC_KEY_ID 29 | ACME_S3_BUCKET="enclaves-acme-local" && export ACME_S3_BUCKET # fix: closing quote was misplaced after '&& export …', so the var held a broken value and was never exported 30 | fi 31 | 32 | cargo build --release --target x86_64-unknown-linux-musl 33 | 34 | # install the node modules for customer process and test script 35 | cd e2e-tests && npm install && cd .. 36 | 37 | # Compile mock crypto api 38 | if [[ -z "${CI}" ]]; 39 | then 40 | cd ./e2e-tests/mock-crypto 41 | cargo build --release --target x86_64-unknown-linux-musl -Z registry-auth 42 | cd ../.. 43 | fi 44 | 45 | export CUSTOMER_PROCESS=httpCustomerProcess.js 46 | echo "Building enclave container" 47 | docker compose build --build-arg CUSTOMER_PROCESS=httpCustomerProcess.js 48 | 49 | echo "Running enclave container" 50 | # run the container 51 | EV_API_KEY_AUTH=true docker compose up -------------------------------------------------------------------------------- /installer/scripts/installer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | IFCONFIG_PATH=`command -v ifconfig` 4 | if [ -z "$IFCONFIG_PATH" ]; then 5 | echo "Installing prebuilt ifconfig" 6 | IFCONFIG_TARGET_PATH=/usr/local/bin/ifconfig 7 | install -m 0755 net-tools-2.10/ifconfig "$IFCONFIG_TARGET_PATH" 8 | IFCONFIG_PATH_POST_INSTALL=`command -v ifconfig` 9 | test "$IFCONFIG_PATH_POST_INSTALL" = "$IFCONFIG_TARGET_PATH" || exit 1 10 | echo "ifconfig installed successfully" 11 | fi 12 | 13 | RUNIT_PATH=`command -v runit` 14 | if [ -z "$RUNIT_PATH" ]; then 15 | echo "Installing prebuilt runit" 16 | cd runit-2.1.2 17 | sh ./package/upgrade 18 | RUNIT_PATH_POST_INSTALL=`command -v runit` 19 | if [ -z "$RUNIT_PATH_POST_INSTALL" ]; then 20 | exit 2 21 | fi 22 | cd ..
23 | echo "runit installed successfully" 24 | fi 25 | 26 | IPTABLES_PATH=`command -v iptables` 27 | if [ -z "$IPTABLES_PATH" ]; then 28 | echo "Installing prebuilt iptables" 29 | IPTABLES_TARGET_PATH=/usr/local/bin/iptables 30 | mkdir -p /packages/iptables-1.8.10 31 | install -m 0755 ./iptables-1.8.10/iptables/xtables-legacy-multi "$IPTABLES_TARGET_PATH" 32 | IPTABLES_PATH_POST_INSTALL=`command -v iptables` 33 | echo "IPTABLES_PATH_POST_INSTALL: $IPTABLES_PATH_POST_INSTALL" 34 | test "$IPTABLES_PATH_POST_INSTALL" = "$IPTABLES_TARGET_PATH" || exit 1 35 | echo "iptables installed successfully" 36 | fi 37 | 38 | 39 | IP6TABLES_PATH=`command -v ip6tables` 40 | if [ -z "$IP6TABLES_PATH" ]; then 41 | IP6TABLES_TARGET_PATH=/usr/local/bin/ip6tables 42 | echo "Installing prebuilt ip6tables" 43 | install -m 0755 ./iptables-1.8.10/iptables/xtables-legacy-multi "$IP6TABLES_TARGET_PATH" 44 | IP6TABLES_PATH_POST_INSTALL=`command -v ip6tables` 45 | echo "IP6TABLES_PATH_POST_INSTALL: $IP6TABLES_PATH_POST_INSTALL" 46 | test "$IP6TABLES_PATH_POST_INSTALL" = "$IP6TABLES_TARGET_PATH" || exit 1 47 | echo "ip6tables installed successfully" 48 | fi 49 | 50 | 51 | IP_PATH=`command -v ip` 52 | if [ -z "$IP_PATH" ]; then 53 | echo "Installing prebuilt ip" 54 | IP_TARGET_PATH=/usr/local/bin/ip 55 | install -m 0755 ./iproute2-6.7.0/ip "$IP_TARGET_PATH" 56 | IP_PATH_POST_INSTALL=`command -v ip` 57 | echo "IP_PATH_POST_INSTALL: $IP_PATH_POST_INSTALL" 58 | test "$IP_PATH_POST_INSTALL" = "$IP_TARGET_PATH" || exit 1 59 | echo "ip installed successfully" 60 | fi 61 | 62 | exit 0 -------------------------------------------------------------------------------- /shared/src/utils.rs: -------------------------------------------------------------------------------- 1 | use std::io::ErrorKind; 2 | 3 | use tokio::io::{AsyncRead, AsyncWrite}; 4 | 5 | pub async fn pipe_streams(mut src: T1, mut dest: T2) -> Result<(), tokio::io::Error> 6 | where 7 | T1: AsyncRead + AsyncWrite + Unpin, 8 | T2: AsyncRead + 
AsyncWrite + Unpin, 9 | { 10 | match tokio::io::copy_bidirectional(&mut src, &mut dest).await { 11 | Ok(_) => Ok(()), 12 | Err(e) if e.kind() == ErrorKind::BrokenPipe => Ok(()), 13 | Err(e) => Err(e), 14 | } 15 | } 16 | 17 | pub struct HexSlice<'a>(&'a [u8]); 18 | 19 | impl std::fmt::UpperHex for HexSlice<'_> { 20 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 21 | for byte in self.0 { 22 | write!(f, "{byte:02X}")?; 23 | } 24 | Ok(()) 25 | } 26 | } 27 | 28 | impl std::fmt::LowerHex for HexSlice<'_> { 29 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 30 | for byte in self.0 { 31 | write!(f, "{byte:02x}")?; 32 | } 33 | Ok(()) 34 | } 35 | } 36 | 37 | impl<'a> std::convert::From<&'a [u8]> for HexSlice<'a> { 38 | fn from(slice: &'a [u8]) -> Self { 39 | Self(slice) 40 | } 41 | } 42 | 43 | #[macro_export] 44 | macro_rules! print_version { 45 | ($label:tt) => { 46 | if let Some(version) = option_env!("CARGO_PKG_VERSION") { 47 | log::info!("{}@{} running", $label, version); 48 | } 49 | }; 50 | } 51 | 52 | #[macro_export] 53 | macro_rules! 
env_var_present_and_true { 54 | ($var_name:tt) => { 55 | match std::env::var($var_name) { 56 | Ok(s) if s.as_str() == "true" => true, 57 | _ => false, 58 | } 59 | }; 60 | } 61 | 62 | #[cfg(test)] 63 | mod tests { 64 | use super::HexSlice; 65 | 66 | #[test] 67 | fn test_upper_hex_slice_formatting() { 68 | let slice: [u8; 2] = [255, 3]; 69 | let hex_slice = HexSlice(slice.as_slice()); 70 | let expected_hex = "FF03".to_string(); 71 | assert_eq!(format!("{hex_slice:X}"), expected_hex); 72 | } 73 | 74 | #[test] 75 | fn test_lower_hex_slice_formatting() { 76 | let slice: [u8; 2] = [255, 90]; 77 | let hex_slice = HexSlice(slice.as_slice()); 78 | let expected_hex = "ff5a".to_string(); 79 | assert_eq!(format!("{hex_slice:x}"), expected_hex); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /e2e-tests/run-tls-disabled-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # kill container if it is left running by hanging test, then generate local testing certs 5 | if [ "${CI:-unset}" = "unset" ]; 6 | then 7 | docker compose down || true 8 | cargo build --release --target x86_64-unknown-linux-musl --no-default-features 9 | . e2e-tests/mtls-testing-certs/ca/generate-certs.sh & 10 | else 11 | # if in CI, generate certs and export them 12 | . 
e2e-tests/mtls-testing-certs/ca/generate-certs.sh 13 | 14 | MOCK_CRYPTO_CERT=`cat certs/ca.crt` && export MOCK_CRYPTO_CERT 15 | MOCK_CRYPTO_KEY=`cat certs/ca.key` && export MOCK_CRYPTO_KEY 16 | 17 | 18 | MOCK_CERT_PROVISIONER_CLIENT_CERT=`cat certs/client_0.crt` && export MOCK_CERT_PROVISIONER_CLIENT_CERT 19 | MOCK_CERT_PROVISIONER_CLIENT_KEY=`cat certs/client_0.key` && export MOCK_CERT_PROVISIONER_CLIENT_KEY 20 | MOCK_CERT_PROVISIONER_ROOT_CERT=`cat certs/ca.crt` && export MOCK_CERT_PROVISIONER_ROOT_CERT 21 | 22 | MOCK_CERT_PROVISIONER_SERVER_KEY=`cat certs/localhost.key` && export MOCK_CERT_PROVISIONER_SERVER_KEY 23 | MOCK_CERT_PROVISIONER_ROOT_CERT=`cat certs/ca.crt` && export MOCK_CERT_PROVISIONER_ROOT_CERT 24 | MOCK_CERT_PROVISIONER_SERVER_CERT=`cat certs/localhost.crt` && export MOCK_CERT_PROVISIONER_SERVER_CERT 25 | ACME_ACCOUNT_EC_KEY=`cat ./e2e-tests/acme-key/key.pem` && export ACME_ACCOUNT_EC_KEY 26 | ACME_ACCOUNT_HMAC_KEY="cGxhY2Vob2xkZXI=" && export ACME_ACCOUNT_HMAC_KEY 27 | ACME_ACCOUNT_HMAC_KEY_ID="placeholder_id" && export ACME_ACCOUNT_HMAC_KEY_ID 28 | fi 29 | 30 | # install the node modules for customer process and test script 31 | cd e2e-tests && npm install && cd .. 32 | 33 | # Compile mock crypto api 34 | if [[ -z "${CI}" ]]; 35 | then 36 | cd ./e2e-tests/mock-crypto 37 | cargo build --release --target x86_64-unknown-linux-musl 38 | cd ../.. 39 | fi 40 | 41 | export CUSTOMER_PROCESS=httpCustomerProcess.js 42 | 43 | echo "Building enclave container CI" 44 | export EV_API_KEY_AUTH=true 45 | docker compose build --build-arg CUSTOMER_PROCESS=httpCustomerProcess.js 46 | 47 | echo "Running enclave container" 48 | # run the container 49 | docker compose up -d 50 | echo "SLEEPING 15 SECONDS to let enclave initialize..." 
51 | sleep 15 52 | 53 | docker compose logs --tail enclaves-enclaves 54 | 55 | echo "Running end-to-end tests for enclave without TLS termination" 56 | cd e2e-tests && npm run no-tls-termination-tests || ($(docker compose logs --tail enclaves-enclaves) && false) 57 | 58 | echo "Tests complete" 59 | docker compose down 60 | 61 | 62 | -------------------------------------------------------------------------------- /.github/workflows/test-data-plane.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - data-plane/** 5 | - shared/** 6 | - .github/workflows/test-data-plane.yml 7 | - Cargo.lock 8 | name: Test Data Plane 9 | jobs: 10 | check_data_plane: 11 | strategy: 12 | matrix: 13 | feature-flags: 14 | [ 15 | "tls_termination,enclave", 16 | "tls_termination,not_enclave", 17 | "enclave", 18 | "not_enclave", 19 | "tls_termination,enclave,network_egress", 20 | "tls_termination,not_enclave,network_egress", 21 | "enclave,network_egress", 22 | "not_enclave,network_egress", 23 | ] 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v2 27 | - name: Install MUSL Tools 28 | run: | 29 | sudo apt-get update 30 | sudo apt-get install -y musl-tools 31 | - uses: dtolnay/rust-toolchain@stable 32 | with: 33 | targets: x86_64-unknown-linux-musl 34 | - uses: Swatinem/rust-cache@v2 35 | with: 36 | shared-key: "standard-cache" 37 | - name: Compile project with features ${{ matrix.feature-flags }} 38 | run: cargo check -p data-plane --no-default-features --features ${{ matrix.feature-flags }} 39 | 40 | test_data_plane: 41 | needs: [check_data_plane] 42 | strategy: 43 | matrix: 44 | feature-flags: 45 | [ 46 | "not_enclave", 47 | "not_enclave,network_egress", 48 | "not_enclave,tls_termination", 49 | "not_enclave,tls_termination,network_egress", 50 | ] 51 | runs-on: ubuntu-latest 52 | steps: 53 | - uses: actions/checkout@v2 54 | - name: Install MUSL Tools 55 | run: | 56 | sudo apt-get update 57 | sudo apt-get 
install -y musl-tools 58 | - uses: dtolnay/rust-toolchain@stable 59 | with: 60 | targets: x86_64-unknown-linux-musl 61 | - uses: Swatinem/rust-cache@v2 62 | with: 63 | shared-key: "standard-cache" 64 | - name: Test project with features ${{ matrix.feature-flags }} 65 | run: cargo test -p data-plane --no-default-features --features ${{ matrix.feature-flags }} 66 | - name: Test project with features ${{ matrix.feature-flags }} using staging flag 67 | run: cargo test -p data-plane --no-default-features --features ${{ matrix.feature-flags }} 68 | env: 69 | RUSTFLAGS: "--cfg staging" 70 | -------------------------------------------------------------------------------- /data-plane/src/time/mod.rs: -------------------------------------------------------------------------------- 1 | use libc::{clock_settime, timespec, CLOCK_REALTIME}; 2 | use std::io::Error; 3 | use std::time::SystemTimeError; 4 | use thiserror::Error; 5 | use tokio::time; 6 | use tokio::time::Duration; 7 | 8 | use crate::config_client::ConfigClient; 9 | use crate::config_client::StorageConfigClientInterface; 10 | use crate::error::Error as DataPlaneError; 11 | 12 | #[derive(Error, Debug)] 13 | pub enum ClockSyncError { 14 | #[error(transparent)] 15 | Error(#[from] DataPlaneError), 16 | #[error("Clock sync error: {0}")] 17 | SyncError(String), 18 | #[error("Clock sync error: {0}")] 19 | SystemTimeError(#[from] SystemTimeError), 20 | } 21 | pub struct ClockSync; 22 | 23 | impl ClockSync { 24 | pub async fn run(interval_duration: Duration) { 25 | let mut interval = time::interval(interval_duration); 26 | let config_client = ConfigClient::new(); 27 | loop { 28 | interval.tick().await; 29 | if let Err(e) = Self::sync_time_from_host(&config_client).await { 30 | log::error!("{e:?}") 31 | } 32 | } 33 | } 34 | 35 | async fn sync_time_from_host(config_client: &ConfigClient) -> Result<(), ClockSyncError> { 36 | let request_timer = std::time::SystemTime::now(); 37 | let time = 
config_client.get_time_from_host().await?; 38 | let elapsed = request_timer.elapsed()?; 39 | 40 | // On startup the request can take a while so skip the sync till the proxies have stabilized 41 | if elapsed.as_millis() > 500 { 42 | log::info!( 43 | "Skipping clock sync because request took {}ms", 44 | elapsed.as_millis() 45 | ); 46 | return Ok(()); 47 | } 48 | let ts = timespec { 49 | tv_sec: time.seconds, 50 | tv_nsec: time.milliseconds, // NOTE(review): tv_nsec expects nanoseconds; if this field truly carries milliseconds (the log below prints it as fractional seconds) it likely needs `* 1_000_000` — confirm units of the host payload 51 | }; 52 | 53 | let result = unsafe { clock_settime(CLOCK_REALTIME, &ts as *const timespec) }; 54 | if result == 0 { 55 | log::info!( 56 | "Enclave time synced with host successfully - {}.{}s. Request round trip took {}ns", 57 | time.seconds, 58 | time.milliseconds, 59 | elapsed.as_nanos() 60 | ); 61 | Ok(()) 62 | } else { 63 | Err(ClockSyncError::SyncError(format!( 64 | "Could not sync enclave time with host {:?}", 65 | Error::last_os_error() 66 | ))) 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /data-plane/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "data-plane" 3 | version = "1.0.0-beta" 4 | edition = "2021" 5 | authors = ["Evervault "] 6 | 7 | [dependencies] 8 | hyper = { version = "0.14.4", features = [ 9 | "server", 10 | "http1", 11 | "http2", 12 | "tcp", 13 | "stream", 14 | "client", 15 | ] } 16 | tokio = { version = "1.24.2", features = [ 17 | "net", 18 | "macros", 19 | "rt", 20 | "rt-multi-thread", 21 | "io-util", 22 | "time", 23 | ] } 24 | openssl = { workspace = true } 25 | chrono = { version = "0.4.22", default-features = false, features = ["serde"] } 26 | aws-nitro-enclaves-nsm-api = "0.2.1" 27 | aws-nitro-enclaves-cose = "0.5.0" 28 | serde_cbor = "0.11" 29 | lazy_static = "1.4.0" 30 | async-trait = "0.1.56" 31 | thiserror = "1.0" 32 | bytes = "1" 33 | nom = { version = "7.1.1", optional = true } 34 | tokio-util = { version = "0.6", features = ["full"] } 35 | futures = "0.3.21" 36 |
tokio-rustls = { version = "0.24.1", features = ["dangerous_configuration"] } 37 | tokio-vsock = { version = "0.3.2", optional = true } 38 | shared = { path = "../shared", default-features = false } 39 | serde = { version = "=1.0.200", features = ["derive"] } 40 | serde_bytes = "0.11.6" 41 | serde_json = "1.0.83" 42 | sha2 = "0.10.2" 43 | rand = { version = "0.8.5" } 44 | webpki-roots = "0.25.2" 45 | pem = "1.1.0" 46 | base64 = "0.13.0" 47 | once_cell = "1.17.0" 48 | cached = "0.54.0" 49 | sys-info = "0.9.1" 50 | cadence.workspace = true 51 | cadence-macros.workspace = true 52 | tokio-retry = "0.3.0" 53 | httparse = "1.8.0" 54 | mockall = "0.11.4" 55 | uuid = { version = "1.4.1", features = ["v4"] } 56 | log = { version = "0.4.19", features = ["max_level_debug"] } 57 | rlimit = { version = "0.10.1", optional = true } 58 | hyper-rustls = { version = "0.24.1", default-features = false, features = [ 59 | "http1", 60 | "http2", 61 | "tls12", 62 | "tokio-runtime", 63 | ] } 64 | chrono-tz = { version = "0.8.3" } 65 | tower = { version = "0.4.13", features = ["util"] } 66 | tower-http = { version = "0.5.0", features = ["catch-panic"] } 67 | libc = "0.2.150" 68 | serial_test = "3.0.0" 69 | regex = "1.10.6" 70 | 71 | 72 | [dev-dependencies] 73 | tokio-test = "0.4.2" 74 | yup-hyper-mock = "6.0.0" 75 | 76 | [features] 77 | default = ["tls_termination"] 78 | tls_termination = ["dep:nom"] 79 | network_egress = ["shared/network_egress"] 80 | enclave = ["dep:tokio-vsock", "shared/enclave", "dep:rlimit"] 81 | not_enclave = [] 82 | release_logging = ["log/release_max_level_info"] 83 | 84 | [lints.rust] 85 | unexpected_cfgs = { level = "allow", check-cfg = ['cfg(staging)'] } -------------------------------------------------------------------------------- /data-plane/src/acme/error.rs: -------------------------------------------------------------------------------- 1 | use std::string::FromUtf8Error; 2 | 3 | use serde::Deserialize; 4 | use thiserror::Error; 5 | 6 | use 
crate::{base_tls_client::ClientError, error, ContextError}; 7 | 8 | #[derive(Debug, Error)] 9 | pub enum AcmeError { 10 | #[error("IO Error — {0:?}")] 11 | IoError(#[from] std::io::Error), 12 | #[error("Hyper Error — {0:?}")] 13 | HyperError(#[from] hyper::Error), 14 | #[error("Deserialization Error — {0:?}")] 15 | SerdeError(#[from] serde_json::Error), 16 | #[error("Request to server failed with status: {0:?}")] 17 | FailedRequest(hyper::StatusCode), 18 | #[error("Client Error — {0}")] 19 | ClientError(String), 20 | #[error("HTTP Error — {0:?}")] 21 | HttpError(#[from] hyper::http::Error), 22 | #[error("No Nonce Found")] 23 | NoNonce, 24 | #[error("Nonce Mutex Poison Error - {0:?}")] 25 | PoisonError(String), 26 | #[error("Http Header Conversion Error")] 27 | HeaderConversionError(#[from] hyper::header::ToStrError), 28 | #[error("OpenSSL Error — {0:?}")] 29 | OpenSSLError(#[from] openssl::error::ErrorStack), 30 | #[error("Base64 Decode Error — {0:?}")] 31 | Base64DecodeError(#[from] base64::DecodeError), 32 | #[error("Error interpretting utf8 sequence — {0:?}")] 33 | Utf8Error(#[from] std::str::Utf8Error), 34 | #[error("Error interpretting utf8 sequence — {0:?}")] 35 | FromUtf8Error(#[from] FromUtf8Error), 36 | #[error("No directory for acme account - {0:?}")] 37 | NoDirectory(String), 38 | #[error("Error creating CSR - {0:?}")] 39 | CsrError(String), 40 | #[error("{0:?} Field Not Found")] 41 | FieldNotFound(String), 42 | #[error("Config Client Error {0:?}")] 43 | ConfigClient(#[from] error::Error), 44 | #[error("E3 Client Error {0:?}")] 45 | E3ClientError(#[from] ClientError), 46 | #[error("Chrono DataTime Parse Error - {0:?}")] 47 | ParseError(#[from] chrono::ParseError), 48 | #[error("PEM Error - {0:?}")] 49 | PEMError(#[from] pem::PemError), 50 | #[error("Rustls Error - {0:?}")] 51 | RustlsSignError(#[from] tokio_rustls::rustls::sign::SignError), 52 | #[error("Failed to access context - {0}")] 53 | ContextError(#[from] ContextError), 54 | #[error("System Time 
Error - {0:?}")] 55 | SystemTimeError(#[from] std::time::SystemTimeError), 56 | #[error("ACME Error {0:?}")] 57 | AcmeError(#[from] shared::acme::error::AcmeError), 58 | #[error("ACME Error {0:?}")] 59 | General(String), 60 | } 61 | 62 | /// This is an error as returned by the ACME server. 63 | #[derive(Deserialize, Debug, Clone)] 64 | #[serde(rename_all = "camelCase")] 65 | pub struct AcmeServerError { 66 | pub r#type: Option, 67 | pub title: Option, 68 | pub status: Option, 69 | pub detail: Option, 70 | } 71 | -------------------------------------------------------------------------------- /data-plane/src/crypto/token.rs: -------------------------------------------------------------------------------- 1 | use crate::cache::E3_TOKEN; 2 | use crate::config_client::ConfigClient; 3 | #[cfg(feature = "enclave")] 4 | use crate::crypto::attest::AttestationError; 5 | use cached::Cached; 6 | use hyper::header::InvalidHeaderValue; 7 | use hyper::http::HeaderValue; 8 | use thiserror::Error; 9 | 10 | #[derive(Error, Debug)] 11 | pub enum TokenError { 12 | #[error(transparent)] 13 | Io(#[from] std::io::Error), 14 | #[error("Error - {0:?}")] 15 | Error(#[from] crate::error::Error), 16 | #[cfg(feature = "enclave")] 17 | #[error("Attestation error - {0:?}")] 18 | AttestationError(#[from] AttestationError), 19 | #[error("Invalid header value - {0:?}")] 20 | InvalidHeaderValue(#[from] InvalidHeaderValue), 21 | } 22 | 23 | #[derive(Clone, Debug)] 24 | pub struct AttestationAuth { 25 | pub token: HeaderValue, 26 | pub doc: HeaderValue, 27 | } 28 | 29 | #[derive(Clone)] 30 | pub struct TokenClient { 31 | config: ConfigClient, 32 | } 33 | 34 | impl Default for TokenClient { 35 | fn default() -> Self { 36 | Self::new() 37 | } 38 | } 39 | 40 | impl TokenClient { 41 | pub fn new() -> Self { 42 | TokenClient { 43 | config: ConfigClient::new(), 44 | } 45 | } 46 | 47 | pub async fn get_token(&self) -> Result { 48 | let token_key: String = "e3_token".to_string(); 49 | let mut cache = 
E3_TOKEN.lock().await; 50 | let auth = match cache.cache_get(&token_key) { 51 | Some(token) => token.clone(), 52 | None => { 53 | let response = self.config.get_e3_token().await?; 54 | let doc = Self::get_attestation_doc_token(response.token_id().into_bytes())?; 55 | let token_header = hyper::http::header::HeaderValue::from_str(&response.token())?; 56 | let attestation_header = hyper::http::header::HeaderValue::from_str(&doc)?; 57 | let token = AttestationAuth { 58 | token: token_header, 59 | doc: attestation_header, 60 | }; 61 | cache.cache_set(token_key, token.clone()); 62 | token 63 | } 64 | }; 65 | Ok(auth) 66 | } 67 | 68 | #[cfg(feature = "enclave")] 69 | fn get_attestation_doc_token(nonce: Vec) -> Result { 70 | use crate::crypto::attest; 71 | use openssl::base64::encode_block; 72 | 73 | let attestation_doc = attest::get_attestation_doc(None, Some(nonce), None)?; 74 | Ok(encode_block(&attestation_doc)) 75 | } 76 | 77 | #[cfg(not(feature = "enclave"))] 78 | fn get_attestation_doc_token(_: Vec) -> Result { 79 | Ok("local-attestation-token".to_string()) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /.github/workflows/test-e2e.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - e2e-tests/** 5 | - data-plane/** 6 | - control-plane/** 7 | - .github/workflows/test-e2e.yml 8 | - Cargo.lock 9 | name: Run end-to-end tests 10 | jobs: 11 | run_e2e_tests_full_features: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | - uses: Swatinem/rust-cache@v2 16 | - name: Install MUSL Tools 17 | run: | 18 | sudo apt-get update 19 | sudo apt-get install -y musl-tools 20 | - uses: dtolnay/rust-toolchain@stable 21 | with: 22 | targets: x86_64-unknown-linux-musl 23 | - name: Build Cages Project 24 | run: cargo build --features network_egress --release --workspace --exclude vsock-proxy 25 | - name: Build mock crypto API 26 | 
working-directory: e2e-tests/mock-crypto 27 | run: cargo build --release 28 | env: 29 | CARGO_REGISTRIES_EVERVAULT_RUST_LIBRARIES_INDEX: ${{ secrets.RUST_CRYPTO_REGISTRY }} 30 | - uses: actions-hub/docker/cli@f5fdbfc3f9d2a9265ead8962c1314108a7b7ec5d 31 | env: 32 | SKIP_LOGIN: true 33 | - uses: actions/setup-node@v1 34 | with: 35 | node-version: "16.x" 36 | - name: Run end-to-end test script 37 | env: 38 | CI: true 39 | EV_API_KEY: ${{ secrets.EV_API_KEY }} 40 | run: | 41 | sh e2e-tests/generate-sample-ca.sh 42 | sh e2e-tests/run-all-feature-tests.sh 43 | run_e2e_tests_no_tls_termination: 44 | runs-on: ubuntu-latest 45 | steps: 46 | - uses: actions/checkout@v2 47 | - name: Install MUSL Tools 48 | run: | 49 | sudo apt-get update 50 | sudo apt-get install -y musl-tools 51 | - uses: Swatinem/rust-cache@v2 52 | - uses: dtolnay/rust-toolchain@stable 53 | with: 54 | targets: x86_64-unknown-linux-musl 55 | - name: Build Cages Project 56 | run: cargo build --release --features not_enclave --no-default-features --workspace --exclude vsock-proxy 57 | - name: Build mock crypto API 58 | working-directory: e2e-tests/mock-crypto 59 | run: cargo build --release --target x86_64-unknown-linux-musl 60 | env: 61 | CARGO_REGISTRIES_EVERVAULT_RUST_LIBRARIES_INDEX: ${{ secrets.RUST_CRYPTO_REGISTRY }} 62 | - uses: actions-hub/docker/cli@f5fdbfc3f9d2a9265ead8962c1314108a7b7ec5d 63 | env: 64 | SKIP_LOGIN: true 65 | - uses: actions/setup-node@v1 66 | with: 67 | node-version: "16.x" 68 | - name: Run end-to-end test script 69 | env: 70 | CI: true 71 | EV_API_KEY: ${{ secrets.EV_API_KEY }} 72 | run: | 73 | sh e2e-tests/generate-sample-ca.sh 74 | sh e2e-tests/run-tls-disabled-tests.sh 75 | -------------------------------------------------------------------------------- /control-plane.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16-alpine3.18 2 | 3 | ENV CONTROL_PLANE_EXECUTABLE_PATH=/control-plane 4 | ENV 
CONTROL_PLANE_SERVICE_PATH=/etc/service/control-plane 5 | 6 | 7 | # CERTS FOR CRYPTO API 8 | ARG MOCK_CRYPTO_CERT 9 | ARG MOCK_CRYPTO_KEY 10 | ENV MOCK_CRYPTO_CERT $MOCK_CRYPTO_CERT 11 | ENV MOCK_CRYPTO_KEY $MOCK_CRYPTO_KEY 12 | 13 | ARG MOCK_CERT_PROVISIONER_CLIENT_CERT 14 | ARG MOCK_CERT_PROVISIONER_CLIENT_KEY 15 | ARG MOCK_CERT_PROVISIONER_ROOT_CERT 16 | ARG MOCK_CERT_PROVISIONER_SERVER_KEY 17 | ARG MOCK_CERT_PROVISIONER_SERVER_CERT 18 | ENV MOCK_CERT_PROVISIONER_CLIENT_CERT $MOCK_CERT_PROVISIONER_CLIENT_CERT 19 | ENV MOCK_CERT_PROVISIONER_CLIENT_KEY $MOCK_CERT_PROVISIONER_CLIENT_KEY 20 | ENV MOCK_CERT_PROVISIONER_ROOT_CERT $MOCK_CERT_PROVISIONER_ROOT_CERT 21 | ENV MOCK_CERT_PROVISIONER_SERVER_KEY $MOCK_CERT_PROVISIONER_SERVER_KEY 22 | ENV MOCK_CERT_PROVISIONER_SERVER_CERT $MOCK_CERT_PROVISIONER_SERVER_CERT 23 | 24 | ARG EV_APP_UUID 25 | ENV EV_APP_UUID $EV_APP_UUID 26 | ARG EV_API_KEY 27 | ENV EV_API_KEY $EV_API_KEY 28 | 29 | RUN apk update &&\ 30 | apk add runit && apk add curl && \ 31 | rm -rf /var/cache/apk/* 32 | 33 | COPY ./e2e-tests/mock-crypto/target/x86_64-unknown-linux-musl/release/mock-crypto /services/ 34 | RUN chmod +x /services/mock-crypto 35 | 36 | RUN mkdir /etc/service/mock_process \ 37 | && /bin/sh -c "echo -e '"'#!/bin/sh\nexec /mock_process/mock_process\n'"' > /etc/service/mock_process/run" \ 38 | && chmod +x /etc/service/mock_process/run 39 | 40 | RUN mkdir /mock_process 41 | 42 | COPY ./e2e-tests/scripts/start_mock_process /mock_process/mock_process 43 | RUN chmod +x /mock_process/mock_process 44 | 45 | COPY ./target/x86_64-unknown-linux-musl/release/control-plane $CONTROL_PLANE_EXECUTABLE_PATH 46 | RUN chmod +x $CONTROL_PLANE_EXECUTABLE_PATH 47 | 48 | RUN mkdir $CONTROL_PLANE_SERVICE_PATH 49 | COPY ./e2e-tests/mtls-testing-certs/ca/* /$CONTROL_PLANE_SERVICE_PATH/ 50 | COPY ./e2e-tests/scripts/start-control-plane.sh $CONTROL_PLANE_SERVICE_PATH/run 51 | RUN chmod +x $CONTROL_PLANE_SERVICE_PATH/run 52 | 53 | COPY 
./e2e-tests/mockCertProvisionerApi.js ./e2e-tests/mtls-testing-certs/ca/* /services/ 54 | COPY ./e2e-tests/sample-ca/* /services/ 55 | COPY ./e2e-tests/package.json /services/package.json 56 | COPY ./e2e-tests/package-lock.json /services/package-lock.json 57 | 58 | RUN cd services && npm i 59 | 60 | RUN mkdir /etc/service/mock_cert_provisioner \ 61 | && /bin/sh -c "echo -e '"'#!/bin/sh\nexec /mock_cert_provisioner/start_mock_cert_provisioner\n'"' > /etc/service/mock_cert_provisioner/run" \ 62 | && chmod +x /etc/service/mock_cert_provisioner/run 63 | 64 | RUN mkdir /mock_cert_provisioner 65 | 66 | COPY ./e2e-tests/scripts/start_mock_cert_provisioner /mock_cert_provisioner/start_mock_cert_provisioner 67 | RUN chmod +x /mock_cert_provisioner/start_mock_cert_provisioner 68 | 69 | CMD ["runsvdir", "/etc/service"] 70 | -------------------------------------------------------------------------------- /e2e-tests/mock-crypto/src/encrypt_mock.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Display; 2 | use thiserror::Error; 3 | use rand::RngCore; 4 | 5 | #[derive(Debug,Error)] 6 | pub enum MockCryptoError { 7 | #[error(transparent)] 8 | SerdeError(#[from] serde_json::Error), 9 | #[error("Invalid cipher received")] 10 | InvalidCipher 11 | } 12 | 13 | struct EncryptedValue(serde_json::Value); 14 | 15 | impl Display for EncryptedValue { 16 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 17 | let val_str = convert_value_to_string(&self.0); 18 | let encoded_val = base64::encode(&val_str); 19 | match get_string_repr_for_serde_value(&self.0) { 20 | Some(val) => write!(f, "ev:{}:{}:{}:{}:$", val, mock_iv(), mock_pub_key(), encoded_val), 21 | None => write!(f, "ev:{}:{}:{}:$", mock_iv(), mock_pub_key(), encoded_val), 22 | } 23 | } 24 | } 25 | 26 | impl std::convert::Into for EncryptedValue { 27 | fn into(self) -> serde_json::Value { 28 | self.0 29 | } 30 | } 31 | 32 | impl std::convert::TryFrom for 
EncryptedValue { 33 | type Error = MockCryptoError; 34 | 35 | fn try_from(val: String) -> Result { 36 | if !val.starts_with("ev:") || !val.ends_with(":$") { 37 | return Err(MockCryptoError::InvalidCipher); 38 | } 39 | // Use second last token as value 40 | let mut tokens = val.split(":").collect::>().into_iter(); 41 | let decoded_val = base64::decode(tokens.nth_back(1).unwrap()).map_err(|_| MockCryptoError::InvalidCipher)?; 42 | let decoded_str = std::str::from_utf8(&decoded_val).map_err(|_| MockCryptoError::InvalidCipher)?; 43 | let parsed_val: serde_json::Value = serde_json::from_str(decoded_str).unwrap_or_else(|_| serde_json::Value::String(decoded_str.to_string())); 44 | Ok(Self(parsed_val)) 45 | } 46 | } 47 | 48 | fn get_string_repr_for_serde_value(val: &serde_json::Value) -> Option { 49 | let repr = match val { 50 | serde_json::Value::String(_) => { 51 | return None; 52 | }, 53 | serde_json::Value::Number(_) => "number".to_string(), 54 | serde_json::Value::Bool(_) => "boolean".to_string(), 55 | _ => unimplemented!("Ciphertexts can only represent primitives") 56 | }; 57 | Some(repr) 58 | } 59 | 60 | pub fn encrypt(value: serde_json::Value) -> serde_json::Value { 61 | serde_json::Value::String(format!("{}", EncryptedValue(value))) 62 | } 63 | 64 | pub fn decrypt(value: String) -> Result { 65 | let enc_val = EncryptedValue::try_from(value)?; 66 | Ok(enc_val.0) 67 | } 68 | 69 | pub fn convert_value_to_string(value: &serde_json::Value) -> String { 70 | value.as_str() 71 | .map(|val| val.to_string()) 72 | .unwrap_or_else(|| serde_json::to_string(&value).unwrap()) 73 | } 74 | 75 | fn mock_iv() -> String { 76 | let mut iv = [0u8;12]; 77 | rand::thread_rng().fill_bytes(&mut iv); 78 | base64::encode(iv) 79 | } 80 | 81 | fn mock_pub_key() -> String { 82 | let mut pub_key = [0u8;33]; 83 | rand::thread_rng().fill_bytes(&mut pub_key); 84 | base64::encode(pub_key) 85 | } -------------------------------------------------------------------------------- 
# CI pipeline for the vsock-proxy crate: lint/check and test on pushes that
# touch the crate (or its shared deps), build an artifact on pushes, and
# publish to crates.io when a `vsock-proxy` tagged release is published.
on:
  release:
    types: [published]
  push:
    paths:
      - crates/vsock-proxy/**
      - shared/**
      - .github/workflows/vsock-proxy.yml
      - Cargo.lock
name: vsock-proxy
jobs:
  # Fast feedback: type-check, formatting, and clippy with warnings denied.
  check-proxy:
    runs-on: ubuntu-latest
    # Runs for pushes, and for releases only when the tag targets this crate.
    if: ${{ contains(github.event.release.tag_name, 'vsock-proxy') || github.event_name == 'push' }}
    steps:
      - uses: actions/checkout@v4
      - name: Install MUSL Tools
        run: |
          sudo apt-get update
          sudo apt-get install -y musl-tools
      - uses: dtolnay/rust-toolchain@stable
        with:
          components: clippy, rustfmt
          targets: x86_64-unknown-linux-musl
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: "vsock-proxy"
      - name: Check project
        run: cargo check -p vsock-proxy
      - name: Check formatting
        run: cargo fmt --check
      - name: Clippy
        uses: clechasseur/rs-clippy-check@v3
        with:
          args: -p vsock-proxy -- -D warnings

  test-proxy:
    runs-on: ubuntu-latest
    needs: [check-proxy]
    if: ${{ contains(github.event.release.tag_name, 'vsock-proxy') || github.event_name == 'push' }}
    steps:
      - uses: actions/checkout@v4
      - name: Install MUSL Tools
        run: |
          sudo apt-get update
          sudo apt-get install -y musl-tools
      - uses: dtolnay/rust-toolchain@stable
        with:
          targets: x86_64-unknown-linux-musl
      - uses: Swatinem/rust-cache@v2
        with:
          # Shared cache key with check-proxy so build artifacts are reused.
          shared-key: "vsock-proxy"
      - name: Test vsock proxy
        run: cargo test -p vsock-proxy

  # Push-only: produce a release binary and attach it as a workflow artifact.
  build-proxy:
    runs-on: ubuntu-latest
    needs: [check-proxy,test-proxy]
    if: ${{ github.event_name == 'push' }}
    steps:
      - uses: actions/checkout@v4
      - name: Install MUSL Tools
        run: |
          sudo apt-get update
          sudo apt-get install -y musl-tools
      - uses: dtolnay/rust-toolchain@stable
        with:
          targets: x86_64-unknown-linux-musl
      - name: Compile proxy
        run: cargo build -p vsock-proxy --release
      - name: Upload proxy
        uses: actions/upload-artifact@v4
        with:
          name: vsock-proxy
          path: target/release/vsock-proxy

  # Release-only: publish the crate to crates.io.
  release-proxy:
    runs-on: ubuntu-latest
    needs: [check-proxy,test-proxy]
    if: ${{ contains(github.event.release.tag_name, 'vsock-proxy') }}
    steps:
      - uses: actions/checkout@v4
      - name: Install MUSL Tools
        run: |
          sudo apt-get update
          sudo apt-get install -y musl-tools
      - uses: dtolnay/rust-toolchain@stable
        with:
          targets: x86_64-unknown-linux-musl
      - name: Publish vsock-proxy
        run: cargo publish -p vsock-proxy
        env:
          CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_IO_TOKEN }}
use std::sync::Arc;

use crate::acme::error::AcmeError;
use crate::configuration;
use async_trait::async_trait;
use hyper::client::conn::{Connection as HyperConnection, SendRequest};

use hyper::{Body, Response};
use tokio_rustls::rustls::{ClientConfig, OwnedTrustAnchor, RootCertStore, ServerName};
use tokio_rustls::{client::TlsStream, TlsConnector};

use crate::connection::{self, Connection};

/// HTTP client abstraction for talking to the ACME provider; `Default` lets
/// tests substitute a mock implementation.
#[async_trait]
pub trait AcmeClientInterface: Default {
    /// Sends one HTTP request and returns the raw response.
    async fn send(&self, request: hyper::Request<Body>) -> Result<Response<Body>, AcmeError>;
}

/// HTTPS client for the ACME host. TLS is terminated here in the enclave and
/// the encrypted bytes travel over the enclave connection abstraction
/// (`connection::get_socket`) on `ENCLAVE_ACME_PORT`.
#[derive(Clone)]
pub struct AcmeClient {
    tls_connector: TlsConnector,
    server_name: ServerName,
    port: u16,
}

impl Default for AcmeClient {
    fn default() -> Self {
        // The ACME hostname comes from configuration; a parse failure here is
        // a programmer error, hence the expect.
        let server_name = ServerName::try_from(configuration::get_acme_host().as_str())
            .expect("Hardcoded hostname");

        Self::new(server_name)
    }
}

impl AcmeClient {
    /// Builds a client that trusts the bundled webpki (Mozilla) roots and
    /// presents no client certificate.
    pub fn new(server_name: ServerName) -> Self {
        let mut root_cert_store = RootCertStore::empty();

        root_cert_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
            OwnedTrustAnchor::from_subject_spki_name_constraints(
                ta.subject,
                ta.spki,
                ta.name_constraints,
            )
        }));

        let config = ClientConfig::builder()
            .with_safe_defaults()
            .with_root_certificates(root_cert_store)
            .with_no_client_auth();

        let tls_connector = TlsConnector::from(Arc::new(config));

        Self {
            tls_connector,
            server_name,
            port: shared::ENCLAVE_ACME_PORT,
        }
    }

    /// Opens a fresh socket, performs the TLS handshake and then the HTTP/1
    /// handshake, returning the request sender plus the connection driver
    /// future (which the caller must poll/spawn).
    async fn get_conn(
        &self,
    ) -> Result<
        (
            SendRequest<hyper::Body>,
            HyperConnection<TlsStream<Connection>, hyper::Body>,
        ),
        AcmeError,
    > {
        let client_connection: Connection = connection::get_socket(self.port).await?;
        let connection = self
            .tls_connector
            .connect(self.server_name.clone(), client_connection)
            .await?;

        let connection_info = hyper::client::conn::Builder::new()
            .handshake::<TlsStream<Connection>, hyper::Body>(connection)
            .await?;

        Ok(connection_info)
    }
}

#[async_trait]
impl AcmeClientInterface for AcmeClient {
    /// Sends the request over a brand-new connection per call. The connection
    /// driver is spawned onto the runtime so the response future can resolve;
    /// driver errors are logged rather than propagated.
    async fn send(&self, request: hyper::Request<Body>) -> Result<Response<Body>, AcmeError> {
        let (mut request_sender, connection) = self.get_conn().await?;
        tokio::spawn(async move {
            if let Err(e) = connection.await {
                log::error!("Error in client connection: {e}");
            }
        });

        let response = request_sender.send_request(request).await?;

        Ok(response)
    }
}
45 | app.post('/encrypt', async (req, res) => { 46 | try { 47 | const result = await encrypt(req.body); 48 | res.send(result) 49 | } catch (e) { 50 | console.log("Failed", e) 51 | res.status(500).send(e) 52 | } 53 | }) 54 | 55 | async function decrypt(payload) { 56 | const result = await axios.post("http://127.0.0.1:9999/decrypt", payload, { headers: { 'api-key': 'placeholder' } }); 57 | return result.data; 58 | } 59 | 60 | app.post('/crypto', async (req, res) => { 61 | try { 62 | const encrypted = await encrypt(req.body); 63 | const decrypted = await decrypt(encrypted); 64 | res.send({ encrypted, decrypted }); 65 | } catch (e) { 66 | console.log("Failed", e) 67 | res.status(500).send(e) 68 | } 69 | }); 70 | 71 | app.post('/attestation-doc', async (req, res) => { 72 | try { 73 | const result = await axios.post("http://127.0.0.1:9999/attestation-doc", req.body, { headers: { 'api-key': 'placeholder' }, responseType: "arraybuffer"}) 74 | res.send(result.data) 75 | } catch (e) { 76 | console.log("Failed", e) 77 | res.status(500).send(e) 78 | } 79 | }) 80 | 81 | 82 | app.all("/chunked", async (req, res) => { 83 | try { 84 | res.setHeader('Content-Type', 'application/json'); 85 | res.setHeader('transfer-encoding', 'chunked'); 86 | const responseData = { response: 'Hello from enclave', ...req.body }; 87 | 88 | const jsonStr = JSON.stringify(responseData); 89 | 90 | const chunkSize = 20; 91 | 92 | for (let i = 0; i < jsonStr.length; i += chunkSize) { 93 | const chunk = jsonStr.slice(i, i + chunkSize); 94 | res.write(chunk); 95 | } 96 | 97 | res.end(); 98 | } catch (err) { 99 | console.log("Could not handle hello request", err); 100 | res.status(500).send({msg: "Error from within the cage!"}) 101 | } 102 | }); 103 | 104 | 105 | app.listen(port, () => { 106 | console.log(`Example app listening on port ${port}`) 107 | }) -------------------------------------------------------------------------------- /data-plane/src/error.rs: 
-------------------------------------------------------------------------------- 1 | use hyper::header::{InvalidHeaderName, InvalidHeaderValue}; 2 | use shared::{logging::TrxContextBuilderError, server::error::ServerError}; 3 | use thiserror::Error; 4 | 5 | use crate::{base_tls_client::ClientError, env::EnvError, ContextError}; 6 | 7 | #[derive(Debug, Error)] 8 | pub enum AuthError { 9 | #[error("No api-key header present on request")] 10 | NoApiKeyGiven, 11 | #[error("Invalid api key provided")] 12 | FailedToAuthenticateApiKey, 13 | } 14 | 15 | impl From for hyper::Response { 16 | fn from(err: AuthError) -> Self { 17 | let msg = err.to_string(); 18 | hyper::Response::builder() 19 | .status(401) 20 | .header("content-length", msg.len()) 21 | .body(msg.into()) 22 | .expect("Failed to build auth error to response") 23 | } 24 | } 25 | 26 | #[derive(Debug, Error)] 27 | pub enum Error { 28 | #[error("{0}")] 29 | Crypto(String), 30 | #[error("{0}")] 31 | Network(#[from] ServerError), 32 | #[error("{0}")] 33 | Io(#[from] std::io::Error), 34 | #[cfg(feature = "network_egress")] 35 | #[error("{0}")] 36 | DNS(#[from] crate::dns::error::DNSError), 37 | #[error("{0}")] 38 | Auth(#[from] AuthError), 39 | #[cfg(feature = "tls_termination")] 40 | #[error("An error occurred while parsing the incoming stream for ciphertexts — {0}")] 41 | ParseError(#[from] crate::crypto::stream::IncomingStreamError), 42 | #[error("{0}")] 43 | Hyper(#[from] hyper::Error), 44 | #[error("An error occurred — {0}")] 45 | ConfigServer(String), 46 | #[error("An error occurred requesting intermediate cert from the cert provisioner — {0}")] 47 | CertServer(String), 48 | #[error("Could not create header value — {0}")] 49 | InvalidHeaderValue(#[from] InvalidHeaderValue), 50 | #[error("Client error — {0}")] 51 | ClientError(#[from] ClientError), 52 | #[error("Deserialization Error — {0:?}")] 53 | SerdeError(#[from] serde_json::Error), 54 | #[error("Error initializing environment — {0:?}")] 55 | 
EnvError(#[from] EnvError), 56 | #[error("Couldn't get context")] 57 | ContextError(#[from] ContextError), 58 | #[cfg(feature = "enclave")] 59 | #[error("Failed to get connection to nsm")] 60 | NsmConnectionError(#[from] crate::utils::nsm::NsmConnectionError), 61 | #[error("Couldn't build header name")] 62 | InvalidHeaderName(#[from] InvalidHeaderName), 63 | #[error("Hyper error")] 64 | HyperError(#[from] hyper::http::Error), 65 | #[error("Api key is missing from request")] 66 | MissingApiKey, 67 | #[error("Api key is invalid")] 68 | ApiKeyInvalid, 69 | #[error("API key auth must be switched off for non http requests")] 70 | NonHttpAuthError, 71 | #[error("trx context builder error = {0}")] 72 | TrxContextBuilderError(#[from] TrxContextBuilderError), 73 | #[error("Failed to send trx log= {0}")] 74 | FailedToSendTrxLog(String), 75 | #[error("Failed to return attestation document - {0:?}")] 76 | AttestationRequestError(String), 77 | #[error("Request timed out in data plane after {0} seconds")] 78 | RequestTimeout(usize), 79 | #[error("FromUtf8Error")] 80 | FromUtf8Error(#[from] std::string::FromUtf8Error), 81 | } 82 | 83 | pub type Result = std::result::Result; 84 | -------------------------------------------------------------------------------- /data-plane/src/server/layers/forward.rs: -------------------------------------------------------------------------------- 1 | use hyper::client::{Client, HttpConnector}; 2 | use hyper::http::{header, Request, Response}; 3 | use hyper::Body; 4 | use shared::logging::TrxContextBuilder; 5 | use std::future::Future; 6 | use std::pin::Pin; 7 | use std::sync::OnceLock; 8 | use thiserror::Error; 9 | use tower::Service; 10 | 11 | static HTTP_CLIENT: OnceLock> = OnceLock::new(); 12 | 13 | #[derive(Debug, Error)] 14 | enum ForwardError { 15 | #[error("Failed to request user process - {0}")] 16 | FailedToRequestUserProcess(#[from] hyper::Error), 17 | } 18 | 19 | impl std::convert::From for Response { 20 | fn from(value: ForwardError) 
-> Self { 21 | let error_response = serde_json::json!({ 22 | "message": value.to_string() 23 | }) 24 | .to_string(); 25 | Response::builder() 26 | .status(500) 27 | .header("content-type", "application/json") 28 | .header("content-length", error_response.len()) 29 | .body(Body::from(error_response)) 30 | .expect("Infallible: hardcoded response") 31 | } 32 | } 33 | 34 | #[derive(Clone)] 35 | pub struct ForwardService; 36 | 37 | impl Service> for ForwardService { 38 | type Response = Response; 39 | type Error = hyper::Error; 40 | type Future = 41 | Pin> + Send + 'static>>; 42 | 43 | // Service is always ready to receive requests 44 | fn poll_ready( 45 | &mut self, 46 | _: &mut std::task::Context<'_>, 47 | ) -> std::task::Poll> { 48 | std::task::Poll::Ready(Ok(())) 49 | } 50 | 51 | fn call(&mut self, mut req: Request) -> Self::Future { 52 | Box::pin(async move { 53 | let mut http_client = HTTP_CLIENT.get_or_init(Client::new).clone(); 54 | let context_builder = req 55 | .extensions_mut() 56 | .remove::() 57 | .expect("No context set on received request"); 58 | match http_client.call(req).await { 59 | Ok(mut response) => { 60 | response.extensions_mut().insert(context_builder); 61 | // Temporary fix: remove transfer encoding from response 62 | if response 63 | .headers_mut() 64 | .remove(header::TRANSFER_ENCODING) 65 | .is_some() 66 | { 67 | let (mut parts, body) = response.into_parts(); 68 | let body_bytes = hyper::body::to_bytes(body) 69 | .await 70 | .expect("Failed to read entire response body"); 71 | parts 72 | .headers 73 | .append("content-length", body_bytes.len().into()); 74 | return Ok(Response::from_parts(parts, Body::from(body_bytes))); 75 | } 76 | Ok(response) 77 | } 78 | Err(e) => { 79 | let mut error_response: Response = ForwardError::from(e).into(); 80 | error_response.extensions_mut().insert(context_builder); 81 | Ok(error_response) 82 | } 83 | } 84 | }) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- 
use chrono::{serde::ts_seconds, DateTime, Duration, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;

use crate::config_client::{ConfigClient, StorageConfigClientInterface};

use super::error::AcmeError;

/// A best-effort distributed lock persisted as a `<name>.lock` object in
/// shared storage, used to serialise ACME work across enclave replicas.
/// Ownership is proven by the random `uuid`; locks expire 30s after creation.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct StorageLock {
    // Not serialised: rebuilt when a lock is read back from storage.
    #[serde(skip)]
    config_client: ConfigClient,
    pub name: String,
    pub uuid: String,
    #[serde(with = "ts_seconds")]
    expiry_time: DateTime<Utc>,
    attempts: Option<u32>,
}

impl StorageLock {
    /// Creates a new (not yet persisted) lock with a fresh uuid and a 30s TTL.
    pub fn new(name: String, attempts: u32) -> Self {
        let uuid = Uuid::new_v4().to_string();
        Self {
            config_client: ConfigClient::new(),
            name,
            uuid,
            expiry_time: Utc::now() + Duration::seconds(30),
            attempts: Some(attempts),
        }
    }

    /// Same as `new`, but with an injected client (used by tests/mocks).
    pub fn new_with_config_client(
        name: String,
        attempts: u32,
        config_client: ConfigClient,
    ) -> Self {
        let uuid = Uuid::new_v4().to_string();
        Self {
            config_client,
            name,
            uuid,
            expiry_time: Utc::now() + Duration::seconds(30),
            attempts: Some(attempts),
        }
    }

    /// Fetches `<name>.lock` from storage, returning `None` if absent.
    pub async fn read_from_storage(name: String) -> Result<Option<StorageLock>, AcmeError> {
        let config_client = ConfigClient::new();
        let get_lock_response = config_client.get_object(format!("{name}.lock")).await?;
        match get_lock_response {
            Some(response) => {
                let mut lock: StorageLock = serde_json::from_str(&response.body())?;
                // The client is #[serde(skip)], so restore it after parsing.
                lock.config_client = config_client;
                Ok(Some(lock))
            }
            None => Ok(None),
        }
    }

    /// Storage key for this lock.
    fn lock_key_name(&self) -> String {
        format!("{}.lock", self.name)
    }

    /// True once the 30s TTL has elapsed; an expired lock may be stolen.
    pub fn is_expired(&self) -> bool {
        Utc::now() > self.expiry_time
    }

    /// Number of attempts recorded on this lock, if any.
    pub fn number_of_attempts(&self) -> Option<u32> {
        self.attempts
    }

    /// True when this lock's uuid matches the given one (ownership check).
    pub fn has_uuid(&self, uuid: String) -> bool {
        self.uuid == uuid
    }

    /// Unconditionally writes this lock to storage (last writer wins).
    pub async fn write_lock(&self) -> Result<(), AcmeError> {
        let lock = serde_json::to_string(self)?;
        self.config_client
            .put_object(self.lock_key_name(), lock)
            .await?;
        Ok(())
    }

    /// True if the lock currently stored under our key carries OUR uuid,
    /// i.e. our write won any race with concurrent writers.
    pub async fn is_persisted(&self) -> Result<bool, AcmeError> {
        let persisted_lock_maybe_response =
            self.config_client.get_object(self.lock_key_name()).await?;
        match persisted_lock_maybe_response {
            Some(response) => {
                let lock: Self = serde_json::from_str(&response.body())?;
                Ok(lock.has_uuid(self.uuid.clone()))
            }
            None => Ok(false),
        }
    }

    /// Writes the lock, waits 2s for storage to settle, then verifies that
    /// our write is the one persisted. NOTE(review): the fixed sleep assumes
    /// storage converges within 2s — confirm against the storage backend.
    pub async fn write_and_check_persisted(&self) -> Result<bool, AcmeError> {
        self.write_lock().await?;
        tokio::time::sleep(std::time::Duration::from_secs(2)).await;
        self.is_persisted().await
    }

    /// Releases the lock by deleting its storage object.
    pub async fn delete(&self) -> Result<(), AcmeError> {
        self.config_client
            .delete_object(self.lock_key_name())
            .await?;
        Ok(())
    }
}
e2e-tests/mtls-testing-certs/ca/generate-certs.sh 13 | 14 | MOCK_CRYPTO_CERT=`cat certs/ca.crt` && export MOCK_CRYPTO_CERT 15 | MOCK_CRYPTO_KEY=`cat certs/ca.key` && export MOCK_CRYPTO_KEY 16 | 17 | 18 | MOCK_CERT_PROVISIONER_CLIENT_CERT=`cat certs/client_0.crt` && export MOCK_CERT_PROVISIONER_CLIENT_CERT 19 | MOCK_CERT_PROVISIONER_CLIENT_KEY=`cat certs/client_0.key` && export MOCK_CERT_PROVISIONER_CLIENT_KEY 20 | MOCK_CERT_PROVISIONER_ROOT_CERT=`cat certs/ca.crt` && export MOCK_CERT_PROVISIONER_ROOT_CERT 21 | 22 | MOCK_CERT_PROVISIONER_SERVER_KEY=`cat certs/localhost.key` && export MOCK_CERT_PROVISIONER_SERVER_KEY 23 | MOCK_CERT_PROVISIONER_ROOT_CERT=`cat certs/ca.crt` && export MOCK_CERT_PROVISIONER_ROOT_CERT 24 | MOCK_CERT_PROVISIONER_SERVER_CERT=`cat certs/localhost.crt` && export MOCK_CERT_PROVISIONER_SERVER_CERT 25 | ACME_ACCOUNT_EC_KEY=`cat ./e2e-tests/acme-key/key.pem` && export ACME_ACCOUNT_EC_KEY 26 | ACME_ACCOUNT_HMAC_KEY="cGxhY2Vob2xkZXI=" && export ACME_ACCOUNT_HMAC_KEY 27 | ACME_ACCOUNT_HMAC_KEY_ID="placeholder_id" && export ACME_ACCOUNT_HMAC_KEY_ID 28 | fi 29 | 30 | # install the node modules for customer process and test script 31 | cd e2e-tests && npm install && cd .. 32 | 33 | export TEST_EGRESS_IP=$(dig +short jsonplaceholder.typicode.com | head -n 1) 34 | 35 | # Compile mock crypto api 36 | if [[ -z "${CI}" ]]; 37 | then 38 | cd ./e2e-tests/mock-crypto 39 | cargo build --release --target x86_64-unknown-linux-musl 40 | cd ../.. 41 | fi 42 | 43 | export CUSTOMER_PROCESS=httpCustomerProcess.js 44 | echo "Building enclave setup" 45 | export EV_API_KEY_AUTH=true 46 | docker compose build --build-arg CUSTOMER_PROCESS=httpCustomerProcess.js 47 | 48 | echo "Running enclave local setup" 49 | # run the container 50 | docker compose up -d 51 | echo "SLEEPING 15 SECONDS to let enclave initialize..." 
use std::{
    io::{Error, Write},
    sync::Mutex,
};

use cadence::{
    ext::{MultiLineWriter, SocketStats},
    MetricError, MetricSink,
};
use thiserror::Error;

use crate::server;

/// Errors raised while collecting or emitting statsd metrics.
#[derive(Debug, Error)]
pub enum StatsError {
    #[error("Sys info error {0}")]
    SysInfoError(#[from] sys_info::Error),
    #[error("Metric error {0}")]
    MetricError(#[from] MetricError),
    #[error("IO error {0}")]
    IOError(#[from] Error),
    #[error("Couldn't parse file descriptor values info from /proc/sys/fs/file-nr")]
    FDUsageParseError,
    #[error("Couldn't read file descriptor info from /proc/sys/fs/file-nr")]
    FDUsageReadError,
    #[error("Failed to create connection: {0}")]
    ServerError(#[from] server::error::ServerError),
}

/// Emits a gauge tagged with the enclave and app uuids from `$context`.
#[macro_export]
macro_rules! publish_gauge {
    ($label:literal, $val:expr, $context:expr) => {
        statsd_gauge!(
            $label,
            $val,
            "enclave_uuid" => &$context.uuid,
            "app_uuid" => &$context.app_uuid
        );
    };
}

/// Emits a count tagged with the enclave and app uuids from `$context`.
#[macro_export]
macro_rules! publish_count {
    ($label:literal, $val:expr, $context:expr) => {
        statsd_count!(
            $label,
            $val,
            "enclave_uuid" => &$context.uuid,
            "app_uuid" => &$context.app_uuid
        );
    };
}

/// Same as `publish_count!` but accepts a runtime (non-literal) label.
#[macro_export]
macro_rules! publish_count_dynamic_label {
    ($label:expr, $val:expr, $context:expr) => {
        statsd_count!(
            $label,
            $val,
            "enclave_uuid" => &$context.uuid,
            "app_uuid" => &$context.app_uuid
        );
    };
}

/// A writer wrapper that records byte/error counters in `SocketStats` for
/// every write to the inner stream.
#[derive(Debug)]
pub struct LocalSink<T> {
    inner: T,
    stats: SocketStats,
}

impl<T> LocalSink<T> {
    fn new(stats: SocketStats, stream: T) -> Self {
        Self {
            stats,
            inner: stream,
        }
    }
}

impl<T: Write> Write for LocalSink<T> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        // Route the result through SocketStats so success/failure is counted.
        self.stats.update(self.inner.write(buf), buf.len())
    }

    fn flush(&mut self) -> std::io::Result<()> {
        // NOTE(review): deliberately does not flush `inner` — presumably the
        // underlying transport is a datagram-style socket where each write is
        // already self-contained. Confirm if a buffered stream is ever used.
        Ok(())
    }
}

// Default metric buffer size (bytes) when STATS_BUFFER_SIZE is unset/invalid.
const DEFAULT_BUFFER_SIZE: usize = 512;

/// A cadence `MetricSink` that batches metric lines via `MultiLineWriter`
/// before handing them to the stats-counting `LocalSink`.
#[derive(Debug)]
pub struct BufferedLocalStatsSink<T> {
    stats: SocketStats,
    // Mutex because `MetricSink::emit` takes `&self` but writing needs `&mut`.
    buffer: Mutex<MultiLineWriter<LocalSink<T>>>,
}

impl<T: Write> std::convert::From<T> for BufferedLocalStatsSink<T> {
    fn from(value: T) -> Self {
        let stats = SocketStats::default();
        let sink = LocalSink::new(stats.clone(), value);
        // Buffer size is tunable via env; fall back to the default on any
        // missing or unparsable value.
        let buffer_size = std::env::var("STATS_BUFFER_SIZE")
            .ok()
            .and_then(|buffer_size| buffer_size.parse().ok())
            .unwrap_or(DEFAULT_BUFFER_SIZE);

        Self {
            stats,
            buffer: Mutex::new(MultiLineWriter::new(sink, buffer_size)),
        }
    }
}

impl<T: Write> MetricSink for BufferedLocalStatsSink<T> {
    fn emit(&self, metric: &str) -> std::io::Result<usize> {
        let mut writer = self.buffer.lock().unwrap();
        writer.write(metric.as_bytes())
    }

    fn flush(&self) -> std::io::Result<()> {
        let mut writer = self.buffer.lock().unwrap();
        writer.flush()
    }

    fn stats(&self) -> cadence::SinkStats {
        (&self.stats).into()
    }
}
use crate::dns;
use crate::dns::InternalAsyncDnsResolver;
use crate::error::Result;
use shared::{
    bridge::{Bridge, BridgeInterface, Direction},
    server::Listener,
};
use std::net::SocketAddr;
#[cfg(not(feature = "enclave"))]
use tokio::io::AsyncWriteExt;
use trust_dns_resolver::TokioAsyncResolver;

/// Proxy that accepts crypto requests from the data plane (over the
/// host-to-enclave bridge) and pipes them to an E3 instance resolved via DNS
/// (or localhost in non-enclave/local mode).
pub struct E3Proxy {
    #[allow(unused)]
    dns_resolver: TokioAsyncResolver,
}

impl std::default::Default for E3Proxy {
    fn default() -> Self {
        Self::new()
    }
}

impl E3Proxy {
    pub fn new() -> Self {
        let dns_resolver = InternalAsyncDnsResolver::new_resolver();
        Self { dns_resolver }
    }

    /// Shuts down a data-plane connection; vsock variant (enclave builds).
    #[cfg(feature = "enclave")]
    async fn shutdown_conn(connection: tokio_vsock::VsockStream) {
        if let Err(e) = connection.shutdown(std::net::Shutdown::Both) {
            log::warn!("Failed to shutdown data plane connection — {e:?}");
        }
    }

    /// Shuts down a data-plane connection; TCP variant (local builds).
    #[cfg(not(feature = "enclave"))]
    async fn shutdown_conn(mut connection: tokio::net::TcpStream) {
        if let Err(e) = connection.shutdown().await {
            log::warn!("Failed to shutdown data plane connection — {e:?}");
        }
    }

    /// Accept loop: for each inbound crypto connection, resolve E3's address,
    /// dial it, and pipe bytes bidirectionally. Per-connection failures are
    /// logged and the connection is closed; the loop itself never returns.
    pub async fn listen(self) -> Result<()> {
        let mut enclave_conn =
            Bridge::get_listener(shared::ENCLAVE_CRYPTO_PORT, Direction::HostToEnclave).await?;

        log::info!("Running e3 proxy on {}", shared::ENCLAVE_CRYPTO_PORT);
        loop {
            let connection = match enclave_conn.accept().await {
                Ok(conn) => conn,
                Err(e) => {
                    log::error!("Error accepting crypto request — {e:?}");
                    continue;
                }
            };
            // Resolve E3 fresh for every connection so DNS changes are
            // picked up without a restart.
            let e3_ip = match self.get_ip_for_e3().await {
                Ok(Some(ip)) => ip,
                Ok(None) => {
                    log::error!("No ip returned for E3");
                    Self::shutdown_conn(connection).await;
                    continue;
                }
                Err(e) => {
                    log::error!("Error obtaining IP for E3 — {e:?}");
                    Self::shutdown_conn(connection).await;
                    continue;
                }
            };
            log::info!("Crypto request received, forwarding to {e3_ip}");
            tokio::spawn(async move {
                let e3_stream = match tokio::net::TcpStream::connect(e3_ip).await {
                    Ok(e3_stream) => e3_stream,
                    Err(e) => {
                        log::error!("Failed to connect to E3 ({e3_ip}) — {e:?}");
                        Self::shutdown_conn(connection).await;
                        return;
                    }
                };

                if let Err(e) = shared::utils::pipe_streams(connection, e3_stream).await {
                    log::error!("Error streaming from Data Plane to e3 ({e3_ip})— {e:?}");
                }
            });
        }

        #[allow(unreachable_code)]
        Ok(())
    }

    /// Resolves E3's internal DNS name on port 443 (enclave builds).
    #[cfg(feature = "enclave")]
    async fn get_ip_for_e3(&self) -> Result<Option<SocketAddr>> {
        dns::get_ip_for_host_with_dns_resolver(&self.dns_resolver, "e3.cages-e3.internal.", 443)
            .await
    }

    // supporting local env
    #[cfg(not(feature = "enclave"))]
    async fn get_ip_for_e3(&self) -> Result<Option<SocketAddr>> {
        dns::get_ip_for_localhost(7676)
    }
}
6 | #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] 7 | pub enum Service { 8 | DataPlane, 9 | CryptoApi, 10 | ClockSync, 11 | DnsProxy, 12 | EgressProxy, 13 | E3Proxy, 14 | ProvisionerProxy, 15 | AcmeProxy, 16 | ConfigServer, 17 | } 18 | 19 | impl std::fmt::Display for Service { 20 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 21 | let service_label = match self { 22 | Self::DataPlane => "data-plane", 23 | Self::CryptoApi => "crypto-api", 24 | Self::ClockSync => "clock-sync", 25 | Self::DnsProxy => "dns-proxy", 26 | Self::EgressProxy => "egress-proxy", 27 | Self::E3Proxy => "e3-proxy", 28 | Self::ProvisionerProxy => "provisioner-proxy", 29 | Self::AcmeProxy => "acme-proxy", 30 | Self::ConfigServer => "config-server", 31 | }; 32 | f.write_str(service_label) 33 | } 34 | } 35 | 36 | /// The notify shutdown service trait is used to support shutdown notifications of any critical service running within the Enclave. 37 | /// Any future that is converted into a `NotifyShutdownFuture` will send a message containing the service label to the shutdown channel, 38 | /// allowing the healthcheck agent to move the Enclave into a draining state. 39 | /// 40 | /// Note: all critical services are assumed to run indefinitely. If one exits, it's assumed that the Enclave is entering an unhealthy state. 
41 | pub trait NotifyShutdown: Future { 42 | fn notify_shutdown( 43 | self, 44 | service: Service, 45 | shutdown_channel: Sender, 46 | ) -> NotifyShutdownFuture 47 | where 48 | Self: Sized, 49 | { 50 | NotifyShutdownFuture { 51 | inner: self, 52 | service, 53 | shutdown_channel, 54 | } 55 | } 56 | } 57 | 58 | impl NotifyShutdown for F where F: Future {} 59 | 60 | #[pin_project::pin_project] 61 | pub struct NotifyShutdownFuture { 62 | #[pin] 63 | inner: F, 64 | service: Service, 65 | shutdown_channel: Sender, 66 | } 67 | 68 | impl Future for NotifyShutdownFuture { 69 | type Output = F::Output; 70 | 71 | fn poll( 72 | self: std::pin::Pin<&mut Self>, 73 | cx: &mut std::task::Context<'_>, 74 | ) -> std::task::Poll { 75 | let this = self.project(); 76 | let result = ready!(this.inner.poll(cx)); 77 | // We can ignore the error path here as the consumer is held for the lifetime of the healthcheck agent, 78 | // and a send error should signify that the Enclave is already unhealthy. 79 | log::warn!("{} exiting...", this.service); 80 | let _ = this.shutdown_channel.try_send(this.service.clone()); 81 | std::task::Poll::Ready(result) 82 | } 83 | } 84 | 85 | #[cfg(test)] 86 | mod test { 87 | use super::{NotifyShutdown, Service}; 88 | use tokio::sync::mpsc::channel; 89 | 90 | #[tokio::test] 91 | async fn test_notify_shutdown_service_exits_tasks_as_expected() { 92 | let (shutdown_channel, mut recv) = channel(1); 93 | let fut1 = async move { 1 }.notify_shutdown(Service::DataPlane, shutdown_channel); 94 | 95 | let result = fut1.await; 96 | assert_eq!(result, 1); 97 | // Assert that the task notified the shutdown channel on exit 98 | let msg = recv.try_recv(); 99 | assert!(msg.is_ok()); 100 | assert_eq!(msg.unwrap(), Service::DataPlane); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /data-plane/src/utils/trx_handler.rs: -------------------------------------------------------------------------------- 1 | use 
std::{collections::VecDeque, time::Duration}; 2 | use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; 3 | 4 | use shared::logging::TrxContext; 5 | use tokio::time::interval; 6 | 7 | use crate::config_client::ConfigClient; 8 | 9 | enum LogHandlerMessageType { 10 | TickMsg, 11 | TrxMsg, 12 | } 13 | 14 | pub struct LogHandlerMessage { 15 | trx_log: Option, 16 | msg_type: LogHandlerMessageType, 17 | } 18 | 19 | impl LogHandlerMessage { 20 | pub fn new_log_message(trx_log: TrxContext) -> Self { 21 | Self { 22 | trx_log: Some(trx_log), 23 | msg_type: LogHandlerMessageType::TrxMsg, 24 | } 25 | } 26 | 27 | pub fn new_tick_message() -> Self { 28 | Self { 29 | trx_log: None, 30 | msg_type: LogHandlerMessageType::TickMsg, 31 | } 32 | } 33 | } 34 | 35 | struct LogHandlerBuffer { 36 | config_client: ConfigClient, 37 | buffer: VecDeque, 38 | } 39 | 40 | impl LogHandlerBuffer { 41 | pub fn new(capacity: usize) -> Self { 42 | Self { 43 | config_client: ConfigClient::new(), 44 | buffer: VecDeque::with_capacity(capacity), 45 | } 46 | } 47 | 48 | pub fn get_size(&self) -> usize { 49 | self.buffer.len() 50 | } 51 | 52 | pub fn add_log(&mut self, log: TrxContext) { 53 | self.buffer.push_back(log) 54 | } 55 | 56 | // Removes logs from the buffer and sends them to the control plane 57 | pub async fn send_logs(&mut self) { 58 | let trx_logs: Vec = self.buffer.drain(..).collect(); 59 | if let Err(err) = self.config_client.post_trx_logs(trx_logs).await { 60 | log::error!("Failed to ship trx logs to control plane. {err:?}"); 61 | }; 62 | } 63 | } 64 | 65 | pub async fn start_log_handler( 66 | tx: UnboundedSender, 67 | mut rx: UnboundedReceiver, 68 | ) { 69 | //Start timer send messages to periodically clear buffer 70 | start_log_timer(tx); 71 | 72 | // Give buffer max capacity of 20 for space. Will flush on 15 but have capacity for more. 
73 | let mut buffer = LogHandlerBuffer::new(20); 74 | 75 | while let Some(message) = rx.recv().await { 76 | match message.msg_type { 77 | LogHandlerMessageType::TickMsg => { 78 | let current_size = buffer.get_size(); 79 | if current_size > 0 { 80 | log::debug!("{current_size:?} logs in the buffer. Sending to control plane"); 81 | buffer.send_logs().await; 82 | }; 83 | //No logs in the buffer. No op. 84 | } 85 | LogHandlerMessageType::TrxMsg => { 86 | if let Some(log) = message.trx_log { 87 | buffer.add_log(log); 88 | }; 89 | 90 | let current_size = buffer.get_size(); 91 | if current_size >= 15 { 92 | //Buffer size has multiple logs. Flush buffer and send to control plane 93 | buffer.send_logs().await; 94 | } 95 | //Don't flush buffer yet with only a small amount logs 96 | } 97 | } 98 | } 99 | } 100 | 101 | fn start_log_timer(tx: UnboundedSender) { 102 | tokio::spawn(async move { 103 | let mut log_interval = interval(Duration::from_secs(30)); 104 | loop { 105 | let _ = log_interval.tick().await; 106 | 107 | //Send tick message to handler 108 | if let Err(err) = tx.send(LogHandlerMessage::new_tick_message()) { 109 | log::error!("Failed sending trx tick message to trx handler. 
{err}") 110 | } 111 | } 112 | }); 113 | } 114 | -------------------------------------------------------------------------------- /data-plane/src/cert_provisioner_client/mod.rs: -------------------------------------------------------------------------------- 1 | mod tls_verifier; 2 | 3 | use hyper::{Body, Response}; 4 | use serde::de::DeserializeOwned; 5 | use shared::server::config_server::requests::{ 6 | ConfigServerPayload, GetCertRequestDataPlane, GetCertResponseDataPlane, 7 | GetSecretsResponseDataPlane, 8 | }; 9 | use tokio_rustls::rustls::ServerName; 10 | use tokio_rustls::TlsConnector; 11 | 12 | use crate::base_tls_client::tls_client_config::get_tls_client_config; 13 | use crate::base_tls_client::{BaseClient, ClientError}; 14 | use crate::configuration; 15 | #[cfg(feature = "enclave")] 16 | use crate::crypto::attest; 17 | 18 | type CertProvisionerError = ClientError; 19 | #[derive(Clone)] 20 | pub struct CertProvisionerClient { 21 | base_client: BaseClient, 22 | } 23 | 24 | impl Default for CertProvisionerClient { 25 | fn default() -> Self { 26 | Self::new() 27 | } 28 | } 29 | 30 | impl CertProvisionerClient { 31 | pub fn new() -> Self { 32 | let verifier = std::sync::Arc::new(tls_verifier::CertProvisionerCertVerifier); 33 | let tls_connector = 34 | TlsConnector::from(std::sync::Arc::new(get_tls_client_config(verifier))); 35 | 36 | let server_name = ServerName::try_from(configuration::get_cert_provisioner_host().as_str()) 37 | .expect("Hardcoded hostname"); 38 | 39 | Self { 40 | base_client: BaseClient::new(tls_connector, server_name, shared::ENCLAVE_CERT_PORT), 41 | } 42 | } 43 | 44 | fn uri(&self, path: &str) -> String { 45 | format!( 46 | "https://{}:{}{}", 47 | configuration::get_cert_provisioner_host(), 48 | shared::ENCLAVE_CERT_PORT, 49 | path 50 | ) 51 | } 52 | 53 | fn get_attestation_doc(&self, token: String) -> Result { 54 | let token_bytes = token.as_bytes().to_vec(); 55 | 56 | #[cfg(feature = "enclave")] 57 | let attestation_doc = 
attest::get_attestation_doc(Some(token_bytes), None, None) 58 | .map_err(|err| CertProvisionerError::General(err.to_string()))?; 59 | 60 | #[cfg(not(feature = "enclave"))] 61 | let attestation_doc: Vec = token_bytes; 62 | 63 | let base64_doc = base64::encode(attestation_doc); 64 | 65 | Ok(base64_doc) 66 | } 67 | 68 | pub async fn get_cert( 69 | &self, 70 | token: String, 71 | ) -> Result { 72 | let attestation_doc = self.get_attestation_doc(token)?; 73 | 74 | let body = GetCertRequestDataPlane::new(attestation_doc) 75 | .into_body() 76 | .map_err(|err| CertProvisionerError::General(err.to_string()))?; 77 | 78 | let response = self 79 | .base_client 80 | .send(None, "POST", &self.uri("/cert"), body, None) 81 | .await?; 82 | 83 | self.parse_response(response).await 84 | } 85 | 86 | pub async fn get_secrets( 87 | &self, 88 | token: String, 89 | ) -> Result { 90 | let attestation_doc = self.get_attestation_doc(token)?; 91 | 92 | let body = GetCertRequestDataPlane::new(attestation_doc) 93 | .into_body() 94 | .map_err(|err| CertProvisionerError::General(err.to_string()))?; 95 | 96 | let response = self 97 | .base_client 98 | .send(None, "POST", &self.uri("/secrets"), body, None) 99 | .await?; 100 | 101 | self.parse_response(response).await 102 | } 103 | 104 | async fn parse_response( 105 | &self, 106 | res: Response, 107 | ) -> Result { 108 | let response_body = res.into_body(); 109 | let response_body = hyper::body::to_bytes(response_body).await?; 110 | Ok(serde_json::from_slice(&response_body)?) 
111 | } 112 | } 113 | -------------------------------------------------------------------------------- /data-plane/src/base_tls_client/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod error; 2 | pub use error::ClientError; 3 | pub mod server_cert_verifier; 4 | pub mod tls_client_config; 5 | pub use server_cert_verifier::OpenServerCertVerifier; 6 | 7 | use hyper::client::conn::{Connection as HyperConnection, SendRequest}; 8 | use hyper::header::HeaderValue; 9 | use hyper::{Body, HeaderMap, Response}; 10 | use tokio_rustls::rustls::ServerName; 11 | use tokio_rustls::{client::TlsStream, TlsConnector}; 12 | 13 | use crate::connection::{self, Connection}; 14 | use crate::crypto::token::AttestationAuth; 15 | use shared::{CLIENT_MAJOR_VERSION, CLIENT_VERSION}; 16 | 17 | #[derive(Clone)] 18 | pub struct BaseClient { 19 | tls_connector: TlsConnector, 20 | server_name: ServerName, 21 | port: u16, 22 | } 23 | 24 | #[derive(Clone)] 25 | pub enum AuthType { 26 | ApiKey(HeaderValue), 27 | AttestationDoc(AttestationAuth), 28 | } 29 | 30 | impl BaseClient { 31 | pub fn new(tls_connector: TlsConnector, server_name: ServerName, port: u16) -> Self { 32 | Self { 33 | tls_connector, 34 | server_name, 35 | port, 36 | } 37 | } 38 | 39 | async fn get_conn( 40 | &self, 41 | ) -> Result< 42 | ( 43 | SendRequest, 44 | HyperConnection, hyper::Body>, 45 | ), 46 | ClientError, 47 | > { 48 | let client_connection: Connection = connection::get_socket(self.port).await?; 49 | let connection = self 50 | .tls_connector 51 | .connect(self.server_name.clone(), client_connection) 52 | .await?; 53 | 54 | let connection_info = hyper::client::conn::Builder::new() 55 | .handshake::, hyper::Body>(connection) 56 | .await?; 57 | 58 | Ok(connection_info) 59 | } 60 | 61 | pub async fn send( 62 | &self, 63 | auth_type: Option, 64 | method: &str, 65 | uri: &str, 66 | payload: hyper::Body, 67 | headers: Option, 68 | ) -> Result, ClientError> { 69 | let mut request 
= hyper::Request::builder().uri(uri); 70 | // if headers have been passed, seed the request with the provided set of headers, 71 | // but override with required headers to avoid failed reqs. 72 | if let Some(headers) = headers { 73 | if let Some(req_header_map) = request.headers_mut() { 74 | *req_header_map = headers 75 | } 76 | } 77 | let mut request = request 78 | .header("Content-Type", "application/json") 79 | .header( 80 | "User-Agent", 81 | format!("Cage-Data-Plane/{}", &*CLIENT_VERSION), 82 | ) 83 | .header( 84 | "Accept", 85 | format!("application/json;version={}", &*CLIENT_MAJOR_VERSION), 86 | ) 87 | .method(method) 88 | .body(payload) 89 | .expect("Failed to create request"); 90 | 91 | auth_type.map(|auth| match auth { 92 | AuthType::ApiKey(header_value) => request.headers_mut().insert("api-key", header_value), 93 | AuthType::AttestationDoc(auth) => { 94 | request.headers_mut().insert("attestation-token", auth.doc); 95 | request.headers_mut().insert("auth-token", auth.token) 96 | } 97 | }); 98 | 99 | let (mut request_sender, connection) = self.get_conn().await?; 100 | tokio::spawn(async move { 101 | if let Err(e) = connection.await { 102 | log::error!("Error in client connection: {e}"); 103 | } 104 | }); 105 | 106 | let response = request_sender.send_request(request).await?; 107 | if !response.status().is_success() { 108 | return Err(ClientError::FailedRequest(response.status())); 109 | } 110 | 111 | Ok(response) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /e2e-tests/mockCertProvisionerApi.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs'); 2 | const https = require('https') 3 | const express = require('express') 4 | const app = express() 5 | 6 | console.log("Starting up mock cert provsioner"); 7 | 8 | app.use(express.json()) 9 | app.use(express.urlencoded({extended : true})); 10 | 11 | const options = { 12 | key: 
process.env.MOCK_CERT_PROVISIONER_SERVER_KEY, 13 | cert: process.env.MOCK_CERT_PROVISIONER_SERVER_CERT, 14 | ca: process.env.MOCK_CERT_PROVISIONER_ROOT_CERT, 15 | port: 3443, 16 | requestCert: true, 17 | rejectUnauthorized: false 18 | }; 19 | 20 | const mutualTlsMiddleware = () => (req, res, next) => { 21 | if (!req.client.authorized) { 22 | console.log("CLIENT NOT AUTHENTICATED"); 23 | return res.status(401).send('Invalid client certificate authentication.'); 24 | } else { 25 | console.log("CLIENT AUTHENTICATED") 26 | res.set("X-MTLS", "Authenticated") 27 | } 28 | return next(); 29 | }; 30 | 31 | app.use(mutualTlsMiddleware()); 32 | 33 | 34 | app.get('/cert/token', async (req, res) => { 35 | try { 36 | console.log("Received cert token request from enclave control plane") 37 | var result = { 38 | token: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ" 39 | }; 40 | res.send(result) 41 | } catch (e) { 42 | console.log("Could not return cert token", e) 43 | } 44 | }) 45 | 46 | app.get('/e3/token', async (req, res) => { 47 | try { 48 | console.log("Received E3 token request from enclave control plane") 49 | var result = { 50 | token: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ", 51 | token_id: "123" 52 | }; 53 | res.send(result) 54 | } catch (e) { 55 | console.log("Could not return E3 token", e) 56 | } 57 | }) 58 | 59 | 60 | https.createServer(options, app).listen(options.port, () => { 61 | console.log(`HTTPS MTLS mock cert provisioner server running on ${options.port}`) 62 | }) 63 | 64 | 65 | const TlsOptions = { 66 | key: process.env.MOCK_CERT_PROVISIONER_SERVER_KEY, 67 | cert: process.env.MOCK_CERT_PROVISIONER_SERVER_CERT, 68 | ca: process.env.MOCK_CERT_PROVISIONER_ROOT_CERT, 69 | port: 3000, 70 | }; 71 | 72 | const tlsApp = express() 73 | tlsApp.post('/cert', 
async (req, res) => { 74 | try { 75 | let ca_cert = Buffer.from(fs.readFileSync('/services/sample-intermediate-cert.pem', 'utf8')).toString('base64'); 76 | let ca_key_pair = Buffer.from(fs.readFileSync('/services/sample-intermediate-key.pem', 'utf8')).toString('base64'); 77 | 78 | console.log(`Mock cert provisioner - Received cert request from enclave data plane ${req}`); 79 | 80 | var result = { 81 | intermediate_cert: ca_cert, 82 | key_pair: ca_key_pair, 83 | secrets: [{name: "ANOTHER_ENV_VAR", secret: "123"}, {name: "ENCRYPTED_ENV", secret: "ev:123"}], 84 | context: {team_uuid: "team_123", cage_uuid: "enclave_123", app_uuid: "app_12345678", cage_name: "test-enclave"}, 85 | }; 86 | res.status(200) 87 | res.send(result) 88 | } catch (e) { 89 | console.log("Could not return cert ", e) 90 | } 91 | }) 92 | 93 | tlsApp.post('/secrets', async (req, res) => { 94 | try { 95 | console.log(`Mock cert provisioner - Received secrets request from enclave data plane ${req}`); 96 | 97 | var result = { 98 | context: {team_uuid: "team_123", cage_uuid: "enclave_123", app_uuid: "app_12345678", cage_name: "test-enclave"}, 99 | secrets: [{name: "ANOTHER_ENV_VAR", secret: "123"}, {name: "ENCRYPTED_ENV", secret: "ev:123"}] 100 | }; 101 | res.status(200) 102 | res.send(result) 103 | } catch (e) { 104 | console.log("Could not return cert ", e) 105 | } 106 | }) 107 | 108 | 109 | https.createServer(options, tlsApp).listen(TlsOptions.port, () => { 110 | console.log(__filename); 111 | console.log(`HTTPS mock cert provisioner server running on ${TlsOptions.port}`) 112 | }) 113 | 114 | 115 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Evervault Enclaves 4 | 5 | Evervault Enclaves are the easiest way to build, deploy and scale Secure Enclaves. 
6 | 7 | Evervault Enclaves allow developers to easily deploy Docker containers in a Secure Enclave, powered by AWS Nitro Enclaves. Enclaves offer easy deployment, invocation and attestation of Secure Enclaves without the engineering overhead. 8 | 9 | This repo consists of two components: the runtime which is run _inside_ the Secure Enclave (the "data plane") and the code used for proxying network traffic and initializing the enclave (the "control plane"). 10 | 11 | ## Notice on Open Source Status of this project 12 | 13 | The Evervault Enclaves product is open source with the aim of providing transparency to users — this is vital given that our process runs in the enclave, and is accounted for in the attestation. 14 | 15 | The current state of this project does not allow for self-hosting. We plan on addressing this by abstracting away the Evervault-specific elements of the Enclaves product. 16 | 17 | ## Steps to get Enclaves running in local dev (macOSarm64) 18 | 19 | If you're using vscode you may want to append a check target to your workspace settings 20 | 21 | `.vscode/settings.json` 22 | ```sh 23 | { 24 | "rust-analyzer.check.targets": "x86_64-unknown-linux-musl" 25 | } 26 | ``` 27 | 28 | The crates can then be cross compiled using zigbuild. To install zigbuild, first [install ziglang](https://ziglang.org/learn/getting-started/#installing-zig). 
29 | 30 | Once ziglang is installed, you can install zigbuild as a cargo plugin: 31 | 32 | ```sh 33 | cargo install --locked cargo-zigbuild 34 | ``` 35 | 36 | Generate a cert and key for the data-plane: 37 | ```sh 38 | # install mkcert as a trusted CA 39 | mkcert -install 40 | 41 | mkcert data-plane.localhost 42 | ``` 43 | 44 | Generate test certs: 45 | ```sh 46 | ./e2e-tests/mtls-testing-certs/ca/generate-certs.sh 47 | ``` 48 | 49 | Generate the Root and Intermediate CA for cert provisioning in tests: 50 | ```sh 51 | sh ./e2e-tests/generate-sample-ca.sh 52 | ``` 53 | 54 | Generate certs for TLS in the mock API: 55 | ```sh 56 | mkdir e2e-tests/testing-certs && mkcd e2e-tests/testing-certs 57 | openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./testing.key -out testing.crt 58 | ``` 59 | 60 | Source `export-dev-env-vars.sh` to set certs as environment variables: 61 | ```sh 62 | source ./scripts/export-dev-env-vars.sh 63 | ``` 64 | 65 | Compile: 66 | ```sh 67 | cargo zigbuild --release --target x86_64-unknown-linux-musl --features network_egress 68 | ``` 69 | 70 | Compile the mock crypto crate: 71 | ```sh 72 | pushd e2e-tests/mock-crypto && cargo zigbuild --release --target x86_64-unknown-linux-musl && popd 73 | ``` 74 | 75 | Build and run docker containers: 76 | ```sh 77 | docker compose build 78 | docker compose up 79 | ``` 80 | 81 | Test it out: 82 | ```sh 83 | curl https://enclave.localhost:443/encrypt -k -H 'api-key: placeholder' --data '{"hello": "world"}' -H "Content-Type: application/json" 84 | ``` 85 | 86 | ## Feature flags 87 | 88 | By default, the data plane and control plane will be compiled and run without network egress from the enclave. 89 | ```sh 90 | cargo run 91 | ``` 92 | 93 | The data plane and control plane can be compiled and run with network egress support using the `network_egress` feature flag. 
94 | ```sh 95 | cargo run --features network_egress 96 | ``` 97 | 98 | To build with the `enclave` feature flag, you will have to specify the target: 99 | ```sh 100 | sudo cargo clippy --features enclave --target x86_64-unknown-linux-musl 101 | ``` 102 | 103 | You will also need the `x86_64-unknown-linux-musl` target: 104 | ```sh 105 | rustup target add x86_64-unknown-linux-musl 106 | ``` 107 | 108 | ## Query Local DNS Server 109 | 110 | The enclave DNS forwarder is listening on 53. To test lookup from data plane -> control plane -> remote DNS server use the following command: 111 | ```sh 112 | dig evervault.com @127.0.0.1 113 | ``` 114 | 115 | ## Run end-to-end tests 116 | ```sh 117 | sh e2e-tests/run.sh 118 | ``` 119 | 120 | The mock crypto API depends on a (currently private) Rust crate. We plan on making this crate available in future. 121 | Until then, the project will be able to build and run the E2E tests in CI. 122 | -------------------------------------------------------------------------------- /crates/vsock-proxy/src/main.rs: -------------------------------------------------------------------------------- 1 | use clap::{Arg, Command}; 2 | 3 | mod net; 4 | 5 | use net::{Address, Error, Listener}; 6 | 7 | fn main() { 8 | let matches = Command::new("vsock-proxy") 9 | .about("A simple proxy to pipe traffic to/from a vsock connection") 10 | .arg( 11 | Arg::new("tcp-source") 12 | .long("tcp-source") 13 | .help("The tcp address for the proxy to listen on.") 14 | .conflicts_with("vsock-source") 15 | .required(false), 16 | ) 17 | .arg( 18 | Arg::new("tcp-destination") 19 | .long("tcp-dest") 20 | .help("The tcp address for the proxy to forward to.") 21 | .conflicts_with("vsock-destination") 22 | .conflicts_with("tcp-source") 23 | .required(false), 24 | ) 25 | .arg( 26 | Arg::new("vsock-source") 27 | .long("vsock-source") 28 | .help("The vsock address for the proxy to listen on.") 29 | .required(false), 30 | ) 31 | .arg( 32 | Arg::new("vsock-destination") 33 | 
.long("vsock-dest") 34 | .help("The vsock address for the proxy to forward to.") 35 | .conflicts_with("vsock-source") 36 | .required(false), 37 | ) 38 | .get_matches(); 39 | 40 | let tcp_source = matches.get_one::("tcp-source"); 41 | let vsock_source = matches.get_one::("vsock-source"); 42 | 43 | if tcp_source.is_none() && vsock_source.is_none() { 44 | eprintln!("Error: no source address provided. Either tcp-source or vsock-source must be provided."); 45 | return; 46 | } 47 | 48 | let tcp_destination = matches.get_one::("tcp-destination"); 49 | let vsock_destination = matches.get_one::("vsock-destination"); 50 | 51 | if tcp_destination.is_none() && vsock_destination.is_none() { 52 | eprintln!("Error: no destination address provided. Either tcp-destination or vsock-destination must be provided."); 53 | return; 54 | } 55 | 56 | let parsed_source_address: Result = tcp_source 57 | .map(|tcp_addr| Address::new_tcp_address(tcp_addr.as_str())) 58 | .or_else(|| vsock_source.map(|vsock_addr| Address::new_vsock_address(vsock_addr.as_str()))) 59 | .expect("Infallible: either tcp or vsock source address must exist."); 60 | 61 | let source_address = match parsed_source_address { 62 | Ok(source_addr) => source_addr, 63 | Err(e) => { 64 | eprintln!("Error: {e}"); 65 | return; 66 | } 67 | }; 68 | 69 | let parsed_destination: Result = tcp_destination 70 | .map(|tcp_addr| Address::new_tcp_address(tcp_addr)) 71 | .or_else(|| vsock_destination.map(|vsock_addr| Address::new_vsock_address(vsock_addr))) 72 | .expect("Infallible: either tcp or vsock address must exist"); 73 | 74 | let destination_address = match parsed_destination { 75 | Ok(dest_addr) => dest_addr, 76 | Err(e) => { 77 | eprintln!("Error: {e}"); 78 | return; 79 | } 80 | }; 81 | 82 | let runtime = tokio::runtime::Builder::new_current_thread() 83 | .enable_io() 84 | .build() 85 | .expect("Failed to build tokio runtime"); 86 | 87 | runtime.block_on(async move { 88 | let mut source = match source_address.into_listener().await 
{ 89 | Ok(source_conn) => source_conn, 90 | Err(e) => { 91 | eprintln!("Failed to create source connection - {e}"); 92 | return; 93 | } 94 | }; 95 | 96 | loop { 97 | let mut accepted_conn = match source.accept().await { 98 | Ok(source_conn) => source_conn, 99 | Err(e) => { 100 | eprintln!("Failed to accept incoming connection - {e}"); 101 | continue; 102 | } 103 | }; 104 | 105 | let mut destination = match destination_address.get_destination_connection().await { 106 | Ok(dest_conn) => dest_conn, 107 | Err(e) => { 108 | eprintln!("Failed to create destination connection - {e}"); 109 | continue; 110 | } 111 | }; 112 | 113 | if let Err(e) = 114 | tokio::io::copy_bidirectional(&mut accepted_conn, &mut destination).await 115 | { 116 | eprintln!("Error piping connections - {e}"); 117 | } 118 | } 119 | }); 120 | } 121 | -------------------------------------------------------------------------------- /data-plane/src/server/http/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod parse; 2 | 3 | use bytes::Bytes; 4 | use hyper::{ 5 | http::header::{HeaderName, HeaderValue, InvalidHeaderName}, 6 | Body, HeaderMap, Request, Response, 7 | }; 8 | use std::str::FromStr; 9 | 10 | pub struct RemoteIp(pub String); 11 | 12 | pub enum EncodingError { 13 | UnknownEncoding, 14 | } 15 | 16 | #[derive(Clone, Debug)] 17 | pub enum ContentEncoding { 18 | Gzip, 19 | Br, //brotli 20 | } 21 | 22 | impl std::convert::TryFrom<&HeaderValue> for ContentEncoding { 23 | type Error = EncodingError; 24 | fn try_from(val: &HeaderValue) -> Result { 25 | let encoding = match val.as_bytes() { 26 | b"br" => Self::Br, 27 | b"gzip" => Self::Gzip, 28 | _ => return Err(EncodingError::UnknownEncoding), 29 | }; 30 | Ok(encoding) 31 | } 32 | } 33 | 34 | pub async fn request_to_bytes(request: Request) -> Vec { 35 | let mut bytes = Vec::new(); 36 | 37 | let (req_info, req_body) = request.into_parts(); 38 | 39 | let path = req_info 40 | .uri 41 | .path_and_query() 42 
| .map(|path| path.as_str()) 43 | .unwrap_or("/"); 44 | let status_line = format!("{} {} {:?}\r\n", req_info.method, path, req_info.version); 45 | bytes.extend_from_slice(status_line.as_bytes()); 46 | 47 | for (header, val) in req_info.headers.iter() { 48 | let header_str = format!("{}: {}\r\n", header.as_str(), val.to_str().unwrap_or("")); 49 | bytes.extend_from_slice(header_str.as_bytes()); 50 | } 51 | bytes.extend_from_slice(b"\r\n"); 52 | 53 | let body_bytes: Bytes = hyper::body::to_bytes(req_body) 54 | .await 55 | .unwrap_or_else(|_| Bytes::new()); 56 | 57 | bytes.extend_from_slice(&body_bytes); 58 | 59 | bytes 60 | } 61 | 62 | pub async fn response_to_bytes(response: Response) -> Vec { 63 | let mut bytes = Vec::new(); 64 | 65 | let status_line = format!( 66 | "{:?} {} {}\r\n", 67 | response.version(), 68 | response.status().as_u16(), 69 | response.status().canonical_reason().unwrap_or("") 70 | ); 71 | bytes.extend_from_slice(status_line.as_bytes()); 72 | 73 | for (header_name, header_value) in response.headers() { 74 | let header_str = format!( 75 | "{}: {}\r\n", 76 | header_name.as_str(), 77 | header_value.to_str().unwrap_or("") 78 | ); 79 | bytes.extend_from_slice(header_str.as_bytes()); 80 | } 81 | 82 | bytes.extend_from_slice(b"\r\n"); 83 | 84 | let body_bytes: Bytes = hyper::body::to_bytes(response.into_body()) 85 | .await 86 | .unwrap_or_else(|_| Bytes::new()); 87 | 88 | bytes.extend_from_slice(&body_bytes); 89 | 90 | bytes 91 | } 92 | 93 | fn build_header_value_from_str(header_val: &str) -> HeaderValue { 94 | HeaderValue::from_str(header_val).expect("Unable to create HeaderValue from str") 95 | } 96 | 97 | pub fn append_or_insert_header( 98 | header: &str, 99 | header_map: &mut HeaderMap, 100 | value: &str, 101 | ) -> std::result::Result<(), InvalidHeaderName> { 102 | let header_name = HeaderName::from_str(header)?; 103 | if let Some(header_val) = header_map 104 | .get(&header_name) 105 | .and_then(|header_val| header_val.to_str().ok()) 106 | { 107 | 
let updated_header = format!("{header_val}, {value}"); 108 | header_map.insert(header_name, build_header_value_from_str(&updated_header)); 109 | } else { 110 | header_map.insert(header_name, build_header_value_from_str(value)); 111 | } 112 | Ok(()) 113 | } 114 | 115 | fn add_remote_ip_to_forwarded_for_header(header_map: &mut HeaderMap, remote_ip: &str) { 116 | let _ = append_or_insert_header("X-Forwarded-For", header_map, remote_ip); 117 | let _ = append_or_insert_header("X-Forwarded-Proto", header_map, "https"); 118 | let forwarded_header = format!("for={remote_ip};proto=https"); 119 | let _ = append_or_insert_header("Forwarded", header_map, &forwarded_header); 120 | } 121 | 122 | pub fn build_internal_error_response(msg: Option) -> hyper::Response { 123 | let response_body = serde_json::json!({ 124 | "message": msg.unwrap_or_else(|| "An internal error occurred. Please contact support.".into()) 125 | }) 126 | .to_string(); 127 | hyper::Response::builder() 128 | .status(hyper::StatusCode::INTERNAL_SERVER_ERROR) 129 | .header(hyper::header::CONTENT_TYPE, "application/json") 130 | .header(hyper::header::CONTENT_LENGTH, response_body.len()) 131 | .body(Body::from(response_body)) 132 | .expect("Infallible - hardcoded response") 133 | } 134 | -------------------------------------------------------------------------------- /data-plane/src/server/tls/tls_server.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 3 | #[cfg(feature = "enclave")] 4 | use once_cell::sync::OnceCell; 5 | #[cfg(feature = "enclave")] 6 | use tokio_rustls::rustls::sign::CertifiedKey; 7 | 8 | use shared::server::proxy_protocol::ProxiedConnection; 9 | use shared::server::Listener; 10 | use std::sync::Arc; 11 | use tokio_rustls::rustls::server::WantsServerCert; 12 | use tokio_rustls::rustls::ConfigBuilder; 13 | use tokio_rustls::rustls::ServerConfig; 14 | use tokio_rustls::server::TlsStream; 15 | use tokio_rustls::TlsAcceptor; 16 
| 17 | #[cfg(feature = "enclave")] 18 | use crate::acme; 19 | 20 | use crate::env::{EnvironmentLoader, NeedCert}; 21 | use crate::server::error::ServerResult; 22 | use crate::server::error::TlsError; 23 | use crate::server::tls::cert_resolver::AttestableCertResolver; 24 | 25 | pub struct TlsServer { 26 | tls_acceptor: TlsAcceptor, 27 | inner: S, 28 | } 29 | 30 | impl TlsServer { 31 | fn new(server_config: ServerConfig, tcp_server: S) -> Self { 32 | Self { 33 | tls_acceptor: TlsAcceptor::from(Arc::new(server_config)), 34 | inner: tcp_server, 35 | } 36 | } 37 | } 38 | 39 | /// Mini state machine for wrapping a TCP server with the logic to terminate TLS 40 | pub struct TlsServerBuilder; 41 | 42 | impl TlsServerBuilder { 43 | /// Get instance of TlsServerBuilder, purely for readability 44 | pub fn new() -> Self { 45 | Self 46 | } 47 | 48 | /// Consume underlying server, and move to `WantsCert` state 49 | pub fn with_server(self, server: S) -> WantsCert { 50 | WantsCert { tcp_server: server } 51 | } 52 | } 53 | 54 | impl std::default::Default for TlsServerBuilder { 55 | fn default() -> Self { 56 | Self::new() 57 | } 58 | } 59 | 60 | /// Final state in provisioning a TLS Server, used to inform the source of the certs 61 | pub struct WantsCert { 62 | tcp_server: S, 63 | } 64 | 65 | #[cfg(feature = "enclave")] 66 | pub static TRUSTED_PUB_CERT: OnceCell> = OnceCell::new(); 67 | 68 | impl WantsCert { 69 | /// Get sane defaults for TLS Server config 70 | fn get_base_config() -> ConfigBuilder { 71 | ServerConfig::builder() 72 | .with_safe_defaults() 73 | .with_no_client_auth() 74 | } 75 | 76 | pub async fn with_attestable_cert( 77 | self, 78 | env_loader: EnvironmentLoader, 79 | ) -> ServerResult> { 80 | log::info!("Creating TLSServer with attestable cert"); 81 | 82 | let (env_loader, inter_ca_cert, inter_ca_key_pair) = env_loader 83 | .load_cert() 84 | .await 85 | .map_err(|err| TlsError::CertProvisionerError(err.to_string()))?; 86 | 87 | #[cfg(feature = "enclave")] 88 | let 
_: Option = enclave_trusted_cert().await; 89 | 90 | // Once intermediate cert and trusted cert retrieved, write cage initialised vars 91 | env_loader.finalize_env().unwrap(); 92 | 93 | let inter_ca_resolver = AttestableCertResolver::new(inter_ca_cert, inter_ca_key_pair)?; 94 | let mut tls_config = 95 | Self::get_base_config().with_cert_resolver(Arc::new(inter_ca_resolver)); 96 | tls_config.alpn_protocols.push(b"http/1.1".to_vec()); 97 | tls_config.alpn_protocols.push(b"h2".to_vec()); 98 | Ok(TlsServer::new(tls_config, self.tcp_server)) 99 | } 100 | } 101 | 102 | #[cfg(feature = "enclave")] 103 | async fn enclave_trusted_cert() -> Option { 104 | match acme::get_trusted_cert().await { 105 | Ok((pub_key, trusted_cert)) => { 106 | let _ = TRUSTED_PUB_CERT.set(pub_key); 107 | Some(trusted_cert) 108 | } 109 | Err(e) => { 110 | //Shutdown if we can't get a trusted cert as it's required. 111 | log::error!( 112 | "Failed to get trusted cert for enclave. Shutting down. Cause of error: {e}" 113 | ); 114 | std::process::exit(1); 115 | } 116 | } 117 | } 118 | 119 | #[async_trait] 120 | impl Listener for TlsServer 121 | where 122 | TlsError: From<::Error>, 123 | ::Connection: ProxiedConnection, 124 | { 125 | type Connection = TlsStream<::Connection>; 126 | type Error = TlsError; 127 | async fn accept(&mut self) -> Result { 128 | let conn = self.inner.accept().await?; 129 | let accepted_tls_conn = self.tls_acceptor.accept(conn).await?; 130 | Ok(accepted_tls_conn) 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /data-plane/src/health/mod.rs: -------------------------------------------------------------------------------- 1 | mod agent; 2 | 3 | use agent::UserProcessHealthcheckSender; 4 | 5 | use hyper::header; 6 | use hyper::{service::service_fn, Body, Response}; 7 | use shared::bridge::{Bridge, BridgeInterface, BridgeServer, Direction}; 8 | use shared::notify_shutdown::Service; 9 | use 
shared::server::health::{DataPlaneDiagnostic, DataPlaneState, UserProcessHealth}; 10 | use shared::{server::Listener, ENCLAVE_HEALTH_CHECK_PORT}; 11 | use tokio::sync::mpsc::{Sender, UnboundedSender}; 12 | 13 | use crate::health::agent::{HealthcheckAgent, HealthcheckStatusRequest}; 14 | 15 | fn spawn_customer_healthcheck_agent( 16 | customer_process_port: u16, 17 | healthcheck: Option, 18 | use_tls: bool, 19 | ) -> (UserProcessHealthcheckSender, Sender) { 20 | let default_interval = std::time::Duration::from_secs(1); 21 | if use_tls { 22 | let (agent, channel, shutdown_channel) = 23 | HealthcheckAgent::build_tls_agent(customer_process_port, default_interval, healthcheck); 24 | tokio::spawn(async move { agent.run().await }); 25 | (channel, shutdown_channel) 26 | } else { 27 | let (agent, channel, shutdown_channel) = 28 | HealthcheckAgent::build_agent(customer_process_port, default_interval, healthcheck); 29 | tokio::spawn(async move { agent.run().await }); 30 | (channel, shutdown_channel) 31 | } 32 | } 33 | 34 | pub async fn build_health_check_server( 35 | customer_process_port: u16, 36 | healthcheck: Option, 37 | use_tls: bool, 38 | ) -> shared::server::error::ServerResult<(HealthcheckServer, Sender)> { 39 | let (user_process_healthcheck_channel, shutdown_notifier) = 40 | spawn_customer_healthcheck_agent(customer_process_port, healthcheck, use_tls); 41 | let health_check_server = HealthcheckServer::new(user_process_healthcheck_channel).await?; 42 | Ok((health_check_server, shutdown_notifier)) 43 | } 44 | 45 | pub struct HealthcheckServer { 46 | user_process_healthcheck_channel: UnboundedSender, 47 | listener: BridgeServer, 48 | } 49 | 50 | impl HealthcheckServer { 51 | async fn new( 52 | user_process_healthcheck_channel: UnboundedSender, 53 | ) -> shared::server::error::ServerResult { 54 | let listener = 55 | Bridge::get_listener(ENCLAVE_HEALTH_CHECK_PORT, Direction::EnclaveToHost).await?; 56 | Ok(Self { 57 | listener, 58 | user_process_healthcheck_channel, 59 | }) 
60 | } 61 | 62 | pub async fn run(mut self) { 63 | log::info!("Data plane health check server running on port {ENCLAVE_HEALTH_CHECK_PORT}"); 64 | loop { 65 | let stream = match self.listener.accept().await { 66 | Ok(stream) => stream, 67 | Err(e) => { 68 | log::error!("Error accepting health check request — {e:?}"); 69 | continue; 70 | } 71 | }; 72 | 73 | let user_process_channel = self.user_process_healthcheck_channel.clone(); 74 | let service = service_fn(move |_| { 75 | let user_process_channel = user_process_channel.clone(); 76 | async move { 77 | let user_process_health = 78 | check_user_process_health(&user_process_channel).await; 79 | 80 | let result = DataPlaneState::Initialized(DataPlaneDiagnostic { 81 | user_process: user_process_health, 82 | }); 83 | 84 | Response::builder() 85 | .status(200) 86 | .header(header::CONTENT_TYPE, "application/json;version=1") 87 | .body(Body::from(serde_json::to_string(&result).unwrap())) 88 | } 89 | }); 90 | 91 | if let Err(error) = hyper::server::conn::Http::new() 92 | .http1_only(true) 93 | .serve_connection(stream, service) 94 | .await 95 | { 96 | log::error!("Data plane health check error: {error}"); 97 | } 98 | } 99 | } 100 | } 101 | 102 | async fn check_user_process_health(channel: &UserProcessHealthcheckSender) -> UserProcessHealth { 103 | let (request, receiver) = HealthcheckStatusRequest::new(); 104 | if let Err(e) = channel.send(request) { 105 | return UserProcessHealth::Error(format!( 106 | "Failed to send healthcheck to user process on channel {e:?}" 107 | )); 108 | } 109 | 110 | match receiver.await { 111 | Ok(health) => health, 112 | Err(e) => UserProcessHealth::Error(format!( 113 | "Failed to receive healthcheck from on channel {e:?}" 114 | )), 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /data-plane/src/acme/account.rs: -------------------------------------------------------------------------------- 1 | use crate::acme::directory::Directory; 2 | use 
crate::acme::error::*; 3 | 4 | use openssl::pkey::PKey; 5 | use openssl::pkey::Private; 6 | use serde::Deserialize; 7 | use serde_json::json; 8 | use shared::server::config_server::requests::SignatureType; 9 | use std::str::from_utf8; 10 | use std::sync::Arc; 11 | 12 | use super::client::AcmeClientInterface; 13 | use super::provider::Provider; 14 | 15 | #[derive(Deserialize, Eq, PartialEq, Debug, Clone)] 16 | #[serde(rename_all = "camelCase")] 17 | pub enum AccountStatus { 18 | Valid, 19 | Deactivated, 20 | Revoked, 21 | } 22 | 23 | #[derive(Deserialize, Debug, Clone)] 24 | #[serde(rename_all = "camelCase")] 25 | pub struct Account { 26 | #[serde(skip)] 27 | pub directory: Option>>, 28 | #[serde(skip)] 29 | pub private_key: Option>, 30 | #[serde(skip)] 31 | pub id: String, 32 | pub status: AccountStatus, 33 | pub contact: Option>, 34 | pub terms_of_service_agreed: Option, 35 | #[serde(skip)] 36 | pub provider: Option, 37 | } 38 | 39 | #[derive(Debug)] 40 | #[allow(unused)] 41 | pub struct AccountBuilder { 42 | directory: Arc>, 43 | eab_required: bool, 44 | contact: Option>, 45 | terms_of_service_agreed: Option, 46 | only_return_existing: Option, 47 | provider: Provider, 48 | } 49 | 50 | impl AccountBuilder { 51 | pub fn new(directory: Arc>, eab_required: bool, provider: Provider) -> Self { 52 | AccountBuilder { 53 | directory, 54 | eab_required, 55 | contact: None, 56 | terms_of_service_agreed: None, 57 | only_return_existing: None, 58 | provider, 59 | } 60 | } 61 | 62 | pub fn contact(&mut self, contact: Vec) -> &mut Self { 63 | self.contact = Some(contact); 64 | self 65 | } 66 | 67 | pub fn terms_of_service_agreed(&mut self, terms_of_service_agreed: bool) -> &mut Self { 68 | self.terms_of_service_agreed = Some(terms_of_service_agreed); 69 | self 70 | } 71 | 72 | pub fn only_return_existing(&mut self, only_return_existing: bool) -> &mut Self { 73 | self.only_return_existing = Some(only_return_existing); 74 | self 75 | } 76 | 77 | pub async fn build(&mut self) -> 
Result>, AcmeError> { 78 | let url = self.directory.new_account_url.clone(); 79 | let config_client = self.directory.config_client.clone(); 80 | 81 | let external_account_binding = if self.eab_required { 82 | let jwk_response = config_client.jwk().await?; 83 | let payload = serde_json::to_string(&jwk_response)?; 84 | 85 | let jws = config_client 86 | .jws( 87 | SignatureType::HMAC, 88 | url.clone(), 89 | None, 90 | payload, 91 | None, //Injected in control plane 92 | ) 93 | .await?; 94 | 95 | Some(jws) 96 | } else { 97 | None 98 | }; 99 | 100 | let res = self 101 | .directory 102 | .authenticated_request( 103 | &url, 104 | "POST", 105 | Some(json!({ 106 | "contact": self.contact, 107 | "termsOfServiceAgreed": self.terms_of_service_agreed, 108 | "onlyReturnExisting": self.only_return_existing, 109 | "externalAccountBinding": external_account_binding, 110 | })), 111 | &None, 112 | self.provider.clone(), 113 | ) 114 | .await?; 115 | 116 | let headers = res.headers().clone(); 117 | let resp_bytes = hyper::body::to_bytes(res.into_body()).await?; 118 | let body_str = from_utf8(&resp_bytes)?; 119 | let mut account: Account<_> = serde_json::from_str(body_str)?; 120 | 121 | let account_id = headers 122 | .get(hyper::header::LOCATION) 123 | .ok_or(AcmeError::General(String::from( 124 | "No location header in newAccount request", 125 | )))? 126 | .to_str()? 
127 | .to_string(); 128 | 129 | account.directory = Some(self.directory.clone()); 130 | account.id = account_id; 131 | account.provider = Some(self.provider.clone()); 132 | Ok(Arc::new(account)) 133 | } 134 | } 135 | 136 | impl Account { 137 | pub fn private_key(&self) -> Result, AcmeError> { 138 | self.private_key.clone().ok_or(AcmeError::General( 139 | "No private key found for account".to_string(), 140 | )) 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /e2e-tests/mock-crypto/src/main.rs: -------------------------------------------------------------------------------- 1 | use axum::{routing::post, Router}; 2 | use axum::{extract, Json}; 3 | use axum_server::tls_rustls::RustlsConfig; 4 | use rustls::server::ServerConfig; 5 | use serde::{Deserialize, Serialize}; 6 | use serde_json::Value; 7 | use std::convert::Infallible; 8 | use axum::response::IntoResponse; 9 | use axum::http::StatusCode; 10 | use axum::http::HeaderMap; 11 | 12 | mod encrypt_mock; 13 | 14 | lazy_static::lazy_static! 
{ 15 | static ref APP_UUID: String = std::env::var("EV_APP_UUID").expect("No app id given"); 16 | static ref API_KEY: String = std::env::var("EV_API_KEY").expect("No api key given"); 17 | } 18 | 19 | #[tokio::main] 20 | async fn main() { 21 | tokio::join!( 22 | run_https_server(7676), 23 | run_http_server(7677), 24 | ); 25 | } 26 | 27 | async fn run_https_server(port: u16) { 28 | let tls_key = std::env::var("MOCK_CRYPTO_KEY").expect("No key given"); 29 | let tls_cert = std::env::var("MOCK_CRYPTO_CERT").expect("No cert given"); 30 | 31 | let key_bytes = tls_key.as_bytes().to_vec(); 32 | let cert_bytes = tls_cert.as_bytes().to_vec(); 33 | 34 | let cert_chain: Vec = rustls_pemfile::certs(&mut cert_bytes.as_ref()) 35 | .map(|certs| 36 | certs.into_iter().map(rustls::Certificate).collect() 37 | ) 38 | .expect("Failed to parse cert"); 39 | 40 | let keys = rustls_pemfile::pkcs8_private_keys(&mut key_bytes.as_ref()).expect("Failed to parse pk"); 41 | 42 | let tls_cfg = ServerConfig::builder() 43 | .with_safe_defaults() 44 | .with_no_client_auth() 45 | .with_single_cert(cert_chain, rustls::PrivateKey(keys.get(0).unwrap().clone())) 46 | .expect("Failed to build server"); 47 | let tls_cfg = std::sync::Arc::new(tls_cfg); 48 | 49 | let addr = std::net::SocketAddr::from(([127, 0, 0, 1], port)); 50 | let router = get_router(); 51 | println!("Starting https mock e3 on {port}"); 52 | axum_server::bind_rustls(addr, RustlsConfig::from_config(tls_cfg)) 53 | .serve(router.into_make_service()) 54 | .await 55 | .expect("Could not bind https server"); 56 | } 57 | 58 | async fn run_http_server(port: u16) { 59 | let addr = std::net::SocketAddr::from(([127, 0, 0, 1], port)); 60 | let router = get_router(); 61 | println!("Starting http mock e3 on {port}"); 62 | axum::Server::bind(&addr) 63 | .serve(router.into_make_service()) 64 | .await 65 | .expect("Could not bind http server"); 66 | } 67 | 68 | fn get_router() -> Router { 69 | Router::new() 70 | .route("/encrypt", post(encryption_handler)) 
71 | .route("/decrypt", post(decryption_handler)) 72 | .route("/authenticate", post(authentication_handler)) 73 | } 74 | 75 | fn encrypt(value: &mut Value) { 76 | if value.is_object() { 77 | value.as_object_mut().unwrap().values_mut().for_each(encrypt); 78 | } else if value.is_array() { 79 | value.as_array_mut().unwrap().iter_mut().for_each(encrypt); 80 | } else { 81 | *value = encrypt_mock::encrypt(value.clone()); 82 | } 83 | } 84 | 85 | fn decrypt(value: &mut Value) { 86 | if value.is_object() { 87 | value.as_object_mut().unwrap().values_mut().for_each(decrypt); 88 | } else if value.is_array() { 89 | value.as_array_mut().unwrap().iter_mut().for_each(decrypt); 90 | } else if value.is_string() { // all encrypted values are strings 91 | let str_val = encrypt_mock::convert_value_to_string(&value); 92 | match encrypt_mock::decrypt(str_val) { 93 | Ok(decrypted) => { 94 | *value = decrypted; 95 | }, 96 | Err(e) => { 97 | eprintln!("Failed to decrypt: {e:?}"); 98 | } 99 | } 100 | } 101 | } 102 | 103 | async fn encryption_handler( 104 | extract::Json(mut request_payload): extract::Json 105 | ) -> Result, Infallible> { 106 | println!("[Mock Crypto API] - Recieved request to encrypt!"); 107 | encrypt(request_payload.data_mut()); 108 | Ok(Json(request_payload)) 109 | } 110 | 111 | async fn decryption_handler( 112 | extract::Json(mut payload): extract::Json, 113 | ) -> Result { 114 | println!("[Mock Crypto API] - Recieved request to decrypt!"); 115 | decrypt(&mut payload); 116 | Ok(Json(payload)) 117 | } 118 | 119 | async fn authentication_handler(headers: HeaderMap) -> impl IntoResponse { 120 | match headers.get("api-key") { 121 | Some(key) => { 122 | if key.to_str().unwrap() == "e0hrSRE7NYXmyoG7aAlRbu/Vgly7ak/4dkqCnB044+VH+xuJkMwiIGt2C4xBQ82um7AwsOX/rvytn4Hlb6izsw==" { 123 | println!("[Mock Crypto API] - Recieved request to authenticate!"); 124 | StatusCode::OK 125 | } else { 126 | println!("[Mock Crypto API] - Invalid API key recieved"); 127 | StatusCode::UNAUTHORIZED 128 
| } 129 | }, 130 | None => return StatusCode::UNAUTHORIZED 131 | } 132 | } 133 | 134 | #[derive(Debug, Deserialize, Serialize)] 135 | struct RequestPayload { 136 | data: Value 137 | } 138 | 139 | impl RequestPayload { 140 | fn data_mut(&mut self) -> &mut Value { 141 | &mut self.data 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /installer/scripts/compile-runtime-dependencies.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | # If running within github actions, operate within mounted FS, else operate from root 6 | BASE_PATH=$GITHUB_WORKSPACE 7 | OUTPUT_PATH="$BASE_PATH/output" 8 | 9 | PACKAGES_PATH=/packages 10 | cd $PACKAGES_PATH 11 | 12 | ### Build runit 13 | gunzip runit-2.1.2.tar.gz 14 | tar -xpf runit-2.1.2.tar 15 | cd admin/runit-2.1.2 # runit contains a top level folder called admin 16 | 17 | # compile runit 18 | echo "****************************" 19 | echo "* compiling runit binaries *" 20 | echo "****************************" 21 | 22 | # Configure static compilation of runit using dietlibc 23 | echo 'gcc -O2 -Wall -static' >src/conf-cc 24 | echo 'gcc -static -Os -pipe' >src/conf-ld 25 | ./package/compile 26 | ./package/check 27 | 28 | # Create expected directories for runit 29 | mkdir -p "$OUTPUT_PATH/runit-2.1.2/src" 30 | 31 | # Move compiled runit commands into output commands folder 32 | echo "************************************" 33 | echo "* copying runit binaries to output *" 34 | echo "************************************" 35 | cp -r command "$OUTPUT_PATH/runit-2.1.2" 36 | 37 | # Move compiled runit scripts into output scripts folder 38 | cp -r ./package "$OUTPUT_PATH/runit-2.1.2" 39 | 40 | # extract net-tools source 41 | cd $PACKAGES_PATH 42 | echo "************************" 43 | echo "* extracting net-tools *" 44 | echo "************************" 45 | xz -d net-tools-2.10.tar.xz ; tar -xf net-tools-2.10.tar 46 | 47 | echo 
"**********************" 48 | echo "* building net-tools *" 49 | echo "**********************" 50 | cd net-tools-2.10 51 | # Use preconfigured config for Cage environment 52 | cp "$PACKAGES_PATH/net-tools.h" ./config.h 53 | 54 | 55 | # Run make commands required for ifconfig, include static flag 56 | CFLAGS="-O2 -g -static" make subdirs 57 | CFLAGS="-O2 -g -static" make ifconfig 58 | 59 | mkdir -p "$OUTPUT_PATH/net-tools-2.10" 60 | 61 | # Copy ifconfig binary to output directory 62 | echo "*******************************" 63 | echo "* copying ifconfig to outputs *" 64 | echo "*******************************" 65 | cp ./ifconfig "$OUTPUT_PATH/net-tools-2.10" 66 | 67 | 68 | cd $PACKAGES_PATH 69 | # libmnl is required for nftables support which is required for NAT 70 | echo "************************" 71 | echo "* extracting libmnl *" 72 | echo "************************" 73 | tar -xvf libmnl-1.0.4.tar.bz2 74 | 75 | export CFLAGS='-static' 76 | export LDFLAGS='-static' 77 | 78 | echo "************************" 79 | echo "* building libmnl *" 80 | echo "************************" 81 | cd libmnl-1.0.4 82 | ./configure --enable-static --disable-shared 83 | make 84 | make install 85 | 86 | cd $PACKAGES_PATH 87 | echo "************************" 88 | echo "* extracting libnftnl *" 89 | echo "************************" 90 | xz -d libnftnl-1.2.6.tar.xz ; tar -xf libnftnl-1.2.6.tar 91 | 92 | echo "************************" 93 | echo "* building libnftnl *" 94 | echo "************************" 95 | cd libnftnl-1.2.6 96 | ./configure --enable-static --disable-shared 97 | make 98 | make install 99 | 100 | cd $PACKAGES_PATH 101 | echo "************************" 102 | echo "* extracting iptables *" 103 | echo "************************" 104 | xz -d iptables-1.8.10.tar.xz ; tar -xf iptables-1.8.10.tar 105 | 106 | echo "************************" 107 | echo "* building iptables *" 108 | echo "************************" 109 | cd iptables-1.8.10 110 | export CFLAGS='-static' 111 | export 
LDFLAGS='--static' 112 | ./configure --disable-shared --enable-static 113 | make 114 | make install 115 | 116 | # Copy iptables binary to output directory 117 | echo "*******************************" 118 | echo "* copying iptables to outputs *" 119 | echo "*******************************" 120 | mkdir -p "$OUTPUT_PATH/iptables-1.8.10" 121 | cp -r ./iptables "$OUTPUT_PATH/iptables-1.8.10" 122 | 123 | cd $PACKAGES_PATH 124 | echo "************************" 125 | echo "* extracting iproute2 *" 126 | echo "************************" 127 | tar -xvf iproute2-6.7.0.tar.gz 128 | mkdir -p "$OUTPUT_PATH/iproute2-6.7.0" 129 | 130 | echo "************************" 131 | echo "* building iproute2 *" 132 | echo "************************" 133 | cd iproute2-6.7.0 134 | unset CFLAGS 135 | unset LDFLAGS 136 | ./configure 137 | make CCOPTS="-O2 -pipe -static" LDFLAGS="--static" SUBDIRS="lib ip" V=1 # Statically compile ip with verbose logging enabled 138 | cp ip/ip "$OUTPUT_PATH/iproute2-6.7.0" 139 | 140 | 141 | # Create archive of static binaries and installer 142 | echo "******************************" 143 | echo "* creating installer archive *" 144 | echo "******************************" 145 | cp "$PACKAGES_PATH/installer.sh" "$OUTPUT_PATH/installer.sh" 146 | cd $OUTPUT_PATH 147 | tar -czf runtime-dependencies.tar.gz net-tools-2.10 runit-2.1.2 installer.sh iptables-1.8.10 iproute2-6.7.0 148 | 149 | # Remove binaries outside of the archive 150 | echo "*****************************" 151 | echo "* removing unused artifacts *" 152 | echo "*****************************" 153 | rm -rf net-tools-2.10 runit-2.1.2 installer.sh iptables-1.8.10 iproute2-6.7.0 154 | --------------------------------------------------------------------------------