├── iris-mpc-cpu ├── .gitignore ├── src │ ├── py_bindings │ │ └── mod.rs │ ├── analysis │ │ └── mod.rs │ ├── utils │ │ ├── constants.rs │ │ ├── mod.rs │ │ └── serialization │ │ │ ├── types │ │ │ ├── mod.rs │ │ │ ├── iris_base64.rs │ │ │ ├── graph_v3.rs │ │ │ ├── graph_v2.rs │ │ │ ├── graph_v1.rs │ │ │ └── graph_v0.rs │ │ │ └── mod.rs │ ├── protocol │ │ └── mod.rs │ ├── hnsw │ │ ├── metrics │ │ │ └── mod.rs │ │ ├── graph │ │ │ ├── mod.rs │ │ │ └── graph_diff │ │ │ │ └── mod.rs │ │ ├── sorting │ │ │ └── mod.rs │ │ └── mod.rs │ ├── execution │ │ ├── mod.rs │ │ └── hawk_main │ │ │ └── session_groups.rs │ ├── hawkers │ │ ├── aby3 │ │ │ └── mod.rs │ │ └── mod.rs │ ├── lib.rs │ └── genesis │ │ ├── utils │ │ ├── mod.rs │ │ ├── errors.rs │ │ └── logger.rs │ │ └── mod.rs ├── benches │ ├── bench_utils.rs │ └── set_hash.rs └── examples │ └── hnsw-ex.rs ├── .envrc ├── iris-mpc-bins ├── .gitignore ├── bin │ ├── iris-mpc-upgrade │ │ ├── nginx │ │ │ ├── cert │ │ │ │ ├── .gitkeep │ │ │ │ └── server-ext.cnf │ │ │ └── nginx.conf │ │ ├── .dockerignore │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── aws_local.sh │ │ ├── ssh_chain.sh │ │ └── docker-compose.rand.yaml │ ├── iris-mpc │ │ ├── client │ │ │ └── client.rs │ │ └── server │ │ │ └── iris_mpc_hawk.rs │ ├── iris-mpc-cpu │ │ ├── hawk_main.rs │ │ └── local_hnsw.rs │ ├── iris-mpc-utils │ │ └── write_node_config_toml.rs │ └── iris-mpc-common │ │ ├── migrator.rs │ │ └── README.md ├── ideal_config.toml ├── ideal_config2.toml └── accuracy1.toml ├── iris-mpc-utils ├── .gitignore ├── src │ ├── pgres │ │ └── mod.rs │ ├── fsys │ │ ├── mod.rs │ │ └── reader.rs │ ├── lib.rs │ ├── irises │ │ └── mod.rs │ ├── aws │ │ ├── mod.rs │ │ ├── errors.rs │ │ └── keys.rs │ ├── client │ │ ├── components │ │ │ └── mod.rs │ │ └── typeset │ │ │ ├── mod.rs │ │ │ ├── data │ │ │ ├── mod.rs │ │ │ └── response.rs │ │ │ ├── errors.rs │ │ │ └── traits.rs │ ├── types.rs │ ├── constants.rs │ └── misc.rs ├── README.md └── Cargo.toml ├── iris-mpc-py ├── examples-py │ └── .gitignore ├── src │ ├── lib.rs │ └── py_hnsw │ │ ├── mod.rs │ │ ├── pyclasses │ │ ├── mod.rs │ │ ├── iris_code_array.rs │ │ ├── graph_store.rs │ │ └── plaintext_store.rs │ │ └── pymodule.rs ├── build.rs ├── Cargo.toml ├── pyproject.toml └── .gitignore ├── direnv.toml ├── iris-mpc-upgrade-hawk ├── src │ └── lib.rs ├── tests │ ├── utils │ │ ├── constants.rs │ │ ├── mod.rs │ │ ├── resources.rs │ │ └── logger.rs │ └── workflows │ │ └── mod.rs └── Cargo.toml ├── rust-toolchain.toml ├── migrations ├── 20240709182244_init.down.sql ├── 20240729175942_results.down.sql ├── 20240725064334_last_request.down.sql ├── 20250519103712_drop_sync_table.up.sql ├── 20250214090415_modifications_table.down.sql ├── 20241121084719_remove_sequence.sql ├── 20250317120117_add_modification_result_msg.down.sql ├── 20250317120117_add_modification_result_msg.up.sql ├── 20250605142156_add_graph_mutation_to_modifications.down.sql ├── 20250605142156_add_graph_mutation_to_modifications.up.sql ├── 20250226181215_graph_id.down.sql ├── 20250513194350_add_persistent_state.down.sql ├── 20240729175942_results.up.sql ├── 20240725064334_last_request.up.sql ├── 20241206150412_add-modified-at.down.sql ├── 20250321144530_add_version_id.down.sql ├── 20250519103712_drop_sync_table.down.sql ├── 20250700000001_multiple_entry_point.down.sql ├── 20240709182244_init.up.sql ├── 20250602103712_drop_results_table.down.sql ├── 20240902075513_start_id_at_1.down.sql ├── 20250700000001_multiple_entry_point.up.sql ├── 20250513194350_add_persistent_state.up.sql ├── 
20250214090415_modifications_table.up.sql ├── 20250602103712_drop_results_table.up.sql ├── 20240902075513_start_id_at_1.up.sql ├── 20241206150412_add-modified-at.up.sql ├── 20250313141530_modifications_table_v2.down.sql ├── 20250226181215_graph_id.up.sql ├── 20250521000000_binary_graph_fmt.down.sql ├── 20250521000000_binary_graph_fmt.up.sql ├── 20250530201832_unify_id_repr.down.sql ├── 20250530201832_unify_id_repr.up.sql ├── 20250321144530_add_version_id.up.sql └── 20250313141530_modifications_table_v2.up.sql ├── iris-mpc ├── src │ ├── client │ │ └── mod.rs │ ├── lib.rs │ └── services │ │ ├── aws │ │ ├── mod.rs │ │ ├── s3.rs │ │ ├── sqs.rs │ │ └── clients.rs │ │ ├── mod.rs │ │ └── init │ │ └── mod.rs ├── build.rs └── Cargo.toml ├── iris-mpc-gpu ├── src │ ├── rng │ │ └── mod.rs │ ├── threshold_ring │ │ ├── mod.rs │ │ └── cuda │ │ │ └── mod.rs │ ├── lib.rs │ ├── dot │ │ └── mod.rs │ └── helpers │ │ └── id_wrapper.rs ├── Cargo.toml └── benches │ └── chacha.rs ├── .dockerignore ├── .gitignore ├── mpc-architecture-v2.png ├── scripts ├── tools │ ├── init-db-pgres.sql │ └── init-servers.sh ├── purge_stage │ ├── db-cleaner-helper-pod-ampc-hnsw.yaml │ ├── db-cleaner-helper-pod-iris-mpc.yaml │ ├── db-cleaner-helper-pod.yaml │ ├── delete_mongodb_gpu_collections.sh │ ├── delete_mongodb_cpu_collections.sh │ └── purge_stage_mongo_collections.sh ├── run-client.sh ├── run-client-docker.sh ├── run-anon-stats-server.sh ├── run-tests-hnsw-e2e.sh ├── run-server-docker.sh ├── setup-pre-commit.sh ├── README.md └── run-server.sh ├── .github ├── CODEOWNERS ├── workflows │ ├── check-licenses.yaml │ ├── check-secrets.yml │ ├── relyance-sci.yml │ ├── release.yaml │ ├── build-and-push-debug.yaml │ ├── temp-branch-build-and-push.yaml │ ├── temp-branch-build-and-push-anon-stats-server.yml │ ├── temp-branch-build-and-push-upgrade-hawk.yaml │ ├── temp-branch-build-and-push-hawk.yaml │ ├── build-and-push-shares-encoding.yaml │ ├── build-and-push-upgrade-hawk.yaml │ ├── build-and-push-no-cuda.yaml │ └── temp-branch-build-and-push-hawk-arm64.yaml ├── dependabot.yml └── release-drafter.yml ├── rustfmt.toml ├── CODE_OF_CONDUCT.md ├── .env.test ├── iris-mpc-common ├── src │ ├── helpers │ │ ├── sha256.rs │ │ ├── mod.rs │ │ ├── sqs_s3_helper.rs │ │ ├── kms_dh.rs │ │ └── aws.rs │ ├── error.rs │ ├── iris_db │ │ ├── mod.rs │ │ └── shamir_iris.rs │ └── lib.rs ├── tests │ └── sha256.rs └── Cargo.toml ├── docker-compose.test.hnsw.e2e.yaml ├── justfile ├── iris-mpc-upgrade ├── build.rs ├── protos │ └── reshare.proto ├── src │ ├── proto │ │ └── mod.rs │ └── utils.rs └── Cargo.toml ├── .test.hawk0.env ├── .test.hawk1.env ├── .test.hawk2.env ├── iris-mpc-store └── Cargo.toml ├── Dockerfile.hnsw.test.e2e ├── LICENSE-MIT ├── Dockerfile.shares-encoding ├── Dockerfile.debug ├── docker-compose.dev.yaml ├── Dockerfile.shares-re-randomization ├── deploy ├── stage │ ├── common-values-ampc-hnsw-anon-stats-server.yaml │ ├── common-values-iris-mpc.yaml │ ├── smpcv2-0-stage │ │ └── values-reshare-server.yaml │ ├── smpcv2-1-stage │ │ └── values-reshare-server.yaml │ └── smpcv2-2-stage │ │ └── values-reshare-server.yaml ├── prod │ ├── common-values-anon-stats-server.yaml │ ├── smpcv2-0-prod │ │ └── values-reshare-server.yaml │ ├── smpcv2-1-prod │ │ └── values-reshare-server.yaml │ ├── smpcv2-2-prod │ │ └── values-reshare-server.yaml │ └── common-values-iris-mpc.yaml └── dev │ └── common-values-ampc-hnsw.yaml ├── adr ├── 001-storage-and-processing-of-iris-shares.md └── 002-load-performance-optimisation.md ├── Dockerfile.reshare-protocol ├── .test.env 
├── certs ├── ca.crt └── aws_orb_prod_private_ca.crt ├── .pre-commit-config.yaml ├── Dockerfile.nocuda └── Dockerfile /iris-mpc-cpu/.gitignore: -------------------------------------------------------------------------------- 1 | /data 2 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | dotenv_if_exists .test.env 2 | -------------------------------------------------------------------------------- /iris-mpc-bins/.gitignore: -------------------------------------------------------------------------------- 1 | /data 2 | -------------------------------------------------------------------------------- /iris-mpc-utils/.gitignore: -------------------------------------------------------------------------------- 1 | /data 2 | -------------------------------------------------------------------------------- /iris-mpc-py/examples-py/.gitignore: -------------------------------------------------------------------------------- 1 | data 2 | -------------------------------------------------------------------------------- /iris-mpc-py/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod py_hnsw; 2 | -------------------------------------------------------------------------------- /direnv.toml: -------------------------------------------------------------------------------- 1 | [global] 2 | load_dotenv = true 3 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/pgres/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod ops; 2 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/py_bindings/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod hnsw; 2 | -------------------------------------------------------------------------------- /iris-mpc-upgrade-hawk/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod genesis; 2 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-upgrade/nginx/cert/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/analysis/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod accuracy; 2 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.89.0" 3 | -------------------------------------------------------------------------------- /migrations/20240709182244_init.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE irises; 2 | -------------------------------------------------------------------------------- /migrations/20240729175942_results.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE results; -------------------------------------------------------------------------------- /iris-mpc/src/client/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod e2e; 2 | pub mod iris_data; 3 | 
-------------------------------------------------------------------------------- /migrations/20240725064334_last_request.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE sync; 2 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-upgrade/.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .gitignore 3 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/utils/constants.rs: -------------------------------------------------------------------------------- 1 | pub const N_PARTIES: usize = 3; 2 | -------------------------------------------------------------------------------- /iris-mpc-gpu/src/rng/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod chacha; 2 | pub mod chacha_corr; 3 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target 2 | # Added by SEC-1406 patch 3 | .git 4 | .gitignore 5 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/protocol/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod ops; 2 | pub mod shared_iris; 3 | -------------------------------------------------------------------------------- /iris-mpc-gpu/src/threshold_ring/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cuda; 2 | pub mod protocol; 3 | -------------------------------------------------------------------------------- /iris-mpc-py/src/py_hnsw/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod pyclasses; 2 | pub mod pymodule; 3 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/hnsw/metrics/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod network; 2 | pub mod ops_counter; 3 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod constants; 2 | pub mod serialization; 3 | -------------------------------------------------------------------------------- /iris-mpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | pub mod server; 3 | pub mod services; 4 | -------------------------------------------------------------------------------- /migrations/20250519103712_drop_sync_table.up.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS sync; 2 | -------------------------------------------------------------------------------- /iris-mpc/src/services/aws/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod clients; 2 | pub mod s3; 3 | pub mod sqs; 4 | -------------------------------------------------------------------------------- /iris-mpc/src/services/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod aws; 2 | pub mod init; 3 | pub mod processors; 4 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | /target 2 | *.db 3 | .vscode/ 4 | .idea/ 5 | .env 6 | *.iml 7 | 8 | .DS_Store 9 | -------------------------------------------------------------------------------- /migrations/20250214090415_modifications_table.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS modifications; 2 | -------------------------------------------------------------------------------- /mpc-architecture-v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/worldcoin/iris-mpc/HEAD/mpc-architecture-v2.png -------------------------------------------------------------------------------- /iris-mpc-gpu/src/threshold_ring/cuda/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) const PTX_SRC: &str = include_str!("kernel.cu"); 2 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/fsys/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod local; 2 | mod reader; 3 | 4 | pub use reader::read_node_config; 5 | -------------------------------------------------------------------------------- /migrations/20241121084719_remove_sequence.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE irises ALTER COLUMN id DROP IDENTITY IF EXISTS; -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-upgrade/.gitignore: -------------------------------------------------------------------------------- 1 | out0/ 2 | out1/ 3 | out2/ 4 | *.log 5 | cert/*.pem 6 | cert/*.key 7 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/execution/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod hawk_main; 2 | 3 | pub use ampc_actor_utils::execution::{local, player, session}; 4 | -------------------------------------------------------------------------------- /migrations/20250317120117_add_modification_result_msg.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE modifications DROP COLUMN result_message_body; 2 | -------------------------------------------------------------------------------- /migrations/20250317120117_add_modification_result_msg.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE modifications ADD COLUMN result_message_body TEXT; 2 | -------------------------------------------------------------------------------- /migrations/20250605142156_add_graph_mutation_to_modifications.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE modifications DROP COLUMN graph_mutation; 2 | -------------------------------------------------------------------------------- /migrations/20250605142156_add_graph_mutation_to_modifications.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE modifications ADD COLUMN graph_mutation bytea; 2 | -------------------------------------------------------------------------------- /scripts/tools/init-db-pgres.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE "SMPC_dev_0"; 2 | CREATE DATABASE 
"SMPC_dev_1"; 3 | CREATE DATABASE "SMPC_dev_2"; 4 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | /deploy/prod/ @philsippl @worldcoin/proof-of-personhood 2 | /.github/ @philsippl @worldcoin/proof-of-personhood 3 | -------------------------------------------------------------------------------- /iris-mpc-upgrade-hawk/tests/utils/constants.rs: -------------------------------------------------------------------------------- 1 | /// Number of participating MPC parties. 2 | pub const COUNT_OF_PARTIES: usize = 3; 3 | -------------------------------------------------------------------------------- /migrations/20250226181215_graph_id.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS hawk_graph_links; 2 | 3 | DROP TABLE IF EXISTS hawk_graph_entry; 4 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # See: https://github.com/rust-lang/rustfmt/blob/master/Configurations.md 2 | edition = "2021" 3 | use_field_init_shorthand = true 4 | -------------------------------------------------------------------------------- /migrations/20250513194350_add_persistent_state.down.sql: -------------------------------------------------------------------------------- 1 | -- Remove persistent server state table -- 2 | 3 | DROP TABLE IF EXISTS persistent_state; 4 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-upgrade/nginx/cert/server-ext.cnf: -------------------------------------------------------------------------------- 1 | subjectAltName=DNS:*.2.stage.smpcv2.worldcoin.dev,DNS:*.2.stage.smpcv2.worldcoin.dev,IP:0.0.0.0 2 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/hnsw/graph/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod graph_diff; 2 | pub mod graph_store; 3 | pub mod layered_graph; 4 | pub mod neighborhood; 5 | pub mod test_utils; 6 | -------------------------------------------------------------------------------- /iris-mpc-gpu/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::needless_range_loop)] 2 | pub mod dot; 3 | pub mod helpers; 4 | pub mod rng; 5 | pub mod server; 6 | pub mod threshold_ring; 7 | -------------------------------------------------------------------------------- /iris-mpc-py/src/py_hnsw/pyclasses/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod graph_store; 2 | pub mod hnsw_searcher; 3 | pub mod iris_code; 4 | pub mod iris_code_array; 5 | pub mod plaintext_store; 6 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod aws; 2 | pub mod client; 3 | pub mod constants; 4 | pub mod fsys; 5 | pub mod irises; 6 | pub mod misc; 7 | pub mod pgres; 8 | pub mod types; 9 | -------------------------------------------------------------------------------- /migrations/20240729175942_results.up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE results ( 2 | id BIGINT GENERATED BY DEFAULT AS 
IDENTITY (START WITH 0 MINVALUE 0) PRIMARY KEY, 3 | result_event TEXT NOT NULL 4 | ); -------------------------------------------------------------------------------- /iris-mpc-cpu/src/hnsw/sorting/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod batcher; 2 | pub mod binary_search; 3 | pub mod min_k_batcher; 4 | pub mod quickselect; 5 | pub mod quicksort; 6 | pub mod swap_network; 7 | pub mod tree_min; 8 | -------------------------------------------------------------------------------- /migrations/20240725064334_last_request.up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS sync ( 2 | id BIGINT GENERATED BY DEFAULT AS IDENTITY (START WITH 0 MINVALUE 0) PRIMARY KEY, 3 | request_id TEXT NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /migrations/20241206150412_add-modified-at.down.sql: -------------------------------------------------------------------------------- 1 | DROP TRIGGER IF EXISTS set_last_modified_at ON irises; 2 | DROP FUNCTION IF EXISTS update_last_modified_at(); 3 | ALTER TABLE irises DROP COLUMN last_modified_at; 4 | -------------------------------------------------------------------------------- /migrations/20250321144530_add_version_id.down.sql: -------------------------------------------------------------------------------- 1 | DROP TRIGGER IF EXISTS increment_version_id_trigger ON irises; 2 | DROP FUNCTION IF EXISTS increment_version_id(); 3 | ALTER TABLE irises DROP COLUMN version_id; 4 | -------------------------------------------------------------------------------- /migrations/20250519103712_drop_sync_table.down.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS sync ( 2 | id BIGINT GENERATED BY DEFAULT AS IDENTITY (START WITH 0 MINVALUE 0) PRIMARY KEY, 3 | request_id TEXT NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Our code of conduct is located in the [orb-software repo][CODE_OF_CONDUCT]. 
4 | 5 | [CODE_OF_CONDUCT]: https://github.com/worldcoin/orb-software/blob/main/CODE_OF_CONDUCT.md 6 | -------------------------------------------------------------------------------- /.env.test: -------------------------------------------------------------------------------- 1 | # Database Configuration 2 | SMPC__DATABASE__URL=postgres://postgres:postgres@localhost/postgres 3 | 4 | # General Environment Configuration 5 | SMPC__ENVIRONMENT=test 6 | 7 | # App configuration 8 | SMPC__PARTY_ID=0 9 | -------------------------------------------------------------------------------- /migrations/20250700000001_multiple_entry_point.down.sql: -------------------------------------------------------------------------------- 1 | --- allow only one entry point per graph 2 | ALTER TABLE hawk_graph_entry DROP CONSTRAINT hawk_graph_entry_pkey; 3 | ALTER TABLE hawk_graph_entry ADD CONSTRAINT hawk_graph_entry_pkey PRIMARY KEY (graph_id); -------------------------------------------------------------------------------- /migrations/20240709182244_init.up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS irises ( 2 | id BIGINT GENERATED BY DEFAULT AS IDENTITY (START WITH 0 MINVALUE 0) PRIMARY KEY, 3 | left_code BYTEA, 4 | left_mask BYTEA, 5 | right_code BYTEA, 6 | right_mask BYTEA 7 | ); 8 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/hawkers/aby3/mod.rs: -------------------------------------------------------------------------------- 1 | /// Store with vectors in secret shared form. 2 | /// The underlying operations are secure multi-party computation (MPC) operations. 3 | pub mod aby3_store; 4 | 5 | /// Various utilities for data generation in tests. 6 | pub mod test_utils; 7 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod analysis; 2 | pub mod execution; 3 | pub mod genesis; 4 | pub mod hawkers; 5 | pub mod hnsw; 6 | pub mod protocol; 7 | pub mod py_bindings; 8 | pub mod utils; 9 | 10 | pub use ampc_actor_utils::network; 11 | pub use ampc_secret_sharing::shares; 12 | -------------------------------------------------------------------------------- /migrations/20250602103712_drop_results_table.down.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS results ( 2 | id BIGINT GENERATED BY DEFAULT AS IDENTITY (START WITH 0 MINVALUE 0) PRIMARY KEY, 3 | result_event TEXT NOT NULL 4 | ); 5 | ALTER TABLE modifications ALTER COLUMN serial_id SET NOT NULL; 6 | -------------------------------------------------------------------------------- /migrations/20240902075513_start_id_at_1.down.sql: -------------------------------------------------------------------------------- 1 | -- Add down migration script here 2 | ALTER TABLE irises ALTER COLUMN id SET MINVALUE 0; 3 | SELECT setval(pg_get_serial_sequence('irises', 'id'), coalesce(max(id),0), false) FROM irises; 4 | ALTER TABLE irises ALTER COLUMN id SET START WITH 0; 5 | -------------------------------------------------------------------------------- /migrations/20250700000001_multiple_entry_point.up.sql: -------------------------------------------------------------------------------- 1 | --- need the graph entry table to have multiple entry points. 
2 | ALTER TABLE hawk_graph_entry DROP CONSTRAINT hawk_graph_entry_pkey; 3 | ALTER TABLE hawk_graph_entry ADD CONSTRAINT hawk_graph_entry_pkey PRIMARY KEY (graph_id, serial_id, layer); -------------------------------------------------------------------------------- /iris-mpc-utils/src/irises/mod.rs: -------------------------------------------------------------------------------- 1 | mod generator; 2 | pub mod modifications; 3 | pub mod shares; 4 | 5 | pub use generator::{ 6 | generate_iris_code_and_mask_shares, generate_iris_code_and_mask_shares_both_eyes, 7 | generate_iris_shares_locally, generate_iris_shares_locally_mirrored, 8 | }; 9 | -------------------------------------------------------------------------------- /iris-mpc-common/src/helpers/sha256.rs: -------------------------------------------------------------------------------- 1 | use sha2::{Digest, Sha256}; 2 | 3 | pub fn sha256_as_hex_string<T: AsRef<[u8]>>(data: T) -> String { 4 | hex::encode(sha256_bytes(data)) 5 | } 6 | 7 | pub fn sha256_bytes<T: AsRef<[u8]>>(data: T) -> [u8; 32] { 8 | Sha256::digest(data.as_ref()).into() 9 | } 10 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc/client/client.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use eyre::Result; 3 | use iris_mpc::client::e2e::{run_client, Opt}; 4 | 5 | #[tokio::main] 6 | async fn main() -> Result<()> { 7 | let opts = Opt::parse(); 8 | println!("Running with options: {:?}", opts); 9 | run_client(opts).await 10 | } 11 | -------------------------------------------------------------------------------- /migrations/20250513194350_add_persistent_state.up.sql: -------------------------------------------------------------------------------- 1 | -- Add persistent server state table -- 2 | 3 | CREATE TABLE IF NOT EXISTS persistent_state 4 | ( 5 | domain text NOT NULL, 6 | "key" text NOT NULL, 7 | "value" jsonb NOT NULL, 8 | CONSTRAINT persistent_state_pkey PRIMARY KEY (domain, "key") 9 | ); 10 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/aws/mod.rs: -------------------------------------------------------------------------------- 1 | mod client; 2 | mod config; 3 | mod errors; 4 | mod factory; 5 | mod keys; 6 | mod ops; 7 | pub mod types; 8 | 9 | pub use client::AwsClient; 10 | pub use config::AwsClientConfig; 11 | pub use errors::AwsClientError; 12 | pub use factory::{create_iris_code_party_shares, create_iris_party_shares_for_s3}; 13 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/client/components/mod.rs: -------------------------------------------------------------------------------- 1 | mod request_enqueuer; 2 | mod request_generator; 3 | mod response_dequeuer; 4 | mod shares_uploader; 5 | 6 | pub use request_enqueuer::RequestEnqueuer; 7 | pub use request_generator::RequestGenerator; 8 | pub use response_dequeuer::ResponseDequeuer; 9 | pub use shares_uploader::SharesUploader; 10 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/client/typeset/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | mod errors; 3 | mod traits; 4 | 5 | pub use data::{ 6 | Request, RequestBatch, RequestBatchKind, RequestBatchSize, RequestBody, RequestFactory, 7 | RequestStatus, ResponseBody, 8 | }; 9 | pub use errors::ClientError; 10 | pub use traits::{Initialize, ProcessRequestBatch};
11 | -------------------------------------------------------------------------------- /migrations/20250214090415_modifications_table.up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS modifications ( 2 | id BIGINT GENERATED BY DEFAULT AS IDENTITY (START WITH 1 MINVALUE 1) PRIMARY KEY, 3 | serial_id BIGINT NOT NULL, 4 | request_type TEXT NOT NULL, 5 | s3_url TEXT, 6 | status TEXT NOT NULL, 7 | persisted BOOLEAN NOT NULL DEFAULT FALSE 8 | ); 9 | -------------------------------------------------------------------------------- /migrations/20250602103712_drop_results_table.up.sql: -------------------------------------------------------------------------------- 1 | -- all results (including uniqueness) will be replayed by modifications table -- 2 | DROP TABLE IF EXISTS results; 3 | 4 | -- allow in progress uniqueness modifications to be inserted. they are only assigned a serial id if result is unique -- 5 | ALTER TABLE modifications ALTER COLUMN serial_id DROP NOT NULL; 6 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/genesis/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod errors; 2 | pub mod logger; 3 | 4 | pub use logger::log_error; 5 | pub use logger::log_info; 6 | pub use logger::log_warn; 7 | 8 | // Count of MPC protocol parties. 9 | pub const COUNT_OF_MPC_PARTIES: usize = 3; 10 | 11 | // Type alias: Identifier of an MPC participant. 12 | pub type PartyId = usize; 13 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/client/typeset/data/mod.rs: -------------------------------------------------------------------------------- 1 | mod request; 2 | mod request_batch; 3 | mod request_factory; 4 | mod request_info; 5 | mod response; 6 | 7 | pub use request::{Request, RequestBody, RequestStatus}; 8 | pub use request_batch::{RequestBatch, RequestBatchKind, RequestBatchSize}; 9 | pub use request_factory::RequestFactory; 10 | pub use response::ResponseBody; 11 | -------------------------------------------------------------------------------- /docker-compose.test.hnsw.e2e.yaml: -------------------------------------------------------------------------------- 1 | include: 2 | - docker-compose.dev.yaml 3 | 4 | services: 5 | hnsw_tests_e2e: 6 | image: hnsw-tests-e2e-local:latest 7 | env_file: 8 | - .test.env 9 | depends_on: 10 | localstack: 11 | condition: service_healthy 12 | dev_db: 13 | condition: service_healthy 14 | entrypoint: tail -f /dev/null 15 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-upgrade/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 ghcr.io/worldcoin/iris-mpc:146c2cae43dbeb586144d9d37d152a6b2bfacdd4 2 | USER non-root 3 | ENTRYPOINT "reshare-server --party-id 2 --sender1-party-id 0 --sender2-party-id 1 --bind-addr 0.0.0.0:7000 --environment testing --db-url postgres://postgres:postgres@new-db-4:6203 --batch-size 100 --healthcheck-port 3000" 4 | -------------------------------------------------------------------------------- /migrations/20240902075513_start_id_at_1.up.sql: -------------------------------------------------------------------------------- 1 | -- Add up migration script here 2 | -- we get the current maximum id from the table and 1 otherwise and set the sequence to that value 3 | SELECT
setval(pg_get_serial_sequence('irises', 'id'), coalesce(max(id),1), false) FROM irises; 4 | ALTER TABLE irises ALTER COLUMN id SET START WITH 1; 5 | ALTER TABLE irises ALTER COLUMN id SET MINVALUE 1; 6 | -------------------------------------------------------------------------------- /iris-mpc-py/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | // When building the Python extension module on macOS, allow unresolved Python C-API 3 | // symbols at link time (they are resolved by the Python interpreter at runtime). 4 | #[cfg(target_os = "macos")] 5 | { 6 | println!("cargo:rustc-cdylib-link-arg=-undefined"); 7 | println!("cargo:rustc-cdylib-link-arg=dynamic_lookup"); 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | default: 2 | just --list 3 | dev-pg-up: 4 | docker run --name gpu-iris-dev-db -d -e POSTGRES_PASSWORD=postgres -p 5432:5432 postgres:16 5 | dev-pg-down: 6 | docker stop gpu-iris-dev-db && docker rm gpu-iris-dev-db 7 | lint: 8 | cargo fmt --all -- --check 9 | cargo clippy --workspace --all-targets --all-features -q -- -D warnings 10 | RUSTDOCFLAGS='-D warnings' cargo doc --workspace -q --no-deps 11 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/fsys/reader.rs: -------------------------------------------------------------------------------- 1 | use iris_mpc_common::config::Config as NodeConfig; 2 | use std::{fs, io::Error, path::Path}; 3 | use toml; 4 | 5 | /// Returns node configuration deserialized from a toml file. 6 | pub fn read_node_config(path_to_config: &Path) -> Result<NodeConfig, Error> { 7 | assert!(path_to_config.exists()); 8 | 9 | Ok(toml::from_str(&fs::read_to_string(path_to_config)?).unwrap()) 10 | } 11 | -------------------------------------------------------------------------------- /iris-mpc-upgrade/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | println!("cargo:rerun-if-changed=protos/reshare.proto"); 3 | tonic_build::configure() 4 | .out_dir("src/proto/") 5 | .emit_rerun_if_changed(false) // https://github.com/hyperium/tonic/issues/1070#issuecomment-1729075588 6 | .compile_protos( 7 | &["reshare.proto"], // Files in the path 8 | &["protos"], // The include path to search 9 | ) 10 | .unwrap(); 11 | } 12 | -------------------------------------------------------------------------------- /migrations/20241206150412_add-modified-at.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE irises ADD COLUMN last_modified_at BIGINT; 2 | 3 | CREATE OR REPLACE FUNCTION update_last_modified_at() 4 | RETURNS TRIGGER AS $$ 5 | BEGIN 6 | NEW.last_modified_at = EXTRACT(EPOCH FROM NOW())::BIGINT; 7 | RETURN NEW; 8 | END; 9 | $$ LANGUAGE plpgsql; 10 | 11 | CREATE TRIGGER set_last_modified_at 12 | BEFORE INSERT OR UPDATE ON irises 13 | FOR EACH ROW 14 | EXECUTE FUNCTION update_last_modified_at(); 15 | -------------------------------------------------------------------------------- /iris-mpc-py/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iris-mpc-py" 3 | version = "0.1.0" 4 | publish = false 5 | 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | [lib] 11 | name = "iris_mpc_py" 12 | crate-type = ["cdylib"] 13 | 14 |
[dependencies] 15 | iris-mpc-common = { path = "../iris-mpc-common" } 16 | iris-mpc-cpu = { path = "../iris-mpc-cpu" } 17 | pyo3 = { version = "0.24.0", features = ["extension-module"] } 18 | rand.workspace = true 19 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/client/typeset/errors.rs: -------------------------------------------------------------------------------- 1 | use crate::aws::AwsClientError; 2 | use thiserror::Error; 3 | 4 | #[derive(Error, Debug)] 5 | #[allow(clippy::enum_variant_names)] 6 | pub enum ClientError { 7 | #[error("An AWS service error has occurred: {0}")] 8 | AwsServiceError(#[from] AwsClientError), 9 | 10 | #[error("Enqueue request error: {0}")] 11 | EnqueueRequestError(String), 12 | 13 | #[error("Initialisation error: {0}")] 14 | InitialisationError(String), 15 | } 16 | -------------------------------------------------------------------------------- /iris-mpc-py/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["maturin>=1.7,<2.0"] 3 | build-backend = "maturin" 4 | 5 | [project] 6 | name = "iris-mpc-py" 7 | requires-python = ">=3.8" 8 | classifiers = [ 9 | "Programming Language :: Rust", 10 | "Programming Language :: Python :: Implementation :: CPython", 11 | "Programming Language :: Python :: Implementation :: PyPy", 12 | ] 13 | dynamic = ["version"] 14 | [tool.maturin] 15 | features = ["pyo3/extension-module"] 16 | module-name = "iris_mpc_py" 17 | -------------------------------------------------------------------------------- /iris-mpc-gpu/src/dot/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod distance_comparator; 2 | pub mod share_db; 3 | 4 | use std::collections::HashMap; 5 | 6 | pub const IRIS_CODE_LENGTH: usize = iris_mpc_common::IRIS_CODE_LENGTH; 7 | pub const MASK_CODE_LENGTH: usize = iris_mpc_common::MASK_CODE_LENGTH; 8 | pub const ROTATIONS: usize = iris_mpc_common::ROTATIONS; 9 | 10 | /// Type alias for partial results with rotations: query_id -> db_id -> list of matching rotations 11 | pub type PartialResultsWithRotations = HashMap>>; 12 | -------------------------------------------------------------------------------- /.github/workflows/check-licenses.yaml: -------------------------------------------------------------------------------- 1 | name: Check Cargo Dependencies 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | 8 | jobs: 9 | cargo-deny: 10 | runs-on: ubuntu-24.04 11 | permissions: 12 | contents: read 13 | steps: 14 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1 15 | - uses: EmbarkStudios/cargo-deny-action@76cd80eb775d7bbbd2d80292136d74d39e1b4918 16 | with: 17 | log-level: "error" 18 | rust-version: "1.89.0" 19 | -------------------------------------------------------------------------------- /iris-mpc-common/src/helpers/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "helpers")] 2 | pub mod aws; 3 | #[cfg(feature = "helpers")] 4 | pub mod aws_sigv4; 5 | #[cfg(feature = "helpers")] 6 | pub mod inmemory_store; 7 | #[cfg(feature = "helpers")] 8 | pub mod key_pair; 9 | #[cfg(feature = "helpers")] 10 | pub mod kms_dh; 11 | #[cfg(feature = "helpers")] 12 | pub mod sha256; 13 | #[cfg(feature = "helpers")] 14 | pub mod smpc_request; 15 | #[cfg(feature = "helpers")] 16 | pub mod smpc_response; 17 | #[cfg(feature = "helpers")] 18 | pub mod sqs_s3_helper;
19 | pub mod sync; 20 | -------------------------------------------------------------------------------- /migrations/20250313141530_modifications_table_v2.down.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS modifications ( 2 | id BIGINT GENERATED BY DEFAULT AS IDENTITY (START WITH 1 MINVALUE 1) PRIMARY KEY, 3 | serial_id BIGINT NOT NULL, 4 | request_type TEXT NOT NULL, 5 | s3_url TEXT, 6 | status TEXT NOT NULL, 7 | persisted BOOLEAN NOT NULL DEFAULT FALSE 8 | ); 9 | 10 | DROP TABLE IF EXISTS modifications; 11 | 12 | DROP TRIGGER IF EXISTS before_insert_modifications ON modifications; 13 | 14 | DROP FUNCTION IF EXISTS assign_modification_id(); 15 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/client/typeset/traits.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 3 | use super::{data::RequestBatch, errors::ClientError}; 4 | 5 | /// Implemented by components which expose initialisation functions. 6 | #[async_trait] 7 | pub trait Initialize { 8 | async fn init(&mut self) -> Result<(), ClientError>; 9 | } 10 | 11 | /// Implemented by components within batch processing pipeline. 12 | #[async_trait] 13 | pub trait ProcessRequestBatch { 14 | async fn process_batch(&mut self, batch: &mut RequestBatch) -> Result<(), ClientError>; 15 | } 16 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-cpu/hawk_main.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use eyre::Result; 3 | use iris_mpc_cpu::execution::hawk_main::{hawk_main, HawkArgs}; 4 | use std::process::exit; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<()> { 8 | match hawk_main(HawkArgs::parse()).await { 9 | Ok(_) => tracing::info!("Hawk main execution completed successfully!"), 10 | Err(e) => { 11 | tracing::error!("Encountered an error during hawk_main processing: {}", e); 12 | exit(1); 13 | } 14 | }; 15 | Ok(()) 16 | } 17 | -------------------------------------------------------------------------------- /migrations/20250226181215_graph_id.up.sql: -------------------------------------------------------------------------------- 1 | -- Links table -- 2 | 3 | CREATE TABLE IF NOT EXISTS hawk_graph_links 4 | ( 5 | graph_id integer NOT NULL, 6 | source_ref text NOT NULL, 7 | layer integer NOT NULL, 8 | links jsonb NOT NULL, 9 | CONSTRAINT hawk_graph_links_pkey PRIMARY KEY (graph_id, source_ref, layer) 10 | ); 11 | 12 | -- Entry point table -- 13 | 14 | CREATE TABLE IF NOT EXISTS hawk_graph_entry 15 | ( 16 | graph_id integer NOT NULL, 17 | entry_point jsonb, 18 | CONSTRAINT hawk_graph_entry_pkey PRIMARY KEY (graph_id) 19 | ); 20 | -------------------------------------------------------------------------------- /iris-mpc-py/src/py_hnsw/pymodule.rs: -------------------------------------------------------------------------------- 1 | use super::pyclasses::{ 2 | graph_store::PyGraphStore, hnsw_searcher::PyHnswSearcher, iris_code::PyIrisCode, 3 | iris_code_array::PyIrisCodeArray, plaintext_store::PyPlaintextStore, 4 | }; 5 | use pyo3::prelude::*; 6 | 7 | #[pymodule] 8 | fn iris_mpc_py(m: &Bound<'_, PyModule>) -> PyResult<()> { 9 | m.add_class::()?; 10 | m.add_class::()?; 11 | m.add_class::()?; 12 | m.add_class::()?; 13 | m.add_class::()?; 14 | Ok(()) 15 | } 16 | 
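The Initialize and ProcessRequestBatch traits shown above in iris-mpc-utils describe the contract for stages of the client's request-batch pipeline, whose concrete stages (RequestGenerator, SharesUploader, RequestEnqueuer, ResponseDequeuer) are re-exported from the components module. Purely as an illustration of how the two traits compose, a minimal sketch of a custom stage might look as follows; CountingStage is a hypothetical type, not part of the crate, and the import path assumes the re-exports shown in typeset/mod.rs are reachable from the crate root.

use async_trait::async_trait;
use iris_mpc_utils::client::typeset::{ClientError, Initialize, ProcessRequestBatch, RequestBatch};

/// Hypothetical pipeline stage that only counts the batches passed through it.
struct CountingStage {
    batches_seen: usize,
}

#[async_trait]
impl Initialize for CountingStage {
    async fn init(&mut self) -> Result<(), ClientError> {
        // Reset the counter; a real stage would set up AWS clients or queues here.
        self.batches_seen = 0;
        Ok(())
    }
}

#[async_trait]
impl ProcessRequestBatch for CountingStage {
    async fn process_batch(&mut self, _batch: &mut RequestBatch) -> Result<(), ClientError> {
        // A real stage would generate, upload or enqueue the requests held in `_batch`.
        self.batches_seen += 1;
        Ok(())
    }
}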
-------------------------------------------------------------------------------- /iris-mpc-bins/ideal_config.toml: -------------------------------------------------------------------------------- 1 | graph_size = 1000 2 | irises_path = "data/store.ndjson" 3 | layer0_path = "store_ideal_knn_fhd.txt" 4 | prf_seed = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 5 | echoice = "NaiveFHD" 6 | sanity_check = true 7 | 8 | [searcher] 9 | layer_mode = "Standard" 10 | layer_distribution = { Geometric = { layer_probability = 0.25 } } 11 | 12 | [searcher.params] 13 | M = [10, 10, 10, 10, 10] 14 | M_max = [10, 10, 10, 10, 10] 15 | ef_constr_search = [320, 320, 320, 320, 320] 16 | ef_constr_insert = [320, 320, 320, 320, 320] 17 | ef_search = [320, 320, 320, 320, 320] 18 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/utils/serialization/types/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module implements stable types to represent serialization formats which 2 | //! can be stably referenced while the codebase is changing. In general 3 | //! this module aims to have one submodule for each "file format" which is 4 | //! produced or consumed by the codebase at some stage of development, with 5 | //! possible exceptions for lower-level data types related to serialization 6 | //! formats of external libraries. 7 | 8 | pub mod graph_v0; 9 | pub mod graph_v1; 10 | pub mod graph_v2; 11 | pub mod graph_v3; 12 | pub mod iris_base64; 13 | -------------------------------------------------------------------------------- /scripts/purge_stage/db-cleaner-helper-pod-ampc-hnsw.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: db-cleaner 5 | namespace: ampc-hnsw 6 | spec: 7 | containers: 8 | - securityContext: 9 | allowPrivilegeEscalation: false 10 | name: db-cleaner 11 | image: postgres 12 | imagePullPolicy: Always 13 | command: [ "/bin/bash" ] 14 | args: [ "-c", "while true; do ping localhost; sleep 60; done" ] 15 | resources: 16 | limits: 17 | cpu: 1 18 | memory: 1Gi 19 | requests: 20 | cpu: 1 21 | memory: 1Gi 22 | -------------------------------------------------------------------------------- /scripts/purge_stage/db-cleaner-helper-pod-iris-mpc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: db-cleaner 5 | namespace: iris-mpc 6 | spec: 7 | containers: 8 | - securityContext: 9 | allowPrivilegeEscalation: false 10 | name: db-cleaner 11 | image: postgres 12 | imagePullPolicy: Always 13 | command: [ "/bin/bash" ] 14 | args: [ "-c", "while true; do ping localhost; sleep 60; done" ] 15 | resources: 16 | limits: 17 | cpu: 1 18 | memory: 1Gi 19 | requests: 20 | cpu: 1 21 | memory: 1Gi 22 | -------------------------------------------------------------------------------- /iris-mpc/src/services/aws/s3.rs: -------------------------------------------------------------------------------- 1 | use aws_config::retry::RetryConfig; 2 | use aws_sdk_s3::{config::Builder as S3ConfigBuilder, Client as S3Client}; 3 | 4 | /// Creates an S3 client with retry configuration 5 | pub fn create_s3_client(shared_config: &aws_config::SdkConfig, force_path_style: bool) -> S3Client { 6 | let retry_config = RetryConfig::standard().with_max_attempts(5); 7 | 8 | let s3_config = S3ConfigBuilder::from(shared_config) 9 | .force_path_style(force_path_style)
10 | .retry_config(retry_config.clone()) 11 | .build(); 12 | 13 | S3Client::from_conf(s3_config) 14 | } 15 | -------------------------------------------------------------------------------- /iris-mpc-bins/ideal_config2.toml: -------------------------------------------------------------------------------- 1 | graph_size = 1000 2 | irises_path = "data/store.ndjson" 3 | layer0_path = "store_ideal_knn_fhd.txt" 4 | prf_seed = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 5 | echoice = "NaiveFHD" 6 | sanity_check = true 7 | 8 | [searcher] 9 | layer_mode = { LinearScan = { max_graph_layer = 1 } } 10 | layer_distribution = { Geometric = { layer_probability = 0.25 } } 11 | 12 | [searcher.params] 13 | M = [10, 10, 10, 10, 10] 14 | M_max = [10, 10, 10, 10, 10] 15 | ef_constr_search = [320, 320, 320, 320, 320] 16 | ef_constr_insert = [320, 320, 320, 320, 320] 17 | ef_search = [320, 320, 320, 320, 320] 18 | -------------------------------------------------------------------------------- /iris-mpc-common/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | /// An Error enum capturing the errors produced by this crate. 4 | #[derive(Error, Debug)] 5 | pub enum Error { 6 | /// Invalid party id provided 7 | #[error("Invalid Party id {0}")] 8 | Id(usize), 9 | /// Some other error has occurred. 10 | #[error("Err: {0}")] 11 | Other(String), 12 | } 13 | 14 | impl From<String> for Error { 15 | fn from(mes: String) -> Self { 16 | Self::Other(mes) 17 | } 18 | } 19 | 20 | impl From<&str> for Error { 21 | fn from(mes: &str) -> Self { 22 | Self::Other(mes.to_owned()) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /migrations/20250521000000_binary_graph_fmt.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS hawk_graph_links; 2 | DROP TABLE IF EXISTS hawk_graph_entry; 3 | 4 | -- Links table -- 5 | 6 | CREATE TABLE hawk_graph_links 7 | ( 8 | graph_id integer NOT NULL, 9 | source_ref text NOT NULL, 10 | layer integer NOT NULL, 11 | links jsonb NOT NULL, 12 | CONSTRAINT hawk_graph_links_pkey PRIMARY KEY (graph_id, source_ref, layer) 13 | ); 14 | 15 | -- Entry point table -- 16 | 17 | CREATE TABLE hawk_graph_entry 18 | ( 19 | graph_id integer NOT NULL, 20 | entry_point jsonb, 21 | CONSTRAINT hawk_graph_entry_pkey PRIMARY KEY (graph_id) 22 | ); -------------------------------------------------------------------------------- /scripts/purge_stage/db-cleaner-helper-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: db-cleaner 5 | namespace: iris-mpc 6 | spec: 7 | nodeSelector: 8 | beta.kubernetes.io/instance-type: t3.2xlarge # Use migration node for db-cleaner 9 | containers: 10 | - name: db-cleaner 11 | image: postgres 12 | imagePullPolicy: Always 13 | command: [ "/bin/bash" ] 14 | args: [ "-c", "while true; do ping localhost; sleep 60; done" ] 15 | resources: 16 | limits: 17 | cpu: 1 18 | memory: 1Gi 19 | requests: 20 | cpu: 1 21 | memory: 1Gi 22 | -------------------------------------------------------------------------------- /migrations/20250521000000_binary_graph_fmt.up.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS hawk_graph_links; 2 | DROP TABLE IF EXISTS hawk_graph_entry; 3 | 4 | -- Links table -- 5 | 6 | CREATE TABLE hawk_graph_links 7 | ( 8 | graph_id integer NOT NULL, 9
| source_ref text NOT NULL, 10 | layer integer NOT NULL, 11 | links bytea NOT NULL, 12 | CONSTRAINT hawk_graph_links_pkey PRIMARY KEY (graph_id, source_ref, layer) 13 | ); 14 | 15 | -- Entry point table -- 16 | 17 | CREATE TABLE hawk_graph_entry 18 | ( 19 | graph_id integer NOT NULL, 20 | entry_point bytea, 21 | CONSTRAINT hawk_graph_entry_pkey PRIMARY KEY (graph_id) 22 | ); 23 | -------------------------------------------------------------------------------- /migrations/20250530201832_unify_id_repr.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS hawk_graph_links; 2 | DROP TABLE IF EXISTS hawk_graph_entry; 3 | 4 | -- Links table -- 5 | 6 | CREATE TABLE hawk_graph_links 7 | ( 8 | graph_id integer NOT NULL, 9 | source_ref text NOT NULL, 10 | layer integer NOT NULL, 11 | links bytea NOT NULL, 12 | CONSTRAINT hawk_graph_links_pkey PRIMARY KEY (graph_id, source_ref, layer) 13 | ); 14 | 15 | -- Entry point table -- 16 | 17 | CREATE TABLE hawk_graph_entry 18 | ( 19 | graph_id integer NOT NULL, 20 | entry_point bytea, 21 | CONSTRAINT hawk_graph_entry_pkey PRIMARY KEY (graph_id) 22 | ); 23 | -------------------------------------------------------------------------------- /scripts/run-client.sh: -------------------------------------------------------------------------------- 1 | export AWS_ACCESS_KEY_ID=test 2 | export AWS_SECRET_ACCESS_KEY=test 3 | export AWS_ENDPOINT_URL="http://127.0.0.1:4566" 4 | export AWS_ACCESS_KEY_ID=test 5 | export AWS_SECRET_ACCESS_KEY=test 6 | export AWS_REGION=us-east-1 7 | 8 | 9 | cargo run --release -p iris-mpc-bins --bin client -- \ 10 | --request-topic-arn arn:aws:sns:us-east-1:000000000000:iris-mpc-input.fifo \ 11 | --requests-bucket-name wf-smpcv2-dev-sns-requests \ 12 | --public-key-base-url "http://localhost:4566/wf-dev-public-keys" \ 13 | --response-queue-url http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/iris-mpc-results-us-east-1.fifo 14 | -------------------------------------------------------------------------------- /.github/workflows/check-secrets.yml: -------------------------------------------------------------------------------- 1 | name: Check secret leaks 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | check-secret-leaks: 11 | runs-on: ubuntu-latest 12 | permissions: 13 | contents: read 14 | steps: 15 | - name: Checkout code 16 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 17 | 18 | - name: Run Gitleaks v8.28.0 on full history 19 | run: | 20 | docker run --rm -v $(pwd):/repo ghcr.io/gitleaks/gitleaks@sha256:cdbb7c955abce02001a9f6c9f602fb195b7fadc1e812065883f695d1eeaba854 \ 21 | detect --source /repo --verbose --no-git 22 | -------------------------------------------------------------------------------- /.github/workflows/relyance-sci.yml: -------------------------------------------------------------------------------- 1 | name: Relyance SCI Scan 2 | 3 | on: 4 | schedule: 5 | - cron: "11 0 * * *" 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | execute-relyance-sci: 13 | name: Relyance SCI Job 14 | runs-on: 15 | group: arc-public-large-amd64-runner 16 | permissions: 17 | contents: read 18 | 19 | steps: 20 | - name: Run Relyance SCI 21 | uses: worldcoin/gh-actions-public/relyance@main 22 | # More information: https://github.com/worldcoin/gh-actions-public/tree/main/relyance 23 | with: 24 | secrets-dpp-sci-key: ${{ secrets.DPP_SCI_KEY }} 25 | 
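The binary graph format migration above (20250521000000_binary_graph_fmt) stores each node's neighborhood as an opaque bytea payload keyed by (graph_id, source_ref, layer). As a rough sketch only, assuming the workspace's sqlx-style Postgres access, one such row could be fetched as shown below; fetch_raw_links is a hypothetical helper, and the byte-level encoding of the links column is defined elsewhere in the codebase and not assumed here.

use sqlx::{postgres::PgPoolOptions, Row};

/// Hypothetical helper: fetch the serialized neighborhood of one graph node.
/// Decoding the returned bytes is format-specific and intentionally left out.
async fn fetch_raw_links(
    db_url: &str,
    graph_id: i32,
    source_ref: &str,
    layer: i32,
) -> Result<Vec<u8>, sqlx::Error> {
    let pool = PgPoolOptions::new().max_connections(1).connect(db_url).await?;
    let row = sqlx::query(
        "SELECT links FROM hawk_graph_links WHERE graph_id = $1 AND source_ref = $2 AND layer = $3",
    )
    .bind(graph_id)
    .bind(source_ref)
    .bind(layer)
    .fetch_one(&pool)
    .await?;
    Ok(row.get::<Vec<u8>, _>("links"))
}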
-------------------------------------------------------------------------------- /.test.hawk0.env: -------------------------------------------------------------------------------- 1 | SMPC__CPU_DATABASE__URL=postgres://postgres:postgres@dev_db:5432/SMPC_dev_0 2 | SMPC__ANON_STATS_DATABASE__URL=postgres://postgres:postgres@dev_db:5432/SMPC_dev_0 3 | SMPC__REQUESTS_QUEUE_URL=http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/smpcv2-0-dev.fifo 4 | SMPC__SERVER_COORDINATION__NODE_HOSTNAMES='["0.0.0.0","hawk_participant_1","hawk_participant_2"]' 5 | SMPC__SERVER_COORDINATION__PARTY_ID=0 6 | SMPC__HAWK_SERVER_DELETIONS_ENABLED=true 7 | SMPC__HAWK_SERVER_REAUTHS_ENABLED=true 8 | SMPC__HAWK_SERVER_RESETS_ENABLED=true 9 | SMPC__HAWK_SERVER_HEALTHCHECK_PORT=3000 10 | SMPC__HAWK_PRNG_SEED=0 11 | SMPC__SHUTDOWN_LAST_RESULTS_SYNC_TIMEOUT_SECS=60 12 | -------------------------------------------------------------------------------- /.test.hawk1.env: -------------------------------------------------------------------------------- 1 | SMPC__CPU_DATABASE__URL=postgres://postgres:postgres@dev_db:5432/SMPC_dev_1 2 | SMPC__ANON_STATS_DATABASE__URL=postgres://postgres:postgres@dev_db:5432/SMPC_dev_1 3 | SMPC__REQUESTS_QUEUE_URL=http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/smpcv2-1-dev.fifo 4 | SMPC__SERVER_COORDINATION__NODE_HOSTNAMES='["hawk_participant_0","0.0.0.0","hawk_participant_2"]' 5 | SMPC__SERVER_COORDINATION__PARTY_ID=1 6 | SMPC__HAWK_SERVER_DELETIONS_ENABLED=true 7 | SMPC__HAWK_SERVER_REAUTHS_ENABLED=true 8 | SMPC__HAWK_SERVER_RESETS_ENABLED=true 9 | SMPC__HAWK_SERVER_HEALTHCHECK_PORT=3001 10 | SMPC__HAWK_PRNG_SEED=0 11 | SMPC__SHUTDOWN_LAST_RESULTS_SYNC_TIMEOUT_SECS=60 12 | -------------------------------------------------------------------------------- /.test.hawk2.env: -------------------------------------------------------------------------------- 1 | SMPC__CPU_DATABASE__URL=postgres://postgres:postgres@dev_db:5432/SMPC_dev_2 2 | SMPC__ANON_STATS_DATABASE__URL=postgres://postgres:postgres@dev_db:5432/SMPC_dev_2 3 | SMPC__REQUESTS_QUEUE_URL=http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/smpcv2-2-dev.fifo 4 | SMPC__SERVER_COORDINATION__NODE_HOSTNAMES='["hawk_participant_0","hawk_participant_1","0.0.0.0"]' 5 | SMPC__SERVER_COORDINATION__PARTY_ID=2 6 | SMPC__HAWK_SERVER_DELETIONS_ENABLED=true 7 | SMPC__HAWK_SERVER_REAUTHS_ENABLED=true 8 | SMPC__HAWK_SERVER_RESETS_ENABLED=true 9 | SMPC__HAWK_SERVER_HEALTHCHECK_PORT=3002 10 | SMPC__HAWK_PRNG_SEED=0 11 | SMPC__SHUTDOWN_LAST_RESULTS_SYNC_TIMEOUT_SECS=60 12 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/genesis/mod.rs: -------------------------------------------------------------------------------- 1 | mod batch_generator; 2 | mod hawk_handle; 3 | mod hawk_job; 4 | pub mod plaintext; 5 | pub mod state_accessor; 6 | pub mod state_sync; 7 | pub mod utils; 8 | 9 | pub use batch_generator::{Batch, BatchGenerator, BatchIterator, BatchSize}; 10 | pub use hawk_handle::Handle; 11 | pub use hawk_job::{Job, JobRequest, JobResult}; 12 | pub use state_accessor::{ 13 | get_iris_deletions, get_last_indexed_iris_id, get_last_indexed_modification_id, 14 | set_last_indexed_iris_id, set_last_indexed_modification_id, 15 | }; 16 | pub use utils::logger; 17 | pub use utils::{ 18 | errors::IndexationError, 19 | logger::{log_error, log_info, log_warn}, 20 | }; 21 | -------------------------------------------------------------------------------- 
/scripts/purge_stage/delete_mongodb_gpu_collections.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | CONNECTION_STRING="$1" 6 | 7 | DATABASE="iris" 8 | COLLECTIONS="mpcv2.results mpcv2.results.partial mpcv2.results.deletion mpcv2.results.anonymized_statistics mpcv2.results.anonymized_statistics_2d mpcv2.reauth.results mpcv2.reauth.results.partial mpcv2.reset.results mpcv2.reset.results.partial mpcv2.reset.results.update_acks" 9 | 10 | for COLLECTION in $COLLECTIONS; do 11 | echo "Deleting collection $COLLECTION from database $DATABASE..." 12 | mongosh "$CONNECTION_STRING" --eval "db.getCollection('$COLLECTION').deleteMany({})" 13 | done 14 | 15 | echo "MPC collections deleted successfully." 16 | -------------------------------------------------------------------------------- /scripts/purge_stage/delete_mongodb_cpu_collections.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | CONNECTION_STRING="$1" 6 | 7 | DATABASE="iris" 8 | COLLECTIONS="mpcv2.hnsw.results mpcv2.hnsw.results.partial mpcv2.hnsw.results.deletion mpcv2.hnsw.results.anonymized_statistics mpcv2.hnsw.reauth.results mpcv2.hnsw.reauth.results.partial mpcv2.hnsw.reset.results mpcv2.hnsw.reset.results.partial mpcv2.hnsw.reset.results.update_acks" 9 | 10 | for COLLECTION in $COLLECTIONS; do 11 | echo "Deleting collection $COLLECTION from database $DATABASE..." 12 | mongosh "$CONNECTION_STRING" --eval "db.getCollection('$COLLECTION').deleteMany({})" 13 | done 14 | 15 | echo "MPC collections deleted successfully." 16 | -------------------------------------------------------------------------------- /iris-mpc-cpu/benches/bench_utils.rs: -------------------------------------------------------------------------------- 1 | use iris_mpc_cpu::shares::{IntRing2k, RingElement, Share}; 2 | use rand::{Rng, RngCore}; 3 | use rand_distr::{Distribution, Standard}; 4 | 5 | pub fn create_random_sharing<R, ShareRing>(rng: &mut R, input: ShareRing) -> Vec<Share<ShareRing>> 6 | where 7 | R: RngCore, 8 | ShareRing: IntRing2k + std::fmt::Display, 9 | Standard: Distribution<ShareRing>, 10 | { 11 | let val = RingElement(input); 12 | let a = RingElement(rng.gen()); 13 | let b = RingElement(rng.gen()); 14 | let c = val - a - b; 15 | 16 | let share1 = Share::new(a, c); 17 | let share2 = Share::new(b, a); 18 | let share3 = Share::new(c, b); 19 | 20 | vec![share1, share2, share3] 21 | } 22 | -------------------------------------------------------------------------------- /iris-mpc-upgrade-hawk/tests/workflows/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod genesis_100; 2 | pub mod genesis_101; 3 | pub mod genesis_102; 4 | pub mod genesis_103; 5 | pub mod genesis_104; 6 | pub mod genesis_105; 7 | pub mod genesis_106; 8 | 9 | #[macro_export] 10 | macro_rules!
join_runners { 11 | ($join_set:expr) => {{ 12 | let res: Result, eyre::Report> = $join_set.join_all().await.into_iter().collect(); 13 | if res.is_err() { 14 | tracing::error!("join failed at line: {}", line!()); 15 | } else { 16 | tracing::info!("join succeeded at line: {}", line!()); 17 | } 18 | let _ = res?; 19 | // allow time to clean up 20 | tokio::time::sleep(std::time::Duration::from_millis(300)).await; 21 | }}; 22 | } 23 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-upgrade/aws_local.sh: -------------------------------------------------------------------------------- 1 | aws_local() { 2 | AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test AWS_DEFAULT_REGION=us-east-1 aws --endpoint-url=http://${LOCALSTACK_HOST:-localhost}:4566 "$@" 3 | } 4 | 5 | key1_metadata=$(aws_local kms create-key --region us-east-1 --description "Key for Party1" --key-spec ECC_NIST_P256 --key-usage KEY_AGREEMENT) 6 | echo "Created key1: $key1_metadata" 7 | key1_arn=$(echo "$key1_metadata" | jq ".KeyMetadata.Arn" -r) 8 | echo "Key1 ARN: $key1_arn" 9 | key2_metadata=$(aws_local kms create-key --region us-east-1 --description "Key for Party2" --key-spec ECC_NIST_P256 --key-usage KEY_AGREEMENT) 10 | echo "Created key2: $key2_metadata" 11 | key2_arn=$(echo "$key2_metadata" | jq ".KeyMetadata.Arn" -r) 12 | echo "Key2 ARN: $key2_arn" 13 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/hnsw/mod.rs: -------------------------------------------------------------------------------- 1 | //! This submodule is based off of a local port of functionality in the 2 | //! hawk-pack crate, which implements a generic functionality for HNSW graph 3 | //! search: 4 | //! 5 | //! () 6 | //! 7 | //! This local copy simplifies some of the generic functionality present in the 8 | //! source crate, and modifies the implementation to better support the design 9 | //! and performance constraints of the iris uniqueness application. 
10 | 11 | pub mod graph; 12 | pub mod metrics; 13 | pub mod searcher; 14 | pub mod sorting; 15 | pub mod vector_store; 16 | 17 | pub use graph::{layered_graph::GraphMem, neighborhood::SortedNeighborhood}; 18 | pub use searcher::{HnswParams, HnswSearcher}; 19 | pub use vector_store::VectorStore; 20 | -------------------------------------------------------------------------------- /iris-mpc-store/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iris-mpc-store" 3 | version = "0.1.0" 4 | publish = false 5 | 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | aws-config.workspace = true 12 | aws-sdk-s3.workspace = true 13 | async-trait.workspace = true 14 | iris-mpc-common = { path = "../iris-mpc-common" } 15 | bytemuck.workspace = true 16 | dotenvy.workspace = true 17 | futures.workspace = true 18 | sqlx.workspace = true 19 | eyre.workspace = true 20 | itertools.workspace = true 21 | tracing.workspace = true 22 | tokio.workspace = true 23 | rand.workspace = true 24 | ampc-server-utils.workspace = true 25 | 26 | [dev-dependencies] 27 | rand.workspace = true 28 | tokio.workspace = true 29 | 30 | [features] 31 | db_dependent = [] 32 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-utils/write_node_config_toml.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use iris_mpc_common::config::Config as NodeConfig; 3 | use std::fs; 4 | use std::io::Write; 5 | 6 | #[derive(Parser)] 7 | #[allow(non_snake_case)] 8 | struct Args { 9 | // Path to output file. 10 | #[clap(long("output"))] 11 | path_to_output_file: String, 12 | } 13 | 14 | fn main() -> Result<(), Box> { 15 | // Set args. 16 | let args = Args::parse(); 17 | 18 | // Set config. 19 | dotenvy::dotenv().ok(); 20 | let cfg: NodeConfig = NodeConfig::load_config("SMPC")?; 21 | let cfg = toml::to_string_pretty(&cfg)?; 22 | 23 | // Write to fsys. 24 | let mut fhandle = fs::File::create(args.path_to_output_file)?; 25 | fhandle.write_all(cfg.as_bytes())?; 26 | 27 | Ok(()) 28 | } 29 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | push: 5 | paths-ignore: 6 | - 'adr/**' 7 | - 'deploy/**' 8 | - '.github/**' 9 | branches: 10 | - main 11 | pull_request: 12 | paths-ignore: 13 | - 'adr/**' 14 | - 'deploy/**' 15 | - '.github/**' 16 | types: [opened, reopened, synchronize] 17 | 18 | permissions: 19 | contents: read 20 | 21 | jobs: 22 | update_release_draft: 23 | timeout-minutes: 20 24 | name: Update Release Draft 25 | runs-on: 26 | labels: ubuntu-latest 27 | 28 | permissions: 29 | contents: write 30 | pull-requests: write 31 | 32 | steps: 33 | - uses: release-drafter/release-drafter@b1476f6e6eb133afa41ed8589daba6dc69b4d3f5 34 | env: 35 | GITHUB_TOKEN: ${{ github.token }} 36 | -------------------------------------------------------------------------------- /iris-mpc-utils/README.md: -------------------------------------------------------------------------------- 1 | iris-mpc-utils 2 | =============== 3 | 4 | Set of utility functions for working with IRIS-MPC CPU networks. 5 | 6 | Why iris-mpc-utils ? 
7 | -------------------------------------- 8 | 9 | Centralised location for utility functions/programs previously scattered and/or duplicated across the monorepo. 10 | 11 | What uses iris-mpc-utils ? 12 | -------------------------------------- 13 | 14 | - Test functions in other crates. 15 | - DevOps tool chains. 16 | 17 | What is iris-mpc-utils roadmap ? 18 | -------------------------------------- 19 | 20 | - DONE 21 | - Initialisation from existing crates 22 | 23 | - QUEUED 24 | - Refactor genesis e2e tests to use iris-mpc-utils 25 | - New utils for simulated plaintext graphs 26 | - Refactor genesis tests to use new simulated plaintext graphs 27 | - Main HNSW binary e2e support 28 | - Refactoring as and when it makes sense 29 | -------------------------------------------------------------------------------- /iris-mpc-upgrade-hawk/tests/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod constants; 2 | pub mod genesis_runner; 3 | pub mod irises; 4 | mod logger; 5 | pub mod modifications; 6 | pub mod mpc_node; 7 | pub mod plaintext_genesis; 8 | pub mod resources; 9 | pub mod runner; 10 | pub mod s3_deletions; 11 | 12 | use iris_mpc_common::{config::Config, iris_db::iris::IrisCode}; 13 | use iris_mpc_cpu::protocol::shared_iris::GaloisRingSharedIris; 14 | pub use runner::{TestRun, TestRunContextInfo, TestRunEnvironment}; 15 | 16 | use crate::utils::constants::COUNT_OF_PARTIES; 17 | 18 | // Pair of Iris codes associated with left/right eyes. 19 | pub type IrisCodePair = (IrisCode, IrisCode); 20 | 21 | // Pair of Iris shares associated with left/right eyes. 22 | pub type GaloisRingSharedIrisPair = (GaloisRingSharedIris, GaloisRingSharedIris); 23 | 24 | // Network wide configuration set. 25 | pub type HawkConfigs = [Config; COUNT_OF_PARTIES]; 26 | -------------------------------------------------------------------------------- /iris-mpc-common/src/iris_db/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::galois_engine::degree4::{GaloisRingIrisCodeShare, GaloisRingTrimmedMaskCodeShare}; 2 | use iris::IrisCode; 3 | use rand::{rngs::StdRng, SeedableRng}; 4 | 5 | pub mod db; 6 | pub mod iris; 7 | pub mod shamir_db; 8 | pub mod shamir_iris; 9 | 10 | pub fn get_dummy_shares_for_deletion( 11 | party_id: usize, 12 | ) -> (GaloisRingIrisCodeShare, GaloisRingTrimmedMaskCodeShare) { 13 | let mut rng: StdRng = StdRng::seed_from_u64(0); 14 | let dummy: IrisCode = IrisCode::default(); 15 | let iris_share: GaloisRingIrisCodeShare = 16 | GaloisRingIrisCodeShare::encode_iris_code(&dummy.code, &dummy.mask, &mut rng)[party_id] 17 | .clone(); 18 | let mask_share: GaloisRingTrimmedMaskCodeShare = 19 | GaloisRingIrisCodeShare::encode_mask_code(&dummy.mask, &mut rng)[party_id] 20 | .clone() 21 | .into(); 22 | (iris_share, mask_share) 23 | } 24 | -------------------------------------------------------------------------------- /scripts/run-client-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | export AWS_ACCESS_KEY_ID=test 5 | export AWS_SECRET_ACCESS_KEY=test 6 | export AWS_ENDPOINT_URL="http://localstack:4566" 7 | export AWS_ACCESS_KEY_ID=test 8 | export AWS_SECRET_ACCESS_KEY=test 9 | export AWS_REGION=us-east-1 10 | export AWS_DEFAULT_REGION=us-east-1 11 | 12 | 13 | for i in 0 1 2 14 | do 15 | echo "Checking hawk_participant_${i} at :300${i}/health..."
16 | curl -f hawk_participant_${i}:300${i}/health 17 | done 18 | 19 | echo "All endpoints are healthy. Now running the client..." 20 | 21 | 22 | /bin/client \ 23 | --request-topic-arn arn:aws:sns:$AWS_REGION:000000000000:iris-mpc-input.fifo \ 24 | --requests-bucket-name wf-smpcv2-dev-sns-requests \ 25 | --public-key-base-url "http://localstack:4566/wf-dev-public-keys" \ 26 | --response-queue-url http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/iris-mpc-results-us-east-1.fifo 27 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/aws/errors.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | #[derive(Error, Debug)] 4 | #[allow(clippy::enum_variant_names)] 5 | pub enum AwsClientError { 6 | #[error("Download public key set error: {0}")] 7 | PublicKeysetDownloadError(String), 8 | 9 | #[error("Iris shares encrypt and upload error: {0}")] 10 | IrisSharesEncryptAndUploadError(String), 11 | 12 | #[error("Iris deletions upload error: {0}")] 13 | IrisDeletionsUploadError(String), 14 | 15 | #[error("AWS S3 upload error: key={0}: error={1}")] 16 | S3UploadError(String, String), 17 | 18 | #[error("AWS SNS publish error: {0}")] 19 | SnsPublishError(String), 20 | 21 | #[error("AWS SQS delete message from queue error: {0}")] 22 | SqsDeleteMessageError(String), 23 | 24 | #[error("AWS SQS purge queue error: {0}")] 25 | SqsPurgeQueueError(String), 26 | 27 | #[error("AWS SQS receive message from queue error: {0}")] 28 | SqsReceiveMessageError(String), 29 | } 30 | -------------------------------------------------------------------------------- /iris-mpc/src/services/aws/sqs.rs: -------------------------------------------------------------------------------- 1 | use aws_config::timeout::TimeoutConfig; 2 | use aws_sdk_sqs::config::Builder; 3 | use aws_sdk_sqs::Client as SQSClient; 4 | use std::time::Duration; 5 | 6 | pub fn create_sqs_client( 7 | shared_config: &aws_config::SdkConfig, 8 | wait_time_seconds: usize, 9 | ) -> SQSClient { 10 | // Creates an SQS client with a client-side operation attempt timeout. By default, there are two retry 11 | // attempts, meaning that every operation has three tries in total. This configuration prevents the SQS 12 | // client from `await`ing forever on broken streams.
(see ) 13 | SQSClient::from_conf( 14 | Builder::from(shared_config) 15 | .timeout_config( 16 | TimeoutConfig::builder() 17 | .operation_attempt_timeout(Duration::from_secs((wait_time_seconds + 2) as u64)) 18 | .build(), 19 | ) 20 | .build(), 21 | ) 22 | } 23 | -------------------------------------------------------------------------------- /scripts/tools/init-servers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # needs to run twice to create the keys with both AWSCURRENT and AWSPREVIOUS states 5 | cargo run -p iris-mpc-bins --bin key-manager -- --region us-east-1 --node-id 0 --env dev rotate --public-key-bucket-name wf-dev-public-keys 6 | cargo run -p iris-mpc-bins --bin key-manager -- --region us-east-1 --node-id 1 --env dev rotate --public-key-bucket-name wf-dev-public-keys 7 | cargo run -p iris-mpc-bins --bin key-manager -- --region us-east-1 --node-id 2 --env dev rotate --public-key-bucket-name wf-dev-public-keys 8 | 9 | cargo run -p iris-mpc-bins --bin key-manager -- --region us-east-1 --node-id 0 --env dev rotate --public-key-bucket-name wf-dev-public-keys 10 | cargo run -p iris-mpc-bins --bin key-manager -- --region us-east-1 --node-id 1 --env dev rotate --public-key-bucket-name wf-dev-public-keys 11 | cargo run -p iris-mpc-bins --bin key-manager -- --region us-east-1 --node-id 2 --env dev rotate --public-key-bucket-name wf-dev-public-keys 12 | -------------------------------------------------------------------------------- /iris-mpc-bins/accuracy1.toml: -------------------------------------------------------------------------------- 1 | [irises] 2 | option = "NdjsonFile" 3 | path = "data/store.ndjson" 4 | limit = 1000 5 | selection = "All" 6 | 7 | 8 | [graph] 9 | option = "GenerateDynamic" 10 | size = 500 11 | 12 | [graph.gen_hnsw_config] 13 | ef_construction = 65 14 | ef_search = 65 15 | M = 16 16 | layer_mode = { LinearScan = { max_graph_layer = 1 } } 17 | 18 | 19 | [analysis] 20 | sample_size = 20 21 | seed = 123 22 | distance_fn = "min_fhd" # Must be "fhd" or "min_fhd" 23 | neighborhood_mode = "Unsorted" # Must be "Sorted" or "Unsorted" 24 | k_neighbors = 128 25 | mutations = [0.33, 0.37, 0.51] 26 | output_format = "rate" # Must be "full_csv", "rate" or "histogram" 27 | output_path = "accuracy_results.csv" 28 | metrics_path = "metrics.csv" 29 | 30 | [analysis.rotations] 31 | start = -3 32 | end = 4 # End of range is exclusive 33 | 34 | [analysis.search_hnsw_config] 35 | ef_construction = 320 36 | ef_search = [70, 5] 37 | M = 256 38 | layer_mode = { LinearScan = { max_graph_layer = 1} } 39 | -------------------------------------------------------------------------------- /migrations/20250530201832_unify_id_repr.up.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS hawk_graph_links; 2 | DROP TABLE IF EXISTS hawk_graph_entry; 3 | 4 | -- Links table -- 5 | 6 | CREATE TABLE hawk_graph_links 7 | ( 8 | graph_id smallint NOT NULL CHECK (graph_id = 0 OR graph_id = 1), 9 | serial_id bigint NOT NULL CHECK (serial_id BETWEEN 1 AND 4294967295), 10 | version_id smallint NOT NULL CHECK (version_id >= 0), 11 | layer smallint NOT NULL CHECK (layer >= 0), 12 | links bytea NOT NULL, 13 | CONSTRAINT hawk_graph_links_pkey PRIMARY KEY (graph_id, serial_id, version_id, layer) 14 | ); 15 | 16 | -- Entry point table -- 17 | 18 | CREATE TABLE hawk_graph_entry 19 | ( 20 | graph_id smallint NOT NULL CHECK (graph_id = 0 OR graph_id = 1), 21 | serial_id bigint NOT NULL CHECK 
(serial_id BETWEEN 1 AND 4294967295), 22 | version_id smallint NOT NULL CHECK (version_id >= 0), 23 | layer smallint NOT NULL CHECK (layer >= 0), 24 | CONSTRAINT hawk_graph_entry_pkey PRIMARY KEY (graph_id) 25 | ); 26 | -------------------------------------------------------------------------------- /iris-mpc-common/src/helpers/sqs_s3_helper.rs: -------------------------------------------------------------------------------- 1 | use crate::helpers::key_pair::SharesDecodingError; 2 | use aws_sdk_s3::{ 3 | primitives::{ByteStream, SdkBody}, 4 | Client, 5 | }; 6 | 7 | pub async fn upload_file_to_s3( 8 | bucket: &str, 9 | key: &str, 10 | s3_client: Client, 11 | contents: &[u8], 12 | ) -> Result { 13 | let content_bytestream = ByteStream::new(SdkBody::from(contents)); 14 | 15 | // Create a PutObject request 16 | match s3_client 17 | .put_object() 18 | .bucket(bucket) 19 | .key(key) 20 | .body(content_bytestream) 21 | .send() 22 | .await 23 | { 24 | Ok(_) => { 25 | tracing::info!("File {} uploaded to s3 successfully", key); 26 | } 27 | Err(e) => { 28 | tracing::error!("Failed to upload file {} to s3: {:?}", key, e); 29 | return Err(SharesDecodingError::UploadS3Error); 30 | } 31 | } 32 | Ok(key.to_string()) 33 | } 34 | -------------------------------------------------------------------------------- /migrations/20250321144530_add_version_id.up.sql: -------------------------------------------------------------------------------- 1 | -- Add version_id column with a default value of 0 2 | ALTER TABLE irises ADD COLUMN IF NOT EXISTS version_id SMALLINT DEFAULT 0 CHECK (version_id >= 0); 3 | 4 | -- Create a function that will be executed by the trigger 5 | CREATE OR REPLACE FUNCTION increment_version_id() 6 | RETURNS TRIGGER AS $$ 7 | BEGIN 8 | -- Only increment version_id if actual data columns changed 9 | IF (OLD.left_code IS DISTINCT FROM NEW.left_code OR 10 | OLD.left_mask IS DISTINCT FROM NEW.left_mask OR 11 | OLD.right_code IS DISTINCT FROM NEW.right_code OR 12 | OLD.right_mask IS DISTINCT FROM NEW.right_mask) THEN 13 | NEW.version_id = COALESCE(OLD.version_id, 0) + 1; 14 | END IF; 15 | RETURN NEW; 16 | END; 17 | $$ LANGUAGE plpgsql; 18 | 19 | -- Create the trigger that calls the function before updates 20 | CREATE TRIGGER increment_version_id_trigger 21 | BEFORE UPDATE ON irises 22 | FOR EACH ROW 23 | EXECUTE FUNCTION increment_version_id(); 24 | -------------------------------------------------------------------------------- /iris-mpc-upgrade/protos/reshare.proto: -------------------------------------------------------------------------------- 1 | 2 | syntax = "proto3"; 3 | package iris_mpc_reshare; 4 | 5 | message IrisCodeReShare { 6 | bytes LeftIrisCodeShare = 1; 7 | bytes LeftMaskShare = 2; 8 | bytes RightIrisCodeShare = 3; 9 | bytes RightMaskShare = 4; 10 | } 11 | 12 | message IrisCodeReShareRequest { 13 | uint64 SenderId = 1; 14 | uint64 OtherId = 2; 15 | uint64 ReceiverId = 3; 16 | int64 IdRangeStartInclusive = 4; 17 | int64 IdRangeEndNonInclusive = 5; 18 | repeated IrisCodeReShare IrisCodeReShares = 6; 19 | bytes ClientCorrelationSanityCheck = 7; 20 | } 21 | 22 | message IrisCodeReShareResponse { 23 | IrisCodeReShareStatus Status = 1; 24 | string Message = 2; 25 | } 26 | 27 | enum IrisCodeReShareStatus { 28 | IRIS_CODE_RE_SHARE_STATUS_OK = 0; 29 | IRIS_CODE_RE_SHARE_STATUS_FULL_QUEUE = 1; 30 | IRIS_CODE_RE_SHARE_STATUS_ERROR = 2; 31 | } 32 | 33 | service IrisCodeReShareService { 34 | rpc ReShare(IrisCodeReShareRequest) returns (IrisCodeReShareResponse); 35 | } 36 | 
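The `reshare.proto` definition above carries full left/right iris-code and mask shares for every record. As a rough sanity check on message sizes (and as context for `get_size_of_reshare_iris_code_share_batch` further down in this dump), the raw share payload of a single `IrisCodeReShare` can be estimated from the workspace constants. The 2-byte (`u16`) element size is an assumption here, as is the standalone `main`; only the two length constants come from `iris-mpc-common`.

```rust
// Back-of-envelope sketch, assuming 2-byte (u16) share elements.
const IRIS_CODE_LENGTH: usize = 12_800; // mirrored from iris-mpc-common
const MASK_CODE_LENGTH: usize = 6_400; // mirrored from iris-mpc-common

fn main() {
    // One eye contributes an iris-code share plus a mask share.
    let bytes_per_eye = (IRIS_CODE_LENGTH + MASK_CODE_LENGTH) * core::mem::size_of::<u16>();
    // An IrisCodeReShare message carries both eyes.
    let bytes_per_reshare = 2 * bytes_per_eye;
    assert_eq!(bytes_per_reshare, 76_800);
    println!("raw share payload per IrisCodeReShare: {bytes_per_reshare} bytes (+ protobuf framing)");
}
```

Each resharded record therefore adds roughly 75 KiB of share data on top of the protobuf framing, which is worth keeping in mind when choosing the batch size of a reshare request.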
-------------------------------------------------------------------------------- /iris-mpc-py/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | .pytest_cache/ 6 | *.py[cod] 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | .venv/ 14 | env/ 15 | bin/ 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | include/ 26 | man/ 27 | venv/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | pip-selfcheck.json 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | 45 | # Translations 46 | *.mo 47 | 48 | # Mr Developer 49 | .mr.developer.cfg 50 | .project 51 | .pydevproject 52 | 53 | # Rope 54 | .ropeproject 55 | 56 | # Django stuff: 57 | *.log 58 | *.pot 59 | 60 | .DS_Store 61 | 62 | # Sphinx documentation 63 | docs/_build/ 64 | 65 | # PyCharm 66 | .idea/ 67 | 68 | # VSCode 69 | .vscode/ 70 | 71 | # Pyenv 72 | .python-version 73 | -------------------------------------------------------------------------------- /iris-mpc/build.rs: -------------------------------------------------------------------------------- 1 | //! Rust replaces LD_LIBRARY_PATH rather than prepending to it, which makes it 2 | //! difficult to install the CUDA libraries in non-standard directories. 3 | //! 4 | //! This build script is a workaround until the bug is fixed in `cargo` or the 5 | //! `cudarc` build scripts: 6 | //! 7 | //! 8 | //! Usage: 9 | //! ```sh 10 | //! export PRE_CARGO_LD_LIBRARY_PATH="$LD_LIBRARY_PATH" 11 | //! cargo run/test/bench 12 | //! ``` 13 | 14 | fn main() { 15 | let build_path = std::env::var("LD_LIBRARY_PATH"); 16 | let sys_path = std::env::var("PRE_CARGO_LD_LIBRARY_PATH"); 17 | 18 | match (build_path, sys_path) { 19 | (Ok(build), Ok(sys)) => println!("cargo:rustc-env=LD_LIBRARY_PATH={build}:{sys}"), 20 | (Err(_build), Ok(sys)) => println!("cargo:rustc-env=LD_LIBRARY_PATH={sys}"), 21 | (_, Err(_sys)) => { 22 | // We don't have anything to do, so just leave the default build 23 | // path alone 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | # TODO: change to weekly, and delete the detailed schedule 12 | interval: "daily" 13 | # Used to get an immediate test 14 | time: "05:10" 15 | timezone: "Etc/UTC" 16 | 17 | - package-ecosystem: "github-actions" # See documentation for possible values 18 | directory: "/" # Location of package manifests 19 | schedule: 20 | # TODO: change to weekly, and delete the detailed schedule 21 | interval: "daily" 22 | # Used to get an immediate test 23 | time: "05:10" 24 | timezone: "Etc/UTC" 25 | -------------------------------------------------------------------------------- /iris-mpc-upgrade-hawk/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iris-mpc-upgrade-hawk" 3 | version = "0.1.0" 4 | publish = false 5 | 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | aws-config.workspace = true 12 | aws-sdk-rds.workspace = true 13 | aws-sdk-s3.workspace = true 14 | tokio.workspace = true 15 | tracing.workspace = true 16 | tracing-subscriber.workspace = true 17 | serde_json.workspace = true 18 | eyre.workspace = true 19 | rand.workspace = true 20 | iris-mpc = { path = "../iris-mpc" } 21 | iris-mpc-common = { path = "../iris-mpc-common" } 22 | iris-mpc-store = { path = "../iris-mpc-store" } 23 | itertools.workspace = true 24 | metrics = "0.22.1" 25 | serde = { version = "1.0.214", features = ["derive"] } 26 | iris-mpc-cpu.workspace = true 27 | chrono = "0.4.38" 28 | sqlx.workspace = true 29 | toml = { version = "0.8.23", features = ["preserve_order"] } 30 | ampc-server-utils.workspace = true 31 | 32 | [dev-dependencies] 33 | thiserror.workspace = true 34 | serial_test = "3.2.0" 35 | 36 | [features] 37 | default = [] 38 | db_dependent = [] 39 | -------------------------------------------------------------------------------- /Dockerfile.hnsw.test.e2e: -------------------------------------------------------------------------------- 1 | # Set base image. 2 | FROM public.ecr.aws/docker/library/rust:1.89-slim-bullseye AS build-image 3 | 4 | # Install pre-requisites. 5 | RUN apt-get update && apt-get install -y \ 6 | curl \ 7 | build-essential \ 8 | libssl-dev \ 9 | texinfo \ 10 | libcap2-bin \ 11 | pkg-config \ 12 | git \ 13 | devscripts \ 14 | debhelper \ 15 | ca-certificates \ 16 | protobuf-compiler \ 17 | wget \ 18 | vim \ 19 | && apt-get clean \ 20 | && rm -rf /var/lib/apt/lists/* 21 | 22 | # Install root certificate. 23 | RUN apt-get update && apt-get install -y \ 24 | ca-certificates \ 25 | awscli \ 26 | curl 27 | COPY certs /usr/local/share/ca-certificates/ 28 | RUN update-ca-certificates 29 | 30 | # Install non-root user to run tests. 31 | RUN useradd -m -u 1000 -s /bin/bash runner 32 | 33 | # Install software assets. 34 | FROM build-image AS build-app 35 | WORKDIR /src/iris-mpc 36 | COPY . . 37 | 38 | # Set working directory. 
39 | WORKDIR / 40 | 41 | # Set entrypoint for the container 42 | # ENTRYPOINT ["/src/iris-mpc/scripts/run-tests-hnsw-e2e.sh"] 43 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/types.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use sodiumoxide::crypto::box_::PublicKey; 4 | 5 | use iris_mpc_common::{ 6 | config::Config as NodeConfig, galois_engine::degree4::GaloisRingIrisCodeShare, 7 | }; 8 | 9 | use super::constants::N_PARTIES; 10 | 11 | // Iris code and mask galois shares. 12 | pub type IrisCodeAndMaskShares = [[GaloisRingIrisCodeShare; N_PARTIES]; 2]; 13 | 14 | // Network wide node configuration set. 15 | pub type NodeConfigSet = [NodeConfig; N_PARTIES]; 16 | 17 | /// Set of node execution hosts. 18 | #[derive(Debug, Clone, Copy)] 19 | pub enum NodeExecutionHost { 20 | BareMetal, 21 | Docker, 22 | } 23 | 24 | impl Default for NodeExecutionHost { 25 | fn default() -> Self { 26 | match Path::new("/.dockerenv").exists() { 27 | true => NodeExecutionHost::Docker, 28 | _ => NodeExecutionHost::BareMetal, 29 | } 30 | } 31 | } 32 | 33 | /// Type alias: Ordinal identifier of an MPC participant. 34 | pub type PartyIdx = usize; 35 | 36 | // MPC party public keys (used for encryption). 37 | pub type PublicKeyset = [PublicKey; N_PARTIES]; 38 | -------------------------------------------------------------------------------- /scripts/run-anon-stats-server.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # Arg :: Node ID :: MPC node ordinal identifier. 5 | NODE_ID="$1" 6 | if [ -z "$NODE_ID" ]; then 7 | echo "Usage: run-anon-stats-server.sh " 8 | exit 1 9 | fi 10 | export RUST_LOG=info 11 | export SMPC__DB_URL="postgres://postgres:postgres@localhost:5432/SMPC_dev_${NODE_ID}" 12 | export SMPC__POLL_INTERVAL_SECS="10" 13 | export SMPC__PARTY_ID="${NODE_ID}" 14 | export SMPC__AWS__ENDPOINT="http://127.0.0.1:4566" 15 | export SMPC__RESULTS_TOPIC_ARN="arn:aws:sns:us-east-1:000000000000:iris-mpc-results.fifo" 16 | export SMPC__SERVER_COORDINATION__NODE_HOSTNAMES='["0.0.0.0","0.0.0.0","0.0.0.0"]' 17 | export SMPC__SERVER_COORDINATION__HEALTHCHECK_PORTS='["6000","6001","6002"]' 18 | export SMPC__SERVER_COORDINATION__PARTY_ID="${NODE_ID}" 19 | export SMPC__SERVICE_PORTS='["7001","7002","7003"]' 20 | export AWS_ENDPOINT_URL="http://127.0.0.1:4566" 21 | 22 | # Set the stack size to 100MB to receive large messages. 
23 | export RUST_MIN_STACK=104857600 24 | 25 | cargo run --release -p iris-mpc-bins --bin iris-mpc-anon-stats-server -- --party-id "${NODE_ID}" 26 | -------------------------------------------------------------------------------- /iris-mpc-common/src/helpers/kms_dh.rs: -------------------------------------------------------------------------------- 1 | use aws_sdk_kms::{types::KeyAgreementAlgorithmSpec, Client}; 2 | use eyre::Result; 3 | 4 | /// Derive a shared secret from two KMS keys 5 | pub async fn derive_shared_secret(own_key_arn: &str, other_key_arn: &str) -> Result<[u8; 32]> { 6 | let shared_config = aws_config::from_env().load().await; 7 | 8 | let client = Client::new(&shared_config); 9 | let other_public_key = client.get_public_key().key_id(other_key_arn).send().await?; 10 | let public_key = other_public_key.public_key.unwrap(); 11 | 12 | let res = client 13 | .derive_shared_secret() 14 | .key_id(own_key_arn) 15 | .public_key(public_key) 16 | .key_agreement_algorithm(KeyAgreementAlgorithmSpec::Ecdh) 17 | .send() 18 | .await?; 19 | 20 | let derived_shared_secret = res.shared_secret(); 21 | let unwrapped_secret = derived_shared_secret 22 | .expect("Expected derived shared secret from KMS") 23 | .clone() 24 | .into_inner(); 25 | 26 | let mut array = [0u8; 32]; 27 | array.copy_from_slice(&unwrapped_secret); 28 | 29 | Ok(array) 30 | } 31 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Worldcoin Foundation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /iris-mpc-cpu/benches/set_hash.rs: -------------------------------------------------------------------------------- 1 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 2 | use iris_mpc_common::vector_id::VectorId; 3 | use iris_mpc_cpu::execution::hawk_main::state_check::SetHash; 4 | 5 | /// Benchmark that repeatedly calls insert on SharedIrises. 
6 | fn bench_set_hash(c: &mut Criterion) { 7 | c.bench_function("set_hash_vector_id", |b| { 8 | let mut set_hash = SetHash::default(); 9 | let v = black_box(VectorId::from_serial_id(1111)); 10 | 11 | b.iter(|| { 12 | set_hash.add_unordered(v); 13 | set_hash.checksum() 14 | }); 15 | }); 16 | 17 | c.bench_function("set_hash_250_links", |b| { 18 | let mut set_hash = SetHash::default(); 19 | 20 | let lc = 1_u8; 21 | let v = VectorId::from_serial_id(1); 22 | let links = &vec![VectorId::from_serial_id(2); 250]; 23 | let item = black_box((lc, v, links)); 24 | 25 | b.iter(|| { 26 | set_hash.add_unordered(item); 27 | set_hash.checksum() 28 | }); 29 | }); 30 | } 31 | 32 | criterion_group! {benches, bench_set_hash} 33 | criterion_main!(benches); 34 | -------------------------------------------------------------------------------- /scripts/run-tests-hnsw-e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # ---------------------------------------------------------------- 5 | # Executes end to end CPU tests. 6 | # ---------------------------------------------------------------- 7 | function _main() 8 | { 9 | _log "Executing end to end tests" 10 | 11 | pushd "$(_get_path_to_monorepo)/iris-mpc-upgrade-hawk" || exit 12 | cargo test --release --test e2e_genesis -- --include-ignored 13 | popd || exit 14 | } 15 | 16 | function _get_now() 17 | { 18 | echo $(date +%Y-%m-%dT%H:%M:%S.%6N) 19 | } 20 | 21 | function _get_path_to_monorepo() 22 | { 23 | local here 24 | local root 25 | 26 | # Path -> here. 27 | here="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 28 | 29 | # Path -> monorepo root. 30 | root=$( dirname "$here" ) 31 | 32 | echo "${root}" 33 | } 34 | 35 | function _log () 36 | { 37 | local MSG=${1} 38 | 39 | echo -e "$(_get_now) [INFO] [$$] HNSW-E2E :: ${MSG}" 40 | } 41 | 42 | # ---------------------------------------------------------------- 43 | # ENTRY POINT 44 | # ---------------------------------------------------------------- 45 | 46 | _main 47 | -------------------------------------------------------------------------------- /Dockerfile.shares-encoding: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/ubuntu/ubuntu:22.04 AS build-image 2 | 3 | WORKDIR /src 4 | RUN apt-get update && apt-get install -y \ 5 | curl \ 6 | build-essential \ 7 | libssl-dev \ 8 | texinfo \ 9 | libcap2-bin \ 10 | pkg-config \ 11 | git \ 12 | devscripts \ 13 | debhelper \ 14 | ca-certificates \ 15 | protobuf-compiler \ 16 | wget 17 | 18 | RUN curl https://sh.rustup.rs -sSf | sh -s -- -y 19 | ENV PATH "/root/.cargo/bin:${PATH}" 20 | ENV RUSTUP_HOME "/root/.rustup" 21 | ENV CARGO_HOME "/root/.cargo" 22 | RUN rustup toolchain install 1.89.0 23 | RUN rustup default 1.89.0 24 | RUN rustup component add cargo 25 | RUN cargo install cargo-build-deps \ 26 | && cargo install cargo-edit --version 0.13.6 --locked 27 | 28 | FROM build-image AS build-app 29 | WORKDIR /src/gpu-iris-mpc 30 | COPY . . 
31 | RUN cargo build -p iris-mpc-bins --bin shares-encoding --release 32 | 33 | FROM public.ecr.aws/ubuntu/ubuntu:22.04 34 | ENV DEBIAN_FRONTEND=noninteractive 35 | 36 | COPY --from=build-app /src/gpu-iris-mpc/target/release/shares-encoding /bin/shares-encoding 37 | 38 | USER 65534 39 | ENTRYPOINT ["/bin/shares-encoding"] 40 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-common/migrator.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use eyre::{Context, Result}; 4 | use sqlx::{migrate::Migrator, postgres::PgPoolOptions, Executor}; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<()> { 8 | dotenvy::dotenv().ok(); 9 | let database_url = std::env::var("DATABASE_URL").unwrap(); 10 | let postgres_pool = PgPoolOptions::new() 11 | .max_connections(1) 12 | .after_connect(|conn, _meta| { 13 | let schema_name = std::env::var("SCHEMA_NAME").unwrap(); 14 | let query = format!("SET search_path = '{}';", schema_name.clone()); 15 | Box::pin(async move { 16 | conn.execute(query.as_str()).await?; 17 | Ok(()) 18 | }) 19 | }) 20 | .connect(&database_url) 21 | .await 22 | .with_context(|| "Could not connect to PostgreSQL!")?; 23 | 24 | let migrator = Migrator::new(Path::new("./migrations")) 25 | .await 26 | .with_context(|| "Could not create Migrator!")?; 27 | migrator 28 | .run(&postgres_pool) 29 | .await 30 | .with_context(|| "Could not run migration!")?; 31 | Ok(()) 32 | } 33 | -------------------------------------------------------------------------------- /iris-mpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iris-mpc" 3 | version = "0.1.0" 4 | publish = false 5 | 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | aws-config.workspace = true 12 | aws-sdk-sns.workspace = true 13 | aws-sdk-sqs.workspace = true 14 | aws-sdk-s3.workspace = true 15 | aws-sdk-secretsmanager.workspace = true 16 | tokio.workspace = true 17 | tracing.workspace = true 18 | serde_json.workspace = true 19 | eyre.workspace = true 20 | clap.workspace = true 21 | bytemuck.workspace = true 22 | rand.workspace = true 23 | base64.workspace = true 24 | uuid.workspace = true 25 | sodiumoxide.workspace = true 26 | iris-mpc-common = { path = "../iris-mpc-common" } 27 | iris-mpc-store = { path = "../iris-mpc-store" } 28 | itertools.workspace = true 29 | metrics.workspace = true 30 | serde = { version = "1.0.214", features = ["derive"] } 31 | iris-mpc-cpu.workspace = true 32 | chrono.workspace = true 33 | sqlx.workspace = true 34 | bincode.workspace = true 35 | pprof = { version = "0.15.0", features = ["flamegraph", "prost-codec"] } 36 | ampc-anon-stats.workspace = true 37 | ampc-server-utils.workspace = true 38 | 39 | [features] 40 | default = [] 41 | -------------------------------------------------------------------------------- /iris-mpc-upgrade/src/proto/mod.rs: -------------------------------------------------------------------------------- 1 | use iris_mpc_common::{IRIS_CODE_LENGTH, MASK_CODE_LENGTH}; 2 | use iris_mpc_reshare::IrisCodeReShare; 3 | use prost::Message; 4 | 5 | // this is generated code so we skip linting it 6 | #[rustfmt::skip] 7 | #[allow(clippy::all)] 8 | pub mod iris_mpc_reshare; 9 | 10 | pub fn get_size_of_reshare_iris_code_share_batch(batch_size: usize) -> usize { 11 | let dummy = iris_mpc_reshare::IrisCodeReShareRequest { 12 | sender_id: 0, 
13 | other_id: 1, 14 | receiver_id: 2, 15 | id_range_start_inclusive: 0, 16 | id_range_end_non_inclusive: batch_size as i64, 17 | iris_code_re_shares: vec![ 18 | IrisCodeReShare { 19 | left_iris_code_share: vec![1u8; IRIS_CODE_LENGTH * size_of::()], 20 | left_mask_share: vec![2u8; MASK_CODE_LENGTH * size_of::()], 21 | right_iris_code_share: vec![3u8; IRIS_CODE_LENGTH * size_of::()], 22 | right_mask_share: vec![4u8; MASK_CODE_LENGTH * size_of::()], 23 | }; 24 | batch_size 25 | ], 26 | client_correlation_sanity_check: vec![7u8; 32], 27 | }; 28 | 29 | dummy.encoded_len() 30 | } 31 | -------------------------------------------------------------------------------- /iris-mpc-common/src/helpers/aws.rs: -------------------------------------------------------------------------------- 1 | use aws_sdk_sns::types::MessageAttributeValue; 2 | use eyre::Result; 3 | use std::collections::HashMap; 4 | 5 | pub const TRACE_ID_MESSAGE_ATTRIBUTE_NAME: &str = "TraceID"; 6 | pub const SPAN_ID_MESSAGE_ATTRIBUTE_NAME: &str = "SpanID"; 7 | pub const NODE_ID_MESSAGE_ATTRIBUTE_NAME: &str = "NodeID"; 8 | 9 | pub fn construct_message_attributes( 10 | trace_id: &String, 11 | span_id: &String, 12 | ) -> Result> { 13 | let mut message_attributes = HashMap::new(); 14 | 15 | let trace_id_message_attribute = MessageAttributeValue::builder() 16 | .data_type("String") 17 | .string_value(trace_id) 18 | .build()?; 19 | 20 | message_attributes.insert( 21 | TRACE_ID_MESSAGE_ATTRIBUTE_NAME.to_string(), 22 | trace_id_message_attribute, 23 | ); 24 | 25 | let span_id_message_attribute = MessageAttributeValue::builder() 26 | .data_type("String") 27 | .string_value(span_id) 28 | .build()?; 29 | 30 | message_attributes.insert( 31 | SPAN_ID_MESSAGE_ATTRIBUTE_NAME.to_string(), 32 | span_id_message_attribute, 33 | ); 34 | 35 | Ok(message_attributes) 36 | } 37 | -------------------------------------------------------------------------------- /iris-mpc-gpu/src/helpers/id_wrapper.rs: -------------------------------------------------------------------------------- 1 | use axum::extract::Path; 2 | use cudarc::nccl::Id; 3 | use std::str::FromStr; 4 | 5 | pub struct IdWrapper(pub Id); 6 | 7 | impl FromStr for IdWrapper { 8 | type Err = hex::FromHexError; 9 | 10 | fn from_str(s: &str) -> Result { 11 | let mut id: [std::ffi::c_char; 128] = [0; 128]; 12 | hex::decode_to_slice(s, bytemuck::cast_slice_mut(&mut id))?; 13 | Ok(IdWrapper(Id::uninit(id))) 14 | } 15 | } 16 | 17 | impl std::fmt::Display for IdWrapper { 18 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 19 | #[allow(clippy::unnecessary_cast)] 20 | // depending on the platform, c_char might be signed or unsigned, so the below cast might be 21 | // unnecessary 22 | f.write_str(&hex::encode( 23 | self.0 24 | .internal() 25 | .iter() 26 | .map(|&c| c as u8) 27 | .collect::>(), 28 | )) 29 | } 30 | } 31 | 32 | pub async fn http_root(ids: Vec, Path(device_id): Path) -> String { 33 | let device_id: usize = device_id.parse().unwrap(); 34 | IdWrapper(ids[device_id]).to_string() 35 | } 36 | -------------------------------------------------------------------------------- /iris-mpc-utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iris-mpc-utils" 3 | version = "0.1.0" 4 | edition.workspace = true 5 | license.workspace = true 6 | repository.workspace = true 7 | 8 | [dependencies] 9 | async-trait.workspace = true 10 | aws-config.workspace = true 11 | aws-sdk-rds.workspace = true 12 | aws-sdk-sns.workspace = 
true 13 | aws-sdk-sqs.workspace = true 14 | aws-sdk-s3.workspace = true 15 | aws-sdk-secretsmanager.workspace = true 16 | base64.workspace = true 17 | bincode.workspace = true 18 | clap.workspace = true 19 | dotenvy.workspace = true 20 | eyre.workspace = true 21 | futures.workspace = true 22 | iris-mpc = { path = "../iris-mpc" } 23 | iris-mpc-common = { path = "../iris-mpc-common" } 24 | iris-mpc-cpu = { path = "../iris-mpc-cpu" } 25 | iris-mpc-store = { path = "../iris-mpc-store" } 26 | itertools.workspace = true 27 | rand.workspace = true 28 | rayon.workspace = true 29 | serde = { version = "1.0.214", features = ["derive"] } 30 | serde_json.workspace = true 31 | sodiumoxide.workspace = true 32 | sqlx.workspace = true 33 | thiserror.workspace = true 34 | toml.workspace = true 35 | tokio.workspace = true 36 | tokio-util.workspace = true 37 | tracing.workspace = true 38 | uuid.workspace = true 39 | dirs = "6.0.0" 40 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: "v$RESOLVED_VERSION" 2 | tag-template: "v$RESOLVED_VERSION" 3 | filter-by-commitish: true 4 | 5 | categories: 6 | - title: "Features" 7 | labels: 8 | - "feature" 9 | - "enhancement" 10 | - title: "Bug Fixes" 11 | labels: 12 | - "fix" 13 | - "bugfix" 14 | - "bug" 15 | - "patch" 16 | - title: "Maintenance" 17 | label: "chore" 18 | 19 | version-resolver: 20 | major: 21 | labels: 22 | - "major" 23 | - "breaking" 24 | minor: 25 | labels: 26 | - "minor" 27 | - "feature" 28 | - "enhancement" 29 | patch: 30 | labels: 31 | - "patch" 32 | - "fix" 33 | default: patch 34 | 35 | template: | 36 | ## Changes 37 | 38 | $CHANGES 39 | 40 | autolabeler: 41 | - label: "chore" 42 | files: 43 | - "*.md" 44 | 45 | - label: "patch" 46 | branch: 47 | - '/fix\/.+/' 48 | title: 49 | - "/fix/i" 50 | 51 | - label: "enhancement" 52 | branch: 53 | - '/feat\/.+/' 54 | - "/TBD-[0-9]{1,4}/" 55 | body: 56 | - "/TBD-[0-9]{1,4}/" 57 | 58 | - label: "breaking" 59 | branch: 60 | - '/breaking\/.+/' 61 | body: 62 | - "/BREAKING/" 63 | -------------------------------------------------------------------------------- /iris-mpc-py/src/py_hnsw/pyclasses/iris_code_array.rs: -------------------------------------------------------------------------------- 1 | use iris_mpc_common::iris_db::iris::IrisCodeArray; 2 | use pyo3::prelude::*; 3 | use rand::rngs::ThreadRng; 4 | 5 | #[pyclass] 6 | #[derive(Clone, Default)] 7 | pub struct PyIrisCodeArray(pub IrisCodeArray); 8 | 9 | #[pymethods] 10 | impl PyIrisCodeArray { 11 | #[new] 12 | pub fn new(input: String) -> Self { 13 | Self::from_base64(input) 14 | } 15 | 16 | pub fn to_base64(&self) -> String { 17 | self.0.to_base64().unwrap() 18 | } 19 | 20 | #[staticmethod] 21 | pub fn from_base64(input: String) -> Self { 22 | Self(IrisCodeArray::from_base64(&input).unwrap()) 23 | } 24 | 25 | #[staticmethod] 26 | pub fn zeros() -> Self { 27 | Self(IrisCodeArray::ZERO) 28 | } 29 | 30 | #[staticmethod] 31 | pub fn ones() -> Self { 32 | Self(IrisCodeArray::ONES) 33 | } 34 | 35 | #[staticmethod] 36 | pub fn uniform_random() -> Self { 37 | let mut rng = ThreadRng::default(); 38 | Self(IrisCodeArray::random_rng(&mut rng)) 39 | } 40 | } 41 | 42 | impl From for PyIrisCodeArray { 43 | fn from(value: IrisCodeArray) -> Self { 44 | Self(value) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /iris-mpc-upgrade/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "iris-mpc-upgrade" 3 | version = "0.1.0" 4 | publish = false 5 | 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | ark-bls12-381 = "0.5.0" 12 | ark-ff = "0.5.0" 13 | ark-ec = "0.5.0" 14 | ark-serialize = "0.5.0" 15 | axum.workspace = true 16 | iris-mpc-common = { path = "../iris-mpc-common" } 17 | iris-mpc-store = { path = "../iris-mpc-store" } 18 | clap = { workspace = true, features = ["env"] } 19 | eyre.workspace = true 20 | bytemuck.workspace = true 21 | serde.workspace = true 22 | serde-big-array = "0.5" 23 | tracing.workspace = true 24 | itertools.workspace = true 25 | rand.workspace = true 26 | rand_chacha = "0.3" 27 | tokio.workspace = true 28 | tracing-subscriber.workspace = true 29 | 30 | tonic = { version = "0.12.3", features = [ 31 | "tls", 32 | "tls-native-roots", 33 | "transport", 34 | ] } 35 | prost = "0.13.3" 36 | sha2.workspace = true 37 | thiserror.workspace = true 38 | blake3 = "1.8.2" 39 | 40 | 41 | [dev-dependencies] 42 | criterion = "0.5" 43 | rayon = "1.10.0" 44 | 45 | 46 | [build-dependencies] 47 | tonic-build = "0.12.3" 48 | 49 | [[bench]] 50 | name = "rerandomize" 51 | harness = false 52 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-common/README.md: -------------------------------------------------------------------------------- 1 | # Key Manager CLI 2 | 3 | The Key Manager CLI is a command line interface to rotate public and private keys used to encode shares. 4 | The initial private key is generated using `smpc-setup`, and it is empty. 5 | 6 | Key manager must be run from each of the participant accounts at least once before initiating the protocol. 7 | 8 | Keys can be rotated at any time using the `rotate` command. 9 | 10 | ## Usage 11 | 12 | ```bash 13 | >>> key-manager --node-id 2 --env prod rotate --public-key-bucket-name wf-env-stage-public-keys 14 | ``` 15 | 16 | This will: 17 | 18 | 1. Update the public key in the bucket `wf-env-stage-public-keys` for node 2. 19 | 2. Generate a new private key and store aws secrets manager under the secret name: `prod/iris-mpc/ecdh-private-key-2` 20 | 21 | This key will be immediately valid, though the previous key will retain a validity of 24 hours (dictated by the cloudfront caching behavior, 22 | and by application logic that checks against AWSCURRENT and AWSPREVIOUS version of the secret). 23 | 24 | # Migrator 25 | 26 | Minimal wrapper on top of the `sqlx` to migrate the database in dev mode. Was implemented because of issues with running `sqlx migrate run` 27 | into a custom schema. 
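The Key Manager README above notes that, after a rotation, consumers must accept both the `AWSCURRENT` and `AWSPREVIOUS` versions of the per-node private key during the overlap window. Below is a minimal sketch of what that dual lookup can look like with `aws-sdk-secretsmanager`; the function itself is illustrative, and only the secret-name pattern and the two version stages come from the README.

```rust
// Sketch only: mirrors the README's note that both AWSCURRENT and AWSPREVIOUS
// versions of the per-node private key are accepted during rotation.
use aws_sdk_secretsmanager::Client;

async fn load_current_and_previous_keys(
    env: &str,
    node_id: u8,
) -> eyre::Result<(String, Option<String>)> {
    let shared_config = aws_config::from_env().load().await;
    let client = Client::new(&shared_config);
    // Secret name pattern documented above, e.g. "prod/iris-mpc/ecdh-private-key-2".
    let secret_id = format!("{env}/iris-mpc/ecdh-private-key-{node_id}");

    // AWSCURRENT must exist once the key-manager has run at least once.
    let current = client
        .get_secret_value()
        .secret_id(secret_id.clone())
        .version_stage("AWSCURRENT")
        .send()
        .await?
        .secret_string()
        .unwrap_or_default()
        .to_string();

    // AWSPREVIOUS may be absent (e.g. before the second rotation), so treat it as optional.
    let previous = client
        .get_secret_value()
        .secret_id(secret_id)
        .version_stage("AWSPREVIOUS")
        .send()
        .await
        .ok()
        .and_then(|out| out.secret_string().map(str::to_string));

    Ok((current, previous))
}
```

In practice the previous version only matters inside the 24-hour validity window described above, after which only `AWSCURRENT` is relevant.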
28 | -------------------------------------------------------------------------------- /Dockerfile.debug: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 public.ecr.aws/ubuntu/ubuntu:22.04 2 | ENV DEBIAN_FRONTEND=noninteractive 3 | 4 | WORKDIR /app 5 | 6 | RUN apt-get update && apt-get install -y \ 7 | curl \ 8 | build-essential \ 9 | libssl-dev \ 10 | texinfo \ 11 | libcap2-bin \ 12 | pkg-config \ 13 | git \ 14 | devscripts \ 15 | debhelper \ 16 | ca-certificates \ 17 | protobuf-compiler \ 18 | wget 19 | 20 | RUN curl https://sh.rustup.rs -sSf | sh -s -- -y 21 | ENV PATH "/root/.cargo/bin:${PATH}" 22 | ENV RUSTUP_HOME "/root/.rustup" 23 | ENV CARGO_HOME "/root/.cargo" 24 | RUN rustup toolchain install 1.89.0 25 | RUN rustup default 1.89.0 26 | RUN rustup component add cargo 27 | RUN cargo install cargo-build-deps && cargo install cargo-edit --version 0.13.6 --locked 28 | 29 | COPY . . 30 | 31 | RUN apt-get update && apt-get install -y pkg-config wget libssl-dev ca-certificates \ 32 | && rm -rf /var/lib/apt/lists/* 33 | 34 | RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb \ 35 | && dpkg -i cuda-keyring_1.1-1_all.deb \ 36 | && apt-get update \ 37 | && apt-get install -y cuda-toolkit-12-2 libnccl2=2.22.3-1+cuda12.2 libnccl-dev=2.22.3-1+cuda12.2 38 | -------------------------------------------------------------------------------- /iris-mpc-gpu/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iris-mpc-gpu" 3 | version = "0.1.0" 4 | publish = false 5 | 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | cudarc.workspace = true 12 | eyre.workspace = true 13 | tracing.workspace = true 14 | bytemuck.workspace = true 15 | futures.workspace = true 16 | rayon.workspace = true 17 | itertools.workspace = true 18 | hex.workspace = true 19 | tokio.workspace = true 20 | ring = "0.17.8" 21 | axum.workspace = true 22 | num-traits.workspace = true 23 | rand.workspace = true 24 | static_assertions.workspace = true 25 | iris-mpc-common = { path = "../iris-mpc-common" } 26 | iris-mpc-cpu = { path = "../iris-mpc-cpu" } 27 | metrics = "0.22.1" 28 | memmap2.workspace = true 29 | ampc-anon-stats.workspace = true 30 | ampc-server-utils.workspace = true 31 | 32 | [dev-dependencies] 33 | criterion = "0.5" 34 | ndarray = "0.16.0" 35 | float_eq = "1" 36 | tracing-subscriber.workspace = true 37 | uuid.workspace = true 38 | 39 | [features] 40 | default = [] 41 | gpu_dependent = [] 42 | 43 | [[bench]] 44 | name = "preprocessing" 45 | harness = false 46 | 47 | [[bench]] 48 | name = "matmul" 49 | harness = false 50 | 51 | [[bench]] 52 | name = "transpose" 53 | harness = false 54 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/hawkers/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the implementation of the different types of stores that can be used in the HNSW protocol. 2 | //! The stores are used to store the vectors or their references that are indexed by the HNSW protocol. 3 | //! The stores also contain the logic to perform the necessary low-level operations on the stored vectors such as 4 | //! - distance computation, 5 | //! - matching, 6 | //! - distance comparison, 7 | //! - insertion into the store, 8 | //! 
- preprocessing of queries (vectors) to the HNSW protocol. 9 | //! 10 | //! Each store implements the `Store` trait which defines the common interface for all the stores. 11 | //! The `Store` trait is defined in `hnsw::vector_store.rs`. 12 | 13 | use crate::{execution::hawk_main, hawkers::aby3::aby3_store::DistanceFn}; 14 | 15 | /// Store with vectors in secret shared form. 16 | /// The underlying operations are secure multi-party computation (MPC) operations. 17 | pub mod aby3; 18 | 19 | /// Store with vectors in plaintext form. 20 | pub mod plaintext_store; 21 | 22 | /// Data structure for shared in-memory irises 23 | pub mod shared_irises; 24 | 25 | pub mod build_plaintext; 26 | 27 | pub mod ideal_knn_engines; 28 | 29 | const TEST_DISTANCE_FN: DistanceFn = hawk_main::DISTANCE_FN; 30 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-cpu/local_hnsw.rs: -------------------------------------------------------------------------------- 1 | use aes_prng::AesRng; 2 | use clap::Parser; 3 | use iris_mpc_cpu::hawkers::aby3::test_utils::shared_random_setup_with_grpc; 4 | use rand::SeedableRng; 5 | use std::{error::Error, fs::File}; 6 | use tracing_forest::{tag::NoTag, ForestLayer, PrettyPrinter}; 7 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer}; 8 | 9 | #[derive(Parser)] 10 | struct Args { 11 | #[clap(short = 'n', default_value = "1000")] 12 | database_size: usize, 13 | } 14 | 15 | #[tokio::main] 16 | async fn main() -> Result<(), Box> { 17 | let args = Args::parse(); 18 | let database_size = args.database_size; 19 | 20 | let file = File::create("searcher_network_tree.txt")?; 21 | let file_processor = PrettyPrinter::new().writer(std::sync::Mutex::new(file)); 22 | 23 | tracing_subscriber::registry() 24 | .with( 25 | ForestLayer::new(file_processor, NoTag {}) 26 | .with_filter(EnvFilter::new("searcher::network")), 27 | ) 28 | .init(); 29 | 30 | println!("Starting Local HNSW with {} vectors", database_size); 31 | let mut rng = AesRng::seed_from_u64(0_u64); 32 | 33 | shared_random_setup_with_grpc(&mut rng, database_size).await?; 34 | 35 | Ok(()) 36 | } 37 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-upgrade/ssh_chain.sh: -------------------------------------------------------------------------------- 1 | CERT_PATH="./nginx/cert" 2 | 3 | rm -rf $CERT_PATH/*.key 4 | rm -rf $CERT_PATH:/*.pem 5 | rm -rf $CERT_PATH/*.srl 6 | 7 | 8 | # 1. Generate CA's private key and self-signed certificate 9 | openssl req -x509 -newkey rsa:4096 -days 365 -nodes -keyout $CERT_PATH/ca-key.pem -out $CERT_PATH/ca-cert.pem -subj "/C=DE/ST=Berlin/L=Berlin/O=TFH/OU=Privacy/CN=*.2.stage.smpcv2.worldcoin.dev/emailAddress=carlo.mazzaferro@toolsforhumanity.com" 10 | 11 | echo "CA's self-signed certificate" 12 | openssl x509 -in $CERT_PATH/ca-cert.pem -noout -text 13 | 14 | # 2. Generate web server's private key and certificate signing request (CSR) 15 | openssl req -newkey rsa:4096 -nodes -keyout $CERT_PATH/server-key.pem -out $CERT_PATH/server-req.pem -subj "/C=DE/ST=Berlin/L=Berlin/O=Worldcoin/OU=Computer/CN=*.2.stage.smpcv2.worldcoin.dev/emailAddress=carlo.mazzaferro@toolsforhumanity.com" 16 | 17 | # 3. 
Use CA's private key to sign web server's CSR and get back the signed certificate 18 | openssl x509 -req -in $CERT_PATH/server-req.pem -days 60 -CA $CERT_PATH/ca-cert.pem -CAkey $CERT_PATH/ca-key.pem -CAcreateserial -out $CERT_PATH/server-cert.pem -extfile $CERT_PATH/server-ext.cnf 19 | 20 | echo "Server's signed certificate" 21 | openssl x509 -in $CERT_PATH/server-cert.pem -noout -text 22 | -------------------------------------------------------------------------------- /iris-mpc-common/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::needless_range_loop)] 2 | pub mod config; 3 | pub mod error; 4 | pub mod galois_engine; 5 | pub mod helpers; 6 | pub mod iris_db; 7 | pub mod job; 8 | pub mod postgres; 9 | pub mod shamir; 10 | #[cfg(feature = "helpers")] 11 | pub mod test; 12 | pub mod tracing; 13 | pub mod vector_id; 14 | 15 | pub const IRIS_CODE_LENGTH: usize = 12_800; 16 | pub const MASK_CODE_LENGTH: usize = 6_400; 17 | pub const ROTATIONS: usize = 31; 18 | 19 | pub const PRE_PROC_ROW_PADDING: usize = 120; 20 | pub const IRIS_CODE_ROWS: usize = 16; 21 | // 16 = 12800 / 800 = (IRIS_CODE_LENGTH) / (CODE_COLS * 4) 22 | pub const PRE_PROC_IRIS_CODE_LENGTH: usize = 23 | IRIS_CODE_LENGTH + (IRIS_CODE_ROWS * PRE_PROC_ROW_PADDING); 24 | pub const PRE_PROC_MASK_CODE_LENGTH: usize = MASK_CODE_LENGTH + (8 * PRE_PROC_ROW_PADDING); 25 | 26 | /// Iris code database type; .0 = iris code, .1 = mask 27 | pub type IrisCodeDb = (Vec, Vec); 28 | /// Borrowed version of iris database; .0 = iris code, .1 = mask 29 | pub type IrisCodeDbSlice<'a> = (&'a [u16], &'a [u16]); 30 | 31 | pub use ampc_secret_sharing::galois; 32 | pub use ampc_secret_sharing::id; 33 | pub use vector_id::SerialId as IrisSerialId; 34 | pub use vector_id::VectorId as IrisVectorId; 35 | pub use vector_id::VersionId as IrisVersionId; 36 | -------------------------------------------------------------------------------- /scripts/run-server-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # Arg :: Node ID :: MPC node ordinal identifier. 5 | NODE_ID="$1" 6 | if [ -z "$NODE_ID" ]; then 7 | echo "Usage: run-server-docker.sh " 8 | exit 1 9 | fi 10 | 11 | # Arg :: Binary :: Binary to run [standard | genesis]. 12 | BINARY="$2" 13 | if [ -z "$BINARY" ]; then 14 | echo "Usage: run-server-docker.sh " 15 | exit 1 16 | fi 17 | 18 | # needs to run twice to create the keys with both AWSCURRENT and AWSPREVIOUS states 19 | if [ "$BINARY" != "genesis" ]; then 20 | echo "Running key manager" 21 | /bin/key-manager \ 22 | --region "$AWS_REGION" \ 23 | --endpoint-url "$AWS_ENDPOINT_URL" \ 24 | --node-id "$NODE_ID" \ 25 | --env dev rotate \ 26 | --public-key-bucket-name wf-dev-public-keys 27 | /bin/key-manager \ 28 | --region "$AWS_REGION" \ 29 | --endpoint-url "$AWS_ENDPOINT_URL" \ 30 | --node-id "$NODE_ID" \ 31 | --env dev rotate \ 32 | --public-key-bucket-name wf-dev-public-keys 33 | fi 34 | 35 | # Set the stack size to 100MB to receive large messages. 
36 | export RUST_MIN_STACK=104857600 37 | 38 | if [ "$BINARY" == "genesis" ]; then 39 | /bin/iris-mpc-hawk-genesis --max-height 100 --perform-snapshot=false 40 | else 41 | /bin/iris-mpc-hawk 42 | fi 43 | -------------------------------------------------------------------------------- /migrations/20250313141530_modifications_table_v2.up.sql: -------------------------------------------------------------------------------- 1 | -- drop the old modifications table if it exists 2 | DROP TABLE IF EXISTS modifications; 3 | 4 | -- create the new modifications table 5 | CREATE TABLE IF NOT EXISTS modifications ( 6 | id BIGINT PRIMARY KEY, 7 | serial_id BIGINT NOT NULL, 8 | request_type TEXT NOT NULL, 9 | s3_url TEXT, 10 | status TEXT NOT NULL, 11 | persisted BOOLEAN NOT NULL DEFAULT FALSE 12 | ); 13 | 14 | -- Create a function to assign the next available modification ID 15 | CREATE OR REPLACE FUNCTION assign_modification_id() 16 | RETURNS TRIGGER AS $$ 17 | DECLARE 18 | next_id BIGINT; 19 | BEGIN 20 | -- Lock the table to prevent race conditions 21 | LOCK TABLE modifications IN EXCLUSIVE MODE; 22 | 23 | -- Find the next available ID (max + 1 or 1 if table is empty) 24 | SELECT COALESCE(MAX(id) + 1, 1) INTO next_id FROM modifications; 25 | 26 | -- Assign the ID to the new row 27 | NEW.id := next_id; 28 | 29 | RETURN NEW; 30 | END; 31 | $$ LANGUAGE plpgsql; 32 | 33 | -- Create a trigger to automatically assign modification IDs before insert if not specified explicitly 34 | CREATE TRIGGER before_insert_modifications 35 | BEFORE INSERT ON modifications 36 | FOR EACH ROW 37 | WHEN (NEW.id IS NULL) 38 | EXECUTE FUNCTION assign_modification_id(); 39 | -------------------------------------------------------------------------------- /docker-compose.dev.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | dev_db: 3 | image: public.ecr.aws/docker/library/postgres:16.10-alpine3.22 4 | ports: 5 | - "5432:5432" 6 | user: "postgres" 7 | environment: 8 | POSTGRES_USER: "postgres" 9 | POSTGRES_PASSWORD: "postgres" 10 | volumes: 11 | - ./scripts/tools/init-db-pgres.sql:/docker-entrypoint-initdb.d/init-db-pgres.sql 12 | healthcheck: 13 | test: ["CMD-SHELL", "pg_isready", "-d", "db_prod"] 14 | interval: 10s 15 | timeout: 30s 16 | retries: 5 17 | start_period: 40s 18 | 19 | localstack: 20 | image: public.ecr.aws/localstack/localstack:4.9 21 | depends_on: 22 | dev_db: 23 | condition: service_healthy 24 | ports: 25 | - "127.0.0.1:4566:4566" 26 | - "127.0.0.1:4571:4571" 27 | environment: 28 | - SERVICES=kms,s3,secretsmanager,sqs,sns 29 | - DEFAULT_REGION=us-east-1 30 | - AWS_ACCESS_KEY_ID=test 31 | - AWS_SECRET_ACCESS_KEY=test 32 | healthcheck: 33 | test: >- 34 | sleep 10 && 35 | awslocal sqs list-queues && 36 | awslocal s3 ls && 37 | awslocal kms list-keys 38 | interval: 5s 39 | timeout: 25s 40 | retries: 10 41 | 42 | volumes: 43 | - ./scripts/tools/init-localstack.sh:/etc/localstack/init/ready.d/init-localstack.sh 44 | -------------------------------------------------------------------------------- /scripts/setup-pre-commit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for pre-commit hooks 3 | 4 | set -e 5 | 6 | echo "🔧 Setting up pre-commit hooks..." 7 | 8 | # Check if pre-commit is installed 9 | if ! command -v pre-commit &> /dev/null; then 10 | echo "📦 Installing pre-commit..." 
11 | 12 | # Try to install with pip/pip3 13 | if command -v pip3 &> /dev/null; then 14 | pip3 install pre-commit 15 | elif command -v pip &> /dev/null; then 16 | pip install pre-commit 17 | elif command -v brew &> /dev/null; then 18 | echo "Installing via Homebrew..." 19 | brew install pre-commit 20 | else 21 | echo "❌ Error: Could not find pip, pip3, or brew to install pre-commit" 22 | echo "Please install pre-commit manually: https://pre-commit.com/#install" 23 | exit 1 24 | fi 25 | fi 26 | 27 | echo "✅ pre-commit is installed" 28 | 29 | # Install the git hooks 30 | echo "📌 Installing git hooks..." 31 | pre-commit install 32 | 33 | # Install commit-msg hook 34 | pre-commit install --hook-type commit-msg 35 | 36 | echo "🎉 Pre-commit hooks installed successfully!" 37 | echo "" 38 | echo "To run hooks manually on all files: pre-commit run --all-files" 39 | echo "To update hooks to latest versions: pre-commit autoupdate" 40 | echo "" 41 | echo "Hooks will now run automatically on every commit." 42 | -------------------------------------------------------------------------------- /Dockerfile.shares-re-randomization: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/ubuntu/ubuntu:22.04 AS build-image 2 | 3 | WORKDIR /src 4 | RUN apt-get update && apt-get install -y \ 5 | curl \ 6 | build-essential \ 7 | libssl-dev \ 8 | texinfo \ 9 | libcap2-bin \ 10 | pkg-config \ 11 | git \ 12 | devscripts \ 13 | debhelper \ 14 | ca-certificates \ 15 | protobuf-compiler \ 16 | wget 17 | 18 | RUN update-ca-certificates 19 | 20 | RUN curl https://sh.rustup.rs -sSf | sh -s -- -y 21 | ENV PATH "/root/.cargo/bin:${PATH}" 22 | ENV RUSTUP_HOME "/root/.rustup" 23 | ENV CARGO_HOME "/root/.cargo" 24 | RUN rustup toolchain install 1.89.0 25 | RUN rustup default 1.89.0 26 | RUN rustup component add cargo 27 | RUN cargo install cargo-build-deps \ 28 | && cargo install cargo-edit --version 0.13.6 --locked 29 | 30 | FROM build-image AS build-app 31 | WORKDIR /src/iris-mpc 32 | COPY . . 33 | RUN cargo build -p iris-mpc-bins --release --bin rerandomize-db --bin seed-v2-dbs 34 | 35 | FROM ubuntu:22.04 36 | ENV DEBIAN_FRONTEND=noninteractive 37 | 38 | COPY --from=build-app /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 39 | COPY --from=build-app /src/iris-mpc/target/release/rerandomize-db /bin/rerandomize-db 40 | COPY --from=build-app /src/iris-mpc/target/release/seed-v2-dbs /bin/seed-v2-dbs 41 | 42 | USER 65534 43 | ENTRYPOINT ["/bin/rerandomize-db"] 44 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/client/typeset/data/response.rs: -------------------------------------------------------------------------------- 1 | use iris_mpc_common::helpers::smpc_response; 2 | 3 | /// Enumeration over system responses dequeued from system egress queue. 
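/// Each variant wraps the corresponding result payload from
/// `iris_mpc_common::helpers::smpc_response`. Illustrative dispatch (not part of the original
/// source; shown only as a usage sketch):
///
/// ```ignore
/// match response {
///     ResponseBody::Uniqueness(r) => println!("uniqueness result from node {}", r.node_id),
///     other => println!("response from node {}", other.node_id()),
/// }
/// ```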
4 | #[derive(Clone, Debug)] 5 | pub enum ResponseBody { 6 | IdentityDeletion(smpc_response::IdentityDeletionResult), 7 | Reauthorization(smpc_response::ReAuthResult), 8 | ResetCheck(smpc_response::ResetCheckResult), 9 | ResetUpdate(smpc_response::ResetUpdateAckResult), 10 | Uniqueness(smpc_response::UniquenessResult), 11 | } 12 | 13 | impl ResponseBody { 14 | pub fn node_id(&self) -> usize { 15 | match self { 16 | Self::IdentityDeletion(result) => result.node_id, 17 | Self::Reauthorization(result) => result.node_id, 18 | Self::ResetCheck(result) => result.node_id, 19 | Self::ResetUpdate(result) => result.node_id, 20 | Self::Uniqueness(result) => result.node_id, 21 | } 22 | } 23 | 24 | pub fn iris_serial_id(&self) -> Option { 25 | match self { 26 | Self::IdentityDeletion(result) => Some(result.serial_id), 27 | Self::Reauthorization(result) => Some(result.serial_id), 28 | Self::ResetCheck(_) => None, 29 | Self::ResetUpdate(result) => Some(result.serial_id), 30 | Self::Uniqueness(result) => result.serial_id, 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /iris-mpc-upgrade-hawk/tests/utils/resources.rs: -------------------------------------------------------------------------------- 1 | use super::{TestRunContextInfo, TestRunEnvironment}; 2 | use iris_mpc_common::config::Config; 3 | use std::io::Error; 4 | use std::path::{Path, PathBuf}; 5 | 6 | impl TestRunEnvironment { 7 | /// Returns subdirectory name for current test run environment. 8 | pub fn subdirectory(&self) -> &str { 9 | match self { 10 | TestRunEnvironment::Docker => "docker", 11 | TestRunEnvironment::Local => "local", 12 | } 13 | } 14 | } 15 | 16 | /// Returns path to resources root directory. 17 | pub fn get_resources_root() -> String { 18 | let crate_root = env!("CARGO_MANIFEST_DIR"); 19 | 20 | format!("{crate_root}/tests/resources") 21 | } 22 | 23 | /// Returns the path in the source tree of a resource asset. 24 | pub fn get_resource_path(location: &str) -> PathBuf { 25 | Path::new(&get_resources_root()).join(location) 26 | } 27 | 28 | /// Returns node configuration deserialized from a toml file. 
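/// Reads from `tests/resources/node-config/{docker|local}/<name>.toml` relative to the crate
/// root. Illustrative call (the config file name here is hypothetical):
///
/// ```ignore
/// let cfg = read_node_config(&ctx, "node_0_main".to_string())?;
/// ```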
29 | pub fn read_node_config(ctx: &TestRunContextInfo, config_fname: String) -> Result { 30 | let path_to_cfg = format!( 31 | "{}/node-config/{}/{}.toml", 32 | get_resources_root(), 33 | ctx.env().subdirectory(), 34 | config_fname 35 | ); 36 | let cfg = std::fs::read_to_string(path_to_cfg)?; 37 | 38 | Ok(toml::from_str(&cfg).unwrap()) 39 | } 40 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-upgrade/docker-compose.rand.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | new-db-1: 3 | image: postgres:16 4 | ports: 5 | - "6200:5432" 6 | environment: 7 | POSTGRES_USER: "postgres" 8 | POSTGRES_PASSWORD: "postgres" 9 | new-db-2: 10 | image: postgres:16 11 | ports: 12 | - "6201:5432" 13 | environment: 14 | POSTGRES_USER: "postgres" 15 | POSTGRES_PASSWORD: "postgres" 16 | new-db-3: 17 | image: postgres:16 18 | ports: 19 | - "6202:5432" 20 | environment: 21 | POSTGRES_USER: "postgres" 22 | POSTGRES_PASSWORD: "postgres" 23 | new-db-4: 24 | image: postgres:16 25 | ports: 26 | - "6203:5432" 27 | environment: 28 | POSTGRES_USER: "postgres" 29 | POSTGRES_PASSWORD: "postgres" 30 | new-db-5: 31 | image: postgres:16 32 | ports: 33 | - "6204:5432" 34 | environment: 35 | POSTGRES_USER: "postgres" 36 | POSTGRES_PASSWORD: "postgres" 37 | new-db-6: 38 | image: postgres:16 39 | ports: 40 | - "6205:5432" 41 | environment: 42 | POSTGRES_USER: "postgres" 43 | POSTGRES_PASSWORD: "postgres" 44 | localstack: 45 | image: localstack/localstack 46 | ports: 47 | - "127.0.0.1:4566:4566" 48 | - "127.0.0.1:4571:4571" 49 | environment: 50 | - SERVICES=secretsmanager,s3 51 | - DEFAULT_REGION=us-east-1 52 | -------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | # Scripts 2 | 3 | This directory contains runtime scripts for local development and testing. 4 | 5 | `tools` subdirectory has been introduced to isolate the fundamental scripts used for development and testing from some tooling / debug scripts. 6 | 7 | ## Cleanup of environment 8 | 9 | Between running the server in different modes, it is recommended to clean up the environment. This can be done by running: 10 | 11 | ```bash 12 | docker compose -f docker-compose.dev.yaml down 13 | docker compose -f docker-compose.dev.yaml up -d 14 | ``` 15 | 16 | ## Running server in different deployment modes 17 | 18 | ### Standard 19 | 20 | This is used currently for local only. It is the default mode of operation, in which the server reads/writes data to the CPU database (both irises and graph data) 21 | 22 | ### Genesis Local Testing 23 | 24 | First, you need to generate some test data for the Genesis mode. 
This is done by running the following command: 25 | 26 | ```bash 27 | cargo run --release -p iris-mpc-bins --bin generate-benchmark-data 28 | ``` 29 | 30 | ```bash 31 | docker compose -f docker-compose.test.genesis.yaml up init_db 32 | ``` 33 | 34 | In another terminal, run: 35 | 36 | ```shell 37 | SMPC__HNSW_SCHEMA_NAME_SUFFIX=_hnsw GENESIS_MAX_HEIGHT=100 ./scripts/run-server.sh 0 genesis 38 | SMPC__HNSW_SCHEMA_NAME_SUFFIX=_hnsw GENESIS_MAX_HEIGHT=100 ./scripts/run-server.sh 1 genesis 39 | SMPC__HNSW_SCHEMA_NAME_SUFFIX=_hnsw GENESIS_MAX_HEIGHT=100 ./scripts/run-server.sh 2 genesis 40 | ``` 41 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc-upgrade/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | worker_processes auto; 2 | 3 | error_log /dev/stderr notice; 4 | pid /tmp/nginx.pid; 5 | 6 | events { 7 | worker_connections 1024; 8 | } 9 | 10 | http { 11 | log_format main '$remote_addr - $remote_user [$time_local]' 12 | 13 | access_log /dev/stdout basic; 14 | 15 | server { 16 | listen 6443 ssl; 17 | http2 on; 18 | 19 | ssl_certificate /etc/nginx/cert/server-cert.pem; 20 | ssl_certificate_key /etc/nginx/cert/server-key.pem; 21 | 22 | ssl_protocols TLSv1.3; 23 | ssl_ciphers HIGH:!aNULL:!MD5; 24 | ssl_prefer_server_ciphers on; 25 | 26 | # Enable session resumption to improve performance 27 | ssl_session_cache shared:SSL:10m; 28 | ssl_session_timeout 1h; 29 | 30 | client_max_body_size 100M; 31 | 32 | # gRPC reverse proxy setup 33 | location / { 34 | grpc_pass grpc://reshare-server-2:7000; # Forward to the gRPC server 35 | error_page 502 = /error502grpc; # Custom error page for GRPC backend issues 36 | } 37 | 38 | # Custom error page 39 | location = /error502grpc { 40 | internal; 41 | default_type text/plain; 42 | return 502 "Bad Gateway: gRPC server unreachable."; 43 | } 44 | 45 | location = /ping { 46 | grpc_pass grpc://127.0.0.1:8000; # Forward to the gRPC server 47 | error_page 502 = /error502grpc; # Custom error page for GRPC backend issues 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /deploy/stage/common-values-ampc-hnsw-anon-stats-server.yaml: -------------------------------------------------------------------------------- 1 | image: "ghcr.io/worldcoin/anon-stats-server:v0.24.1@sha256:9d119acf7b3336d9244444dab51835611c500347d0c8d514eb72b5b952db4cd0" 2 | 3 | environment: stage 4 | replicaCount: 1 5 | 6 | strategy: 7 | type: Recreate 8 | 9 | datadog: 10 | enabled: true 11 | 12 | hostNetwork: false 13 | 14 | ports: 15 | - containerPort: 3000 16 | name: health 17 | protocol: TCP 18 | - containerPort: 4000 19 | name: tcp-4000 20 | protocol: TCP 21 | - containerPort: 4001 22 | name: tcp-4001 23 | protocol: TCP 24 | - containerPort: 4002 25 | name: tcp-4002 26 | protocol: TCP 27 | - containerPort: 4100 28 | name: tcp-4100 29 | protocol: TCP 30 | - containerPort: 4101 31 | name: tcp-4101 32 | protocol: TCP 33 | - containerPort: 4102 34 | name: tcp-4102 35 | protocol: TCP 36 | 37 | startupProbe: 38 | httpGet: 39 | path: /health 40 | port: health 41 | 42 | livenessProbe: 43 | httpGet: 44 | path: /health 45 | port: health 46 | 47 | readinessProbe: 48 | periodSeconds: 20 49 | failureThreshold: 4 50 | httpGet: 51 | path: /health 52 | port: health 53 | 54 | resources: 55 | limits: 56 | cpu: "2" 57 | memory: 10Gi 58 | requests: 59 | cpu: "2" 60 | memory: 10Gi 61 | 62 | imagePullSecrets: 63 | - name: github-secret 64 | 65 | 
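# 65534 is the conventional unprivileged "nobody" user and group.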
podSecurityContext: 66 | runAsUser: 65534 67 | runAsGroup: 65534 68 | 69 | mountSSLCerts: 70 | enabled: true 71 | mountPath: /etc/ssl/private 72 | -------------------------------------------------------------------------------- /scripts/purge_stage/purge_stage_mongo_collections.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -z "$1" ]; then 4 | printf "\nError: Cleanup type parameter is required\n" 5 | printf "Usage: %s \n" "$0" 6 | printf "Available cleanup types: gpu, cpu\n" 7 | exit 1 8 | fi 9 | 10 | CLEANUP_TYPE=$1 11 | 12 | printf "\n==============================" 13 | printf "\nActivate teleport tunnel to mongo_db iris and delete collections (%s)\n" "$CLEANUP_TYPE" 14 | 15 | # Login to teleport 16 | tsh login --proxy=teleport.worldcoin.dev:443 --auth=okta 17 | tsh db login --db-user developer-read-write mongo-atlas-iris-stage --db-name iris 18 | tsh proxy db --tunnel mongo-atlas-iris-stage -p 60003 --db-user arn:aws:iam::510867353226:role/developer-read-write & 19 | 20 | # Wait for proxy connection to establish 21 | sleep 5 22 | 23 | # MongoDB connection string 24 | MONGO_URI="mongodb://127.0.0.1:60003/iris?serverSelectionTimeoutMS=1000000" 25 | 26 | # Select the appropriate cleanup script based on the cleanup type 27 | case "$CLEANUP_TYPE" in 28 | "gpu") 29 | printf "\nRunning GPU collections cleanup script\n" 30 | ./delete_mongodb_gpu_collections.sh "$MONGO_URI" 31 | ;; 32 | "cpu") 33 | printf "\nRunning CPU collections cleanup script\n" 34 | ./delete_mongodb_cpu_collections.sh "$MONGO_URI" 35 | ;; 36 | *) 37 | printf "\nUnknown cleanup type: %s\n" "$CLEANUP_TYPE" 38 | printf "Available cleanup types: gpu, cpu\n" 39 | exit 1 40 | ;; 41 | esac 42 | 43 | printf "\nCleanup completed successfully\n" 44 | -------------------------------------------------------------------------------- /iris-mpc-cpu/examples/hnsw-ex.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use aes_prng::AesRng; 4 | use eyre::Result; 5 | use iris_mpc_common::iris_db::iris::IrisCode; 6 | use iris_mpc_cpu::{ 7 | hawkers::plaintext_store::PlaintextStore, 8 | hnsw::{GraphMem, HnswSearcher, SortedNeighborhood}, 9 | }; 10 | use rand::SeedableRng; 11 | 12 | const DATABASE_SIZE: usize = 1_000; 13 | 14 | fn main() -> Result<()> { 15 | let rt = tokio::runtime::Builder::new_multi_thread() 16 | .enable_all() 17 | .build() 18 | .unwrap(); 19 | 20 | let (_vector, _graph) = rt.block_on(async move { 21 | let mut rng = AesRng::seed_from_u64(0_u64); 22 | let mut vector = PlaintextStore::new(); 23 | let mut graph = GraphMem::new(); 24 | let searcher = HnswSearcher::new_with_test_parameters(); 25 | 26 | for idx in 0..DATABASE_SIZE { 27 | let raw_query = IrisCode::random_rng(&mut rng); 28 | let query = Arc::new(raw_query); 29 | let insertion_layer = searcher.gen_layer_rng(&mut rng)?; 30 | searcher 31 | .insert::<_, SortedNeighborhood<_>>( 32 | &mut vector, 33 | &mut graph, 34 | &query, 35 | insertion_layer, 36 | ) 37 | .await?; 38 | if idx % 100 == 99 { 39 | println!("{}", idx + 1); 40 | } 41 | } 42 | Ok::<_, eyre::Report>((vector, graph)) 43 | })?; 44 | 45 | Ok(()) 46 | } 47 | -------------------------------------------------------------------------------- /adr/001-storage-and-processing-of-iris-shares.md: -------------------------------------------------------------------------------- 1 | # ADR-001: Storage and processing of iris shares 2 | 3 | ## Context 4 | - We have iris code shares that, 
in their raw form, are 12800 bytes (u8) each and stored in a PostgreSQL database. 5 | - For secret sharing purposes, these iris codes need to be represented internally as u16. 6 | - The cuBLAS library (used for efficient GPU-accelerated comparisons) requires the data to be in i8 format. 7 | - Performing repeated on-the-fly conversions from u16 to i8 can degrade performance significantly due to large data volumes. 8 | 9 | ## Decision 10 | Store and load iris code shares in a pre-processed i8 format (split into two limbs from each u16), so that runtime conversions are minimized or eliminated. 11 | 12 | ## Rationale 13 | - **Performance**: The conversion from u16 to i8 (subtracting 128 for cuBLAS alignment) can be expensive if done repeatedly for large datasets. 14 | - **cuBLAS Requirements**: The library operates natively on i8 data. Having the data already in the correct format avoids overhead. 15 | - **Simplicity in Usage**: Once data is in i8 format, usage in GPU kernels is straightforward. 16 | 17 | ## Consequences 18 | - **Pros**: 19 | - Streamlined GPU-based operations (no extra conversion step in the processing pipeline). 20 | - **Cons**: 21 | - Requires additional pre-processing upon ingestion. 22 | 23 | By pre-processing iris code shares into i8 and storing them in that format, we reduce runtime overhead and align with the cuBLAS requirements, resulting in more efficient iris-code comparisons. 24 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/constants.rs: -------------------------------------------------------------------------------- 1 | /// AWS: default base URL for downloading node encryption public keys. 2 | pub const AWS_PUBLIC_KEY_BASE_URL: &str = "http://localhost:4566/wf-dev-public-keys"; 3 | 4 | /// AWS: default S3 system request bucket. 5 | pub const AWS_S3_REQUEST_BUCKET_NAME: &str = "wf-smpcv2-dev-sns-requests"; 6 | 7 | /// AWS: default SNS system request ingress queue topic. 8 | pub const AWS_SNS_REQUEST_TOPIC_ARN: &str = 9 | "arn:aws:sns:us-east-1:000000000000:iris-mpc-input.fifo"; 10 | 11 | /// AWS: default SQS system response queue URL. 12 | pub const AWS_SQS_RESPONSE_QUEUE_URL: &str = "http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/iris-mpc-results-us-east-1.fifo"; 13 | 14 | /// AWS: default SQS long polling wait time in seconds. 15 | pub const AWS_SQS_LONG_POLL_WAIT_TIME: usize = 10; 16 | 17 | /// Test graph sizes. 18 | pub const GRAPH_SIZE_RANGE: [usize; 8] = [1, 10, 100, 1_000, 10_000, 100_000, 1_000_000, 2_000_000]; 19 | 20 | /// Node config kinds. 21 | pub const NODE_CONFIG_KIND: [&str; 2] = [NODE_CONFIG_KIND_MAIN, NODE_CONFIG_KIND_GENESIS]; 22 | pub const NODE_CONFIG_KIND_GENESIS: &str = "genesis"; 23 | pub const NODE_CONFIG_KIND_MAIN: &str = "main"; 24 | 25 | /// MPC parties. 26 | pub const N_PARTIES: usize = PARTY_INDICES.len(); 27 | pub const PARTY_INDICES: [usize; 3] = [PARTY_IDX_0, PARTY_IDX_1, PARTY_IDX_2]; 28 | pub const PARTY_IDX_0: usize = 0; 29 | pub const PARTY_IDX_1: usize = 1; 30 | pub const PARTY_IDX_2: usize = 2; 31 | 32 | /// Default application environment. 
33 | pub const DEFAULT_ENV: &str = "dev"; 34 | -------------------------------------------------------------------------------- /iris-mpc/src/services/init/mod.rs: -------------------------------------------------------------------------------- 1 | use eyre::Result; 2 | use iris_mpc_common::{config::Config, helpers::kms_dh::derive_shared_secret}; 3 | 4 | pub async fn initialize_chacha_seeds(config: Config) -> Result<([u32; 8], [u32; 8])> { 5 | // Init RNGs 6 | let own_key_arn = config 7 | .kms_key_arns 8 | .0 9 | .get(config.party_id) 10 | .expect("Expected value not found in kms_key_arns"); 11 | let dh_pairs = match config.party_id { 12 | 0 => (1usize, 2usize), 13 | 1 => (2usize, 0usize), 14 | 2 => (0usize, 1usize), 15 | _ => unimplemented!(), 16 | }; 17 | 18 | let dh_pair_0: &str = config 19 | .kms_key_arns 20 | .0 21 | .get(dh_pairs.0) 22 | .expect("Expected value not found in kms_key_arns"); 23 | let dh_pair_1: &str = config 24 | .kms_key_arns 25 | .0 26 | .get(dh_pairs.1) 27 | .expect("Expected value not found in kms_key_arns"); 28 | 29 | // To be used only for e2e testing where we use localstack. There's a bug in 30 | // localstack's implementation of `derive_shared_secret`. See: https://github.com/localstack/localstack/pull/12071 31 | let chacha_seeds: ([u32; 8], [u32; 8]) = if config.fixed_shared_secrets { 32 | ([0u32; 8], [0u32; 8]) 33 | } else { 34 | ( 35 | bytemuck::cast(derive_shared_secret(own_key_arn, dh_pair_0).await?), 36 | bytemuck::cast(derive_shared_secret(own_key_arn, dh_pair_1).await?), 37 | ) 38 | }; 39 | 40 | Ok(chacha_seeds) 41 | } 42 | -------------------------------------------------------------------------------- /deploy/prod/common-values-anon-stats-server.yaml: -------------------------------------------------------------------------------- 1 | # to be later updated to same release at iris-mpc 2 | image: "ghcr.io/worldcoin/anon-stats-server:v0.24.1@sha256:9d119acf7b3336d9244444dab51835611c500347d0c8d514eb72b5b952db4cd0" 3 | 4 | environment: prod 5 | replicaCount: 1 6 | 7 | strategy: 8 | type: Recreate 9 | 10 | datadog: 11 | enabled: true 12 | 13 | hostNetwork: false 14 | 15 | ports: 16 | - containerPort: 3000 17 | name: health 18 | protocol: TCP 19 | - containerPort: 4000 20 | name: tcp-4000 21 | protocol: TCP 22 | - containerPort: 4001 23 | name: tcp-4001 24 | protocol: TCP 25 | - containerPort: 4002 26 | name: tcp-4002 27 | protocol: TCP 28 | - containerPort: 4100 29 | name: tcp-4100 30 | protocol: TCP 31 | - containerPort: 4101 32 | name: tcp-4101 33 | protocol: TCP 34 | - containerPort: 4102 35 | name: tcp-4102 36 | protocol: TCP 37 | 38 | startupProbe: 39 | httpGet: 40 | path: /health 41 | port: health 42 | 43 | livenessProbe: 44 | httpGet: 45 | path: /health 46 | port: health 47 | 48 | readinessProbe: 49 | periodSeconds: 20 50 | failureThreshold: 4 51 | httpGet: 52 | path: /health 53 | port: health 54 | 55 | resources: 56 | limits: 57 | cpu: "14" 58 | memory: 40Gi 59 | requests: 60 | cpu: "14" 61 | memory: 40Gi 62 | 63 | imagePullSecrets: 64 | - name: github-secret 65 | 66 | podSecurityContext: 67 | runAsUser: 65534 68 | runAsGroup: 65534 69 | 70 | mountSSLCerts: 71 | enabled: true 72 | mountPath: /etc/ssl/private 73 | -------------------------------------------------------------------------------- /iris-mpc-bins/bin/iris-mpc/server/iris_mpc_hawk.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::needless_range_loop)] 2 | 3 | use clap::Parser; 4 | use eyre::Result; 5 | use 
iris_mpc::server::server_main; 6 | use iris_mpc_common::config::{Config, Opt}; 7 | use iris_mpc_common::tracing::initialize_tracing; 8 | use std::process::exit; 9 | 10 | fn main() -> Result<()> { 11 | dotenvy::dotenv().ok(); 12 | 13 | println!("Init config"); 14 | let mut config: Config = Config::load_config("SMPC").unwrap(); 15 | config.overwrite_defaults_with_cli_args(Opt::parse()); 16 | 17 | // Build the Tokio runtime first so any telemetry exporters that spawn tasks have a runtime. 18 | let runtime = tokio::runtime::Builder::new_multi_thread() 19 | .worker_threads(config.tokio_threads) 20 | .enable_all() 21 | .build() 22 | .unwrap(); 23 | 24 | runtime.block_on(async { 25 | println!("Init tracing"); 26 | let _tracing_shutdown_handle = match initialize_tracing(config.service.clone()) { 27 | Ok(handle) => handle, 28 | Err(e) => { 29 | eprintln!("Failed to initialize tracing: {:?}", e); 30 | return Err(e); 31 | } 32 | }; 33 | 34 | match server_main(config).await { 35 | Ok(_) => { 36 | tracing::info!("Server exited normally"); 37 | } 38 | Err(e) => { 39 | tracing::error!("Server exited with error: {:?}", e); 40 | exit(1); 41 | } 42 | } 43 | Ok(()) 44 | }) 45 | } 46 | -------------------------------------------------------------------------------- /iris-mpc-gpu/benches/chacha.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, Criterion}; 2 | use cudarc::driver::CudaDevice; 3 | use iris_mpc_gpu::rng::chacha::ChaChaCudaRng; 4 | 5 | pub fn criterion_benchmark_chacha12_runner(c: &mut Criterion, buf_size: usize) { 6 | let mut group = c.benchmark_group(format!( 7 | "ChaCha12 (buf_size = {}MB)", 8 | buf_size * 4 / (1024 * 1024) 9 | )); 10 | 11 | group.throughput(criterion::Throughput::Bytes( 12 | (buf_size * std::mem::size_of::()) as u64, 13 | )); 14 | let mut chacha = ChaChaCudaRng::init(buf_size, CudaDevice::new(0).unwrap(), [0u32; 8]); 15 | group.bench_function("with copy to host", move |b| { 16 | b.iter(|| { 17 | chacha.fill_rng(); 18 | }) 19 | }); 20 | let mut chacha = ChaChaCudaRng::init(buf_size, CudaDevice::new(0).unwrap(), [0u32; 8]); 21 | let dev = CudaDevice::new(0).unwrap(); 22 | let stream = dev.fork_default_stream().unwrap(); 23 | group.bench_function("without copy to host", move |b| { 24 | b.iter(|| { 25 | chacha.fill_rng_no_host_copy(buf_size, &stream); 26 | }) 27 | }); 28 | group.finish(); 29 | } 30 | 31 | pub fn criterion_benchmark_chacha12(c: &mut Criterion) { 32 | for log_buf_size in 20..=30 { 33 | let buf_size = (1usize << log_buf_size) / 4; 34 | criterion_benchmark_chacha12_runner(c, buf_size); 35 | } 36 | } 37 | 38 | criterion_group!( 39 | name = rng_benches; 40 | config = Criterion::default(); 41 | targets = criterion_benchmark_chacha12 42 | ); 43 | criterion_main!(rng_benches); 44 | -------------------------------------------------------------------------------- /iris-mpc-common/tests/sha256.rs: -------------------------------------------------------------------------------- 1 | mod tests { 2 | use iris_mpc_common::helpers::sha256::sha256_as_hex_string; 3 | 4 | #[test] 5 | fn test_calculate_sha256() { 6 | // Arrange 7 | let data = "Hello, world!"; 8 | let expected_hash = "315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3"; 9 | 10 | // Act 11 | let calculated_hash = sha256_as_hex_string(data); 12 | 13 | // Assert 14 | assert_eq!( 15 | calculated_hash, expected_hash, 16 | "The calculated SHA-256 hash should match the expected hash" 17 | ); 18 | } 19 | 20 | #[test] 21 | fn 
test_calculate_sha256_empty_string() { 22 | // Arrange 23 | let data = ""; 24 | let expected_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; 25 | 26 | // Act 27 | let calculated_hash = sha256_as_hex_string(data); 28 | 29 | // Assert 30 | assert_eq!( 31 | calculated_hash, expected_hash, 32 | "The calculated SHA-256 hash for an empty string should match the expected hash" 33 | ); 34 | } 35 | 36 | #[test] 37 | fn test_calculate_sha256_different_data() { 38 | // Arrange 39 | let data_1 = "Data 1"; 40 | let data_2 = "Data 2"; 41 | let hash_1 = sha256_as_hex_string(data_1); 42 | let hash_2 = sha256_as_hex_string(data_2); 43 | 44 | // Act & Assert 45 | assert_ne!( 46 | hash_1, hash_2, 47 | "SHA-256 hashes of different data should not be equal" 48 | ); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /Dockerfile.reshare-protocol: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 public.ecr.aws/ubuntu/ubuntu:22.04 as build-image 2 | 3 | WORKDIR /src 4 | RUN apt-get update && apt-get install -y \ 5 | curl \ 6 | build-essential \ 7 | libssl-dev \ 8 | texinfo \ 9 | libcap2-bin \ 10 | pkg-config \ 11 | git \ 12 | devscripts \ 13 | debhelper \ 14 | ca-certificates \ 15 | protobuf-compiler \ 16 | wget 17 | 18 | RUN curl https://sh.rustup.rs -sSf | sh -s -- -y 19 | ENV PATH "/root/.cargo/bin:${PATH}" 20 | ENV RUSTUP_HOME "/root/.rustup" 21 | ENV CARGO_HOME "/root/.cargo" 22 | RUN rustup toolchain install 1.89.0 23 | RUN rustup default 1.89.0 24 | RUN rustup component add cargo 25 | RUN cargo install cargo-build-deps \ 26 | && cargo install cargo-edit --version 0.13.6 --locked 27 | 28 | FROM --platform=linux/amd64 build-image as build-app 29 | WORKDIR /src/gpu-iris-mpc 30 | COPY . . 31 | 32 | RUN cargo build -p iris-mpc-bins --release --target x86_64-unknown-linux-gnu --bin reshare-client --bin reshare-server 33 | 34 | FROM --platform=linux/amd64 public.ecr.aws/ubuntu/ubuntu:22.04 35 | ENV DEBIAN_FRONTEND=noninteractive 36 | 37 | RUN apt-get update && apt-get install -y ca-certificates awscli 38 | COPY certs /usr/local/share/ca-certificates/ 39 | RUN update-ca-certificates 40 | 41 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/reshare-client /bin/reshare-client 42 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/reshare-server /bin/reshare-server 43 | COPY scripts/reshare-client.sh /sbin/reshare-client.sh 44 | 45 | USER 65534 46 | ENTRYPOINT ["/sbin/reshare-server.sh"] 47 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/utils/serialization/types/iris_base64.rs: -------------------------------------------------------------------------------- 1 | //! Implements a data serialization format targeting the `IrisCode` type. 2 | //! 3 | //! This format is meant to be compatible with the base64 encoding used by 4 | //! the Open IRIS Python library. 5 | 6 | use eyre::Result; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | /// Iris code representation using base64 encoding compatible with Open IRIS. 
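/// A single serialized record looks like the following (values truncated, purely illustrative):
/// `{"iris_codes": "...", "mask_codes": "..."}`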
10 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 11 | pub struct Base64IrisCode { 12 | pub iris_codes: String, 13 | pub mask_codes: String, 14 | } 15 | 16 | /* ------------------------------- I/O ------------------------------ */ 17 | 18 | pub fn read_iris_base64(reader: &mut R) -> Result { 19 | let data = serde_json::from_reader(reader)?; 20 | Ok(data) 21 | } 22 | 23 | pub fn write_iris_base64(writer: &mut W, data: &Base64IrisCode) -> Result<()> { 24 | serde_json::to_writer(writer, &data)?; 25 | Ok(()) 26 | } 27 | 28 | pub fn read_from_iris_ndjson( 29 | reader: R, 30 | ) -> impl Iterator> { 31 | let iter = serde_json::Deserializer::from_reader(reader) 32 | .into_iter() 33 | .map(|res| res.map_err(Into::into)); 34 | iter 35 | } 36 | 37 | pub fn write_to_iris_ndjson>( 38 | writer: &mut W, 39 | data: D, 40 | ) -> Result<()> { 41 | for json_pt in data { 42 | serde_json::to_writer(&mut *writer, &json_pt)?; 43 | writer.write_all(b"\n")?; // Write a newline after each JSON object 44 | } 45 | Ok(()) 46 | } 47 | -------------------------------------------------------------------------------- /.github/workflows/build-and-push-debug.yaml: -------------------------------------------------------------------------------- 1 | name: Build and push docker Debug image 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | concurrency: 7 | group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' 8 | cancel-in-progress: true 9 | 10 | env: 11 | REGISTRY: ghcr.io 12 | IMAGE_NAME: ${{ github.repository }}-debug 13 | 14 | jobs: 15 | docker: 16 | timeout-minutes: 40 17 | runs-on: 18 | labels: ubuntu-22.04-16core 19 | permissions: 20 | packages: write 21 | contents: read 22 | attestations: write 23 | id-token: write 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 27 | - name: Set up QEMU 28 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 29 | - name: Set up Docker Buildx 30 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 31 | - name: Log in to the Container registry 32 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef 33 | with: 34 | registry: ${{ env.REGISTRY }} 35 | username: ${{ github.repository_owner }} 36 | password: ${{ secrets.GITHUB_TOKEN }} 37 | - name: Build and Push 38 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 39 | with: 40 | context: . 
41 | push: true 42 | tags: | 43 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} 44 | platforms: linux/amd64 45 | cache-from: type=gha 46 | cache-to: type=gha,mode=max 47 | file: Dockerfile.debug 48 | -------------------------------------------------------------------------------- /deploy/stage/common-values-iris-mpc.yaml: -------------------------------------------------------------------------------- 1 | image: "ghcr.io/worldcoin/iris-mpc:v0.24.1@sha256:a6ef7b878011e2024c071657999a345ccb1ed0b72724e0ca887b5b4ce26d22d6" 2 | 3 | environment: stage 4 | replicaCount: 1 5 | 6 | strategy: 7 | type: Recreate 8 | 9 | datadog: 10 | enabled: true 11 | 12 | ports: 13 | - containerPort: 3000 14 | name: health 15 | protocol: TCP 16 | 17 | livenessProbe: 18 | httpGet: 19 | path: /health 20 | port: health 21 | 22 | readinessProbe: 23 | periodSeconds: 30 24 | httpGet: 25 | path: /ready 26 | port: health 27 | 28 | startupProbe: 29 | initialDelaySeconds: 60 30 | failureThreshold: 120 31 | periodSeconds: 30 32 | httpGet: 33 | path: /ready 34 | port: health 35 | 36 | resources: 37 | limits: 38 | cpu: 28 39 | memory: 100Gi 40 | nvidia.com/gpu: 1 41 | vpc.amazonaws.com/efa: 1 42 | requests: 43 | cpu: 27 44 | memory: 99Gi 45 | nvidia.com/gpu: 1 46 | vpc.amazonaws.com/efa: 1 47 | 48 | imagePullSecrets: 49 | - name: github-secret 50 | 51 | nodeSelector: 52 | kubernetes.io/arch: amd64 53 | 54 | hostNetwork: true 55 | 56 | podSecurityContext: 57 | runAsUser: 65534 58 | runAsGroup: 65534 59 | 60 | tolerations: 61 | - key: "dedicated" 62 | operator: "Equal" 63 | value: "gpuGroup" 64 | effect: "NoSchedule" 65 | 66 | keelPolling: 67 | # -- Specifies whether keel should poll for container updates 68 | enabled: true 69 | 70 | preStop: 71 | # preStop.sleepPeriod specifies the time spent in Terminating state before SIGTERM is sent 72 | sleepPeriod: 10 73 | 74 | # terminationGracePeriodSeconds specifies the grace time between SIGTERM and SIGKILL 75 | terminationGracePeriodSeconds: 120 76 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/utils/serialization/mod.rs: -------------------------------------------------------------------------------- 1 | use eyre::Result; 2 | use serde::Deserialize; 3 | use serde::{de::DeserializeOwned, Serialize}; 4 | use std::fs::File; 5 | use std::io::{BufReader, BufWriter}; 6 | use std::path::Path; 7 | 8 | pub mod graph; 9 | pub mod iris_ndjson; 10 | pub mod types; 11 | 12 | pub fn write_bin<T: Serialize>(data: &T, filename: &str) -> Result<()> { 13 | // nosemgrep: tainted-path 14 | let file = File::create(filename)?; 15 | let writer = BufWriter::new(file); 16 | bincode::serialize_into(writer, data)?; 17 | Ok(()) 18 | } 19 | 20 | pub fn read_bin<T: DeserializeOwned>(filename: &str) -> Result<T> { 21 | // nosemgrep: tainted-path 22 | let file = File::open(filename)?; 23 | let reader = BufReader::new(file); 24 | let data: T = bincode::deserialize_from(reader)?; 25 | Ok(data) 26 | } 27 | 28 | pub fn write_json<T: Serialize>(data: &T, filename: &str) -> Result<()> { 29 | // nosemgrep: tainted-path 30 | let file = File::create(filename)?; 31 | let writer = BufWriter::new(file); 32 | serde_json::to_writer(writer, &data)?; 33 | Ok(()) 34 | } 35 | 36 | pub fn read_json<T: DeserializeOwned>(filename: &str) -> Result<T> { 37 | // nosemgrep: tainted-path 38 | let file = File::open(filename)?; 39 | let reader = BufReader::new(file); 40 | let data: T = serde_json::from_reader(reader)?; 41 | Ok(data) 42 | } 43 | 44 | pub fn load_toml<'a, T, P>(path: P) -> Result<T> 45 | where 46 | T: Deserialize<'a>, 47 | P: AsRef<Path>, 48 | {
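    // serde_path_to_error wraps the TOML deserializer so that a failure reports the path of the
    // offending key (for example a nested key such as `database.url`), which is presumably why it
    // is preferred here over a plain `toml::from_str`.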
49 | let text = std::fs::read_to_string(path)?; 50 | let de = toml::de::Deserializer::new(&text); 51 | let t = serde_path_to_error::deserialize(de)?; 52 | Ok(t) 53 | } 54 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/genesis/utils/errors.rs: -------------------------------------------------------------------------------- 1 | use iris_mpc_common::IrisSerialId; 2 | use thiserror::Error; 3 | 4 | // Encpasulates a non-exhaustive set of errors raised during indexation. 5 | #[derive(Error, Debug)] 6 | pub enum IndexationError { 7 | #[error("AWS RDS cluster URL is invalid ... check dB configuration")] 8 | AwsRdsInvalidClusterURL, 9 | 10 | #[error("AWS RDS cluster ID not found ... snapshotting failed")] 11 | AwsRdsClusterIdNotFound, 12 | 13 | #[error("AWS RDS cluster URL is invalid ... check dB configuration")] 14 | AwsRdsGetClusterURLs, 15 | 16 | #[error("AWS RDS cluster snapshotting failed: {0}")] 17 | AwsRdsCreateSnapshotFailure(String), 18 | 19 | #[error("Failed to download AWS S3 object")] 20 | AwsS3ObjectDownload, 21 | 22 | #[error("Failed to deserialize AWS S3 object")] 23 | AwsS3ObjectDeserialize, 24 | 25 | #[error("Missing CPU db configuration")] 26 | DbConfigError, 27 | 28 | #[error("Failed to fetch Modification batch from PostgreSQL dB: {0}")] 29 | FetchModificationBatch(String), 30 | 31 | #[error("Current height of indexation exceeds maximum allowed")] 32 | IndexationHeightMismatch, 33 | 34 | #[error("Failed to fetch Iris with given serial ID: {0}")] 35 | MissingSerialId(IrisSerialId), 36 | 37 | #[error("Failed to persist genesis Graph indexation state element {0} to PostgreSQL dB: {1}")] 38 | PersistIndexationStateElement(String, String), 39 | 40 | #[error("Failed to unset genesis Graph indexation state element {0} to PostgreSQL dB: {1}")] 41 | UnsetIndexationStateElement(String, String), 42 | 43 | #[error("Failed to make a copy of the database: {0}")] 44 | DatabaseCopyFailure(String), 45 | } 46 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/genesis/utils/logger.rs: -------------------------------------------------------------------------------- 1 | /// Returns a message for logging. 2 | fn get_formatted_message(component: &str, msg: String) -> String { 3 | format!("HNSW-GENESIS :: {} :: {}", component, msg) 4 | } 5 | 6 | /// Logs & returns a component error message. 7 | /// 8 | /// # Arguments 9 | /// 10 | /// * `component` - A component encapsulating a unit of system functionality. 11 | /// * `msg` - An error message. 12 | /// 13 | pub fn log_error(component: &str, msg: String) -> String { 14 | let msg = get_formatted_message(component, msg); 15 | 16 | // In testing print to stdout. 17 | #[cfg(test)] 18 | println!("ERROR :: {}", msg); 19 | 20 | // Trace as normal. 21 | tracing::error!(msg); 22 | 23 | msg 24 | } 25 | 26 | /// Logs & returns a component information message. 27 | /// 28 | /// # Arguments 29 | /// 30 | /// * `component` - A component encapsulating a unit of system functionality. 31 | /// * `msg` - An information message. 32 | /// 33 | pub fn log_info(component: &str, msg: String) -> String { 34 | let msg = get_formatted_message(component, msg); 35 | 36 | // In testing print to stdout. 37 | #[cfg(test)] 38 | println!("{}", msg); 39 | 40 | // Trace as normal. 41 | tracing::info!(msg); 42 | 43 | msg 44 | } 45 | 46 | /// Logs & returns a component warning message. 
47 | /// 48 | /// # Arguments 49 | /// 50 | /// * `component` - A component encapsulating a unit of system functionality. 51 | /// * `msg` - An information message. 52 | /// 53 | pub fn log_warn(component: &str, msg: String) -> String { 54 | let msg = get_formatted_message(component, msg); 55 | 56 | // In testing print to stdout. 57 | #[cfg(test)] 58 | println!("WARN :: {}", msg); 59 | 60 | tracing::warn!(msg); 61 | 62 | msg 63 | } 64 | -------------------------------------------------------------------------------- /iris-mpc-py/src/py_hnsw/pyclasses/graph_store.rs: -------------------------------------------------------------------------------- 1 | use iris_mpc_common::IrisVectorId; 2 | use iris_mpc_cpu::{ 3 | hawkers::plaintext_store::PlaintextVectorRef, hnsw::graph::layered_graph::GraphMem, 4 | utils::serialization::graph, 5 | }; 6 | use pyo3::{exceptions::PyIOError, prelude::*}; 7 | 8 | #[pyclass] 9 | #[derive(Clone, Default)] 10 | pub struct PyGraphStore(pub GraphMem); 11 | 12 | #[pymethods] 13 | impl PyGraphStore { 14 | #[new] 15 | pub fn new() -> Self { 16 | Self::default() 17 | } 18 | 19 | #[staticmethod] 20 | pub fn read_from_bin(filename: String) -> PyResult { 21 | let result = graph::try_read_graph_from_file(&filename) 22 | .map_err(|_| PyIOError::new_err("Unable to read graph from file"))?; 23 | 24 | Ok(Self(result)) 25 | } 26 | 27 | pub fn write_to_bin(&self, filename: String) -> PyResult<()> { 28 | graph::write_graph_to_file(&filename, self.0.clone()) 29 | .map_err(|e| PyIOError::new_err(format!("Unable to write to file :: {}", e))) 30 | } 31 | 32 | pub fn get_max_layer(&self) -> u32 { 33 | self.0.layers.len().try_into().unwrap() 34 | } 35 | 36 | pub fn get_layer_nodes(&self, layer_index: usize) -> Option> { 37 | self.0 38 | .layers 39 | .get(layer_index) 40 | .map(|layer| layer.links.keys().map(|k| k.serial_id()).collect()) 41 | } 42 | 43 | pub fn get_links(&self, vector_id: u32, layer_index: usize) -> PyResult>> { 44 | let raw_ret = 45 | self.0.layers[layer_index].get_links(&IrisVectorId::from_serial_id(vector_id)); 46 | Ok(raw_ret.map(|neighborhood| neighborhood.iter().map(|nb| nb.serial_id()).collect())) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /.github/workflows/temp-branch-build-and-push.yaml: -------------------------------------------------------------------------------- 1 | name: Branch - Build and push docker image 2 | 3 | on: 4 | push: 5 | branches: 6 | - "dev" 7 | - "chore/rustc-1.89" 8 | 9 | concurrency: 10 | group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" 11 | cancel-in-progress: true 12 | 13 | env: 14 | REGISTRY: ghcr.io 15 | IMAGE_NAME: ${{ github.repository }} 16 | 17 | jobs: 18 | docker: 19 | timeout-minutes: 40 20 | runs-on: 21 | labels: ubuntu-22.04-16core 22 | permissions: 23 | packages: write 24 | contents: read 25 | attestations: write 26 | id-token: write 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 30 | - name: Set up QEMU 31 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 32 | - name: Set up Docker Buildx 33 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 34 | - name: Log in to the Container registry 35 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef 36 | with: 37 | registry: ${{ env.REGISTRY }} 38 | username: ${{ github.repository_owner }} 39 | password: ${{ secrets.GITHUB_TOKEN }} 40 | - name: 
Build and Push 41 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 42 | with: 43 | context: . 44 | push: true 45 | tags: | 46 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} 47 | platforms: linux/amd64 48 | build-args: | 49 | ARCHITECTURE=x86_64 50 | cache-from: type=gha 51 | cache-to: type=gha,mode=max 52 | -------------------------------------------------------------------------------- /deploy/prod/smpcv2-0-prod/values-reshare-server.yaml: -------------------------------------------------------------------------------- 1 | args: 2 | - "--bind-addr" 3 | - "0.0.0.0:7000" 4 | - "--db-url" 5 | - "$(SMPC__DATABASE__URL)" 6 | - "--party-id" 7 | - "0" 8 | - "--environment" 9 | - "$(ENVIRONMENT)" 10 | - "--sender1-party-id" 11 | - "1" 12 | - "--sender2-party-id" 13 | - "2" 14 | - "--batch-size" 15 | - "100" 16 | - "--max-buffer-size" 17 | - "10" 18 | - "--healthcheck-port" 19 | - "3001" 20 | 21 | initContainer: 22 | enabled: true 23 | image: "amazon/aws-cli:2.17.62" 24 | name: "reshare-proto-dns-records-updater" 25 | env: 26 | - name: PARTY_ID 27 | value: "1" 28 | - name: MY_POD_IP 29 | valueFrom: 30 | fieldRef: 31 | fieldPath: status.podIP 32 | configMap: 33 | init.sh: | 34 | #!/usr/bin/env bash 35 | 36 | # Set up environment variables 37 | HOSTED_ZONE_ID=$(aws route53 list-hosted-zones-by-name --dns-name "$PARTY_ID".smpcv2.worldcoin.org --query "HostedZones[].Id" --output text) 38 | 39 | # Generate the JSON content in memory 40 | BATCH_JSON=$(cat < Result<()> { 21 | let app = Router::new().route("/health", get(|| async {})); // Implicit 200 response 22 | let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", healthcheck_port)) 23 | .await 24 | .wrap_err("Healthcheck listener bind error")?; 25 | axum::serve(listener, app) 26 | .await 27 | .wrap_err("healthcheck listener server launch error")?; 28 | Ok(()) 29 | } 30 | 31 | pub fn extract_domain(address: &str, remove_protocol: bool) -> Result { 32 | // Try to split the address into domain and port parts. 33 | let mut address = address.trim().to_string(); 34 | if remove_protocol { 35 | address = address 36 | .strip_prefix("http://") 37 | .or_else(|| address.strip_prefix("https://")) 38 | .unwrap_or(&address) 39 | .to_string(); 40 | } 41 | 42 | if let Some((domain, _port)) = address.rsplit_once(':') { 43 | Ok(domain.to_string()) 44 | } else { 45 | Err(IoError::new( 46 | ErrorKind::InvalidInput, 47 | "Invalid address format", 48 | )) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/utils/serialization/types/graph_v3.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use std::collections::HashMap; 3 | 4 | /// An in-memory implementation of an HNSW hierarchical graph. 5 | /// 6 | /// This type is a serialization-focused adapter, provided for long-term 7 | /// compatibility and portability of serialized data. 8 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 9 | pub struct GraphV3 { 10 | pub entry_point: Vec, 11 | pub layers: Vec, 12 | } 13 | 14 | /// Type associated with the `GraphV3` serialization type. 15 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 16 | pub struct EntryPoint { 17 | pub point: VectorId, 18 | pub layer: usize, 19 | } 20 | 21 | /// Type associated with the `GraphV3` serialization type. 
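/// A single graph layer: adjacency lists keyed by `VectorId`, plus the layer's `set_hash`.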
22 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 23 | pub struct Layer { 24 | pub links: HashMap<VectorId, EdgeIds>, 25 | pub set_hash: u64, 26 | } 27 | 28 | /// Type associated with the `GraphV3` serialization type. 29 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 30 | pub struct EdgeIds(pub Vec<VectorId>); 31 | 32 | /// Type associated with the `GraphV3` serialization type. 33 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] 34 | pub struct VectorId { 35 | pub id: u32, 36 | pub version: i16, 37 | } 38 | 39 | /* ------------------------------- I/O ------------------------------ */ 40 | 41 | pub fn read_graph_v3<R: std::io::Read>(reader: &mut R) -> eyre::Result<GraphV3> { 42 | let data = bincode::deserialize_from(reader)?; 43 | Ok(data) 44 | } 45 | 46 | pub fn write_graph_v3<W: std::io::Write>(writer: &mut W, data: &GraphV3) -> eyre::Result<()> { 47 | bincode::serialize_into(writer, data)?; 48 | Ok(()) 49 | } 50 | -------------------------------------------------------------------------------- /iris-mpc-upgrade-hawk/tests/utils/logger.rs: -------------------------------------------------------------------------------- 1 | /// Returns a message for logging. 2 | fn get_formatted_message(component: &str, msg: String) -> String { 3 | format!("HNSW-TESTS :: {} :: {}", component, msg) 4 | } 5 | 6 | /// Logs & returns a component error message. 7 | /// 8 | /// # Arguments 9 | /// 10 | /// * `component` - A component encapsulating a unit of system functionality. 11 | /// * `msg` - An error message. 12 | /// 13 | #[allow(dead_code)] 14 | pub fn log_error(component: &str, msg: String) -> String { 15 | let msg = get_formatted_message(component, msg); 16 | 17 | // In testing print to stdout. 18 | #[cfg(test)] 19 | println!("ERROR :: {}", msg); 20 | 21 | // Trace as normal. 22 | tracing::error!(msg); 23 | 24 | msg 25 | } 26 | 27 | /// Logs & returns a component information message. 28 | /// 29 | /// # Arguments 30 | /// 31 | /// * `component` - A component encapsulating a unit of system functionality. 32 | /// * `msg` - An information message. 33 | /// 34 | pub fn log_info(component: &str, msg: &str) -> String { 35 | let msg = get_formatted_message(component, msg.to_string()); 36 | 37 | // In testing print to stdout. 38 | #[cfg(test)] 39 | println!("{}", msg); 40 | 41 | // Trace as normal. 42 | tracing::info!(msg); 43 | 44 | msg 45 | } 46 | 47 | /// Logs & returns a component warning message. 48 | /// 49 | /// # Arguments 50 | /// 51 | /// * `component` - A component encapsulating a unit of system functionality. 52 | /// * `msg` - A warning message. 53 | /// 54 | #[allow(dead_code)] 55 | pub fn log_warn(component: &str, msg: String) -> String { 56 | let msg = get_formatted_message(component, msg); 57 | 58 | // In testing print to stdout. 59 | #[cfg(test)] 60 | println!("WARN :: {}", msg); 61 | 62 | tracing::warn!(msg); 63 | 64 | msg 65 | } 66 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/utils/serialization/types/graph_v2.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use std::collections::HashMap; 3 | 4 | /// An in-memory implementation of an HNSW hierarchical graph. 5 | /// 6 | /// This type is a serialization-focused adapter, provided for long-term 7 | /// compatibility and portability of serialized data.
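///
/// Relative to `GraphV1`, each layer additionally carries a `set_hash`; relative to `GraphV3`,
/// a single optional entry point is stored rather than a list of entry points.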
8 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 9 | pub struct GraphV2 { 10 | pub entry_point: Option<EntryPoint>, 11 | pub layers: Vec<Layer>, 12 | } 13 | 14 | /// Type associated with the `GraphV2` serialization type. 15 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 16 | pub struct EntryPoint { 17 | pub point: VectorId, 18 | pub layer: usize, 19 | } 20 | 21 | /// Type associated with the `GraphV2` serialization type. 22 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 23 | pub struct Layer { 24 | pub links: HashMap<VectorId, EdgeIds>, 25 | pub set_hash: u64, 26 | } 27 | 28 | /// Type associated with the `GraphV2` serialization type. 29 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 30 | pub struct EdgeIds(pub Vec<VectorId>); 31 | 32 | /// Type associated with the `GraphV2` serialization type. 33 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] 34 | pub struct VectorId { 35 | pub id: u32, 36 | pub version: i16, 37 | } 38 | 39 | /* ------------------------------- I/O ------------------------------ */ 40 | 41 | pub fn read_graph_v2<R: std::io::Read>(reader: &mut R) -> eyre::Result<GraphV2> { 42 | let data = bincode::deserialize_from(reader)?; 43 | Ok(data) 44 | } 45 | 46 | pub fn write_graph_v2<W: std::io::Write>(writer: &mut W, data: &GraphV2) -> eyre::Result<()> { 47 | bincode::serialize_into(writer, data)?; 48 | Ok(()) 49 | } 50 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/utils/serialization/types/graph_v1.rs: -------------------------------------------------------------------------------- 1 | //! Implements a data serialization format targeting the `GraphMem` type. 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use std::collections::HashMap; 5 | 6 | /// An in-memory implementation of an HNSW hierarchical graph. 7 | /// 8 | /// This type is a serialization-focused adapter, provided for long-term 9 | /// compatibility and portability of serialized data. 10 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 11 | pub struct GraphV1 { 12 | pub entry_point: Option<EntryPoint>, 13 | pub layers: Vec<Layer>, 14 | } 15 | 16 | /// Type associated with the `GraphV1` serialization type. 17 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 18 | pub struct EntryPoint { 19 | pub point: VectorId, 20 | pub layer: usize, 21 | } 22 | 23 | /// Type associated with the `GraphV1` serialization type. 24 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 25 | pub struct Layer { 26 | pub links: HashMap<VectorId, EdgeIds>, 27 | } 28 | 29 | /// Type associated with the `GraphV1` serialization type. 30 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 31 | pub struct EdgeIds(pub Vec<VectorId>); 32 | 33 | /// Type associated with the `GraphV1` serialization type.
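/// Here a vector is identified by a bare serial id; `GraphV2` and `GraphV3` extend this with a
/// `version` field.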
34 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] 35 | pub struct VectorId(pub u32); 36 | 37 | /* ------------------------------- I/O ------------------------------ */ 38 | 39 | pub fn read_graph_v1<R: std::io::Read>(reader: &mut R) -> eyre::Result<GraphV1> { 40 | let data = bincode::deserialize_from(reader)?; 41 | Ok(data) 42 | } 43 | 44 | pub fn write_graph_v1<W: std::io::Write>(writer: &mut W, data: &GraphV1) -> eyre::Result<()> { 45 | bincode::serialize_into(writer, data)?; 46 | Ok(()) 47 | } 48 | -------------------------------------------------------------------------------- /.github/workflows/temp-branch-build-and-push-hawk.yaml: -------------------------------------------------------------------------------- 1 | name: Branch - Hawk Build and push docker image 2 | 3 | on: 4 | push: 5 | branches: 6 | - "dev" 7 | paths-ignore: 8 | - "deploy/**" 9 | 10 | concurrency: 11 | group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" 12 | cancel-in-progress: true 13 | 14 | env: 15 | REGISTRY: ghcr.io 16 | IMAGE_NAME: ${{ github.repository }}-cpu 17 | 18 | jobs: 19 | docker: 20 | timeout-minutes: 40 21 | runs-on: 22 | labels: ubuntu-22.04-16core 23 | permissions: 24 | packages: write 25 | contents: read 26 | attestations: write 27 | id-token: write 28 | steps: 29 | - name: Checkout 30 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 33 | - name: Set up Docker Buildx 34 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 35 | - name: Log in to the Container registry 36 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef 37 | with: 38 | registry: ${{ env.REGISTRY }} 39 | username: ${{ github.repository_owner }} 40 | password: ${{ secrets.GITHUB_TOKEN }} 41 | - name: Build and Push 42 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 43 | with: 44 | context: .
45 | push: true 46 | tags: | 47 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} 48 | platforms: linux/amd64 49 | build-args: | 50 | ARCHITECTURE=x86_64 51 | cache-from: type=gha 52 | cache-to: type=gha,mode=max 53 | file: Dockerfile.hawk 54 | -------------------------------------------------------------------------------- /.github/workflows/build-and-push-shares-encoding.yaml: -------------------------------------------------------------------------------- 1 | name: Build and push base docker image for shares encoding binary [nocuda] 2 | 3 | on: 4 | push: 5 | paths: 6 | - Dockerfile.shares-encoding 7 | - .github/workflows/build-and-push-shares-encoding.yaml 8 | - iris-mpc-common/src/bin/shares_encoding.rs 9 | 10 | workflow_dispatch: 11 | 12 | env: 13 | REGISTRY: ghcr.io 14 | IMAGE_NAME: ${{ github.repository }}-shares-encoding 15 | jobs: 16 | docker: 17 | timeout-minutes: 40 18 | runs-on: 19 | labels: ubuntu-22.04-16core 20 | permissions: 21 | packages: write 22 | contents: read 23 | attestations: write 24 | id-token: write 25 | steps: 26 | - name: Checkout 27 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 28 | - name: Set up Docker 29 | uses: docker/setup-docker-action@e61617a16c407a86262fb923c35a616ddbe070b3 30 | - name: Set up Docker Buildx 31 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 32 | - name: Log in to the Container registry 33 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef 34 | with: 35 | registry: ${{ env.REGISTRY }} 36 | username: ${{ github.repository_owner }} 37 | password: ${{ secrets.GITHUB_TOKEN }} 38 | - name: Build and Push 39 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 40 | with: 41 | context: . 42 | push: true 43 | tags: | 44 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} 45 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 46 | platforms: ${{ matrix.platform }} 47 | cache-from: type=gha 48 | cache-to: type=gha,mode=max 49 | file: Dockerfile.shares-encoding 50 | -------------------------------------------------------------------------------- /deploy/prod/common-values-iris-mpc.yaml: -------------------------------------------------------------------------------- 1 | image: "ghcr.io/worldcoin/iris-mpc:v0.24.1@sha256:a6ef7b878011e2024c071657999a345ccb1ed0b72724e0ca887b5b4ce26d22d6" 2 | 3 | environment: prod 4 | replicaCount: 1 5 | 6 | strategy: 7 | type: Recreate 8 | 9 | datadog: 10 | enabled: true 11 | 12 | ports: 13 | - containerPort: 3000 14 | name: health 15 | protocol: TCP 16 | 17 | livenessProbe: 18 | httpGet: 19 | path: /health 20 | port: health 21 | 22 | readinessProbe: 23 | httpGet: 24 | path: /ready 25 | port: health 26 | 27 | startupProbe: 28 | initialDelaySeconds: 900 29 | failureThreshold: 50 30 | periodSeconds: 30 31 | httpGet: 32 | path: /ready 33 | port: health 34 | 35 | resources: 36 | limits: 37 | cpu: 192 38 | memory: 1900Gi 39 | nvidia.com/gpu: 8 40 | hugepages-2Mi: 5Gi 41 | vpc.amazonaws.com/efa: 32 42 | requests: 43 | cpu: 190 44 | memory: 1900Gi 45 | nvidia.com/gpu: 8 46 | hugepages-2Mi: 5Gi 47 | vpc.amazonaws.com/efa: 32 48 | 49 | imagePullSecrets: 50 | - name: github-secret 51 | 52 | nodeSelector: 53 | kubernetes.io/arch: amd64 54 | 55 | hostNetwork: true 56 | 57 | podSecurityContext: 58 | runAsUser: 65534 59 | runAsGroup: 65534 60 | 61 | tolerations: 62 | - key: "dedicated" 63 | operator: "Equal" 64 | value: "gpuGroup" 65 | effect: "NoSchedule" 66 | 67 | keelPolling: 68 | # -- Specifies whether keel should poll 
for container updates 69 | enabled: true 70 | 71 | tmpDir: 72 | enabled: true 73 | size: 5Gi 74 | mountPath: /dev/shm 75 | 76 | preStop: 77 | # preStop.sleepPeriod specifies the time spent in Terminating state before SIGTERM is sent 78 | sleepPeriod: 10 79 | 80 | # terminationGracePeriodSeconds specifies the grace time between SIGTERM and SIGKILL 81 | terminationGracePeriodSeconds: 240 # TODO: make it 3x SMPC__PROCESSING_TIMEOUT_SECS 82 | -------------------------------------------------------------------------------- /.test.env: -------------------------------------------------------------------------------- 1 | RUST_LOG=info 2 | RUST_BACKTRACE=full 3 | SMPC__ENVIRONMENT=dev 4 | SMPC__SERVICE__SERVICE_NAME=smpcv2-server-dev 5 | SMPC__CPU_DATABASE__MIGRATE=true 6 | SMPC__CPU_DATABASE__CREATE=true 7 | SMPC__CPU_DATABASE__LOAD_PARALLELISM=8 8 | SMPC__MAX_BATCH_SIZE=1 9 | SMPC__ENABLE_PPROF_PER_BATCH=true 10 | SMPC__PPROF_S3_BUCKET=wf-smpcv2-dev-hnsw-performance-reports 11 | SMPC__MATCH_DISTANCES_BUFFER_SIZE=32 12 | SMPC__N_BUCKETS=5 13 | SMPC__OVERRIDE_MAX_BATCH_SIZE=false 14 | SMPC__MAX_DB_SIZE=10000 15 | SMPC_ENABLE_SENDING_ANONYMIZED_STATS_MESSAGE=true 16 | SMPC_ENABLE_SENDING_MIRROR_ANONYMIZED_STATS_MESSAGE=true 17 | SMPC_ENABLE_SENDING_ANONYMIZED_STATS_2D_MESSAGE=true 18 | SMPC__BATCH_POLLING_TIMEOUT_SECS=1 19 | SMPC__HAWK_REQUEST_PARALLELISM=1 20 | SMPC__DISABLE_PERSISTENCE=false 21 | SMPC__AWS__REGION=us-east-1 22 | SMPC__AWS__ENDPOINT=http://localstack:4566 23 | SMPC__KMS_KEY_ARNS='["arn:aws:kms:us-east-1:000000000000:key/00000000-0000-0000-0000-000000000000","arn:aws:kms:us-east-1:000000000000:key/00000000-0000-0000-0000-000000000001","arn:aws:kms:us-east-1:000000000000:key/00000000-0000-0000-0000-000000000002"]' 24 | SMPC__SERVICE_PORTS='["4000","4001","4002"]' 25 | SMPC__SERVER_COORDINATION__HEALTHCHECK_PORTS='["3000","3001","3002"]' 26 | SMPC__SERVER_COORDINATION__IMAGE_NAME='local' 27 | SMPC__SERVER_COORDINATION__HTTP_QUERY_RETRY_DELAY_MS=5000 28 | SMPC__SERVER_COORDINATION__HEARTBEAT_INTERVAL_SECS=2 29 | SMPC__SERVER_COORDINATION__HEARTBEAT_INITIAL_RETRIES=10 30 | SMPC__SHARES_BUCKET_NAME="wf-smpcv2-dev-sns-requests" 31 | SMPC__SNS_BUFFER_BUCKET_NAME="wf-smpcv2-stage-sns-buffer" 32 | SMPC__RESULTS_TOPIC_ARN="arn:aws:sns:us-east-1:000000000000:iris-mpc-results.fifo" 33 | AWS_ENDPOINT_URL=http://localstack:4566 34 | AWS_ACCESS_KEY_ID=test 35 | AWS_SECRET_ACCESS_KEY=test 36 | AWS_REGION=us-east-1 37 | 38 | # AnonStatsService-specific environment variables 39 | SMPC__MIN_1D_JOB_SIZE=32 40 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/utils/serialization/types/graph_v0.rs: -------------------------------------------------------------------------------- 1 | //! Implements a data serialization format targeting the `GraphMem` type 2 | //! for a plaintext vector store. 3 | 4 | use serde::{Deserialize, Serialize}; 5 | use std::collections::HashMap; 6 | 7 | /// An in-memory implementation of an HNSW hierarchical graph. 8 | /// 9 | /// This type is a serialization-focused adapter, provided for long-term 10 | /// compatibility and portability of serialized data. 11 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 12 | pub struct GraphV0 { 13 | pub entry_point: Option, 14 | pub layers: Vec, 15 | } 16 | 17 | /// Type associated with the `GraphV0` serialization type. 
18 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 19 | pub struct EntryPoint { 20 | pub point: PointId, 21 | pub layer: usize, 22 | } 23 | 24 | /// Type associated with the `GraphV0` serialization type. 25 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 26 | pub struct Layer { 27 | pub links: HashMap, 28 | } 29 | 30 | /// Type associated with the `GraphV0` serialization type. 31 | #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 32 | pub struct Edges(pub Vec<(PointId, (u16, u16))>); 33 | 34 | /// Type associated with the `GraphV0` serialization type. 35 | #[derive(Default, Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] 36 | pub struct PointId(pub u32); 37 | 38 | /* ------------------------------- I/O ------------------------------ */ 39 | 40 | pub fn read_graph_v0(reader: &mut R) -> eyre::Result { 41 | let data = bincode::deserialize_from(reader)?; 42 | Ok(data) 43 | } 44 | 45 | pub fn write_graph_v0(writer: &mut W, data: &GraphV0) -> eyre::Result<()> { 46 | bincode::serialize_into(writer, data)?; 47 | Ok(()) 48 | } 49 | -------------------------------------------------------------------------------- /certs/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFBzCCAu+gAwIBAgIUMtBZoAu/Lvi5GHOSIWd3KO+GycgwDQYJKoZIhvcNAQEL 3 | BQAwEzERMA8GA1UEAwwITXlUZXN0Q0EwHhcNMjUwNzE1MTQ0NjA3WhcNMzUwNzEz 4 | MTQ0NjA3WjATMREwDwYDVQQDDAhNeVRlc3RDQTCCAiIwDQYJKoZIhvcNAQEBBQAD 5 | ggIPADCCAgoCggIBAKKG9CgqT0wt7G53xEfQoqEfhPadYHS1Q9tNZ8F/l7k4xkR6 6 | 4zLKYE+5V2YVwSAdCgAG1mcbNM0WiF3Xlt+Ct9joKyNQkkAe3FprnVokQmheYGWg 7 | KnxM3VPG2DkpKswx7cxo6SII5mmO2QJkMxTzA9X9sXOtmtSt0rXvAvRFf7Xf4l0c 8 | Imhj1jY65Gfg4Mi+JXLizQ9xLcjM2Suiy4AfJO1wqr2sbXTIFiaY1cWNBm3wAbfQ 9 | MAwAtAI1buPNADhrCJESoF7Y2iW0DG5WETREq5siSQUZ7lcqGbL8IHcvETDDIr0W 10 | SkowAo1jTXMheIYrN4xYcbZgGfiPUm1Ni0VJaXlDRuIsLW8sWNLNXlk08cPI4PQQ 11 | EoEjPFQ0B9ncmjlY89ILzx7TfHahUt39PRFJ1VMZcdS+9+BJn+UxcATl0YC/8ZwN 12 | 8xlB5dZoRC4zG7PeO8vYVcsQaNRJQbGtZstcmcuGH7hCsp3ehEspuSkypifOfSpi 13 | gi0+DqsRiBEPtc4o12hr2yeBdwuDIHADln66JWZQKJUKtg4+Tolsq5nE4zJ91Fc/ 14 | 2R12pYegIG6RDioF1oJE+f0Hpqa0FvF/N7h3nOk4Neq6L1MzTS9ucp7/0hRv/aH2 15 | dpM7KoMnJcjXo0/D6tta7DzVgIAMAf3oAVCvRZJlRGyXCBU+bZ9LdxdzFqw1AgMB 16 | AAGjUzBRMB0GA1UdDgQWBBTox+JhGrIFNIk7iLeM038md3ew/DAfBgNVHSMEGDAW 17 | gBTox+JhGrIFNIk7iLeM038md3ew/DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 18 | DQEBCwUAA4ICAQA1gk6xPXTImgU4Zieb6HjkQfg50D44+7whZQU2RGNzmbQCOmzX 19 | 6WxS5m0NN+L+/l1wT4SPxt3Hid59/P0tiIjc2fEmHeN0CxhpeV/1LaB1QGfhqq7n 20 | FbVxNhDcm7XG38enBss2wL9JNVZn6xl9RH5i+B2toJK4InUgtIxQ38RjDZI1Hnea 21 | uL2G16G2zZxm7APrHeirNq/m56inR0ivLzYs5RgYKcdKqbT5vu8V58Rj5Qg1X3UY 22 | 5c+63vE9GeYbcnKKFFXPraf8NvQJCKWylmwJWO/Ep/dKNzk/y7V0TmQ71BShoUkq 23 | T80zsvH7GIrw8+W+NXG+9v0ohOIefrRcupEVtSiD8zlBs9arqSU+6CkG63gKeiXj 24 | hFCGcT6xic5o3JM9RyCYBCYbGCXOma9AqthzQfU+S/dVN1pnRvFnwvJqQcEE5v0f 25 | Nx8KqRiZ9L/TUg8+1bWrkAWSXbTjhZmBuQs9xUIVQpadBowltZ78IYAprudCU9C/ 26 | 3zKDSKyuMDSHpsvs1ovxDh/EPxTvYZn8nJDJnVGrymuR9C+LYIUdDD1tTnqBRdDi 27 | RBrZohzohxX6294VjdQCjODqExosSG2Wkuuh9xHK4vbU0bPVZzQQpHLAR/9xgz4g 28 | J2wK0D23qDTU45J74os5nmyVjCCmcuJgHmqF0A9Kvw2bEN0yw+aciKaasQ== 29 | -----END CERTIFICATE----- 30 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # Pre-commit hooks configuration 2 | # See https://pre-commit.com for more information 3 | 4 | repos: 5 | # 
General pre-commit hooks 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: 2c9f875913ee60ca25ce70243dc24d5b6415598c # v4.6.0 8 | hooks: 9 | - id: trailing-whitespace 10 | exclude: ^(.*\.md|.*\.sage|.*\.sql)$ 11 | - id: end-of-file-fixer 12 | exclude: \.sql$ 13 | - id: check-yaml 14 | args: ['--unsafe'] # Allow custom tags in k8s yaml files 15 | - id: check-toml 16 | - id: check-added-large-files 17 | args: ['--maxkb=1000'] 18 | - id: check-merge-conflict 19 | - id: check-case-conflict 20 | - id: mixed-line-ending 21 | args: ['--fix=lf'] 22 | - id: detect-private-key 23 | 24 | # Gitleaks - Secret detection 25 | - repo: https://github.com/gitleaks/gitleaks 26 | rev: 4fb43823ef3d152d239e92d7d5cb04783b548062 # v8.28.0 27 | hooks: 28 | - id: gitleaks 29 | 30 | # Rust formatting 31 | - repo: local 32 | hooks: 33 | - id: cargo-fmt 34 | name: cargo fmt 35 | description: Format Rust code with rustfmt 36 | entry: cargo fmt 37 | language: system 38 | types: [rust] 39 | pass_filenames: false 40 | 41 | - id: cargo-clippy 42 | name: cargo clippy 43 | description: Lint Rust code with clippy 44 | entry: cargo clippy 45 | args: ['--all-targets', '--all-features', '--', '-D', 'warnings'] 46 | language: system 47 | types: [rust] 48 | pass_filenames: false 49 | 50 | - id: cargo-doc 51 | name: cargo doc 52 | description: Check Rust documentation 53 | entry: cargo doc 54 | args: ['--no-deps', '--document-private-items'] 55 | language: system 56 | types: [rust] 57 | pass_filenames: false 58 | -------------------------------------------------------------------------------- /Dockerfile.nocuda: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 public.ecr.aws/ubuntu/ubuntu:22.04 as build-image 2 | 3 | WORKDIR /src 4 | RUN apt-get update && apt-get install -y \ 5 | curl \ 6 | build-essential \ 7 | libssl-dev \ 8 | texinfo \ 9 | libcap2-bin \ 10 | pkg-config \ 11 | git \ 12 | devscripts \ 13 | debhelper \ 14 | ca-certificates \ 15 | protobuf-compiler \ 16 | wget 17 | 18 | RUN curl https://sh.rustup.rs -sSf | sh -s -- -y 19 | ENV PATH "/root/.cargo/bin:${PATH}" 20 | ENV RUSTUP_HOME "/root/.rustup" 21 | ENV CARGO_HOME "/root/.cargo" 22 | RUN rustup toolchain install 1.89.0 23 | RUN rustup default 1.89.0 24 | RUN rustup component add cargo 25 | RUN cargo install cargo-build-deps \ 26 | && cargo install cargo-edit --version 0.13.6 --locked 27 | 28 | FROM --platform=linux/amd64 build-image as build-app 29 | WORKDIR /src/gpu-iris-mpc 30 | COPY . . 
31 | 32 | RUN cargo build -p iris-mpc-bins --release --target x86_64-unknown-linux-gnu --bin key-manager shares-encoding reshare-client client iris-mpc-hawk 33 | 34 | FROM --platform=linux/amd64 public.ecr.aws/ubuntu/ubuntu:22.04 35 | ENV DEBIAN_FRONTEND=noninteractive 36 | 37 | RUN apt-get update && apt-get install -y ca-certificates awscli 38 | COPY certs /usr/local/share/ca-certificates/ 39 | RUN update-ca-certificates 40 | 41 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/key-manager /bin/key-manager 42 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/shares-encoding /bin/shares-encoding 43 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/reshare-client /bin/reshare-client 44 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/client /bin/client 45 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/iris-mpc-hawk /bin/iris-mpc-hawk 46 | 47 | USER 65534 48 | ENTRYPOINT ["/bin/sh"] 49 | -------------------------------------------------------------------------------- /certs/aws_orb_prod_private_ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFGTCCAwGgAwIBAgIRALbhDMpZwlM6R5v4AodgiFEwDQYJKoZIhvcNAQENBQAw 3 | JjESMBAGA1UECgwJV29ybGRjb2luMRAwDgYDVQQDDAdSb290IENBMB4XDTIyMTEx 4 | NzEyNDU1OVoXDTMyMTExNzEzNDU1OVowJjESMBAGA1UECgwJV29ybGRjb2luMRAw 5 | DgYDVQQDDAdSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA 6 | tmRG+kthkbE6Jpfa9p7/Teb+3kx6i1eFkDnat3rE1X+rwa5/KOmN07c/9s2hnrvR 7 | +YkY6sQbWIB/zNPODgQBXHWU0xmEhNJMyFzBAa8ymxaF0sB67feI3hOoigotV0HB 8 | QEk3YvstQrirGaymgOedp0bfKDfv7hcNe8xYFom3pEOXrrZg6hEQH8x5jm59mJQL 9 | N39YGZ8rTr6z75t2u0k9U4C0SaqGOi179Lg2H/Wb0UBSHiItqieBaz/XplKtfLiJ 10 | keMBF31uBH0pAlFIxEdDGJSTMnrtu2n4pHlWgtQhxuLdDcLgMQUC7EN/hfZ7KI1B 11 | LFx3wNo2Rb8I6glowDdbztOFcvV75kJ6Zifrp1bg60IOTCjbLiDkw+GdXsyYbieS 12 | I8AfZDcXJwicG4f6Z+RsgHgoRcKjE2As3oZYT/IopD4Fl0R7HaWkXyGzzfMY+1U5 13 | NUuNy7yyC/MkaasiX4GHOIK6WWqU/OMDvTfc1HrIx+33tH8kw0D0e44RJqBTGtFQ 14 | reS0pa/DTkvAAS96DThHVpNBS8qeVOCYrg0vjd+Nei6RGcvEiHKASR72Uew6OMtL 15 | OalmRJXSd/ulF8lqX7tme95g9iZ9FqVEoaX0mYk4THf27We080fe+Mc5C4RyxNkc 16 | 5/iy/Q+EL0ZyORjlpLhYCjRqji5JOgMdO1M5i51HWh8CAwEAAaNCMEAwDwYDVR0T 17 | AQH/BAUwAwEB/zAdBgNVHQ4EFgQU7mIWe7uYgFviKeoIPN2PI1T/Wo0wDgYDVR0P 18 | AQH/BAQDAgGGMA0GCSqGSIb3DQEBDQUAA4ICAQBLpKCdUvTijU9NYHFeJILNScNU 19 | lXseE0IW9VxAR0TF/jn3pN3aZOUMrAsBjwxq/9PJ+7CdcYk0EJGoallmoySO0pPP 20 | 05bfEK++UDkgSA2j9OeO4SwjM3b4uiirL5nTJIatCQzJv6V/2F11y7oZC65RmtOl 21 | ecKuaumZMm+yGj9HVl9YN9mtFTmlL2SyzZrDh2soRVNSSQzW7TOQoFyWHQtaVnKF 22 | POjdG1jfQdY6UDJcEUaG0+Q0Tuf6o3yqN2YMQegLeuPEa+N0ApenLDXhOdCXDvpR 23 | xx3EGcCcrwWl1GzcX1zhgzJlQjDNwys8W88NcemDikslA6XmSFi++4DZql/Qx8Ct 24 | hmvdvXN4+vwVEyHHevzEBLRFMh6MizbImU/5HC89pjUpmUkroOUHYspTPmDwqbt8 25 | ktlUGKKWI4fsaP5I6ItOv04aEPt7ssU1FwlzqeEgrfaFjGf34+0RAEwfyx3liPR3 26 | tTlXJHbPKNNJ+2JYNdaUo7jQw/ezVYtF+CH9tmc2Xq6RG/3EMhvDn2m0fNHr7eht 27 | b/wsuLvg1s1g2lYSV23iDqWGP3CzR+bC5W6UVqHp6zlTaead30Fb26Q/3HqOWO/j 28 | eaI6nXFCZmuBLnzsED5YDUdqeLIUmAvUr4rfj3hcVMlh3vqRp1zc87Gd2ndxLpyc 29 | PW/6+g2oA717JQZ8Qw== 30 | -----END CERTIFICATE----- 31 | -------------------------------------------------------------------------------- /.github/workflows/build-and-push-upgrade-hawk.yaml: -------------------------------------------------------------------------------- 1 | name: Build and push image for iris-mpc-genesis 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 
8 | release: 9 | types: 10 | - 'published' 11 | 12 | concurrency: 13 | group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' 14 | cancel-in-progress: true 15 | 16 | env: 17 | REGISTRY: ghcr.io 18 | IMAGE_NAME: "iris-mpc-genesis" 19 | 20 | jobs: 21 | docker: 22 | timeout-minutes: 40 23 | runs-on: 24 | labels: ubuntu-22.04-16core 25 | permissions: 26 | packages: write 27 | contents: read 28 | attestations: write 29 | id-token: write 30 | steps: 31 | - name: Checkout 32 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 33 | - name: Set up QEMU 34 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 35 | - name: Set up Docker Buildx 36 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 37 | - name: Log in to the Container registry 38 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef 39 | with: 40 | registry: ${{ env.REGISTRY }} 41 | username: ${{ github.repository_owner }} 42 | password: ${{ secrets.GITHUB_TOKEN }} 43 | - name: Build and Push 44 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 45 | with: 46 | context: . 47 | push: true 48 | tags: | 49 | ${{ env.REGISTRY }}/worldcoin/${{ env.IMAGE_NAME }}:${{ github.sha }} 50 | ${{ github.event_name == 'release' && format('{0}/worldcoin/{1}:{2}', env.REGISTRY, env.IMAGE_NAME, github.event.release.tag_name) || '' }} 51 | platforms: linux/amd64 52 | cache-from: type=gha 53 | cache-to: type=gha,mode=max 54 | file: Dockerfile.genesis.hawk 55 | -------------------------------------------------------------------------------- /.github/workflows/build-and-push-no-cuda.yaml: -------------------------------------------------------------------------------- 1 | name: Build and push image from a branch [nocuda] 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | tag: 7 | description: 'tag to apply to the image' 8 | type: string 9 | required: false 10 | default: 'no-cuda-key-manager' 11 | 12 | concurrency: 13 | group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' 14 | cancel-in-progress: true 15 | 16 | env: 17 | REGISTRY: ghcr.io 18 | IMAGE_NAME: ${{ github.repository }} 19 | 20 | jobs: 21 | docker: 22 | timeout-minutes: 40 23 | runs-on: 24 | labels: ubuntu-22.04-16core 25 | permissions: 26 | packages: write 27 | contents: read 28 | attestations: write 29 | id-token: write 30 | steps: 31 | - name: Checkout 32 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 33 | - name: Set up QEMU 34 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 35 | - name: Set up Docker Buildx 36 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 37 | - name: Log in to the Container registry 38 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef 39 | with: 40 | registry: ${{ env.REGISTRY }} 41 | username: ${{ github.repository_owner }} 42 | password: ${{ secrets.GITHUB_TOKEN }} 43 | - name: Build and Push 44 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 45 | with: 46 | context: . 
47 | push: true 48 | tags: | 49 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} 50 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ inputs.tag }} 51 | platforms: linux/amd64 52 | cache-from: type=gha 53 | cache-to: type=gha,mode=max 54 | file: Dockerfile.nocuda 55 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 ubuntu:22.04 as build-image 2 | 3 | WORKDIR /src 4 | RUN apt-get update && apt-get install -y \ 5 | curl \ 6 | build-essential \ 7 | libssl-dev \ 8 | texinfo \ 9 | libcap2-bin \ 10 | pkg-config \ 11 | git \ 12 | devscripts \ 13 | debhelper \ 14 | ca-certificates \ 15 | protobuf-compiler \ 16 | wget 17 | 18 | RUN curl https://sh.rustup.rs -sSf | sh -s -- -y 19 | ENV PATH "/root/.cargo/bin:${PATH}" 20 | ENV RUSTUP_HOME "/root/.rustup" 21 | ENV CARGO_HOME "/root/.cargo" 22 | RUN rustup toolchain install 1.89.0 23 | RUN rustup default 1.89.0 24 | RUN rustup component add cargo 25 | RUN cargo install cargo-build-deps \ 26 | && cargo install cargo-edit --version 0.13.6 --locked 27 | 28 | FROM --platform=linux/amd64 build-image as build-app 29 | WORKDIR /src/gpu-iris-mpc 30 | COPY . . 31 | RUN cargo build -p iris-mpc-bins --release --target x86_64-unknown-linux-gnu --bin nccl --bin iris-mpc-gpu --bin client --bin key-manager --bin reshare-server --bin reshare-client 32 | 33 | FROM --platform=linux/amd64 public.ecr.aws/deep-learning-containers/base:12.8.0-gpu-py312-cu128-ubuntu22.04-ec2-v1.17 34 | ENV DEBIAN_FRONTEND=noninteractive 35 | 36 | # Include client, server and key-manager, upgrade-client and upgrade-server binaries 37 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/nccl /bin/nccl 38 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/iris-mpc-gpu /bin/iris-mpc-gpu 39 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/client /bin/client 40 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/key-manager /bin/key-manager 41 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/reshare-server /bin/reshare-server 42 | COPY --from=build-app /src/gpu-iris-mpc/target/x86_64-unknown-linux-gnu/release/reshare-client /bin/reshare-client 43 | 44 | USER 65534 45 | ENTRYPOINT ["/bin/iris-mpc-gpu"] 46 | -------------------------------------------------------------------------------- /iris-mpc-common/src/iris_db/shamir_iris.rs: -------------------------------------------------------------------------------- 1 | use super::iris::{IrisCode, IrisCodeArray}; 2 | use crate::shamir::{Shamir, P32}; 3 | use rand::Rng; 4 | 5 | #[derive(Debug)] 6 | pub struct ShamirIris { 7 | pub code: [u16; IrisCodeArray::IRIS_CODE_SIZE], 8 | pub mask: [u16; IrisCodeArray::IRIS_CODE_SIZE], 9 | } 10 | 11 | impl Clone for ShamirIris { 12 | fn clone(&self) -> Self { 13 | Self { 14 | code: self.code, 15 | mask: self.mask, 16 | } 17 | } 18 | } 19 | 20 | impl Default for ShamirIris { 21 | fn default() -> Self { 22 | Self { 23 | code: [0; IrisCodeArray::IRIS_CODE_SIZE], 24 | mask: [0; IrisCodeArray::IRIS_CODE_SIZE], 25 | } 26 | } 27 | } 28 | 29 | impl ShamirIris { 30 | fn share_bit(code: bool, mask: bool, rng: &mut R) -> ([u16; 3], [u16; 3]) { 31 | // code needs to be encoded before sharing 32 | let val = (code & mask) as u32; 33 | let to_share = ((mask as u32 + P32 + P32 - val - val) % P32) as u16; 34 | let 
code_shares = Shamir::share_d1(to_share, rng); 35 | 36 | // mask is directly shared 37 | let mask_shares = Shamir::share_d1(mask as u16, rng); 38 | (code_shares, mask_shares) 39 | } 40 | 41 | pub fn share_iris(iris: &IrisCode, rng: &mut R) -> [ShamirIris; 3] { 42 | let mut result = [ 43 | ShamirIris::default(), 44 | ShamirIris::default(), 45 | ShamirIris::default(), 46 | ]; 47 | for (bitindex, (c_bit, m_bit)) in iris.code.bits().zip(iris.mask.bits()).enumerate() { 48 | let (code_shares, mask_shares) = Self::share_bit(c_bit, m_bit, rng); 49 | for (res, (code, mask)) in result 50 | .iter_mut() 51 | .zip(code_shares.into_iter().zip(mask_shares.into_iter())) 52 | { 53 | res.code[bitindex] = code; 54 | res.mask[bitindex] = mask; 55 | } 56 | } 57 | 58 | result 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /iris-mpc-utils/src/aws/keys.rs: -------------------------------------------------------------------------------- 1 | use base64::{engine::general_purpose, Engine}; 2 | use eyre::Result; 3 | use sodiumoxide::crypto::box_::PublicKey; 4 | use thiserror::Error; 5 | 6 | use iris_mpc_common::helpers::key_pair; 7 | 8 | use crate::{ 9 | constants::{PARTY_IDX_0, PARTY_IDX_1, PARTY_IDX_2}, 10 | types::PublicKeyset, 11 | }; 12 | 13 | #[derive(Error, Debug)] 14 | #[allow(clippy::enum_variant_names)] 15 | pub enum PublicKeyError { 16 | #[error("Download public key decoding error: {0}")] 17 | DecodeError(String), 18 | 19 | #[error("Download public key error: {0}")] 20 | DownloadError(String), 21 | 22 | #[error("Public key format error: {0}")] 23 | FormatError(String), 24 | } 25 | 26 | /// Returns downloaded & deserialised MPC party public keyset. 27 | pub(super) async fn download_public_keyset(base_url: &str) -> Result { 28 | async fn download_public_key( 29 | party_idx: usize, 30 | base_url: &str, 31 | ) -> Result { 32 | let pbk_b64 = key_pair::download_public_key(base_url.to_owned(), party_idx.to_string()) 33 | .await 34 | .map_err(|e| { 35 | tracing::error!("Node-{}: public key download error: {}", party_idx, e); 36 | PublicKeyError::DownloadError(e.to_string()) 37 | })?; 38 | tracing::info!("Node-{}: public key downloaded", party_idx); 39 | 40 | let pbk_bytes = general_purpose::STANDARD 41 | .decode(pbk_b64) 42 | .map_err(|e| PublicKeyError::DecodeError(e.to_string()))?; 43 | 44 | PublicKey::from_slice(&pbk_bytes).ok_or_else(|| { 45 | PublicKeyError::FormatError(format!("Node-{}: public key format is invalid", party_idx)) 46 | }) 47 | } 48 | 49 | let (key0, key1, key2) = tokio::join!( 50 | download_public_key(PARTY_IDX_0, base_url), 51 | download_public_key(PARTY_IDX_1, base_url), 52 | download_public_key(PARTY_IDX_2, base_url) 53 | ); 54 | 55 | Ok([key0?, key1?, key2?]) 56 | } 57 | -------------------------------------------------------------------------------- /.github/workflows/temp-branch-build-and-push-hawk-arm64.yaml: -------------------------------------------------------------------------------- 1 | name: Branch - Anon stats server build and push docker image 2 | 3 | on: 4 | push: 5 | branches: 6 | - "dev" 7 | - "chore/rustc-1.89" 8 | paths-ignore: 9 | - "deploy/**" 10 | 11 | concurrency: 12 | group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" 13 | cancel-in-progress: true 14 | 15 | env: 16 | REGISTRY: ghcr.io 17 | IMAGE_NAME: ${{ github.repository }}-cpu 18 | 19 | jobs: 20 | docker: 21 | timeout-minutes: 40 22 | runs-on: 23 | labels: arm64-ubuntu-22.04-16core 24 | permissions: 25 | 
packages: write 26 | contents: read 27 | attestations: write 28 | id-token: write 29 | steps: 30 | - name: Checkout 31 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 32 | - name: Install Docker 33 | run: | 34 | curl -fsSL https://get.docker.com | sh 35 | sudo usermod -aG docker $USER 36 | sudo apt-get install acl 37 | sudo setfacl --modify user:$USER:rw /var/run/docker.sock 38 | - name: Set up Docker Buildx 39 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 40 | - name: Log in to the Container registry 41 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef 42 | with: 43 | registry: ${{ env.REGISTRY }} 44 | username: ${{ github.repository_owner }} 45 | password: ${{ secrets.GITHUB_TOKEN }} 46 | - name: Build and Push 47 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 48 | with: 49 | context: . 50 | push: true 51 | tags: | 52 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}-arm64 53 | platforms: linux/arm64 54 | build-args: | 55 | ARCHITECTURE=aarch64 56 | cache-from: type=gha 57 | cache-to: type=gha,mode=max 58 | file: Dockerfile.arm64.hawk 59 | -------------------------------------------------------------------------------- /scripts/run-server.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # Arg :: Node ID :: MPC node ordinal identifier. 5 | NODE_ID="$1" 6 | if [ -z "$NODE_ID" ]; then 7 | echo "Usage: run-server.sh [--init-servers]" 8 | exit 1 9 | fi 10 | 11 | # Arg :: Binary :: Binary to run [standard | genesis]. 12 | BINARY="$2" 13 | if [ -z "$BINARY" ]; then 14 | echo "Usage: run-server.sh [--init-servers]" 15 | exit 1 16 | fi 17 | 18 | INIT_SERVERS=false 19 | if [ "$3" == "--init-servers" ]; then 20 | INIT_SERVERS=true 21 | fi 22 | 23 | export RUST_LOG=info 24 | export SMPC__DATABASE__URL="postgres://postgres:postgres@localhost:5432/SMPC_dev_${NODE_ID}" 25 | export SMPC__CPU_DATABASE__URL="postgres://postgres:postgres@localhost:5432/SMPC_dev_${NODE_ID}" 26 | export SMPC__CPU_DATABASE__MIGRATE=true 27 | export SMPC__ANON_STATS_DATABASE__MIGRATE=true 28 | export SMPC__ANON_STATS_DATABASE__CREATE=true 29 | export SMPC__ANON_STATS_DATABASE__LOAD_PARALLELISM=8 30 | export SMPC__ANON_STATS_DATABASE__URL="postgres://postgres:postgres@localhost:5432/SMPC_dev_${NODE_ID}" 31 | export SMPC__PARTY_ID="${NODE_ID}" 32 | export SMPC__AWS__ENDPOINT="http://127.0.0.1:4566" 33 | export SMPC__REQUESTS_QUEUE_URL="http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/smpcv2-${NODE_ID}-dev.fifo" 34 | export SMPC__SERVER_COORDINATION__NODE_HOSTNAMES='["0.0.0.0","0.0.0.0","0.0.0.0"]' 35 | export SMPC__SERVER_COORDINATION__PARTY_ID="${NODE_ID}" 36 | export SMPC__SERVICE_PORTS='["4000","4001","4002"]' 37 | export SMPC__HAWK_SERVER_HEALTHCHECK_PORT="300${NODE_ID}" 38 | export AWS_ENDPOINT_URL="http://127.0.0.1:4566" 39 | 40 | if [ "$INIT_SERVERS" = true ]; then 41 | ./scripts/tools/init-servers.sh 42 | fi 43 | 44 | # Set the stack size to 100MB to receive large messages. 
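# (104857600 bytes = 100 * 1024 * 1024, i.e. 100 MiB.)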
45 | export RUST_MIN_STACK=104857600
46 | 
47 | 
48 | if [ "$BINARY" == "genesis" ]; then
49 |     cargo run --release -p iris-mpc-bins --bin iris-mpc-hawk-genesis -- --max-height "${GENESIS_MAX_HEIGHT}" --perform-snapshot=false
50 | else
51 |     cargo run --release -p iris-mpc-bins --bin iris-mpc-hawk
52 | fi
53 | 
-------------------------------------------------------------------------------- /iris-mpc-utils/src/misc.rs: --------------------------------------------------------------------------------
1 | use std::{
2 |     fs::File,
3 |     hash::{DefaultHasher, Hash, Hasher},
4 |     io::{BufReader, BufWriter},
5 |     path::Path,
6 | };
7 | 
8 | use bincode;
9 | use eyre::Result;
10 | use serde::{de::DeserializeOwned, Serialize};
11 | use serde_json;
12 | 
13 | /// Returns a hash computed over a type instance using `DefaultHasher`.
14 | pub fn compute_default_hash<T: Hash>(t: &T) -> u64 {
15 |     let mut s = DefaultHasher::new();
16 |     t.hash(&mut s);
17 |     s.finish()
18 | }
19 | 
20 | /// Returns a boxed iterator over the first `limit` elements of `iter`.
21 | pub fn limited_iterator<I, T>(iter: I, limit: Option<usize>) -> Box<dyn Iterator<Item = T>>
22 | where
23 |     I: Iterator<Item = T> + 'static,
24 | {
25 |     match limit {
26 |         Some(num) => Box::new(iter.take(num)),
27 |         None => Box::new(iter),
28 |     }
29 | }
30 | 
31 | /// Reads binary data from a file & deserializes a domain type.
32 | pub fn read_bin<T: DeserializeOwned>(fpath: &Path) -> Result<T> {
33 |     let file = File::open(fpath)?;
34 |     let reader = BufReader::new(file);
35 |     let data: T = bincode::deserialize_from(reader)?;
36 | 
37 |     Ok(data)
38 | }
39 | 
40 | /// Reads JSON data from a file & deserializes a domain type.
41 | pub fn read_json<T: DeserializeOwned>(fpath: &Path) -> Result<T> {
42 |     let file = File::open(fpath)?;
43 |     let reader = BufReader::new(file);
44 |     let data: T = serde_json::from_reader(reader)?;
45 | 
46 |     Ok(data)
47 | }
48 | 
49 | /// Writes binary data serialized from a domain type to a file.
50 | pub fn write_bin<T: Serialize>(data: &T, fpath: &Path) -> Result<()> {
51 |     let file = File::create(fpath)?;
52 |     let writer = BufWriter::new(file);
53 |     bincode::serialize_into(writer, data)?;
54 | 
55 |     Ok(())
56 | }
57 | 
58 | /// Writes JSON data serialized from a domain type to a file.
59 | pub fn write_json<T: Serialize>(data: &T, fpath: &Path) -> Result<()> {
60 |     let file = File::create(fpath)?;
61 |     let writer = BufWriter::new(file);
62 |     serde_json::to_writer(writer, &data)?;
63 | 
64 |     Ok(())
65 | }
66 | 
-------------------------------------------------------------------------------- /adr/002-load-performance-optimisation.md: --------------------------------------------------------------------------------
1 | # ADR-002: Load Performance Optimisation
2 | 
3 | ### Links
4 | [Notion Page on the boot optimisation](https://www.notion.so/worldcoin/SMPCv2-Boot-time-optimisation-1458614bdf8c80c39170dc0040eb9acf)
5 | 
6 | ## Context
7 | - on every boot-up the container needs to load several hundred GB of data (and the volume is constantly growing, hopefully into the TB range)
8 | - the data is currently loaded directly from AuroraDB
9 | - we need to be able to boot the container in under 10 minutes
10 | 
11 | ## Decision
12 | We have decided to prepare an `iris-mpc-db-exporter` service that periodically stores a dump of the database in an S3 bucket. Based on our research, loading the data from that bucket is significantly faster than loading it from AuroraDB.
13 | 
14 | Aiming to boost performance further, we have decided to move the conversion of `u8` into `i8` to the exporting service.
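For illustration, such a re-encoding is a cheap per-byte pass that the exporter can run once at dump time; a minimal sketch (assuming a plain two's-complement `u8 -> i8` cast, which may differ from the exporter's exact mapping):

```rust
/// Hypothetical helper: re-encode a dumped share buffer from `u8` to `i8`
/// at export time, so server pods can load it without a per-boot conversion.
fn reencode_shares(src: &[u8]) -> Vec<i8> {
    // `as i8` is a bit-preserving (wrapping) cast, so the pass is lossless
    // and costs a single linear scan in the exporter.
    src.iter().map(|&b| b as i8).collect()
}
```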
15 | 
16 | **The byte order of the dumped data was also modified in the exporter, so that it can be loaded into the memory of the `iris-mpc` pods faster.**
17 | 
18 | ## Rationale
19 | - Loading the data from the S3 bucket is significantly faster than loading it from AuroraDB.
20 | - The conversion of `u8` into `i8` is a simple operation that can be done in the exporting service.
21 | - The modification of the byte order is likewise a simple operation that can be done in the exporting service.
22 | 
23 | ## Consequences
24 | Because of the transformations required to make loading more efficient (altered byte order, shifted byte values), we currently maintain two storages:
25 | - AuroraDB (source of truth)
26 | - S3 bucket (used for improving the data load time)
27 | 
28 | Both store the same data **but in different formats**; please refer to the implementation (references are accurate at revision `ec14a0c022b73d2d291578502f6bf02cbc0a99d0`):
29 | - `iris-mpc-store/src/lib.rs:41` (universal type for both data formats)
30 | - `iris-mpc-gpu/src/server/actor.rs:464` (method for loading data from AuroraDB into memory)
31 | - `iris-mpc-gpu/src/server/actor.rs:503` (method for loading data from S3 bucket into memory)
32 | 
-------------------------------------------------------------------------------- /iris-mpc-cpu/src/hnsw/graph/graph_diff/mod.rs: --------------------------------------------------------------------------------
1 | use std::fmt::Display;
2 | use std::str::FromStr;
3 | 
4 | use crate::hnsw::{vector_store::Ref, GraphMem};
5 | 
6 | pub mod explicit;
7 | pub mod jaccard;
8 | pub mod node_equiv;
9 | 
10 | /// A trait that defines a graph diffing strategy using a visitor pattern.
11 | ///
12 | /// A `Differ` implementation can maintain internal state and update it as the
13 | /// `run_diff` function traverses the layers and nodes of the graphs.
14 | pub trait Differ<V> {
15 |     /// The final output type of the diffing operation.
16 |     type Output: Display;
17 | 
18 |     /// Called once before graph traversal begins.
19 |     fn start_graph(&mut self) {}
20 | 
21 |     /// Called before traversing each layer.
22 |     fn start_layer(&mut self, _layer_index: usize) {}
23 | 
24 |     /// Called for each node that exists in both the `lhs` and `rhs` layer.
25 |     fn diff_neighborhood(&mut self, layer_index: usize, node: &V, lhs: &[V], rhs: &[V]);
26 | 
27 |     /// Called after traversing each layer.
28 |     fn end_layer(&mut self, _layer_index: usize) {}
29 | 
30 |     /// Called at the very end to consume the differ and produce the final result.
31 |     fn finish(self) -> Self::Output;
32 | }
33 | 
34 | /// Traverses two graphs and applies a `Differ` to compute a result.
35 | ///
36 | /// It's recommended to run `ensure_node_equivalence` before using this function
37 | /// to ensure the graphs have a comparable structure.
38 | pub fn run_diff(lhs: &GraphMem, rhs: &GraphMem, mut differ: D) -> D::Output 39 | where 40 | V: Ref + Display + FromStr + Ord, 41 | D: Differ, 42 | { 43 | differ.start_graph(); 44 | 45 | for (i, (lhs_layer, rhs_layer)) in lhs.layers.iter().zip(rhs.layers.iter()).enumerate() { 46 | differ.start_layer(i); 47 | for (node, lhs_nbhd) in lhs_layer.links.iter() { 48 | if let Some(rhs_nbhd) = rhs_layer.links.get(node) { 49 | differ.diff_neighborhood(i, node, lhs_nbhd, rhs_nbhd); 50 | } 51 | } 52 | differ.end_layer(i); 53 | } 54 | 55 | differ.finish() 56 | } 57 | -------------------------------------------------------------------------------- /deploy/dev/common-values-ampc-hnsw.yaml: -------------------------------------------------------------------------------- 1 | image: "ghcr.io/worldcoin/iris-mpc-cpu:bf3989860cfda1f9f021a893fe8df68fdfd1b961-arm64" 2 | 3 | environment: dev 4 | replicaCount: 1 5 | 6 | strategy: 7 | type: Recreate 8 | 9 | datadog: 10 | enabled: true 11 | 12 | hostNetwork: false 13 | 14 | ports: 15 | - containerPort: 3000 16 | name: health 17 | protocol: TCP 18 | - containerPort: 4000 19 | name: tcp-4000 20 | protocol: TCP 21 | - containerPort: 4001 22 | name: tcp-4001 23 | protocol: TCP 24 | - containerPort: 4002 25 | name: tcp-4002 26 | protocol: TCP 27 | - containerPort: 4100 28 | name: tcp-4100 29 | protocol: TCP 30 | - containerPort: 4101 31 | name: tcp-4101 32 | protocol: TCP 33 | - containerPort: 4102 34 | name: tcp-4102 35 | protocol: TCP 36 | 37 | livenessProbe: 38 | httpGet: 39 | path: /health 40 | port: health 41 | 42 | readinessProbe: 43 | periodSeconds: 30 44 | httpGet: 45 | path: /ready 46 | port: health 47 | 48 | startupProbe: 49 | initialDelaySeconds: 60 50 | failureThreshold: 120 51 | periodSeconds: 30 52 | httpGet: 53 | path: /ready 54 | port: health 55 | 56 | resources: 57 | limits: 58 | cpu: 188 59 | memory: 2720Gi 60 | requests: 61 | cpu: 188 62 | memory: 2720Gi 63 | 64 | imagePullSecrets: 65 | - name: github-secret 66 | 67 | podAnnotations: 68 | karpenter.sh/do-not-disrupt: "true" 69 | 70 | nodeSelector: 71 | kubernetes.io/arch: arm64 72 | karpenter.sh/capacity-type: on-demand 73 | node.kubernetes.io/instance-type: "x8g.48xlarge" 74 | 75 | podSecurityContext: 76 | runAsUser: 65534 77 | runAsGroup: 65534 78 | 79 | preStop: 80 | # preStop.sleepPeriod specifies the time spent in Terminating state before SIGTERM is sent 81 | sleepPeriod: 10 82 | 83 | # terminationGracePeriodSeconds specifies the grace time between SIGTERM and SIGKILL 84 | # long enough to allow for graceful shutdown to safely process 2 batches 85 | # single batch timeout in stage is 240 seconds 86 | terminationGracePeriodSeconds: 240 87 | 88 | mountSSLCerts: 89 | enabled: true 90 | mountPath: /etc/ssl/private 91 | -------------------------------------------------------------------------------- /iris-mpc-cpu/src/execution/hawk_main/session_groups.rs: -------------------------------------------------------------------------------- 1 | use crate::execution::hawk_main::{rot::Rotations, SearchRotations}; 2 | 3 | use super::{BothEyes, BothOrient, HawkSession, Orientation, LEFT, RIGHT}; 4 | 5 | pub struct SessionGroups { 6 | pub for_search: BothOrient>>, 7 | pub for_intra_batch: BothOrient>>, 8 | } 9 | 10 | impl SessionGroups { 11 | // For each request, we may use parallel sessions for: 12 | // both orientations, both eyes, search+intra_batch, rotations. 
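    // That is: 2 orientations * 2 eyes * 2 uses (search and intra-batch) * SearchRotations::N_ROTATIONS.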
13 |     pub const N_SESSIONS_PER_REQUEST: usize = 2 * 2 * 2 * SearchRotations::N_ROTATIONS;
14 | 
15 |     // Group the sessions per orientation, eye, and search+intra_batch.
16 |     pub fn new(sessions: BothEyes<Vec<HawkSession>>) -> Self {
17 |         let [left, right] = sessions;
18 |         let [l0, l1, l2, l3] = split_in_four(left);
19 |         let [r0, r1, r2, r3] = split_in_four(right);
20 |         Self {
21 |             for_search: [[l0, r0], [l1, r1]],
22 |             for_intra_batch: [[l2, r2], [l3, r3]],
23 |         }
24 |     }
25 | 
26 |     // This takes &mut to enforce that it is not used in parallel with other methods.
27 |     pub fn for_state_check(&mut self) -> BothEyes<&HawkSession> {
28 |         [&self.for_search[0][LEFT][0], &self.for_search[0][RIGHT][0]]
29 |     }
30 | 
31 |     pub fn for_mutations(&mut self, orient: Orientation) -> &BothEyes<Vec<HawkSession>> {
32 |         &self.for_search[orient as usize]
33 |     }
34 | 
35 |     pub fn for_search(&self, orient: Orientation) -> &BothEyes<Vec<HawkSession>> {
36 |         &self.for_search[orient as usize]
37 |     }
38 | 
39 |     pub fn for_intra_batch(&self, orient: Orientation) -> BothEyes<Vec<HawkSession>> {
40 |         self.for_intra_batch[orient as usize].clone()
41 |     }
42 | }
43 | 
44 | fn split_in_four<T>(mut a: Vec<T>) -> [Vec<T>; 4] {
45 |     let n = a.len();
46 |     assert!(n % 4 == 0, "Expected length to be divisible by 4, got {n}");
47 |     let quarter = n / 4;
48 | 
49 |     let d = a.split_off(quarter * 3);
50 |     let c = a.split_off(quarter * 2);
51 |     let b = a.split_off(quarter);
52 |     [a, b, c, d]
53 | }
54 | 
-------------------------------------------------------------------------------- /iris-mpc/src/services/aws/clients.rs: --------------------------------------------------------------------------------
1 | use crate::services::aws::s3::create_s3_client;
2 | use crate::services::aws::sqs::create_sqs_client;
3 | use aws_sdk_s3::Client as S3Client;
4 | use aws_sdk_secretsmanager::Client as SecretsManagerClient;
5 | use aws_sdk_sns::Client as SNSClient;
6 | use aws_sdk_sqs::{config::Region, Client as SQSClient};
7 | use eyre::Result;
8 | use iris_mpc_common::config::{Config, ENV_PROD, ENV_STAGE};
9 | 
10 | const DEFAULT_REGION: &str = "eu-north-1";
11 | 
12 | pub struct AwsClients {
13 |     pub sqs_client: SQSClient,
14 |     pub sns_client: SNSClient,
15 |     pub s3_client: S3Client,
16 |     pub secrets_manager_client: SecretsManagerClient,
17 | }
18 | 
19 | impl AwsClients {
20 |     pub async fn new(config: &Config) -> Result<Self> {
21 |         // Get region from config or use default
22 |         let region = config
23 |             .clone()
24 |             .aws
25 |             .and_then(|aws| aws.region)
26 |             .unwrap_or_else(|| DEFAULT_REGION.to_owned());
27 | 
28 |         let region_provider = Region::new(region);
29 |         let shared_config = aws_config::from_env().region(region_provider).load().await;
30 | 
31 |         let force_path_style = config.environment != ENV_PROD && config.environment != ENV_STAGE;
32 | 
33 |         let sns_client = SNSClient::new(&shared_config);
34 |         let sqs_client = create_sqs_client(&shared_config, config.sqs_long_poll_wait_time);
35 |         let s3_client = create_s3_client(&shared_config, force_path_style);
36 |         let secrets_manager_client = SecretsManagerClient::new(&shared_config);
37 | 
38 |         Ok(Self {
39 |             sqs_client,
40 |             sns_client,
41 |             s3_client,
42 |             secrets_manager_client,
43 |         })
44 |     }
45 | }
46 | 
47 | // implement clone for AwsClients
48 | impl Clone for AwsClients {
49 |     fn clone(&self) -> Self {
50 |         Self {
51 |             sqs_client: self.sqs_client.clone(),
52 |             sns_client: self.sns_client.clone(),
53 |             s3_client: self.s3_client.clone(),
54 |             secrets_manager_client: self.secrets_manager_client.clone(),
55 |         }
56 |     }
57 | }
58 | 
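// Illustrative usage sketch (hypothetical helper; assumes a `Config` parsed
// elsewhere, e.g. from the SMPC__* environment): all four SDK clients are
// derived from one shared AWS configuration and are cheap to clone.
#[allow(dead_code)]
async fn example_bootstrap(config: &Config) -> Result<AwsClients> {
    let aws = AwsClients::new(config).await?;
    // Hand the individual clients to the services that need them.
    let _sqs = aws.sqs_client.clone();
    let _s3 = aws.s3_client.clone();
    Ok(aws)
}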
-------------------------------------------------------------------------------- /iris-mpc-common/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iris-mpc-common" 3 | version = "0.1.0" 4 | publish = false 5 | 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | [features] 11 | default = ["helpers"] 12 | helpers = [ 13 | "dep:aws-config", 14 | "dep:aws-sdk-kms", 15 | "dep:aws-sdk-sns", 16 | "dep:aws-sdk-sqs", 17 | "dep:aws-sdk-s3", 18 | "dep:aws-sdk-secretsmanager", 19 | "dep:reqwest", 20 | ] 21 | 22 | [dependencies] 23 | aws-config = { workspace = true, optional = true } 24 | aws-sdk-kms = { workspace = true, optional = true } 25 | aws-sdk-sns = { workspace = true, optional = true } 26 | aws-sdk-sqs = { workspace = true, optional = true } 27 | aws-sdk-s3 = { workspace = true, optional = true } 28 | aws-sdk-secretsmanager = { workspace = true, optional = true } 29 | clap.workspace = true 30 | rand.workspace = true 31 | bytemuck.workspace = true 32 | eyre.workspace = true 33 | thiserror.workspace = true 34 | rayon.workspace = true 35 | itertools.workspace = true 36 | base64.workspace = true 37 | serde.workspace = true 38 | serde_json.workspace = true 39 | tokio.workspace = true 40 | tracing.workspace = true 41 | tracing-subscriber.workspace = true 42 | uuid.workspace = true 43 | sqlx.workspace = true 44 | telemetry-batteries.workspace = true 45 | config = { version = "0.15.19", default-features = false, features = ["json"] } 46 | chrono = { version = "0.4.38", features = ["serde"] } 47 | reqwest = { workspace = true, optional = true, features = ["blocking", "json"] } 48 | sodiumoxide = "0.2.7" 49 | hmac = "0.12" 50 | http = "1.1.0" 51 | percent-encoding = "2" 52 | sha2 = "0.10" 53 | time = { version = "^0.3.6", features = ["formatting", "macros"] } 54 | url = "2" 55 | hex.workspace = true 56 | zeroize = "1.8.1" 57 | wiremock = "0.6.1" 58 | bincode.workspace = true 59 | serde-big-array.workspace = true 60 | metrics = "0.22.1" 61 | metrics-exporter-statsd = "0.7" 62 | num_cpus = "1.17.0" 63 | ampc-secret-sharing.workspace = true 64 | ampc-actor-utils.workspace = true 65 | ampc-server-utils.workspace = true 66 | ampc-anon-stats.workspace = true 67 | 68 | [dev-dependencies] 69 | float_eq = "1" 70 | aws-credential-types = "1.2.1" 71 | -------------------------------------------------------------------------------- /iris-mpc-py/src/py_hnsw/pyclasses/plaintext_store.rs: -------------------------------------------------------------------------------- 1 | use std::{path::Path, sync::Arc}; 2 | 3 | use super::iris_code::PyIrisCode; 4 | use iris_mpc_cpu::{ 5 | hawkers::plaintext_store::PlaintextStore, utils::serialization::iris_ndjson::IrisSelection, 6 | }; 7 | use pyo3::{exceptions::PyIOError, prelude::*}; 8 | 9 | #[pyclass] 10 | #[derive(Clone, Default)] 11 | pub struct PyPlaintextStore(pub PlaintextStore); 12 | 13 | #[pymethods] 14 | impl PyPlaintextStore { 15 | #[new] 16 | pub fn new() -> Self { 17 | Self::default() 18 | } 19 | 20 | pub fn get(&self, id: u32) -> PyIrisCode { 21 | (self 22 | .0 23 | .storage 24 | .get_vector_by_serial_id(id) 25 | .unwrap() 26 | .as_ref() 27 | .clone()) 28 | .into() 29 | } 30 | 31 | pub fn eval_distance_to_id(&self, lhs: PyIrisCode, rhs: u32) -> (u16, u16) { 32 | let iris_rhs = self.get(rhs); 33 | lhs.get_distance_fraction(iris_rhs) 34 | } 35 | 36 | pub fn eval_distance(&self, lhs: u32, rhs: u32) -> (u16, u16) { 37 | let iris_lhs = self.get(lhs); 38 | 
self.eval_distance_to_id(iris_lhs, rhs)
39 |     }
40 | 
41 |     pub fn insert(&mut self, iris: PyIrisCode) -> u32 {
42 |         self.0.storage.append(Arc::new(iris.0)).serial_id()
43 |     }
44 | 
45 |     pub fn len(&self) -> usize {
46 |         self.0.len()
47 |     }
48 | 
49 |     pub fn is_empty(&self) -> bool {
50 |         self.0.storage.get_points().is_empty()
51 |     }
52 | 
53 |     #[staticmethod]
54 |     #[pyo3(signature = (filename, len=None))]
55 |     pub fn read_from_ndjson(filename: String, len: Option<usize>) -> PyResult<Self> {
56 |         let result =
57 |             PlaintextStore::from_ndjson_file(Path::new(&filename), len, IrisSelection::All)
58 |                 .map_err(|_| PyIOError::new_err("Unable to read from file"))?;
59 |         Ok(Self(result))
60 |     }
61 | 
62 |     pub fn write_to_ndjson(&self, filename: String) -> PyResult<()> {
63 |         self.0
64 |             .to_ndjson_file(Path::new(&filename))
65 |             .map_err(|_| PyIOError::new_err("Unable to write to file"))
66 |     }
67 | }
68 | 
--------------------------------------------------------------------------------