├── migrations
│   ├── postgres
│   │   ├── 0002_add_subscriber.down.sql
│   │   ├── 0002_add_subscriber.up.sql
│   │   ├── 0001_init.down.sql
│   │   ├── 0005_remove_file_path.up.sql
│   │   ├── 0005_remove_file_path.down.sql
│   │   ├── 0003_email_verification.down.sql
│   │   ├── 0004_add_file_data_blob.down.sql
│   │   ├── 0006_device_auth.down.sql
│   │   ├── 0004_add_file_data_blob.up.sql
│   │   ├── 0003_email_verification.up.sql
│   │   ├── 0006_device_auth.up.sql
│   │   └── 0001_init.up.sql
│   └── migrations.go
├── web
│   ├── postcss.config.js
│   ├── src
│   │   ├── Landing.jsx
│   │   ├── messages.proto
│   │   ├── ConfigContext.jsx
│   │   ├── SignupSuccess.jsx
│   │   ├── main.jsx
│   │   ├── DarkModeContext.jsx
│   │   ├── index.css
│   │   ├── VerifyEmail.jsx
│   │   ├── DeviceAuth.jsx
│   │   ├── encryption.js
│   │   ├── AuthContext.jsx
│   │   └── Navbar.jsx
│   ├── public
│   │   ├── robots.txt
│   │   └── sitemap.xml
│   ├── package.json
│   ├── vite.config.js
│   └── index.html
├── sqlc.yaml
├── package.json
├── Dockerfile
├── .dockerignore
├── src
│   ├── db
│   │   ├── db.go
│   │   ├── schema.sql
│   │   ├── models.go
│   │   └── queries.sql
│   ├── client
│   │   ├── network.go
│   │   ├── metadata.go
│   │   ├── protobuf.go
│   │   ├── client_test.go
│   │   └── metadata_test.go
│   ├── relay
│   │   ├── messages.proto
│   │   ├── zero_knowledge_test.go
│   │   ├── protobuf.go
│   │   ├── gzip.go
│   │   ├── api_routes.go
│   │   ├── compatibility_test.go
│   │   ├── iconwords.go
│   │   └── database.go
│   ├── qrcode
│   │   ├── qrcode.go
│   │   └── qrcode_test.go
│   ├── api
│   │   └── middleware.go
│   └── crypto
│       └── crypto.go
├── .air.toml
├── go.mod
├── LICENSE
├── docs
│   ├── DATABASE_SETUP.md
│   └── ENCRYPTION.md
├── Dockerfile.build
├── .github
│   └── workflows
│       ├── dockerdeploy.yml
│       └── homebrew.yml
├── playwright.config.js
├── Makefile
├── install.sh
├── tests
│   ├── run-tests.sh
│   ├── README.md
│   ├── cli-to-web-text.spec.js
│   ├── web-text-transfer.spec.js
│   ├── cli-to-web.spec.js
│   ├── web-to-web.spec.js
│   └── web-to-cli-text.spec.js
├── .gitignore
├── TESTING.md
├── README.md
├── main.go
└── main_test.go
/migrations/postgres/0002_add_subscriber.down.sql:
--------------------------------------------------------------------------------
1 | ALTER TABLE users DROP COLUMN IF EXISTS subscriber;
2 | 
--------------------------------------------------------------------------------
/web/postcss.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 |   plugins: {
3 |     '@tailwindcss/postcss': {},
4 |   },
5 | }
6 | 
--------------------------------------------------------------------------------
/migrations/postgres/0002_add_subscriber.up.sql:
--------------------------------------------------------------------------------
1 | ALTER TABLE users ADD COLUMN subscriber INTEGER NOT NULL DEFAULT 0;
2 | 
--------------------------------------------------------------------------------
/migrations/postgres/0001_init.down.sql:
--------------------------------------------------------------------------------
1 | DROP TABLE IF EXISTS files;
2 | DROP TABLE IF EXISTS users;
3 | DROP TABLE IF EXISTS logs;
4 | 
--------------------------------------------------------------------------------
/web/src/Landing.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import App from "./App";
3 | 
4 | export default function Landing() {
5 |   return <App />;
6 | }
7 | 
--------------------------------------------------------------------------------
/web/public/robots.txt:
--------------------------------------------------------------------------------
1 | # Allow all bots to crawl all pages
2 | User-agent: *
3 | Allow: /
4 | 
5 | # Sitemap location
6 | Sitemap: https://e2ecp.com/sitemap.xml
7 | 
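The migration files above (and the rest of `migrations/postgres/` in the tree) use golang-migrate's paired `NNNN_name.up.sql` / `NNNN_name.down.sql` convention, and `migrations/migrations.go` later in this dump embeds them with `go:embed`. A minimal sketch of how such an embedded set could be applied programmatically at startup, assuming golang-migrate's `iofs` source driver — the documented workflow in docs/DATABASE_SETUP.md uses the `migrate` CLI instead, and `applyMigrations` plus the connection string are illustrative names, not code from this repository:

```go
package main

import (
	"errors"
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres" // registers the postgres:// driver
	"github.com/golang-migrate/migrate/v4/source/iofs"

	"github.com/schollz/e2ecp/migrations"
)

// applyMigrations runs every pending *.up.sql file from the embedded
// migrations/postgres directory against the given PostgreSQL database.
func applyMigrations(databaseURL string) error {
	src, err := iofs.New(migrations.FS, "postgres")
	if err != nil {
		return err
	}
	m, err := migrate.NewWithSourceInstance("iofs", src, databaseURL)
	if err != nil {
		return err
	}
	// ErrNoChange only means the schema is already current, so treat it as success.
	if err := m.Up(); err != nil && !errors.Is(err, migrate.ErrNoChange) {
		return err
	}
	return nil
}

func main() {
	// Placeholder DSN; in this project the URL would come from DATABASE_URL (see docs/DATABASE_SETUP.md).
	if err := applyMigrations("postgres://user:password@localhost:5432/e2ecp?sslmode=disable"); err != nil {
		log.Fatal(err)
	}
}
```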
-------------------------------------------------------------------------------- /migrations/postgres/0005_remove_file_path.up.sql: -------------------------------------------------------------------------------- 1 | -- Remove the file_path column as files are now stored in the database 2 | ALTER TABLE files DROP COLUMN file_path; 3 | -------------------------------------------------------------------------------- /migrations/postgres/0005_remove_file_path.down.sql: -------------------------------------------------------------------------------- 1 | -- Restore file_path column (will be empty for database-stored files) 2 | ALTER TABLE files ADD COLUMN file_path TEXT; 3 | -------------------------------------------------------------------------------- /migrations/postgres/0003_email_verification.down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_users_verification_token; 2 | ALTER TABLE users DROP COLUMN IF EXISTS verification_token; 3 | ALTER TABLE users DROP COLUMN IF EXISTS verified; 4 | -------------------------------------------------------------------------------- /migrations/postgres/0004_add_file_data_blob.down.sql: -------------------------------------------------------------------------------- 1 | -- Remove blob column 2 | ALTER TABLE files DROP COLUMN IF EXISTS file_data; 3 | 4 | -- Restore file_path NOT NULL constraint 5 | ALTER TABLE files ALTER COLUMN file_path SET NOT NULL; 6 | -------------------------------------------------------------------------------- /migrations/migrations.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import "embed" 4 | 5 | // FS contains all SQL migration files embedded for runtime migration execution. 
6 | // Organized by database type: postgres/ subdirectory only 7 | // 8 | //go:embed postgres/*.sql 9 | var FS embed.FS 10 | -------------------------------------------------------------------------------- /migrations/postgres/0006_device_auth.down.sql: -------------------------------------------------------------------------------- 1 | -- Rollback device authentication 2 | DROP INDEX IF EXISTS idx_device_auth_expires_at; 3 | DROP INDEX IF EXISTS idx_device_auth_user_code; 4 | DROP INDEX IF EXISTS idx_device_auth_device_code; 5 | DROP TABLE IF EXISTS device_auth_sessions; 6 | -------------------------------------------------------------------------------- /migrations/postgres/0004_add_file_data_blob.up.sql: -------------------------------------------------------------------------------- 1 | -- Add blob column to store encrypted file data directly in database 2 | ALTER TABLE files ADD COLUMN file_data BYTEA; 3 | 4 | -- Make file_path nullable since we'll be storing data in the database 5 | ALTER TABLE files ALTER COLUMN file_path DROP NOT NULL; 6 | -------------------------------------------------------------------------------- /sqlc.yaml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | sql: 3 | - engine: "postgresql" 4 | queries: "src/db/queries.sql" 5 | schema: "migrations/postgres/*.up.sql" 6 | gen: 7 | go: 8 | package: "db" 9 | out: "src/db" 10 | emit_json_tags: true 11 | emit_prepared_queries: false 12 | emit_interface: false 13 | emit_exact_table_names: false 14 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "e2ecp-e2e-tests", 3 | "version": "1.0.0", 4 | "private": true, 5 | "scripts": { 6 | "test:e2e": "playwright test", 7 | "test:e2e:headed": "playwright test --headed", 8 | "test:e2e:debug": "playwright test --debug", 9 | "test:report": "playwright show-report" 10 | }, 11 | "devDependencies": { 12 | "@playwright/test": "^1.57.0" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /migrations/postgres/0003_email_verification.up.sql: -------------------------------------------------------------------------------- 1 | -- Add email verification columns to users table (PostgreSQL) 2 | ALTER TABLE users ADD COLUMN IF NOT EXISTS verified INTEGER NOT NULL DEFAULT 0; 3 | ALTER TABLE users ADD COLUMN IF NOT EXISTS verification_token TEXT; 4 | 5 | -- Add unique constraint on verification_token 6 | CREATE UNIQUE INDEX IF NOT EXISTS idx_users_verification_token ON users(verification_token) WHERE verification_token IS NOT NULL; 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for e2ecp relay server 2 | # This is a runtime-only Dockerfile that uses a pre-built binary 3 | FROM scratch 4 | 5 | # Copy the pre-built binary 6 | COPY e2ecp /e2ecp 7 | 8 | # Expose the default port 9 | EXPOSE 3001 10 | 11 | # Run the relay server with default settings 12 | # Override with: docker run -p PORT:PORT e2ecp-relay --port PORT [other flags] 13 | ENTRYPOINT ["/e2ecp", "serve"] 14 | CMD ["--port", "3001", "--max-rooms", "10", "--max-rooms-per-ip", "2", "--log-level", "info"] 15 | -------------------------------------------------------------------------------- /.dockerignore: 
-------------------------------------------------------------------------------- 1 | # Git and IDE 2 | .git 3 | .gitignore 4 | .vscode 5 | .idea 6 | 7 | # Build artifacts (but keep e2ecp for Docker build) 8 | share 9 | *.exe 10 | 11 | # Node modules (will be installed in container) 12 | node_modules 13 | web/node_modules 14 | 15 | # Test artifacts 16 | test-results 17 | playwright-report 18 | tests/**/test-*.txt 19 | tests/**/downloaded-*.txt 20 | tests/cli-output 21 | 22 | # Temporary files 23 | tmp 24 | *.log 25 | .env 26 | .env.* 27 | 28 | # Documentation (not needed in Docker images) 29 | *.md 30 | 31 | # CI/CD 32 | .github 33 | -------------------------------------------------------------------------------- /web/public/sitemap.xml: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 8 | 9 | https://e2ecp.com/ 10 | 2025-11-19 11 | weekly 12 | 1.0 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /src/db/db.go: -------------------------------------------------------------------------------- 1 | // Code generated by sqlc. DO NOT EDIT. 2 | // versions: 3 | // sqlc v1.29.0 4 | 5 | package db 6 | 7 | import ( 8 | "context" 9 | "database/sql" 10 | ) 11 | 12 | type DBTX interface { 13 | ExecContext(context.Context, string, ...interface{}) (sql.Result, error) 14 | PrepareContext(context.Context, string) (*sql.Stmt, error) 15 | QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) 16 | QueryRowContext(context.Context, string, ...interface{}) *sql.Row 17 | } 18 | 19 | func New(db DBTX) *Queries { 20 | return &Queries{db: db} 21 | } 22 | 23 | type Queries struct { 24 | db DBTX 25 | } 26 | 27 | func (q *Queries) WithTx(tx *sql.Tx) *Queries { 28 | return &Queries{ 29 | db: tx, 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /migrations/postgres/0006_device_auth.up.sql: -------------------------------------------------------------------------------- 1 | -- Device authentication sessions for CLI 2 | CREATE TABLE IF NOT EXISTS device_auth_sessions ( 3 | id BIGSERIAL PRIMARY KEY, 4 | device_code TEXT NOT NULL UNIQUE, 5 | user_code TEXT NOT NULL UNIQUE, 6 | user_id BIGINT, 7 | approved BOOLEAN NOT NULL DEFAULT FALSE, 8 | token TEXT, 9 | expires_at TIMESTAMP NOT NULL, 10 | created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, 11 | FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE 12 | ); 13 | 14 | CREATE INDEX IF NOT EXISTS idx_device_auth_device_code ON device_auth_sessions(device_code); 15 | CREATE INDEX IF NOT EXISTS idx_device_auth_user_code ON device_auth_sessions(user_code); 16 | CREATE INDEX IF NOT EXISTS idx_device_auth_expires_at ON device_auth_sessions(expires_at); 17 | -------------------------------------------------------------------------------- /.air.toml: -------------------------------------------------------------------------------- 1 | root = "." 
2 | testdata_dir = "testdata" 3 | tmp_dir = "tmp" 4 | 5 | [build] 6 | args_bin = [] 7 | bin = "./e2ecp --log-level debug serve" 8 | cmd = "make server" 9 | delay = 0 10 | exclude_dir = ["assets", "tmp", "vendor", "testdata", "web/dist"] 11 | exclude_file = [] 12 | exclude_regex = ["_test.go"] 13 | exclude_unchanged = false 14 | follow_symlink = false 15 | full_bin = "" 16 | include_dir = [] 17 | include_ext = ["go", "tpl", "tmpl", "html", "jsx", "css"] 18 | include_file = [] 19 | kill_delay = "3s" 20 | log = "build-errors.log" 21 | poll = false 22 | poll_interval = 0 23 | rerun = false 24 | rerun_delay = 500 25 | send_interrupt = true 26 | stop_on_error = false 27 | 28 | [color] 29 | app = "" 30 | build = "yellow" 31 | main = "magenta" 32 | runner = "green" 33 | watcher = "cyan" 34 | 35 | [log] 36 | main_only = false 37 | time = false 38 | 39 | [misc] 40 | clean_on_exit = false 41 | 42 | [screen] 43 | clear_on_rebuild = false 44 | keep_scroll = true 45 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/schollz/e2ecp 2 | 3 | go 1.25 4 | 5 | require ( 6 | github.com/golang-jwt/jwt/v5 v5.3.0 7 | github.com/golang-migrate/migrate/v4 v4.19.0 8 | github.com/google/uuid v1.6.0 9 | github.com/gorilla/websocket v1.5.3 10 | github.com/joho/godotenv v1.5.1 11 | github.com/lib/pq v1.10.9 12 | github.com/rs/cors v1.11.1 13 | github.com/schollz/progressbar/v3 v3.18.0 14 | github.com/spf13/cobra v1.10.1 15 | golang.org/x/crypto v0.45.0 16 | golang.org/x/term v0.37.0 17 | google.golang.org/protobuf v1.36.10 18 | rsc.io/qr v0.2.0 19 | ) 20 | 21 | require ( 22 | github.com/hashicorp/errwrap v1.1.0 // indirect 23 | github.com/hashicorp/go-multierror v1.1.1 // indirect 24 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 25 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect 26 | github.com/rivo/uniseg v0.4.7 // indirect 27 | github.com/spf13/pflag v1.0.10 // indirect 28 | golang.org/x/sys v0.38.0 // indirect 29 | ) 30 | -------------------------------------------------------------------------------- /src/client/network.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "net" 5 | ) 6 | 7 | // GetLocalIPAddresses returns all non-loopback local IP addresses 8 | func GetLocalIPAddresses() ([]string, error) { 9 | var localIPs []string 10 | 11 | interfaces, err := net.Interfaces() 12 | if err != nil { 13 | return nil, err 14 | } 15 | 16 | for _, iface := range interfaces { 17 | // Skip loopback and down interfaces 18 | if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 { 19 | continue 20 | } 21 | 22 | addrs, err := iface.Addrs() 23 | if err != nil { 24 | continue 25 | } 26 | 27 | for _, addr := range addrs { 28 | var ip net.IP 29 | switch v := addr.(type) { 30 | case *net.IPNet: 31 | ip = v.IP 32 | case *net.IPAddr: 33 | ip = v.IP 34 | } 35 | 36 | // Skip loopback IPs and IPv6 addresses (for simplicity) 37 | if ip != nil && !ip.IsLoopback() && ip.To4() != nil { 38 | localIPs = append(localIPs, ip.String()) 39 | } 40 | } 41 | } 42 | 43 | return localIPs, nil 44 | } 45 | -------------------------------------------------------------------------------- /web/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "e2ecp-client", 3 | "version": "1.0.0", 4 | "private": true, 5 | "scripts": { 6 | 
"dev": "vite --strictPort --port 5173", 7 | "build": "vite build", 8 | "preview": "vite preview" 9 | }, 10 | "dependencies": { 11 | "@fontsource/monaspace-neon": "^5.2.5", 12 | "@fontsource/monaspace-radon": "^5.2.5", 13 | "@fortawesome/fontawesome-free": "^7.1.0", 14 | "jszip": "^3.10.1", 15 | "protobufjs": "^7.5.4", 16 | "qrcode.react": "^4.2.0", 17 | "react": "^19.2.0", 18 | "react-dom": "^19.2.0", 19 | "react-hot-toast": "^2.6.0", 20 | "react-router-dom": "^7.9.6", 21 | "react-syntax-highlighter": "^16.1.0" 22 | }, 23 | "devDependencies": { 24 | "@playwright/test": "^1.57.0", 25 | "@tailwindcss/postcss": "^4.1.17", 26 | "@vitejs/plugin-react": "^5.1.1", 27 | "postcss": "^8.5.6", 28 | "tailwindcss": "^4.1.17", 29 | "vite": "^7.2.4", 30 | "vite-plugin-compression2": "^2.3.1" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Zack 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /src/db/schema.sql: -------------------------------------------------------------------------------- 1 | -- Users table for authentication 2 | CREATE TABLE IF NOT EXISTS users ( 3 | id INTEGER PRIMARY KEY AUTOINCREMENT, 4 | email TEXT NOT NULL UNIQUE, 5 | password_hash TEXT NOT NULL, 6 | encryption_salt TEXT NOT NULL, 7 | created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, 8 | updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP 9 | ); 10 | 11 | -- Files table for user uploaded files 12 | CREATE TABLE IF NOT EXISTS files ( 13 | id INTEGER PRIMARY KEY AUTOINCREMENT, 14 | user_id INTEGER NOT NULL, 15 | encrypted_filename TEXT NOT NULL, 16 | file_path TEXT NOT NULL, 17 | file_size INTEGER NOT NULL, 18 | encrypted_key TEXT NOT NULL, 19 | share_token TEXT UNIQUE, 20 | download_count INTEGER NOT NULL DEFAULT 0, 21 | created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, 22 | updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, 23 | FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE 24 | ); 25 | 26 | -- Indexes for better query performance 27 | CREATE INDEX IF NOT EXISTS idx_users_email ON users(email); 28 | CREATE INDEX IF NOT EXISTS idx_files_user_id ON files(user_id); 29 | CREATE INDEX IF NOT EXISTS idx_files_share_token ON files(share_token); 30 | -------------------------------------------------------------------------------- /docs/DATABASE_SETUP.md: -------------------------------------------------------------------------------- 1 | # Database Configuration (PostgreSQL Only) 2 | 3 | The relay now uses **PostgreSQL exclusively**. SQLite support and migrations have been removed. 4 | 5 | ## Environment 6 | 7 | Set a connection string in `DATABASE_URL` (or pass `--db-url` to `serve`): 8 | 9 | ```env 10 | DATABASE_URL=postgresql://user:password@host:5432/database?sslmode=require 11 | ``` 12 | 13 | ## Migrations 14 | 15 | Apply migrations with the Make target (loads `.env` automatically): 16 | 17 | ```bash 18 | make migrate 19 | ``` 20 | 21 | Manual run: 22 | 23 | ```bash 24 | migrate -path migrations/postgres -database "$DATABASE_URL" up 25 | ``` 26 | 27 | Create a new migration: 28 | 29 | ```bash 30 | make migrate:create name=add_table 31 | ``` 32 | 33 | This adds timestamped files under `migrations/postgres/`. 34 | 35 | ## Starting the Server 36 | 37 | ```bash 38 | # Uses DATABASE_URL from the environment if --db-url is omitted 39 | ./e2ecp serve --db-url "$DATABASE_URL" 40 | ``` 41 | 42 | If no URL is provided, session logging is disabled. 43 | 44 | ## Dependencies 45 | 46 | - PostgreSQL driver: `github.com/lib/pq` 47 | - Migrations: `github.com/golang-migrate/migrate/v4` (install CLI with `go install -tags 'postgres' github.com/golang-migrate/migrate/v4/cmd/migrate@latest`) 48 | -------------------------------------------------------------------------------- /src/relay/messages.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package relay; 4 | 5 | option go_package = "github.com/schollz/share/src/relay"; 6 | 7 | // PBIncomingMessage represents messages received by the relay server from clients 8 | message PBIncomingMessage { 9 | string type = 1; 10 | string room_id = 2; 11 | string client_id = 3; 12 | string pub = 4; 13 | string iv_b64 = 7; 14 | string data_b64 = 8; 15 | string chunk_data = 9; 16 | int32 chunk_num = 10; 17 | string encrypted_metadata = 20; // Encrypted metadata (file info, local relay info, etc.) 
18 | string metadata_iv = 21; // IV for encrypted_metadata 19 | } 20 | 21 | // PBOutgoingMessage represents messages sent by the relay server to clients 22 | message PBOutgoingMessage { 23 | string type = 1; 24 | string from = 2; 25 | string mnemonic = 3; 26 | string room_id = 4; 27 | string pub = 5; 28 | string iv_b64 = 8; 29 | string data_b64 = 9; 30 | string chunk_data = 10; 31 | int32 chunk_num = 11; 32 | string self_id = 13; 33 | repeated string peers = 14; 34 | int32 count = 15; 35 | string error = 16; 36 | string encrypted_metadata = 20; // Encrypted metadata (file info, local relay info, etc.) 37 | string metadata_iv = 21; // IV for encrypted_metadata 38 | string peer_id = 22; // ID of disconnected peer 39 | } 40 | -------------------------------------------------------------------------------- /web/src/messages.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package relay; 4 | 5 | option go_package = "github.com/schollz/share/src/relay"; 6 | 7 | // PBIncomingMessage represents messages received by the relay server from clients 8 | message PBIncomingMessage { 9 | string type = 1; 10 | string room_id = 2; 11 | string client_id = 3; 12 | string pub = 4; 13 | string iv_b64 = 7; 14 | string data_b64 = 8; 15 | string chunk_data = 9; 16 | int32 chunk_num = 10; 17 | string encrypted_metadata = 20; // Encrypted metadata (name, total_size, is_folder, etc.) 18 | string metadata_iv = 21; // IV for encrypted_metadata 19 | } 20 | 21 | // PBOutgoingMessage represents messages sent by the relay server to clients 22 | message PBOutgoingMessage { 23 | string type = 1; 24 | string from = 2; 25 | string mnemonic = 3; 26 | string room_id = 4; 27 | string pub = 5; 28 | string iv_b64 = 8; 29 | string data_b64 = 9; 30 | string chunk_data = 10; 31 | int32 chunk_num = 11; 32 | string self_id = 13; 33 | repeated string peers = 14; 34 | int32 count = 15; 35 | string error = 16; 36 | string encrypted_metadata = 20; // Encrypted metadata (name, total_size, is_folder, etc.) 
37 | string metadata_iv = 21; // IV for encrypted_metadata 38 | string peer_id = 22; // ID of disconnected peer 39 | } 40 | -------------------------------------------------------------------------------- /src/qrcode/qrcode.go: -------------------------------------------------------------------------------- 1 | package qrcode 2 | 3 | import ( 4 | "io" 5 | "strings" 6 | 7 | "rsc.io/qr" 8 | ) 9 | 10 | // PrintHalfBlock prints a QR code using half-block characters for compact display 11 | // leftPadding specifies the number of spaces to add before each line 12 | func PrintHalfBlock(w io.Writer, text string, leftPadding int) error { 13 | // Generate QR code with low error correction for smaller size 14 | code, err := qr.Encode(text, qr.L) 15 | if err != nil { 16 | return err 17 | } 18 | 19 | size := code.Size 20 | padding := strings.Repeat(" ", leftPadding) 21 | 22 | // Print QR code using half-blocks (each char represents 2 vertical pixels) 23 | for y := 0; y < size; y += 2 { 24 | // Write left padding 25 | w.Write([]byte(padding)) 26 | 27 | for x := 0; x < size; x++ { 28 | topBlack := code.Black(x, y) 29 | bottomBlack := false 30 | if y+1 < size { 31 | bottomBlack = code.Black(x, y+1) 32 | } 33 | 34 | // Choose character based on which pixels are black 35 | switch { 36 | case topBlack && bottomBlack: 37 | w.Write([]byte("█")) // Both black 38 | case topBlack && !bottomBlack: 39 | w.Write([]byte("▀")) // Top black 40 | case !topBlack && bottomBlack: 41 | w.Write([]byte("▄")) // Bottom black 42 | default: 43 | w.Write([]byte(" ")) // Both white 44 | } 45 | } 46 | w.Write([]byte("\n")) 47 | } 48 | 49 | return nil 50 | } 51 | -------------------------------------------------------------------------------- /Dockerfile.build: -------------------------------------------------------------------------------- 1 | # Multi-stage build for e2ecp relay server 2 | # This Dockerfile builds the entire application from source 3 | # 4 | # Build with: docker build -f Dockerfile.build -t e2ecp-relay:latest . 5 | # Run with: docker run -p 3001:3001 e2ecp-relay:latest 6 | 7 | # Stage 1: Build the web assets 8 | FROM node:24-alpine AS web-builder 9 | 10 | WORKDIR /build/web 11 | COPY web/package-lock.json . 12 | COPY web/package.json . 13 | RUN npm install 14 | 15 | COPY web/ ./ 16 | RUN npm run build 17 | 18 | # Stage 2: Build the Go binary 19 | FROM golang:1.25-alpine AS go-builder 20 | 21 | # Install ca-certificates for downloading Go modules 22 | RUN apk --no-cache add ca-certificates 23 | 24 | WORKDIR /build 25 | COPY go.mod go.sum ./ 26 | RUN go mod download 27 | 28 | COPY . . 29 | COPY --from=web-builder /build/web/dist ./web/dist 30 | RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-w -s -extldflags '-static'" -o e2ecp . 
31 | 32 | # Stage 3: Final runtime image 33 | FROM scratch 34 | 35 | # Copy the binary from the builder stage 36 | COPY --from=go-builder /build/e2ecp /e2ecp 37 | 38 | # Expose the default port 39 | EXPOSE 3001 40 | 41 | # Run the relay server with default settings 42 | # Override with: docker run -p PORT:PORT e2ecp-relay:latest --port PORT [other flags] 43 | ENTRYPOINT ["/e2ecp", "serve"] 44 | CMD ["--port", "3001", "--max-rooms", "10", "--max-rooms-per-ip", "2", "--log-level", "info"] 45 | -------------------------------------------------------------------------------- /.github/workflows/dockerdeploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Docker 2 | 3 | on: 4 | release: 5 | types: [created] 6 | workflow_dispatch: 7 | 8 | jobs: 9 | docker: 10 | runs-on: ubuntu-24.04 11 | steps: 12 | - name: Checkout repository 13 | uses: actions/checkout@v5 14 | 15 | - name: Docker meta 16 | id: meta 17 | uses: docker/metadata-action@v5 18 | with: 19 | images: schollz/e2ecp 20 | tags: | 21 | type=ref,event=branch 22 | type=ref,event=pr 23 | type=semver,pattern={{version}} 24 | type=semver,pattern={{major}}.{{minor}} 25 | type=semver,pattern={{major}} 26 | type=sha 27 | 28 | - name: Setup QEMU 29 | uses: docker/setup-qemu-action@v3 30 | 31 | - name: Set up Docker Buildx 32 | uses: docker/setup-buildx-action@v3 33 | 34 | - name: Login to DockerHub 35 | if: github.event_name != 'pull_request' 36 | uses: docker/login-action@v3 37 | with: 38 | username: ${{ secrets.DOCKERHUB_USERNAME }} 39 | password: ${{ secrets.DOCKERHUB_TOKEN }} 40 | 41 | - name: Build and push 42 | uses: docker/build-push-action@v6 43 | with: 44 | context: . 45 | file: ./Dockerfile.build 46 | platforms: linux/amd64 47 | push: ${{ github.event_name != 'pull_request' }} 48 | tags: ${{ steps.meta.outputs.tags }} 49 | labels: ${{ steps.meta.outputs.labels }} 50 | -------------------------------------------------------------------------------- /web/src/ConfigContext.jsx: -------------------------------------------------------------------------------- 1 | import React, { createContext, useContext, useEffect, useState } from "react"; 2 | 3 | const ConfigContext = createContext(null); 4 | 5 | export const useConfig = () => { 6 | const ctx = useContext(ConfigContext); 7 | if (!ctx) { 8 | throw new Error("useConfig must be used within a ConfigProvider"); 9 | } 10 | return ctx; 11 | }; 12 | 13 | export const ConfigProvider = ({ children }) => { 14 | const [storageEnabled, setStorageEnabled] = useState(false); 15 | const [loading, setLoading] = useState(true); 16 | const [freeLimitBytes, setFreeLimitBytes] = useState(null); 17 | 18 | useEffect(() => { 19 | const loadConfig = async () => { 20 | try { 21 | const res = await fetch("/api/config"); 22 | if (!res.ok) throw new Error("Failed to load config"); 23 | const data = await res.json(); 24 | setStorageEnabled(Boolean(data.storage_profile_enabled)); 25 | if (typeof data.free_storage_limit === "number") { 26 | setFreeLimitBytes(data.free_storage_limit); 27 | } 28 | } catch (error) { 29 | setStorageEnabled(false); 30 | } finally { 31 | setLoading(false); 32 | } 33 | }; 34 | loadConfig(); 35 | }, []); 36 | 37 | return ( 38 | 39 | {children} 40 | 41 | ); 42 | }; 43 | -------------------------------------------------------------------------------- /migrations/postgres/0001_init.up.sql: -------------------------------------------------------------------------------- 1 | -- Initial schema for PostgreSQL 2 | CREATE TABLE IF NOT EXISTS logs ( 3 | 
session_id TEXT PRIMARY KEY, 4 | ip_from TEXT NOT NULL, 5 | ip_to TEXT, 6 | bandwidth_bytes BIGINT DEFAULT 0, 7 | session_start TIMESTAMP NOT NULL, 8 | session_end TIMESTAMP 9 | ); 10 | 11 | CREATE INDEX IF NOT EXISTS idx_session_start ON logs(session_start); 12 | CREATE INDEX IF NOT EXISTS idx_ip_from ON logs(ip_from); 13 | 14 | -- Users table for authentication 15 | CREATE TABLE IF NOT EXISTS users ( 16 | id BIGSERIAL PRIMARY KEY, 17 | email TEXT NOT NULL UNIQUE, 18 | password_hash TEXT NOT NULL, 19 | encryption_salt TEXT NOT NULL, 20 | created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, 21 | updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP 22 | ); 23 | 24 | -- Files table for user uploaded files 25 | CREATE TABLE IF NOT EXISTS files ( 26 | id BIGSERIAL PRIMARY KEY, 27 | user_id BIGINT NOT NULL, 28 | encrypted_filename TEXT NOT NULL, 29 | file_path TEXT NOT NULL, 30 | file_size BIGINT NOT NULL, 31 | encrypted_key TEXT NOT NULL, 32 | share_token TEXT UNIQUE, 33 | download_count BIGINT NOT NULL DEFAULT 0, 34 | created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, 35 | updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, 36 | FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE 37 | ); 38 | 39 | -- Indexes for better query performance 40 | CREATE INDEX IF NOT EXISTS idx_users_email ON users(email); 41 | CREATE INDEX IF NOT EXISTS idx_files_user_id ON files(user_id); 42 | CREATE INDEX IF NOT EXISTS idx_files_share_token ON files(share_token); 43 | -------------------------------------------------------------------------------- /playwright.config.js: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | const { defineConfig, devices } = require('@playwright/test'); 3 | 4 | /** 5 | * @see https://playwright.dev/docs/test-configuration 6 | */ 7 | module.exports = defineConfig({ 8 | testDir: './tests', 9 | /* Maximum time one test can run for */ 10 | timeout: 120000, // 2 minutes per test 11 | /* Run tests in files in parallel */ 12 | fullyParallel: false, 13 | /* Fail the build on CI if you accidentally left test.only in the source code. */ 14 | forbidOnly: !!process.env.CI, 15 | /* Retry on CI only */ 16 | retries: process.env.CI ? 2 : 0, 17 | /* Opt out of parallel tests on CI. */ 18 | workers: 1, 19 | /* Reporter to use. See https://playwright.dev/docs/test-reporters */ 20 | reporter: 'html', 21 | /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */ 22 | use: { 23 | /* Base URL to use in actions like `await page.goto('/')`. */ 24 | // baseURL: 'http://127.0.0.1:3000', 25 | 26 | /* Collect trace when retrying the failed test. 
See https://playwright.dev/docs/trace-viewer */ 27 | trace: 'on-first-retry', 28 | 29 | /* Maximum time each action such as `click()` can take */ 30 | actionTimeout: 15000, 31 | 32 | /* Maximum time for navigation */ 33 | navigationTimeout: 30000, 34 | }, 35 | 36 | /* Configure projects for major browsers */ 37 | projects: [ 38 | { 39 | name: 'chromium', 40 | use: { ...devices['Desktop Chrome'] }, 41 | }, 42 | ], 43 | 44 | /* Run your local dev server before starting the tests */ 45 | // webServer: { 46 | // command: 'npm run start', 47 | // url: 'http://127.0.0.1:3000', 48 | // reuseExistingServer: !process.env.CI, 49 | // }, 50 | }); 51 | -------------------------------------------------------------------------------- /src/api/middleware.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "strings" 7 | 8 | "github.com/schollz/e2ecp/src/auth" 9 | ) 10 | 11 | type contextKey string 12 | 13 | const ( 14 | UserIDKey contextKey = "user_id" 15 | UserEmailKey contextKey = "user_email" 16 | ) 17 | 18 | // AuthMiddleware validates JWT tokens and adds user info to context 19 | func AuthMiddleware(authService *auth.Service) func(http.Handler) http.Handler { 20 | return func(next http.Handler) http.Handler { 21 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 22 | authHeader := r.Header.Get("Authorization") 23 | if authHeader == "" { 24 | http.Error(w, "Authorization header required", http.StatusUnauthorized) 25 | return 26 | } 27 | 28 | // Extract token from "Bearer " 29 | parts := strings.Split(authHeader, " ") 30 | if len(parts) != 2 || parts[0] != "Bearer" { 31 | http.Error(w, "Invalid authorization header format", http.StatusUnauthorized) 32 | return 33 | } 34 | 35 | token := parts[1] 36 | userID, email, err := authService.ValidateJWT(token) 37 | if err != nil { 38 | http.Error(w, "Invalid or expired token", http.StatusUnauthorized) 39 | return 40 | } 41 | 42 | // Add user info to context 43 | ctx := context.WithValue(r.Context(), UserIDKey, userID) 44 | ctx = context.WithValue(ctx, UserEmailKey, email) 45 | next.ServeHTTP(w, r.WithContext(ctx)) 46 | }) 47 | } 48 | } 49 | 50 | // GetUserID retrieves the user ID from the request context 51 | func GetUserID(r *http.Request) int64 { 52 | userID, _ := r.Context().Value(UserIDKey).(int64) 53 | return userID 54 | } 55 | 56 | // GetUserEmail retrieves the user email from the request context 57 | func GetUserEmail(r *http.Request) string { 58 | email, _ := r.Context().Value(UserEmailKey).(string) 59 | return email 60 | } 61 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all build clean web server install migrate migrate\:create 2 | 3 | # Paths/commands 4 | MIGRATIONS_DIR ?= migrations 5 | 6 | LDFLAGS ?= 7 | 8 | all: build 9 | @echo "Build completed at $(shell date)" 10 | 11 | node_modules: 12 | npm install 13 | 14 | web/node_modules: 15 | cd web && npm install 16 | 17 | web: web/node_modules 18 | cd web && npm run build 19 | touch web/dist/.keep 20 | 21 | server: web 22 | CGO_ENABLED=0 go build -ldflags "$(LDFLAGS) -extldflags '-static'" -o e2ecp . 23 | 24 | build: server 25 | 26 | install: build 27 | go install 28 | 29 | clean: 30 | rm -f e2ecp 31 | rm -rf web/dist 32 | 33 | test: all 34 | go test -v ./... 
35 | cd tests && ./run-tests.sh 36 | 37 | # Ensure migrate CLI is available (installs with postgres support if missing) 38 | define ensure_migrate 39 | @command -v migrate >/dev/null || (echo "Installing golang-migrate CLI..." && go install -tags 'postgres' github.com/golang-migrate/migrate/v4/cmd/migrate@latest) 40 | endef 41 | 42 | # Apply migrations to the configured database 43 | # Requires DATABASE_URL to be set (from the environment or .env) 44 | migrate: 45 | $(call ensure_migrate) 46 | @if [ -f .env ]; then set -a && . ./.env && set +a; fi; \ 47 | if [ -z "$$DATABASE_URL" ]; then \ 48 | echo "DATABASE_URL is not set; cannot run migrations."; \ 49 | exit 1; \ 50 | fi; \ 51 | echo "Applying PostgreSQL migrations..."; \ 52 | migrate -path $(MIGRATIONS_DIR)/postgres -database "$$DATABASE_URL" up 53 | 54 | # Create a new timestamped migration: make migrate:create name=add_table (postgres only) 55 | migrate\:create: 56 | @test -n "$(name)" || (echo "Usage: make migrate:create name=" && exit 1) 57 | $(call ensure_migrate) 58 | @echo "Creating PostgreSQL migration..." 59 | @migrate create -ext sql -dir $(MIGRATIONS_DIR)/postgres -seq $(name) 60 | @echo "Created PostgreSQL migration files." 61 | -------------------------------------------------------------------------------- /src/client/metadata.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | // FileMetadata contains all sensitive file information that should be encrypted 8 | // before transmission through the relay server 9 | type FileMetadata struct { 10 | Name string `json:"name"` 11 | TotalSize int64 `json:"total_size"` 12 | IsFolder bool `json:"is_folder,omitempty"` 13 | OriginalFolderName string `json:"original_folder_name,omitempty"` 14 | IsMultipleFiles bool `json:"is_multiple_files,omitempty"` 15 | Hash string `json:"hash,omitempty"` // SHA256 hash of the file 16 | IsText bool `json:"is_text,omitempty"` // Whether this is a text message instead of a file 17 | Text string `json:"text,omitempty"` // Text content if IsText is true 18 | } 19 | 20 | // MarshalMetadata converts FileMetadata to JSON bytes for encryption 21 | func MarshalMetadata(meta FileMetadata) ([]byte, error) { 22 | return json.Marshal(meta) 23 | } 24 | 25 | // UnmarshalMetadata converts JSON bytes to FileMetadata after decryption 26 | func UnmarshalMetadata(data []byte) (FileMetadata, error) { 27 | var meta FileMetadata 28 | err := json.Unmarshal(data, &meta) 29 | return meta, err 30 | } 31 | 32 | // LocalRelayInfo contains local relay connection information that should be encrypted 33 | // before transmission through the relay server 34 | type LocalRelayInfo struct { 35 | IPs []string `json:"ips"` 36 | Port int `json:"port"` 37 | } 38 | 39 | // MarshalLocalRelayInfo converts LocalRelayInfo to JSON bytes for encryption 40 | func MarshalLocalRelayInfo(info LocalRelayInfo) ([]byte, error) { 41 | return json.Marshal(info) 42 | } 43 | 44 | // UnmarshalLocalRelayInfo converts JSON bytes to LocalRelayInfo after decryption 45 | func UnmarshalLocalRelayInfo(data []byte) (LocalRelayInfo, error) { 46 | var info LocalRelayInfo 47 | err := json.Unmarshal(data, &info) 48 | return info, err 49 | } 50 | -------------------------------------------------------------------------------- /src/qrcode/qrcode_test.go: -------------------------------------------------------------------------------- 1 | package qrcode 2 | 3 | import ( 4 | "bytes" 5 | "strings" 6 | "testing" 7 | 8 | 
"rsc.io/qr" 9 | ) 10 | 11 | func TestPrintHalfBlockMatchesQRCodePixels(t *testing.T) { 12 | t.Helper() 13 | 14 | text := "https://example.com" 15 | leftPadding := 3 16 | 17 | var buf bytes.Buffer 18 | if err := PrintHalfBlock(&buf, text, leftPadding); err != nil { 19 | t.Fatalf("PrintHalfBlock returned error: %v", err) 20 | } 21 | 22 | rawOutput := buf.String() 23 | if !strings.HasSuffix(rawOutput, "\n") { 24 | t.Fatalf("PrintHalfBlock output should end with a newline, got %q", rawOutput) 25 | } 26 | 27 | output := strings.TrimSuffix(rawOutput, "\n") 28 | lines := strings.Split(output, "\n") 29 | 30 | code, err := qr.Encode(text, qr.L) 31 | if err != nil { 32 | t.Fatalf("failed to encode reference QR code: %v", err) 33 | } 34 | 35 | expectedLines := (code.Size + 1) / 2 36 | if len(lines) != expectedLines { 37 | t.Fatalf("expected %d lines, got %d", expectedLines, len(lines)) 38 | } 39 | 40 | padding := strings.Repeat(" ", leftPadding) 41 | 42 | for row, line := range lines { 43 | if !strings.HasPrefix(line, padding) { 44 | t.Fatalf("line %d is missing left padding %q: %q", row, padding, line) 45 | } 46 | 47 | contentRunes := []rune(line[leftPadding:]) 48 | if len(contentRunes) != code.Size { 49 | t.Fatalf("line %d expected %d runes, got %d", row, code.Size, len(contentRunes)) 50 | } 51 | 52 | y := row * 2 53 | for x, r := range contentRunes { 54 | top := code.Black(x, y) 55 | bottom := false 56 | if y+1 < code.Size { 57 | bottom = code.Black(x, y+1) 58 | } 59 | 60 | var expected rune 61 | switch { 62 | case top && bottom: 63 | expected = '█' 64 | case top && !bottom: 65 | expected = '▀' 66 | case !top && bottom: 67 | expected = '▄' 68 | default: 69 | expected = ' ' 70 | } 71 | 72 | if r != expected { 73 | t.Fatalf("line %d, column %d: expected %q, got %q", row, x, expected, r) 74 | } 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/db/models.go: -------------------------------------------------------------------------------- 1 | // Code generated by sqlc. DO NOT EDIT. 
2 | // versions: 3 | // sqlc v1.29.0 4 | 5 | package db 6 | 7 | import ( 8 | "database/sql" 9 | "time" 10 | ) 11 | 12 | type DeviceAuthSession struct { 13 | ID int64 `json:"id"` 14 | DeviceCode string `json:"device_code"` 15 | UserCode string `json:"user_code"` 16 | UserID sql.NullInt64 `json:"user_id"` 17 | Approved bool `json:"approved"` 18 | Token sql.NullString `json:"token"` 19 | ExpiresAt time.Time `json:"expires_at"` 20 | CreatedAt time.Time `json:"created_at"` 21 | } 22 | 23 | type File struct { 24 | ID int64 `json:"id"` 25 | UserID int64 `json:"user_id"` 26 | EncryptedFilename string `json:"encrypted_filename"` 27 | FileSize int64 `json:"file_size"` 28 | EncryptedKey string `json:"encrypted_key"` 29 | ShareToken sql.NullString `json:"share_token"` 30 | DownloadCount int64 `json:"download_count"` 31 | CreatedAt time.Time `json:"created_at"` 32 | UpdatedAt time.Time `json:"updated_at"` 33 | FileData []byte `json:"file_data"` 34 | } 35 | 36 | type Log struct { 37 | SessionID string `json:"session_id"` 38 | IpFrom string `json:"ip_from"` 39 | IpTo sql.NullString `json:"ip_to"` 40 | BandwidthBytes sql.NullInt64 `json:"bandwidth_bytes"` 41 | SessionStart time.Time `json:"session_start"` 42 | SessionEnd sql.NullTime `json:"session_end"` 43 | } 44 | 45 | type User struct { 46 | ID int64 `json:"id"` 47 | Email string `json:"email"` 48 | PasswordHash string `json:"password_hash"` 49 | EncryptionSalt string `json:"encryption_salt"` 50 | CreatedAt time.Time `json:"created_at"` 51 | UpdatedAt time.Time `json:"updated_at"` 52 | Subscriber int32 `json:"subscriber"` 53 | Verified int32 `json:"verified"` 54 | VerificationToken sql.NullString `json:"verification_token"` 55 | } 56 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ARCH=$(uname -m) 4 | OS=$(uname -s | tr '[:upper:]' '[:lower:]') 5 | 6 | if [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "amd64" ]; then 7 | ARCH="amd64" 8 | ARCH_SUFFIX="" 9 | elif [ "$ARCH" = "aarch64" ] || [ "$ARCH" = "arm64" ]; then 10 | ARCH="arm64" 11 | ARCH_SUFFIX="_arm64" 12 | elif [ "$ARCH" = "armv7l" ]; then 13 | ARCH="armv7" 14 | ARCH_SUFFIX="_armv7" 15 | elif [ "$ARCH" = "armv6l" ]; then 16 | ARCH="armv6" 17 | ARCH_SUFFIX="_armv6" 18 | else 19 | echo "The architecture $ARCH is not supported." 20 | exit 1 21 | fi 22 | 23 | if [ "$OS" != "linux" ]; then 24 | echo "The OS $OS is not supported." 25 | exit 1 26 | fi 27 | 28 | if ! command -v unzip >/dev/null 2>&1; then 29 | echo "The \"unzip\" command is required." 30 | exit 1 31 | fi 32 | 33 | echo "Downloading e2ecp for $OS $ARCH..." 34 | 35 | DOWNLOAD_URL=$(curl -s https://api.github.com/repos/schollz/e2ecp/releases/latest | \ 36 | grep 'browser_download_url' | \ 37 | grep "e2ecp_${OS}${ARCH_SUFFIX}.zip" | \ 38 | cut -d '"' -f 4 | head -n 1) 39 | 40 | if [ -z "$DOWNLOAD_URL" ]; then 41 | echo "Failed to locate a release asset for e2ecp ($OS $ARCH)." 42 | exit 1 43 | fi 44 | 45 | TMPDIR=$(mktemp -d) 46 | trap 'rm -rf "$TMPDIR"' EXIT 47 | 48 | if ! curl -L "$DOWNLOAD_URL" -o "$TMPDIR/e2ecp.zip" --progress-bar; then 49 | echo "Failed to download e2ecp." 50 | exit 1 51 | fi 52 | 53 | if ! unzip -o "$TMPDIR/e2ecp.zip" -d "$TMPDIR" >/dev/null; then 54 | echo "Failed to extract e2ecp." 55 | exit 1 56 | fi 57 | 58 | BINARY_PATH="$TMPDIR/e2ecp" 59 | if [ ! -f "$BINARY_PATH" ]; then 60 | echo "e2ecp binary was not found in the downloaded archive." 
61 | exit 1 62 | fi 63 | 64 | chmod +x "$BINARY_PATH" 65 | 66 | if [ "$(id -u)" -ne 0 ]; then 67 | if command -v sudo >/dev/null 2>&1; then 68 | SUDO="sudo" 69 | else 70 | echo "Please rerun as root or install sudo to write to /usr/local/bin." 71 | exit 1 72 | fi 73 | else 74 | SUDO="" 75 | fi 76 | 77 | if ! $SUDO mv "$BINARY_PATH" /usr/local/bin/e2ecp; then 78 | echo "Failed to install e2ecp to /usr/local/bin." 79 | exit 1 80 | fi 81 | 82 | echo "Installed e2ecp to /usr/local/bin/e2ecp" 83 | /usr/local/bin/e2ecp --version || true 84 | -------------------------------------------------------------------------------- /src/crypto/crypto.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "crypto/aes" 5 | "crypto/cipher" 6 | "crypto/ecdh" 7 | "crypto/rand" 8 | "crypto/sha256" 9 | "encoding/hex" 10 | "io" 11 | ) 12 | 13 | // GenerateECDHKeyPair generates a new ECDH P-256 key pair 14 | func GenerateECDHKeyPair() (*ecdh.PrivateKey, error) { 15 | return ecdh.P256().GenerateKey(rand.Reader) 16 | } 17 | 18 | // DeriveSharedSecret derives a shared secret from a private key and peer's public key 19 | func DeriveSharedSecret(privKey *ecdh.PrivateKey, peerPubKey *ecdh.PublicKey) ([]byte, error) { 20 | return privKey.ECDH(peerPubKey) 21 | } 22 | 23 | // EncryptAESGCM encrypts plaintext using AES-GCM with the given key 24 | func EncryptAESGCM(key, plaintext []byte) (iv, ciphertext []byte, err error) { 25 | block, err := aes.NewCipher(key) 26 | if err != nil { 27 | return nil, nil, err 28 | } 29 | 30 | gcm, err := cipher.NewGCM(block) 31 | if err != nil { 32 | return nil, nil, err 33 | } 34 | 35 | iv = make([]byte, gcm.NonceSize()) 36 | if _, err := io.ReadFull(rand.Reader, iv); err != nil { 37 | return nil, nil, err 38 | } 39 | 40 | ciphertext = gcm.Seal(nil, iv, plaintext, nil) 41 | return iv, ciphertext, nil 42 | } 43 | 44 | // DecryptAESGCM decrypts ciphertext using AES-GCM with the given key 45 | func DecryptAESGCM(key, iv, ciphertext []byte) ([]byte, error) { 46 | block, err := aes.NewCipher(key) 47 | if err != nil { 48 | return nil, err 49 | } 50 | 51 | gcm, err := cipher.NewGCM(block) 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | return gcm.Open(nil, iv, ciphertext, nil) 57 | } 58 | 59 | // GenerateMnemonic generates a 2-word BIP39 mnemonic from a client ID 60 | func GenerateMnemonic(clientID string) string { 61 | // This is imported from the relay package to avoid circular dependencies 62 | // The implementation is in relay/mnemonic.go 63 | return "" 64 | } 65 | 66 | // CalculateFileHash calculates the SHA256 hash of a file 67 | func CalculateFileHash(reader io.Reader) (string, error) { 68 | hash := sha256.New() 69 | if _, err := io.Copy(hash, reader); err != nil { 70 | return "", err 71 | } 72 | return hex.EncodeToString(hash.Sum(nil)), nil 73 | } 74 | 75 | // CalculateBytesHash calculates the SHA256 hash of a byte slice 76 | func CalculateBytesHash(data []byte) string { 77 | hash := sha256.Sum256(data) 78 | return hex.EncodeToString(hash[:]) 79 | } 80 | -------------------------------------------------------------------------------- /src/relay/zero_knowledge_test.go: -------------------------------------------------------------------------------- 1 | package relay 2 | 3 | import ( 4 | "encoding/base64" 5 | "testing" 6 | ) 7 | 8 | // TestRelayCannotInspectMetadata verifies that the relay server 9 | // cannot access or inspect encrypted file metadata 10 | func TestRelayCannotInspectMetadata(t *testing.T) { 11 | 
// Simulate what a client sends to the relay 12 | // This is encrypted metadata that the relay should not be able to read 13 | 14 | // Simulated encrypted metadata (this would be real AES-GCM encrypted data) 15 | encryptedMetadata := base64.StdEncoding.EncodeToString([]byte("this-is-encrypted-binary-data")) 16 | metadataIV := base64.StdEncoding.EncodeToString([]byte("random-iv-bytes")) 17 | 18 | // Create an incoming message as the relay would receive it 19 | inMsg := IncomingMessage{ 20 | Type: "file_start", 21 | EncryptedMetadata: encryptedMetadata, 22 | MetadataIV: metadataIV, 23 | } 24 | 25 | // Convert to outgoing message (as relay does) 26 | outMsg := OutgoingMessage{ 27 | Type: inMsg.Type, 28 | EncryptedMetadata: inMsg.EncryptedMetadata, 29 | MetadataIV: inMsg.MetadataIV, 30 | } 31 | 32 | // Verify that encrypted metadata is present 33 | if outMsg.EncryptedMetadata == "" { 34 | t.Error("Encrypted metadata is missing") 35 | } 36 | if outMsg.MetadataIV == "" { 37 | t.Error("Metadata IV is missing") 38 | } 39 | 40 | // Relay should only see that encrypted metadata exists, not its contents 41 | t.Logf("Relay successfully forwards encrypted metadata without inspection") 42 | t.Logf("Encrypted metadata present: %v", outMsg.EncryptedMetadata != "") 43 | } 44 | 45 | // TestZeroKnowledgeNewClients verifies that new clients using encrypted 46 | // metadata don't leak information through legacy fields 47 | func TestZeroKnowledgeNewClients(t *testing.T) { 48 | // Simulate a new client sending only encrypted metadata 49 | newClientMsg := IncomingMessage{ 50 | Type: "file_start", 51 | EncryptedMetadata: base64.StdEncoding.EncodeToString([]byte("encrypted-data")), 52 | MetadataIV: base64.StdEncoding.EncodeToString([]byte("iv-data")), 53 | } 54 | 55 | // Verify encrypted fields are populated 56 | if newClientMsg.EncryptedMetadata == "" { 57 | t.Error("Encrypted metadata missing") 58 | } 59 | if newClientMsg.MetadataIV == "" { 60 | t.Error("Metadata IV missing") 61 | } 62 | 63 | t.Log("New clients properly use zero-knowledge encrypted metadata") 64 | } 65 | -------------------------------------------------------------------------------- /web/src/SignupSuccess.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import { useLocation, useNavigate } from "react-router-dom"; 3 | import Navbar from "./Navbar"; 4 | 5 | export default function SignupSuccess() { 6 | const navigate = useNavigate(); 7 | const location = useLocation(); 8 | const email = location.state?.email; 9 | 10 | return ( 11 |
12 | 13 |
14 |
15 |

16 | Signed up! 17 |

18 |

19 | Check your email for the login link 20 | {email ? ` (${email})` : ""}. We send it right away so you 21 | can finish verifying your account. 22 |

23 |
24 | 31 | 38 |
39 |
40 |
41 |
42 | ); 43 | } 44 | -------------------------------------------------------------------------------- /web/vite.config.js: -------------------------------------------------------------------------------- 1 | import { defineConfig } from "vite"; 2 | import react from "@vitejs/plugin-react"; 3 | import { compression } from "vite-plugin-compression2"; 4 | 5 | // Plugin to inject preload hints for fonts dynamically 6 | function preloadFontsPlugin() { 7 | return { 8 | name: 'preload-fonts', 9 | async transformIndexHtml(html, ctx) { 10 | // Only in build mode 11 | if (!ctx.bundle) return html; 12 | 13 | // Find font files in the bundle 14 | const fontFiles = Object.keys(ctx.bundle).filter( 15 | file => file.match(/fa-solid-900.*\.woff2$/) 16 | ); 17 | 18 | if (fontFiles.length === 0) { 19 | console.warn('Warning: fa-solid-900 font not found in bundle'); 20 | return html; 21 | } 22 | 23 | // Create preload links for each font 24 | const preloadLinks = fontFiles.map(font => 25 | ` ` 26 | ).join('\n'); 27 | 28 | const preloadSection = ` 29 | 30 | ${preloadLinks}`; 31 | 32 | // Inject before closing 33 | return html.replace('', `${preloadSection}\n`); 34 | } 35 | }; 36 | } 37 | 38 | export default defineConfig({ 39 | plugins: [ 40 | react(), 41 | preloadFontsPlugin(), 42 | compression({ 43 | algorithm: "gzip", 44 | exclude: [/\.(br)$/, /\.(gz)$/], 45 | threshold: 1024, // Only compress files larger than 1KB 46 | deleteOriginalAssets: false 47 | }) 48 | ], 49 | preview: { 50 | port: 5173 51 | }, 52 | server: { 53 | port: 5173 54 | }, 55 | build: { 56 | // Generate hashed filenames for better caching 57 | rollupOptions: { 58 | output: { 59 | entryFileNames: "assets/[name]-[hash].js", 60 | chunkFileNames: "assets/[name]-[hash].js", 61 | assetFileNames: "assets/[name]-[hash].[ext]", 62 | // Ensure fonts are treated with proper preload 63 | manualChunks: undefined 64 | } 65 | }, 66 | // Increase chunk size warning limit for font awesome 67 | chunkSizeWarningLimit: 600 68 | } 69 | }); 70 | -------------------------------------------------------------------------------- /web/src/main.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import { createRoot } from "react-dom/client"; 3 | import { BrowserRouter, Routes, Route } from "react-router-dom"; 4 | import { Toaster } from "react-hot-toast"; 5 | import { AuthProvider } from "./AuthContext.jsx"; 6 | import { ConfigProvider } from "./ConfigContext.jsx"; 7 | import { DarkModeProvider } from "./DarkModeContext.jsx"; 8 | import Landing from "./Landing.jsx"; 9 | import Login from "./Login.jsx"; 10 | import Profile from "./Profile.jsx"; 11 | import SharedFile from "./SharedFile.jsx"; 12 | import Settings from "./Settings.jsx"; 13 | import VerifyEmail from "./VerifyEmail.jsx"; 14 | import SignupSuccess from "./SignupSuccess.jsx"; 15 | import DeviceAuth from "./DeviceAuth.jsx"; 16 | import About from "./About.jsx"; 17 | import "@fontsource/monaspace-neon"; 18 | import "./index.css"; 19 | import "@fortawesome/fontawesome-free/css/all.min.css"; 20 | 21 | createRoot(document.getElementById("root")).render( 22 | 23 | 24 | 25 | 26 | 27 | 39 | 40 | } /> 41 | } /> 42 | } /> 43 | } /> 44 | } /> 45 | } /> 46 | } /> 47 | } /> 48 | } /> 49 | } /> 50 | 51 | 52 | 53 | 54 | 55 | 56 | ); 57 | -------------------------------------------------------------------------------- /src/client/protobuf.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import 
(
 4 | 	"encoding/json"
 5 | 	"github.com/gorilla/websocket"
 6 | 	"github.com/schollz/e2ecp/src/relay"
 7 | 	"google.golang.org/protobuf/proto"
 8 | )
 9 | 
10 | // sendProtobufMessage sends a message using protobuf encoding
11 | func sendProtobufMessage(conn *websocket.Conn, msg map[string]interface{}) error {
12 | 	// Convert map to protobuf message
13 | 	pbMsg := &relay.PBIncomingMessage{}
14 | 
15 | 	if v, ok := msg["type"].(string); ok {
16 | 		pbMsg.Type = v
17 | 	}
18 | 	if v, ok := msg["roomId"].(string); ok {
19 | 		pbMsg.RoomId = v
20 | 	}
21 | 	if v, ok := msg["clientId"].(string); ok {
22 | 		pbMsg.ClientId = v
23 | 	}
24 | 	if v, ok := msg["pub"].(string); ok {
25 | 		pbMsg.Pub = v
26 | 	}
27 | 	if v, ok := msg["iv_b64"].(string); ok {
28 | 		pbMsg.IvB64 = v
29 | 	}
30 | 	if v, ok := msg["data_b64"].(string); ok {
31 | 		pbMsg.DataB64 = v
32 | 	}
33 | 	if v, ok := msg["chunk_data"].(string); ok {
34 | 		pbMsg.ChunkData = v
35 | 	}
36 | 	if v, ok := msg["chunk_num"].(int); ok {
37 | 		pbMsg.ChunkNum = int32(v)
38 | 	}
39 | 	if v, ok := msg["encrypted_metadata"].(string); ok {
40 | 		pbMsg.EncryptedMetadata = v
41 | 	}
42 | 	if v, ok := msg["metadata_iv"].(string); ok {
43 | 		pbMsg.MetadataIv = v
44 | 	}
45 | 
46 | 	data, err := proto.Marshal(pbMsg)
47 | 	if err != nil {
48 | 		return err
49 | 	}
50 | 
51 | 	return conn.WriteMessage(websocket.BinaryMessage, data)
52 | }
53 | 
54 | // receiveProtobufMessage receives a message in either protobuf or JSON format
55 | func receiveProtobufMessage(conn *websocket.Conn) (*relay.OutgoingMessage, error) {
56 | 	msgType, raw, err := conn.ReadMessage()
57 | 	if err != nil {
58 | 		return nil, err
59 | 	}
60 | 
61 | 	if msgType == websocket.BinaryMessage {
62 | 		// Protobuf message
63 | 		pbMsg := &relay.PBOutgoingMessage{}
64 | 		if err := proto.Unmarshal(raw, pbMsg); err != nil {
65 | 			return nil, err
66 | 		}
67 | 
68 | 		// Convert to OutgoingMessage
69 | 		return &relay.OutgoingMessage{
70 | 			Type:              pbMsg.Type,
71 | 			From:              pbMsg.From,
72 | 			Mnemonic:          pbMsg.Mnemonic,
73 | 			RoomID:            pbMsg.RoomId,
74 | 			Pub:               pbMsg.Pub,
75 | 			IvB64:             pbMsg.IvB64,
76 | 			DataB64:           pbMsg.DataB64,
77 | 			ChunkData:         pbMsg.ChunkData,
78 | 			ChunkNum:          int(pbMsg.ChunkNum),
79 | 			SelfID:            pbMsg.SelfId,
80 | 			Peers:             pbMsg.Peers,
81 | 			Count:             int(pbMsg.Count),
82 | 			Error:             pbMsg.Error,
83 | 			EncryptedMetadata: pbMsg.EncryptedMetadata,
84 | 			MetadataIV:        pbMsg.MetadataIv,
85 | 			PeerID:            pbMsg.PeerId,
86 | 		}, nil
87 | 	}
88 | 
89 | 	// JSON message: decode the frame that was already read above
90 | 	var msg relay.OutgoingMessage
91 | 	if err := json.Unmarshal(raw, &msg); err != nil {
92 | 		return nil, err
93 | 	}
94 | 	return &msg, nil
95 | }
--------------------------------------------------------------------------------
/web/src/DarkModeContext.jsx:
--------------------------------------------------------------------------------
 1 | import { createContext, useContext, useState, useEffect } from "react";
 2 | 
 3 | const DarkModeContext = createContext();
 4 | 
 5 | export function DarkModeProvider({ children }) {
 6 |   const [darkMode, setDarkMode] = useState(false);
 7 | 
 8 |   // Load saved preference on mount
 9 |   useEffect(() => {
10 |     try {
11 |       const saved = localStorage.getItem("darkMode");
12 |       if (saved !== null) {
13 |         setDarkMode(saved === "true");
14 |       } else {
15 |         // If no saved preference, use system preference
16 |         const prefersDark = window.matchMedia("(prefers-color-scheme: dark)").matches;
17 |         setDarkMode(prefersDark);
18 |       }
19 |     } catch (error) {
20 |       console.warn("Could not load dark mode preference:", error);
21 |     }
22 |   }, []);
23 | 
24 |   // Listen for system theme changes
25 |   useEffect(() => {
26 |     const mediaQuery = 
window.matchMedia("(prefers-color-scheme: dark)"); 27 | const handleChange = (e) => { 28 | // Only update if user hasn't manually set a preference 29 | try { 30 | const userPref = localStorage.getItem("darkMode"); 31 | if (userPref === null) { 32 | setDarkMode(e.matches); 33 | } 34 | } catch (error) { 35 | console.warn("Could not check dark mode preference:", error); 36 | } 37 | }; 38 | 39 | mediaQuery.addEventListener("change", handleChange); 40 | return () => mediaQuery.removeEventListener("change", handleChange); 41 | }, []); 42 | 43 | // Apply dark mode class to document 44 | useEffect(() => { 45 | if (darkMode) { 46 | document.documentElement.classList.add("dark"); 47 | } else { 48 | document.documentElement.classList.remove("dark"); 49 | } 50 | }, [darkMode]); 51 | 52 | const toggleDarkMode = () => { 53 | setDarkMode((prev) => { 54 | const newValue = !prev; 55 | try { 56 | localStorage.setItem("darkMode", String(newValue)); 57 | } catch (error) { 58 | console.warn("Could not save dark mode preference:", error); 59 | } 60 | return newValue; 61 | }); 62 | }; 63 | 64 | return ( 65 | 66 | {children} 67 | 68 | ); 69 | } 70 | 71 | export function useDarkMode() { 72 | const context = useContext(DarkModeContext); 73 | if (!context) { 74 | throw new Error("useDarkMode must be used within a DarkModeProvider"); 75 | } 76 | return context; 77 | } 78 | -------------------------------------------------------------------------------- /tests/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script to run Playwright tests for Share application 4 | # This script ensures the server is built and browsers are installed before running tests 5 | 6 | set -e 7 | 8 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 9 | ROOT_DIR="$(dirname "$SCRIPT_DIR")" 10 | 11 | echo "=== Share Playwright Test Runner ===" 12 | echo "" 13 | 14 | # Step 1: Check if e2ecp binary exists 15 | if [ ! -f "$ROOT_DIR/e2ecp" ]; then 16 | echo "❌ Share binary not found. Building..." 17 | cd "$ROOT_DIR" 18 | if [ -d "web/node_modules" ]; then 19 | echo "✓ Web dependencies already installed" 20 | else 21 | echo "Installing web dependencies..." 22 | cd web && npm install && cd .. 23 | fi 24 | echo "Building e2ecp binary..." 25 | make build 26 | echo "✓ Share binary built successfully" 27 | else 28 | echo "✓ Share binary found" 29 | fi 30 | 31 | # Step 2: Check if test dependencies are installed 32 | cd "$ROOT_DIR" 33 | if [ ! -d "node_modules" ]; then 34 | echo "Installing test dependencies..." 35 | npm install 36 | echo "✓ Test dependencies installed" 37 | else 38 | echo "✓ Test dependencies found" 39 | fi 40 | 41 | # Step 3: Check if Playwright browsers are installed 42 | PLAYWRIGHT_BROWSERS_PATH="${PLAYWRIGHT_BROWSERS_PATH:-$HOME/.cache/ms-playwright}" 43 | CHROMIUM_PATH="$PLAYWRIGHT_BROWSERS_PATH/chromium-*" 44 | 45 | if ! ls $CHROMIUM_PATH 1> /dev/null 2>&1; then 46 | echo "Installing Playwright Chromium browser..." 47 | npx playwright install chromium 48 | if [ $? -ne 0 ]; then 49 | echo "⚠️ Browser installation failed. Trying with system dependencies..." 50 | npx playwright install --with-deps chromium 51 | if [ $? 
-ne 0 ]; then 52 | echo "❌ Failed to install Playwright browsers" 53 | echo "Please install manually with: npx playwright install chromium" 54 | exit 1 55 | fi 56 | fi 57 | echo "✓ Playwright browsers installed" 58 | else 59 | echo "✓ Playwright browsers found" 60 | fi 61 | 62 | # Step 4: Run tests 63 | echo "" 64 | echo "=== Running Playwright Tests ===" 65 | echo "" 66 | 67 | if [ "$1" = "--headed" ]; then 68 | npx playwright test --headed 69 | elif [ "$1" = "--debug" ]; then 70 | npx playwright test --debug 71 | elif [ "$1" = "--ui" ]; then 72 | npx playwright test --ui 73 | else 74 | npx playwright test "$@" 75 | fi 76 | 77 | TEST_EXIT_CODE=$? 78 | 79 | # Step 5: Show results 80 | echo "" 81 | if [ $TEST_EXIT_CODE -eq 0 ]; then 82 | echo "✓ All tests passed!" 83 | echo "" 84 | echo "View HTML report with: npm run test:report" 85 | else 86 | echo "❌ Some tests failed (exit code: $TEST_EXIT_CODE)" 87 | echo "" 88 | echo "View HTML report with: npm run test:report" 89 | fi 90 | 91 | exit $TEST_EXIT_CODE 92 | -------------------------------------------------------------------------------- /web/src/index.css: -------------------------------------------------------------------------------- 1 | @import 'tailwindcss'; 2 | 3 | @theme { 4 | --color-magenta-400: #ff006e; 5 | } 6 | 7 | @variant dark (&:is(.dark *)); 8 | 9 | /* 10 | The default border color has changed to `currentcolor` in Tailwind CSS v4, 11 | so we've added these compatibility styles to make sure everything still 12 | looks the same as it did with Tailwind CSS v3. 13 | 14 | If we ever want to remove these styles, we need to add an explicit border 15 | color utility to any element that depends on these defaults. 16 | */ 17 | @layer base { 18 | 19 | *, 20 | ::after, 21 | ::before, 22 | ::backdrop, 23 | ::file-selector-button { 24 | border-color: var(--color-gray-200, currentcolor); 25 | } 26 | 27 | /* Ensure consistent font across all pages */ 28 | :root, 29 | html, 30 | body, 31 | #root { 32 | font-family: "Monaspace Neon", monospace; 33 | text-transform: uppercase; 34 | } 35 | 36 | button, 37 | input, 38 | select, 39 | textarea { 40 | font-family: inherit; 41 | } 42 | 43 | input::placeholder, 44 | textarea::placeholder { 45 | text-transform: uppercase; 46 | } 47 | 48 | /* Toast notification color variables */ 49 | :root { 50 | --toast-bg: #fff; 51 | --toast-text: #000; 52 | --toast-border: 2px solid #000; 53 | --toast-icon-primary: #000; 54 | --toast-icon-secondary: #fff; 55 | } 56 | 57 | :root.dark { 58 | --toast-bg: #000; 59 | --toast-text: #fff; 60 | --toast-border: 2px solid #fff; 61 | --toast-icon-primary: #fff; 62 | --toast-icon-secondary: #000; 63 | } 64 | 65 | /* Header box shadow for dark mode */ 66 | .header-shadow { 67 | box-shadow: 4px 4px 0px 0px rgb(229, 231, 235), 0 0 0 4px black; 68 | } 69 | 70 | :root.dark .header-shadow { 71 | box-shadow: 4px 4px 0px 0px #fff, 0 0 0 4px #fff; 72 | } 73 | 74 | /* Override FontAwesome font-display from block to swap for better performance 75 | This prevents FOIT (Flash of Invisible Text) and improves FCP/LCP metrics */ 76 | @font-face { 77 | font-family: "Font Awesome 7 Brands"; 78 | font-style: normal; 79 | font-weight: 400; 80 | font-display: swap; 81 | src: url(@fortawesome/fontawesome-free/webfonts/fa-brands-400.woff2) format("woff2"); 82 | } 83 | 84 | @font-face { 85 | font-family: "Font Awesome 7 Free"; 86 | font-style: normal; 87 | font-weight: 400; 88 | font-display: swap; 89 | src: url(@fortawesome/fontawesome-free/webfonts/fa-regular-400.woff2) format("woff2"); 90 | } 91 
| 92 | @font-face { 93 | font-family: "Font Awesome 7 Free"; 94 | font-style: normal; 95 | font-weight: 900; 96 | font-display: swap; 97 | src: url(@fortawesome/fontawesome-free/webfonts/fa-solid-900.woff2) format("woff2"); 98 | } 99 | 100 | .nocase { 101 | text-transform: none; 102 | } 103 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | 9 | # Diagnostic reports (https://nodejs.org/api/report.html) 10 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 11 | 12 | # Runtime data 13 | pids 14 | *.pid 15 | *.seed 16 | *.pid.lock 17 | 18 | # Directory for instrumented libs generated by jscoverage/JSCover 19 | lib-cov 20 | 21 | # Coverage directory used by tools like istanbul 22 | coverage 23 | *.lcov 24 | 25 | # nyc test coverage 26 | .nyc_output 27 | 28 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 29 | .grunt 30 | 31 | # Bower dependency directory (https://bower.io/) 32 | bower_components 33 | 34 | # node-waf configuration 35 | .lock-wscript 36 | 37 | # Compiled binary addons (https://nodejs.org/api/addons.html) 38 | build/Release 39 | 40 | # Dependency directories 41 | node_modules/ 42 | jspm_packages/ 43 | 44 | # Snowpack dependency directory (https://snowpack.dev/) 45 | web_modules/ 46 | 47 | # TypeScript cache 48 | *.tsbuildinfo 49 | 50 | # Optional npm cache directory 51 | .npm 52 | 53 | # Optional eslint cache 54 | .eslintcache 55 | 56 | # Optional stylelint cache 57 | .stylelintcache 58 | 59 | # Optional REPL history 60 | .node_repl_history 61 | 62 | # Output of 'npm pack' 63 | *.tgz 64 | 65 | # Yarn Integrity file 66 | .yarn-integrity 67 | 68 | # dotenv environment variable files 69 | .env 70 | .env.* 71 | !.env.example 72 | 73 | # parcel-bundler cache (https://parceljs.org/) 74 | .cache 75 | .parcel-cache 76 | 77 | # Next.js build output 78 | .next 79 | out 80 | 81 | # Nuxt.js build / generate output 82 | .nuxt 83 | dist/ 84 | !web/dist/ 85 | web/dist/* 86 | !web/dist/.keep 87 | 88 | # Gatsby files 89 | .cache/ 90 | # Comment in the public line in if your project uses Gatsby and not Next.js 91 | # https://nextjs.org/blog/next-9-1#public-directory-support 92 | # public 93 | 94 | # vuepress build output 95 | .vuepress/dist 96 | 97 | # vuepress v2.x temp and cache directory 98 | .temp 99 | .cache 100 | 101 | # Sveltekit cache directory 102 | .svelte-kit/ 103 | 104 | # vitepress build output 105 | **/.vitepress/dist 106 | 107 | # vitepress cache directory 108 | **/.vitepress/cache 109 | 110 | # Docusaurus cache and generated files 111 | .docusaurus 112 | 113 | # Serverless directories 114 | .serverless/ 115 | 116 | # FuseBox cache 117 | .fusebox/ 118 | 119 | # DynamoDB Local files 120 | .dynamodb/ 121 | 122 | # Firebase cache directory 123 | .firebase/ 124 | 125 | # TernJS port file 126 | .tern-port 127 | 128 | # Stores VSCode versions used for testing VSCode extensions 129 | .vscode-test 130 | 131 | # yarn v3 132 | .pnp.* 133 | .yarn/* 134 | !.yarn/patches 135 | !.yarn/plugins 136 | !.yarn/releases 137 | !.yarn/sdks 138 | !.yarn/versions 139 | 140 | # Vite logs files 141 | vite.config.js.timestamp-* 142 | vite.config.ts.timestamp-* 143 | share 144 | node_modules 145 | tmp/* 146 | web/dist/* 147 | test_folder 148 | tmp.zip 149 | *.wav 150 | *.deb 151 | 152 | # Playwright 153 | playwright-report/ 154 | test-results/ 
155 | tests/**/test-*.txt 156 | tests/**/downloaded-*.txt 157 | tests/cli-output/ 158 | e2eecp 159 | e2ecp 160 | .claude/ 161 | *.db 162 | uploads/* 163 | -------------------------------------------------------------------------------- /docs/ENCRYPTION.md: -------------------------------------------------------------------------------- 1 | # Client-Side File Encryption 2 | 3 | ## Overview 4 | 5 | All files are encrypted **client-side** before being uploaded to the server. The server only stores encrypted blobs and cannot access the plaintext files. 6 | 7 | ## How It Works 8 | 9 | ### 1. Key Derivation 10 | - When a user registers, a random 32-byte salt is generated and stored in the database 11 | - When logging in, the client derives an AES-256 encryption key from: 12 | - User's password 13 | - User's unique salt 14 | - PBKDF2 with 100,000 iterations and SHA-256 15 | 16 | ### 2. File Upload (Encryption) 17 | 1. User selects a file 18 | 2. **Client-side**: File is encrypted using AES-GCM with the derived key 19 | 3. A random 12-byte IV is generated for each file 20 | 4. IV is prepended to the encrypted data 21 | 5. Encrypted blob is uploaded to server 22 | 6. Server stores the encrypted blob (cannot decrypt it) 23 | 24 | ### 3. File Download (Decryption) 25 | 1. User requests to download a file 26 | 2. Server sends the encrypted blob 27 | 3. **Client-side**: IV is extracted from the first 12 bytes 28 | 4. File is decrypted using AES-GCM with the derived key 29 | 5. Decrypted file is saved to user's device 30 | 31 | ## Security Features 32 | 33 | ✅ **Zero-Knowledge**: Server never has access to plaintext files 34 | ✅ **Password-Based**: Encryption key is derived from user's password 35 | ✅ **Unique Keys**: Each user has a unique salt for key derivation 36 | ✅ **Strong Encryption**: AES-256-GCM with authenticated encryption 37 | ✅ **Random IVs**: Each file gets a unique IV for encryption 38 | ✅ **Ephemeral Keys**: Encryption key only exists in memory during session 39 | 40 | ## Important Notes 41 | 42 | ### Storage Tracking 43 | - Storage usage reflects the **encrypted** file size 44 | - Encrypted files are slightly larger than originals (~12 bytes for IV + GCM tag) 45 | 46 | ### Password Reset 47 | ⚠️ **If a user forgets their password, their files are permanently lost** 48 | - The encryption key can only be derived from the password 49 | - There is no password recovery mechanism 50 | - This is by design for zero-knowledge encryption 51 | 52 | ### Session Management 53 | - Encryption key is only available during the active session 54 | - After page refresh, user must log in again to re-derive the key 55 | - This ensures the key is never persisted 56 | 57 | ### Share Links 58 | ⚠️ **Current limitation**: Share links provide encrypted files 59 | - Recipients would need the decryption key to access files 60 | - Consider implementing a separate sharing mechanism with re-encryption 61 | 62 | ## Technical Implementation 63 | 64 | ### Backend 65 | - `users.encryption_salt`: Stores the user's unique salt (hex-encoded) 66 | - Files are stored as-is (encrypted blobs) 67 | - No decryption logic on server 68 | 69 | ### Frontend 70 | - `encryption.js`: Encryption utilities using Web Crypto API 71 | - `AuthContext`: Derives and manages encryption key 72 | - `Profile`: Encrypts before upload, decrypts after download 73 | 74 | ### Crypto Primitives 75 | - **KDF**: PBKDF2 with 100,000 iterations 76 | - **Hash**: SHA-256 77 | - **Cipher**: AES-GCM 78 | - **Key Size**: 256 bits 79 | - **IV Size**: 96 bits (12 
bytes) 80 | 81 | ## Future Enhancements 82 | 83 | 1. **Secure Sharing**: Implement file sharing with recipient's public key 84 | 2. **Password Change**: Allow password change with file re-encryption 85 | 3. **Key Backup**: Optional encrypted key backup with recovery phrase 86 | 4. **Metadata Encryption**: Encrypt filenames as well 87 | -------------------------------------------------------------------------------- /src/db/queries.sql: -------------------------------------------------------------------------------- 1 | -- name: CreateUser :one 2 | INSERT INTO users (email, password_hash, encryption_salt, subscriber, verified, verification_token) 3 | VALUES ($1, $2, $3, $4, $5, $6) 4 | RETURNING id, email, password_hash, encryption_salt, created_at, updated_at, subscriber, verified, verification_token; 5 | 6 | -- name: GetUserByEmail :one 7 | SELECT id, email, password_hash, encryption_salt, created_at, updated_at, subscriber, verified, verification_token FROM users 8 | WHERE email = $1 9 | LIMIT 1; 10 | 11 | -- name: GetUserByID :one 12 | SELECT id, email, password_hash, encryption_salt, created_at, updated_at, subscriber, verified, verification_token FROM users 13 | WHERE id = $1 14 | LIMIT 1; 15 | 16 | -- name: GetUserByVerificationToken :one 17 | SELECT id, email, password_hash, encryption_salt, created_at, updated_at, subscriber, verified, verification_token FROM users 18 | WHERE verification_token = $1 19 | LIMIT 1; 20 | 21 | -- name: UpdateUserPassword :exec 22 | UPDATE users 23 | SET password_hash = $1, updated_at = CURRENT_TIMESTAMP 24 | WHERE id = $2; 25 | 26 | -- name: VerifyUserByToken :one 27 | UPDATE users 28 | SET verified = 1, verification_token = NULL, updated_at = CURRENT_TIMESTAMP 29 | WHERE verification_token = $1 30 | RETURNING *; 31 | 32 | -- name: DeleteUserByID :exec 33 | DELETE FROM users 34 | WHERE id = $1; 35 | 36 | -- name: CreateFile :one 37 | INSERT INTO files (user_id, encrypted_filename, file_size, encrypted_key, share_token, download_count, file_data) 38 | VALUES ($1, $2, $3, $4, $5, 0, $6) 39 | RETURNING *; 40 | 41 | -- name: GetFilesByUserID :many 42 | SELECT * FROM files 43 | WHERE user_id = $1 44 | ORDER BY created_at DESC; 45 | 46 | -- name: GetFileByID :one 47 | SELECT * FROM files 48 | WHERE id = $1 AND user_id = $2 49 | LIMIT 1; 50 | 51 | -- name: GetFileByShareToken :one 52 | SELECT * FROM files 53 | WHERE share_token = $1 54 | LIMIT 1; 55 | 56 | -- name: IncrementDownloadCountByToken :one 57 | UPDATE files 58 | SET download_count = download_count + 1, updated_at = CURRENT_TIMESTAMP 59 | WHERE share_token = $1 60 | RETURNING *; 61 | 62 | -- name: GetTotalStorageByUserID :one 63 | SELECT COALESCE(SUM(file_size), 0)::bigint as total_storage 64 | FROM files 65 | WHERE user_id = $1; 66 | 67 | -- name: DeleteFile :exec 68 | DELETE FROM files 69 | WHERE id = $1 AND user_id = $2; 70 | 71 | -- name: DeleteFileByID :exec 72 | DELETE FROM files 73 | WHERE id = $1; 74 | 75 | -- name: UpdateFileShareToken :one 76 | UPDATE files 77 | SET share_token = $1, updated_at = CURRENT_TIMESTAMP 78 | WHERE id = $2 AND user_id = $3 79 | RETURNING *; 80 | 81 | -- name: UpdateFileEncryption :exec 82 | UPDATE files 83 | SET encrypted_filename = $1, encrypted_key = $2, updated_at = CURRENT_TIMESTAMP 84 | WHERE id = $3 AND user_id = $4; 85 | 86 | -- Device Auth Sessions 87 | -- name: CreateDeviceAuthSession :one 88 | INSERT INTO device_auth_sessions (device_code, user_code, expires_at) 89 | VALUES ($1, $2, $3) 90 | RETURNING *; 91 | 92 | -- name: 
GetDeviceAuthSessionByDeviceCode :one 93 | SELECT * FROM device_auth_sessions 94 | WHERE device_code = $1 95 | LIMIT 1; 96 | 97 | -- name: GetDeviceAuthSessionByUserCode :one 98 | SELECT * FROM device_auth_sessions 99 | WHERE user_code = $1 100 | LIMIT 1; 101 | 102 | -- name: ApproveDeviceAuthSession :one 103 | UPDATE device_auth_sessions 104 | SET approved = TRUE, user_id = $1, token = $2 105 | WHERE user_code = $3 AND approved = FALSE 106 | RETURNING *; 107 | 108 | -- name: DeleteExpiredDeviceAuthSessions :exec 109 | DELETE FROM device_auth_sessions 110 | WHERE expires_at < CURRENT_TIMESTAMP; 111 | -------------------------------------------------------------------------------- /src/relay/protobuf.go: -------------------------------------------------------------------------------- 1 | package relay 2 | 3 | import ( 4 | "github.com/gorilla/websocket" 5 | "google.golang.org/protobuf/proto" 6 | ) 7 | 8 | // Convert JSON IncomingMessage to Protobuf PBIncomingMessage 9 | func incomingToPB(msg *IncomingMessage) *PBIncomingMessage { 10 | return &PBIncomingMessage{ 11 | Type: msg.Type, 12 | RoomId: msg.RoomID, 13 | ClientId: msg.ClientID, 14 | Pub: msg.Pub, 15 | IvB64: msg.IvB64, 16 | DataB64: msg.DataB64, 17 | ChunkData: msg.ChunkData, 18 | ChunkNum: int32(msg.ChunkNum), 19 | EncryptedMetadata: msg.EncryptedMetadata, 20 | MetadataIv: msg.MetadataIV, 21 | } 22 | } 23 | 24 | // Convert Protobuf PBIncomingMessage to JSON IncomingMessage 25 | func pbToIncoming(pb *PBIncomingMessage) *IncomingMessage { 26 | return &IncomingMessage{ 27 | Type: pb.Type, 28 | RoomID: pb.RoomId, 29 | ClientID: pb.ClientId, 30 | Pub: pb.Pub, 31 | IvB64: pb.IvB64, 32 | DataB64: pb.DataB64, 33 | ChunkData: pb.ChunkData, 34 | ChunkNum: int(pb.ChunkNum), 35 | EncryptedMetadata: pb.EncryptedMetadata, 36 | MetadataIV: pb.MetadataIv, 37 | } 38 | } 39 | 40 | // Convert JSON OutgoingMessage to Protobuf PBOutgoingMessage 41 | func outgoingToPB(msg *OutgoingMessage) *PBOutgoingMessage { 42 | return &PBOutgoingMessage{ 43 | Type: msg.Type, 44 | From: msg.From, 45 | Mnemonic: msg.Mnemonic, 46 | RoomId: msg.RoomID, 47 | Pub: msg.Pub, 48 | IvB64: msg.IvB64, 49 | DataB64: msg.DataB64, 50 | ChunkData: msg.ChunkData, 51 | ChunkNum: int32(msg.ChunkNum), 52 | SelfId: msg.SelfID, 53 | Peers: msg.Peers, 54 | Count: int32(msg.Count), 55 | Error: msg.Error, 56 | EncryptedMetadata: msg.EncryptedMetadata, 57 | MetadataIv: msg.MetadataIV, 58 | PeerId: msg.PeerID, 59 | } 60 | } 61 | 62 | // Convert Protobuf PBOutgoingMessage to JSON OutgoingMessage 63 | func pbToOutgoing(pb *PBOutgoingMessage) *OutgoingMessage { 64 | return &OutgoingMessage{ 65 | Type: pb.Type, 66 | From: pb.From, 67 | Mnemonic: pb.Mnemonic, 68 | RoomID: pb.RoomId, 69 | Pub: pb.Pub, 70 | IvB64: pb.IvB64, 71 | DataB64: pb.DataB64, 72 | ChunkData: pb.ChunkData, 73 | ChunkNum: int(pb.ChunkNum), 74 | SelfID: pb.SelfId, 75 | Peers: pb.Peers, 76 | Count: int(pb.Count), 77 | Error: pb.Error, 78 | EncryptedMetadata: pb.EncryptedMetadata, 79 | MetadataIV: pb.MetadataIv, 80 | PeerID: pb.PeerId, 81 | } 82 | } 83 | 84 | // Encode OutgoingMessage to protobuf binary format 85 | func encodeProtobuf(msg *OutgoingMessage) ([]byte, error) { 86 | pb := outgoingToPB(msg) 87 | return proto.Marshal(pb) 88 | } 89 | 90 | // Decode protobuf binary to IncomingMessage 91 | func decodeProtobuf(data []byte) (*IncomingMessage, error) { 92 | pb := &PBIncomingMessage{} 93 | if err := proto.Unmarshal(data, pb); err != nil { 94 | return nil, err 95 | } 96 | return pbToIncoming(pb), nil 97 | } 98 | 99 | // Helper to 
send a protobuf message to a websocket connection 100 | func sendMessage(client *Client, msg *OutgoingMessage) error { 101 | data, err := encodeProtobuf(msg) 102 | if err != nil { 103 | return err 104 | } 105 | client.WriteMutex.Lock() 106 | defer client.WriteMutex.Unlock() 107 | return client.Conn.WriteMessage(websocket.BinaryMessage, data) 108 | } 109 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # Playwright End-to-End Tests 2 | 3 | This directory contains Playwright tests for the Share application, testing both web-to-web and web-to-CLI file transfers. 4 | 5 | ## Prerequisites 6 | 7 | 1. **Build the Share binary**: 8 | ```bash 9 | cd /path/to/e2ecp 10 | cd web && npm install 11 | make build 12 | ``` 13 | 14 | 2. **Install Playwright dependencies**: 15 | ```bash 16 | npm install 17 | ``` 18 | 19 | 3. **Install Playwright browsers**: 20 | 21 | The Playwright browsers need to be installed before running tests. Try one of these methods: 22 | 23 | **Method A - Direct install** (recommended for local development): 24 | ```bash 25 | npx playwright install chromium 26 | ``` 27 | 28 | **Method B - With system dependencies** (if Method A fails): 29 | ```bash 30 | npx playwright install --with-deps chromium 31 | ``` 32 | 33 | **Method C - System browser** (fallback): 34 | If the download fails due to network restrictions, you can configure Playwright to use system-installed Chromium/Chrome. See the [Playwright documentation](https://playwright.dev/docs/browsers#installing-browsers) for details. 35 | 36 | **Note**: In CI/CD environments, you may need to use the Playwright GitHub Action which handles browser installation automatically. 37 | 38 | ## Running Tests 39 | 40 | ### Quick Start (Recommended) 41 | The easiest way to run tests is using the helper script: 42 | 43 | ```bash 44 | cd tests 45 | ./run-tests.sh 46 | ``` 47 | 48 | This script will: 49 | - Check if the e2ecp binary exists (and build it if needed) 50 | - Install test dependencies 51 | - Install Playwright browsers if needed 52 | - Run all Playwright tests 53 | - Show results and test report location 54 | 55 | ### Run all tests (manual) 56 | ```bash 57 | npm run test:e2e 58 | ``` 59 | 60 | ### Run tests in headed mode (see the browser) 61 | ```bash 62 | ./run-tests.sh --headed 63 | # Or manually: 64 | npm run test:e2e:headed 65 | ``` 66 | 67 | ### Debug tests 68 | ```bash 69 | ./run-tests.sh --debug 70 | # Or manually: 71 | npm run test:e2e:debug 72 | ``` 73 | 74 | ### View test report 75 | ```bash 76 | npm run test:report 77 | ``` 78 | 79 | ### Run specific test file 80 | ```bash 81 | npx playwright test tests/web-to-web.spec.js 82 | npx playwright test tests/web-to-cli.spec.js 83 | ``` 84 | 85 | ## Test Suites 86 | 87 | ### 1. Web to Web Transfer (`web-to-web.spec.js`) 88 | Tests file transfer between two browser clients: 89 | - Starts a local relay server 90 | - Opens two browser contexts (sender and receiver) 91 | - Transfers a test file from sender to receiver 92 | - Verifies the file content matches 93 | 94 | ### 2. 
Web to CLI Transfer (`web-to-cli.spec.js`) 95 | Tests file transfer between web client and CLI: 96 | - **Web to CLI**: Uploads file from browser, receives with CLI tool 97 | - **CLI to Web**: Sends file with CLI tool, downloads in browser 98 | - Verifies file integrity in both directions 99 | 100 | ## Test Architecture 101 | 102 | - Each test suite starts its own relay server on a random port to avoid conflicts 103 | - Tests use unique room names to prevent interference 104 | - Server processes are properly cleaned up after tests complete 105 | - Test files are created and deleted automatically 106 | 107 | ## Troubleshooting 108 | 109 | ### Browser not installed 110 | If you get an error about browsers not being installed: 111 | ```bash 112 | npx playwright install chromium 113 | ``` 114 | 115 | ### Port conflicts 116 | Tests use random ports (8080+ range) to avoid conflicts. If you still have issues, ensure no other services are using these ports. 117 | 118 | ### Server not starting 119 | Make sure the `e2ecp` binary exists in the root directory: 120 | ```bash 121 | ls -la ../e2ecp 122 | ``` 123 | 124 | If it doesn't exist, build it: 125 | ```bash 126 | cd .. && make build 127 | ``` 128 | -------------------------------------------------------------------------------- /src/relay/gzip.go: -------------------------------------------------------------------------------- 1 | package relay 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "strings" 7 | ) 8 | 9 | // gzipFileHandler wraps an http.Handler to serve pre-compressed .gz files 10 | // when available and the client accepts gzip encoding 11 | type gzipFileHandler struct { 12 | handler http.Handler 13 | } 14 | 15 | func (g gzipFileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 16 | // Check if this is a curl request - if so, let spaHandler handle it 17 | // to serve the install script instead of the web UI 18 | ua := strings.ToLower(r.UserAgent()) 19 | if strings.Contains(ua, "curl") { 20 | g.handler.ServeHTTP(w, r) 21 | return 22 | } 23 | 24 | // Get the path 25 | path := r.URL.Path 26 | if path == "/" { 27 | path = "/index.html" 28 | } 29 | 30 | // Set cache headers based on file type 31 | setCacheHeaders(w, path) 32 | 33 | // Check if client accepts gzip 34 | if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { 35 | g.handler.ServeHTTP(w, r) 36 | return 37 | } 38 | 39 | // Try to open the .gz version from the embedded filesystem 40 | if fs, ok := g.handler.(spaHandler); ok { 41 | gzPath := path + ".gz" 42 | gzFile, err := fs.staticFS.Open(gzPath) 43 | if err == nil { 44 | defer gzFile.Close() 45 | 46 | // Get file info for content type detection 47 | stat, err := gzFile.Stat() 48 | if err == nil { 49 | // Set appropriate headers 50 | w.Header().Set("Content-Encoding", "gzip") 51 | w.Header().Set("Vary", "Accept-Encoding") 52 | 53 | // Detect and set content type based on original file extension 54 | contentType := detectContentType(path) 55 | if contentType != "" { 56 | w.Header().Set("Content-Type", contentType) 57 | } 58 | 59 | // Serve the gzipped file 60 | http.ServeContent(w, r, path, stat.ModTime(), gzFile.(io.ReadSeeker)) 61 | return 62 | } 63 | } 64 | } 65 | 66 | // Fall back to original file 67 | g.handler.ServeHTTP(w, r) 68 | } 69 | 70 | // setCacheHeaders sets appropriate cache headers based on the file path 71 | func setCacheHeaders(w http.ResponseWriter, path string) { 72 | // Assets with hashes in the filename can be cached indefinitely (immutable) 73 | // Pattern: /assets/filename-[hash].ext 74 | if 
strings.HasPrefix(path, "/assets/") && strings.Contains(path, "-") { 75 | // Check if the file has a hash (contains hyphen before extension) 76 | lastDot := strings.LastIndex(path, ".") 77 | lastHyphen := strings.LastIndex(path, "-") 78 | if lastHyphen > 0 && lastDot > lastHyphen { 79 | // This appears to be a hashed asset file 80 | // Cache for 1 year and mark as immutable 81 | w.Header().Set("Cache-Control", "public, max-age=31536000, immutable") 82 | return 83 | } 84 | } 85 | 86 | // HTML files should not be cached (always check for updates) 87 | if strings.HasSuffix(path, ".html") || path == "/" { 88 | w.Header().Set("Cache-Control", "no-cache, must-revalidate") 89 | return 90 | } 91 | 92 | // Other static assets: cache but revalidate 93 | w.Header().Set("Cache-Control", "public, max-age=3600, must-revalidate") 94 | } 95 | 96 | // detectContentType returns the MIME type based on file extension 97 | func detectContentType(path string) string { 98 | switch { 99 | case strings.HasSuffix(path, ".html"): 100 | return "text/html; charset=utf-8" 101 | case strings.HasSuffix(path, ".css"): 102 | return "text/css; charset=utf-8" 103 | case strings.HasSuffix(path, ".js"): 104 | return "application/javascript; charset=utf-8" 105 | case strings.HasSuffix(path, ".json"): 106 | return "application/json" 107 | case strings.HasSuffix(path, ".png"): 108 | return "image/png" 109 | case strings.HasSuffix(path, ".jpg"), strings.HasSuffix(path, ".jpeg"): 110 | return "image/jpeg" 111 | case strings.HasSuffix(path, ".gif"): 112 | return "image/gif" 113 | case strings.HasSuffix(path, ".svg"): 114 | return "image/svg+xml" 115 | case strings.HasSuffix(path, ".woff"): 116 | return "font/woff" 117 | case strings.HasSuffix(path, ".woff2"): 118 | return "font/woff2" 119 | case strings.HasSuffix(path, ".ttf"): 120 | return "font/ttf" 121 | case strings.HasSuffix(path, ".ico"): 122 | return "image/x-icon" 123 | default: 124 | return "" 125 | } 126 | } 127 | 128 | // newGzipFileHandler wraps a handler to serve pre-compressed gzip files 129 | func newGzipFileHandler(handler http.Handler) http.Handler { 130 | return gzipFileHandler{handler: handler} 131 | } 132 | -------------------------------------------------------------------------------- /.github/workflows/homebrew.yml: -------------------------------------------------------------------------------- 1 | name: Update Homebrew Formula 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | homebrew: 9 | runs-on: ubuntu-latest 10 | permissions: 11 | contents: write 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v4 15 | 16 | - name: Setup Node.js 17 | uses: actions/setup-node@v4 18 | with: 19 | node-version: "24" 20 | 21 | - name: Setup Go 22 | uses: actions/setup-go@v6 23 | with: 24 | go-version: "stable" 25 | 26 | - name: Create vendored source 27 | id: release_info 28 | run: | 29 | # Clean version string (remove 'v' prefix if present) 30 | VERSION="${{ github.event.release.tag_name }}" 31 | VERSION="${VERSION#v}" 32 | echo "clean_version=$VERSION" >> $GITHUB_OUTPUT 33 | echo "version=${{ github.event.release.tag_name }}" >> $GITHUB_OUTPUT 34 | 35 | # Download and extract the source code 36 | curl -L -o source.tar.gz "https://github.com/schollz/e2ecp/archive/refs/tags/${{ github.event.release.tag_name }}.tar.gz" 37 | tar -xzf source.tar.gz 38 | cd e2ecp-* 39 | 40 | # Vendor the dependencies 41 | go mod vendor 42 | 43 | # Create vendored tarball 44 | cd .. 
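# Rename the versioned directory (e2ecp-<tag>) to a fixed name so the tarball unpacks to a predictable e2ecp-vendored/ top-level directory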
45 | mv e2ecp-* e2ecp-vendored 46 | tar -czf source_code_vendored.tar.gz e2ecp-vendored/ 47 | 48 | # Calculate SHA256 of vendored tarball 49 | SHA256=$(shasum -a 256 source_code_vendored.tar.gz | cut -d' ' -f1) 50 | echo "sha256=$SHA256" >> $GITHUB_OUTPUT 51 | 52 | - name: Upload vendored source to release 53 | uses: softprops/action-gh-release@v2 54 | with: 55 | tag_name: ${{ github.event.release.tag_name }} 56 | files: source_code_vendored.tar.gz 57 | env: 58 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 59 | 60 | - name: Generate Homebrew formula 61 | run: | 62 | cat > e2ecp.rb << 'EOF' 63 | class E2ecp < Formula 64 | desc "Secure E2E encrypted file transfer using ECDH and AES-GCM" 65 | homepage "https://github.com/schollz/e2ecp" 66 | url "https://github.com/schollz/e2ecp/releases/download/${{ github.event.release.tag_name }}/source_code_vendored.tar.gz" 67 | sha256 "${{ steps.release_info.outputs.sha256 }}" 68 | version "${{ steps.release_info.outputs.clean_version }}" 69 | license "MIT" 70 | 71 | depends_on "go" => :build 72 | depends_on "node" => :build 73 | 74 | def install 75 | # Install web dependencies 76 | system "npm", "install" 77 | 78 | # Install web dependencies 79 | cd "web" do 80 | system "npm", "install" 81 | end 82 | 83 | # Build web assets 84 | cd "web" do 85 | system "npm", "run", "build" 86 | end 87 | system "touch", "web/dist/.keep" 88 | 89 | # Build with proper flags using vendored dependencies 90 | ENV["CGO_ENABLED"] = "0" 91 | system "go", "build", "-v", "-mod=vendor", "-ldflags", "-X main.Version=#{version} -extldflags '-static'", "-o", "e2ecp" 92 | bin.install "e2ecp" 93 | end 94 | 95 | test do 96 | system "#{bin}/e2ecp", "--help" 97 | end 98 | end 99 | EOF 100 | 101 | - name: Update Homebrew tap 102 | env: 103 | HOMEBREW_TAP_TOKEN: ${{ secrets.HOMEBREW_TAP_TOKEN }} 104 | run: | 105 | # Configure git 106 | git config --global user.name "github-actions[bot]" 107 | git config --global user.email "github-actions[bot]@users.noreply.github.com" 108 | 109 | # Clone the tap repository 110 | git clone https://x-access-token:${HOMEBREW_TAP_TOKEN}@github.com/schollz/homebrew-tap.git tap 111 | 112 | # Ensure Formula directory exists 113 | mkdir -p tap/Formula 114 | 115 | # Copy the formula 116 | cp e2ecp.rb tap/Formula/e2ecp.rb 117 | 118 | # Commit and push 119 | cd tap 120 | git add Formula/e2ecp.rb 121 | git commit -m "Update e2ecp to ${{ steps.release_info.outputs.version }}" || exit 0 122 | git push 123 | -------------------------------------------------------------------------------- /src/relay/api_routes.go: -------------------------------------------------------------------------------- 1 | package relay 2 | 3 | import ( 4 | "database/sql" 5 | "log/slog" 6 | "net/http" 7 | "os" 8 | "strings" 9 | 10 | "github.com/schollz/e2ecp/src/api" 11 | "github.com/schollz/e2ecp/src/auth" 12 | ) 13 | 14 | // SetupAPIRoutes initializes all API routes for authentication and file management 15 | func SetupAPIRoutes(mux *http.ServeMux, database *sql.DB, log *slog.Logger, enabled bool) { 16 | if !enabled { 17 | log.Info("Storage/profile endpoints disabled (set ALLOW_STORAGE_PROFILE=yes to enable)") 18 | return 19 | } 20 | 21 | // Get JWT secret from environment or use default 22 | jwtSecret := os.Getenv("JWT_SECRET") 23 | if jwtSecret == "" { 24 | jwtSecret = "default-secret-change-in-production" 25 | log.Warn("Using default JWT secret. 
Set JWT_SECRET environment variable in production.") 26 | } 27 | 28 | // Initialize services and handlers 29 | authService := auth.NewService(database, jwtSecret, log) 30 | authHandlers := api.NewAuthHandlers(authService, log) 31 | fileHandlers := api.NewFileHandlers(database, log) 32 | authMiddleware := api.AuthMiddleware(authService) 33 | 34 | // Auth routes (no authentication required) 35 | mux.HandleFunc("/api/auth/register", authHandlers.Register) 36 | mux.HandleFunc("/api/auth/login", authHandlers.Login) 37 | mux.HandleFunc("/api/auth/verify-email", authHandlers.VerifyEmailToken) 38 | mux.HandleFunc("/api/auth/captcha", authHandlers.Captcha) 39 | 40 | // Device auth routes 41 | mux.HandleFunc("/api/auth/device/init", authHandlers.InitiateDeviceAuth) 42 | mux.HandleFunc("/api/auth/device/poll", authHandlers.PollDeviceAuth) 43 | mux.Handle("/api/auth/device/approve", authMiddleware(http.HandlerFunc(authHandlers.ApproveDeviceAuth))) 44 | 45 | // Auth verify route (authentication required) 46 | mux.Handle("/api/auth/verify", authMiddleware(http.HandlerFunc(authHandlers.Verify))) 47 | mux.Handle("/api/auth/change-password", authMiddleware(http.HandlerFunc(authHandlers.ChangePassword))) 48 | mux.Handle("/api/auth/delete-account", authMiddleware(http.HandlerFunc(authHandlers.DeleteAccount))) 49 | 50 | // File routes (authentication required) 51 | mux.Handle("/api/files/upload", authMiddleware(http.HandlerFunc(fileHandlers.Upload))) 52 | mux.Handle("/api/files/list", authMiddleware(http.HandlerFunc(fileHandlers.List))) 53 | mux.Handle("/api/files/rekey", authMiddleware(http.HandlerFunc(fileHandlers.Rekey))) 54 | 55 | // File download route (authentication required) 56 | mux.Handle("/api/files/download/", authMiddleware(http.HandlerFunc(fileHandlers.Download))) 57 | 58 | // File delete route (authentication required) 59 | mux.Handle("/api/files/", authMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 60 | if r.Method == http.MethodDelete && strings.HasPrefix(r.URL.Path, "/api/files/") { 61 | fileHandlers.Delete(w, r) 62 | } else { 63 | http.NotFound(w, r) 64 | } 65 | }))) 66 | 67 | // Share link generation (authentication required) 68 | mux.Handle("/api/files/share/generate/", authMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 69 | if r.Method == http.MethodPost { 70 | // Rewrite path for handler 71 | r.URL.Path = strings.Replace(r.URL.Path, "/generate", "", 1) 72 | fileHandlers.GenerateShareLink(w, r) 73 | } else { 74 | http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) 75 | } 76 | }))) 77 | 78 | // Public share link download (no authentication required) 79 | mux.HandleFunc("/api/files/share/", func(w http.ResponseWriter, r *http.Request) { 80 | if r.Method == http.MethodGet && !strings.Contains(r.URL.Path, "/generate/") { 81 | fileHandlers.DownloadByToken(w, r) 82 | } else { 83 | http.NotFound(w, r) 84 | } 85 | }) 86 | 87 | log.Info("API routes initialized", "endpoints", []string{ 88 | "/api/auth/register", 89 | "/api/auth/login", 90 | "/api/auth/verify-email", 91 | "/api/auth/captcha", 92 | "/api/auth/verify", 93 | "/api/auth/device/init", 94 | "/api/auth/device/poll", 95 | "/api/auth/device/approve", 96 | "/api/files/upload", 97 | "/api/files/list", 98 | "/api/files/rekey", 99 | "/api/files/download/{id}", 100 | "/api/files/{id} (DELETE)", 101 | "/api/files/share/generate/{id}", 102 | "/api/files/share/{token}", 103 | }) 104 | } 105 | 106 | func allowStorageProfile(log *slog.Logger) bool { 107 | val, ok := 
os.LookupEnv("ALLOW_STORAGE_PROFILE") 108 | if !ok { 109 | log.Info("ALLOW_STORAGE_PROFILE not set; storage/profile endpoints disabled by default") 110 | return false 111 | } 112 | 113 | if strings.EqualFold(val, "yes") { 114 | return true 115 | } 116 | 117 | log.Info("Storage/profile endpoints disabled (set ALLOW_STORAGE_PROFILE=yes to enable)", "value", val) 118 | return false 119 | } 120 | -------------------------------------------------------------------------------- /src/relay/compatibility_test.go: -------------------------------------------------------------------------------- 1 | package relay 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | "time" 7 | 8 | "github.com/gorilla/websocket" 9 | "google.golang.org/protobuf/proto" 10 | ) 11 | 12 | // TestProtobufMessaging tests that Protobuf clients can communicate through the relay 13 | func TestProtobufMessaging(t *testing.T) { 14 | // Setup test server 15 | roomMux.Lock() 16 | rooms = make(map[string]*Room) 17 | roomMux.Unlock() 18 | 19 | server := setupTestServer(t) 20 | defer server.Close() 21 | 22 | wsURL := "ws" + strings.TrimPrefix(server.URL, "http") + "/ws" 23 | 24 | // Connect first client using Protobuf 25 | pbConn1, _, err := websocket.DefaultDialer.Dial(wsURL, nil) 26 | if err != nil { 27 | t.Fatalf("Failed to connect first Protobuf client: %v", err) 28 | } 29 | defer pbConn1.Close() 30 | 31 | // Send Protobuf join message 32 | pbJoinMsg1 := &PBIncomingMessage{ 33 | Type: "join", 34 | RoomId: "protobuf-test-room", 35 | ClientId: "pb-client-1", 36 | } 37 | pbData1, _ := proto.Marshal(pbJoinMsg1) 38 | pbConn1.WriteMessage(websocket.BinaryMessage, pbData1) 39 | 40 | // Read joined response 41 | pbConn1.SetReadDeadline(time.Now().Add(2 * time.Second)) 42 | msgType, raw, err := pbConn1.ReadMessage() 43 | if err != nil { 44 | t.Fatalf("Failed to read Protobuf response: %v", err) 45 | } 46 | 47 | if msgType != websocket.BinaryMessage { 48 | t.Fatalf("Expected binary message, got type %d", msgType) 49 | } 50 | 51 | pbResp1 := &PBOutgoingMessage{} 52 | if err := proto.Unmarshal(raw, pbResp1); err != nil { 53 | t.Fatalf("Failed to unmarshal Protobuf response: %v", err) 54 | } 55 | 56 | if pbResp1.Type != "joined" { 57 | t.Fatalf("Expected 'joined' message, got '%s'", pbResp1.Type) 58 | } 59 | 60 | // Read initial peers message for first client (count will be 1) 61 | pbConn1.SetReadDeadline(time.Now().Add(2 * time.Second)) 62 | _, _, _ = pbConn1.ReadMessage() 63 | 64 | // Connect second client using Protobuf 65 | pbConn2, _, err := websocket.DefaultDialer.Dial(wsURL, nil) 66 | if err != nil { 67 | t.Fatalf("Failed to connect second Protobuf client: %v", err) 68 | } 69 | defer pbConn2.Close() 70 | 71 | // Send Protobuf join message 72 | pbJoinMsg2 := &PBIncomingMessage{ 73 | Type: "join", 74 | RoomId: "protobuf-test-room", 75 | ClientId: "pb-client-2", 76 | } 77 | pbData2, _ := proto.Marshal(pbJoinMsg2) 78 | pbConn2.WriteMessage(websocket.BinaryMessage, pbData2) 79 | 80 | // Read joined response for second client 81 | pbConn2.SetReadDeadline(time.Now().Add(2 * time.Second)) 82 | msgType2, raw2, err := pbConn2.ReadMessage() 83 | if err != nil { 84 | t.Fatalf("Failed to read Protobuf response for second client: %v", err) 85 | } 86 | 87 | if msgType2 != websocket.BinaryMessage { 88 | t.Fatalf("Expected binary message, got type %d", msgType2) 89 | } 90 | 91 | pbResp2 := &PBOutgoingMessage{} 92 | if err := proto.Unmarshal(raw2, pbResp2); err != nil { 93 | t.Fatalf("Failed to unmarshal Protobuf response: %v", err) 94 | } 95 | 96 | if 
pbResp2.Type != "joined" { 97 | t.Fatalf("Expected 'joined' message, got '%s'", pbResp2.Type) 98 | } 99 | 100 | // Read peers message from first client 101 | pbConn1.SetReadDeadline(time.Now().Add(2 * time.Second)) 102 | msgType3, raw3, err := pbConn1.ReadMessage() 103 | if err != nil { 104 | t.Fatalf("Failed to read peers message for first client: %v", err) 105 | } 106 | 107 | if msgType3 != websocket.BinaryMessage { 108 | t.Fatalf("Expected binary message, got type %d", msgType3) 109 | } 110 | 111 | peersMsg1 := &PBOutgoingMessage{} 112 | if err := proto.Unmarshal(raw3, peersMsg1); err != nil { 113 | t.Fatalf("Failed to unmarshal peers message: %v", err) 114 | } 115 | 116 | if peersMsg1.Type != "peers" { 117 | t.Fatalf("Expected 'peers' message, got type='%s'", peersMsg1.Type) 118 | } 119 | 120 | // Read peers message from second client 121 | pbConn2.SetReadDeadline(time.Now().Add(2 * time.Second)) 122 | msgType4, raw4, err := pbConn2.ReadMessage() 123 | if err != nil { 124 | t.Fatalf("Failed to read peers message for second client: %v", err) 125 | } 126 | 127 | if msgType4 != websocket.BinaryMessage { 128 | t.Fatalf("Expected binary message, got type %d", msgType4) 129 | } 130 | 131 | peersMsg2 := &PBOutgoingMessage{} 132 | if err := proto.Unmarshal(raw4, peersMsg2); err != nil { 133 | t.Fatalf("Failed to unmarshal peers message: %v", err) 134 | } 135 | 136 | if peersMsg2.Type != "peers" { 137 | t.Fatalf("Expected 'peers' message, got type='%s'", peersMsg2.Type) 138 | } 139 | 140 | // Verify both clients see 2 peers 141 | if peersMsg1.Count != 2 || peersMsg2.Count != 2 { 142 | t.Fatalf("Expected both clients to see 2 peers, got %d and %d", peersMsg1.Count, peersMsg2.Count) 143 | } 144 | 145 | t.Log("Successfully tested Protobuf messaging between clients") 146 | } 147 | -------------------------------------------------------------------------------- /tests/cli-to-web-text.spec.js: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | const { test, expect } = require('@playwright/test'); 3 | const path = require('path'); 4 | const { spawn } = require('child_process'); 5 | 6 | /** 7 | * Test: CLI to Web text transfer 8 | * 9 | * This test verifies that text can be successfully transferred from the command-line 10 | * tool to a web client through the relay server. 
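 *
 * Flow (mirrors the test body below): start a relay server on a random port with
 * `./e2ecp serve`, open a browser page at a unique room URL, run
 * `./e2ecp send <text> <room> --server ws://localhost:<port>`, then assert that the
 * "RECEIVED TEXT" modal shows the same text and that the copy button places it on
 * the clipboard.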
11 | */ 12 | test.describe('CLI to Web Text Transfer', () => { 13 | let relayServer; 14 | let serverPort; 15 | let serverUrl; 16 | 17 | test.beforeAll(async () => { 18 | // Start the relay server 19 | serverPort = 8082 + Math.floor(Math.random() * 1000); // Random port to avoid conflicts 20 | 21 | return new Promise((resolve, reject) => { 22 | relayServer = spawn('./e2ecp', ['serve', '--port', serverPort.toString(), '--db-url', ''], { 23 | cwd: path.join(__dirname, '..'), 24 | }); 25 | 26 | let started = false; 27 | 28 | relayServer.stdout.on('data', (data) => { 29 | console.log(`Server: ${data}`); 30 | if (!started && data.toString().includes('Starting')) { 31 | started = true; 32 | serverUrl = `http://localhost:${serverPort}`; 33 | // Give server more time to fully initialize WebSocket handlers 34 | setTimeout(resolve, 3000); 35 | } 36 | }); 37 | 38 | relayServer.stderr.on('data', (data) => { 39 | console.error(`Server Error: ${data}`); 40 | }); 41 | 42 | relayServer.on('error', (error) => { 43 | reject(error); 44 | }); 45 | 46 | // Timeout in case server doesn't start 47 | setTimeout(() => { 48 | if (!started) { 49 | started = true; 50 | serverUrl = `http://localhost:${serverPort}`; 51 | resolve(); 52 | } 53 | }, 5000); 54 | }); 55 | }); 56 | 57 | test.afterAll(async () => { 58 | // Stop the relay server and wait for cleanup 59 | if (relayServer) { 60 | relayServer.kill('SIGTERM'); 61 | // Wait for server to fully shutdown 62 | await new Promise(resolve => setTimeout(resolve, 2000)); 63 | } 64 | }); 65 | 66 | test('should transfer text from CLI sender to web receiver', async ({ browser }) => { 67 | // Test text to send 68 | const testText = 'Hello from CLI to web! This is a Playwright test for CLI-to-web text transfer.'; 69 | 70 | const context = await browser.newContext({ 71 | permissions: ['clipboard-read', 'clipboard-write'] 72 | }); 73 | const page = await context.newPage(); 74 | 75 | try { 76 | // Generate a unique room name 77 | const roomName = `test-cli-web-text-${Date.now()}`; 78 | 79 | // Navigate web receiver to the room first 80 | await page.goto(`${serverUrl}/${roomName}`, { waitUntil: 'networkidle', timeout: 30000 }); 81 | 82 | // Wait for the page to be ready 83 | await page.waitForSelector('button:has-text("CLICK OR DROP FILES HERE"), div:has-text("WAITING FOR PEER")', { timeout: 15000 }); 84 | 85 | // Extra wait to ensure WebSocket connection is established 86 | await new Promise(resolve => setTimeout(resolve, 2000)); 87 | 88 | // Start CLI sender with text (since "test text message" is not a file, it will be sent as text) 89 | const wsUrl = `ws://localhost:${serverPort}`; 90 | const cliSender = spawn('./e2ecp', ['send', testText, roomName, '--server', wsUrl], { 91 | cwd: path.join(__dirname, '..'), 92 | }); 93 | 94 | let senderOutput = ''; 95 | 96 | cliSender.stdout.on('data', (data) => { 97 | const output = data.toString(); 98 | senderOutput += output; 99 | console.log(`CLI Sender: ${output}`); 100 | }); 101 | 102 | cliSender.stderr.on('data', (data) => { 103 | console.error(`CLI Sender Error: ${data}`); 104 | }); 105 | 106 | // Wait for the text message modal to appear 107 | await page.waitForSelector('text=RECEIVED TEXT', { timeout: 30000 }); 108 | 109 | // Get the text content from the modal 110 | const receivedText = await page.locator('.bg-gray-200 .font-bold.break-words').textContent(); 111 | 112 | // Verify the received text matches what was sent 113 | expect(receivedText.trim()).toBe(testText); 114 | 115 | // Test copy button functionality 116 | await 
page.click('button[title="Copy to clipboard"]'); 117 | 118 | // Verify clipboard content directly (more reliable than checking toast) 119 | const clipboardText = await page.evaluate(async () => { 120 | return await navigator.clipboard.readText(); 121 | }); 122 | expect(clipboardText).toBe(testText); 123 | 124 | // Close the modal 125 | await page.click('button:has-text("Close")'); 126 | 127 | // Clean up 128 | cliSender.kill('SIGTERM'); 129 | await new Promise(resolve => setTimeout(resolve, 1000)); 130 | 131 | } finally { 132 | await page.close(); 133 | await context.close(); 134 | } 135 | }); 136 | }); 137 | -------------------------------------------------------------------------------- /TESTING.md: -------------------------------------------------------------------------------- 1 | # Testing Documentation 2 | 3 | This document describes the testing infrastructure for the Share application. 4 | 5 | ## Test Types 6 | 7 | ### 1. Unit Tests (Go) 8 | **Location:** `*_test.go` files throughout the codebase 9 | 10 | **Coverage:** 11 | - Cryptographic functions (`src/crypto/crypto_test.go`) 12 | - Client functionality (`src/client/client_test.go`, `src/client/metadata_test.go`, `src/client/zip_test.go`) 13 | - Relay server (`src/relay/relay_test.go`, `src/relay/compatibility_test.go`, `src/relay/zero_knowledge_test.go`) 14 | - Protocol handling (`main_test.go`) 15 | 16 | **Running:** 17 | ```bash 18 | go test -v ./... 19 | ``` 20 | 21 | **Coverage Report:** 22 | ```bash 23 | go test -v -cover ./... 24 | ``` 25 | 26 | ### 2. Integration Tests (Go) 27 | **Location:** `integration_test.go` 28 | 29 | **Coverage:** 30 | - Full file transfer: CLI sender → CLI receiver 31 | - Folder transfer: CLI sender → CLI receiver (with zip/unzip) 32 | - Hash verification during transfer 33 | 34 | **Running:** 35 | ```bash 36 | go test -v ./... 37 | # Or skip integration tests: 38 | go test -v -short ./... 39 | ``` 40 | 41 | ### 3. End-to-End Tests (Playwright) 42 | **Location:** `tests/` directory 43 | 44 | **Coverage:** 45 | - **Web-to-Web Transfer** (`web-to-web.spec.js`): 46 | - Browser client → Browser client file transfer 47 | - E2E encryption verification 48 | - File integrity validation 49 | 50 | - **Web-to-CLI Transfer** (`web-to-cli.spec.js`): 51 | - Browser client → CLI tool file transfer 52 | - CLI tool → Browser client file transfer 53 | - Bidirectional compatibility testing 54 | 55 | **Running:** 56 | ```bash 57 | cd tests 58 | ./run-tests.sh 59 | ``` 60 | 61 | See [tests/README.md](tests/README.md) for detailed Playwright test documentation. 62 | 63 | ## Test Coverage Summary 64 | 65 | | Component | Test Type | Coverage | 66 | |-----------|-----------|----------| 67 | | Crypto (ECDH, AES-GCM) | Unit | ~81% | 68 | | Client (Send/Receive) | Unit + Integration | ~50% | 69 | | Relay Server | Unit | ~50% | 70 | | Web UI | E2E (Playwright) | Web-to-web, Web-to-CLI | 71 | | CLI | Integration + E2E | CLI-to-CLI, CLI-to-web | 72 | 73 | ## Testing Philosophy 74 | 75 | 1. **Unit Tests**: Fast, isolated tests for individual functions and components 76 | 2. **Integration Tests**: Test interactions between components (e.g., full CLI file transfer) 77 | 3. **E2E Tests**: Test complete user workflows through real interfaces (web browser, CLI) 78 | 79 | ## Running All Tests 80 | 81 | ### Local Development 82 | ```bash 83 | # Go tests 84 | go test -v ./... 
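# Or unit tests only (skip integration tests):
go test -v -short ./...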
85 | 86 | # Playwright tests 87 | cd tests && ./run-tests.sh 88 | ``` 89 | 90 | ### CI/CD 91 | All tests run automatically in GitHub Actions: 92 | - `build.yml`: Builds and runs Go tests on macOS, Linux, and Windows, plus E2E Playwright tests on Linux 93 | 94 | ## Test Data 95 | 96 | Tests create temporary files and directories: 97 | - Go tests: Use `os.MkdirTemp()` with automatic cleanup 98 | - Playwright tests: Create files in `tests/` directory, cleaned up after each test 99 | 100 | All test artifacts are excluded via `.gitignore`. 101 | 102 | ## Adding New Tests 103 | 104 | ### Adding a Go Test 105 | 1. Create or update `*_test.go` file in the relevant package 106 | 2. Follow existing test patterns (use `testing.T`, `t.Fatalf`, etc.) 107 | 3. Run `go test -v ./...` to verify 108 | 109 | ### Adding a Playwright Test 110 | 1. Create `*.spec.js` file in `tests/` directory 111 | 2. Follow existing patterns (see `web-to-web.spec.js` example) 112 | 3. Start relay server in `beforeAll` hook 113 | 4. Clean up in `afterAll` hook 114 | 5. Run `./run-tests.sh` to verify 115 | 116 | ## Debugging Tests 117 | 118 | ### Go Tests 119 | ```bash 120 | # Verbose output 121 | go test -v ./... 122 | 123 | # Run specific test 124 | go test -v -run TestIntegrationFileTransfer ./... 125 | 126 | # With race detection 127 | go test -v -race ./... 128 | ``` 129 | 130 | ### Playwright Tests 131 | ```bash 132 | # Headed mode (see browser) 133 | cd tests && ./run-tests.sh --headed 134 | 135 | # Debug mode (step through tests) 136 | cd tests && ./run-tests.sh --debug 137 | 138 | # View test report 139 | npm run test:report 140 | ``` 141 | 142 | ## Known Issues 143 | 144 | ### Network Restrictions 145 | Integration and E2E tests require network access to: 146 | - Start local relay server 147 | - Create WebSocket connections 148 | - Download Playwright browsers (first run only) 149 | 150 | In restricted environments, tests may be skipped automatically. 151 | 152 | ### Port Conflicts 153 | Tests use random ports (8080+ range) to avoid conflicts. If you encounter port issues, ensure no other services are using this range. 154 | 155 | ## Future Test Improvements 156 | 157 | - [ ] Add tests for folder transfers in Playwright 158 | - [ ] Add tests for multiple file transfers 159 | - [ ] Add performance/benchmark tests 160 | - [ ] Add tests for error conditions (network failures, corrupted data) 161 | - [ ] Add visual regression tests for web UI 162 | - [ ] Increase overall test coverage to 80%+ 163 | -------------------------------------------------------------------------------- /web/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 8 | 9 | 10 | 11 | e2ecp - Secure End-to-End Encrypted File Transfer 12 | 13 | 15 | 17 | 18 | 19 | 20 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 51 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 62 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 |
80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /web/src/VerifyEmail.jsx: -------------------------------------------------------------------------------- 1 | import React, { useEffect, useRef, useState } from "react"; 2 | import { useNavigate, useSearchParams } from "react-router-dom"; 3 | import toast from "react-hot-toast"; 4 | import { useAuth } from "./AuthContext"; 5 | import { deriveKey } from "./encryption"; 6 | import Navbar from "./Navbar"; 7 | 8 | export default function VerifyEmail() { 9 | const [searchParams] = useSearchParams(); 10 | const tokenParam = searchParams.get("token") || ""; 11 | const { verifyEmailToken, setEncryptionKey } = useAuth(); 12 | const navigate = useNavigate(); 13 | const [status, setStatus] = useState("pending"); 14 | const [user, setUser] = useState(null); 15 | const [password, setPassword] = useState(""); 16 | const [unlocking, setUnlocking] = useState(false); 17 | const attempted = useRef(false); 18 | 19 | useEffect(() => { 20 | const run = async () => { 21 | if (attempted.current) return; 22 | attempted.current = true; 23 | if (!tokenParam) { 24 | setStatus("error"); 25 | return; 26 | } 27 | try { 28 | const data = await verifyEmailToken(tokenParam); 29 | if (!data?.user) { 30 | throw new Error("Verification response invalid"); 31 | } 32 | setUser(data.user); 33 | setStatus("verified"); 34 | toast.success("Email verified!"); 35 | } catch (err) { 36 | setStatus("error"); 37 | toast.error(err.message || "Verification failed"); 38 | } 39 | }; 40 | run(); 41 | }, [tokenParam, verifyEmailToken]); 42 | 43 | const handleUnlock = async (e) => { 44 | e.preventDefault(); 45 | if (!user) return; 46 | setUnlocking(true); 47 | try { 48 | const key = await deriveKey(password, user.encryption_salt); 49 | setEncryptionKey(key); 50 | 51 | const exportedKey = await window.crypto.subtle.exportKey("raw", key); 52 | const keyArray = new Uint8Array(exportedKey); 53 | const keyHex = Array.from(keyArray) 54 | .map((b) => b.toString(16).padStart(2, "0")) 55 | .join(""); 56 | localStorage.setItem("encryptionKey", keyHex); 57 | 58 | toast.success("Signed in!"); 59 | navigate("/storage"); 60 | } catch (err) { 61 | toast.error("Invalid password for unlocking encryption key"); 62 | } finally { 63 | setUnlocking(false); 64 | } 65 | }; 66 | 67 | return ( 68 |
69 | 70 |
71 |
72 | {status === "pending" && ( 73 |
Verifying...
74 | )} 75 | {status === "error" && ( 76 |
77 |

78 | Verification failed 79 |

80 |

The link is invalid or expired.

81 | 87 |
88 | )} 89 | {status === "verified" && user && ( 90 |
91 |

92 | Email Verified 93 |

94 |

95 | Enter your password to unlock your encryption key and finish 96 | signing in. 97 |

98 |
99 |
100 | 103 | setPassword(e.target.value)} 107 | required 108 | className="w-full border-2 border-black dark:border-white bg-white dark:bg-black text-black dark:text-white px-3 py-2 focus:outline-none" 109 | /> 110 |
111 | 118 |
119 |
120 | )} 121 |
122 |
123 |
124 | ); 125 | } 126 | -------------------------------------------------------------------------------- /web/src/DeviceAuth.jsx: -------------------------------------------------------------------------------- 1 | import React, { useState } from "react"; 2 | import { useNavigate } from "react-router-dom"; 3 | import { useAuth } from "./AuthContext"; 4 | import toast from "react-hot-toast"; 5 | import Navbar from "./Navbar"; 6 | 7 | export default function DeviceAuth() { 8 | const { token } = useAuth(); 9 | const navigate = useNavigate(); 10 | const [userCode, setUserCode] = useState(""); 11 | const [approving, setApproving] = useState(false); 12 | 13 | const handleApprove = async (e) => { 14 | e.preventDefault(); 15 | 16 | if (!token) { 17 | toast.error("You must be logged in to approve a device"); 18 | navigate("/login"); 19 | return; 20 | } 21 | 22 | const trimmedCode = userCode.trim().toUpperCase(); 23 | console.log("Sending user_code:", trimmedCode); 24 | 25 | setApproving(true); 26 | try { 27 | const requestBody = { user_code: trimmedCode }; 28 | console.log("Request body:", JSON.stringify(requestBody)); 29 | 30 | const response = await fetch("/api/auth/device/approve", { 31 | method: "POST", 32 | headers: { 33 | "Content-Type": "application/json", 34 | "Authorization": `Bearer ${token}`, 35 | }, 36 | body: JSON.stringify(requestBody), 37 | }); 38 | 39 | if (!response.ok) { 40 | const errorText = await response.text(); 41 | console.error("Error response:", errorText); 42 | let errorMsg; 43 | try { 44 | const error = JSON.parse(errorText); 45 | errorMsg = error.error || "Failed to approve device"; 46 | } catch { 47 | errorMsg = errorText || "Failed to approve device"; 48 | } 49 | throw new Error(errorMsg); 50 | } 51 | 52 | toast.success("Device approved successfully!"); 53 | setUserCode(""); 54 | // Optionally redirect to storage 55 | setTimeout(() => navigate("/storage"), 1500); 56 | } catch (err) { 57 | console.error("Approval error:", err); 58 | toast.error(err.message || "Failed to approve device"); 59 | } finally { 60 | setApproving(false); 61 | } 62 | }; 63 | 64 | return ( 65 |
66 | 67 |
68 |
69 |

Device Auth

70 |

71 | Enter the code displayed on your device to authorize it to access your account. 72 |

73 | 74 |
75 |
76 | 79 | setUserCode(e.target.value.toUpperCase())} 83 | placeholder="XXXXXX" 84 | maxLength="6" 85 | required 86 | className="w-full border-2 border-black dark:border-white bg-white dark:bg-black text-black dark:text-white px-3 py-2 focus:outline-none text-center text-2xl font-bold tracking-widest" 87 | /> 88 |
89 | 90 | 97 |
98 | 99 |
100 | 106 |
107 |
108 |
109 |
110 | ); 111 | } 112 | -------------------------------------------------------------------------------- /src/relay/iconwords.go: -------------------------------------------------------------------------------- 1 | package relay 2 | 3 | import ( 4 | "crypto/rand" 5 | "math/big" 6 | ) 7 | 8 | // IconWord represents a Font Awesome icon and its associated word 9 | type IconWord struct { 10 | Icon string 11 | Word string 12 | } 13 | 14 | // IconWords is a list of Font Awesome icons with their associated words 15 | // This list matches the ICON_CLASSES array in web/src/App.jsx 16 | var IconWords = []IconWord{ 17 | {"fa-anchor", "anchor"}, 18 | {"fa-apple-whole", "apple"}, 19 | {"fa-atom", "atom"}, 20 | {"fa-award", "award"}, 21 | {"fa-basketball", "basketball"}, 22 | {"fa-bell", "bell"}, 23 | {"fa-bicycle", "bicycle"}, 24 | {"fa-bolt", "bolt"}, 25 | {"fa-bomb", "bomb"}, 26 | {"fa-book", "book"}, 27 | {"fa-box", "box"}, 28 | {"fa-brain", "brain"}, 29 | {"fa-briefcase", "briefcase"}, 30 | {"fa-bug", "bug"}, 31 | {"fa-cake-candles", "cake"}, 32 | {"fa-calculator", "calculator"}, 33 | {"fa-camera", "camera"}, 34 | {"fa-campground", "campground"}, 35 | {"fa-car", "car"}, 36 | {"fa-carrot", "carrot"}, 37 | {"fa-cat", "cat"}, 38 | {"fa-chess-knight", "knight"}, 39 | {"fa-chess-rook", "rook"}, 40 | {"fa-cloud", "cloud"}, 41 | {"fa-code", "code"}, 42 | {"fa-gear", "gear"}, 43 | {"fa-compass", "compass"}, 44 | {"fa-cookie", "cookie"}, 45 | {"fa-crow", "crow"}, 46 | {"fa-cube", "cube"}, 47 | {"fa-diamond", "diamond"}, 48 | {"fa-dog", "dog"}, 49 | {"fa-dove", "dove"}, 50 | {"fa-dragon", "dragon"}, 51 | {"fa-droplet", "droplet"}, 52 | {"fa-drum", "drum"}, 53 | {"fa-earth-americas", "earth"}, 54 | {"fa-egg", "egg"}, 55 | {"fa-envelope", "envelope"}, 56 | {"fa-fan", "fan"}, 57 | {"fa-feather", "feather"}, 58 | {"fa-fire", "fire"}, 59 | {"fa-fish", "fish"}, 60 | {"fa-flag", "flag"}, 61 | {"fa-flask", "flask"}, 62 | {"fa-floppy-disk", "floppy"}, 63 | {"fa-folder", "folder"}, 64 | {"fa-football", "football"}, 65 | {"fa-frog", "frog"}, 66 | {"fa-gamepad", "gamepad"}, 67 | {"fa-gavel", "gavel"}, 68 | {"fa-gem", "gem"}, 69 | {"fa-ghost", "ghost"}, 70 | {"fa-gift", "gift"}, 71 | {"fa-guitar", "guitar"}, 72 | {"fa-hammer", "hammer"}, 73 | {"fa-hat-cowboy", "cowboy"}, 74 | {"fa-hat-wizard", "wizard"}, 75 | {"fa-heart", "heart"}, 76 | {"fa-helicopter", "helicopter"}, 77 | {"fa-helmet-safety", "helmet"}, 78 | {"fa-hippo", "hippo"}, 79 | {"fa-horse", "horse"}, 80 | {"fa-hourglass-half", "hourglass"}, 81 | {"fa-snowflake", "snowflake"}, 82 | {"fa-key", "key"}, 83 | {"fa-leaf", "leaf"}, 84 | {"fa-lightbulb", "lightbulb"}, 85 | {"fa-magnet", "magnet"}, 86 | {"fa-map", "map"}, 87 | {"fa-microphone", "microphone"}, 88 | {"fa-moon", "moon"}, 89 | {"fa-mountain", "mountain"}, 90 | {"fa-mug-hot", "mug"}, 91 | {"fa-music", "music"}, 92 | {"fa-paintbrush", "paintbrush"}, 93 | {"fa-paper-plane", "paper"}, 94 | {"fa-paw", "paw"}, 95 | {"fa-pen", "pen"}, 96 | {"fa-pepper-hot", "pepper"}, 97 | {"fa-rocket", "rocket"}, 98 | {"fa-road", "road"}, 99 | {"fa-school", "school"}, 100 | {"fa-screwdriver-wrench", "screwdriver"}, 101 | {"fa-scroll", "scroll"}, 102 | {"fa-seedling", "seedling"}, 103 | {"fa-shield-heart", "shield"}, 104 | {"fa-ship", "ship"}, 105 | {"fa-skull", "skull"}, 106 | {"fa-sliders", "sliders"}, 107 | {"fa-splotch", "splotch"}, 108 | {"fa-spider", "spider"}, 109 | {"fa-star", "star"}, 110 | {"fa-sun", "sun"}, 111 | {"fa-toolbox", "toolbox"}, 112 | {"fa-tornado", "tornado"}, 113 | {"fa-tree", "tree"}, 114 | {"fa-trophy", 
"trophy"}, 115 | {"fa-truck", "truck"}, 116 | {"fa-user-astronaut", "astronaut"}, 117 | {"fa-wand-magic-sparkles", "wand"}, 118 | {"fa-wrench", "wrench"}, 119 | {"fa-pizza-slice", "pizza"}, 120 | {"fa-burger", "burger"}, 121 | {"fa-lemon", "lemon"}, 122 | } 123 | 124 | // GenerateRandomIconMnemonic generates a mnemonic consisting of N random icon words 125 | func GenerateRandomIconMnemonic(count int) string { 126 | if count <= 0 { 127 | count = 3 128 | } 129 | 130 | words := make([]string, count) 131 | for i := 0; i < count; i++ { 132 | n, err := rand.Int(rand.Reader, big.NewInt(int64(len(IconWords)))) 133 | if err != nil { 134 | // Fallback to a deterministic but acceptable index 135 | n = big.NewInt(int64(i % len(IconWords))) 136 | } 137 | words[i] = IconWords[n.Int64()].Word 138 | } 139 | 140 | return words[0] + "-" + words[1] + "-" + words[2] 141 | } 142 | 143 | // GenerateIconMnemonicFromID generates a deterministic mnemonic from a client ID 144 | // using 3 icon words 145 | func GenerateIconMnemonicFromID(clientID string) string { 146 | // Use hash-based selection to deterministically pick 3 icons 147 | hash := 0 148 | for _, ch := range clientID { 149 | hash = (hash*31 + int(ch)) & 0x7FFFFFFF 150 | } 151 | 152 | // Pick 3 different icons based on different parts of the hash 153 | idx1 := hash % len(IconWords) 154 | idx2 := (hash / len(IconWords)) % len(IconWords) 155 | idx3 := (hash / (len(IconWords) * len(IconWords))) % len(IconWords) 156 | 157 | // Ensure all three are different 158 | if idx2 == idx1 { 159 | idx2 = (idx2 + 1) % len(IconWords) 160 | } 161 | if idx3 == idx1 || idx3 == idx2 { 162 | idx3 = (idx3 + 1) % len(IconWords) 163 | if idx3 == idx1 { 164 | idx3 = (idx3 + 1) % len(IconWords) 165 | } 166 | } 167 | 168 | return IconWords[idx1].Word + "-" + IconWords[idx2].Word + "-" + IconWords[idx3].Word 169 | } 170 | -------------------------------------------------------------------------------- /tests/web-text-transfer.spec.js: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | const { test, expect } = require('@playwright/test'); 3 | const path = require('path'); 4 | const { spawn } = require('child_process'); 5 | 6 | /** 7 | * Test: Web to Web text transfer 8 | * 9 | * This test verifies that two browser clients can successfully transfer text 10 | * through the relay server using end-to-end encryption. 
11 | */ 12 | test.describe('Web to Web Text Transfer', () => { 13 | let relayServer; 14 | let serverPort; 15 | let serverUrl; 16 | 17 | test.beforeAll(async () => { 18 | // Start the relay server 19 | serverPort = 8080 + Math.floor(Math.random() * 1000); // Random port to avoid conflicts 20 | 21 | return new Promise((resolve, reject) => { 22 | relayServer = spawn('./e2ecp', ['serve', '--port', serverPort.toString(), '--db-url', ''], { 23 | cwd: path.join(__dirname, '..'), 24 | }); 25 | 26 | let started = false; 27 | 28 | relayServer.stdout.on('data', (data) => { 29 | console.log(`Server: ${data}`); 30 | if (!started && data.toString().includes('Starting')) { 31 | started = true; 32 | serverUrl = `http://localhost:${serverPort}`; 33 | // Give server more time to fully initialize WebSocket handlers 34 | setTimeout(resolve, 3000); 35 | } 36 | }); 37 | 38 | relayServer.stderr.on('data', (data) => { 39 | console.error(`Server Error: ${data}`); 40 | }); 41 | 42 | relayServer.on('error', (error) => { 43 | reject(error); 44 | }); 45 | 46 | // Timeout in case server doesn't start 47 | setTimeout(() => { 48 | if (!started) { 49 | started = true; 50 | serverUrl = `http://localhost:${serverPort}`; 51 | resolve(); 52 | } 53 | }, 5000); 54 | }); 55 | }); 56 | 57 | test.afterAll(async () => { 58 | // Stop the relay server and wait for cleanup 59 | if (relayServer) { 60 | relayServer.kill('SIGTERM'); 61 | // Wait for server to fully shutdown 62 | await new Promise(resolve => setTimeout(resolve, 2000)); 63 | } 64 | }); 65 | 66 | test('should transfer text from one web client to another', async ({ browser }) => { 67 | // Test text to send 68 | const testText = 'Hello from Playwright! This is a test text message for web-to-web transfer.'; 69 | 70 | // Create two browser contexts (sender and receiver) with clipboard permissions 71 | const senderContext = await browser.newContext({ 72 | permissions: ['clipboard-read', 'clipboard-write'] 73 | }); 74 | const receiverContext = await browser.newContext({ 75 | permissions: ['clipboard-read', 'clipboard-write'] 76 | }); 77 | 78 | const senderPage = await senderContext.newPage(); 79 | const receiverPage = await receiverContext.newPage(); 80 | 81 | try { 82 | // Generate a unique room name 83 | const roomName = `test-room-${Date.now()}`; 84 | 85 | // Navigate receiver first and wait for it to be ready 86 | await receiverPage.goto(`${serverUrl}/${roomName}`, { waitUntil: 'networkidle', timeout: 30000 }); 87 | 88 | // Wait a bit for receiver's WebSocket connection to establish 89 | await new Promise(resolve => setTimeout(resolve, 2000)); 90 | 91 | // Now navigate sender 92 | await senderPage.goto(`${serverUrl}/${roomName}`, { waitUntil: 'networkidle', timeout: 30000 }); 93 | 94 | // Wait for WebSocket connections to be established (both peers need to be connected) 95 | // Look for the text input to be enabled on sender (indicates peer connection) 96 | await senderPage.waitForSelector('input[type="text"]:not([disabled])', { timeout: 15000 }); 97 | 98 | // Extra wait to ensure connection is stable 99 | await new Promise(resolve => setTimeout(resolve, 1000)); 100 | 101 | // Sender: Type and send the text 102 | const textInput = await senderPage.locator('input[type="text"][placeholder*="message"]'); 103 | await textInput.fill(testText); 104 | 105 | // Click the Send button 106 | await senderPage.click('button:has-text("SEND")'); 107 | 108 | // Receiver: Wait for the text message modal to appear 109 | await receiverPage.waitForSelector('text=RECEIVED TEXT', { timeout: 30000 
}); 110 | 111 | // Get the text content from the modal 112 | const receivedText = await receiverPage.locator('.bg-gray-200 .font-bold.break-words').textContent(); 113 | 114 | // Verify the received text matches what was sent 115 | expect(receivedText.trim()).toBe(testText); 116 | 117 | // Test copy button functionality 118 | await receiverPage.click('button[title="Copy to clipboard"]'); 119 | 120 | // Verify clipboard content directly (more reliable than checking toast) 121 | const clipboardText = await receiverPage.evaluate(async () => { 122 | return await navigator.clipboard.readText(); 123 | }); 124 | expect(clipboardText).toBe(testText); 125 | 126 | // Close the modal 127 | await receiverPage.click('button:has-text("Close")'); 128 | 129 | } finally { 130 | await senderPage.close(); 131 | await receiverPage.close(); 132 | await senderContext.close(); 133 | await receiverContext.close(); 134 | } 135 | }); 136 | }); 137 | -------------------------------------------------------------------------------- /tests/cli-to-web.spec.js: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | const { test, expect } = require('@playwright/test'); 3 | const path = require('path'); 4 | const fs = require('fs'); 5 | const { spawn } = require('child_process'); 6 | 7 | /** 8 | * Test: CLI to Web file transfer 9 | * 10 | * This test verifies that a file can be successfully transferred from the command-line 11 | * tool to a web client through the relay server. 12 | */ 13 | test.describe('CLI to Web Transfer', () => { 14 | let relayServer; 15 | let serverPort; 16 | let serverUrl; 17 | 18 | test.beforeAll(async () => { 19 | // Start the relay server 20 | serverPort = 8082 + Math.floor(Math.random() * 1000); // Random port to avoid conflicts 21 | 22 | return new Promise((resolve, reject) => { 23 | relayServer = spawn('./e2ecp', ['serve', '--port', serverPort.toString(), '--db-url', ''], { 24 | cwd: path.join(__dirname, '..'), 25 | }); 26 | 27 | let started = false; 28 | 29 | relayServer.stdout.on('data', (data) => { 30 | console.log(`Server: ${data}`); 31 | if (!started && data.toString().includes('Starting')) { 32 | started = true; 33 | serverUrl = `http://localhost:${serverPort}`; 34 | // Give server more time to fully initialize WebSocket handlers 35 | setTimeout(resolve, 3000); 36 | } 37 | }); 38 | 39 | relayServer.stderr.on('data', (data) => { 40 | console.error(`Server Error: ${data}`); 41 | }); 42 | 43 | relayServer.on('error', (error) => { 44 | reject(error); 45 | }); 46 | 47 | // Timeout in case server doesn't start 48 | setTimeout(() => { 49 | if (!started) { 50 | started = true; 51 | serverUrl = `http://localhost:${serverPort}`; 52 | resolve(); 53 | } 54 | }, 5000); 55 | }); 56 | }); 57 | 58 | test.afterAll(async () => { 59 | // Stop the relay server and wait for cleanup 60 | if (relayServer) { 61 | relayServer.kill('SIGTERM'); 62 | // Wait for server to fully shutdown 63 | await new Promise(resolve => setTimeout(resolve, 2000)); 64 | } 65 | }); 66 | 67 | test('should transfer a file from CLI sender to web receiver', async ({ browser }) => { 68 | // Create a test file 69 | const testFilePath = path.join(__dirname, 'test-cli-to-web.txt'); 70 | const testContent = 'Hello from CLI to web! 
This is a Playwright test for CLI-to-web transfer.'; 71 | fs.writeFileSync(testFilePath, testContent); 72 | 73 | const context = await browser.newContext(); 74 | const page = await context.newPage(); 75 | 76 | try { 77 | // Generate a unique room name 78 | const roomName = `test-cli-web-${Date.now()}`; 79 | 80 | // Navigate web receiver to the room first 81 | await page.goto(`${serverUrl}/${roomName}`, { waitUntil: 'networkidle', timeout: 30000 }); 82 | 83 | // Wait for the page to be ready (CLICK OR DROP FILES HERE button visible means ready to receive) 84 | await page.waitForSelector('button:has-text("CLICK OR DROP FILES HERE"), div:has-text("WAITING FOR PEER")', { timeout: 15000 }); 85 | 86 | // Extra wait to ensure WebSocket connection is established 87 | await new Promise(resolve => setTimeout(resolve, 2000)); 88 | 89 | // Set up download handler for receiver with longer timeout 90 | const downloadPromise = page.waitForEvent('download', { timeout: 60000 }); 91 | 92 | // Start CLI sender 93 | const wsUrl = `ws://localhost:${serverPort}`; 94 | const cliSender = spawn('./e2ecp', ['send', testFilePath, roomName, '--server', wsUrl], { 95 | cwd: path.join(__dirname, '..'), 96 | }); 97 | 98 | let senderOutput = ''; 99 | 100 | cliSender.stdout.on('data', (data) => { 101 | const output = data.toString(); 102 | senderOutput += output; 103 | console.log(`CLI Sender: ${output}`); 104 | }); 105 | 106 | cliSender.stderr.on('data', (data) => { 107 | console.error(`CLI Sender Error: ${data}`); 108 | }); 109 | 110 | // Wait for the download confirmation modal to appear 111 | await page.waitForSelector('text=DOWNLOAD FILE?', { timeout: 30000 }); 112 | 113 | // Wait for the Download button to be visible and clickable 114 | await page.waitForSelector('button.bg-black:has-text("Download")', { state: 'visible', timeout: 5000 }); 115 | await new Promise(resolve => setTimeout(resolve, 500)); 116 | 117 | // Click the Download button in the confirmation modal 118 | await page.click('button.bg-black:has-text("Download")'); 119 | 120 | // Wait for download to complete 121 | const download = await downloadPromise; 122 | 123 | // Save the downloaded file 124 | const downloadPath = path.join(__dirname, 'downloaded-cli-to-web.txt'); 125 | await download.saveAs(downloadPath); 126 | 127 | // Verify the downloaded file content 128 | const downloadedContent = fs.readFileSync(downloadPath, 'utf-8'); 129 | expect(downloadedContent).toBe(testContent); 130 | 131 | // Clean up 132 | cliSender.kill('SIGTERM'); 133 | await new Promise(resolve => setTimeout(resolve, 1000)); 134 | fs.unlinkSync(testFilePath); 135 | fs.unlinkSync(downloadPath); 136 | 137 | } finally { 138 | await page.close(); 139 | await context.close(); 140 | } 141 | }); 142 | }); 143 | -------------------------------------------------------------------------------- /tests/web-to-web.spec.js: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | const { test, expect } = require('@playwright/test'); 3 | const path = require('path'); 4 | const fs = require('fs'); 5 | const { spawn } = require('child_process'); 6 | 7 | /** 8 | * Test: Web to Web file transfer 9 | * 10 | * This test verifies that two browser clients can successfully transfer a file 11 | * through the relay server using end-to-end encryption. 
12 | */ 13 | test.describe('Web to Web Transfer', () => { 14 | let relayServer; 15 | let serverPort; 16 | let serverUrl; 17 | 18 | test.beforeAll(async () => { 19 | // Start the relay server 20 | serverPort = 8080 + Math.floor(Math.random() * 1000); // Random port to avoid conflicts 21 | 22 | return new Promise((resolve, reject) => { 23 | relayServer = spawn('./e2ecp', ['serve', '--port', serverPort.toString(), '--db-url', ''], { 24 | cwd: path.join(__dirname, '..'), 25 | }); 26 | 27 | let started = false; 28 | 29 | relayServer.stdout.on('data', (data) => { 30 | console.log(`Server: ${data}`); 31 | if (!started && data.toString().includes('Starting')) { 32 | started = true; 33 | serverUrl = `http://localhost:${serverPort}`; 34 | // Give server more time to fully initialize WebSocket handlers 35 | setTimeout(resolve, 3000); 36 | } 37 | }); 38 | 39 | relayServer.stderr.on('data', (data) => { 40 | console.error(`Server Error: ${data}`); 41 | }); 42 | 43 | relayServer.on('error', (error) => { 44 | reject(error); 45 | }); 46 | 47 | // Timeout in case server doesn't start 48 | setTimeout(() => { 49 | if (!started) { 50 | started = true; 51 | serverUrl = `http://localhost:${serverPort}`; 52 | resolve(); 53 | } 54 | }, 5000); 55 | }); 56 | }); 57 | 58 | test.afterAll(async () => { 59 | // Stop the relay server and wait for cleanup 60 | if (relayServer) { 61 | relayServer.kill('SIGTERM'); 62 | // Wait for server to fully shutdown 63 | await new Promise(resolve => setTimeout(resolve, 2000)); 64 | } 65 | }); 66 | 67 | test('should transfer a file from one web client to another', async ({ browser }) => { 68 | // Create a test file 69 | const testFilePath = path.join(__dirname, 'test-web-to-web.txt'); 70 | const testContent = 'Hello from Playwright! This is a test file for web-to-web transfer.'; 71 | fs.writeFileSync(testFilePath, testContent); 72 | 73 | // Create two browser contexts (sender and receiver) 74 | const senderContext = await browser.newContext(); 75 | const receiverContext = await browser.newContext(); 76 | 77 | const senderPage = await senderContext.newPage(); 78 | const receiverPage = await receiverContext.newPage(); 79 | 80 | try { 81 | // Generate a unique room name 82 | const roomName = `test-room-${Date.now()}`; 83 | 84 | // Navigate receiver first and wait for it to be ready 85 | await receiverPage.goto(`${serverUrl}/${roomName}`, { waitUntil: 'networkidle', timeout: 30000 }); 86 | 87 | // Wait a bit for receiver's WebSocket connection to establish 88 | await new Promise(resolve => setTimeout(resolve, 2000)); 89 | 90 | // Now navigate sender 91 | await senderPage.goto(`${serverUrl}/${roomName}`, { waitUntil: 'networkidle', timeout: 30000 }); 92 | 93 | // Wait for WebSocket connections to be established (both peers need to be connected) 94 | // Look for the "CLICK OR DROP FILES HERE" button to be enabled on sender (indicates peer connection) 95 | await senderPage.waitForSelector('button:has-text("CLICK OR DROP FILES HERE"):not([disabled])', { timeout: 15000 }); 96 | 97 | // Extra wait to ensure connection is stable 98 | await new Promise(resolve => setTimeout(resolve, 1000)); 99 | 100 | // Set up download handler for receiver before sending 101 | const downloadPromise = receiverPage.waitForEvent('download', { timeout: 60000 }); 102 | 103 | // Sender: Upload the file 104 | // Find the hidden file input and set the file 105 | const fileInput = await senderPage.locator('input[type="file"]'); 106 | await fileInput.setInputFiles(testFilePath); 107 | 108 | // The app automatically sends 
the file once selected 109 | 110 | // Receiver: Wait for the download confirmation modal to appear 111 | await receiverPage.waitForSelector('text=DOWNLOAD FILE?', { timeout: 30000 }); 112 | 113 | // Click the Download button in the confirmation modal 114 | await receiverPage.click('button:has-text("Download")'); 115 | 116 | // Receiver: Wait for download to start and complete 117 | const download = await downloadPromise; 118 | 119 | // Save the downloaded file 120 | const downloadPath = path.join(__dirname, 'downloaded-web-to-web.txt'); 121 | await download.saveAs(downloadPath); 122 | 123 | // Verify the downloaded file content 124 | const downloadedContent = fs.readFileSync(downloadPath, 'utf-8'); 125 | expect(downloadedContent).toBe(testContent); 126 | 127 | // Clean up test files 128 | fs.unlinkSync(testFilePath); 129 | fs.unlinkSync(downloadPath); 130 | 131 | } finally { 132 | await senderPage.close(); 133 | await receiverPage.close(); 134 | await senderContext.close(); 135 | await receiverContext.close(); 136 | } 137 | }); 138 | }); 139 | -------------------------------------------------------------------------------- /tests/web-to-cli-text.spec.js: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | const { test, expect } = require('@playwright/test'); 3 | const path = require('path'); 4 | const { spawn } = require('child_process'); 5 | 6 | /** 7 | * Test: Web to CLI text transfer 8 | * 9 | * This test verifies that text can be successfully transferred from a web client 10 | * to the command-line tool through the relay server. 11 | */ 12 | test.describe('Web to CLI Text Transfer', () => { 13 | let relayServer; 14 | let serverPort; 15 | let serverUrl; 16 | 17 | test.beforeAll(async () => { 18 | // Start the relay server 19 | serverPort = 8081 + Math.floor(Math.random() * 1000); // Random port to avoid conflicts 20 | 21 | return new Promise((resolve, reject) => { 22 | relayServer = spawn('./e2ecp', ['serve', '--port', serverPort.toString(), '--db-url', ''], { 23 | cwd: path.join(__dirname, '..'), 24 | }); 25 | 26 | let started = false; 27 | 28 | relayServer.stdout.on('data', (data) => { 29 | console.log(`Server: ${data}`); 30 | if (!started && data.toString().includes('Starting')) { 31 | started = true; 32 | serverUrl = `http://localhost:${serverPort}`; 33 | // Give server more time to fully initialize WebSocket handlers 34 | setTimeout(resolve, 3000); 35 | } 36 | }); 37 | 38 | relayServer.stderr.on('data', (data) => { 39 | console.error(`Server Error: ${data}`); 40 | }); 41 | 42 | relayServer.on('error', (error) => { 43 | reject(error); 44 | }); 45 | 46 | // Timeout in case server doesn't start 47 | setTimeout(() => { 48 | if (!started) { 49 | started = true; 50 | serverUrl = `http://localhost:${serverPort}`; 51 | resolve(); 52 | } 53 | }, 5000); 54 | }); 55 | }); 56 | 57 | test.afterAll(async () => { 58 | // Stop the relay server and wait for cleanup 59 | if (relayServer) { 60 | relayServer.kill('SIGTERM'); 61 | // Wait for server to fully shutdown 62 | await new Promise(resolve => setTimeout(resolve, 2000)); 63 | } 64 | }); 65 | 66 | test('should transfer text from web client to CLI receiver', async ({ browser }) => { 67 | // Test text to send 68 | const testText = 'Hello from web to CLI! 
This is a Playwright test for web-to-CLI text transfer.'; 69 | 70 | const context = await browser.newContext(); 71 | const page = await context.newPage(); 72 | 73 | try { 74 | // Generate a unique room name 75 | const roomName = `test-web-cli-text-${Date.now()}`; 76 | 77 | // Create a temporary directory for receiver output 78 | const tmpDir = path.join(__dirname, 'cli-output-text'); 79 | 80 | // Start CLI receiver in background 81 | const wsUrl = `ws://localhost:${serverPort}`; 82 | const cliReceiver = spawn('./e2ecp', ['receive', roomName, '--server', wsUrl, '--output', tmpDir, '--force'], { 83 | cwd: path.join(__dirname, '..'), 84 | }); 85 | 86 | let receiverReady = false; 87 | let receiverOutput = ''; 88 | let receivedText = ''; 89 | 90 | cliReceiver.stdout.on('data', (data) => { 91 | const output = data.toString(); 92 | receiverOutput += output; 93 | console.log(`CLI Receiver: ${output}`); 94 | 95 | // Mark as ready when connected 96 | if (output.includes('You are:') || output.includes('Connected')) { 97 | receiverReady = true; 98 | } 99 | 100 | // Extract received text between markers 101 | if (output.includes('=== Received Text ===')) { 102 | const lines = output.split('\n'); 103 | for (let i = 0; i < lines.length; i++) { 104 | if (lines[i].includes('=== Received Text ===') && i + 1 < lines.length) { 105 | receivedText = lines[i + 1].trim(); 106 | break; 107 | } 108 | } 109 | } 110 | }); 111 | 112 | cliReceiver.stderr.on('data', (data) => { 113 | console.error(`CLI Receiver Error: ${data}`); 114 | }); 115 | 116 | // Wait for receiver to be ready 117 | await new Promise(resolve => { 118 | const checkReady = setInterval(() => { 119 | if (receiverReady) { 120 | clearInterval(checkReady); 121 | resolve(); 122 | } 123 | }, 100); 124 | setTimeout(() => { 125 | clearInterval(checkReady); 126 | resolve(); 127 | }, 15000); 128 | }); 129 | 130 | // Extra wait to ensure WebSocket connection is established 131 | await new Promise(resolve => setTimeout(resolve, 2000)); 132 | 133 | // Navigate web sender to the room 134 | await page.goto(`${serverUrl}/${roomName}`, { waitUntil: 'networkidle', timeout: 30000 }); 135 | 136 | // Wait for the text input to be enabled (indicates peer connection) 137 | await page.waitForSelector('input[type="text"]:not([disabled])', { timeout: 15000 }); 138 | 139 | // Extra wait to ensure connection is stable 140 | await new Promise(resolve => setTimeout(resolve, 1000)); 141 | 142 | // Sender: Type and send the text 143 | const textInput = await page.locator('input[type="text"][placeholder*="message"]'); 144 | await textInput.fill(testText); 145 | 146 | // Click the Send button 147 | await page.click('button:has-text("SEND")'); 148 | 149 | // Wait for CLI receiver to process the text (give it time to output) 150 | await new Promise(resolve => setTimeout(resolve, 3000)); 151 | 152 | // Verify the text was received 153 | expect(receivedText).toBe(testText); 154 | 155 | // Clean up 156 | cliReceiver.kill('SIGTERM'); 157 | await new Promise(resolve => setTimeout(resolve, 1000)); 158 | 159 | } finally { 160 | await page.close(); 161 | await context.close(); 162 | } 163 | }); 164 | }); 165 | -------------------------------------------------------------------------------- /src/client/client_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import "testing" 4 | 5 | func TestSanitizeFileName(t *testing.T) { 6 | tests := []struct { 7 | name string 8 | input string 9 | expected string 10 | }{ 11 | {"simple 
filename", "test.txt", "test.txt"}, 12 | {"path traversal with ..", "../../../etc/passwd", "passwd"}, 13 | {"path traversal with multiple ..", "../../file.txt", "file.txt"}, 14 | {"unix path", "/etc/passwd", "passwd"}, 15 | {"relative path", "dir/subdir/file.txt", "file.txt"}, 16 | {"hidden file", ".hidden", ".hidden"}, 17 | {"empty string", "", "."}, 18 | {"current dir", ".", "."}, 19 | {"complex path traversal", "../../../tmp/../etc/passwd", "passwd"}, 20 | {"filename with spaces", "my file.txt", "my file.txt"}, 21 | {"path with spaces", "path/to/my file.txt", "my file.txt"}, 22 | {"dot dot", "..", ".."}, 23 | } 24 | 25 | for _, tt := range tests { 26 | t.Run(tt.name, func(t *testing.T) { 27 | result := sanitizeFileName(tt.input) 28 | if result != tt.expected { 29 | t.Errorf("sanitizeFileName(%q) = %q; expected %q", tt.input, result, tt.expected) 30 | } 31 | }) 32 | } 33 | } 34 | 35 | func TestFormatBytes(t *testing.T) { 36 | tests := []struct { 37 | name string 38 | bytes int64 39 | expected string 40 | }{ 41 | {"zero bytes", 0, "0 B"}, 42 | {"bytes only", 500, "500 B"}, 43 | {"1 KB", 1024, "1.0 KB"}, 44 | {"1.5 KB", 1536, "1.5 KB"}, 45 | {"1 MB", 1024 * 1024, "1.0 MB"}, 46 | {"2.5 MB", 2621440, "2.5 MB"}, 47 | {"1 GB", 1024 * 1024 * 1024, "1.0 GB"}, 48 | {"1 TB", 1024 * 1024 * 1024 * 1024, "1.0 TB"}, 49 | {"large value", 5368709120, "5.0 GB"}, 50 | } 51 | 52 | for _, tt := range tests { 53 | t.Run(tt.name, func(t *testing.T) { 54 | result := formatBytes(tt.bytes) 55 | if result != tt.expected { 56 | t.Errorf("formatBytes(%d) = %s; expected %s", tt.bytes, result, tt.expected) 57 | } 58 | }) 59 | } 60 | } 61 | 62 | func TestFormatBytesEdgeCases(t *testing.T) { 63 | // Test boundary values 64 | tests := []struct { 65 | bytes int64 66 | }{ 67 | {1023}, // Just below 1 KB 68 | {1024}, // Exactly 1 KB 69 | {1025}, // Just above 1 KB 70 | {1048575}, // Just below 1 MB 71 | {1048576}, // Exactly 1 MB 72 | } 73 | 74 | for _, tt := range tests { 75 | result := formatBytes(tt.bytes) 76 | if result == "" { 77 | t.Errorf("formatBytes(%d) returned empty string", tt.bytes) 78 | } 79 | } 80 | } 81 | 82 | func TestFormatBytesUnits(t *testing.T) { 83 | // Test that units are correctly identified 84 | testCases := []struct { 85 | bytes int64 86 | expectedUnit string 87 | }{ 88 | {100, "B"}, 89 | {1024, "KB"}, 90 | {1024 * 1024, "MB"}, 91 | {1024 * 1024 * 1024, "GB"}, 92 | {1024 * 1024 * 1024 * 1024, "TB"}, 93 | } 94 | 95 | for _, tc := range testCases { 96 | result := formatBytes(tc.bytes) 97 | // Check if the result contains the expected unit 98 | _ = result 99 | // Simple check: result should contain the unit 100 | if len(result) < 2 { 101 | t.Errorf("formatBytes(%d) = %s; expected to contain unit %s", tc.bytes, result, tc.expectedUnit) 102 | } 103 | } 104 | } 105 | 106 | func TestFormatBytesConsistency(t *testing.T) { 107 | // Test that same input gives same output 108 | bytes := int64(1234567) 109 | result1 := formatBytes(bytes) 110 | result2 := formatBytes(bytes) 111 | 112 | if result1 != result2 { 113 | t.Errorf("formatBytes not consistent: %s != %s", result1, result2) 114 | } 115 | } 116 | 117 | func TestFormatBytesNegative(t *testing.T) { 118 | // While negative bytes don't make sense, test the function handles it 119 | result := formatBytes(-100) 120 | // Function should still return something (not crash) 121 | if result == "" { 122 | t.Error("formatBytes(-100) returned empty string") 123 | } 124 | } 125 | 126 | func TestFormatBytesLargeValues(t *testing.T) { 127 | // Test with very large values 
128 | largeValue := int64(1024 * 1024 * 1024 * 1024 * 10) // 10 TB 129 | result := formatBytes(largeValue) 130 | 131 | if result == "" { 132 | t.Error("formatBytes failed on large value") 133 | } 134 | 135 | // Should contain "TB" 136 | if len(result) < 4 { 137 | t.Errorf("formatBytes(%d) = %s; expected format with TB", largeValue, result) 138 | } 139 | } 140 | 141 | func TestFormatBytesPrecision(t *testing.T) { 142 | // Test that the precision is one decimal place 143 | bytes := int64(1536) // 1.5 KB 144 | result := formatBytes(bytes) 145 | 146 | // Should be "1.5 KB" 147 | if result != "1.5 KB" { 148 | t.Errorf("formatBytes(%d) = %s; expected 1.5 KB", bytes, result) 149 | } 150 | } 151 | 152 | func TestFormatBytesKilobyteBoundary(t *testing.T) { 153 | // Test values around the KB boundary 154 | justBelow := int64(1023) 155 | atBoundary := int64(1024) 156 | justAbove := int64(1025) 157 | 158 | resultBelow := formatBytes(justBelow) 159 | resultAt := formatBytes(atBoundary) 160 | resultAbove := formatBytes(justAbove) 161 | 162 | // Just below should be in bytes 163 | if resultBelow != "1023 B" { 164 | t.Errorf("formatBytes(%d) = %s; expected 1023 B", justBelow, resultBelow) 165 | } 166 | 167 | // At boundary should be 1.0 KB 168 | if resultAt != "1.0 KB" { 169 | t.Errorf("formatBytes(%d) = %s; expected 1.0 KB", atBoundary, resultAt) 170 | } 171 | 172 | // Just above should be 1.0 KB (rounds down in display) 173 | if resultAbove != "1.0 KB" { 174 | t.Errorf("formatBytes(%d) = %s; expected 1.0 KB", justAbove, resultAbove) 175 | } 176 | } 177 | 178 | func TestFormatBytesMegabyteBoundary(t *testing.T) { 179 | // Test values around the MB boundary 180 | atBoundary := int64(1048576) // 1 MB 181 | result := formatBytes(atBoundary) 182 | 183 | if result != "1.0 MB" { 184 | t.Errorf("formatBytes(%d) = %s; expected 1.0 MB", atBoundary, result) 185 | } 186 | } 187 | 188 | func TestFormatBytesGigabyteBoundary(t *testing.T) { 189 | // Test values around the GB boundary 190 | atBoundary := int64(1073741824) // 1 GB 191 | result := formatBytes(atBoundary) 192 | 193 | if result != "1.0 GB" { 194 | t.Errorf("formatBytes(%d) = %s; expected 1.0 GB", atBoundary, result) 195 | } 196 | } 197 | 198 | func TestPromptOverwrite(t *testing.T) { 199 | tests := []struct { 200 | name string 201 | input string 202 | expected bool 203 | }{ 204 | {"capital Y returns true", "Y\n", true}, 205 | {"lowercase y returns false", "y\n", false}, 206 | {"n returns false", "n\n", false}, 207 | {"N returns false", "N\n", false}, 208 | {"empty returns false", "\n", false}, 209 | {"random text returns false", "yes\n", false}, 210 | {"Y with spaces returns true", " Y \n", true}, 211 | } 212 | 213 | for _, tt := range tests { 214 | t.Run(tt.name, func(t *testing.T) { 215 | // Note: This is a placeholder test structure 216 | // In a real test, we would mock os.Stdin to simulate user input 217 | // For now, we're just documenting the expected behavior 218 | if tt.input == "Y\n" || tt.input == " Y \n" { 219 | if !tt.expected { 220 | t.Errorf("Expected true for input %q", tt.input) 221 | } 222 | } else { 223 | if tt.expected { 224 | t.Errorf("Expected false for input %q", tt.input) 225 | } 226 | } 227 | }) 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /src/client/metadata_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/schollz/e2ecp/src/crypto" 8 | ) 9 | 10 
| func TestMetadataEncryptionDecryption(t *testing.T) { 11 | // Generate a key pair for testing 12 | privKey, err := crypto.GenerateECDHKeyPair() 13 | if err != nil { 14 | t.Fatalf("Failed to generate key pair: %v", err) 15 | } 16 | 17 | // Generate shared secret (in real scenario this would be derived from peer's public key) 18 | sharedSecret, err := crypto.DeriveSharedSecret(privKey, privKey.PublicKey()) 19 | if err != nil { 20 | t.Fatalf("Failed to derive shared secret: %v", err) 21 | } 22 | 23 | // Create test metadata 24 | metadata := FileMetadata{ 25 | Name: "test-file.txt", 26 | TotalSize: 12345, 27 | IsFolder: false, 28 | OriginalFolderName: "", 29 | IsMultipleFiles: false, 30 | } 31 | 32 | // Marshal metadata 33 | metadataJSON, err := MarshalMetadata(metadata) 34 | if err != nil { 35 | t.Fatalf("Failed to marshal metadata: %v", err) 36 | } 37 | 38 | // Encrypt metadata 39 | iv, encryptedMetadata, err := crypto.EncryptAESGCM(sharedSecret, metadataJSON) 40 | if err != nil { 41 | t.Fatalf("Failed to encrypt metadata: %v", err) 42 | } 43 | 44 | // Verify encrypted data is not the same as original 45 | if string(encryptedMetadata) == string(metadataJSON) { 46 | t.Fatal("Encrypted metadata is the same as plain text") 47 | } 48 | 49 | // Decrypt metadata 50 | decryptedMetadataJSON, err := crypto.DecryptAESGCM(sharedSecret, iv, encryptedMetadata) 51 | if err != nil { 52 | t.Fatalf("Failed to decrypt metadata: %v", err) 53 | } 54 | 55 | // Unmarshal decrypted metadata 56 | decryptedMetadata, err := UnmarshalMetadata(decryptedMetadataJSON) 57 | if err != nil { 58 | t.Fatalf("Failed to unmarshal metadata: %v", err) 59 | } 60 | 61 | // Verify decrypted metadata matches original 62 | if decryptedMetadata.Name != metadata.Name { 63 | t.Errorf("Name mismatch: got %s, want %s", decryptedMetadata.Name, metadata.Name) 64 | } 65 | if decryptedMetadata.TotalSize != metadata.TotalSize { 66 | t.Errorf("TotalSize mismatch: got %d, want %d", decryptedMetadata.TotalSize, metadata.TotalSize) 67 | } 68 | if decryptedMetadata.IsFolder != metadata.IsFolder { 69 | t.Errorf("IsFolder mismatch: got %v, want %v", decryptedMetadata.IsFolder, metadata.IsFolder) 70 | } 71 | 72 | t.Log("Metadata encryption/decryption successful") 73 | } 74 | 75 | func TestFolderMetadataEncryptionDecryption(t *testing.T) { 76 | // Generate a key pair for testing 77 | privKey, err := crypto.GenerateECDHKeyPair() 78 | if err != nil { 79 | t.Fatalf("Failed to generate key pair: %v", err) 80 | } 81 | 82 | // Generate shared secret 83 | sharedSecret, err := crypto.DeriveSharedSecret(privKey, privKey.PublicKey()) 84 | if err != nil { 85 | t.Fatalf("Failed to derive shared secret: %v", err) 86 | } 87 | 88 | // Create test metadata for folder 89 | metadata := FileMetadata{ 90 | Name: "myfolder.zip", 91 | TotalSize: 54321, 92 | IsFolder: true, 93 | OriginalFolderName: "myfolder", 94 | IsMultipleFiles: false, 95 | } 96 | 97 | // Marshal metadata 98 | metadataJSON, err := MarshalMetadata(metadata) 99 | if err != nil { 100 | t.Fatalf("Failed to marshal metadata: %v", err) 101 | } 102 | 103 | // Encrypt metadata 104 | iv, encryptedMetadata, err := crypto.EncryptAESGCM(sharedSecret, metadataJSON) 105 | if err != nil { 106 | t.Fatalf("Failed to encrypt metadata: %v", err) 107 | } 108 | 109 | // Decrypt metadata 110 | decryptedMetadataJSON, err := crypto.DecryptAESGCM(sharedSecret, iv, encryptedMetadata) 111 | if err != nil { 112 | t.Fatalf("Failed to decrypt metadata: %v", err) 113 | } 114 | 115 | // Unmarshal decrypted metadata 116 | 
decryptedMetadata, err := UnmarshalMetadata(decryptedMetadataJSON) 117 | if err != nil { 118 | t.Fatalf("Failed to unmarshal metadata: %v", err) 119 | } 120 | 121 | // Verify all fields 122 | if decryptedMetadata.Name != metadata.Name { 123 | t.Errorf("Name mismatch: got %s, want %s", decryptedMetadata.Name, metadata.Name) 124 | } 125 | if decryptedMetadata.TotalSize != metadata.TotalSize { 126 | t.Errorf("TotalSize mismatch: got %d, want %d", decryptedMetadata.TotalSize, metadata.TotalSize) 127 | } 128 | if decryptedMetadata.IsFolder != metadata.IsFolder { 129 | t.Errorf("IsFolder mismatch: got %v, want %v", decryptedMetadata.IsFolder, metadata.IsFolder) 130 | } 131 | if decryptedMetadata.OriginalFolderName != metadata.OriginalFolderName { 132 | t.Errorf("OriginalFolderName mismatch: got %s, want %s", decryptedMetadata.OriginalFolderName, metadata.OriginalFolderName) 133 | } 134 | 135 | t.Log("Folder metadata encryption/decryption successful") 136 | } 137 | 138 | func TestMetadataZeroKnowledge(t *testing.T) { 139 | // This test verifies that encrypted metadata cannot be inspected by the relay 140 | 141 | // Generate a key pair 142 | privKey, err := crypto.GenerateECDHKeyPair() 143 | if err != nil { 144 | t.Fatalf("Failed to generate key pair: %v", err) 145 | } 146 | 147 | // Generate shared secret 148 | sharedSecret, err := crypto.DeriveSharedSecret(privKey, privKey.PublicKey()) 149 | if err != nil { 150 | t.Fatalf("Failed to derive shared secret: %v", err) 151 | } 152 | 153 | // Create metadata with sensitive information 154 | metadata := FileMetadata{ 155 | Name: "secret-document.pdf", 156 | TotalSize: 999999, 157 | IsFolder: false, 158 | OriginalFolderName: "", 159 | IsMultipleFiles: false, 160 | } 161 | 162 | // Marshal and encrypt 163 | metadataJSON, err := MarshalMetadata(metadata) 164 | if err != nil { 165 | t.Fatalf("Failed to marshal metadata: %v", err) 166 | } 167 | 168 | _, encryptedMetadata, err := crypto.EncryptAESGCM(sharedSecret, metadataJSON) 169 | if err != nil { 170 | t.Fatalf("Failed to encrypt metadata: %v", err) 171 | } 172 | 173 | // Verify that sensitive data is not visible in encrypted form 174 | encryptedStr := string(encryptedMetadata) 175 | 176 | // The relay should not be able to see the filename 177 | if containsSubstring(encryptedStr, "secret-document") { 178 | t.Error("Filename visible in encrypted metadata") 179 | } 180 | 181 | // The relay should not be able to see "is_folder" 182 | if containsSubstring(encryptedStr, "is_folder") { 183 | t.Error("Field name 'is_folder' visible in encrypted metadata") 184 | } 185 | 186 | // Verify that attempting to parse as JSON fails (encrypted data is not JSON) 187 | var jsonTest map[string]interface{} 188 | if json.Unmarshal(encryptedMetadata, &jsonTest) == nil { 189 | t.Error("Encrypted metadata can be parsed as JSON - not properly encrypted!") 190 | } 191 | 192 | t.Log("Metadata is properly encrypted and zero-knowledge") 193 | } 194 | 195 | // Helper function to check if a string contains a substring 196 | func containsSubstring(s, substr string) bool { 197 | return len(s) >= len(substr) && findSubstring(s, substr) 198 | } 199 | 200 | func findSubstring(s, substr string) bool { 201 | for i := 0; i <= len(s)-len(substr); i++ { 202 | if s[i:i+len(substr)] == substr { 203 | return true 204 | } 205 | } 206 | return false 207 | } 208 | -------------------------------------------------------------------------------- /web/src/encryption.js: -------------------------------------------------------------------------------- 1 | 
// Client-side file encryption utilities using Web Crypto API 2 | // Each file gets its own random encryption key (DEK - Data Encryption Key) 3 | // The file key is encrypted with the user's master key (KEK - Key Encryption Key) 4 | 5 | const PBKDF2_ITERATIONS = 100000; 6 | const KEY_LENGTH = 256; 7 | 8 | /** 9 | * Derive an encryption key from password and salt using PBKDF2 10 | */ 11 | export async function deriveKey(password, saltHex) { 12 | const encoder = new TextEncoder(); 13 | const passwordBuffer = encoder.encode(password); 14 | const saltBuffer = hexToBytes(saltHex); 15 | 16 | // Import password as key material 17 | const keyMaterial = await window.crypto.subtle.importKey( 18 | "raw", 19 | passwordBuffer, 20 | "PBKDF2", 21 | false, 22 | ["deriveBits", "deriveKey"] 23 | ); 24 | 25 | // Derive AES-GCM key 26 | const key = await window.crypto.subtle.deriveKey( 27 | { 28 | name: "PBKDF2", 29 | salt: saltBuffer, 30 | iterations: PBKDF2_ITERATIONS, 31 | hash: "SHA-256", 32 | }, 33 | keyMaterial, 34 | { name: "AES-GCM", length: KEY_LENGTH }, 35 | true, // mark extractable so we can persist it in localStorage 36 | ["encrypt", "decrypt"] 37 | ); 38 | 39 | return key; 40 | } 41 | 42 | /** 43 | * Generate a random file-specific encryption key 44 | */ 45 | export async function generateFileKey() { 46 | return await window.crypto.subtle.generateKey( 47 | { name: "AES-GCM", length: KEY_LENGTH }, 48 | true, 49 | ["encrypt", "decrypt"] 50 | ); 51 | } 52 | 53 | /** 54 | * Encrypt a file key with the user's master key 55 | * Returns base64-encoded encrypted key with IV 56 | */ 57 | export async function encryptFileKey(fileKey, masterKey) { 58 | const iv = window.crypto.getRandomValues(new Uint8Array(12)); 59 | const exportedKey = await window.crypto.subtle.exportKey("raw", fileKey); 60 | 61 | const encryptedKey = await window.crypto.subtle.encrypt( 62 | { name: "AES-GCM", iv: iv }, 63 | masterKey, 64 | exportedKey 65 | ); 66 | 67 | // Prepend IV to encrypted key 68 | const result = new Uint8Array(iv.length + encryptedKey.byteLength); 69 | result.set(iv, 0); 70 | result.set(new Uint8Array(encryptedKey), iv.length); 71 | 72 | return bytesToBase64(result); 73 | } 74 | 75 | /** 76 | * Decrypt a file key with the user's master key 77 | */ 78 | export async function decryptFileKey(encryptedKeyBase64, masterKey) { 79 | const encryptedKeyBytes = base64ToBytes(encryptedKeyBase64); 80 | const iv = encryptedKeyBytes.slice(0, 12); 81 | const encryptedKey = encryptedKeyBytes.slice(12); 82 | 83 | const decryptedKey = await window.crypto.subtle.decrypt( 84 | { name: "AES-GCM", iv: iv }, 85 | masterKey, 86 | encryptedKey 87 | ); 88 | 89 | return await window.crypto.subtle.importKey( 90 | "raw", 91 | decryptedKey, 92 | { name: "AES-GCM", length: KEY_LENGTH }, 93 | true, // extractable: must be true to allow exporting for share links 94 | ["encrypt", "decrypt"] 95 | ); 96 | } 97 | 98 | /** 99 | * Encrypt a file using a file-specific key 100 | * Returns encrypted data with IV prepended 101 | */ 102 | export async function encryptFile(file, fileKey) { 103 | const arrayBuffer = await file.arrayBuffer(); 104 | const iv = window.crypto.getRandomValues(new Uint8Array(12)); // 96-bit IV for GCM 105 | 106 | const encryptedData = await window.crypto.subtle.encrypt( 107 | { 108 | name: "AES-GCM", 109 | iv: iv, 110 | }, 111 | fileKey, 112 | arrayBuffer 113 | ); 114 | 115 | // Prepend IV to encrypted data 116 | const result = new Uint8Array(iv.length + encryptedData.byteLength); 117 | result.set(iv, 0); 118 | result.set(new 
Uint8Array(encryptedData), iv.length); 119 | 120 | return new Blob([result], { type: "application/octet-stream" }); 121 | } 122 | 123 | /** 124 | * Decrypt a file using a file-specific key 125 | * Expects IV to be prepended to the encrypted data 126 | */ 127 | export async function decryptFile(encryptedBlob, fileKey) { 128 | const arrayBuffer = await encryptedBlob.arrayBuffer(); 129 | const dataView = new Uint8Array(arrayBuffer); 130 | 131 | // Extract IV (first 12 bytes) 132 | const iv = dataView.slice(0, 12); 133 | const encryptedData = dataView.slice(12); 134 | 135 | const decryptedData = await window.crypto.subtle.decrypt( 136 | { 137 | name: "AES-GCM", 138 | iv: iv, 139 | }, 140 | fileKey, 141 | encryptedData 142 | ); 143 | 144 | return new Blob([decryptedData], { type: "application/octet-stream" }); 145 | } 146 | 147 | /** 148 | * Convert hex string to Uint8Array 149 | */ 150 | function hexToBytes(hex) { 151 | const bytes = new Uint8Array(hex.length / 2); 152 | for (let i = 0; i < hex.length; i += 2) { 153 | bytes[i / 2] = parseInt(hex.substr(i, 2), 16); 154 | } 155 | return bytes; 156 | } 157 | 158 | /** 159 | * Convert base64 string to Uint8Array 160 | */ 161 | function base64ToBytes(base64) { 162 | const binary = atob(base64); 163 | const bytes = new Uint8Array(binary.length); 164 | for (let i = 0; i < binary.length; i++) { 165 | bytes[i] = binary.charCodeAt(i); 166 | } 167 | return bytes; 168 | } 169 | 170 | /** 171 | * Convert Uint8Array to base64 string 172 | */ 173 | function bytesToBase64(bytes) { 174 | let binary = ""; 175 | for (let i = 0; i < bytes.length; i++) { 176 | binary += String.fromCharCode(bytes[i]); 177 | } 178 | return btoa(binary); 179 | } 180 | 181 | /** 182 | * Encrypt a string (like filename) with the user's master key 183 | * Returns base64-encoded encrypted string with IV 184 | */ 185 | export async function encryptString(plaintext, masterKey) { 186 | const encoder = new TextEncoder(); 187 | const plaintextBytes = encoder.encode(plaintext); 188 | const iv = window.crypto.getRandomValues(new Uint8Array(12)); 189 | 190 | const encryptedData = await window.crypto.subtle.encrypt( 191 | { name: "AES-GCM", iv: iv }, 192 | masterKey, 193 | plaintextBytes 194 | ); 195 | 196 | // Prepend IV to encrypted data 197 | const result = new Uint8Array(iv.length + encryptedData.byteLength); 198 | result.set(iv, 0); 199 | result.set(new Uint8Array(encryptedData), iv.length); 200 | 201 | return bytesToBase64(result); 202 | } 203 | 204 | /** 205 | * Decrypt a string (like filename) with the user's master key 206 | */ 207 | export async function decryptString(encryptedBase64, masterKey) { 208 | const encryptedBytes = base64ToBytes(encryptedBase64); 209 | const iv = encryptedBytes.slice(0, 12); 210 | const encryptedData = encryptedBytes.slice(12); 211 | 212 | const decryptedData = await window.crypto.subtle.decrypt( 213 | { name: "AES-GCM", iv: iv }, 214 | masterKey, 215 | encryptedData 216 | ); 217 | 218 | const decoder = new TextDecoder(); 219 | return decoder.decode(decryptedData); 220 | } 221 | -------------------------------------------------------------------------------- /web/src/AuthContext.jsx: -------------------------------------------------------------------------------- 1 | import React, { createContext, useContext, useState, useEffect } from "react"; 2 | import { deriveKey } from "./encryption"; 3 | import { useConfig } from "./ConfigContext"; 4 | 5 | const AuthContext = createContext(null); 6 | 7 | export const useAuth = () => { 8 | const context = 
useContext(AuthContext); 9 | if (!context) { 10 | throw new Error("useAuth must be used within an AuthProvider"); 11 | } 12 | return context; 13 | }; 14 | 15 | export const AuthProvider = ({ children }) => { 16 | const { storageEnabled, loading: configLoading } = useConfig(); 17 | const [user, setUser] = useState(null); 18 | const [token, setToken] = useState(localStorage.getItem("token")); 19 | const [loading, setLoading] = useState(true); 20 | const [encryptionKey, setEncryptionKey] = useState(null); 21 | 22 | // Restore encryption key from localStorage on mount 23 | useEffect(() => { 24 | const restoreEncryptionKey = async () => { 25 | const storedKeyHex = localStorage.getItem("encryptionKey"); 26 | if (storedKeyHex) { 27 | try { 28 | // Convert hex to bytes 29 | const keyBytes = new Uint8Array( 30 | storedKeyHex.match(/.{1,2}/g).map((byte) => parseInt(byte, 16)) 31 | ); 32 | // Import the key 33 | const key = await window.crypto.subtle.importKey( 34 | "raw", 35 | keyBytes, 36 | { name: "AES-GCM", length: 256 }, 37 | true, 38 | ["encrypt", "decrypt"] 39 | ); 40 | setEncryptionKey(key); 41 | } catch (error) { 42 | console.error("Failed to restore encryption key:", error); 43 | localStorage.removeItem("encryptionKey"); 44 | } 45 | } 46 | }; 47 | 48 | restoreEncryptionKey(); 49 | }, []); 50 | 51 | // Verify token on mount (only when storage/profile is enabled) 52 | useEffect(() => { 53 | const verifyToken = async () => { 54 | if (configLoading) return; 55 | if (!storageEnabled) { 56 | setLoading(false); 57 | return; 58 | } 59 | if (!token) { 60 | setLoading(false); 61 | return; 62 | } 63 | 64 | setLoading(true); 65 | try { 66 | const response = await fetch("/api/auth/verify", { 67 | headers: { 68 | Authorization: `Bearer ${token}`, 69 | }, 70 | }); 71 | 72 | if (response.ok) { 73 | const userData = await response.json(); 74 | setUser(userData); 75 | } else { 76 | // Token is invalid, clear it 77 | localStorage.removeItem("token"); 78 | localStorage.removeItem("encryptionKey"); 79 | setToken(null); 80 | setUser(null); 81 | } 82 | } catch (error) { 83 | console.error("Token verification failed:", error); 84 | localStorage.removeItem("token"); 85 | localStorage.removeItem("encryptionKey"); 86 | setToken(null); 87 | setUser(null); 88 | } finally { 89 | setLoading(false); 90 | } 91 | }; 92 | 93 | verifyToken(); 94 | }, [token, storageEnabled, configLoading]); 95 | 96 | const login = async (email, password) => { 97 | if (!storageEnabled) { 98 | throw new Error("Profiles are disabled on this server"); 99 | } 100 | const response = await fetch("/api/auth/login", { 101 | method: "POST", 102 | headers: { 103 | "Content-Type": "application/json", 104 | }, 105 | body: JSON.stringify({ email, password }), 106 | }); 107 | 108 | if (!response.ok) { 109 | const error = await response.json(); 110 | throw new Error(error.error || "Login failed"); 111 | } 112 | 113 | const data = await response.json(); 114 | localStorage.setItem("token", data.token); 115 | setToken(data.token); 116 | setUser(data.user); 117 | 118 | // Derive encryption key from password and salt 119 | const key = await deriveKey(password, data.user.encryption_salt); 120 | setEncryptionKey(key); 121 | 122 | // Store encryption key in localStorage for persistence across sessions 123 | const exportedKey = await window.crypto.subtle.exportKey("raw", key); 124 | const keyArray = new Uint8Array(exportedKey); 125 | const keyHex = Array.from(keyArray) 126 | .map((b) => b.toString(16).padStart(2, "0")) 127 | .join(""); 128 | 
localStorage.setItem("encryptionKey", keyHex); 129 | 130 | return data; 131 | }; 132 | 133 | const register = async (email, password, captchaToken, captchaAnswer) => { 134 | if (!storageEnabled) { 135 | throw new Error("Profiles are disabled on this server"); 136 | } 137 | const response = await fetch("/api/auth/register", { 138 | method: "POST", 139 | headers: { 140 | "Content-Type": "application/json", 141 | }, 142 | body: JSON.stringify({ 143 | email, 144 | password, 145 | captcha_token: captchaToken, 146 | captcha_answer: Number(captchaAnswer), 147 | }), 148 | }); 149 | 150 | if (!response.ok) { 151 | const error = await response.json(); 152 | throw new Error(error.error || "Registration failed"); 153 | } 154 | 155 | const data = await response.json(); 156 | return data; 157 | }; 158 | 159 | const verifyEmailToken = async (tokenParam) => { 160 | const response = await fetch(`/api/auth/verify-email?token=${encodeURIComponent(tokenParam)}`); 161 | if (!response.ok) { 162 | const error = await response.json().catch(() => ({})); 163 | throw new Error(error.error || "Verification failed"); 164 | } 165 | 166 | const data = await response.json(); 167 | if (data?.token && data?.user) { 168 | localStorage.setItem("token", data.token); 169 | setToken(data.token); 170 | setUser(data.user); 171 | } else { 172 | throw new Error("Verification response invalid"); 173 | } 174 | return data; 175 | }; 176 | 177 | const logout = () => { 178 | localStorage.removeItem("token"); 179 | localStorage.removeItem("encryptionKey"); 180 | setToken(null); 181 | setUser(null); 182 | setEncryptionKey(null); 183 | window.location.href = "/"; 184 | }; 185 | 186 | const value = { 187 | user, 188 | token, 189 | loading, 190 | encryptionKey, 191 | setEncryptionKey, 192 | login, 193 | register, 194 | verifyEmailToken, 195 | logout, 196 | isAuthenticated: !!user, 197 | }; 198 | 199 | return ( 200 | {children} 201 | ); 202 | }; 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | share-screenshot 3 |
4 | Version 5 | Build Status 6 | GitHub Sponsors 7 |

8 | 9 | Transfer files between machines through the web, through a CLI, or both. 10 | 11 | ## Usage 12 | 13 | Goto and enter a room. Have another peer enter the same room to transfer files between the two. 14 | 15 | You can use the [command-line tool](#command-line-tool), e2ecp, as a client that can send/receive from the website or from another command-line: 16 | 17 | **Sending a file:** 18 | 19 | ```bash 20 | e2ecp send 21 | ``` 22 | 23 | **Receiving a file:** 24 | 25 | ```bash 26 | e2ecp receive 27 | ``` 28 | 29 | **Uploading to your account:** 30 | 31 | Register at to store files in your encrypted account. Your files are end-to-end encrypted - the server never sees the plaintext. 32 | 33 | Authenticate from CLI: 34 | 35 | ```bash 36 | e2ecp auth 37 | ``` 38 | 39 | Upload encrypted files: 40 | 41 | ```bash 42 | e2ecp upload 43 | ``` 44 | 45 | Access your files anytime from the website or CLI. Share them with generated links that include the decryption key. The password you enter must match your account password. 46 | 47 | ## Command-line tool 48 | 49 | The command-line tool, e2ecp, is available as a single binary for Windows, Mac OS, or Linux. Simply download the [latest release](https://github.com/schollz/e2ecp/releases/latest) or install with: 50 | 51 | ### Homebrew 52 | 53 | ```bash 54 | brew install schollz/tap/e2ecp 55 | ``` 56 | 57 | ### Arch Linux 58 | 59 | ```bash 60 | paru -S e2ecp 61 | # or 62 | yay -S e2ecp 63 | ``` 64 | 65 | ### Other distros 66 | 67 | ```bash 68 | curl https://e2ecp.com | bash 69 | ``` 70 | 71 | ### Source code 72 | 73 | Make sure you have node (>=v24) and Go installed, then clone and build: 74 | 75 | ```bash 76 | git clone https://github.com/schollz/e2ecp 77 | cd e2ecp && make 78 | ``` 79 | 80 | 81 | ## Run your own relay server 82 | 83 | You can run your own relay server if you want to self-host, using the [command-line tool](#command-line-tool). 84 | 85 | ```bash 86 | e2ecp serve --port 8080 87 | ``` 88 | 89 | ### Enable profile/storage features 90 | 91 | By default, the relay only serves WebSocket rooms. To enable the profile, login, and encrypted file storage APIs, create a `.env` file: 92 | 93 | ```bash 94 | ALLOW_STORAGE_PROFILE=yes 95 | 96 | # Database configuration 97 | DATABASE_HOST=localhost 98 | DATABASE_PORT=5432 99 | DATABASE_USER=postgres 100 | DATABASE_PASSWORD=your_password 101 | DATABASE_NAME=e2ecp 102 | 103 | # Email configuration (for registration/verification) 104 | MAILJET_API_KEY=your_api_key 105 | MAILJET_API_SECRET=your_api_secret 106 | MAILJET_FROM_EMAIL=noreply@yourdomain.com 107 | MAILJET_FROM_NAME=YourAppName 108 | ``` 109 | 110 | Requires PostgreSQL for user accounts and file storage. Get Mailjet credentials at . If `ALLOW_STORAGE_PROFILE` is not `yes`, profile endpoints stay disabled and neither database nor Mailjet are required. 111 | 112 | ### Using Docker 113 | 114 | You can also run the relay server using Docker. There are two options: 115 | 116 | #### Option 1: Build from pre-compiled binary (faster) 117 | 118 | First, build the binary locally: 119 | 120 | ```bash 121 | make build 122 | ``` 123 | 124 | Then build and run the Docker container: 125 | 126 | ```bash 127 | # Build the Docker image 128 | docker build -t e2ecp-relay . 
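# (Illustrative sketch) If the profile/storage features from the .env section above
# are wanted inside the container, the same settings can be supplied as environment
# variables via Docker's standard --env-file flag. Whether the relay reads every
# option from the environment in this setup is an assumption, not a tested recipe.
docker run --env-file .env -p 3001:3001 e2ecp-relay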
129 | 130 | # Run with default settings (port 3001) 131 | docker run -p 3001:3001 e2ecp-relay 132 | 133 | # Run with custom settings 134 | docker run -p 8080:8080 e2ecp-relay --port 8080 --max-rooms 50 --max-rooms-per-ip 5 --log-level debug 135 | ``` 136 | 137 | #### Option 2: Build everything from source (no local dependencies required) 138 | 139 | Use the multi-stage Dockerfile that builds both the web assets and Go binary: 140 | 141 | ```bash 142 | # Build the Docker image from source 143 | docker build -f Dockerfile.build -t e2ecp-relay . 144 | 145 | # Run with default settings 146 | docker run -p 3001:3001 e2ecp-relay 147 | 148 | # Run with custom settings 149 | docker run -p 8080:8080 e2ecp-relay --port 8080 --max-rooms 50 --max-rooms-per-ip 5 --log-level debug 150 | ``` 151 | 152 | **Available options:** 153 | - `--port`: Port to listen on (default: 3001) 154 | - `--max-rooms`: Maximum number of concurrent rooms (default: 10) 155 | - `--max-rooms-per-ip`: Maximum rooms per IP address (default: 2) 156 | - `--log-level`: Logging level (debug, info, warn, or error; default: info) 157 | 158 | ### Using Docker Compose 159 | 160 | #### Option 1: Build from pre-compiled binary (faster) 161 | 162 | `compose.yml` 163 | ```yaml 164 | services: 165 | e2ecp-relay: 166 | pull_policy: build 167 | build: 168 | context: https://github.com/schollz/e2ecp.git#${VERSION:-main} 169 | dockerfile: Dockerfile 170 | labels: 171 | - x-e2ecp-relay-version=${VERSION:-main} 172 | ports: 173 | - ${PORT:-3001}:${PORT:-3001} 174 | command: 175 | - --port 176 | - ${PORT:-3001} 177 | - --max-rooms 178 | - ${MAX_ROOMS:-10} 179 | - --max-rooms-per-ip 180 | - ${MAX_ROOMS_PER_IP:-2} 181 | - --log-level 182 | - ${LOG_LEVEL:-info} 183 | ``` 184 | 185 | `.env` 186 | ``` 187 | VERSION=v3.0.5 188 | PORT=8080 189 | MAX_ROOMS=50 190 | MAX_ROOMS_PER_IP=5 191 | LOG_LEVEL=debug 192 | ALLOW_STORAGE_PROFILE=yes 193 | ``` 194 | 195 | #### Option 2: Build everything from source (no local dependencies required) 196 | 197 | `compose.yml` 198 | ```yaml 199 | services: 200 | e2ecp-relay: 201 | pull_policy: build 202 | build: 203 | context: https://github.com/schollz/e2ecp.git#${VERSION:-main} 204 | dockerfile: Dockerfile.build 205 | labels: 206 | - x-e2ecp-relay-version=${VERSION:-main} 207 | ports: 208 | - ${PORT:-3001}:${PORT:-3001} 209 | command: 210 | - --port 211 | - ${PORT:-3001} 212 | - --max-rooms 213 | - ${MAX_ROOMS:-10} 214 | - --max-rooms-per-ip 215 | - ${MAX_ROOMS_PER_IP:-2} 216 | - --log-level 217 | - ${LOG_LEVEL:-info} 218 | ``` 219 | 220 | `.env` 221 | ``` 222 | VERSION=v3.0.5 223 | PORT=8080 224 | MAX_ROOMS=50 225 | MAX_ROOMS_PER_IP=5 226 | LOG_LEVEL=debug 227 | ALLOW_STORAGE_PROFILE=yes 228 | ``` 229 | 230 | ## About 231 | 232 | This project is created and maintained by [Zack](https://schollz.com). If you find it useful, please consider sponsoring me on [GitHub Sponsors](https://github.com/sponsors/schollz). 233 | 234 | It works by establishing a peer-to-peer connection through a zero-knowledge, WebSocket-based relay server. All transferred data is end-to-end encrypted with ECDH P-256 key exchange and AES-GCM authenticated encryption, so the relay server never has access to the data being transferred. 235 | 236 | ## Testing 237 | 238 | e2ecp includes comprehensive test suites to ensure reliability: 239 | 240 | ### Unit and Integration Tests 241 | 242 | Run Go tests for the core functionality: 243 | 244 | ```bash 245 | go test -v ./...
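# Illustrative additions (not in the original docs); package paths follow the repository layout above
go test -v ./src/relay/...   # only the relay package (e.g. the zero-knowledge tests)
go test -cover ./...         # all packages, with a coverage summary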
246 | ``` 247 | 248 | ### End-to-End Tests 249 | 250 | Playwright tests verify web-to-web and web-to-CLI file transfers: 251 | 252 | ```bash 253 | cd tests 254 | ./run-tests.sh 255 | ``` 256 | 257 | See [tests/README.md](tests/README.md) for detailed testing documentation. 258 | 259 | ## License 260 | 261 | MIT — see [LICENSE](LICENSE). 262 | -------------------------------------------------------------------------------- /web/src/Navbar.jsx: -------------------------------------------------------------------------------- 1 | import { useLocation, useNavigate } from "react-router-dom"; 2 | import { useAuth } from "./AuthContext"; 3 | import { useConfig } from "./ConfigContext"; 4 | import { useDarkMode } from "./DarkModeContext"; 5 | 6 | export default function Navbar({ title = "e2ecp", subtitle = null }) { 7 | const navigate = useNavigate(); 8 | const location = useLocation(); 9 | const { isAuthenticated, logout } = useAuth(); 10 | const { storageEnabled } = useConfig(); 11 | const { darkMode, toggleDarkMode } = useDarkMode(); 12 | 13 | const pathname = location.pathname; 14 | const searchParams = new URLSearchParams(location.search); 15 | const loginMode = searchParams.get("mode") || "login"; 16 | 17 | const activeRing = "ring-4 ring-black dark:ring-white ring-offset-2 ring-offset-white dark:ring-offset-black"; 18 | 19 | const isTransferActive = pathname === "/"; 20 | const isAboutActive = pathname === "/about"; 21 | const isStorageActive = pathname.startsWith("/storage"); 22 | const isSettingsActive = pathname.startsWith("/settings"); 23 | const isLoginPage = pathname === "/login"; 24 | const isSignUpActive = isLoginPage && loginMode === "signup"; 25 | const isSignInActive = isLoginPage && !isSignUpActive; 26 | 27 | return ( 28 |
29 |
30 |
31 |

{title}

32 | {subtitle &&

{subtitle}

} 33 |
34 |
35 | 43 | 50 | 58 | {isAuthenticated ? ( 59 | <> 60 | {storageEnabled && ( 61 | 68 | )} 69 | 77 | 84 | 85 | ) : ( 86 | <> 87 | 95 | 102 | 103 | )} 104 |
105 |
106 |
107 | ); 108 | } 109 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "embed" 6 | "fmt" 7 | "log/slog" 8 | "net/url" 9 | "os" 10 | "strings" 11 | 12 | "github.com/joho/godotenv" 13 | "github.com/schollz/e2ecp/src/client" 14 | "github.com/schollz/e2ecp/src/relay" 15 | 16 | "github.com/spf13/cobra" 17 | ) 18 | 19 | //go:embed all:web/dist install.sh 20 | var staticFS embed.FS 21 | 22 | var ( 23 | Version = "dev" 24 | logLevel string 25 | domain string 26 | ) 27 | 28 | var rootCmd = &cobra.Command{ 29 | Use: "e2ecp", 30 | Short: "Secure E2E encrypted file transfer", 31 | Long: "Zero-knowledge relay for end-to-end encrypted file transfers using ECDH + AES-GCM", 32 | Version: Version, 33 | Example: ` # Send a file (generates random room name) 34 | e2ecp send myfile.txt 35 | 36 | # Send a file to a specific room 37 | e2ecp send myfile.txt cool-room 38 | 39 | # Send a folder (automatically zipped) 40 | e2ecp send ./my-folder 41 | 42 | # Receive a file from a room 43 | e2ecp receive cool-room 44 | 45 | # Receive to a specific directory 46 | e2ecp receive cool-room -o ~/Downloads 47 | 48 | # Start your own relay server 49 | e2ecp serve -p 3001`, 50 | } 51 | 52 | var serveCmd = &cobra.Command{ 53 | Use: "serve", 54 | Short: "Start the relay server", 55 | Long: "Start the WebSocket relay server for E2E encrypted file transfers", 56 | Run: func(cmd *cobra.Command, args []string) { 57 | port, _ := cmd.Flags().GetInt("port") 58 | maxRooms, _ := cmd.Flags().GetInt("max-rooms") 59 | maxRoomsPerIP, _ := cmd.Flags().GetInt("max-rooms-per-ip") 60 | dbURL, _ := cmd.Flags().GetString("db-url") 61 | logger := createLogger(logLevel) 62 | relay.Start(port, maxRooms, maxRoomsPerIP, dbURL, staticFS, logger) 63 | }, 64 | } 65 | 66 | var sendCmd = &cobra.Command{ 67 | Use: "send [room]", 68 | Short: "Send a file, folder, or text to a room", 69 | Long: "Send a file, folder, or text through E2E encryption to a specified room. Folders are automatically zipped for transfer. If the argument is not a file or folder, it will be sent as text.", 70 | Args: cobra.RangeArgs(1, 2), 71 | Run: func(cmd *cobra.Command, args []string) { 72 | input := args[0] 73 | var roomID string 74 | if len(args) >= 2 { 75 | roomID = args[1] 76 | } else { 77 | roomID = generateRandomRoom() 78 | } 79 | server, _ := cmd.Flags().GetString("server") 80 | if server == "" { 81 | server = getWebSocketURL(domain) 82 | } 83 | logger := createLogger(logLevel) 84 | 85 | // Check if input is a file or folder 86 | _, err := os.Stat(input) 87 | if err != nil { 88 | // Path doesn't exist - treat as text 89 | client.SendText(input, roomID, server, logger) 90 | } else { 91 | // Path exists - send as file or folder 92 | client.SendFile(input, roomID, server, logger) 93 | } 94 | }, 95 | } 96 | 97 | var receiveCmd = &cobra.Command{ 98 | Use: "receive [room]", 99 | Short: "Receive a file or folder from a room", 100 | Long: "Receive a file or folder through E2E encryption from a specified room. 
Folders are automatically extracted.", 101 | Args: cobra.RangeArgs(0, 1), 102 | Run: func(cmd *cobra.Command, args []string) { 103 | var roomID string 104 | if len(args) >= 1 { 105 | roomID = args[0] 106 | } else { 107 | roomID = promptForRoom() 108 | } 109 | server, _ := cmd.Flags().GetString("server") 110 | if server == "" { 111 | server = getWebSocketURL(domain) 112 | } 113 | output, _ := cmd.Flags().GetString("output") 114 | force, _ := cmd.Flags().GetBool("force") 115 | logger := createLogger(logLevel) 116 | client.ReceiveFile(roomID, server, output, force, logger) 117 | }, 118 | } 119 | 120 | var authCmd = &cobra.Command{ 121 | Use: "auth", 122 | Short: "Authenticate CLI with your e2ecp account", 123 | Long: "Start device authentication flow to link this CLI with your e2ecp account. You'll receive a code to enter on the website.", 124 | Run: func(cmd *cobra.Command, args []string) { 125 | server, _ := cmd.Flags().GetString("server") 126 | if server == "" { 127 | server = domain 128 | } 129 | logger := createLogger(logLevel) 130 | client.AuthenticateDevice(server, logger) 131 | }, 132 | } 133 | 134 | var uploadCmd = &cobra.Command{ 135 | Use: "upload <file>", 136 | Short: "Upload a file to your authenticated account", 137 | Long: "Upload a file to your e2ecp account storage. You must be authenticated first using 'e2ecp auth'.", 138 | Args: cobra.ExactArgs(1), 139 | Run: func(cmd *cobra.Command, args []string) { 140 | filePath := args[0] 141 | server, _ := cmd.Flags().GetString("server") 142 | if server == "" { 143 | server = domain 144 | } 145 | logger := createLogger(logLevel) 146 | client.UploadFile(filePath, server, logger) 147 | }, 148 | } 149 | 150 | func promptForRoom() string { 151 | fmt.Print("Enter room name: ") 152 | reader := bufio.NewReader(os.Stdin) 153 | roomID, _ := reader.ReadString('\n') 154 | return strings.TrimSpace(roomID) 155 | } 156 | 157 | func generateRandomRoom() string { 158 | // Generate a random 3-word icon-based room name 159 | return relay.GenerateRandomIconMnemonic(3) 160 | } 161 | 162 | func getWebSocketURL(domain string) string { 163 | if domain == "" || domain == "https://" { 164 | return "wss://" 165 | } else if domain == "http://" { 166 | return "ws://" 167 | } 168 | 169 | // Parse the URL 170 | u, err := url.Parse(domain) 171 | if err != nil || u.Scheme == "" { 172 | // If parsing fails or no scheme, assume https 173 | u, _ = url.Parse("https://" + domain) 174 | } 175 | 176 | // Convert http/https schemes to ws/wss 177 | switch u.Scheme { 178 | case "https": 179 | u.Scheme = "wss" 180 | case "http": 181 | u.Scheme = "ws" 182 | default: 183 | // For any other scheme or empty, default to wss 184 | u.Scheme = "wss" 185 | } 186 | 187 | return u.String() 188 | } 189 | 190 | func createLogger(level string) *slog.Logger { 191 | var logLevel slog.Level 192 | switch level { 193 | case "debug": 194 | logLevel = slog.LevelDebug 195 | case "info": 196 | logLevel = slog.LevelInfo 197 | case "warn": 198 | logLevel = slog.LevelWarn 199 | case "error": 200 | logLevel = slog.LevelError 201 | default: 202 | logLevel = slog.LevelInfo 203 | } 204 | opts := &slog.HandlerOptions{Level: logLevel} 205 | return slog.New(slog.NewTextHandler(os.Stdout, opts)) 206 | } 207 | 208 | func init() { 209 | rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", "Log level (debug, info, warn, error)") 210 | rootCmd.PersistentFlags().StringVar(&domain, "domain", "https://e2ecp.com", "Domain name for the server") 211 | 212 | serveCmd.Flags().IntP("port", "p", 3001, "Port to 
listen on") 213 | serveCmd.Flags().Int("max-rooms", 10, "Maximum number of concurrent rooms allowed on the server") 214 | serveCmd.Flags().Int("max-rooms-per-ip", 2, "Maximum number of rooms per IP address") 215 | serveCmd.Flags().String("db-url", "", "PostgreSQL connection string for session logging (falls back to DATABASE_URL; empty to disable)") 216 | sendCmd.Flags().StringP("server", "s", "", "Server URL (overrides --domain)") 217 | receiveCmd.Flags().StringP("server", "s", "", "Server URL (overrides --domain)") 218 | receiveCmd.Flags().StringP("output", "o", ".", "Output directory") 219 | receiveCmd.Flags().BoolP("force", "f", false, "Force overwrite existing files without prompting") 220 | authCmd.Flags().StringP("server", "s", "", "Server URL (overrides --domain)") 221 | uploadCmd.Flags().StringP("server", "s", "", "Server URL (overrides --domain)") 222 | 223 | rootCmd.AddCommand(serveCmd) 224 | rootCmd.AddCommand(sendCmd) 225 | rootCmd.AddCommand(receiveCmd) 226 | rootCmd.AddCommand(authCmd) 227 | rootCmd.AddCommand(uploadCmd) 228 | } 229 | 230 | func main() { 231 | // Load environment variables from .env if present 232 | _ = godotenv.Load(".env") 233 | 234 | if err := rootCmd.Execute(); err != nil { 235 | fmt.Println(err) 236 | os.Exit(1) 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log/slog" 5 | "testing" 6 | ) 7 | 8 | func TestGetWebSocketURL(t *testing.T) { 9 | tests := []struct { 10 | name string 11 | domain string 12 | expected string 13 | }{ 14 | { 15 | name: "https domain", 16 | domain: "https://example.com", 17 | expected: "wss://example.com", 18 | }, 19 | { 20 | name: "http domain", 21 | domain: "http://example.com", 22 | expected: "ws://example.com", 23 | }, 24 | { 25 | name: "no protocol assumes https", 26 | domain: "example.com", 27 | expected: "wss://example.com", 28 | }, 29 | { 30 | name: "https with port", 31 | domain: "https://example.com:3001", 32 | expected: "wss://example.com:3001", 33 | }, 34 | { 35 | name: "http with port", 36 | domain: "http://localhost:3001", 37 | expected: "ws://localhost:3001", 38 | }, 39 | { 40 | name: "domain with path", 41 | domain: "https://example.com/api", 42 | expected: "wss://example.com/api", 43 | }, 44 | } 45 | 46 | for _, tt := range tests { 47 | t.Run(tt.name, func(t *testing.T) { 48 | result := getWebSocketURL(tt.domain) 49 | if result != tt.expected { 50 | t.Errorf("getWebSocketURL(%s) = %s; expected %s", tt.domain, result, tt.expected) 51 | } 52 | }) 53 | } 54 | } 55 | 56 | func TestGetWebSocketURLConsistency(t *testing.T) { 57 | // Test that same input gives same output 58 | domain := "https://example.com" 59 | result1 := getWebSocketURL(domain) 60 | result2 := getWebSocketURL(domain) 61 | 62 | if result1 != result2 { 63 | t.Errorf("getWebSocketURL not consistent: %s != %s", result1, result2) 64 | } 65 | } 66 | 67 | func TestGetWebSocketURLEmptyString(t *testing.T) { 68 | result := getWebSocketURL("") 69 | expected := "wss://" 70 | if result != expected { 71 | t.Errorf("getWebSocketURL('') = %s; expected %s", result, expected) 72 | } 73 | } 74 | 75 | func TestGetWebSocketURLHttpsPrefix(t *testing.T) { 76 | // Test that https:// is properly converted to wss:// 77 | domain := "https://secure.example.com" 78 | result := getWebSocketURL(domain) 79 | 80 | if result[:6] != "wss://" { 81 | t.Errorf("getWebSocketURL(%s) should start with 
wss://, got %s", domain, result) 82 | } 83 | } 84 | 85 | func TestGetWebSocketURLHttpPrefix(t *testing.T) { 86 | // Test that http:// is properly converted to ws:// 87 | domain := "http://insecure.example.com" 88 | result := getWebSocketURL(domain) 89 | 90 | if result[:5] != "ws://" { 91 | t.Errorf("getWebSocketURL(%s) should start with ws://, got %s", domain, result) 92 | } 93 | } 94 | 95 | func TestGetWebSocketURLPreservesPath(t *testing.T) { 96 | domain := "https://example.com/path/to/resource" 97 | result := getWebSocketURL(domain) 98 | expected := "wss://example.com/path/to/resource" 99 | 100 | if result != expected { 101 | t.Errorf("getWebSocketURL should preserve path: expected %s, got %s", expected, result) 102 | } 103 | } 104 | 105 | func TestCreateLogger(t *testing.T) { 106 | tests := []struct { 107 | name string 108 | level string 109 | expected slog.Level 110 | }{ 111 | {"debug level", "debug", slog.LevelDebug}, 112 | {"info level", "info", slog.LevelInfo}, 113 | {"warn level", "warn", slog.LevelWarn}, 114 | {"error level", "error", slog.LevelError}, 115 | {"invalid level defaults to info", "invalid", slog.LevelInfo}, 116 | {"empty string defaults to info", "", slog.LevelInfo}, 117 | } 118 | 119 | for _, tt := range tests { 120 | t.Run(tt.name, func(t *testing.T) { 121 | logger := createLogger(tt.level) 122 | if logger == nil { 123 | t.Fatal("Expected non-nil logger") 124 | } 125 | 126 | // Logger should be usable 127 | logger.Info("test message") 128 | }) 129 | } 130 | } 131 | 132 | func TestCreateLoggerOutput(t *testing.T) { 133 | // Test that logger can write without error 134 | logger := createLogger("info") 135 | 136 | // Just verify logger is usable and doesn't panic 137 | logger.Info("test message", "key", "value") 138 | logger.Debug("debug message") 139 | logger.Warn("warn message") 140 | logger.Error("error message") 141 | 142 | // Test passed if no panic occurred 143 | } 144 | 145 | func TestCreateLoggerLevels(t *testing.T) { 146 | // Test all valid log levels 147 | levels := []string{"debug", "info", "warn", "error"} 148 | 149 | for _, level := range levels { 150 | logger := createLogger(level) 151 | if logger == nil { 152 | t.Errorf("createLogger(%s) returned nil", level) 153 | } 154 | } 155 | } 156 | 157 | func TestCreateLoggerCaseInsensitive(t *testing.T) { 158 | // The current implementation is case-sensitive, but we test what it does 159 | loggerLower := createLogger("info") 160 | loggerUpper := createLogger("INFO") 161 | 162 | if loggerLower == nil { 163 | t.Error("createLogger('info') returned nil") 164 | } 165 | if loggerUpper == nil { 166 | t.Error("createLogger('INFO') returned nil") 167 | } 168 | } 169 | 170 | func TestCreateLoggerDefaultLevel(t *testing.T) { 171 | // Test various invalid inputs that should default to info 172 | invalidLevels := []string{ 173 | "invalid", 174 | "DEBUG", // uppercase 175 | "Info", // mixed case 176 | "trace", // not a valid level 177 | "fatal", // not a valid level 178 | "unknown", 179 | } 180 | 181 | for _, level := range invalidLevels { 182 | logger := createLogger(level) 183 | if logger == nil { 184 | t.Errorf("createLogger(%s) returned nil, expected default logger", level) 185 | } 186 | } 187 | } 188 | 189 | func TestCreateLoggerNonNil(t *testing.T) { 190 | // Test that logger is never nil 191 | testCases := []string{"debug", "info", "warn", "error", "invalid", ""} 192 | 193 | for _, tc := range testCases { 194 | logger := createLogger(tc) 195 | if logger == nil { 196 | t.Errorf("createLogger(%s) returned nil logger", 
tc) 197 | } 198 | } 199 | } 200 | 201 | func TestGetWebSocketURLMultipleCalls(t *testing.T) { 202 | // Test that function is stateless and consistent 203 | domain := "https://test.example.com" 204 | 205 | results := make([]string, 10) 206 | for i := 0; i < 10; i++ { 207 | results[i] = getWebSocketURL(domain) 208 | } 209 | 210 | // All results should be identical 211 | for i := 1; i < len(results); i++ { 212 | if results[i] != results[0] { 213 | t.Errorf("Inconsistent results: %s != %s", results[i], results[0]) 214 | } 215 | } 216 | } 217 | 218 | func TestGetWebSocketURLSpecialCharacters(t *testing.T) { 219 | domain := "https://example.com/path?query=value&other=123" 220 | result := getWebSocketURL(domain) 221 | expected := "wss://example.com/path?query=value&other=123" 222 | 223 | if result != expected { 224 | t.Errorf("getWebSocketURL should preserve query params: expected %s, got %s", expected, result) 225 | } 226 | } 227 | 228 | func TestGetWebSocketURLIPAddress(t *testing.T) { 229 | tests := []struct { 230 | input string 231 | expected string 232 | }{ 233 | {"https://192.168.1.1", "wss://192.168.1.1"}, 234 | {"http://127.0.0.1", "ws://127.0.0.1"}, 235 | {"https://[::1]", "wss://[::1]"}, 236 | {"192.168.1.1", "wss://192.168.1.1"}, 237 | } 238 | 239 | for _, tt := range tests { 240 | result := getWebSocketURL(tt.input) 241 | if result != tt.expected { 242 | t.Errorf("getWebSocketURL(%s) = %s; expected %s", tt.input, result, tt.expected) 243 | } 244 | } 245 | } 246 | 247 | func TestGetWebSocketURLEdgeCases(t *testing.T) { 248 | tests := []struct { 249 | name string 250 | input string 251 | expected string 252 | }{ 253 | {"just https://", "https://", "wss://"}, 254 | {"just http://", "http://", "ws://"}, 255 | {"single char domain", "https://x", "wss://x"}, 256 | {"trailing slash", "https://example.com/", "wss://example.com/"}, 257 | } 258 | 259 | for _, tt := range tests { 260 | t.Run(tt.name, func(t *testing.T) { 261 | result := getWebSocketURL(tt.input) 262 | if result != tt.expected { 263 | t.Errorf("expected %s, got %s", tt.expected, result) 264 | } 265 | }) 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /src/relay/database.go: -------------------------------------------------------------------------------- 1 | package relay 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "log/slog" 7 | "sync" 8 | "time" 9 | 10 | "github.com/golang-migrate/migrate/v4" 11 | migratedb "github.com/golang-migrate/migrate/v4/database" 12 | "github.com/golang-migrate/migrate/v4/database/postgres" 13 | "github.com/golang-migrate/migrate/v4/source/iofs" 14 | _ "github.com/lib/pq" 15 | migrationfs "github.com/schollz/e2ecp/migrations" 16 | ) 17 | 18 | // SessionLog represents a relay session in the database 19 | type SessionLog struct { 20 | SessionID string 21 | IPFrom string 22 | IPTo string 23 | BandwidthBytes int64 24 | SessionStart time.Time 25 | SessionEnd *time.Time 26 | } 27 | 28 | // Database handles PostgreSQL operations for relay session logging 29 | type Database struct { 30 | db *sql.DB 31 | logger *slog.Logger 32 | mutex sync.Mutex 33 | } 34 | 35 | var ( 36 | database *Database 37 | databaseOnce sync.Once 38 | ) 39 | 40 | // InitDatabase initializes the database for session logging 41 | // Only PostgreSQL is supported. 
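// Illustrative call (the DSN below is a placeholder, not a value taken from this repo):
//
//	err := InitDatabase("postgres://user:password@localhost:5432/e2ecp?sslmode=disable", slog.Default())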
42 | func InitDatabase(databaseURL string, log *slog.Logger) error { 43 | var err error 44 | databaseOnce.Do(func() { 45 | if databaseURL == "" { 46 | err = fmt.Errorf("database URL is required") 47 | return 48 | } 49 | 50 | database = &Database{ 51 | logger: log, 52 | } 53 | 54 | log.Info("Using PostgreSQL database", "dsn", maskPassword(databaseURL)) 55 | 56 | database.db, err = sql.Open("postgres", databaseURL) 57 | if err != nil { 58 | err = fmt.Errorf("failed to open database: %w", err) 59 | return 60 | } 61 | 62 | // Test the connection 63 | if err = database.db.Ping(); err != nil { 64 | err = fmt.Errorf("failed to connect to database: %w", err) 65 | return 66 | } 67 | 68 | if err = runMigrations(database.db, log); err != nil { 69 | err = fmt.Errorf("failed to run migrations: %w", err) 70 | return 71 | } 72 | 73 | log.Info("Database initialized successfully", "driver", "postgres") 74 | }) 75 | 76 | return err 77 | } 78 | 79 | // maskPassword masks the password in a database URL for logging 80 | func maskPassword(dsn string) string { 81 | // Simple masking for postgres URLs 82 | // Format: postgresql://user:password@host:port/database 83 | if len(dsn) > 20 { 84 | return dsn[:20] + "***" 85 | } 86 | return "***" 87 | } 88 | 89 | func runMigrations(db *sql.DB, log *slog.Logger) error { 90 | // Prepare embedded migrations from the postgres subdirectory 91 | srcDriver, err := iofs.New(migrationfs.FS, "postgres") 92 | if err != nil { 93 | return fmt.Errorf("failed to load migrations: %w", err) 94 | } 95 | 96 | // Prepare the PostgreSQL database driver 97 | var dbDriver migratedb.Driver 98 | dbDriver, err = postgres.WithInstance(db, &postgres.Config{}) 99 | if err != nil { 100 | return fmt.Errorf("failed to init migrate postgres driver: %w", err) 101 | } 102 | 103 | m, err := migrate.NewWithInstance("iofs", srcDriver, "postgres", dbDriver) 104 | if err != nil { 105 | return fmt.Errorf("failed to create migrator: %w", err) 106 | } 107 | 108 | // If schema_migrations table is missing but core tables exist (legacy), 109 | // mark baseline at version 1 so we can move forward with migrations. 
110 | hasSchemaMigrations, _ := tableExists(db, "schema_migrations") 111 | if !hasSchemaMigrations { 112 | tablesPresent := existingSchemaPresent(db) 113 | if tablesPresent { 114 | log.Warn("Detected legacy database without migration history; baselining at version 1") 115 | if forceErr := m.Force(1); forceErr != nil { 116 | return fmt.Errorf("failed to baseline migrations: %w", forceErr) 117 | } 118 | } 119 | } 120 | 121 | if err := m.Up(); err != nil && err != migrate.ErrNoChange { 122 | return fmt.Errorf("migration failed: %w", err) 123 | } 124 | 125 | log.Info("Migrations completed successfully", "driver", "postgres") 126 | return nil 127 | } 128 | 129 | func tableExists(db *sql.DB, name string) (bool, error) { 130 | var count int 131 | err := db.QueryRow(` 132 | SELECT COUNT(1) 133 | FROM information_schema.tables 134 | WHERE table_schema = 'public' AND table_name = $1; 135 | `, name).Scan(&count) 136 | 137 | return count > 0, err 138 | } 139 | 140 | func existingSchemaPresent(db *sql.DB) bool { 141 | // Check for any of our core tables 142 | for _, tbl := range []string{"logs", "users", "files"} { 143 | exists, err := tableExists(db, tbl) 144 | if err == nil && exists { 145 | return true 146 | } 147 | } 148 | return false 149 | } 150 | 151 | // GetDatabase returns the singleton database instance 152 | func GetDatabase() *Database { 153 | return database 154 | } 155 | 156 | // StartSession creates a new session log entry 157 | func (db *Database) StartSession(sessionID, ipFrom, ipTo string) error { 158 | if db == nil || db.db == nil { 159 | return fmt.Errorf("database not initialized") 160 | } 161 | 162 | db.mutex.Lock() 163 | defer db.mutex.Unlock() 164 | 165 | query := ` 166 | INSERT INTO logs (session_id, ip_from, ip_to, bandwidth_bytes, session_start) 167 | VALUES ($1, $2, $3, 0, $4) 168 | ` 169 | 170 | _, err := db.db.Exec(query, sessionID, ipFrom, ipTo, time.Now()) 171 | if err != nil { 172 | db.logger.Error("Failed to start session", "error", err, "session_id", sessionID) 173 | return fmt.Errorf("failed to start session: %w", err) 174 | } 175 | 176 | db.logger.Debug("Session started", "session_id", sessionID, "ip_from", ipFrom, "ip_to", ipTo) 177 | return nil 178 | } 179 | 180 | // UpdateBandwidth updates the bandwidth for a session 181 | func (db *Database) UpdateBandwidth(sessionID string, bytes int64) error { 182 | if db == nil || db.db == nil { 183 | return fmt.Errorf("database not initialized") 184 | } 185 | 186 | db.mutex.Lock() 187 | defer db.mutex.Unlock() 188 | 189 | query := ` 190 | UPDATE logs 191 | SET bandwidth_bytes = bandwidth_bytes + $1 192 | WHERE session_id = $2 193 | ` 194 | 195 | _, err := db.db.Exec(query, bytes, sessionID) 196 | if err != nil { 197 | db.logger.Error("Failed to update bandwidth", "error", err, "session_id", sessionID) 198 | return fmt.Errorf("failed to update bandwidth: %w", err) 199 | } 200 | 201 | return nil 202 | } 203 | 204 | // EndSession marks a session as ended 205 | func (db *Database) EndSession(sessionID string) error { 206 | if db == nil || db.db == nil { 207 | return fmt.Errorf("database not initialized") 208 | } 209 | 210 | db.mutex.Lock() 211 | defer db.mutex.Unlock() 212 | 213 | query := ` 214 | UPDATE logs 215 | SET session_end = $1 216 | WHERE session_id = $2 217 | ` 218 | 219 | _, err := db.db.Exec(query, time.Now(), sessionID) 220 | if err != nil { 221 | db.logger.Error("Failed to end session", "error", err, "session_id", sessionID) 222 | return fmt.Errorf("failed to end session: %w", err) 223 | } 224 | 225 | 
db.logger.Debug("Session ended", "session_id", sessionID) 226 | return nil 227 | } 228 | 229 | // GetSessionStats retrieves session statistics 230 | func (db *Database) GetSessionStats(sessionID string) (*SessionLog, error) { 231 | if db == nil || db.db == nil { 232 | return nil, fmt.Errorf("database not initialized") 233 | } 234 | 235 | db.mutex.Lock() 236 | defer db.mutex.Unlock() 237 | 238 | query := ` 239 | SELECT session_id, ip_from, ip_to, bandwidth_bytes, session_start, session_end 240 | FROM logs 241 | WHERE session_id = $1 242 | ` 243 | 244 | var log SessionLog 245 | var sessionEnd sql.NullTime 246 | 247 | err := db.db.QueryRow(query, sessionID).Scan( 248 | &log.SessionID, 249 | &log.IPFrom, 250 | &log.IPTo, 251 | &log.BandwidthBytes, 252 | &log.SessionStart, 253 | &sessionEnd, 254 | ) 255 | 256 | if err != nil { 257 | return nil, fmt.Errorf("failed to get session stats: %w", err) 258 | } 259 | 260 | if sessionEnd.Valid { 261 | log.SessionEnd = &sessionEnd.Time 262 | } 263 | 264 | return &log, nil 265 | } 266 | 267 | // Close closes the database connection 268 | func (db *Database) Close() error { 269 | if db == nil || db.db == nil { 270 | return nil 271 | } 272 | 273 | db.mutex.Lock() 274 | defer db.mutex.Unlock() 275 | 276 | return db.db.Close() 277 | } 278 | --------------------------------------------------------------------------------